Bug 1776816: Update wgpu to b370b990 (2022-6-28). r=jgilbert

Differential Revision: https://phabricator.services.mozilla.com/D150447
Jim Blandy 2022-06-28 20:53:15 +00:00
Parent f8df240e86
Commit 484ca146df
111 changed files with 5277 additions and 2163 deletions


@@ -85,12 +85,12 @@ rev = "3484d3e3ebdc8931493aa5df4d7ee9360a90e76b"
 [source."https://github.com/gfx-rs/wgpu"]
 git = "https://github.com/gfx-rs/wgpu"
 replace-with = "vendored-sources"
-rev = "32af4f56"
+rev = "b370b990"
 [source."https://github.com/gfx-rs/naga"]
 git = "https://github.com/gfx-rs/naga"
 replace-with = "vendored-sources"
-rev = "571302e"
+rev = "27d38aae"
 [source."https://github.com/gfx-rs/metal-rs"]
 git = "https://github.com/gfx-rs/metal-rs"

19
Cargo.lock generated

@@ -68,6 +68,15 @@ dependencies = [
  "log",
 ]
+[[package]]
+name = "android_system_properties"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a20ae67ce26261f218e2b3f2f0d01887a9818283ca6fb260fa7c67e253d61c92"
+dependencies = [
+ "libc",
+]
 [[package]]
 name = "anyhow"
 version = "1.0.57"
@@ -3587,7 +3596,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
 [[package]]
 name = "naga"
 version = "0.8.0"
-source = "git+https://github.com/gfx-rs/naga?rev=571302e#571302e3ff09cb856f63a3683da308159872b7cc"
+source = "git+https://github.com/gfx-rs/naga?rev=27d38aae#27d38aae33fdbfa72197847038cb470720594cb1"
 dependencies = [
  "bit-set",
  "bitflags",
@@ -6095,7 +6104,7 @@ dependencies = [
 [[package]]
 name = "wgpu-core"
 version = "0.12.0"
-source = "git+https://github.com/gfx-rs/wgpu?rev=32af4f56#32af4f56079fd2203c46c9c452cfe33fd60a5721"
+source = "git+https://github.com/gfx-rs/wgpu?rev=b370b990#b370b990e543bd8c74c895a0f856604368806e5c"
 dependencies = [
  "arrayvec",
  "bit-vec",
@@ -6112,6 +6121,7 @@ dependencies = [
  "serde",
  "smallvec",
  "thiserror",
+ "web-sys",
  "wgpu-hal",
  "wgpu-types",
 ]
@@ -6119,8 +6129,9 @@ dependencies = [
 [[package]]
 name = "wgpu-hal"
 version = "0.12.0"
-source = "git+https://github.com/gfx-rs/wgpu?rev=32af4f56#32af4f56079fd2203c46c9c452cfe33fd60a5721"
+source = "git+https://github.com/gfx-rs/wgpu?rev=b370b990#b370b990e543bd8c74c895a0f856604368806e5c"
 dependencies = [
+ "android_system_properties",
  "arrayvec",
  "ash",
  "bit-set",
@@ -6156,7 +6167,7 @@ dependencies = [
 [[package]]
 name = "wgpu-types"
 version = "0.12.0"
-source = "git+https://github.com/gfx-rs/wgpu?rev=32af4f56#32af4f56079fd2203c46c9c452cfe33fd60a5721"
+source = "git+https://github.com/gfx-rs/wgpu?rev=b370b990#b370b990e543bd8c74c895a0f856604368806e5c"
 dependencies = [
  "bitflags",
  "bitflags_serde_shim",


@@ -84,13 +84,13 @@ ffi::WGPURenderPass* BeginRenderPass(
     desc.depth_stencil_attachment = &dsDesc;
   }
-  if (aDesc.mColorAttachments.Length() > WGPUMAX_COLOR_TARGETS) {
+  if (aDesc.mColorAttachments.Length() > WGPUMAX_COLOR_ATTACHMENTS) {
     aParent->GetDevice()->GenerateError(nsLiteralCString(
         "Too many color attachments in GPURenderPassDescriptor"));
     return nullptr;
   }
-  std::array<ffi::WGPURenderPassColorAttachment, WGPUMAX_COLOR_TARGETS>
+  std::array<ffi::WGPURenderPassColorAttachment, WGPUMAX_COLOR_ATTACHMENTS>
       colorDescs = {};
   desc.color_attachments = colorDescs.data();
   desc.color_attachments_length = aDesc.mColorAttachments.Length();


@@ -17,7 +17,7 @@ default = []
 [dependencies.wgc]
 package = "wgpu-core"
 git = "https://github.com/gfx-rs/wgpu"
-rev = "32af4f56"
+rev = "b370b990"
 #Note: "replay" shouldn't ideally be needed,
 # but it allows us to serialize everything across IPC.
 features = ["replay", "trace", "serial-pass"]
@@ -25,12 +25,12 @@ features = ["replay", "trace", "serial-pass"]
 [dependencies.wgt]
 package = "wgpu-types"
 git = "https://github.com/gfx-rs/wgpu"
-rev = "32af4f56"
+rev = "b370b990"
 [dependencies.wgh]
 package = "wgpu-hal"
 git = "https://github.com/gfx-rs/wgpu"
-rev = "32af4f56"
+rev = "b370b990"
 [dependencies]
 bincode = "1"


@@ -20,11 +20,11 @@ origin:
   # Human-readable identifier for this version/release
   # Generally "version NNN", "tag SSS", "bookmark SSS"
-  release: commit 32af4f56
+  release: commit b370b990
   # Revision to pull in
   # Must be a long or short commit SHA (long preferred)
-  revision: 32af4f56
+  revision: b370b990e543bd8c74c895a0f856604368806e5c
   license: ['MIT', 'Apache-2.0']


@@ -114,11 +114,11 @@ impl FragmentState<'_> {
     fn to_wgpu(&self) -> wgc::pipeline::FragmentState {
         let color_targets = make_slice(self.targets, self.targets_length)
             .iter()
-            .map(|ct| wgt::ColorTargetState {
+            .map(|ct| Some(wgt::ColorTargetState {
                 format: ct.format,
                 blend: ct.blend.cloned(),
                 write_mask: ct.write_mask,
-            })
+            }))
             .collect();
         wgc::pipeline::FragmentState {
             stage: self.stage.to_wgpu(),
@@ -600,9 +600,13 @@ pub extern "C" fn wgpu_device_create_render_bundle_encoder(
     desc: &RenderBundleEncoderDescriptor,
     bb: &mut ByteBuf,
 ) -> *mut wgc::command::RenderBundleEncoder {
+    let color_formats: Vec<_> = make_slice(desc.color_formats, desc.color_formats_length)
+        .iter()
+        .map(|format| Some(format.clone()))
+        .collect();
     let descriptor = wgc::command::RenderBundleEncoderDescriptor {
         label: cow_label(&desc.label),
-        color_formats: Cow::Borrowed(make_slice(desc.color_formats, desc.color_formats_length)),
+        color_formats: Cow::Owned(color_formats),
         depth_stencil: desc
             .depth_stencil_format
             .map(|&format| wgt::RenderBundleDepthStencil {
@@ -701,14 +705,15 @@ pub unsafe extern "C" fn wgpu_command_encoder_begin_render_pass(
     encoder_id: id::CommandEncoderId,
     desc: &RenderPassDescriptor,
 ) -> *mut wgc::command::RenderPass {
+    let color_attachments: Vec<_> = make_slice(desc.color_attachments, desc.color_attachments_length)
+        .iter()
+        .map(|format| Some(format.clone()))
+        .collect();
     let pass = wgc::command::RenderPass::new(
         encoder_id,
         &wgc::command::RenderPassDescriptor {
             label: cow_label(&desc.label),
-            color_attachments: Cow::Borrowed(make_slice(
-                desc.color_attachments,
-                desc.color_attachments_length,
-            )),
+            color_attachments: Cow::Owned(color_attachments),
             depth_stencil_attachment: desc.depth_stencil_attachment.as_ref(),
         },
     );
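Both changes in this hunk follow the same pattern: the updated wgpu-core descriptors take `Option`-wrapped color formats/attachments (so an unused slot can be expressed explicitly), which is why the previously borrowed slices are now collected into owned vectors of `Some(...)`. A minimal, self-contained sketch of that conversion — the `TextureFormat` enum below is a stand-in for the real FFI/wgt types, not the actual definitions:

```rust
// Sketch only: a stand-in TextureFormat; the real code maps FFI slices into
// wgpu-core descriptor fields that now expect Option-wrapped entries.
#[derive(Clone, Debug, PartialEq)]
enum TextureFormat {
    Rgba8Unorm,
    Bgra8Unorm,
}

// Dense input slice -> Option-wrapped Vec, as the new descriptor fields expect.
fn wrap_color_formats(formats: &[TextureFormat]) -> Vec<Option<TextureFormat>> {
    formats.iter().map(|f| Some(f.clone())).collect()
}

fn main() {
    let ffi_formats = [TextureFormat::Rgba8Unorm, TextureFormat::Bgra8Unorm];
    let wrapped = wrap_color_formats(&ffi_formats);
    assert!(wrapped.iter().all(Option::is_some));
    println!("{:?}", wrapped);
}
```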


@@ -0,0 +1 @@
{"files":{"CONTRIBUTING.md":"0834cb3b5e092977688d73d219a05bed23ae0ecb54b6d6e5d866ce07f6583b5e","Cargo.toml":"813fa9eacb5751a4d0a5654283e55a5252fb90e9ce12d5226cceba1c95fe8686","LICENSE-APACHE":"216486f29671a4262efe32af6d84a75bef398127f8c5f369b5c8305983887a06","LICENSE-MIT":"80f275e90d799911ed3830a7f242a2ef5a4ade2092fe0aa07bfb2d2cf2f2b95e","README.md":"6a18a69fa94ca0a1a786d25e8df347605ba4200c47d3ac6926e235b15c9878e6","src/lib.rs":"704c78cd30205cb923e80bab8a8a9d7e2fbf667a4bb7d2c00c586f6dec244612"},"package":"a20ae67ce26261f218e2b3f2f0d01887a9818283ca6fb260fa7c67e253d61c92"}

40
third_party/rust/android_system_properties/CONTRIBUTING.md vendored Normal file

@@ -0,0 +1,40 @@
# Contributing
Contributions are very much welcome. Here are the guidelines if you are thinking of helping us:
## Contributions
Contributions should be made in the form of GitHub pull requests.
Each pull request will be reviewed by a core contributor (someone with
permission to land patches) and either landed in the main tree or
given feedback for changes that would be required.
Should you wish to work on an issue, please claim it first by commenting on
the GitHub issue that you want to work on it. This is to prevent duplicated
efforts from contributors on the same issue.
## Pull Request Checklist
- Branch from the master branch and, if needed, rebase to the current master
branch before submitting your pull request. If it doesn't merge cleanly with
master you may be asked to rebase your changes.
- Commits should be as small as possible, while ensuring that each commit is
correct independently (i.e., each commit should compile and pass tests).
- If your patch is not getting reviewed or you need a specific person to review
it, you can @-reply a reviewer asking for a review in the pull request or a
comment.
- Whenever applicable, add tests relevant to the fixed bug or new feature.
For specific git instructions, see [GitHub workflow 101](https://github.com/servo/servo/wiki/Github-workflow).
## Conduct
We follow the [Rust Code of Conduct](http://www.rust-lang.org/conduct.html).
For escalation or moderation issues, please contact Nical (nical@fastmail.com) instead of the Rust moderation team.
## License
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be licensed dual MIT/Apache 2, without any additional terms or conditions.

35
third_party/rust/android_system_properties/Cargo.toml vendored Normal file

@@ -0,0 +1,35 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "android_system_properties"
version = "0.1.2"
authors = ["Nicolas Silva <nical@fastmail.com>"]
description = "Minimal Android system properties wrapper"
homepage = "https://github.com/nical/android_system_properties"
documentation = "https://docs.rs/android_system_properties"
readme = "README.md"
keywords = ["android"]
license = "MIT/Apache-2.0"
repository = "https://github.com/nical/android_system_properties"
[package.metadata.docs.rs]
targets = [
"arm-linux-androideabi",
"armv7-linux-androideabi",
"aarch64-linux-android",
"i686-linux-android",
"x86_64-unknown-linux-gnu",
]
[dependencies.libc]
version = "0.2.126"

13
third_party/rust/android_system_properties/LICENSE-APACHE vendored Normal file

@@ -0,0 +1,13 @@
Copyright 2016 Nicolas Silva
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

20
third_party/rust/android_system_properties/LICENSE-MIT vendored Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Nicolas Silva
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

37
third_party/rust/android_system_properties/README.md vendored Normal file

@@ -0,0 +1,37 @@
# android_system_properties
A minimal rust wrapper over android system properties
This crate is similar to the `android-properties` crate with the exception that
the necessary Android libc symbols are loaded dynamically instead of linked
statically. In practice this means that the same binary will work with old and
new versions of Android, even though the API for reading system properties changed
around Android L.
## Example
```rust
use android_system_properties::AndroidSystemProperties;
let properties = AndroidSystemProperties::new();
if let Some(value) = properties.get("persist.sys.timezone") {
println!("{}", value);
}
```
## Listing and setting properties
For the sake of simplicity this crate currently only contains what's needed by wgpu.
The implementations for listing and setting properties can be added back if anyone needs
them (let me know by filing an issue).
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.

162
third_party/rust/android_system_properties/src/lib.rs vendored Normal file

@@ -0,0 +1,162 @@
//! A thin rust wrapper for Android system properties.
//!
//! This crate is similar to the `android-properties` crate with the exception that
//! the necessary Android libc symbols are loaded dynamically instead of linked
//! statically. In practice this means that the same binary will work with old and
//! new versions of Android, even though the API for reading system properties changed
//! around Android L.
use std::{
ffi::{CStr, CString},
os::raw::{c_char, c_int, c_void},
};
#[cfg(target_os = "android")]
use std::mem;
unsafe fn property_callback(payload: *mut String, _name: *const c_char, value: *const c_char, _serial: u32) {
let cvalue = CStr::from_ptr(value);
(*payload) = cvalue.to_str().unwrap().to_string();
}
type Callback = unsafe fn(*mut String, *const c_char, *const c_char, u32);
type SystemPropertyGetFn = unsafe extern "C" fn(*const c_char, *mut c_char) -> c_int;
type SystemPropertyFindFn = unsafe extern "C" fn(*const c_char) -> *const c_void;
type SystemPropertyReadCallbackFn = unsafe extern "C" fn(*const c_void, Callback, *mut String) -> *const c_void;
#[derive(Debug)]
/// An object that can retrieve android system properties.
///
/// ## Example
///
/// ```
/// use android_system_properties::AndroidSystemProperties;
///
/// let properties = AndroidSystemProperties::new();
///
/// if let Some(value) = properties.get("persist.sys.timezone") {
/// println!("{}", value);
/// }
/// ```
pub struct AndroidSystemProperties {
libc_so: *mut c_void,
get_fn: Option<SystemPropertyGetFn>,
find_fn: Option<SystemPropertyFindFn>,
read_callback_fn: Option<SystemPropertyReadCallbackFn>,
}
impl AndroidSystemProperties {
#[cfg(not(target_os = "android"))]
/// Create an entry point for accessing Android properties.
pub fn new() -> Self {
AndroidSystemProperties {
libc_so: std::ptr::null_mut(),
find_fn: None,
read_callback_fn: None,
get_fn: None,
}
}
#[cfg(target_os = "android")]
/// Create an entry point for accessing Android properties.
pub fn new() -> Self {
let libc_name = CString::new("libc.so").unwrap();
let libc_so = unsafe { libc::dlopen(libc_name.as_ptr(), libc::RTLD_NOLOAD) };
let mut properties = AndroidSystemProperties {
libc_so,
find_fn: None,
read_callback_fn: None,
get_fn: None,
};
if libc_so.is_null() {
return properties;
}
unsafe fn load_fn(libc_so: *mut c_void, name: &str) -> Option<*const c_void> {
let cname = CString::new(name).unwrap();
let fn_ptr = libc::dlsym(libc_so, cname.as_ptr());
if fn_ptr.is_null() {
return None;
}
Some(fn_ptr)
}
unsafe {
properties.read_callback_fn = load_fn(libc_so, "__system_property_read_callback")
.map(|raw| mem::transmute::<*const c_void, SystemPropertyReadCallbackFn>(raw));
properties.find_fn = load_fn(libc_so, "__system_property_find")
.map(|raw| mem::transmute::<*const c_void, SystemPropertyFindFn>(raw));
// Fallback for old versions of Android.
if properties.read_callback_fn.is_none() || properties.find_fn.is_none() {
properties.get_fn = load_fn(libc_so, "__system_property_get")
.map(|raw| mem::transmute::<*const c_void, SystemPropertyGetFn>(raw));
}
}
properties
}
/// Retrieve a system property.
///
/// Returns None if the operation fails.
pub fn get(&self, name: &str) -> Option<String> {
let cname = CString::new(name).unwrap();
// If available, use the recommended approach to accessing properties (Android L and onward).
if let (Some(find_fn), Some(read_callback_fn)) = (self.find_fn, self.read_callback_fn) {
let info = unsafe { (find_fn)(cname.as_ptr()) };
if info.is_null() {
return None;
}
let mut result = String::new();
unsafe {
(read_callback_fn)(info, property_callback, &mut result);
}
return Some(result);
}
// Fall back to the older approach.
if let Some(get_fn) = self.get_fn {
// The constant is PROP_VALUE_MAX in Android's libc/include/sys/system_properties.h
const PROPERTY_VALUE_MAX: usize = 92;
let cvalue = CString::new(Vec::with_capacity(PROPERTY_VALUE_MAX)).unwrap();
let raw = cvalue.into_raw();
let len = unsafe { (get_fn)(cname.as_ptr(), raw) };
let bytes = unsafe {
let raw: *mut u8 = std::mem::transmute(raw); // Cast from *mut i8.
Vec::from_raw_parts(raw, len as usize, PROPERTY_VALUE_MAX)
};
if len > 0 {
String::from_utf8(bytes).ok()
} else {
None
}
} else {
None
}
}
}
impl Drop for AndroidSystemProperties {
fn drop(&mut self) {
if !self.libc_so.is_null() {
unsafe {
libc::dlclose(self.libc_so);
}
}
}
}

2
third_party/rust/naga/.cargo-checksum.json vendored

File diff suppressed because one or more lines are too long

@@ -48,6 +48,11 @@ jobs:
          args: --all-features --workspace
       - name: Check snapshots
         run: git diff --exit-code -- tests/out
+      - uses: actions-rs/cargo@v1
+        name: Check benchmarks
+        with:
+          command: check
+          args: --benches
   clippy:
     name: Clippy
     runs-on: ubuntu-latest
@@ -75,7 +80,6 @@ jobs:
           profile: minimal
           toolchain: stable
           override: true
-      - run: rustup component add clippy
       - uses: actions-rs/cargo@v1
         with:
           command: doc


@@ -5,12 +5,28 @@ on:
       - 'tests/out/hlsl/*.hlsl'
 jobs:
-  validate-windows:
-    name: HLSL
+  validate-windows-dxc:
+    name: HLSL via DXC
     runs-on: windows-latest
     steps:
       - uses: actions/checkout@v2
       - name: Add DirectXShaderCompiler
         uses: napokue/setup-dxc@v1.0.0
-      - run: make validate-hlsl
+      - run: make validate-hlsl-dxc
         shell: sh
+  validate-windows-fxc:
+    name: HLSL via FXC
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Add fxc bin to PATH
+        run: |
+          Get-Childitem -Path "C:\Program Files (x86)\Windows Kits\10\bin\**\x64\fxc.exe" `
+          | Sort-Object -Property LastWriteTime -Descending `
+          | Select-Object -First 1 `
+          | Split-Path -Parent `
+          | Out-File -FilePath $Env:GITHUB_PATH -Encoding utf8 -Append
+        shell: powershell
+      - run: make validate-hlsl-fxc
+        shell: sh

312
third_party/rust/naga/CHANGELOG.md vendored

@@ -1,16 +1,288 @@
 # Change Log
 ## v0.9 (TBD)
-- WGSL:
-  - semicolon after struct declaration are now optional
-  - commas are used to separate struct members instead of semicolons
-  - attributes are declared with `@attrib` instead of `[[attrib]]`
-  - `stride` attribute is removed
-  - block comments are supported
-  - renames:
-    - `findLsb` -> `firstTrailingBit`
-    - `findMsb` -> `firstLeadingBit`
-    - `smoothStep` -> `smoothstep`
+- Fix minimal-versions of dependencies ([#1840](https://github.com/gfx-rs/naga/pull/1840)) **@teoxoy**
+- Update MSRV to 1.56 ([#1838](https://github.com/gfx-rs/naga/pull/1838)) **@teoxoy**
+API
+- Rename `TypeFlags` `INTERFACE`/`HOST_SHARED` to `IO_SHARED`/`HOST_SHAREABLE` ([#1872](https://github.com/gfx-rs/naga/pull/1872)) **@jimblandy**
+- Expose more error information ([#1827](https://github.com/gfx-rs/naga/pull/1827), [#1937](https://github.com/gfx-rs/naga/pull/1937)) **@jakobhellermann** **@nical** **@jimblandy**
+- Do not unconditionally make error output colorful ([#1707](https://github.com/gfx-rs/naga/pull/1707)) **@rhysd**
+- Rename `StorageClass` to `AddressSpace` ([#1699](https://github.com/gfx-rs/naga/pull/1699)) **@kvark**
- Add a way to emit errors to a path ([#1640](https://github.com/gfx-rs/naga/pull/1640)) **@laptou**
CLI
- Add `bincode` representation ([#1729](https://github.com/gfx-rs/naga/pull/1729)) **@kvark**
- Include file path in WGSL parse error ([#1708](https://github.com/gfx-rs/naga/pull/1708)) **@rhysd**
- Add `--version` flag ([#1706](https://github.com/gfx-rs/naga/pull/1706)) **@rhysd**
- Support reading input from stdin via `--stdin-file-path` ([#1701](https://github.com/gfx-rs/naga/pull/1701)) **@rhysd**
- Use `panic = "abort"` ([#1597](https://github.com/gfx-rs/naga/pull/1597)) **@jrmuizel**
DOCS
- Standardize some docs ([#1660](https://github.com/gfx-rs/naga/pull/1660)) **@NoelTautges**
- Document `TypeInner::BindingArray` ([#1859](https://github.com/gfx-rs/naga/pull/1859)) **@jimblandy**
- Clarify accepted types for `Expression::AccessIndex` ([#1862](https://github.com/gfx-rs/naga/pull/1862)) **@NoelTautges**
- Document `proc::layouter` ([#1693](https://github.com/gfx-rs/naga/pull/1693)) **@jimblandy**
- Document Naga's promises around validation and panics ([#1828](https://github.com/gfx-rs/naga/pull/1828)) **@jimblandy**
- `FunctionInfo` doc fixes ([#1726](https://github.com/gfx-rs/naga/pull/1726)) **@jimblandy**
VALIDATOR
- Forbid returning pointers and atomics from functions ([#911](https://github.com/gfx-rs/naga/pull/911)) **@jimblandy**
- Let validation check for more unsupported builtins ([#1962](https://github.com/gfx-rs/naga/pull/1962)) **@jimblandy**
- Fix `Capabilities::SAMPLER_NON_UNIFORM_INDEXING` bitflag ([#1915](https://github.com/gfx-rs/naga/pull/1915)) **@cwfitzgerald**
- Properly check that user-defined IO uses IO-shareable types ([#912](https://github.com/gfx-rs/naga/pull/912)) **@jimblandy**
- Validate `ValuePointer` exactly like a `Pointer` to a `Scalar` ([#1875](https://github.com/gfx-rs/naga/pull/1875)) **@jimblandy**
- Reject empty structs ([#1826](https://github.com/gfx-rs/naga/pull/1826)) **@jimblandy**
- Validate uniform address space layout constraints ([#1812](https://github.com/gfx-rs/naga/pull/1812)) **@teoxoy**
- Improve `AddressSpace` related error messages ([#1710](https://github.com/gfx-rs/naga/pull/1710)) **@kvark**
WGSL-IN
Main breaking changes
- Commas to separate struct members (comma after last member is optional)
- `struct S { a: f32; b: i32; }` -> `struct S { a: f32, b: i32 }`
- Attribute syntax
- `[[binding(0), group(0)]]` -> `@binding(0) @group(0)`
- Entry point stage attributes
- `@stage(vertex)` -> `@vertex`
- `@stage(fragment)` -> `@fragment`
- `@stage(compute)` -> `@compute`
- Function renames
- `smoothStep` -> `smoothstep`
- `findLsb` -> `firstTrailingBit`
- `findMsb` -> `firstLeadingBit`
Specification Changes (relevant changes have also been applied to the WGSL backend)
- Update number literal format ([#1863](https://github.com/gfx-rs/naga/pull/1863)) **@teoxoy**
- Allow non-ascii characters in identifiers ([#1849](https://github.com/gfx-rs/naga/pull/1849)) **@teoxoy**
- Update reserved keywords ([#1847](https://github.com/gfx-rs/naga/pull/1847), [#1870](https://github.com/gfx-rs/naga/pull/1870), [#1905](https://github.com/gfx-rs/naga/pull/1905)) **@teoxoy** **@Gordon-F**
- Update entry point stage attributes ([#1833](https://github.com/gfx-rs/naga/pull/1833)) **@Gordon-F**
- Make colon in case optional ([#1801](https://github.com/gfx-rs/naga/pull/1801)) **@Gordon-F**
- Rename `smoothStep` to `smoothstep` ([#1800](https://github.com/gfx-rs/naga/pull/1800)) **@Gordon-F**
- Make semicolon after struct declaration optional ([#1791](https://github.com/gfx-rs/naga/pull/1791)) **@stshine**
- Use commas to separate struct members instead of semicolons ([#1773](https://github.com/gfx-rs/naga/pull/1773)) **@Gordon-F**
- Rename `findLsb`/`findMsb` to `firstTrailingBit`/`firstLeadingBit` ([#1735](https://github.com/gfx-rs/naga/pull/1735)) **@kvark**
- Make parenthesis optional for `if` and `switch` statements ([#1725](https://github.com/gfx-rs/naga/pull/1725)) **@Gordon-F**
- Declare attributes with `@attrib` instead of `[[attrib]]` ([#1676](https://github.com/gfx-rs/naga/pull/1676)) **@kvark**
- Allow non-structure buffer types ([#1682](https://github.com/gfx-rs/naga/pull/1682)) **@kvark**
- Remove `stride` attribute ([#1681](https://github.com/gfx-rs/naga/pull/1681)) **@kvark**
Improvements
- Implement `firstTrailingBit`/`firstLeadingBit` u32 overloads ([#1865](https://github.com/gfx-rs/naga/pull/1865)) **@teoxoy**
- Add error for non-floating-point matrix ([#1917](https://github.com/gfx-rs/naga/pull/1917)) **@grovesNL**
- Implement partial vector & matrix identity constructors ([#1916](https://github.com/gfx-rs/naga/pull/1916)) **@teoxoy**
- Implement phony assignment ([#1866](https://github.com/gfx-rs/naga/pull/1866), [#1869](https://github.com/gfx-rs/naga/pull/1869)) **@teoxoy**
- Fix being able to match `~=` as LogicalOperation ([#1849](https://github.com/gfx-rs/naga/pull/1849)) **@teoxoy**
- Implement Binding Arrays ([#1845](https://github.com/gfx-rs/naga/pull/1845)) **@cwfitzgerald**
- Implement unary vector operators ([#1820](https://github.com/gfx-rs/naga/pull/1820)) **@teoxoy**
- Implement zero value constructors and constructors that infer their type from their parameters ([#1790](https://github.com/gfx-rs/naga/pull/1790)) **@teoxoy**
- Implement invariant attribute ([#1789](https://github.com/gfx-rs/naga/pull/1789), [#1822](https://github.com/gfx-rs/naga/pull/1822)) **@teoxoy** **@jimblandy**
- Implement increment and decrement statements ([#1788](https://github.com/gfx-rs/naga/pull/1788), [#1912](https://github.com/gfx-rs/naga/pull/1912)) **@teoxoy**
- Implement `while` loop ([#1787](https://github.com/gfx-rs/naga/pull/1787)) **@teoxoy**
- Fix array size on globals ([#1717](https://github.com/gfx-rs/naga/pull/1717)) **@jimblandy**
- Implement integer vector overloads for `dot` function ([#1689](https://github.com/gfx-rs/naga/pull/1689)) **@francesco-cattoglio**
- Implement block comments ([#1675](https://github.com/gfx-rs/naga/pull/1675)) **@kocsis1david**
- Implement assignment binary operators ([#1662](https://github.com/gfx-rs/naga/pull/1662)) **@kvark**
- Implement `radians`/`degrees` builtin functions ([#1627](https://github.com/gfx-rs/naga/pull/1627)) **@encounter**
- Implement `findLsb`/`findMsb` builtin functions ([#1473](https://github.com/gfx-rs/naga/pull/1473)) **@fintelia**
- Implement `textureGather`/`textureGatherCompare` builtin functions ([#1596](https://github.com/gfx-rs/naga/pull/1596)) **@kvark**
SPV-IN
- Implement `OpBitReverse` and `OpBitCount` ([#1954](https://github.com/gfx-rs/naga/pull/1954)) **@JCapucho**
- Add `MultiView` to `SUPPORTED_CAPABILITIES` ([#1934](https://github.com/gfx-rs/naga/pull/1934)) **@expenses**
- Translate `OpSMod` and `OpFMod` correctly ([#1867](https://github.com/gfx-rs/naga/pull/1867)) **@teoxoy**
- Error on unsupported `MatrixStride` ([#1805](https://github.com/gfx-rs/naga/pull/1805)) **@teoxoy**
- Align array stride for undecorated arrays ([#1724](https://github.com/gfx-rs/naga/pull/1724)) **@JCapucho**
GLSL-IN
- Fix matrix multiplication check ([#1953](https://github.com/gfx-rs/naga/pull/1953)) **@JCapucho**
- Fix panic (stop emitter in conditional) ([#1952](https://github.com/gfx-rs/naga/pull/1952)) **@JCapucho**
- Translate `mod` fn correctly ([#1867](https://github.com/gfx-rs/naga/pull/1867)) **@teoxoy**
- Make the ternary operator behave as an if ([#1877](https://github.com/gfx-rs/naga/pull/1877)) **@JCapucho**
- Add support for `clamp` function ([#1502](https://github.com/gfx-rs/naga/pull/1502)) **@sjinno**
- Better errors for bad constant expression ([#1501](https://github.com/gfx-rs/naga/pull/1501)) **@sjinno**
- Error on a `matCx2` used with the `std140` layout ([#1806](https://github.com/gfx-rs/naga/pull/1806)) **@teoxoy**
- Allow nested accesses in lhs positions ([#1794](https://github.com/gfx-rs/naga/pull/1794)) **@JCapucho**
- Use forced conversions for vector/matrix constructors ([#1796](https://github.com/gfx-rs/naga/pull/1796)) **@JCapucho**
- Add support for `barrier` function ([#1793](https://github.com/gfx-rs/naga/pull/1793)) **@fintelia**
- Fix panic (resume expression emit after `imageStore`) ([#1795](https://github.com/gfx-rs/naga/pull/1795)) **@JCapucho**
- Allow multiple array specifiers ([#1780](https://github.com/gfx-rs/naga/pull/1780)) **@JCapucho**
- Fix memory qualifiers being inverted ([#1779](https://github.com/gfx-rs/naga/pull/1779)) **@JCapucho**
- Support arrays as input/output types ([#1759](https://github.com/gfx-rs/naga/pull/1759)) **@JCapucho**
- Fix freestanding constructor parsing ([#1758](https://github.com/gfx-rs/naga/pull/1758)) **@JCapucho**
- Fix matrix - scalar operations ([#1757](https://github.com/gfx-rs/naga/pull/1757)) **@JCapucho**
- Fix matrix - matrix division ([#1757](https://github.com/gfx-rs/naga/pull/1757)) **@JCapucho**
- Fix matrix comparisons ([#1757](https://github.com/gfx-rs/naga/pull/1757)) **@JCapucho**
- Add support for `texelFetchOffset` ([#1746](https://github.com/gfx-rs/naga/pull/1746)) **@JCapucho**
- Inject `sampler2DMSArray` builtins on use ([#1737](https://github.com/gfx-rs/naga/pull/1737)) **@JCapucho**
- Inject `samplerCubeArray` builtins on use ([#1736](https://github.com/gfx-rs/naga/pull/1736)) **@JCapucho**
- Add support for image builtin functions ([#1723](https://github.com/gfx-rs/naga/pull/1723)) **@JCapucho**
- Add support for image declarations ([#1723](https://github.com/gfx-rs/naga/pull/1723)) **@JCapucho**
- Texture builtins fixes ([#1719](https://github.com/gfx-rs/naga/pull/1719)) **@JCapucho**
- Type qualifiers rework ([#1713](https://github.com/gfx-rs/naga/pull/1713)) **@JCapucho**
- `texelFetch` accept multisampled textures ([#1715](https://github.com/gfx-rs/naga/pull/1715)) **@JCapucho**
- Fix panic when culling nested block ([#1714](https://github.com/gfx-rs/naga/pull/1714)) **@JCapucho**
- Fix composite constructors ([#1631](https://github.com/gfx-rs/naga/pull/1631)) **@JCapucho**
- Fix using swizzle as out arguments ([#1632](https://github.com/gfx-rs/naga/pull/1632)) **@JCapucho**
SPV-OUT
- Implement `reverseBits` and `countOneBits` ([#1897](https://github.com/gfx-rs/naga/pull/1897)) **@hasali19**
- Use `OpCopyObject` for matrix identity casts ([#1916](https://github.com/gfx-rs/naga/pull/1916)) **@teoxoy**
- Use `OpCopyObject` for bool - bool conversion due to `OpBitcast` not being feasible for booleans ([#1916](https://github.com/gfx-rs/naga/pull/1916)) **@teoxoy**
- Zero init variables in function and private address spaces ([#1871](https://github.com/gfx-rs/naga/pull/1871)) **@teoxoy**
- Use `SRem` instead of `SMod` ([#1867](https://github.com/gfx-rs/naga/pull/1867)) **@teoxoy**
- Add support for integer vector - scalar multiplication ([#1820](https://github.com/gfx-rs/naga/pull/1820)) **@teoxoy**
- Add support for matrix addition and subtraction ([#1820](https://github.com/gfx-rs/naga/pull/1820)) **@teoxoy**
- Emit required decorations on wrapper struct types ([#1815](https://github.com/gfx-rs/naga/pull/1815)) **@jimblandy**
- Decorate array and struct type layouts unconditionally ([#1815](https://github.com/gfx-rs/naga/pull/1815)) **@jimblandy**
- Fix wrong `MatrixStride` for `matCx2` and `mat2xR` ([#1781](https://github.com/gfx-rs/naga/pull/1781)) **@teoxoy**
- Use `OpImageQuerySize` for MS images ([#1742](https://github.com/gfx-rs/naga/pull/1742)) **@JCapucho**
MSL-OUT
- Fix pointers to private or workgroup address spaces possibly being read only ([#1901](https://github.com/gfx-rs/naga/pull/1901)) **@teoxoy**
- Zero init variables in function address space ([#1871](https://github.com/gfx-rs/naga/pull/1871)) **@teoxoy**
- Make binding arrays play nice with bounds checks ([#1855](https://github.com/gfx-rs/naga/pull/1855)) **@cwfitzgerald**
- Permit `invariant` qualifier on vertex shader outputs ([#1821](https://github.com/gfx-rs/naga/pull/1821)) **@jimblandy**
- Fix packed `vec3` stores ([#1816](https://github.com/gfx-rs/naga/pull/1816)) **@teoxoy**
- Actually test push constants to be used ([#1767](https://github.com/gfx-rs/naga/pull/1767)) **@kvark**
- Properly rename entry point arguments for struct members ([#1766](https://github.com/gfx-rs/naga/pull/1766)) **@jimblandy**
- Qualify read-only storage with const ([#1763](https://github.com/gfx-rs/naga/pull/1763)) **@kvark**
- Fix not unary operator for integer scalars ([#1760](https://github.com/gfx-rs/naga/pull/1760)) **@vincentisambart**
- Add bounds checks for `ImageLoad` and `ImageStore` ([#1730](https://github.com/gfx-rs/naga/pull/1730)) **@jimblandy**
- Fix resource bindings for non-structures ([#1718](https://github.com/gfx-rs/naga/pull/1718)) **@kvark**
- Always check whether _buffer_sizes arg is needed ([#1717](https://github.com/gfx-rs/naga/pull/1717)) **@jimblandy**
- WGSL storage address space should always correspond to MSL device address space ([#1711](https://github.com/gfx-rs/naga/pull/1711)) **@wtholliday**
- Mitigation for MSL atomic bounds check ([#1703](https://github.com/gfx-rs/naga/pull/1703)) **@glalonde**
HLSL-OUT
- Fix fallthrough in switch statements ([#1920](https://github.com/gfx-rs/naga/pull/1920)) **@teoxoy**
- Fix missing break statements ([#1919](https://github.com/gfx-rs/naga/pull/1919)) **@teoxoy**
- Fix `countOneBits` and `reverseBits` for signed integers ([#1928](https://github.com/gfx-rs/naga/pull/1928)) **@hasali19**
- Fix array constructor return type ([#1914](https://github.com/gfx-rs/naga/pull/1914)) **@teoxoy**
- Fix hlsl output for writes to scalar/vector storage buffer ([#1903](https://github.com/gfx-rs/naga/pull/1903)) **@hasali19**
- Use `fmod` instead of `%` ([#1867](https://github.com/gfx-rs/naga/pull/1867)) **@teoxoy**
- Use wrapped constructors when loading from storage address space ([#1893](https://github.com/gfx-rs/naga/pull/1893)) **@teoxoy**
- Zero init struct constructor ([#1890](https://github.com/gfx-rs/naga/pull/1890)) **@teoxoy**
- Flesh out matrix handling documentation ([#1850](https://github.com/gfx-rs/naga/pull/1850)) **@jimblandy**
- Emit `row_major` qualifier on matrix uniform globals ([#1846](https://github.com/gfx-rs/naga/pull/1846)) **@jimblandy**
- Fix bool splat ([#1820](https://github.com/gfx-rs/naga/pull/1820)) **@teoxoy**
- Add more padding when necessary ([#1814](https://github.com/gfx-rs/naga/pull/1814)) **@teoxoy**
- Support multidimensional arrays ([#1814](https://github.com/gfx-rs/naga/pull/1814)) **@teoxoy**
- Don't output interpolation modifier if it's the default ([#1809](https://github.com/gfx-rs/naga/pull/1809)) **@NoelTautges**
- Fix `matCx2` translation for uniform buffers ([#1802](https://github.com/gfx-rs/naga/pull/1802)) **@teoxoy**
- Fix modifiers not being written in the vertex output and fragment input structs ([#1789](https://github.com/gfx-rs/naga/pull/1789)) **@teoxoy**
- Fix matrix not being declared as transposed ([#1784](https://github.com/gfx-rs/naga/pull/1784)) **@teoxoy**
- Insert padding between struct members ([#1786](https://github.com/gfx-rs/naga/pull/1786)) **@teoxoy**
- Fix not unary operator for integer scalars ([#1760](https://github.com/gfx-rs/naga/pull/1760)) **@vincentisambart**
GLSL-OUT
- Fix type error for `countOneBits` implementation ([#1897](https://github.com/gfx-rs/naga/pull/1897)) **@hasali19**
- Fix storage format for `Rgba8Unorm` ([#1955](https://github.com/gfx-rs/naga/pull/1955)) **@JCapucho**
- Implement bounds checks for `ImageLoad` ([#1889](https://github.com/gfx-rs/naga/pull/1889)) **@JCapucho**
- Fix feature search in expressions ([#1887](https://github.com/gfx-rs/naga/pull/1887)) **@JCapucho**
- Emit globals of any type ([#1823](https://github.com/gfx-rs/naga/pull/1823)) **@jimblandy**
- Add support for boolean vector `~`, `|` and `&` ops ([#1820](https://github.com/gfx-rs/naga/pull/1820)) **@teoxoy**
- Fix array function arguments ([#1814](https://github.com/gfx-rs/naga/pull/1814)) **@teoxoy**
- Write constant sized array type for uniform ([#1768](https://github.com/gfx-rs/naga/pull/1768)) **@hatoo**
- Texture function fixes ([#1742](https://github.com/gfx-rs/naga/pull/1742)) **@JCapucho**
- Push constants use anonymous uniforms ([#1683](https://github.com/gfx-rs/naga/pull/1683)) **@JCapucho**
- Add support for push constant emulation ([#1672](https://github.com/gfx-rs/naga/pull/1672)) **@JCapucho**
- Skip unsized types if unused ([#1649](https://github.com/gfx-rs/naga/pull/1649)) **@kvark**
- Write struct and array initializers ([#1644](https://github.com/gfx-rs/naga/pull/1644)) **@JCapucho**
## v0.8.5 (2022-01-25)
MSL-OUT
- Make VS-output positions invariant on even more systems ([#1697](https://github.com/gfx-rs/naga/pull/1697)) **@cwfitzgerald**
- Improve support for point primitives ([#1696](https://github.com/gfx-rs/naga/pull/1696)) **@kvark**
## v0.8.4 (2022-01-24)
MSL-OUT
- Make VS-output positions invariant if possible ([#1687](https://github.com/gfx-rs/naga/pull/1687)) **@kvark**
GLSL-OUT
- Fix `floatBitsToUint` spelling ([#1688](https://github.com/gfx-rs/naga/pull/1688)) **@cwfitzgerald**
- Call proper memory barrier functions ([#1680](https://github.com/gfx-rs/naga/pull/1680)) **@francesco-cattoglio**
## v0.8.3 (2022-01-20)
- Don't pin `indexmap` version ([#1666](https://github.com/gfx-rs/naga/pull/1666)) **@a1phyr**
MSL-OUT
- Fix support for point primitives ([#1674](https://github.com/gfx-rs/naga/pull/1674)) **@kvark**
GLSL-OUT
- Fix sampler association ([#1671](https://github.com/gfx-rs/naga/pull/1671)) **@JCapucho**
## v0.8.2 (2022-01-11)
VALIDATOR
- Check structure resource types ([#1639](https://github.com/gfx-rs/naga/pull/1639)) **@kvark**
WGSL-IN
- Improve type mismatch errors ([#1658](https://github.com/gfx-rs/naga/pull/1658)) **@Gordon-F**
SPV-IN
- Implement more sign agnostic operations ([#1651](https://github.com/gfx-rs/naga/pull/1651), [#1650](https://github.com/gfx-rs/naga/pull/1650)) **@JCapucho**
SPV-OUT
- Fix modulo operator (use `OpFRem` instead of `OpFMod`) ([#1653](https://github.com/gfx-rs/naga/pull/1653)) **@JCapucho**
MSL-OUT
- Fix `texture1d` accesses ([#1647](https://github.com/gfx-rs/naga/pull/1647)) **@jimblandy**
- Fix data packing functions ([#1637](https://github.com/gfx-rs/naga/pull/1637)) **@phoekz**
## v0.8.1 (2021-12-29)
API
- Make `WithSpan` clonable ([#1620](https://github.com/gfx-rs/naga/pull/1620)) **@jakobhellermann**
MSL-OUT
- Fix packed vec access ([#1634](https://github.com/gfx-rs/naga/pull/1634)) **@kvark**
- Fix packed float support ([#1630](https://github.com/gfx-rs/naga/pull/1630)) **@kvark**
HLSL-OUT
- Support arrays of matrices ([#1629](https://github.com/gfx-rs/naga/pull/1629)) **@kvark**
- Use `mad` instead of `fma` function ([#1580](https://github.com/gfx-rs/naga/pull/1580)) **@parasyte**
GLSL-OUT
- Fix conflicting names for globals ([#1616](https://github.com/gfx-rs/naga/pull/1616)) **@Gordon-F**
- Fix `fma` function ([#1580](https://github.com/gfx-rs/naga/pull/1580)) **@parasyte**
 ## v0.8 (2021-12-18)
 - development release for wgpu-0.12
@@ -26,7 +298,7 @@
 - MSL-out:
   - full out-of-bounds checking
-### v0.7.3 (2021-12-14)
+## v0.7.3 (2021-12-14)
 - API:
   - `view_index` builtin
 - GLSL-out:
@@ -34,7 +306,7 @@
 - SPV-out:
   - fix incorrect pack/unpack
-### v0.7.2 (2021-12-01)
+## v0.7.2 (2021-12-01)
 - validator:
   - check stores for proper pointer class
 - HLSL-out:
@@ -50,7 +322,7 @@
 - GLSL-in:
   - don't panic on invalid integer operations
-### v0.7.1 (2021-10-12)
+## v0.7.1 (2021-10-12)
 - implement casts from and to booleans in the backends
 ## v0.7 (2021-10-07)
@@ -83,7 +355,7 @@
   - option to emit point size
   - option to clamp output depth
-### v0.6.3 (2021-09-08)
+## v0.6.3 (2021-09-08)
 - Reduced heap allocations when generating WGSL, HLSL, and GLSL
 - WGSL-in:
   - support module-scope `let` type inference
@@ -96,7 +368,7 @@
 - SPV-out:
   - allow working around Adreno issue with `OpName`
-### v0.6.2 (2021-09-01)
+## v0.6.2 (2021-09-01)
 - SPV-out fixes:
   - requested capabilities for 1D and cube images, storage formats
   - handling `break` and `continue` in a `switch` statement
@@ -110,7 +382,7 @@
 - GLSL-in fixes:
   - avoid infinite loop on invalid statements
-### v0.6.1 (2021-08-24)
+## v0.6.1 (2021-08-24)
 - HLSL-out fixes:
   - array arguments
   - pointers to array arguments
@@ -165,7 +437,7 @@
   - multisampling on GLES
 - WGSL is vastly improved and now usable
-### v0.4.2 (2021-05-28)
+## v0.4.2 (2021-05-28)
 - SPIR-V frontend:
   - fix image stores
   - fix matrix stride check
@@ -175,7 +447,7 @@
   - support sample interpolation
   - write out swizzled vector accesses
-### v0.4.1 (2021-05-14)
+## v0.4.1 (2021-05-14)
 - numerous additions and improvements to SPIR-V frontend:
   - int8, in16, int64
   - null constant initializers for structs and matrices
@@ -218,7 +490,7 @@
 - `convert` example is transformed into the default binary target named `naga`
 - lots of frontend and backend fixes
-### v0.3.2 (2021-02-15)
+## v0.3.2 (2021-02-15)
 - fix logical expression types
 - fix _FragDepth_ semantics
 - spv-in:
@@ -227,7 +499,7 @@
   - add lots of missing math functions
   - implement discard
-### v0.3.1 (2021-01-31)
+## v0.3.1 (2021-01-31)
 - wgsl:
   - support constant array sizes
 - spv-out:

5
third_party/rust/naga/Cargo.toml vendored

@@ -67,7 +67,10 @@ unicode-xid = { version = "0.2.3", optional = true }
 bincode = "1"
 criterion = { version = "0.3", features = [] }
 diff = "0.1"
-ron = "0.7"
+# Require at least version 0.7.1 of ron, this version changed how floating points are
+# serialized by forcing them to always have the decimal part, this makes it backwards
+# incompatible with our tests because we do a syntatic diff and not a semantic one.
+ron = "~0.7.1"
 serde = { version = "1.0", features = ["derive"] }
 spirv = { version = "0.2", features = ["deserialize"] }
 rspirv = "0.11"
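The comment above is the reason for the `~0.7.1` pin: naga's snapshot tests compare serialized output textually (a syntactic diff), so a change in how ron prints floats shows up as a failure even though the values are unchanged. A small illustration of that distinction, with made-up snapshot strings:

```rust
// Hypothetical snapshot fragments: older ron could print a float without a
// decimal part, ron 0.7.1 always includes it (per the comment above).
fn main() {
    let old_snapshot = "scale: 1";
    let new_snapshot = "scale: 1.0";

    // Semantically the same number once parsed...
    let old_value: f64 = old_snapshot.trim_start_matches("scale: ").parse().unwrap();
    let new_value: f64 = new_snapshot.trim_start_matches("scale: ").parse().unwrap();
    assert_eq!(old_value, new_value);

    // ...but a syntactic (textual) comparison still reports a difference.
    assert_ne!(old_snapshot, new_snapshot);
}
```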

42
third_party/rust/naga/Makefile vendored

@@ -1,4 +1,4 @@
-.PHONY: all clean validate-spv validate-msl validate-glsl validate-dot validate-wgsl validate-hlsl
+.PHONY: all clean validate-spv validate-msl validate-glsl validate-dot validate-wgsl validate-hlsl-dxc validate-hlsl-fxc
 .SECONDARY: boids.metal quad.metal
 SNAPSHOTS_BASE_IN=tests/in
 SNAPSHOTS_BASE_OUT=tests/out
@@ -69,10 +69,10 @@ validate-wgsl: $(SNAPSHOTS_BASE_OUT)/wgsl/*.wgsl
 	cargo run $${file}; \
 	done
-validate-hlsl: SHELL:=/bin/bash # required because config files uses arrays
-validate-hlsl: $(SNAPSHOTS_BASE_OUT)/hlsl/*.hlsl
+validate-hlsl-dxc: SHELL:=/bin/bash # required because config files uses arrays
+validate-hlsl-dxc: $(SNAPSHOTS_BASE_OUT)/hlsl/*.hlsl
 	@set -e && for file in $^ ; do \
-	DXC_PARAMS="-Wno-parentheses-equality -Zi -Qembed_debug"; \
+	DXC_PARAMS="-Wno-parentheses-equality -Zi -Qembed_debug -Od"; \
 	echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"}; \
 	config="$$(dirname $${file})/$$(basename $${file}).config"; \
 	. $${config}; \
@@ -93,3 +93,37 @@ validate-hlsl: $(SNAPSHOTS_BASE_OUT)/hlsl/*.hlsl
 	done; \
 	echo "======================"; \
 	done
validate-hlsl-fxc: SHELL:=/bin/bash # required because config files uses arrays
validate-hlsl-fxc: $(SNAPSHOTS_BASE_OUT)/hlsl/*.hlsl
@set -e && for file in $^ ; do \
FXC_PARAMS="-Zi -Od"; \
echo "Validating" $${file#"$(SNAPSHOTS_BASE_OUT)/"}; \
config="$$(dirname $${file})/$$(basename $${file}).config"; \
. $${config}; \
for (( i=0; i<$${#vertex[@]}; i++ )); do \
name=`echo $${vertex[i]} | cut -d \: -f 1`; \
profile=`echo $${vertex[i]} | cut -d \: -f 2`; \
sm=`echo $${profile} | cut -d \_ -f 2`; \
if (( sm < 6 )); then \
(set -x; fxc $${file} -T $${profile} -E $${name} $${FXC_PARAMS} > /dev/null); \
fi \
done; \
for (( i=0; i<$${#fragment[@]}; i++ )); do \
name=`echo $${fragment[i]} | cut -d \: -f 1`; \
profile=`echo $${fragment[i]} | cut -d \: -f 2`; \
sm=`echo $${profile} | cut -d \_ -f 2`; \
if (( sm < 6 )); then \
(set -x; fxc $${file} -T $${profile} -E $${name} $${FXC_PARAMS} > /dev/null); \
fi \
done; \
for (( i=0; i<$${#compute[@]}; i++ )); do \
name=`echo $${compute[i]} | cut -d \: -f 1`; \
profile=`echo $${compute[i]} | cut -d \: -f 2`; \
sm=`echo $${profile} | cut -d \_ -f 2`; \
if (( sm < 6 )); then \
(set -x; fxc $${file} -T $${profile} -E $${name} $${FXC_PARAMS} > /dev/null); \
fi \
done; \
echo "======================"; \
done

6
third_party/rust/naga/README.md vendored

@@ -15,7 +15,7 @@ Front-end | Status | Feature | Notes |
 --------------- | ------------------ | ------- | ----- |
 SPIR-V (binary) | :white_check_mark: | spv-in | |
 WGSL | :white_check_mark: | wgsl-in | Fully validated |
-GLSL | :ok: | glsl-in | GLSL 440+ |
+GLSL | :ok: | glsl-in | GLSL 440+ and Vulkan semantics only |
 Back-end | Status | Feature | Notes |
 --------------- | ------------------ | -------- | ----- |
@@ -81,5 +81,7 @@ make validate-msl # for Metal shaders, requires XCode command-line tools install
 make validate-glsl # for OpenGL shaders, requires GLSLang installed
 make validate-dot # for dot files, requires GraphViz installed
 make validate-wgsl # for WGSL shaders
-make validate-hlsl # for HLSL shaders. Note: this Make target makes use of the "sh" shell. This is not the default shell in Windows.
+make validate-hlsl-dxc # for HLSL shaders via DXC
+make validate-hlsl-fxc # for HLSL shaders via FXC
+# Note: HLSL Make targets make use of the "sh" shell. This is not the default shell in Windows.
 ```

1
third_party/rust/naga/benches/criterion.rs vendored

@@ -255,6 +255,7 @@ fn backends(c: &mut Criterion) {
                 info,
                 &options,
                 &pipeline_options,
+                naga::proc::BoundsCheckPolicies::default(),
             ) {
                 Ok(mut writer) => {
                     let _ = writer.write(); // can error if unsupported

4
third_party/rust/naga/src/back/dot/mod.rs vendored

@@ -81,11 +81,15 @@ impl StatementGraph {
             S::Loop {
                 ref body,
                 ref continuing,
+                break_if,
             } => {
                 let body_id = self.add(body);
                 self.flow.push((id, body_id, "body"));
                 let continuing_id = self.add(continuing);
                 self.flow.push((body_id, continuing_id, "continuing"));
+                if let Some(expr) = break_if {
+                    self.dependencies.push((id, expr, "break if"));
+                }
                 "Loop"
             }
             S::Return { value } => {


@@ -38,6 +38,10 @@ bitflags::bitflags! {
         const FMA = 1 << 18;
         /// Texture samples query
         const TEXTURE_SAMPLES = 1 << 19;
+        /// Texture levels query
+        const TEXTURE_LEVELS = 1 << 20;
+        /// Image size query
+        const IMAGE_SIZE = 1 << 21;
     }
 }
@@ -104,9 +108,11 @@ impl FeaturesManager {
         check_feature!(DYNAMIC_ARRAY_SIZE, 430, 310);
         check_feature!(MULTI_VIEW, 140, 310);
         // Only available on glsl core, this means that opengl es can't query the number
-        // of samples in a image and neither do bound checks on the sample argument
-        // of texelFecth
+        // of samples nor levels in a image and neither do bound checks on the sample nor
+        // the level argument of texelFecth
         check_feature!(TEXTURE_SAMPLES, 150);
+        check_feature!(TEXTURE_LEVELS, 130);
+        check_feature!(IMAGE_SIZE, 430, 310);
         // Return an error if there are missing features
         if missing.is_empty() {
@@ -223,6 +229,11 @@ impl FeaturesManager {
             )?;
         }
+        if self.0.contains(Features::TEXTURE_LEVELS) && version < Version::Desktop(430) {
+            // https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_texture_query_levels.txt
+            writeln!(out, "#extension GL_ARB_texture_query_levels : require")?;
+        }
         Ok(())
     }
 }
@@ -376,27 +387,75 @@ impl<'a, W> Writer<'a, W> {
             }
         }
-        // Loop trough all expressions in both functions and entry points
+        // We will need to pass some of the members to a closure, so we need
+        // to separate them otherwise the borrow checker will complain, this
+        // shouldn't be needed in rust 2021
+        let &mut Self {
+            module,
+            info,
+            ref mut features,
+            entry_point,
+            entry_point_idx,
+            ref policies,
+            ..
+        } = self;
+        // Loop trough all expressions in both functions and the entry point
         // to check for needed features
-        for (_, expr) in self
-            .module
+        for (expressions, info) in module
             .functions
             .iter()
-            .flat_map(|(_, f)| f.expressions.iter())
-            .chain(self.entry_point.function.expressions.iter())
+            .map(|(h, f)| (&f.expressions, &info[h]))
+            .chain(std::iter::once((
+                &entry_point.function.expressions,
+                info.get_entry_point(entry_point_idx as usize),
+            )))
         {
-            match *expr {
+            for (_, expr) in expressions.iter() {
+                match *expr {
                     // Check for fused multiply add use
                     Expression::Math { fun, .. } if fun == MathFunction::Fma => {
-                    self.features.request(Features::FMA)
+                        features.request(Features::FMA)
                     }
-                // Check for samples query
+                    // Check for queries that neeed aditonal features
                     Expression::ImageQuery {
-                    query: crate::ImageQuery::NumSamples,
+                        image,
+                        query,
                         ..
-                } => self.features.request(Features::TEXTURE_SAMPLES),
+                    } => match query {
+                        // Storage images use `imageSize` which is only available
+                        // in glsl > 420
+                        //
+                        // layers queries are also implemented as size queries
+                        crate::ImageQuery::Size { .. } | crate::ImageQuery::NumLayers => {
+                            if let TypeInner::Image {
+                                class: crate::ImageClass::Storage { .. }, ..
+                            } = *info[image].ty.inner_with(&module.types) {
+                                features.request(Features::IMAGE_SIZE)
+                            }
+                        },
+                        crate::ImageQuery::NumLevels => features.request(Features::TEXTURE_LEVELS),
+                        crate::ImageQuery::NumSamples => features.request(Features::TEXTURE_SAMPLES),
+                    }
+                    ,
+                    // Check for image loads that needs bound checking on the sample
+                    // or level argument since this requires a feature
+                    Expression::ImageLoad {
+                        sample, level, ..
+                    } => {
+                        if policies.image != crate::proc::BoundsCheckPolicy::Unchecked {
+                            if sample.is_some() {
+                                features.request(Features::TEXTURE_SAMPLES)
+                            }
+                            if level.is_some() {
+                                features.request(Features::TEXTURE_LEVELS)
+                            }
+                        }
+                    }
                     _ => {}
                 }
+            }
         }
         self.features.check_availability(self.options.version)
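Two things happen in the rewritten feature-collection loop above: `self` is destructured up front so that `module`, `info`, and `features` can be borrowed independently, and the traversal visits every function's expressions and then the entry point's exactly once via `chain(std::iter::once(...))`. A stripped-down sketch of that traversal shape (the `Expr` type and the boolean "feature" are invented for illustration, not naga's real types):

```rust
// Simplified stand-in for naga's expression arenas: each "function" is just a
// Vec of expressions, and the only "feature" tracked is fused multiply-add.
#[derive(Debug, PartialEq)]
enum Expr {
    Fma,
    Other,
}

fn main() {
    let functions: Vec<Vec<Expr>> = vec![vec![Expr::Other], vec![Expr::Fma, Expr::Other]];
    let entry_point: Vec<Expr> = vec![Expr::Other];

    let mut needs_fma = false;
    // Same shape as the diff: all functions, then the entry point chained on once.
    for exprs in functions.iter().chain(std::iter::once(&entry_point)) {
        for expr in exprs {
            if *expr == Expr::Fma {
                needs_fma = true; // stands in for features.request(Features::FMA)
            }
        }
    }
    assert!(needs_fma);
}
```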

665
third_party/rust/naga/src/back/glsl/mod.rs vendored

@@ -12,7 +12,6 @@ to output a [`Module`](crate::Module) into glsl
 - 420
 - 430
 - 450
-- 460

 ### ES
 - 300
@@ -69,6 +68,10 @@ pub const SUPPORTED_CORE_VERSIONS: &[u16] = &[330, 400, 410, 420, 430, 440, 450]
 /// List of supported `es` GLSL versions.
 pub const SUPPORTED_ES_VERSIONS: &[u16] = &[300, 310, 320];

+/// The suffix of the variable that will hold the calculated clamped level
+/// of detail for bounds checking in `ImageLoad`
+const CLAMPED_LOD_SUFFIX: &str = "_clamped_lod";
+
 /// Mapping between resources and bindings.
 pub type BindingMap = std::collections::BTreeMap<crate::ResourceBinding, u8>;
@@ -375,6 +378,8 @@ pub struct Writer<'a, W> {
     out: W,
     /// User defined configuration to be used.
     options: &'a Options,
+    /// The bound checking policies to be used
+    policies: proc::BoundsCheckPolicies,

     // Internal State
     /// Features manager used to store all the needed features and write them.
@@ -410,6 +415,7 @@ impl<'a, W: Write> Writer<'a, W> {
         info: &'a valid::ModuleInfo,
         options: &'a Options,
         pipeline_options: &'a PipelineOptions,
+        policies: proc::BoundsCheckPolicies,
     ) -> Result<Self, Error> {
         // Check if the requested version is supported
         if !options.version.is_supported() {
@@ -437,6 +443,8 @@ impl<'a, W: Write> Writer<'a, W> {
             info,
             out,
             options,
+            policies,
             namer,
             features: FeaturesManager::new(),
             names,
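For orientation, here is a minimal sketch of how a caller might drive the GLSL backend now that `Writer::new` takes the bounds-check policies explicitly; it assumes naga built with its GLSL backend (the `glsl-out` feature) and that `module`, `module_info`, `options`, and `pipeline_options` come from the usual front-end and validation steps, none of which are part of this diff.

```rust
use naga::back::glsl;
use naga::proc::BoundsCheckPolicies;

// Hypothetical driver for the updated `glsl::Writer::new` signature.
fn write_glsl(
    module: &naga::Module,
    module_info: &naga::valid::ModuleInfo,
    options: &glsl::Options,
    pipeline_options: &glsl::PipelineOptions,
) -> Result<String, glsl::Error> {
    let mut output = String::new();
    {
        let mut writer = glsl::Writer::new(
            &mut output,
            module,
            module_info,
            options,
            pipeline_options,
            // New in this revision: the bounds-check policies are passed explicitly.
            BoundsCheckPolicies::default(),
        )?;
        writer.write()?;
    }
    Ok(output)
}
```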
@@ -1518,20 +1526,30 @@ impl<'a, W: Write> Writer<'a, W> {
         arg: Handle<crate::Expression>,
         arg1: Handle<crate::Expression>,
         size: usize,
+        ctx: &back::FunctionCtx<'_>,
     ) -> BackendResult {
+        // Write parantheses around the dot product expression to prevent operators
+        // with different precedences from applying earlier.
         write!(self.out, "(")?;
-        let arg0_name = &self.named_expressions[&arg];
-        let arg1_name = &self.named_expressions[&arg1];
-        // This will print an extra '+' at the beginning but that is fine in glsl
+        // Cycle trough all the components of the vector
         for index in 0..size {
             let component = back::COMPONENTS[index];
-            write!(
-                self.out,
-                " + {}.{} * {}.{}",
-                arg0_name, component, arg1_name, component
-            )?;
+            // Write the addition to the previous product
+            // This will print an extra '+' at the beginning but that is fine in glsl
+            write!(self.out, " + ")?;
+            // Write the first vector expression, this expression is marked to be
+            // cached so unless it can't be cached (for example, it's a Constant)
+            // it shouldn't produce large expressions.
+            self.write_expr(arg, ctx)?;
+            // Access the current component on the first vector
+            write!(self.out, ".{} * ", component)?;
+            // Write the second vector expression, this expression is marked to be
+            // cached so unless it can't be cached (for example, it's a Constant)
+            // it shouldn't produce large expressions.
+            self.write_expr(arg1, ctx)?;
+            // Access the current component on the second vector
+            write!(self.out, ".{}", component)?;
         }
         write!(self.out, ")")?;
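To illustrate the expansion this loop produces, `dot(a, b)` on a three-component integer vector comes out roughly as `( + a.x * b.x + a.y * b.y + a.z * b.z)`; the leading `+` is harmless in GLSL. The same expansion in plain Rust (operand names invented):

```rust
// Scalar expansion of an integer dot product, mirroring the per-component
// loop in `write_dot_product` above.
fn main() {
    let a = [1u32, 2, 3];
    let b = [4u32, 5, 6];
    let dot: u32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
    assert_eq!(dot, 1 * 4 + 2 * 5 + 3 * 6);
}
```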
@ -1635,6 +1653,27 @@ impl<'a, W: Write> Writer<'a, W> {
None None
}; };
// If we are going to write an `ImageLoad` next and the target image
// is sampled and we are using the `Restrict` policy for bounds
// checking images we need to write a local holding the clamped lod.
if let crate::Expression::ImageLoad {
image,
level: Some(level_expr),
..
} = ctx.expressions[handle]
{
if let TypeInner::Image {
class: crate::ImageClass::Sampled { .. },
..
} = *ctx.info[image].ty.inner_with(&self.module.types)
{
if let proc::BoundsCheckPolicy::Restrict = self.policies.image {
write!(self.out, "{}", level)?;
self.write_clamped_lod(ctx, handle, image, level_expr)?
}
}
}
if let Some(name) = expr_name { if let Some(name) = expr_name {
write!(self.out, "{}", level)?; write!(self.out, "{}", level)?;
self.write_named_expr(handle, name, ctx)?; self.write_named_expr(handle, name, ctx)?;
@@ -1761,16 +1800,26 @@ impl<'a, W: Write> Writer<'a, W> {
             Statement::Loop {
                 ref body,
                 ref continuing,
+                break_if,
             } => {
-                if !continuing.is_empty() {
+                if !continuing.is_empty() || break_if.is_some() {
                     let gate_name = self.namer.call("loop_init");
                     writeln!(self.out, "{}bool {} = true;", level, gate_name)?;
                     writeln!(self.out, "{}while(true) {{", level)?;
-                    writeln!(self.out, "{}if (!{}) {{", level.next(), gate_name)?;
+                    let l2 = level.next();
+                    let l3 = l2.next();
+                    writeln!(self.out, "{}if (!{}) {{", l2, gate_name)?;
                     for sta in continuing {
-                        self.write_stmt(sta, ctx, level.next())?;
+                        self.write_stmt(sta, ctx, l3)?;
                     }
-                    writeln!(self.out, "{}}}", level.next())?;
+                    if let Some(condition) = break_if {
+                        write!(self.out, "{}if (", l3)?;
+                        self.write_expr(condition, ctx)?;
+                        writeln!(self.out, ") {{")?;
+                        writeln!(self.out, "{}break;", l3.next())?;
+                        writeln!(self.out, "{}}}", l3)?;
+                    }
+                    writeln!(self.out, "{}}}", l2)?;
                     writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
                 } else {
                     writeln!(self.out, "{}while(true) {{", level)?;
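In other words, a Naga `Loop` with a `continuing` block and a `break_if` condition lowers to a `while(true)` loop gated by a `loop_init` flag: the continuing block is skipped on the first pass and the break condition is tested right after it runs. A rough Rust analogue of that control flow (illustrative only, not the writer's output):

```rust
fn main() {
    let mut loop_init = true;
    let mut i = 0;
    loop {
        if !loop_init {
            // continuing block
            i += 1;
            // break_if condition
            if i == 3 {
                break;
            }
        }
        loop_init = false;
        // loop body would run here
    }
    assert_eq!(i, 3);
}
```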
@@ -1933,19 +1982,7 @@ impl<'a, W: Write> Writer<'a, W> {
                 value,
             } => {
                 write!(self.out, "{}", level)?;
-                // This will only panic if the module is invalid
-                let dim = match *ctx.info[image].ty.inner_with(&self.module.types) {
-                    TypeInner::Image { dim, .. } => dim,
-                    _ => unreachable!(),
-                };
-                write!(self.out, "imageStore(")?;
-                self.write_expr(image, ctx)?;
-                write!(self.out, ", ")?;
-                self.write_texture_coordinates(coordinate, array_index, dim, ctx)?;
-                write!(self.out, ", ")?;
-                self.write_expr(value, ctx)?;
-                writeln!(self.out, ");")?;
+                self.write_image_store(ctx, image, coordinate, array_index, value)?
             }
             // A `Call` is written `name(arguments)` where `arguments` is a comma separated expressions list
             Statement::Call {
@ -2320,51 +2357,13 @@ impl<'a, W: Write> Writer<'a, W> {
// End the function // End the function
write!(self.out, ")")? write!(self.out, ")")?
} }
// `ImageLoad` is also a bit complicated.
// There are two functions one for sampled
// images another for storage images, the former uses `texelFetch` and the latter uses
// `imageLoad`.
// Furthermore we have `index` which is always `Some` for sampled images
// and `None` for storage images, so we end up with two functions:
// `texelFetch(image, coordinate, index)` - for sampled images
// `imageLoad(image, coordinate)` - for storage images
Expression::ImageLoad { Expression::ImageLoad {
image, image,
coordinate, coordinate,
array_index, array_index,
sample, sample,
level, level,
} => { } => self.write_image_load(expr, ctx, image, coordinate, array_index, sample, level)?,
// This will only panic if the module is invalid
let (dim, class) = match *ctx.info[image].ty.inner_with(&self.module.types) {
TypeInner::Image {
dim,
arrayed: _,
class,
} => (dim, class),
_ => unreachable!(),
};
let fun_name = match class {
crate::ImageClass::Sampled { .. } => "texelFetch",
crate::ImageClass::Storage { .. } => "imageLoad",
// TODO: Is there even a function for this?
crate::ImageClass::Depth { multi: _ } => {
return Err(Error::Custom("TODO: depth sample loads".to_string()))
}
};
write!(self.out, "{}(", fun_name)?;
self.write_expr(image, ctx)?;
write!(self.out, ", ")?;
self.write_texture_coordinates(coordinate, array_index, dim, ctx)?;
if let Some(sample_or_level) = sample.or(level) {
write!(self.out, ", ")?;
self.write_expr(sample_or_level, ctx)?;
}
write!(self.out, ")")?;
}
// Query translates into one of the: // Query translates into one of the:
// - textureSize/imageSize // - textureSize/imageSize
// - textureQueryLevels // - textureQueryLevels
@ -2747,7 +2746,7 @@ impl<'a, W: Write> Writer<'a, W> {
.. ..
} => "dot", } => "dot",
crate::TypeInner::Vector { size, .. } => { crate::TypeInner::Vector { size, .. } => {
return self.write_dot_product(arg, arg1.unwrap(), size as usize) return self.write_dot_product(arg, arg1.unwrap(), size as usize, ctx)
} }
_ => unreachable!( _ => unreachable!(
"Correct TypeInner for dot product should be already validated" "Correct TypeInner for dot product should be already validated"
@ -2819,32 +2818,59 @@ impl<'a, W: Write> Writer<'a, W> {
let extract_bits = fun == Mf::ExtractBits; let extract_bits = fun == Mf::ExtractBits;
let insert_bits = fun == Mf::InsertBits; let insert_bits = fun == Mf::InsertBits;
// we might need to cast to unsigned integers since // Some GLSL functions always return signed integers (like findMSB),
// GLSL's findLSB / findMSB always return signed integers // so they need to be cast to uint if the argument is also an uint.
let need_extra_paren = { let ret_might_need_int_to_uint =
(fun == Mf::FindLsb || fun == Mf::FindMsb || fun == Mf::CountOneBits) matches!(fun, Mf::FindLsb | Mf::FindMsb | Mf::CountOneBits | Mf::Abs);
&& match *ctx.info[arg].ty.inner_with(&self.module.types) {
crate::TypeInner::Scalar { // Some GLSL functions only accept signed integers (like abs),
kind: crate::ScalarKind::Uint, // so they need their argument cast from uint to int.
.. let arg_might_need_uint_to_int = matches!(fun, Mf::Abs);
} => {
write!(self.out, "uint(")?; // Check if the argument is an unsigned integer and return the vector size
true // in case it's a vector
} let maybe_uint_size = match *ctx.info[arg].ty.inner_with(&self.module.types) {
crate::TypeInner::Vector { crate::TypeInner::Scalar {
kind: crate::ScalarKind::Uint, kind: crate::ScalarKind::Uint,
size, ..
.. } => Some(None),
} => { crate::TypeInner::Vector {
write!(self.out, "uvec{}(", size as u8)?; kind: crate::ScalarKind::Uint,
true size,
} ..
_ => false, } => Some(Some(size)),
} _ => None,
}; };
// Cast to uint if the function needs it
if ret_might_need_int_to_uint {
if let Some(maybe_size) = maybe_uint_size {
match maybe_size {
Some(size) => write!(self.out, "uvec{}(", size as u8)?,
None => write!(self.out, "uint(")?,
}
}
}
write!(self.out, "{}(", fun_name)?; write!(self.out, "{}(", fun_name)?;
// Cast to int if the function needs it
if arg_might_need_uint_to_int {
if let Some(maybe_size) = maybe_uint_size {
match maybe_size {
Some(size) => write!(self.out, "ivec{}(", size as u8)?,
None => write!(self.out, "int(")?,
}
}
}
self.write_expr(arg, ctx)?; self.write_expr(arg, ctx)?;
// Close the cast from uint to int
if arg_might_need_uint_to_int && maybe_uint_size.is_some() {
write!(self.out, ")")?
}
if let Some(arg) = arg1 { if let Some(arg) = arg1 {
write!(self.out, ", ")?; write!(self.out, ", ")?;
if extract_bits { if extract_bits {
@ -2877,7 +2903,8 @@ impl<'a, W: Write> Writer<'a, W> {
} }
write!(self.out, ")")?; write!(self.out, ")")?;
if need_extra_paren { // Close the cast from int to uint
if ret_might_need_int_to_uint && maybe_uint_size.is_some() {
write!(self.out, ")")? write!(self.out, ")")?
} }
} }
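Concretely, for a `u32` argument `abs` picks up both casts and is emitted along the lines of `uint(abs(int(x)))`, while `findMSB` only needs the return cast and becomes `uint(findMSB(x))` (shapes assumed for illustration). The signed round-trip, modeled in Rust:

```rust
fn main() {
    let x: u32 = 5;
    // like `uint(abs(int(x)))`: cast to signed, take the absolute value, cast back
    let through_signed: u32 = (x as i32).unsigned_abs();
    assert_eq!(through_signed, x);
}
```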
@ -2913,38 +2940,50 @@ impl<'a, W: Write> Writer<'a, W> {
None => { None => {
use crate::ScalarKind as Sk; use crate::ScalarKind as Sk;
let source_kind = inner.scalar_kind().unwrap(); let target_vector_type = match *inner {
let conv_op = match (source_kind, target_kind) { TypeInner::Vector { size, width, .. } => Some(TypeInner::Vector {
(Sk::Float, Sk::Sint) => "floatBitsToInt", size,
(Sk::Float, Sk::Uint) => "floatBitsToUint", width,
(Sk::Sint, Sk::Float) => "intBitsToFloat", kind: target_kind,
(Sk::Uint, Sk::Float) => "uintBitsToFloat", }),
// There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion _ => None,
(Sk::Uint, Sk::Sint) => "int",
(Sk::Sint, Sk::Uint) => "uint",
(Sk::Bool, Sk::Sint) => "int",
(Sk::Bool, Sk::Uint) => "uint",
(Sk::Bool, Sk::Float) => "float",
(Sk::Sint, Sk::Bool) => "bool",
(Sk::Uint, Sk::Bool) => "bool",
(Sk::Float, Sk::Bool) => "bool",
// No conversion needed
(Sk::Sint, Sk::Sint) => "",
(Sk::Uint, Sk::Uint) => "",
(Sk::Float, Sk::Float) => "",
(Sk::Bool, Sk::Bool) => "",
}; };
write!(self.out, "{}", conv_op)?;
if !conv_op.is_empty() { let source_kind = inner.scalar_kind().unwrap();
write!(self.out, "(")?;
} match (source_kind, target_kind, target_vector_type) {
// No conversion needed
(Sk::Sint, Sk::Sint, _)
| (Sk::Uint, Sk::Uint, _)
| (Sk::Float, Sk::Float, _)
| (Sk::Bool, Sk::Bool, _) => {
self.write_expr(expr, ctx)?;
return Ok(());
}
// Cast to/from floats
(Sk::Float, Sk::Sint, _) => write!(self.out, "floatBitsToInt")?,
(Sk::Float, Sk::Uint, _) => write!(self.out, "floatBitsToUint")?,
(Sk::Sint, Sk::Float, _) => write!(self.out, "intBitsToFloat")?,
(Sk::Uint, Sk::Float, _) => write!(self.out, "uintBitsToFloat")?,
// Cast between vector types
(_, _, Some(vector)) => {
self.write_value_type(&vector)?;
}
// There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
(Sk::Uint | Sk::Bool, Sk::Sint, None) => write!(self.out, "int")?,
(Sk::Sint | Sk::Bool, Sk::Uint, None) => write!(self.out, "uint")?,
(Sk::Bool, Sk::Float, None) => write!(self.out, "float")?,
(Sk::Sint | Sk::Uint | Sk::Float, Sk::Bool, None) => {
write!(self.out, "bool")?
}
};
write!(self.out, "(")?;
self.write_expr(expr, ctx)?; self.write_expr(expr, ctx)?;
if !conv_op.is_empty() { write!(self.out, ")")?;
write!(self.out, ")")?
}
} }
} }
} }
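The distinction the match draws can be restated in Rust terms: the float cases are bit-pattern reinterpretations (`floatBitsToInt`/`intBitsToFloat`), while the uint/int/bool cases are ordinary value conversions written as constructor calls. A small sketch:

```rust
fn main() {
    let x: f32 = 1.5;
    let bits = x.to_bits() as i32; // like floatBitsToInt(x): reinterpret the bits
    assert_eq!(f32::from_bits(bits as u32), x); // like intBitsToFloat(bits)

    let u: u32 = 7;
    assert_eq!(u as i32, 7); // like int(u): a value-preserving constructor conversion
}
```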
@ -2961,25 +3000,71 @@ impl<'a, W: Write> Writer<'a, W> {
Ok(()) Ok(())
} }
fn write_texture_coordinates( /// Helper function to write the local holding the clamped lod
fn write_clamped_lod(
&mut self, &mut self,
ctx: &back::FunctionCtx,
expr: Handle<crate::Expression>,
image: Handle<crate::Expression>,
level_expr: Handle<crate::Expression>,
) -> Result<(), Error> {
// Define our local and start a call to `clamp`
write!(
self.out,
"int {}{}{} = clamp(",
back::BAKE_PREFIX,
expr.index(),
CLAMPED_LOD_SUFFIX
)?;
// Write the lod that will be clamped
self.write_expr(level_expr, ctx)?;
// Set the min value to 0 and start a call to `textureQueryLevels` to get
// the maximum value
write!(self.out, ", 0, textureQueryLevels(")?;
// Write the target image as an argument to `textureQueryLevels`
self.write_expr(image, ctx)?;
// Close the call to `textureQueryLevels` subtract 1 from it since
// the lod argument is 0 based, close the `clamp` call and end the
// local declaration statement.
writeln!(self.out, ") - 1);")?;
Ok(())
}
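Under the `Restrict` policy this helper ends up emitting something along the lines of `int _e12_clamped_lod = clamp(lod, 0, textureQueryLevels(tex) - 1);` (the `_e12`, `lod`, and `tex` names are invented for illustration). The clamping itself, in plain Rust:

```rust
// Keep the level of detail inside [0, mip_level_count - 1], as the emitted
// `clamp(..., 0, textureQueryLevels(...) - 1)` does.
fn clamp_lod(lod: i32, levels: i32) -> i32 {
    lod.clamp(0, levels - 1)
}

fn main() {
    assert_eq!(clamp_lod(7, 4), 3);  // too large: clamped to the last level
    assert_eq!(clamp_lod(-1, 4), 0); // negative: clamped to the base level
}
```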
// Helper method used to retrieve how many elements a coordinate vector
// for the images operations need.
fn get_coordinate_vector_size(&self, dim: crate::ImageDimension, arrayed: bool) -> u8 {
// openGL es doesn't have 1D images so we need workaround it
let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
// Get how many components the coordinate vector needs for the dimensions only
let tex_coord_size = match dim {
crate::ImageDimension::D1 => 1,
crate::ImageDimension::D2 => 2,
crate::ImageDimension::D3 => 3,
crate::ImageDimension::Cube => 2,
};
// Calculate the true size of the coordinate vector by adding 1 for arrayed images
// and another 1 if we need to workaround 1D images by making them 2D
tex_coord_size + tex_1d_hack as u8 + arrayed as u8
}
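For example, under this rule a 2D arrayed image needs an `ivec3` coordinate, and a 1D image on GLSL ES (where it is emulated as 2D) needs an `ivec2`. The same arithmetic as a standalone sketch:

```rust
// Coordinate component count = base dimensions + 1 for the ES 1D-to-2D hack
// + 1 for an array layer index, matching the helper above.
fn coord_size(base_dims: u8, tex_1d_hack: bool, arrayed: bool) -> u8 {
    base_dims + tex_1d_hack as u8 + arrayed as u8
}

fn main() {
    assert_eq!(coord_size(2, false, true), 3); // 2D array image -> ivec3
    assert_eq!(coord_size(1, true, false), 2); // 1D image on GLSL ES -> ivec2
}
```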
/// Helper method to write the coordinate vector for image operations
fn write_texture_coord(
&mut self,
ctx: &back::FunctionCtx,
vector_size: u8,
coordinate: Handle<crate::Expression>, coordinate: Handle<crate::Expression>,
array_index: Option<Handle<crate::Expression>>, array_index: Option<Handle<crate::Expression>>,
dim: crate::ImageDimension, // Emulate 1D images as 2D for profiles that don't support it (glsl es)
ctx: &back::FunctionCtx, tex_1d_hack: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
use crate::ImageDimension as IDim;
let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
match array_index { match array_index {
// If the image needs an array indice we need to add it to the end of our
// coordinate vector, to do so we will use the `ivec(ivec, scalar)`
// constructor notation (NOTE: the inner `ivec` can also be a scalar, this
// is important for 1D arrayed images).
Some(layer_expr) => { Some(layer_expr) => {
let tex_coord_size = match dim { write!(self.out, "ivec{}(", vector_size)?;
IDim::D1 => 2,
IDim::D2 => 3,
IDim::D3 => 4,
IDim::Cube => 4,
};
write!(self.out, "ivec{}(", tex_coord_size + tex_1d_hack as u8)?;
self.write_expr(coordinate, ctx)?; self.write_expr(coordinate, ctx)?;
write!(self.out, ", ")?; write!(self.out, ", ")?;
// If we are replacing sampler1D with sampler2D we also need // If we are replacing sampler1D with sampler2D we also need
@ -2990,16 +3075,326 @@ impl<'a, W: Write> Writer<'a, W> {
self.write_expr(layer_expr, ctx)?; self.write_expr(layer_expr, ctx)?;
write!(self.out, ")")?; write!(self.out, ")")?;
} }
// Otherwise write just the expression (and the 1D hack if needed)
None => { None => {
if tex_1d_hack { if tex_1d_hack {
write!(self.out, "ivec2(")?; write!(self.out, "ivec2(")?;
} }
self.write_expr(coordinate, ctx)?; self.write_expr(coordinate, ctx)?;
if tex_1d_hack { if tex_1d_hack {
write!(self.out, ", 0.0)")?; write!(self.out, ", 0)")?;
} }
} }
} }
Ok(())
}
/// Helper method to write the `ImageStore` statement
fn write_image_store(
&mut self,
ctx: &back::FunctionCtx,
image: Handle<crate::Expression>,
coordinate: Handle<crate::Expression>,
array_index: Option<Handle<crate::Expression>>,
value: Handle<crate::Expression>,
) -> Result<(), Error> {
use crate::ImageDimension as IDim;
// NOTE: openGL requires that `imageStore`s have no effets when the texel is invalid
// so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
// This will only panic if the module is invalid
let dim = match *ctx.info[image].ty.inner_with(&self.module.types) {
TypeInner::Image { dim, .. } => dim,
_ => unreachable!(),
};
// Begin our call to `imageStore`
write!(self.out, "imageStore(")?;
self.write_expr(image, ctx)?;
// Separate the image argument from the coordinates
write!(self.out, ", ")?;
// openGL es doesn't have 1D images so we need workaround it
let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
// Write the coordinate vector
self.write_texture_coord(
ctx,
// Get the size of the coordinate vector
self.get_coordinate_vector_size(dim, array_index.is_some()),
coordinate,
array_index,
tex_1d_hack,
)?;
// Separate the coordinate from the value to write and write the expression
// of the value to write.
write!(self.out, ", ")?;
self.write_expr(value, ctx)?;
// End the call to `imageStore` and the statement.
writeln!(self.out, ");")?;
Ok(())
}
/// Helper method for writing an `ImageLoad` expression.
#[allow(clippy::too_many_arguments)]
fn write_image_load(
&mut self,
handle: Handle<crate::Expression>,
ctx: &back::FunctionCtx,
image: Handle<crate::Expression>,
coordinate: Handle<crate::Expression>,
array_index: Option<Handle<crate::Expression>>,
sample: Option<Handle<crate::Expression>>,
level: Option<Handle<crate::Expression>>,
) -> Result<(), Error> {
use crate::ImageDimension as IDim;
// `ImageLoad` is a bit complicated.
// There are two functions one for sampled
// images another for storage images, the former uses `texelFetch` and the
// latter uses `imageLoad`.
//
// Furthermore we have `level` which is always `Some` for sampled images
// and `None` for storage images, so we end up with two functions:
// - `texelFetch(image, coordinate, level)` for sampled images
// - `imageLoad(image, coordinate)` for storage images
//
// Finally we also have to consider bounds checking, for storage images
// this is easy since openGL requires that invalid texels always return
// 0, for sampled images we need to either verify that all arguments are
// in bounds (`ReadZeroSkipWrite`) or make them a valid texel (`Restrict`).
// This will only panic if the module is invalid
let (dim, class) = match *ctx.info[image].ty.inner_with(&self.module.types) {
TypeInner::Image {
dim,
arrayed: _,
class,
} => (dim, class),
_ => unreachable!(),
};
// Get the name of the function to be used for the load operation
// and the policy to be used with it.
let (fun_name, policy) = match class {
// Sampled images inherit the policy from the user passed policies
crate::ImageClass::Sampled { .. } => ("texelFetch", self.policies.image),
crate::ImageClass::Storage { .. } => {
// OpenGL 4.2 Core §3.9.20 defines that out of bounds texels in `imageLoad`s
// always return zero values so we don't need to generate bounds checks
("imageLoad", proc::BoundsCheckPolicy::Unchecked)
}
// TODO: Is there even a function for this?
crate::ImageClass::Depth { multi: _ } => {
return Err(Error::Custom(
"WGSL `textureLoad` from depth textures is not supported in GLSL".to_string(),
))
}
};
// openGL es doesn't have 1D images so we need workaround it
let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
// Get the size of the coordinate vector
let vector_size = self.get_coordinate_vector_size(dim, array_index.is_some());
if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
// To write the bounds checks for `ReadZeroSkipWrite` we will use a
// ternary operator since we are in the middle of an expression and
// need to return a value.
//
// NOTE: glsl does short circuit when evaluating logical
// expressions so we can be sure that after we test a
// condition it will be true for the next ones
// Write parantheses around the ternary operator to prevent problems with
// expressions emitted before or after it having more precedence
write!(self.out, "(",)?;
// The lod check needs to precede the size check since we need
// to use the lod to get the size of the image at that level.
if let Some(level_expr) = level {
self.write_expr(level_expr, ctx)?;
write!(self.out, " < textureQueryLevels(",)?;
self.write_expr(image, ctx)?;
// Chain the next check
write!(self.out, ") && ")?;
}
// Check that the sample arguments doesn't exceed the number of samples
if let Some(sample_expr) = sample {
self.write_expr(sample_expr, ctx)?;
write!(self.out, " < textureSamples(",)?;
self.write_expr(image, ctx)?;
// Chain the next check
write!(self.out, ") && ")?;
}
// We now need to write the size checks for the coordinates and array index
// first we write the comparation function in case the image is 1D non arrayed
// (and no 1D to 2D hack was needed) we are comparing scalars so the less than
// operator will suffice, but otherwise we'll be comparing two vectors so we'll
// need to use the `lessThan` function but it returns a vector of booleans (one
// for each comparison) so we need to fold it all in one scalar boolean, since
// we want all comparisons to pass we use the `all` function which will only
// return `true` if all the elements of the boolean vector are also `true`.
//
// So we'll end with one of the following forms
// - `coord < textureSize(image, lod)` for 1D images
// - `all(lessThan(coord, textureSize(image, lod)))` for normal images
// - `all(lessThan(ivec(coord, array_index), textureSize(image, lod)))`
// for arrayed images
// - `all(lessThan(coord, textureSize(image)))` for multi sampled images
if vector_size != 1 {
write!(self.out, "all(lessThan(")?;
}
// Write the coordinate vector
self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
if vector_size != 1 {
// If we used the `lessThan` function we need to separate the
// coordinates from the image size.
write!(self.out, ", ")?;
} else {
// If we didn't use it (ie. 1D images) we perform the comparsion
// using the less than operator.
write!(self.out, " < ")?;
}
// Call `textureSize` to get our image size
write!(self.out, "textureSize(")?;
self.write_expr(image, ctx)?;
// `textureSize` uses the lod as a second argument for mipmapped images
if let Some(level_expr) = level {
// Separate the image from the lod
write!(self.out, ", ")?;
self.write_expr(level_expr, ctx)?;
}
// Close the `textureSize` call
write!(self.out, ")")?;
if vector_size != 1 {
// Close the `all` and `lessThan` calls
write!(self.out, "))")?;
}
// Finally end the condition part of the ternary operator
write!(self.out, " ? ")?;
}
// Begin the call to the function used to load the texel
write!(self.out, "{}(", fun_name)?;
self.write_expr(image, ctx)?;
write!(self.out, ", ")?;
// If we are using `Restrict` bounds checking we need to pass valid texel
// coordinates, to do so we use the `clamp` function to get a value between
// 0 and the image size - 1 (indexing begins at 0)
if let proc::BoundsCheckPolicy::Restrict = policy {
write!(self.out, "clamp(")?;
}
// Write the coordinate vector
self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
// If we are using `Restrict` bounds checking we need to write the rest of the
// clamp we initiated before writing the coordinates.
if let proc::BoundsCheckPolicy::Restrict = policy {
// Write the min value 0
if vector_size == 1 {
write!(self.out, ", 0")?;
} else {
write!(self.out, ", ivec{}(0)", vector_size)?;
}
// Start the `textureSize` call to use as the max value.
write!(self.out, ", textureSize(")?;
self.write_expr(image, ctx)?;
// If the image is mipmapped we need to add the lod argument to the
// `textureSize` call, but this needs to be the clamped lod, this should
// have been generated earlier and put in a local.
if class.is_mipmapped() {
write!(
self.out,
", {}{}{}",
back::BAKE_PREFIX,
handle.index(),
CLAMPED_LOD_SUFFIX
)?;
}
// Close the `textureSize` call
write!(self.out, ")")?;
// Subtract 1 from the `textureSize` call since the coordinates are zero based.
if vector_size == 1 {
write!(self.out, " - 1")?;
} else {
write!(self.out, " - ivec{}(1)", vector_size)?;
}
// Close the `clamp` call
write!(self.out, ")")?;
// Add the clamped lod (if present) as the second argument to the
// image load function.
if level.is_some() {
write!(
self.out,
", {}{}{}",
back::BAKE_PREFIX,
handle.index(),
CLAMPED_LOD_SUFFIX
)?;
}
// If a sample argument is needed we need to clamp it between 0 and
// the number of samples the image has.
if let Some(sample_expr) = sample {
write!(self.out, ", clamp(")?;
self.write_expr(sample_expr, ctx)?;
// Set the min value to 0 and start the call to `textureSamples`
write!(self.out, ", 0, textureSamples(")?;
self.write_expr(image, ctx)?;
// Close the `textureSamples` call, subtract 1 from it since the sample
// argument is zero based, and close the `clamp` call
writeln!(self.out, ") - 1)")?;
}
} else if let Some(sample_or_level) = sample.or(level) {
// If no bounds checking is need just add the sample or level argument
// after the coordinates
write!(self.out, ", ")?;
self.write_expr(sample_or_level, ctx)?;
}
// Close the image load function.
write!(self.out, ")")?;
// If we were using the `ReadZeroSkipWrite` policy we need to end the first branch
// (which is taken if the condition is `true`) with a colon (`:`) and write the
// second branch which is just a 0 value.
if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
// Get the kind of the output value.
let kind = match class {
// Only sampled images can reach here since storage images
// don't need bounds checks and depth images aren't implmented
crate::ImageClass::Sampled { kind, .. } => kind,
_ => unreachable!(),
};
// End the first branch
write!(self.out, " : ")?;
// Write the 0 value
write!(self.out, "{}vec4(", glsl_scalar(kind, 4)?.prefix,)?;
self.write_zero_init_scalar(kind)?;
// Close the zero value constructor
write!(self.out, ")")?;
// Close the parantheses surrounding our ternary
write!(self.out, ")")?;
}
Ok(()) Ok(())
} }
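Putting the pieces together, a `ReadZeroSkipWrite` load from a sampled 2D texture comes out roughly as `(lod < textureQueryLevels(tex) && all(lessThan(coord, textureSize(tex, lod))) ? texelFetch(tex, coord, lod) : vec4(0.0))`, with all identifiers invented for illustration. The short-circuiting bounds test itself, modeled in Rust:

```rust
// The lod is tested first because it is needed to query the size at that level,
// mirroring the ordering of the emitted ternary condition.
fn in_bounds(coord: [i32; 2], lod: i32, levels: i32, size_at_lod: [i32; 2]) -> bool {
    lod < levels && coord.iter().zip(size_at_lod.iter()).all(|(c, s)| c < s)
}

fn main() {
    assert!(in_bounds([3, 1], 0, 1, [4, 4]));
    assert!(!in_bounds([4, 1], 0, 1, [4, 4])); // x coordinate out of range
    assert!(!in_bounds([0, 0], 2, 1, [4, 4])); // lod out of range
}
```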
@@ -3347,7 +3742,7 @@ const fn glsl_storage_format(format: crate::StorageFormat) -> &'static str {
         Sf::Rg16Uint => "rg16ui",
         Sf::Rg16Sint => "rg16i",
         Sf::Rg16Float => "rg16f",
-        Sf::Rgba8Unorm => "rgba8ui",
+        Sf::Rgba8Unorm => "rgba8",
         Sf::Rgba8Snorm => "rgba8_snorm",
         Sf::Rgba8Uint => "rgba8ui",
         Sf::Rgba8Sint => "rgba8i",

third_party/rust/naga/src/back/hlsl/conv.rs (vendored; 5 changed lines)

@@ -1,5 +1,7 @@
 use std::borrow::Cow;

+use crate::proc::Alignment;
+
 use super::Error;

 impl crate::ScalarKind {
@@ -49,8 +51,7 @@ impl crate::TypeInner {
                 rows,
                 width,
             } => {
-                let aligned_rows = if rows > crate::VectorSize::Bi { 4 } else { 2 };
-                let stride = aligned_rows * width as u32;
+                let stride = Alignment::from(rows) * width as u32;
                 let last_row_size = rows as u32 * width as u32;
                 ((columns as u32 - 1) * stride) + last_row_size
             }

third_party/rust/naga/src/back/hlsl/help.rs (vendored; 158 changed lines)

@ -54,6 +54,11 @@ pub(super) struct WrappedStructMatrixAccess {
pub(super) index: u32, pub(super) index: u32,
} }
#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub(super) struct WrappedMatCx2 {
pub(super) columns: crate::VectorSize,
}
/// HLSL backend requires its own `ImageQuery` enum. /// HLSL backend requires its own `ImageQuery` enum.
/// ///
/// It is used inside `WrappedImageQuery` and should be unique per ImageQuery function. /// It is used inside `WrappedImageQuery` and should be unique per ImageQuery function.
@ -461,12 +466,36 @@ impl<'a, W: Write> super::Writer<'a, W> {
)?; )?;
} }
} }
_ => { ref other => {
writeln!( // We cast arrays of native HLSL `floatCx2`s to arrays of `matCx2`s
self.out, // (where the inner matrix is represented by a struct with C `float2` members).
"{}{}.{} = {}{};", // See the module-level block comment in mod.rs for details.
INDENT, RETURN_VARIABLE_NAME, field_name, ARGUMENT_VARIABLE_NAME, i, if let Some(super::writer::MatrixType {
)?; columns,
rows: crate::VectorSize::Bi,
width: 4,
}) = super::writer::get_inner_matrix_data(module, member.ty)
{
write!(
self.out,
"{}{}.{} = (__mat{}x2",
INDENT, RETURN_VARIABLE_NAME, field_name, columns as u8
)?;
if let crate::TypeInner::Array { base, size, .. } = *other {
self.write_array_size(module, base, size)?;
}
writeln!(self.out, "){}{};", ARGUMENT_VARIABLE_NAME, i,)?;
} else {
writeln!(
self.out,
"{}{}.{} = {}{};",
INDENT,
RETURN_VARIABLE_NAME,
field_name,
ARGUMENT_VARIABLE_NAME,
i,
)?;
}
} }
} }
} }
@ -715,7 +744,7 @@ impl<'a, W: Write> super::Writer<'a, W> {
for i in 0..columns as u8 { for i in 0..columns as u8 {
writeln!( writeln!(
self.out, self.out,
"{}case {}: {}.{}_{} = {};", "{}case {}: {{ {}.{}_{} = {}; break; }}",
INDENT, INDENT,
i, i,
STRUCT_ARGUMENT_VARIABLE_NAME, STRUCT_ARGUMENT_VARIABLE_NAME,
@ -809,7 +838,7 @@ impl<'a, W: Write> super::Writer<'a, W> {
for i in 0..columns as u8 { for i in 0..columns as u8 {
writeln!( writeln!(
self.out, self.out,
"{}case {}: {}.{}_{}[{}] = {};", "{}case {}: {{ {}.{}_{}[{}] = {}; break; }}",
INDENT, INDENT,
i, i,
STRUCT_ARGUMENT_VARIABLE_NAME, STRUCT_ARGUMENT_VARIABLE_NAME,
@ -1050,4 +1079,117 @@ impl<'a, W: Write> super::Writer<'a, W> {
} }
Ok(()) Ok(())
} }
pub(super) fn write_mat_cx2_typedef_and_functions(
&mut self,
WrappedMatCx2 { columns }: WrappedMatCx2,
) -> BackendResult {
use crate::back::INDENT;
// typedef
write!(self.out, "typedef struct {{ ")?;
for i in 0..columns as u8 {
write!(self.out, "float2 _{}; ", i)?;
}
writeln!(self.out, "}} __mat{}x2;", columns as u8)?;
// __get_col_of_mat
writeln!(
self.out,
"float2 __get_col_of_mat{}x2(__mat{}x2 mat, uint idx) {{",
columns as u8, columns as u8
)?;
writeln!(self.out, "{}switch(idx) {{", INDENT)?;
for i in 0..columns as u8 {
writeln!(self.out, "{}case {}: {{ return mat._{}; }}", INDENT, i, i)?;
}
writeln!(self.out, "{}default: {{ return (float2)0; }}", INDENT)?;
writeln!(self.out, "{}}}", INDENT)?;
writeln!(self.out, "}}")?;
// __set_col_of_mat
writeln!(
self.out,
"void __set_col_of_mat{}x2(__mat{}x2 mat, uint idx, float2 value) {{",
columns as u8, columns as u8
)?;
writeln!(self.out, "{}switch(idx) {{", INDENT)?;
for i in 0..columns as u8 {
writeln!(
self.out,
"{}case {}: {{ mat._{} = value; break; }}",
INDENT, i, i
)?;
}
writeln!(self.out, "{}}}", INDENT)?;
writeln!(self.out, "}}")?;
// __set_el_of_mat
writeln!(
self.out,
"void __set_el_of_mat{}x2(__mat{}x2 mat, uint idx, uint vec_idx, float value) {{",
columns as u8, columns as u8
)?;
writeln!(self.out, "{}switch(idx) {{", INDENT)?;
for i in 0..columns as u8 {
writeln!(
self.out,
"{}case {}: {{ mat._{}[vec_idx] = value; break; }}",
INDENT, i, i
)?;
}
writeln!(self.out, "{}}}", INDENT)?;
writeln!(self.out, "}}")?;
writeln!(self.out)?;
Ok(())
}
pub(super) fn write_all_mat_cx2_typedefs_and_functions(
&mut self,
module: &crate::Module,
) -> BackendResult {
for (handle, _) in module.global_variables.iter() {
let global = &module.global_variables[handle];
if global.space == crate::AddressSpace::Uniform {
if let Some(super::writer::MatrixType {
columns,
rows: crate::VectorSize::Bi,
width: 4,
}) = super::writer::get_inner_matrix_data(module, global.ty)
{
let entry = WrappedMatCx2 { columns };
if !self.wrapped.mat_cx2s.contains(&entry) {
self.write_mat_cx2_typedef_and_functions(entry)?;
self.wrapped.mat_cx2s.insert(entry);
}
}
}
}
for (_, ty) in module.types.iter() {
if let crate::TypeInner::Struct { ref members, .. } = ty.inner {
for member in members.iter() {
if let crate::TypeInner::Array { .. } = module.types[member.ty].inner {
if let Some(super::writer::MatrixType {
columns,
rows: crate::VectorSize::Bi,
width: 4,
}) = super::writer::get_inner_matrix_data(module, member.ty)
{
let entry = WrappedMatCx2 { columns };
if !self.wrapped.mat_cx2s.contains(&entry) {
self.write_mat_cx2_typedef_and_functions(entry)?;
self.wrapped.mat_cx2s.insert(entry);
}
}
}
}
}
}
Ok(())
}
} }
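To summarize what these helpers set up: a uniform `matCx2` is stored as C independent `float2` columns, and dynamic column access goes through the generated `__get_col_of_matCx2`/`__set_col_of_matCx2` functions instead of native matrix indexing. A small Rust model of the layout and the getter (an illustrative analogy, not generated code):

```rust
// Each column is an independent two-component vector, mirroring the
// `float2 _0; float2 _1; ...` fields of the generated __matCx2 struct.
#[derive(Clone, Copy, Default, PartialEq, Debug)]
struct Float2(f32, f32);

struct Mat4x2 {
    cols: [Float2; 4],
}

// Analogue of `__get_col_of_mat4x2`: out-of-range indices fall back to zero,
// like the generated `default` switch arm.
fn get_col_of_mat4x2(mat: &Mat4x2, idx: usize) -> Float2 {
    mat.cols.get(idx).copied().unwrap_or_default()
}

fn main() {
    let m = Mat4x2 { cols: [Float2(1.0, 2.0); 4] };
    assert_eq!(get_col_of_mat4x2(&m, 1), Float2(1.0, 2.0));
    assert_eq!(get_col_of_mat4x2(&m, 9), Float2(0.0, 0.0));
}
```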

third_party/rust/naga/src/back/hlsl/mod.rs (vendored; 10 changed lines)

@ -57,8 +57,8 @@ that the columns of a `matKx2<f32>` need only be [aligned as required
for `vec2<f32>`][ilov], which is [eight-byte alignment][8bb]. for `vec2<f32>`][ilov], which is [eight-byte alignment][8bb].
To compensate for this, any time a `matKx2<f32>` appears in a WGSL To compensate for this, any time a `matKx2<f32>` appears in a WGSL
`uniform` variable, whether directly as the variable's type or as a `uniform` variable, whether directly as the variable's type or as part
struct member, we actually emit `K` separate `float2` members, and of a struct/array, we actually emit `K` separate `float2` members, and
assemble/disassemble the matrix from its columns (in WGSL; rows in assemble/disassemble the matrix from its columns (in WGSL; rows in
HLSL) upon load and store. HLSL) upon load and store.
@ -92,14 +92,10 @@ float3x2 GetMatmOnBaz(Baz obj) {
We also emit an analogous `Set` function, as well as functions for We also emit an analogous `Set` function, as well as functions for
accessing individual columns by dynamic index. accessing individual columns by dynamic index.
At present, we do not generate correct HLSL when `matCx2<f32>` us used
directly as the type of a WGSL `uniform` global ([#1837]).
[hlsl]: https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl [hlsl]: https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl
[ilov]: https://gpuweb.github.io/gpuweb/wgsl/#internal-value-layout [ilov]: https://gpuweb.github.io/gpuweb/wgsl/#internal-value-layout
[16bb]: https://github.com/microsoft/DirectXShaderCompiler/wiki/Buffer-Packing#constant-buffer-packing [16bb]: https://github.com/microsoft/DirectXShaderCompiler/wiki/Buffer-Packing#constant-buffer-packing
[8bb]: https://gpuweb.github.io/gpuweb/wgsl/#alignment-and-size [8bb]: https://gpuweb.github.io/gpuweb/wgsl/#alignment-and-size
[#1837]: https://github.com/gfx-rs/naga/issues/1837
*/ */
mod conv; mod conv;
@ -253,6 +249,7 @@ struct Wrapped {
image_queries: crate::FastHashSet<help::WrappedImageQuery>, image_queries: crate::FastHashSet<help::WrappedImageQuery>,
constructors: crate::FastHashSet<help::WrappedConstructor>, constructors: crate::FastHashSet<help::WrappedConstructor>,
struct_matrix_access: crate::FastHashSet<help::WrappedStructMatrixAccess>, struct_matrix_access: crate::FastHashSet<help::WrappedStructMatrixAccess>,
mat_cx2s: crate::FastHashSet<help::WrappedMatCx2>,
} }
impl Wrapped { impl Wrapped {
@ -261,6 +258,7 @@ impl Wrapped {
self.image_queries.clear(); self.image_queries.clear();
self.constructors.clear(); self.constructors.clear();
self.struct_matrix_access.clear(); self.struct_matrix_access.clear();
self.mat_cx2s.clear();
} }
} }


@ -6,7 +6,7 @@ HLSL backend uses byte address buffers for all storage buffers in IR.
use super::{super::FunctionCtx, BackendResult, Error}; use super::{super::FunctionCtx, BackendResult, Error};
use crate::{ use crate::{
proc::{NameKey, TypeResolution}, proc::{Alignment, NameKey, TypeResolution},
Handle, Handle,
}; };
@ -130,11 +130,7 @@ impl<W: fmt::Write> super::Writer<'_, W> {
)?; )?;
// Note: Matrices containing vec3s, due to padding, act like they contain vec4s. // Note: Matrices containing vec3s, due to padding, act like they contain vec4s.
let padded_rows = match rows { let row_stride = Alignment::from(rows) * width as u32;
crate::VectorSize::Tri => 4,
rows => rows as u32,
};
let row_stride = width as u32 * padded_rows;
let iter = (0..columns as u32).map(|i| { let iter = (0..columns as u32).map(|i| {
let ty_inner = crate::TypeInner::Vector { let ty_inner = crate::TypeInner::Vector {
size: rows, size: rows,
@ -277,11 +273,7 @@ impl<W: fmt::Write> super::Writer<'_, W> {
writeln!(self.out, ";")?; writeln!(self.out, ";")?;
// Note: Matrices containing vec3s, due to padding, act like they contain vec4s. // Note: Matrices containing vec3s, due to padding, act like they contain vec4s.
let padded_rows = match rows { let row_stride = Alignment::from(rows) * width as u32;
crate::VectorSize::Tri => 4,
rows => rows as u32,
};
let row_stride = width as u32 * padded_rows;
// then iterate the stores // then iterate the stores
for i in 0..columns as u32 { for i in 0..columns as u32 {
@ -409,12 +401,7 @@ impl<W: fmt::Write> super::Writer<'_, W> {
stride: width as u32, stride: width as u32,
}, },
crate::TypeInner::Matrix { columns, width, .. } => Parent::Array { crate::TypeInner::Matrix { columns, width, .. } => Parent::Array {
stride: width as u32 stride: Alignment::from(columns) * width as u32,
* if columns > crate::VectorSize::Bi {
4
} else {
2
},
}, },
_ => unreachable!(), _ => unreachable!(),
}, },

third_party/rust/naga/src/back/hlsl/writer.rs (vendored; 574 changed lines)

@ -150,6 +150,8 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
.map(|ep| (ep.stage, ep.function.result.clone())) .map(|ep| (ep.stage, ep.function.result.clone()))
.collect::<Vec<(ShaderStage, Option<crate::FunctionResult>)>>(); .collect::<Vec<(ShaderStage, Option<crate::FunctionResult>)>>();
self.write_all_mat_cx2_typedefs_and_functions(module)?;
// Write all structs // Write all structs
for (handle, ty) in module.types.iter() { for (handle, ty) in module.types.iter() {
if let TypeInner::Struct { ref members, span } = ty.inner { if let TypeInner::Struct { ref members, span } = ty.inner {
@ -661,19 +663,41 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
if global.space == crate::AddressSpace::Uniform { if global.space == crate::AddressSpace::Uniform {
write!(self.out, " {{ ")?; write!(self.out, " {{ ")?;
// Even though Naga IR matrices are column-major, we must describe
// matrices passed from the CPU as being in row-major order. See let matrix_data = get_inner_matrix_data(module, global.ty);
// the module-level comments for details.
if let TypeInner::Matrix { .. } = module.types[global.ty].inner { // We treat matrices of the form `matCx2` as a sequence of C `vec2`s.
write!(self.out, "row_major ")?; // See the module-level block comment in mod.rs for details.
if let Some(MatrixType {
columns,
rows: crate::VectorSize::Bi,
width: 4,
}) = matrix_data
{
write!(
self.out,
"__mat{}x2 {}",
columns as u8,
&self.names[&NameKey::GlobalVariable(handle)]
)?;
} else {
// Even though Naga IR matrices are column-major, we must describe
// matrices passed from the CPU as being in row-major order.
// See the module-level block comment in mod.rs for details.
if matrix_data.is_some() {
write!(self.out, "row_major ")?;
}
self.write_type(module, global.ty)?;
let sub_name = &self.names[&NameKey::GlobalVariable(handle)];
write!(self.out, " {}", sub_name)?;
} }
self.write_type(module, global.ty)?;
let sub_name = &self.names[&NameKey::GlobalVariable(handle)];
write!(self.out, " {}", sub_name)?;
// need to write the array size if the type was emitted with `write_type` // need to write the array size if the type was emitted with `write_type`
if let TypeInner::Array { base, size, .. } = module.types[global.ty].inner { if let TypeInner::Array { base, size, .. } = module.types[global.ty].inner {
self.write_array_size(module, base, size)?; self.write_array_size(module, base, size)?;
} }
writeln!(self.out, "; }}")?; writeln!(self.out, "; }}")?;
} else { } else {
writeln!(self.out, ";")?; writeln!(self.out, ";")?;
@ -801,16 +825,31 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
write!(self.out, "{}", back::INDENT)?; write!(self.out, "{}", back::INDENT)?;
match module.types[member.ty].inner { match module.types[member.ty].inner {
TypeInner::Array { TypeInner::Array { base, size, .. } => {
base,
size,
stride: _,
} => {
// HLSL arrays are written as `type name[size]` // HLSL arrays are written as `type name[size]`
if let TypeInner::Matrix { .. } = module.types[base].inner {
write!(self.out, "row_major ")?; let matrix_data = get_inner_matrix_data(module, member.ty);
// We treat matrices of the form `matCx2` as a sequence of C `vec2`s.
// See the module-level block comment in mod.rs for details.
if let Some(MatrixType {
columns,
rows: crate::VectorSize::Bi,
width: 4,
}) = matrix_data
{
write!(self.out, "__mat{}x2", columns as u8)?;
} else {
// Even though Naga IR matrices are column-major, we must describe
// matrices passed from the CPU as being in row-major order.
// See the module-level block comment in mod.rs for details.
if matrix_data.is_some() {
write!(self.out, "row_major ")?;
}
self.write_type(module, base)?;
} }
self.write_type(module, base)?;
// Write `name` // Write `name`
write!( write!(
self.out, self.out,
@ -820,8 +859,8 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
// Write [size] // Write [size]
self.write_array_size(module, base, size)?; self.write_array_size(module, base, size)?;
} }
// We treat matrices of the form `matCx2` as a sequence of C `vec2`s // We treat matrices of the form `matCx2` as a sequence of C `vec2`s.
// (see top level module docs for details). // See the module-level block comment in mod.rs for details.
TypeInner::Matrix { TypeInner::Matrix {
rows, rows,
columns, columns,
@ -848,6 +887,9 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
self.write_modifier(binding)?; self.write_modifier(binding)?;
} }
// Even though Naga IR matrices are column-major, we must describe
// matrices passed from the CPU as being in row-major order.
// See the module-level block comment in mod.rs for details.
if let TypeInner::Matrix { .. } = module.types[member.ty].inner { if let TypeInner::Matrix { .. } = module.types[member.ty].inner {
write!(self.out, "row_major ")?; write!(self.out, "row_major ")?;
} }
@ -1285,17 +1327,6 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
} }
Statement::Store { pointer, value } => { Statement::Store { pointer, value } => {
let ty_inner = func_ctx.info[pointer].ty.inner_with(&module.types); let ty_inner = func_ctx.info[pointer].ty.inner_with(&module.types);
let array_info = match *ty_inner {
TypeInner::Pointer { base, .. } => match module.types[base].inner {
crate::TypeInner::Array {
size: crate::ArraySize::Constant(ch),
..
} => Some((ch, base)),
_ => None,
},
_ => None,
};
if let Some(crate::AddressSpace::Storage { .. }) = ty_inner.pointer_space() { if let Some(crate::AddressSpace::Storage { .. }) = ty_inner.pointer_space() {
let var_handle = self.fill_access_chain(module, pointer, func_ctx)?; let var_handle = self.fill_access_chain(module, pointer, func_ctx)?;
self.write_storage_store( self.write_storage_store(
@ -1305,26 +1336,9 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
func_ctx, func_ctx,
level, level,
)?; )?;
} else if let Some((const_handle, base_ty)) = array_info {
let size = module.constants[const_handle].to_array_length().unwrap();
writeln!(self.out, "{}{{", level)?;
write!(self.out, "{}", level.next())?;
self.write_type(module, base_ty)?;
write!(self.out, " _result[{}]=", size)?;
self.write_expr(module, value, func_ctx)?;
writeln!(self.out, ";")?;
write!(
self.out,
"{}for(int _i=0; _i<{}; ++_i) ",
level.next(),
size
)?;
self.write_expr(module, pointer, func_ctx)?;
writeln!(self.out, "[_i] = _result[_i];")?;
writeln!(self.out, "{}}}", level)?;
} else { } else {
// We treat matrices of the form `matCx2` as a sequence of C `vec2`s // We treat matrices of the form `matCx2` as a sequence of C `vec2`s.
// (see top level module docs for details). // See the module-level block comment in mod.rs for details.
// //
// We handle matrix Stores here directly (including sub accesses for Vectors and Scalars). // We handle matrix Stores here directly (including sub accesses for Vectors and Scalars).
// Loads are handled by `Expression::AccessIndex` (since sub accesses work fine for Loads). // Loads are handled by `Expression::AccessIndex` (since sub accesses work fine for Loads).
@ -1487,28 +1501,159 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
writeln!(self.out, ");")?; writeln!(self.out, ");")?;
} }
} else { } else {
self.write_expr(module, pointer, func_ctx)?; // We handle `Store`s to __matCx2 column vectors and scalar elements via
write!(self.out, " = ")?; // the previously injected functions __set_col_of_matCx2 / __set_el_of_matCx2.
self.write_expr(module, value, func_ctx)?; struct MatrixData {
writeln!(self.out, ";")? columns: crate::VectorSize,
base: Handle<crate::Expression>,
}
enum Index {
Expression(Handle<crate::Expression>),
Static(u32),
}
let mut matrix = None;
let mut vector = None;
let mut scalar = None;
let mut current_expr = pointer;
for _ in 0..3 {
let resolved = func_ctx.info[current_expr].ty.inner_with(&module.types);
match (resolved, &func_ctx.expressions[current_expr]) {
(
&TypeInner::ValuePointer {
size: Some(crate::VectorSize::Bi),
..
},
&crate::Expression::Access { base, index },
) => {
vector = Some(index);
current_expr = base;
}
(
&TypeInner::ValuePointer { size: None, .. },
&crate::Expression::Access { base, index },
) => {
scalar = Some(Index::Expression(index));
current_expr = base;
}
(
&TypeInner::ValuePointer { size: None, .. },
&crate::Expression::AccessIndex { base, index },
) => {
scalar = Some(Index::Static(index));
current_expr = base;
}
_ => {
if let Some(MatrixType {
columns,
rows: crate::VectorSize::Bi,
width: 4,
}) = get_inner_matrix_of_struct_array_member(
module,
current_expr,
func_ctx,
true,
) {
matrix = Some(MatrixData {
columns,
base: current_expr,
});
}
break;
}
}
}
if let (Some(MatrixData { columns, base }), Some(vec_index)) =
(matrix, vector)
{
if scalar.is_some() {
write!(self.out, "__set_el_of_mat{}x2", columns as u8)?;
} else {
write!(self.out, "__set_col_of_mat{}x2", columns as u8)?;
}
write!(self.out, "(")?;
self.write_expr(module, base, func_ctx)?;
write!(self.out, ", ")?;
self.write_expr(module, vec_index, func_ctx)?;
if let Some(scalar_index) = scalar {
write!(self.out, ", ")?;
match scalar_index {
Index::Static(index) => {
write!(self.out, "{}", index)?;
}
Index::Expression(index) => {
self.write_expr(module, index, func_ctx)?;
}
}
}
write!(self.out, ", ")?;
self.write_expr(module, value, func_ctx)?;
writeln!(self.out, ");")?;
} else {
self.write_expr(module, pointer, func_ctx)?;
write!(self.out, " = ")?;
// We cast the RHS of this store in cases where the LHS
// is a struct member with type:
// - matCx2 or
// - a (possibly nested) array of matCx2's
if let Some(MatrixType {
columns,
rows: crate::VectorSize::Bi,
width: 4,
}) = get_inner_matrix_of_struct_array_member(
module, pointer, func_ctx, false,
) {
let mut resolved =
func_ctx.info[pointer].ty.inner_with(&module.types);
if let TypeInner::Pointer { base, .. } = *resolved {
resolved = &module.types[base].inner;
}
write!(self.out, "(__mat{}x2", columns as u8)?;
if let TypeInner::Array { base, size, .. } = *resolved {
self.write_array_size(module, base, size)?;
}
write!(self.out, ")")?;
}
self.write_expr(module, value, func_ctx)?;
writeln!(self.out, ";")?
}
} }
} }
} }
Statement::Loop { Statement::Loop {
ref body, ref body,
ref continuing, ref continuing,
break_if,
} => { } => {
let l2 = level.next(); let l2 = level.next();
if !continuing.is_empty() { if !continuing.is_empty() || break_if.is_some() {
let gate_name = self.namer.call("loop_init"); let gate_name = self.namer.call("loop_init");
writeln!(self.out, "{}bool {} = true;", level, gate_name)?; writeln!(self.out, "{}bool {} = true;", level, gate_name)?;
writeln!(self.out, "{}while(true) {{", level)?; writeln!(self.out, "{}while(true) {{", level)?;
writeln!(self.out, "{}if (!{}) {{", l2, gate_name)?; writeln!(self.out, "{}if (!{}) {{", l2, gate_name)?;
let l3 = l2.next();
for sta in continuing.iter() { for sta in continuing.iter() {
self.write_stmt(module, sta, func_ctx, l2)?; self.write_stmt(module, sta, func_ctx, l3)?;
} }
writeln!(self.out, "{}}}", level.next())?; if let Some(condition) = break_if {
writeln!(self.out, "{}{} = false;", level.next(), gate_name)?; write!(self.out, "{}if (", l3)?;
self.write_expr(module, condition, func_ctx)?;
writeln!(self.out, ") {{")?;
writeln!(self.out, "{}break;", l3.next())?;
writeln!(self.out, "{}}}", l3)?;
}
writeln!(self.out, "{}}}", l2)?;
writeln!(self.out, "{}{} = false;", l2, gate_name)?;
} else { } else {
writeln!(self.out, "{}while(true) {{", level)?; writeln!(self.out, "{}while(true) {{", level)?;
} }
@ -1651,7 +1796,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
let indent_level_1 = level.next(); let indent_level_1 = level.next();
let indent_level_2 = indent_level_1.next(); let indent_level_2 = indent_level_1.next();
for case in cases { for (i, case) in cases.iter().enumerate() {
match case.value { match case.value {
crate::SwitchValue::Integer(value) => writeln!( crate::SwitchValue::Integer(value) => writeln!(
self.out, self.out,
@ -1663,25 +1808,35 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
} }
} }
// FXC doesn't support fallthrough so we duplicate the body of the following case blocks
if case.fall_through { if case.fall_through {
// Generate each fallthrough case statement in a new block. This is done to let curr_len = i + 1;
// prevent symbol collision of variables declared in these cases statements. let end_case_idx = curr_len
writeln!(self.out, "{}/* fallthrough */", indent_level_2)?; + cases
writeln!(self.out, "{}{{", indent_level_2)?; .iter()
} .skip(curr_len)
for sta in case.body.iter() { .position(|case| !case.fall_through)
self.write_stmt( .unwrap();
module, let indent_level_3 = indent_level_2.next();
sta, for case in &cases[i..=end_case_idx] {
func_ctx, writeln!(self.out, "{}{{", indent_level_2)?;
back::Level(indent_level_2.0 + usize::from(case.fall_through)), for sta in case.body.iter() {
)?; self.write_stmt(module, sta, func_ctx, indent_level_3)?;
} }
writeln!(self.out, "{}}}", indent_level_2)?;
}
if case.fall_through { let last_case = &cases[end_case_idx];
writeln!(self.out, "{}}}", indent_level_2)?; if last_case.body.last().map_or(true, |s| !s.is_terminator()) {
} else if case.body.last().map_or(true, |s| !s.is_terminator()) { writeln!(self.out, "{}break;", indent_level_2)?;
writeln!(self.out, "{}break;", indent_level_2)?; }
} else {
for sta in case.body.iter() {
self.write_stmt(module, sta, func_ctx, indent_level_2)?;
}
if case.body.last().map_or(true, |s| !s.is_terminator()) {
writeln!(self.out, "{}break;", indent_level_2)?;
}
} }
writeln!(self.out, "{}}}", indent_level_1)?; writeln!(self.out, "{}}}", indent_level_1)?;
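Because FXC has no real fallthrough, the writer now duplicates case bodies: each falling-through case emits its own body plus the bodies of every following case up to and including the first one that does not fall through. A Rust sketch of that collection step (names invented for illustration; it assumes, as the writer does, that a falling-through case is eventually followed by a terminating one):

```rust
// Collect the statements a falling-through case must emit.
fn expanded_bodies<'a>(bodies: &[&'a str], fall_through: &[bool], i: usize) -> Vec<&'a str> {
    let end = (i..bodies.len())
        .find(|&j| !fall_through[j])
        .expect("a fallthrough case must be followed by a non-fallthrough case");
    bodies[i..=end].to_vec()
}

fn main() {
    let bodies = ["a();", "b();", "c();"];
    let fall_through = [true, false, false];
    // Case 0 falls through into case 1, so both bodies are emitted for case 0.
    assert_eq!(expanded_bodies(&bodies, &fall_through, 0), vec!["a();", "b();"]);
}
```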
@ -1844,6 +1999,26 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
{ {
// do nothing, the chain is written on `Load`/`Store` // do nothing, the chain is written on `Load`/`Store`
} else { } else {
// We use the function __get_col_of_matCx2 here in cases
// where `base`s type resolves to a matCx2 and is part of a
// struct member with type of (possibly nested) array of matCx2's.
//
// Note that this only works for `Load`s and we handle
// `Store`s differently in `Statement::Store`.
if let Some(MatrixType {
columns,
rows: crate::VectorSize::Bi,
width: 4,
}) = get_inner_matrix_of_struct_array_member(module, base, func_ctx, true)
{
write!(self.out, "__get_col_of_mat{}x2(", columns as u8)?;
self.write_expr(module, base, func_ctx)?;
write!(self.out, ", ")?;
self.write_expr(module, index, func_ctx)?;
write!(self.out, ")")?;
return Ok(());
}
let base_ty_res = &func_ctx.info[base].ty; let base_ty_res = &func_ctx.info[base].ty;
let resolved = base_ty_res.inner_with(&module.types); let resolved = base_ty_res.inner_with(&module.types);
@ -1876,18 +2051,64 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
{ {
// do nothing, the chain is written on `Load`/`Store` // do nothing, the chain is written on `Load`/`Store`
} else { } else {
fn write_access<W: fmt::Write>(
writer: &mut super::Writer<'_, W>,
resolved: &TypeInner,
base_ty_handle: Option<Handle<crate::Type>>,
index: u32,
) -> BackendResult {
match *resolved {
TypeInner::Vector { .. } => {
// Write vector access as a swizzle
write!(writer.out, ".{}", back::COMPONENTS[index as usize])?
}
TypeInner::Matrix { .. }
| TypeInner::Array { .. }
| TypeInner::BindingArray { .. }
| TypeInner::ValuePointer { .. } => write!(writer.out, "[{}]", index)?,
TypeInner::Struct { .. } => {
// This will never panic in case the type is a `Struct`, this is not true
// for other types so we can only check while inside this match arm
let ty = base_ty_handle.unwrap();
write!(
writer.out,
".{}",
&writer.names[&NameKey::StructMember(ty, index)]
)?
}
ref other => {
return Err(Error::Custom(format!("Cannot index {:?}", other)))
}
}
Ok(())
}
// We write the matrix column access in a special way since
// the type of `base` is our special __matCx2 struct.
if let Some(MatrixType {
rows: crate::VectorSize::Bi,
width: 4,
..
}) = get_inner_matrix_of_struct_array_member(module, base, func_ctx, true)
{
self.write_expr(module, base, func_ctx)?;
write!(self.out, "._{}", index)?;
return Ok(());
}
let base_ty_res = &func_ctx.info[base].ty; let base_ty_res = &func_ctx.info[base].ty;
let mut resolved = base_ty_res.inner_with(&module.types); let mut resolved = base_ty_res.inner_with(&module.types);
let base_ty_handle = match *resolved { let base_ty_handle = match *resolved {
TypeInner::Pointer { base, space: _ } => { TypeInner::Pointer { base, .. } => {
resolved = &module.types[base].inner; resolved = &module.types[base].inner;
Some(base) Some(base)
} }
_ => base_ty_res.handle(), _ => base_ty_res.handle(),
}; };
// We treat matrices of the form `matCx2` as a sequence of C `vec2`s // We treat matrices of the form `matCx2` as a sequence of C `vec2`s.
// (see top level module docs for details). // See the module-level block comment in mod.rs for details.
// //
// We handle matrix reconstruction here for Loads. // We handle matrix reconstruction here for Loads.
// Stores are handled directly by `Statement::Store`. // Stores are handled directly by `Statement::Store`.
@ -1910,34 +2131,10 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
} }
_ => {} _ => {}
} }
}; }
self.write_expr(module, base, func_ctx)?; self.write_expr(module, base, func_ctx)?;
write_access(self, resolved, base_ty_handle, index)?;
match *resolved {
TypeInner::Vector { .. } => {
// Write vector access as a swizzle
write!(self.out, ".{}", back::COMPONENTS[index as usize])?
}
TypeInner::Matrix { .. }
| TypeInner::Array { .. }
| TypeInner::BindingArray { .. }
| TypeInner::ValuePointer { .. } => write!(self.out, "[{}]", index)?,
TypeInner::Struct { .. } => {
// This will never panic in case the type is a `Struct`, this is not true
// for other types so we can only check while inside this match arm
let ty = base_ty_handle.unwrap();
write!(
self.out,
".{}",
&self.names[&NameKey::StructMember(ty, index)]
)?
}
ref other => {
return Err(Error::Custom(format!("Cannot index {:?}", other)))
}
}
} }
} }
Expression::FunctionArgument(pos) => { Expression::FunctionArgument(pos) => {
@ -2108,7 +2305,42 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
self.write_storage_load(module, var_handle, result_ty, func_ctx)?; self.write_storage_load(module, var_handle, result_ty, func_ctx)?;
} }
_ => { _ => {
let mut close_paren = false;
// We cast the value loaded to a native HLSL floatCx2
// in cases where it is of type:
// - __matCx2 or
// - a (possibly nested) array of __matCx2's
if let Some(MatrixType {
rows: crate::VectorSize::Bi,
width: 4,
..
}) = get_inner_matrix_of_struct_array_member(
module, pointer, func_ctx, false,
)
.or_else(|| get_inner_matrix_of_global_uniform(module, pointer, func_ctx))
{
let mut resolved = func_ctx.info[pointer].ty.inner_with(&module.types);
if let TypeInner::Pointer { base, .. } = *resolved {
resolved = &module.types[base].inner;
}
write!(self.out, "((")?;
if let TypeInner::Array { base, size, .. } = *resolved {
self.write_type(module, base)?;
self.write_array_size(module, base, size)?;
} else {
self.write_value_type(module, resolved)?;
}
write!(self.out, ")")?;
close_paren = true;
}
self.write_expr(module, pointer, func_ctx)?; self.write_expr(module, pointer, func_ctx)?;
if close_paren {
write!(self.out, ")")?;
}
} }
} }
} }
@ -2567,3 +2799,139 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
Ok(()) Ok(())
} }
} }
pub(super) struct MatrixType {
pub(super) columns: crate::VectorSize,
pub(super) rows: crate::VectorSize,
pub(super) width: crate::Bytes,
}
pub(super) fn get_inner_matrix_data(
module: &Module,
handle: Handle<crate::Type>,
) -> Option<MatrixType> {
match module.types[handle].inner {
TypeInner::Matrix {
columns,
rows,
width,
} => Some(MatrixType {
columns,
rows,
width,
}),
TypeInner::Array { base, .. } => get_inner_matrix_data(module, base),
_ => None,
}
}
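
For orientation, here is a minimal standalone sketch of what `get_inner_matrix_data` computes, using a made-up `Ty` enum instead of naga's `TypeInner` handles (the real helper walks types stored in a `Module`): it recurses through arrays until it reaches a matrix and reports that matrix's shape.

// Hypothetical stand-in types; not the naga API.
enum Ty {
    Matrix { columns: u8, rows: u8, width: u8 },
    Array(Box<Ty>),
    Scalar,
}

fn innermost_matrix(ty: &Ty) -> Option<(u8, u8, u8)> {
    match *ty {
        Ty::Matrix { columns, rows, width } => Some((columns, rows, width)),
        Ty::Array(ref base) => innermost_matrix(base),
        _ => None,
    }
}

fn main() {
    // An array<mat4x2<f32>> member reports its element matrix:
    // 4 columns, 2 rows, 4-byte scalars.
    let member = Ty::Array(Box::new(Ty::Matrix { columns: 4, rows: 2, width: 4 }));
    assert_eq!(innermost_matrix(&member), Some((4, 2, 4)));
}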
/// Returns the matrix data if the access chain starting at `base`:
/// - starts with an expression with resolved type of [`TypeInner::Matrix`] if `direct = true`
/// - contains one or more expressions with resolved type of [`TypeInner::Array`] of [`TypeInner::Matrix`]
/// - ends at an expression with resolved type of [`TypeInner::Struct`]
pub(super) fn get_inner_matrix_of_struct_array_member(
module: &Module,
base: Handle<crate::Expression>,
func_ctx: &back::FunctionCtx<'_>,
direct: bool,
) -> Option<MatrixType> {
let mut mat_data = None;
let mut array_base = None;
let mut current_base = base;
loop {
let mut resolved = func_ctx.info[current_base].ty.inner_with(&module.types);
if let TypeInner::Pointer { base, .. } = *resolved {
resolved = &module.types[base].inner;
};
match *resolved {
TypeInner::Matrix {
columns,
rows,
width,
} => {
mat_data = Some(MatrixType {
columns,
rows,
width,
})
}
TypeInner::Array { base, .. } => {
array_base = Some(base);
}
TypeInner::Struct { .. } => {
if let Some(array_base) = array_base {
if direct {
return mat_data;
} else {
return get_inner_matrix_data(module, array_base);
}
}
break;
}
_ => break,
}
current_base = match func_ctx.expressions[current_base] {
crate::Expression::Access { base, .. } => base,
crate::Expression::AccessIndex { base, .. } => base,
_ => break,
};
}
None
}
/// Returns the matrix data if the access chain starting at `base`:
/// - starts with an expression with resolved type of [`TypeInner::Matrix`]
/// - contains zero or more expressions with resolved type of [`TypeInner::Array`] of [`TypeInner::Matrix`]
/// - ends with an [`Expression::GlobalVariable`](crate::Expression::GlobalVariable) in [`AddressSpace::Uniform`](crate::AddressSpace::Uniform)
fn get_inner_matrix_of_global_uniform(
module: &Module,
base: Handle<crate::Expression>,
func_ctx: &back::FunctionCtx<'_>,
) -> Option<MatrixType> {
let mut mat_data = None;
let mut array_base = None;
let mut current_base = base;
loop {
let mut resolved = func_ctx.info[current_base].ty.inner_with(&module.types);
if let TypeInner::Pointer { base, .. } = *resolved {
resolved = &module.types[base].inner;
};
match *resolved {
TypeInner::Matrix {
columns,
rows,
width,
} => {
mat_data = Some(MatrixType {
columns,
rows,
width,
})
}
TypeInner::Array { base, .. } => {
array_base = Some(base);
}
_ => break,
}
current_base = match func_ctx.expressions[current_base] {
crate::Expression::Access { base, .. } => base,
crate::Expression::AccessIndex { base, .. } => base,
crate::Expression::GlobalVariable(handle)
if module.global_variables[handle].space == crate::AddressSpace::Uniform =>
{
return mat_data.or_else(|| {
array_base.and_then(|array_base| get_inner_matrix_data(module, array_base))
})
}
_ => break,
};
}
None
}


third_party/rust/naga/src/back/msl/writer.rs (vendored)

@ -364,6 +364,8 @@ pub struct Writer<W> {
put_expression_stack_pointers: FastHashSet<*const ()>, put_expression_stack_pointers: FastHashSet<*const ()>,
#[cfg(test)] #[cfg(test)]
put_block_stack_pointers: FastHashSet<*const ()>, put_block_stack_pointers: FastHashSet<*const ()>,
/// Set of (struct type, struct field index) denoting which fields require
/// padding inserted **before** them (i.e. between fields at index - 1 and index)
struct_member_pads: FastHashSet<(Handle<crate::Type>, u32)>, struct_member_pads: FastHashSet<(Handle<crate::Type>, u32)>,
} }
@ -647,6 +649,8 @@ impl<W: Write> Writer<W> {
} }
/// Finishes writing and returns the output. /// Finishes writing and returns the output.
// See https://github.com/rust-lang/rust-clippy/issues/4979.
#[allow(clippy::missing_const_for_fn)]
pub fn finish(self) -> W { pub fn finish(self) -> W {
self.out self.out
} }
@ -1221,20 +1225,30 @@ impl<W: Write> Writer<W> {
arg: Handle<crate::Expression>, arg: Handle<crate::Expression>,
arg1: Handle<crate::Expression>, arg1: Handle<crate::Expression>,
size: usize, size: usize,
context: &ExpressionContext,
) -> BackendResult { ) -> BackendResult {
// Write parentheses around the dot product expression to prevent operators
// with different precedences from applying earlier.
write!(self.out, "(")?; write!(self.out, "(")?;
let arg0_name = &self.named_expressions[&arg]; // Cycle through all the components of the vector
let arg1_name = &self.named_expressions[&arg1];
// This will print an extra '+' at the beginning but that is fine in msl
for index in 0..size { for index in 0..size {
let component = back::COMPONENTS[index]; let component = back::COMPONENTS[index];
write!( // Write the addition to the previous product
self.out, // This will print an extra '+' at the beginning but that is fine in msl
" + {}.{} * {}.{}", write!(self.out, " + ")?;
arg0_name, component, arg1_name, component // Write the first vector expression, this expression is marked to be
)?; // cached so unless it can't be cached (for example, it's a Constant)
// it shouldn't produce large expressions.
self.put_expression(arg, context, true)?;
// Access the current component on the first vector
write!(self.out, ".{} * ", component)?;
// Write the second vector expression, this expression is marked to be
// cached so unless it can't be cached (for example, it's a Constant)
// it shouldn't produce large expressions.
self.put_expression(arg1, context, true)?;
// Access the current component on the second vector
write!(self.out, ".{}", component)?;
} }
write!(self.out, ")")?; write!(self.out, ")")?;
@ -1652,7 +1666,7 @@ impl<W: Write> Writer<W> {
.. ..
} => "dot", } => "dot",
crate::TypeInner::Vector { size, .. } => { crate::TypeInner::Vector { size, .. } => {
return self.put_dot_product(arg, arg1.unwrap(), size as usize) return self.put_dot_product(arg, arg1.unwrap(), size as usize, context)
} }
_ => unreachable!( _ => unreachable!(
"Correct TypeInner for dot product should be already validated" "Correct TypeInner for dot product should be already validated"
@ -2538,14 +2552,23 @@ impl<W: Write> Writer<W> {
crate::Statement::Loop { crate::Statement::Loop {
ref body, ref body,
ref continuing, ref continuing,
break_if,
} => { } => {
if !continuing.is_empty() { if !continuing.is_empty() || break_if.is_some() {
let gate_name = self.namer.call("loop_init"); let gate_name = self.namer.call("loop_init");
writeln!(self.out, "{}bool {} = true;", level, gate_name)?; writeln!(self.out, "{}bool {} = true;", level, gate_name)?;
writeln!(self.out, "{}while(true) {{", level)?; writeln!(self.out, "{}while(true) {{", level)?;
let lif = level.next(); let lif = level.next();
let lcontinuing = lif.next();
writeln!(self.out, "{}if (!{}) {{", lif, gate_name)?; writeln!(self.out, "{}if (!{}) {{", lif, gate_name)?;
self.put_block(lif.next(), continuing, context)?; self.put_block(lcontinuing, continuing, context)?;
if let Some(condition) = break_if {
write!(self.out, "{}if (", lcontinuing)?;
self.put_expression(condition, &context.expression, true)?;
writeln!(self.out, ") {{")?;
writeln!(self.out, "{}break;", lcontinuing.next())?;
writeln!(self.out, "{}}}", lcontinuing)?;
}
writeln!(self.out, "{}}}", lif)?; writeln!(self.out, "{}}}", lif)?;
writeln!(self.out, "{}{} = false;", lif, gate_name)?; writeln!(self.out, "{}{} = false;", lif, gate_name)?;
} else { } else {
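
The gate variable exists so the `continuing` block, and now the `break if` check, run at the top of every iteration except the first. A hedged snapshot of the emitted shape (identifier names and indentation assumed; the loop body itself is written after the gate is cleared, outside the lines shown in this hunk), kept as a Rust string for illustration only:

const EXPECTED_MSL_SHAPE: &str = r#"
bool loop_init = true;
while(true) {
    if (!loop_init) {
        /* continuing statements */
        if (c) {
            break;
        }
    }
    loop_init = false;
    /* loop body */
}
"#;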
@ -3080,6 +3103,10 @@ impl<W: Write> Writer<W> {
}; };
write!(self.out, "constant {} {} = {{", ty_name, name,)?; write!(self.out, "constant {} {} = {{", ty_name, name,)?;
for (i, &sub_handle) in components.iter().enumerate() { for (i, &sub_handle) in components.iter().enumerate() {
// insert padding initialization, if needed
if self.struct_member_pads.contains(&(ty, i as u32)) {
write!(self.out, ", {{}}")?;
}
let separator = if i != 0 { ", " } else { "" }; let separator = if i != 0 { ", " } else { "" };
let coco = ConstantContext { let coco = ConstantContext {
handle: sub_handle, handle: sub_handle,

third_party/rust/naga/src/back/spv/block.rs (vendored)

@ -37,6 +37,28 @@ enum ExpressionPointer {
}, },
} }
/// The termination statement to be added to the end of the block
pub enum BlockExit {
/// Generates an OpReturn (void return)
Return,
/// Generates an OpBranch to the specified block
Branch {
/// The branch target block
target: Word,
},
/// Translates a loop `break if` into an `OpBranchConditional` that jumps to the
/// merge block if true (the merge block is passed through [`LoopContext::break_id`]),
/// or else to the loop header (passed through [`preamble_id`]).
///
/// [`preamble_id`]: Self::BreakIf::preamble_id
BreakIf {
/// The condition of the `break if`
condition: Handle<crate::Expression>,
/// The loop header block id
preamble_id: Word,
},
}
impl Writer { impl Writer {
// Flip Y coordinate to adjust for coordinate space difference // Flip Y coordinate to adjust for coordinate space difference
// between SPIR-V and our IR. // between SPIR-V and our IR.
@ -1491,7 +1513,7 @@ impl<'w> BlockContext<'w> {
&mut self, &mut self,
label_id: Word, label_id: Word,
statements: &[crate::Statement], statements: &[crate::Statement],
exit_id: Option<Word>, exit: BlockExit,
loop_context: LoopContext, loop_context: LoopContext,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut block = Block::new(label_id); let mut block = Block::new(label_id);
@ -1508,7 +1530,12 @@ impl<'w> BlockContext<'w> {
self.function.consume(block, Instruction::branch(scope_id)); self.function.consume(block, Instruction::branch(scope_id));
let merge_id = self.gen_id(); let merge_id = self.gen_id();
self.write_block(scope_id, block_statements, Some(merge_id), loop_context)?; self.write_block(
scope_id,
block_statements,
BlockExit::Branch { target: merge_id },
loop_context,
)?;
block = Block::new(merge_id); block = Block::new(merge_id);
} }
@ -1546,10 +1573,20 @@ impl<'w> BlockContext<'w> {
); );
if let Some(block_id) = accept_id { if let Some(block_id) = accept_id {
self.write_block(block_id, accept, Some(merge_id), loop_context)?; self.write_block(
block_id,
accept,
BlockExit::Branch { target: merge_id },
loop_context,
)?;
} }
if let Some(block_id) = reject_id { if let Some(block_id) = reject_id {
self.write_block(block_id, reject, Some(merge_id), loop_context)?; self.write_block(
block_id,
reject,
BlockExit::Branch { target: merge_id },
loop_context,
)?;
} }
block = Block::new(merge_id); block = Block::new(merge_id);
@ -1611,7 +1648,9 @@ impl<'w> BlockContext<'w> {
self.write_block( self.write_block(
*label_id, *label_id,
&case.body, &case.body,
Some(case_finish_id), BlockExit::Branch {
target: case_finish_id,
},
inner_context, inner_context,
)?; )?;
} }
@ -1619,7 +1658,12 @@ impl<'w> BlockContext<'w> {
// If no default was encountered write a empty block to satisfy the presence of // If no default was encountered write a empty block to satisfy the presence of
// a block the default label // a block the default label
if !reached_default { if !reached_default {
self.write_block(default_id, &[], Some(merge_id), inner_context)?; self.write_block(
default_id,
&[],
BlockExit::Branch { target: merge_id },
inner_context,
)?;
} }
block = Block::new(merge_id); block = Block::new(merge_id);
@ -1627,6 +1671,7 @@ impl<'w> BlockContext<'w> {
crate::Statement::Loop { crate::Statement::Loop {
ref body, ref body,
ref continuing, ref continuing,
break_if,
} => { } => {
let preamble_id = self.gen_id(); let preamble_id = self.gen_id();
self.function self.function
@ -1649,17 +1694,29 @@ impl<'w> BlockContext<'w> {
self.write_block( self.write_block(
body_id, body_id,
body, body,
Some(continuing_id), BlockExit::Branch {
target: continuing_id,
},
LoopContext { LoopContext {
continuing_id: Some(continuing_id), continuing_id: Some(continuing_id),
break_id: Some(merge_id), break_id: Some(merge_id),
}, },
)?; )?;
let exit = match break_if {
Some(condition) => BlockExit::BreakIf {
condition,
preamble_id,
},
None => BlockExit::Branch {
target: preamble_id,
},
};
self.write_block( self.write_block(
continuing_id, continuing_id,
continuing, continuing,
Some(preamble_id), exit,
LoopContext { LoopContext {
continuing_id: None, continuing_id: None,
break_id: Some(merge_id), break_id: Some(merge_id),
@ -1955,12 +2012,10 @@ impl<'w> BlockContext<'w> {
} }
} }
let termination = match exit_id { let termination = match exit {
Some(id) => Instruction::branch(id), // We're generating code for the top-level Block of the function, so we
// This can happen if the last branch had all the paths // need to end it with some kind of return instruction.
// leading out of the graph (i.e. returning). BlockExit::Return => match self.ir_function.result {
// Or it may be the end of the self.function.
None => match self.ir_function.result {
Some(ref result) if self.function.entry_point_context.is_none() => { Some(ref result) if self.function.entry_point_context.is_none() => {
let type_id = self.get_type_id(LookupType::Handle(result.ty)); let type_id = self.get_type_id(LookupType::Handle(result.ty));
let null_id = self.writer.write_constant_null(type_id); let null_id = self.writer.write_constant_null(type_id);
@ -1968,6 +2023,19 @@ impl<'w> BlockContext<'w> {
} }
_ => Instruction::return_void(), _ => Instruction::return_void(),
}, },
BlockExit::Branch { target } => Instruction::branch(target),
BlockExit::BreakIf {
condition,
preamble_id,
} => {
let condition_id = self.cached[condition];
Instruction::branch_conditional(
condition_id,
loop_context.break_id.unwrap(),
preamble_id,
)
}
}; };
self.function.consume(block, termination); self.function.consume(block, termination);

third_party/rust/naga/src/back/spv/writer.rs (vendored)

@ -8,7 +8,7 @@ use super::{
use crate::{ use crate::{
arena::{Handle, UniqueArena}, arena::{Handle, UniqueArena},
back::spv::BindingInfo, back::spv::BindingInfo,
proc::TypeResolution, proc::{Alignment, TypeResolution},
valid::{FunctionInfo, ModuleInfo}, valid::{FunctionInfo, ModuleInfo},
}; };
use spirv::Word; use spirv::Word;
@ -574,7 +574,12 @@ impl Writer {
context context
.function .function
.consume(prelude, Instruction::branch(main_id)); .consume(prelude, Instruction::branch(main_id));
context.write_block(main_id, &ir_function.body, None, LoopContext::default())?; context.write_block(
main_id,
&ir_function.body,
super::block::BlockExit::Return,
LoopContext::default(),
)?;
// Consume the `BlockContext`, ending its borrows and letting the // Consume the `BlockContext`, ending its borrows and letting the
// `Writer` steal back its cached expression table and temp_list. // `Writer` steal back its cached expression table and temp_list.
@ -1379,10 +1384,7 @@ impl Writer {
width, width,
} = *member_array_subty_inner } = *member_array_subty_inner
{ {
let byte_stride = match rows { let byte_stride = Alignment::from(rows) * width as u32;
crate::VectorSize::Bi => 2 * width,
crate::VectorSize::Tri | crate::VectorSize::Quad => 4 * width,
};
self.annotations.push(Instruction::member_decorate( self.annotations.push(Instruction::member_decorate(
struct_id, struct_id,
index as u32, index as u32,
@ -1393,7 +1395,7 @@ impl Writer {
struct_id, struct_id,
index as u32, index as u32,
Decoration::MatrixStride, Decoration::MatrixStride,
&[byte_stride as u32], &[byte_stride],
)); ));
} }
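
The `Alignment::from(rows) * width` expression reproduces the old match: two-row matrices keep a two-component column stride, while three- and four-row matrices round up to four components. A self-contained sketch with worked numbers (plain integers, not naga's `Alignment` type):

fn matrix_stride(rows: u32, width: u32) -> u32 {
    // Mirrors the removed match: Bi -> 2 * width, Tri | Quad -> 4 * width.
    let aligned_rows = if rows > 2 { 4 } else { 2 };
    aligned_rows * width
}

fn main() {
    assert_eq!(matrix_stride(2, 4), 8);  // matCx2<f32>: columns 8 bytes apart
    assert_eq!(matrix_stride(3, 4), 16); // matCx3<f32>: columns 16 bytes apart
    assert_eq!(matrix_stride(4, 4), 16); // matCx4<f32>: columns 16 bytes apart
}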

third_party/rust/naga/src/back/wgsl/writer.rs (vendored)

@ -908,6 +908,7 @@ impl<W: Write> Writer<W> {
Statement::Loop { Statement::Loop {
ref body, ref body,
ref continuing, ref continuing,
break_if,
} => { } => {
write!(self.out, "{}", level)?; write!(self.out, "{}", level)?;
writeln!(self.out, "loop {{")?; writeln!(self.out, "loop {{")?;
@ -917,11 +918,26 @@ impl<W: Write> Writer<W> {
self.write_stmt(module, sta, func_ctx, l2)?; self.write_stmt(module, sta, func_ctx, l2)?;
} }
if !continuing.is_empty() { // The continuing is optional so we don't need to write it if
// it is empty, but the `break if` counts as a continuing statement
// so even if `continuing` is empty we must generate it if a
// `break if` exists
if !continuing.is_empty() || break_if.is_some() {
writeln!(self.out, "{}continuing {{", l2)?; writeln!(self.out, "{}continuing {{", l2)?;
for sta in continuing.iter() { for sta in continuing.iter() {
self.write_stmt(module, sta, func_ctx, l2.next())?; self.write_stmt(module, sta, func_ctx, l2.next())?;
} }
// The `break if` is always the last
// statement of the `continuing` block
if let Some(condition) = break_if {
// The trailing space is important
write!(self.out, "{}break if ", l2.next())?;
self.write_expr(module, condition, func_ctx)?;
// Close the `break if` statement
writeln!(self.out, ";")?;
}
writeln!(self.out, "{}}}", l2)?; writeln!(self.out, "{}}}", l2)?;
} }
@ -1859,6 +1875,8 @@ impl<W: Write> Writer<W> {
Ok(()) Ok(())
} }
// See https://github.com/rust-lang/rust-clippy/issues/4979.
#[allow(clippy::missing_const_for_fn)]
pub fn finish(self) -> W { pub fn finish(self) -> W {
self.out self.out
} }
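
With this change, a loop whose IR carries a `break_if` condition always gets a `continuing` block in the output, even if the continuing statements themselves are empty, and the `break if` is emitted as the block's last statement. A hedged sketch of the resulting WGSL text (loop contents and identifier names assumed), kept as a Rust string for illustration:

const EXPECTED_WGSL: &str = r#"loop {
    continuing {
        i = i + 1u;
        break if i >= 4u;
    }
}
"#;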


@ -1556,7 +1556,7 @@ fn inject_common_builtin(
}; };
declaration.overloads.push(module.add_builtin( declaration.overloads.push(module.add_builtin(
vec![ty(), ty(), base_ty()], vec![ty(), ty(), base_ty()],
MacroCall::MathFunction(MathFunction::SmoothStep), MacroCall::SmoothStep { splatted: size },
)) ))
} }
} }
@ -1604,6 +1604,12 @@ pub enum MacroCall {
BitCast(Sk), BitCast(Sk),
Derivate(DerivativeAxis), Derivate(DerivativeAxis),
Barrier, Barrier,
/// SmoothStep needs a separate variant because it might need its inputs
/// to be splatted depending on the overload
SmoothStep {
/// The size of the splat operation if some
splatted: Option<VectorSize>,
},
} }
impl MacroCall { impl MacroCall {
@ -2072,6 +2078,22 @@ impl MacroCall {
body.push(crate::Statement::Barrier(crate::Barrier::all()), meta); body.push(crate::Statement::Barrier(crate::Barrier::all()), meta);
return Ok(None); return Ok(None);
} }
MacroCall::SmoothStep { splatted } => {
ctx.implicit_splat(parser, &mut args[0], meta, splatted)?;
ctx.implicit_splat(parser, &mut args[1], meta, splatted)?;
ctx.add_expression(
Expression::Math {
fun: MathFunction::SmoothStep,
arg: args[0],
arg1: args.get(1).copied(),
arg2: args.get(2).copied(),
arg3: None,
},
Span::default(),
body,
)
}
})) }))
} }
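
The splat exists because GLSL's `smoothstep(float, float, genType)` overload takes scalar edges while the value is a vector, and naga's `SmoothStep` math function expects matching types, so the edges are splatted first. A small, naga-independent reference implementation of that overload's per-component semantics:

fn smoothstep_vec3(edge0: f32, edge1: f32, x: [f32; 3]) -> [f32; 3] {
    // Conceptually: splat edge0/edge1 to vec3, then apply the scalar definition.
    x.map(|v| {
        let t = ((v - edge0) / (edge1 - edge0)).clamp(0.0, 1.0);
        t * t * (3.0 - 2.0 * t)
    })
}

fn main() {
    assert_eq!(smoothstep_vec3(0.0, 1.0, [0.0, 0.5, 1.0]), [0.0, 0.5, 1.0]);
}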
} }
@ -2237,20 +2259,26 @@ pub fn sampled_to_depth(
meta: Span, meta: Span,
errors: &mut Vec<Error>, errors: &mut Vec<Error>,
) { ) {
// Get a mutable type handle of the underlying image storage
let ty = match ctx[image] { let ty = match ctx[image] {
Expression::GlobalVariable(handle) => &mut module.global_variables.get_mut(handle).ty, Expression::GlobalVariable(handle) => &mut module.global_variables.get_mut(handle).ty,
Expression::FunctionArgument(i) => { Expression::FunctionArgument(i) => {
// Mark the function argument as carrying a depth texture
ctx.parameters_info[i as usize].depth = true; ctx.parameters_info[i as usize].depth = true;
// NOTE: We need to later also change the parameter type
&mut ctx.arguments[i as usize].ty &mut ctx.arguments[i as usize].ty
} }
_ => { _ => {
// Only globals and function arguments are allowed to carry an image
return errors.push(Error { return errors.push(Error {
kind: ErrorKind::SemanticError("Not a valid texture expression".into()), kind: ErrorKind::SemanticError("Not a valid texture expression".into()),
meta, meta,
}) });
} }
}; };
match module.types[*ty].inner { match module.types[*ty].inner {
// Update the image class to depth in case it already isn't
TypeInner::Image { TypeInner::Image {
class, class,
dim, dim,
@ -2270,6 +2298,7 @@ pub fn sampled_to_depth(
) )
} }
ImageClass::Depth { .. } => {} ImageClass::Depth { .. } => {}
// Other image classes aren't allowed to be transformed to depth
_ => errors.push(Error { _ => errors.push(Error {
kind: ErrorKind::SemanticError("Not a texture".into()), kind: ErrorKind::SemanticError("Not a texture".into()),
meta, meta,
@ -2280,6 +2309,15 @@ pub fn sampled_to_depth(
meta, meta,
}), }),
}; };
// Copy the handle to allow borrowing the `ctx` again
let ty = *ty;
// If the image was passed through a function argument we also need to change
// the corresponding parameter
if let Expression::FunctionArgument(i) = ctx[image] {
ctx.parameters[i as usize] = ty;
}
} }
bitflags::bitflags! { bitflags::bitflags! {


@ -617,11 +617,14 @@ impl Context {
width: right_width, width: right_width,
}, },
) => { ) => {
let dimensions_ok = if op == BinaryOperator::Multiply {
left_columns == right_rows
} else {
left_columns == right_columns && left_rows == right_rows
};
// Check that the two arguments have the same dimensions // Check that the two arguments have the same dimensions
if left_columns != right_columns if !dimensions_ok || left_width != right_width {
|| left_rows != right_rows
|| left_width != right_width
{
parser.errors.push(Error { parser.errors.push(Error {
kind: ErrorKind::SemanticError( kind: ErrorKind::SemanticError(
format!( format!(
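
The new `dimensions_ok` check distinguishes multiplication from the component-wise matrix operators: a product only needs the left operand's column count to match the right operand's row count, while the other operators need identical shapes (the separate scalar-width check is unchanged). A standalone sketch of the rule, with tuples as (columns, rows) and no naga types:

fn matrix_dims_ok(is_multiply: bool, left: (u32, u32), right: (u32, u32)) -> bool {
    if is_multiply {
        left.0 == right.1 // left columns must equal right rows
    } else {
        left == right // component-wise ops need identical shapes
    }
}

fn main() {
    assert!(matrix_dims_ok(true, (2, 3), (4, 2)));   // mat2x3 * mat4x2 is allowed
    assert!(!matrix_dims_ok(false, (2, 3), (4, 2))); // mat2x3 + mat4x2 is not
}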
@ -1174,6 +1177,10 @@ impl Context {
self.emit_end(&mut reject_body); self.emit_end(&mut reject_body);
} }
} }
} else {
// Technically there's nothing to flush but later we will need to
// add some expressions that must not be emitted.
self.emit_end(body)
} }
// We need to get the type of the resulting expression to create the local, // We need to get the type of the resulting expression to create the local,


@ -671,6 +671,13 @@ impl Parser {
let overload_param_ty = &self.module.types[*overload_parameter].inner; let overload_param_ty = &self.module.types[*overload_parameter].inner;
let call_arg_ty = self.resolve_type(ctx, call_argument.0, call_argument.1)?; let call_arg_ty = self.resolve_type(ctx, call_argument.0, call_argument.1)?;
log::trace!(
"Testing parameter {}\n\tOverload = {:?}\n\tCall = {:?}",
i,
overload_param_ty,
call_arg_ty
);
// Storage images cannot be directly compared since while the access is part of the // Storage images cannot be directly compared since while the access is part of the
// type in naga's IR, in glsl they are a qualifier and don't enter in the match as // type in naga's IR, in glsl they are a qualifier and don't enter in the match as
// long as the access needed is satisfied. // long as the access needed is satisfied.


@ -16,7 +16,7 @@ use super::{
error::{Error, ErrorKind}, error::{Error, ErrorKind},
Span, Span,
}; };
use crate::{front::align_up, Arena, Constant, Handle, Type, TypeInner, UniqueArena}; use crate::{proc::Alignment, Arena, Constant, Handle, Type, TypeInner, UniqueArena};
/// Struct with information needed for defining a struct member. /// Struct with information needed for defining a struct member.
/// ///
@ -28,7 +28,7 @@ pub struct TypeAlignSpan {
/// with a different stride set. /// with a different stride set.
pub ty: Handle<Type>, pub ty: Handle<Type>,
/// The alignment required by the type. /// The alignment required by the type.
pub align: u32, pub align: Alignment,
/// The size of the type. /// The size of the type.
pub span: u32, pub span: u32,
} }
@ -54,15 +54,15 @@ pub fn calculate_offset(
let (align, span) = match types[ty].inner { let (align, span) = match types[ty].inner {
// 1. If the member is a scalar consuming N basic machine units, // 1. If the member is a scalar consuming N basic machine units,
// the base alignment is N. // the base alignment is N.
TypeInner::Scalar { width, .. } => (width as u32, width as u32), TypeInner::Scalar { width, .. } => (Alignment::from_width(width), width as u32),
// 2. If the member is a two- or four-component vector with components // 2. If the member is a two- or four-component vector with components
// consuming N basic machine units, the base alignment is 2N or 4N, respectively. // consuming N basic machine units, the base alignment is 2N or 4N, respectively.
// 3. If the member is a three-component vector with components consuming N // 3. If the member is a three-component vector with components consuming N
// basic machine units, the base alignment is 4N. // basic machine units, the base alignment is 4N.
TypeInner::Vector { size, width, .. } => match size { TypeInner::Vector { size, width, .. } => (
crate::VectorSize::Tri => (4 * width as u32, 3 * width as u32), Alignment::from(size) * Alignment::from_width(width),
_ => (size as u32 * width as u32, size as u32 * width as u32), size as u32 * width as u32,
}, ),
// 4. If the member is an array of scalars or vectors, the base alignment and array // 4. If the member is an array of scalars or vectors, the base alignment and array
// stride are set to match the base alignment of a single array element, according // stride are set to match the base alignment of a single array element, according
// to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4.
@ -71,14 +71,14 @@ pub fn calculate_offset(
let info = calculate_offset(base, meta, layout, types, constants, errors); let info = calculate_offset(base, meta, layout, types, constants, errors);
let name = types[ty].name.clone(); let name = types[ty].name.clone();
let mut align = info.align;
let mut stride = (align).max(info.span);
// See comment at the beginning of the function // See comment at the beginning of the function
if StructLayout::Std430 != layout { let (align, stride) = if StructLayout::Std430 == layout {
stride = align_up(stride, 16); (info.align, info.align.round_up(info.span))
align = align_up(align, 16); } else {
} let align = info.align.max(Alignment::MIN_UNIFORM);
(align, align.round_up(info.span))
};
let span = match size { let span = match size {
crate::ArraySize::Constant(s) => { crate::ArraySize::Constant(s) => {
@ -111,14 +111,11 @@ pub fn calculate_offset(
rows, rows,
width, width,
} => { } => {
let mut align = match rows { let mut align = Alignment::from(rows) * Alignment::from_width(width);
crate::VectorSize::Tri => (4 * width as u32),
_ => (rows as u32 * width as u32),
};
// See comment at the beginning of the function // See comment at the beginning of the function
if StructLayout::Std430 != layout { if StructLayout::Std430 != layout {
align = align_up(align, 16); align = align.max(Alignment::MIN_UNIFORM);
} }
// See comment on the error kind // See comment on the error kind
@ -133,15 +130,16 @@ pub fn calculate_offset(
} }
TypeInner::Struct { ref members, .. } => { TypeInner::Struct { ref members, .. } => {
let mut span = 0; let mut span = 0;
let mut align = 0; let mut align = Alignment::ONE;
let mut members = members.clone(); let mut members = members.clone();
let name = types[ty].name.clone(); let name = types[ty].name.clone();
for member in members.iter_mut() { for member in members.iter_mut() {
let info = calculate_offset(member.ty, meta, layout, types, constants, errors); let info = calculate_offset(member.ty, meta, layout, types, constants, errors);
span = align_up(span, info.align); let member_alignment = info.align;
align = align.max(info.align); span = member_alignment.round_up(span);
align = member_alignment.max(align);
member.ty = info.ty; member.ty = info.ty;
member.offset = span; member.offset = span;
@ -149,7 +147,7 @@ pub fn calculate_offset(
span += info.span; span += info.span;
} }
span = align_up(span, align); span = align.round_up(span);
let ty_span = types.get_span(ty); let ty_span = types.get_span(ty);
ty = types.insert( ty = types.insert(
@ -167,7 +165,7 @@ pub fn calculate_offset(
kind: ErrorKind::SemanticError("Invalid struct member type".into()), kind: ErrorKind::SemanticError("Invalid struct member type".into()),
meta, meta,
}); });
(1, 0) (Alignment::ONE, 0)
} }
}; };
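
As a concrete illustration of rules (1)-(3) quoted above, and of the std140 rounding that `Alignment::MIN_UNIFORM` now expresses, here is a small self-contained computation (plain integers, not naga's `Alignment`; power-of-two alignments assumed):

fn vector_align(size: u32, width: u32) -> u32 {
    // Rule (3): three-component vectors align like four-component ones.
    if size == 3 { 4 * width } else { size * width }
}

fn round_up(value: u32, align: u32) -> u32 {
    ((value + align - 1) / align) * align
}

fn main() {
    // vec3<f32>: 12 bytes of data but 16-byte alignment.
    assert_eq!(vector_align(3, 4), 16);
    // A vec2<f32> array element keeps 8-byte alignment under std430 ...
    assert_eq!(vector_align(2, 4), 8);
    // ... but is rounded up to the vec4 alignment (16) under std140.
    assert_eq!(round_up(vector_align(2, 4), 16), 16);
}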


@ -12,6 +12,7 @@ use crate::{
variables::{GlobalOrConstant, VarDeclaration}, variables::{GlobalOrConstant, VarDeclaration},
Error, ErrorKind, Parser, Span, Error, ErrorKind, Parser, Span,
}, },
proc::Alignment,
AddressSpace, Block, Expression, FunctionResult, Handle, ScalarKind, Statement, StructMember, AddressSpace, Block, Expression, FunctionResult, Handle, ScalarKind, Statement, StructMember,
Type, TypeInner, Type, TypeInner,
}; };
@ -570,7 +571,7 @@ impl<'source> ParsingContext<'source> {
layout: StructLayout, layout: StructLayout,
) -> Result<u32> { ) -> Result<u32> {
let mut span = 0; let mut span = 0;
let mut align = 0; let mut align = Alignment::ONE;
loop { loop {
// TODO: type_qualifier // TODO: type_qualifier
@ -593,8 +594,9 @@ impl<'source> ParsingContext<'source> {
&mut parser.errors, &mut parser.errors,
); );
span = crate::front::align_up(span, info.align); let member_alignment = info.align;
align = align.max(info.align); span = member_alignment.round_up(span);
align = member_alignment.max(align);
members.push(StructMember { members.push(StructMember {
name: Some(name), name: Some(name),
@ -610,7 +612,7 @@ impl<'source> ParsingContext<'source> {
} }
} }
span = crate::front::align_up(span, align); span = align.round_up(span);
Ok(span) Ok(span)
} }


@ -177,6 +177,8 @@ impl<'source> ParsingContext<'source> {
ctx.emit_restart(body); ctx.emit_restart(body);
let mut cases = Vec::new(); let mut cases = Vec::new();
// Track if any default case is present in the switch statement.
let mut default_present = false;
self.expect(parser, TokenValue::LeftBrace)?; self.expect(parser, TokenValue::LeftBrace)?;
loop { loop {
@ -215,6 +217,7 @@ impl<'source> ParsingContext<'source> {
} }
TokenValue::Default => { TokenValue::Default => {
self.bump(parser)?; self.bump(parser)?;
default_present = true;
crate::SwitchValue::Default crate::SwitchValue::Default
} }
TokenValue::RightBrace => { TokenValue::RightBrace => {
@ -273,6 +276,40 @@ impl<'source> ParsingContext<'source> {
meta.subsume(end_meta); meta.subsume(end_meta);
// NOTE: do not unwrap here since a switch statement isn't required
// to have any cases.
if let Some(case) = cases.last_mut() {
// GLSL requires that the last case not be empty, so we check
// that here and produce an error otherwise (fall_through must
// also be checked because `break`s count as statements but
// they aren't added to the body)
if case.body.is_empty() && case.fall_through {
parser.errors.push(Error {
kind: ErrorKind::SemanticError(
"last case/default label must be followed by statements".into(),
),
meta,
})
}
// GLSL allows the last case to not have any `break` statement,
// this would mark it as fall through, but naga's IR requires that
// the last case must not fall through, so we need to mark
// the last case as not fall through always.
case.fall_through = false;
}
// Add an empty default case in case none was present; this is needed because
// naga's IR requires that all switch statements must have a default case but
// GLSL doesn't require that, so we might need to add an empty default case.
if !default_present {
cases.push(SwitchCase {
value: crate::SwitchValue::Default,
body: Block::new(),
fall_through: false,
})
}
body.push(Statement::Switch { selector, cases }, meta); body.push(Statement::Switch { selector, cases }, meta);
meta meta
@ -321,6 +358,7 @@ impl<'source> ParsingContext<'source> {
Statement::Loop { Statement::Loop {
body: loop_body, body: loop_body,
continuing: Block::new(), continuing: Block::new(),
break_if: None,
}, },
meta, meta,
); );
@ -374,6 +412,7 @@ impl<'source> ParsingContext<'source> {
Statement::Loop { Statement::Loop {
body: loop_body, body: loop_body,
continuing: Block::new(), continuing: Block::new(),
break_if: None,
}, },
meta, meta,
); );
@ -476,6 +515,7 @@ impl<'source> ParsingContext<'source> {
Statement::Loop { Statement::Loop {
body: block, body: block,
continuing, continuing,
break_if: None,
}, },
meta, meta,
); );

third_party/rust/naga/src/front/mod.rs (vendored)

@ -133,20 +133,3 @@ impl ops::Index<Handle<crate::Expression>> for Typifier {
&self.resolutions[handle.index()] &self.resolutions[handle.index()]
} }
} }
/// Helper function used for aligning `value` to the next multiple of `align`
///
/// # Notes:
/// - `align` must be a power of two.
/// - The returned value will be greater or equal to `value`.
/// # Examples:
/// ```ignore
/// assert_eq!(0, align_up(0, 16));
/// assert_eq!(16, align_up(1, 16));
/// assert_eq!(16, align_up(16, 16));
/// assert_eq!(334, align_up(333, 2));
/// assert_eq!(384, align_up(257, 128));
/// ```
pub const fn align_up(value: u32, align: u32) -> u32 {
((value.wrapping_sub(1)) & !(align - 1)).wrapping_add(align)
}
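
The helper is removed because its callers now go through `crate::proc::Alignment`, whose `round_up` provides the same rounding. For reference, a standalone stand-in using the same bit trick, with the old doc examples kept as assertions (power-of-two alignment assumed):

const fn round_up(value: u32, align: u32) -> u32 {
    ((value.wrapping_sub(1)) & !(align - 1)).wrapping_add(align)
}

fn main() {
    assert_eq!(0, round_up(0, 16));
    assert_eq!(16, round_up(1, 16));
    assert_eq!(16, round_up(16, 16));
    assert_eq!(334, round_up(333, 2));
    assert_eq!(384, round_up(257, 128));
}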


@ -556,7 +556,11 @@ impl<'function> BlockContext<'function> {
let continuing = lower_impl(blocks, bodies, continuing); let continuing = lower_impl(blocks, bodies, continuing);
block.push( block.push(
crate::Statement::Loop { body, continuing }, crate::Statement::Loop {
body,
continuing,
break_if: None,
},
crate::Span::default(), crate::Span::default(),
) )
} }

third_party/rust/naga/src/front/spv/mod.rs (vendored)

@ -39,7 +39,7 @@ use function::*;
use crate::{ use crate::{
arena::{Arena, Handle, UniqueArena}, arena::{Arena, Handle, UniqueArena},
proc::Layouter, proc::{Alignment, Layouter},
FastHashMap, FastHashSet, FastHashMap, FastHashSet,
}; };
@ -71,6 +71,7 @@ pub const SUPPORTED_CAPABILITIES: &[spirv::Capability] = &[
spirv::Capability::Float16, spirv::Capability::Float16,
spirv::Capability::Float64, spirv::Capability::Float64,
spirv::Capability::Geometry, spirv::Capability::Geometry,
spirv::Capability::MultiView,
// tricky ones // tricky ones
spirv::Capability::UniformBufferArrayDynamicIndexing, spirv::Capability::UniformBufferArrayDynamicIndexing,
spirv::Capability::StorageBufferArrayDynamicIndexing, spirv::Capability::StorageBufferArrayDynamicIndexing,
@ -305,7 +306,8 @@ struct LookupExpression {
/// ///
/// Note that, while a SPIR-V result id can be used in any block dominated /// Note that, while a SPIR-V result id can be used in any block dominated
/// by its definition, a Naga `Expression` is only in scope for the rest of /// by its definition, a Naga `Expression` is only in scope for the rest of
/// its subtree. `Parser::get_expr_handle` takes care of /// its subtree. `Parser::get_expr_handle` takes care of spilling the result
/// to a `LocalVariable` which can then be used anywhere.
handle: Handle<crate::Expression>, handle: Handle<crate::Expression>,
/// The SPIR-V type of this result. /// The SPIR-V type of this result.
@ -2391,6 +2393,34 @@ impl<I: Iterator<Item = u32>> Parser<I> {
}, },
); );
} }
Op::BitReverse | Op::BitCount => {
inst.expect(4)?;
let result_type_id = self.next()?;
let result_id = self.next()?;
let base_id = self.next()?;
let base_lexp = self.lookup_expression.lookup(base_id)?;
let base_handle = get_expr_handle!(base_id, base_lexp);
let expr = crate::Expression::Math {
fun: match inst.op {
Op::BitReverse => crate::MathFunction::ReverseBits,
Op::BitCount => crate::MathFunction::CountOneBits,
_ => unreachable!(),
},
arg: base_handle,
arg1: None,
arg2: None,
arg3: None,
};
self.lookup_expression.insert(
result_id,
LookupExpression {
handle: ctx.expressions.append(expr, span),
type_id: result_type_id,
block_id,
},
);
}
Op::OuterProduct => { Op::OuterProduct => {
inst.expect(5)?; inst.expect(5)?;
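
The `OpBitReverse`/`OpBitCount` handling added a few lines above maps directly onto naga's `ReverseBits` and `CountOneBits` math functions, which behave like Rust's integer intrinsics. A tiny worked example of the two operations:

fn main() {
    let x: u32 = 0b0000_0011;
    assert_eq!(x.count_ones(), 2);             // OpBitCount / CountOneBits
    assert_eq!(x.reverse_bits(), 0xC000_0000); // OpBitReverse / ReverseBits
}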
@ -3535,6 +3565,7 @@ impl<I: Iterator<Item = u32>> Parser<I> {
S::Loop { S::Loop {
ref mut body, ref mut body,
ref mut continuing, ref mut continuing,
break_if: _,
} => { } => {
self.patch_statements(body, expressions, fun_parameter_sampling)?; self.patch_statements(body, expressions, fun_parameter_sampling)?;
self.patch_statements(continuing, expressions, fun_parameter_sampling)?; self.patch_statements(continuing, expressions, fun_parameter_sampling)?;
@ -4347,7 +4378,7 @@ impl<I: Iterator<Item = u32>> Parser<I> {
let mut member_lookups = Vec::with_capacity(members.capacity()); let mut member_lookups = Vec::with_capacity(members.capacity());
let mut storage_access = crate::StorageAccess::empty(); let mut storage_access = crate::StorageAccess::empty();
let mut span = 0; let mut span = 0;
let mut alignment = 1; let mut alignment = Alignment::ONE;
for i in 0..u32::from(inst.wc) - 2 { for i in 0..u32::from(inst.wc) - 2 {
let type_id = self.next()?; let type_id = self.next()?;
let ty = self.lookup_type.lookup(type_id)?.handle; let ty = self.lookup_type.lookup(type_id)?.handle;
@ -4363,8 +4394,9 @@ impl<I: Iterator<Item = u32>> Parser<I> {
row_major: decor.matrix_major == Some(Majority::Row), row_major: decor.matrix_major == Some(Majority::Row),
}); });
span = crate::front::align_up(span, self.layouter[ty].alignment.get()); let member_alignment = self.layouter[ty].alignment;
alignment = self.layouter[ty].alignment.get().max(alignment); span = member_alignment.round_up(span);
alignment = member_alignment.max(alignment);
let mut binding = decor.io_binding().ok(); let mut binding = decor.io_binding().ok();
if let Some(offset) = decor.offset { if let Some(offset) = decor.offset {
@ -4382,8 +4414,7 @@ impl<I: Iterator<Item = u32>> Parser<I> {
} = *inner } = *inner
{ {
if let Some(stride) = decor.matrix_stride { if let Some(stride) = decor.matrix_stride {
let aligned_rows = if rows > crate::VectorSize::Bi { 4 } else { 2 }; let expected_stride = Alignment::from(rows) * width as u32;
let expected_stride = aligned_rows * width as u32;
if stride.get() != expected_stride { if stride.get() != expected_stride {
return Err(Error::UnsupportedMatrixStride { return Err(Error::UnsupportedMatrixStride {
stride: stride.get(), stride: stride.get(),
@ -4406,7 +4437,7 @@ impl<I: Iterator<Item = u32>> Parser<I> {
}); });
} }
span = crate::front::align_up(span, alignment); span = alignment.round_up(span);
let inner = crate::TypeInner::Struct { span, members }; let inner = crate::TypeInner::Struct { span, members };


@ -196,9 +196,7 @@ fn parse_constructor_type<'a>(
} }
(Token::Paren('<'), ConstructorType::PartialArray) => { (Token::Paren('<'), ConstructorType::PartialArray) => {
lexer.expect_generic_paren('<')?; lexer.expect_generic_paren('<')?;
let base = parser let base = parser.parse_type_decl(lexer, None, type_arena, const_arena)?;
.parse_type_decl(lexer, None, type_arena, const_arena)?
.0;
let size = if lexer.skip(Token::Separator(',')) { let size = if lexer.skip(Token::Separator(',')) {
let const_handle = parser.parse_const_expression(lexer, type_arena, const_arena)?; let const_handle = parser.parse_const_expression(lexer, type_arena, const_arena)?;
ArraySize::Constant(const_handle) ArraySize::Constant(const_handle)

third_party/rust/naga/src/front/wgsl/lexer.rs (vendored)

@ -1,269 +1,10 @@
use super::{conv, Error, ExpectedToken, NumberType, Span, Token, TokenSpan}; use super::{conv, number::consume_number, Error, ExpectedToken, Span, Token, TokenSpan};
fn consume_any(input: &str, what: impl Fn(char) -> bool) -> (&str, &str) { fn consume_any(input: &str, what: impl Fn(char) -> bool) -> (&str, &str) {
let pos = input.find(|c| !what(c)).unwrap_or(input.len()); let pos = input.find(|c| !what(c)).unwrap_or(input.len());
input.split_at(pos) input.split_at(pos)
} }
/// Tries to skip a given prefix in the input string.
/// Returns whether the prefix was present and could therefore be skipped,
/// the remaining str and the number of *bytes* skipped.
pub fn try_skip_prefix<'a, 'b>(input: &'a str, prefix: &'b str) -> (bool, &'a str, usize) {
if let Some(rem) = input.strip_prefix(prefix) {
(true, rem, prefix.len())
} else {
(false, input, 0)
}
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum NLDigitState {
Nothing,
LeadingZero,
DigitBeforeDot,
OnlyDot,
DigitsThenDot,
DigitAfterDot,
Exponent,
SignAfterExponent,
DigitAfterExponent,
}
struct NumberLexerState {
_minus: bool,
hex: bool,
leading_zeros: usize,
digit_state: NLDigitState,
uint_suffix: bool,
}
impl NumberLexerState {
// TODO: add proper error reporting, possibly through try_into_token function returning Result
pub fn _is_valid_number(&self) -> bool {
match *self {
Self {
_minus: false, // No negative zero for integers.
hex,
leading_zeros,
digit_state: NLDigitState::LeadingZero,
..
} => hex || leading_zeros == 1, // No leading zeros allowed in non-hex integers, "0" is always allowed.
Self {
_minus: minus,
hex,
leading_zeros,
digit_state: NLDigitState::DigitBeforeDot,
uint_suffix,
} => {
(hex || leading_zeros == 0) // No leading zeros allowed in non-hex integers.
// In this state the number has non-zero digits,
// i.e. it is not just "0".
&& (minus ^ uint_suffix) // Either a negative number, or and unsigned integer, not both.
}
_ => self.is_float(),
}
}
pub fn is_float(&self) -> bool {
!self.uint_suffix
&& (self.digit_state == NLDigitState::DigitsThenDot
|| self.digit_state == NLDigitState::DigitAfterDot
|| self.digit_state == NLDigitState::DigitAfterExponent)
}
}
fn consume_number(input: &str) -> (Token, &str) {
let (minus, working_substr, minus_offset) = try_skip_prefix(input, "-");
let (hex, working_substr, hex_offset) = try_skip_prefix(working_substr, "0x");
let mut state = NumberLexerState {
_minus: minus,
hex,
leading_zeros: 0,
digit_state: NLDigitState::Nothing,
uint_suffix: false,
};
let mut what = |c| {
match state {
NumberLexerState {
uint_suffix: true, ..
} => return false, // Scanning is done once we've reached a type suffix.
NumberLexerState {
hex,
digit_state: NLDigitState::Nothing,
..
} => match c {
'0' => {
state.digit_state = NLDigitState::LeadingZero;
state.leading_zeros += 1;
}
'1'..='9' => {
state.digit_state = NLDigitState::DigitBeforeDot;
}
'a'..='f' | 'A'..='F' if hex => {
state.digit_state = NLDigitState::DigitBeforeDot;
}
'.' => {
state.digit_state = NLDigitState::OnlyDot;
}
_ => return false,
},
NumberLexerState {
hex,
digit_state: NLDigitState::LeadingZero,
..
} => match c {
'0' => {
// We stay in NLDigitState::LeadingZero.
state.leading_zeros += 1;
}
'1'..='9' => {
state.digit_state = NLDigitState::DigitBeforeDot;
}
'a'..='f' | 'A'..='F' if hex => {
state.digit_state = NLDigitState::DigitBeforeDot;
}
'.' => {
state.digit_state = NLDigitState::DigitsThenDot;
}
'e' | 'E' if !hex => {
state.digit_state = NLDigitState::Exponent;
}
'p' | 'P' if hex => {
state.digit_state = NLDigitState::Exponent;
}
'u' => {
// We stay in NLDigitState::LeadingZero.
state.uint_suffix = true;
}
_ => return false,
},
NumberLexerState {
hex,
digit_state: NLDigitState::DigitBeforeDot,
..
} => match c {
'0'..='9' => {
// We stay in NLDigitState::DigitBeforeDot.
}
'a'..='f' | 'A'..='F' if hex => {
// We stay in NLDigitState::DigitBeforeDot.
}
'.' => {
state.digit_state = NLDigitState::DigitsThenDot;
}
'e' | 'E' if !hex => {
state.digit_state = NLDigitState::Exponent;
}
'p' | 'P' if hex => {
state.digit_state = NLDigitState::Exponent;
}
'u' => {
// We stay in NLDigitState::DigitBeforeDot.
state.uint_suffix = true;
}
_ => return false,
},
NumberLexerState {
hex,
digit_state: NLDigitState::OnlyDot,
..
} => match c {
'0'..='9' => {
state.digit_state = NLDigitState::DigitAfterDot;
}
'a'..='f' | 'A'..='F' if hex => {
state.digit_state = NLDigitState::DigitAfterDot;
}
_ => return false,
},
NumberLexerState {
hex,
digit_state: NLDigitState::DigitsThenDot | NLDigitState::DigitAfterDot,
..
} => match c {
'0'..='9' => {
state.digit_state = NLDigitState::DigitAfterDot;
}
'a'..='f' | 'A'..='F' if hex => {
state.digit_state = NLDigitState::DigitAfterDot;
}
'e' | 'E' if !hex => {
state.digit_state = NLDigitState::Exponent;
}
'p' | 'P' if hex => {
state.digit_state = NLDigitState::Exponent;
}
_ => return false,
},
NumberLexerState {
digit_state: NLDigitState::Exponent,
..
} => match c {
'0'..='9' => {
state.digit_state = NLDigitState::DigitAfterExponent;
}
'-' | '+' => {
state.digit_state = NLDigitState::SignAfterExponent;
}
_ => return false,
},
NumberLexerState {
digit_state: NLDigitState::SignAfterExponent | NLDigitState::DigitAfterExponent,
..
} => match c {
'0'..='9' => {
state.digit_state = NLDigitState::DigitAfterExponent;
}
_ => return false,
},
}
// No match branch has rejected this yet, so we are still in a number literal
true
};
let pos = working_substr
.find(|c| !what(c))
.unwrap_or(working_substr.len());
let (value, rest) = input.split_at(pos + minus_offset + hex_offset);
// NOTE: This code can use string slicing,
// because number literals are exclusively ASCII.
// This means all relevant characters fit into one byte
// and using string slicing (which slices UTF-8 bytes) works for us.
// TODO: A syntax error can already be recognized here, possibly report it at this stage.
// Return possibly knowably incorrect (given !state.is_valid_number()) token for now.
(
Token::Number {
value: if state.uint_suffix {
&value[..value.len() - 1]
} else {
value
},
ty: if state.uint_suffix {
NumberType::Uint
} else if state.is_float() {
NumberType::Float
} else {
NumberType::Sint
},
},
rest,
)
}
fn consume_token(input: &str, generic: bool) -> (Token<'_>, &str) { fn consume_token(input: &str, generic: bool) -> (Token<'_>, &str) {
let mut chars = input.chars(); let mut chars = input.chars();
let cur = match chars.next() { let cur = match chars.next() {
@ -632,6 +373,9 @@ impl<'a> Lexer<'a> {
} }
} }
#[cfg(test)]
use super::{number::Number, NumberError};
#[cfg(test)] #[cfg(test)]
fn sub_test(source: &str, expected_tokens: &[Token]) { fn sub_test(source: &str, expected_tokens: &[Token]) {
let mut lex = Lexer::new(source); let mut lex = Lexer::new(source);
@ -641,41 +385,195 @@ fn sub_test(source: &str, expected_tokens: &[Token]) {
assert_eq!(lex.next().0, Token::End); assert_eq!(lex.next().0, Token::End);
} }
#[test]
fn test_numbers() {
// WGSL spec examples //
// decimal integer
sub_test(
"0x123 0X123u 1u 123 0 0i 0x3f",
&[
Token::Number(Ok(Number::I32(291))),
Token::Number(Ok(Number::U32(291))),
Token::Number(Ok(Number::U32(1))),
Token::Number(Ok(Number::I32(123))),
Token::Number(Ok(Number::I32(0))),
Token::Number(Ok(Number::I32(0))),
Token::Number(Ok(Number::I32(63))),
],
);
// decimal floating point
sub_test(
"0.e+4f 01. .01 12.34 .0f 0h 1e-3 0xa.fp+2 0x1P+4f 0X.3 0x3p+2h 0X1.fp-4 0x3.2p+2h",
&[
Token::Number(Ok(Number::F32(0.))),
Token::Number(Ok(Number::F32(1.))),
Token::Number(Ok(Number::F32(0.01))),
Token::Number(Ok(Number::F32(12.34))),
Token::Number(Ok(Number::F32(0.))),
Token::Number(Err(NumberError::UnimplementedF16)),
Token::Number(Ok(Number::F32(0.001))),
Token::Number(Ok(Number::F32(43.75))),
Token::Number(Ok(Number::F32(16.))),
Token::Number(Ok(Number::F32(0.1875))),
Token::Number(Err(NumberError::UnimplementedF16)),
Token::Number(Ok(Number::F32(0.12109375))),
Token::Number(Err(NumberError::UnimplementedF16)),
],
);
// MIN / MAX //
// min / max decimal signed integer
sub_test(
"-2147483648i 2147483647i -2147483649i 2147483648i",
&[
Token::Number(Ok(Number::I32(i32::MIN))),
Token::Number(Ok(Number::I32(i32::MAX))),
Token::Number(Err(NumberError::NotRepresentable)),
Token::Number(Err(NumberError::NotRepresentable)),
],
);
// min / max decimal unsigned integer
sub_test(
"0u 4294967295u -1u 4294967296u",
&[
Token::Number(Ok(Number::U32(u32::MIN))),
Token::Number(Ok(Number::U32(u32::MAX))),
Token::Number(Err(NumberError::NotRepresentable)),
Token::Number(Err(NumberError::NotRepresentable)),
],
);
// min / max hexadecimal signed integer
sub_test(
"-0x80000000i 0x7FFFFFFFi -0x80000001i 0x80000000i",
&[
Token::Number(Ok(Number::I32(i32::MIN))),
Token::Number(Ok(Number::I32(i32::MAX))),
Token::Number(Err(NumberError::NotRepresentable)),
Token::Number(Err(NumberError::NotRepresentable)),
],
);
// min / max hexadecimal unsigned integer
sub_test(
"0x0u 0xFFFFFFFFu -0x1u 0x100000000u",
&[
Token::Number(Ok(Number::U32(u32::MIN))),
Token::Number(Ok(Number::U32(u32::MAX))),
Token::Number(Err(NumberError::NotRepresentable)),
Token::Number(Err(NumberError::NotRepresentable)),
],
);
/// ≈ 2^-126 * 2^-23 (= 2^-149)
const SMALLEST_POSITIVE_SUBNORMAL_F32: f32 = 1e-45;
/// ≈ 2^-126 * (1 - 2^-23)
const LARGEST_SUBNORMAL_F32: f32 = 1.1754942e-38;
/// ≈ 2^-126
const SMALLEST_POSITIVE_NORMAL_F32: f32 = f32::MIN_POSITIVE;
/// ≈ 1 - 2^-24
const LARGEST_F32_LESS_THAN_ONE: f32 = 0.99999994;
/// ≈ 1 + 2^-23
const SMALLEST_F32_LARGER_THAN_ONE: f32 = 1.0000001;
/// ≈ -(2^127 * (2 - 2^-23))
const SMALLEST_NORMAL_F32: f32 = f32::MIN;
/// ≈ 2^127 * (2 - 2^-23)
const LARGEST_NORMAL_F32: f32 = f32::MAX;
// decimal floating point
sub_test(
"1e-45f 1.1754942e-38f 1.17549435e-38f 0.99999994f 1.0000001f -3.40282347e+38f 3.40282347e+38f",
&[
Token::Number(Ok(Number::F32(
SMALLEST_POSITIVE_SUBNORMAL_F32,
))),
Token::Number(Ok(Number::F32(
LARGEST_SUBNORMAL_F32,
))),
Token::Number(Ok(Number::F32(
SMALLEST_POSITIVE_NORMAL_F32,
))),
Token::Number(Ok(Number::F32(
LARGEST_F32_LESS_THAN_ONE,
))),
Token::Number(Ok(Number::F32(
SMALLEST_F32_LARGER_THAN_ONE,
))),
Token::Number(Ok(Number::F32(
SMALLEST_NORMAL_F32,
))),
Token::Number(Ok(Number::F32(
LARGEST_NORMAL_F32,
))),
],
);
sub_test(
"-3.40282367e+38f 3.40282367e+38f",
&[
Token::Number(Err(NumberError::NotRepresentable)), // ≈ -2^128
Token::Number(Err(NumberError::NotRepresentable)), // ≈ 2^128
],
);
// hexadecimal floating point
sub_test(
"0x1p-149f 0x7FFFFFp-149f 0x1p-126f 0xFFFFFFp-24f 0x800001p-23f -0xFFFFFFp+104f 0xFFFFFFp+104f",
&[
Token::Number(Ok(Number::F32(
SMALLEST_POSITIVE_SUBNORMAL_F32,
))),
Token::Number(Ok(Number::F32(
LARGEST_SUBNORMAL_F32,
))),
Token::Number(Ok(Number::F32(
SMALLEST_POSITIVE_NORMAL_F32,
))),
Token::Number(Ok(Number::F32(
LARGEST_F32_LESS_THAN_ONE,
))),
Token::Number(Ok(Number::F32(
SMALLEST_F32_LARGER_THAN_ONE,
))),
Token::Number(Ok(Number::F32(
SMALLEST_NORMAL_F32,
))),
Token::Number(Ok(Number::F32(
LARGEST_NORMAL_F32,
))),
],
);
sub_test(
"-0x1p128f 0x1p128f 0x1.000001p0f",
&[
Token::Number(Err(NumberError::NotRepresentable)), // = -2^128
Token::Number(Err(NumberError::NotRepresentable)), // = 2^128
Token::Number(Err(NumberError::NotRepresentable)),
],
);
}
#[test] #[test]
fn test_tokens() { fn test_tokens() {
sub_test("id123_OK", &[Token::Word("id123_OK")]); sub_test("id123_OK", &[Token::Word("id123_OK")]);
sub_test( sub_test(
"92No", "92No",
&[ &[Token::Number(Ok(Number::I32(92))), Token::Word("No")],
Token::Number {
value: "92",
ty: NumberType::Sint,
},
Token::Word("No"),
],
); );
sub_test( sub_test(
"2u3o", "2u3o",
&[ &[
Token::Number { Token::Number(Ok(Number::U32(2))),
value: "2", Token::Number(Ok(Number::I32(3))),
ty: NumberType::Uint,
},
Token::Number {
value: "3",
ty: NumberType::Sint,
},
Token::Word("o"), Token::Word("o"),
], ],
); );
sub_test( sub_test(
"2.4f44po", "2.4f44po",
&[ &[
Token::Number { Token::Number(Ok(Number::F32(2.4))),
value: "2.4", Token::Number(Ok(Number::I32(44))),
ty: NumberType::Float, Token::Word("po"),
},
Token::Word("f44po"),
], ],
); );
sub_test( sub_test(
@ -715,10 +613,7 @@ fn test_variable_decl() {
Token::Attribute, Token::Attribute,
Token::Word("group"), Token::Word("group"),
Token::Paren('('), Token::Paren('('),
Token::Number { Token::Number(Ok(Number::I32(0))),
value: "0",
ty: NumberType::Sint,
},
Token::Paren(')'), Token::Paren(')'),
Token::Word("var"), Token::Word("var"),
Token::Paren('<'), Token::Paren('<'),

third_party/rust/naga/src/front/wgsl/mod.rs (vendored)

@ -7,7 +7,7 @@ Frontend for [WGSL][wgsl] (WebGPU Shading Language).
mod construction; mod construction;
mod conv; mod conv;
mod lexer; mod lexer;
mod number_literals; mod number;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
@ -16,31 +16,24 @@ use crate::{
proc::{ proc::{
ensure_block_returns, Alignment, Layouter, ResolveContext, ResolveError, TypeResolution, ensure_block_returns, Alignment, Layouter, ResolveContext, ResolveError, TypeResolution,
}, },
span::SourceLocation,
span::Span as NagaSpan, span::Span as NagaSpan,
Bytes, ConstantInner, FastHashMap, ScalarValue, ConstantInner, FastHashMap, ScalarValue,
}; };
use self::{ use self::{lexer::Lexer, number::Number};
lexer::Lexer,
number_literals::{
get_f32_literal, get_i32_literal, get_u32_literal, parse_generic_non_negative_int_literal,
parse_non_negative_sint_literal,
},
};
use codespan_reporting::{ use codespan_reporting::{
diagnostic::{Diagnostic, Label}, diagnostic::{Diagnostic, Label},
files::{Files, SimpleFile}, files::SimpleFile,
term::{ term::{
self, self,
termcolor::{ColorChoice, ColorSpec, StandardStream, WriteColor}, termcolor::{ColorChoice, ColorSpec, StandardStream, WriteColor},
}, },
}; };
use hexf_parse::ParseHexfError;
use std::{ use std::{
borrow::Cow, borrow::Cow,
convert::TryFrom, convert::TryFrom,
io::{self, Write}, io::{self, Write},
num::{NonZeroU32, ParseFloatError, ParseIntError},
ops, ops,
}; };
use thiserror::Error; use thiserror::Error;
@ -48,19 +41,12 @@ use thiserror::Error;
type Span = ops::Range<usize>; type Span = ops::Range<usize>;
type TokenSpan<'a> = (Token<'a>, Span); type TokenSpan<'a> = (Token<'a>, Span);
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum NumberType {
Sint,
Uint,
Float,
}
#[derive(Copy, Clone, Debug, PartialEq)] #[derive(Copy, Clone, Debug, PartialEq)]
pub enum Token<'a> { pub enum Token<'a> {
Separator(char), Separator(char),
Paren(char), Paren(char),
Attribute, Attribute,
Number { value: &'a str, ty: NumberType }, Number(Result<Number, NumberError>),
Word(&'a str), Word(&'a str),
Operation(char), Operation(char),
LogicalOperation(char), LogicalOperation(char),
@ -74,14 +60,18 @@ pub enum Token<'a> {
End, End,
} }
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum NumberType {
I32,
U32,
F32,
}
#[derive(Copy, Clone, Debug, PartialEq)] #[derive(Copy, Clone, Debug, PartialEq)]
pub enum ExpectedToken<'a> { pub enum ExpectedToken<'a> {
Token(Token<'a>), Token(Token<'a>),
Identifier, Identifier,
Number { Number(NumberType),
ty: Option<NumberType>,
width: Option<Bytes>,
},
Integer, Integer,
Constant, Constant,
/// Expected: constant, parenthesized expression, identifier /// Expected: constant, parenthesized expression, identifier
@ -100,36 +90,25 @@ pub enum ExpectedToken<'a> {
GlobalItem, GlobalItem,
} }
#[derive(Clone, Debug, Error)] #[derive(Clone, Copy, Debug, Error, PartialEq)]
pub enum BadIntError { pub enum NumberError {
#[error(transparent)] #[error("invalid numeric literal format")]
ParseIntError(#[from] ParseIntError), Invalid,
#[error("non-hex negative zero integer literals are not allowed")] #[error("numeric literal not representable by target type")]
NegativeZero, NotRepresentable,
#[error("leading zeros for non-hex integer literals are not allowed")] #[error("unimplemented f16 type")]
LeadingZeros, UnimplementedF16,
}
#[derive(Clone, Debug, Error)]
pub enum BadFloatError {
#[error(transparent)]
ParseFloatError(#[from] ParseFloatError),
#[error(transparent)]
ParseHexfError(#[from] ParseHexfError),
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub enum Error<'a> { pub enum Error<'a> {
Unexpected(TokenSpan<'a>, ExpectedToken<'a>), Unexpected(TokenSpan<'a>, ExpectedToken<'a>),
UnexpectedComponents(Span), UnexpectedComponents(Span),
BadU32(Span, BadIntError), BadNumber(Span, NumberError),
BadI32(Span, BadIntError),
/// A negative signed integer literal where both signed and unsigned, /// A negative signed integer literal where both signed and unsigned,
/// but only non-negative literals are allowed. /// but only non-negative literals are allowed.
NegativeInt(Span), NegativeInt(Span),
BadFloat(Span, BadFloatError),
BadU32Constant(Span), BadU32Constant(Span),
BadScalarWidth(Span, Bytes),
BadMatrixScalarKind(Span, crate::ScalarKind, u8), BadMatrixScalarKind(Span, crate::ScalarKind, u8),
BadAccessor(Span), BadAccessor(Span),
BadTexture(Span), BadTexture(Span),
@ -146,7 +125,9 @@ pub enum Error<'a> {
BadIncrDecrReferenceType(Span), BadIncrDecrReferenceType(Span),
InvalidResolve(ResolveError), InvalidResolve(ResolveError),
InvalidForInitializer(Span), InvalidForInitializer(Span),
InvalidGatherComponent(Span, i32), /// A break if appeared outside of a continuing block
InvalidBreakIf(Span),
InvalidGatherComponent(Span, u32),
InvalidConstructorComponentType(Span, i32), InvalidConstructorComponentType(Span, i32),
InvalidIdentifierUnderscore(Span), InvalidIdentifierUnderscore(Span),
ReservedIdentifierPrefix(Span), ReservedIdentifierPrefix(Span),
@ -160,7 +141,9 @@ pub enum Error<'a> {
UnknownType(Span), UnknownType(Span),
UnknownStorageFormat(Span), UnknownStorageFormat(Span),
UnknownConservativeDepth(Span), UnknownConservativeDepth(Span),
ZeroSizeOrAlign(Span), SizeAttributeTooLow(Span, u32),
AlignAttributeTooLow(Span, Alignment),
NonPowerOfTwoAlignAttribute(Span),
InconsistentBinding(Span), InconsistentBinding(Span),
UnknownLocalFunction(Span), UnknownLocalFunction(Span),
TypeNotConstructible(Span), TypeNotConstructible(Span),
@ -191,9 +174,7 @@ impl<'a> Error<'a> {
Token::Separator(c) => format!("'{}'", c), Token::Separator(c) => format!("'{}'", c),
Token::Paren(c) => format!("'{}'", c), Token::Paren(c) => format!("'{}'", c),
Token::Attribute => "@".to_string(), Token::Attribute => "@".to_string(),
Token::Number { value, .. } => { Token::Number(_) => "number".to_string(),
format!("number ({})", value)
}
Token::Word(s) => s.to_string(), Token::Word(s) => s.to_string(),
Token::Operation(c) => format!("operation ('{}')", c), Token::Operation(c) => format!("operation ('{}')", c),
Token::LogicalOperation(c) => format!("logical operation ('{}')", c), Token::LogicalOperation(c) => format!("logical operation ('{}')", c),
@ -209,25 +190,12 @@ impl<'a> Error<'a> {
} }
} }
ExpectedToken::Identifier => "identifier".to_string(), ExpectedToken::Identifier => "identifier".to_string(),
ExpectedToken::Number { ty, width } => { ExpectedToken::Number(ty) => {
let literal_ty_str = match ty { match ty {
Some(NumberType::Float) => "floating-point", NumberType::I32 => "32-bit signed integer literal",
Some(NumberType::Uint) => "unsigned integer", NumberType::U32 => "32-bit unsigned integer literal",
Some(NumberType::Sint) => "signed integer", NumberType::F32 => "32-bit floating-point literal",
None => "arbitrary number", }.to_string()
};
if let Some(width) = width {
format!(
"{} literal of {}-bit width",
literal_ty_str,
width as u32 * 8,
)
} else {
format!(
"{} literal of arbitrary width",
literal_ty_str,
)
}
}, },
ExpectedToken::Integer => "unsigned/signed integer literal".to_string(), ExpectedToken::Integer => "unsigned/signed integer literal".to_string(),
ExpectedToken::Constant => "constant".to_string(), ExpectedToken::Constant => "constant".to_string(),
@ -257,21 +225,13 @@ impl<'a> Error<'a> {
labels: vec![(bad_span.clone(), "unexpected components".into())], labels: vec![(bad_span.clone(), "unexpected components".into())],
notes: vec![], notes: vec![],
}, },
Error::BadU32(ref bad_span, ref err) => ParseError { Error::BadNumber(ref bad_span, ref err) => ParseError {
message: format!( message: format!(
"expected unsigned integer literal, found `{}`", "{}: `{}`",
&source[bad_span.clone()], err,&source[bad_span.clone()],
), ),
labels: vec![(bad_span.clone(), "expected unsigned integer".into())], labels: vec![(bad_span.clone(), err.to_string().into())],
notes: vec![err.to_string()], notes: vec![],
},
Error::BadI32(ref bad_span, ref err) => ParseError {
message: format!(
"expected integer literal, found `{}`",
&source[bad_span.clone()],
),
labels: vec![(bad_span.clone(), "expected signed integer".into())],
notes: vec![err.to_string()],
}, },
Error::NegativeInt(ref bad_span) => ParseError { Error::NegativeInt(ref bad_span) => ParseError {
message: format!( message: format!(
@ -281,14 +241,6 @@ impl<'a> Error<'a> {
labels: vec![(bad_span.clone(), "expected non-negative integer".into())], labels: vec![(bad_span.clone(), "expected non-negative integer".into())],
notes: vec![], notes: vec![],
}, },
Error::BadFloat(ref bad_span, ref err) => ParseError {
message: format!(
"expected floating-point literal, found `{}`",
&source[bad_span.clone()],
),
labels: vec![(bad_span.clone(), "expected floating-point literal".into())],
notes: vec![err.to_string()],
},
Error::BadU32Constant(ref bad_span) => ParseError { Error::BadU32Constant(ref bad_span) => ParseError {
message: format!( message: format!(
"expected unsigned integer constant expression, found `{}`", "expected unsigned integer constant expression, found `{}`",
@ -297,11 +249,6 @@ impl<'a> Error<'a> {
labels: vec![(bad_span.clone(), "expected unsigned integer".into())], labels: vec![(bad_span.clone(), "expected unsigned integer".into())],
notes: vec![], notes: vec![],
}, },
Error::BadScalarWidth(ref bad_span, width) => ParseError {
message: format!("invalid width of `{}` bits for literal", width as u32 * 8,),
labels: vec![(bad_span.clone(), "invalid width".into())],
notes: vec!["the only valid width is 32 for now".to_string()],
},
Error::BadMatrixScalarKind( Error::BadMatrixScalarKind(
ref span, ref span,
kind, kind,
@ -362,6 +309,11 @@ impl<'a> Error<'a> {
labels: vec![(bad_span.clone(), "not an assignment or function call".into())], labels: vec![(bad_span.clone(), "not an assignment or function call".into())],
notes: vec![], notes: vec![],
}, },
Error::InvalidBreakIf(ref bad_span) => ParseError {
message: "A break if is only allowed in a continuing block".to_string(),
labels: vec![(bad_span.clone(), "not in a continuing block".into())],
notes: vec![],
},
Error::InvalidGatherComponent(ref bad_span, component) => ParseError { Error::InvalidGatherComponent(ref bad_span, component) => ParseError {
message: format!("textureGather component {} doesn't exist, must be 0, 1, 2, or 3", component), message: format!("textureGather component {} doesn't exist, must be 0, 1, 2, or 3", component),
labels: vec![(bad_span.clone(), "invalid component".into())], labels: vec![(bad_span.clone(), "invalid component".into())],
@ -422,9 +374,19 @@ impl<'a> Error<'a> {
labels: vec![(bad_span.clone(), "unknown type".into())], labels: vec![(bad_span.clone(), "unknown type".into())],
notes: vec![], notes: vec![],
}, },
Error::ZeroSizeOrAlign(ref bad_span) => ParseError { Error::SizeAttributeTooLow(ref bad_span, min_size) => ParseError {
message: "struct member size or alignment must not be 0".to_string(), message: format!("struct member size must be at least {}", min_size),
labels: vec![(bad_span.clone(), "struct member size or alignment must not be 0".into())], labels: vec![(bad_span.clone(), format!("must be at least {}", min_size).into())],
notes: vec![],
},
Error::AlignAttributeTooLow(ref bad_span, min_align) => ParseError {
message: format!("struct member alignment must be at least {}", min_align),
labels: vec![(bad_span.clone(), format!("must be at least {}", min_align).into())],
notes: vec![],
},
Error::NonPowerOfTwoAlignAttribute(ref bad_span) => ParseError {
message: "struct member alignment must be a power of 2".to_string(),
labels: vec![(bad_span.clone(), "must be a power of 2".into())],
notes: vec![], notes: vec![],
}, },
Error::InconsistentBinding(ref span) => ParseError { Error::InconsistentBinding(ref span) => ParseError {
@ -1193,8 +1155,8 @@ impl Composition {
#[derive(Default)] #[derive(Default)]
struct TypeAttributes { struct TypeAttributes {
// Although WGSL has no type attributes at the moment, it had them in the past // (`[[stride]]`) and may as well acquire some again in the future.
// (`[[stride]]`) and may as well acquire some again in the future. // (`[[stride]]`) and may as well acquire some again in the future.
// Therefore, we are leaving the plumbing in for now. // Therefore, we are leaving the plumbing in for now.
} }
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
@ -1234,7 +1196,7 @@ impl BindingParser {
match name { match name {
"location" => { "location" => {
lexer.expect(Token::Paren('('))?; lexer.expect(Token::Paren('('))?;
self.location = Some(parse_non_negative_sint_literal(lexer, 4)?); self.location = Some(Parser::parse_non_negative_i32_literal(lexer)?);
lexer.expect(Token::Paren(')'))?; lexer.expect(Token::Paren(')'))?;
} }
"builtin" => { "builtin" => {
@ -1365,19 +1327,11 @@ impl ParseError {
writer.into_string() writer.into_string()
} }
/// Returns the 1-based line number and column of the first label in the /// Returns a [`SourceLocation`] for the first label in the error message.
/// error message. pub fn location(&self, source: &str) -> Option<SourceLocation> {
pub fn location(&self, source: &str) -> (usize, usize) { self.labels
let files = SimpleFile::new("wgsl", source); .get(0)
match self.labels.get(0) { .map(|label| NagaSpan::new(label.0.start as u32, label.0.end as u32).location(source))
Some(label) => {
let location = files
.location((), label.0.start)
.expect("invalid span location");
(location.line_number, location.column_number)
}
None => (1, 1),
}
} }
} }
@ -1431,38 +1385,45 @@ impl Parser {
lexer.span_from(initial) lexer.span_from(initial)
} }
fn get_constant_inner<'a>(
word: &'a str,
ty: NumberType,
token_span: TokenSpan<'a>,
) -> Result<ConstantInner, Error<'a>> {
let span = token_span.1;
let value = match ty {
NumberType::Sint => {
get_i32_literal(word, span).map(|val| crate::ScalarValue::Sint(val as i64))?
}
NumberType::Uint => {
get_u32_literal(word, span).map(|val| crate::ScalarValue::Uint(val as u64))?
}
NumberType::Float => {
get_f32_literal(word, span).map(|val| crate::ScalarValue::Float(val as f64))?
}
};
Ok(crate::ConstantInner::Scalar { value, width: 4 })
}
fn parse_switch_value<'a>(lexer: &mut Lexer<'a>, uint: bool) -> Result<i32, Error<'a>> { fn parse_switch_value<'a>(lexer: &mut Lexer<'a>, uint: bool) -> Result<i32, Error<'a>> {
let token_span = lexer.next(); let token_span = lexer.next();
let word = match token_span.0 { match token_span.0 {
Token::Number { value, .. } => value, Token::Number(Ok(Number::U32(num))) if uint => Ok(num as i32),
_ => return Err(Error::Unexpected(token_span, ExpectedToken::Integer)), Token::Number(Ok(Number::I32(num))) if !uint => Ok(num),
}; Token::Number(Err(e)) => Err(Error::BadNumber(token_span.1, e)),
_ => Err(Error::Unexpected(token_span, ExpectedToken::Integer)),
}
}
match uint { /// Parse a non-negative signed integer literal.
true => get_u32_literal(word, token_span.1).map(|v| v as i32), /// This is for attributes like `size`, `location` and others.
false => get_i32_literal(word, token_span.1), fn parse_non_negative_i32_literal<'a>(lexer: &mut Lexer<'a>) -> Result<u32, Error<'a>> {
match lexer.next() {
(Token::Number(Ok(Number::I32(num))), span) => {
u32::try_from(num).map_err(|_| Error::NegativeInt(span))
}
(Token::Number(Err(e)), span) => Err(Error::BadNumber(span, e)),
other => Err(Error::Unexpected(
other,
ExpectedToken::Number(NumberType::I32),
)),
}
}
/// Parse a non-negative integer literal that may be either signed or unsigned.
/// This is for the `workgroup_size` attribute and array lengths.
/// Note: these values should be no larger than [`i32::MAX`], but this is not checked here.
fn parse_generic_non_negative_int_literal<'a>(lexer: &mut Lexer<'a>) -> Result<u32, Error<'a>> {
match lexer.next() {
(Token::Number(Ok(Number::I32(num))), span) => {
u32::try_from(num).map_err(|_| Error::NegativeInt(span))
}
(Token::Number(Ok(Number::U32(num))), _) => Ok(num),
(Token::Number(Err(e)), span) => Err(Error::BadNumber(span, e)),
other => Err(Error::Unexpected(
other,
ExpectedToken::Number(NumberType::I32),
)),
} }
} }
@ -1615,7 +1576,7 @@ impl Parser {
"bitcast" => { "bitcast" => {
let _ = lexer.next(); let _ = lexer.next();
lexer.expect_generic_paren('<')?; lexer.expect_generic_paren('<')?;
let ((ty, _access), type_span) = lexer.capture_span(|lexer| { let (ty, type_span) = lexer.capture_span(|lexer| {
self.parse_type_decl(lexer, None, ctx.types, ctx.constants) self.parse_type_decl(lexer, None, ctx.types, ctx.constants)
})?; })?;
lexer.expect_generic_paren('>')?; lexer.expect_generic_paren('>')?;
@ -2006,17 +1967,9 @@ impl Parser {
"textureGather" => { "textureGather" => {
let _ = lexer.next(); let _ = lexer.next();
lexer.open_arguments()?; lexer.open_arguments()?;
let component = if let ( let component = if let (Token::Number(..), span) = lexer.peek() {
Token::Number { let index = Self::parse_non_negative_i32_literal(lexer)?;
value,
ty: NumberType::Sint,
},
span,
) = lexer.peek()
{
let _ = lexer.next();
lexer.expect(Token::Separator(','))?; lexer.expect(Token::Separator(','))?;
let index = get_i32_literal(value, span.clone())?;
*crate::SwizzleComponent::XYZW *crate::SwizzleComponent::XYZW
.get(index as usize) .get(index as usize)
.ok_or(Error::InvalidGatherComponent(span, index))? .ok_or(Error::InvalidGatherComponent(span, index))?
@ -2219,9 +2172,22 @@ impl Parser {
let inner = match first_token_span { let inner = match first_token_span {
(Token::Word("true"), _) => crate::ConstantInner::boolean(true), (Token::Word("true"), _) => crate::ConstantInner::boolean(true),
(Token::Word("false"), _) => crate::ConstantInner::boolean(false), (Token::Word("false"), _) => crate::ConstantInner::boolean(false),
(Token::Number { value, ty }, _) => { (Token::Number(num), _) => match num {
Self::get_constant_inner(value, ty, first_token_span)? Ok(Number::I32(num)) => crate::ConstantInner::Scalar {
} value: crate::ScalarValue::Sint(num as i64),
width: 4,
},
Ok(Number::U32(num)) => crate::ConstantInner::Scalar {
value: crate::ScalarValue::Uint(num as u64),
width: 4,
},
Ok(Number::F32(num)) => crate::ConstantInner::Scalar {
value: crate::ScalarValue::Float(num as f64),
width: 4,
},
Ok(Number::AbstractInt(_) | Number::AbstractFloat(_)) => unreachable!(),
Err(e) => return Err(Error::BadNumber(first_token_span.1, e)),
},
(Token::Word(name), name_span) => { (Token::Word(name), name_span) => {
// look for an existing constant first // look for an existing constant first
for (handle, var) in const_arena.iter() { for (handle, var) in const_arena.iter() {
@ -2312,10 +2278,8 @@ impl Parser {
self.pop_scope(lexer); self.pop_scope(lexer);
expr expr
} }
token @ (Token::Word("true" | "false") | Token::Number { .. }, _) => { (Token::Word("true" | "false") | Token::Number(..), _) => {
let _ = lexer.next(); let const_handle = self.parse_const_expression(lexer, ctx.types, ctx.constants)?;
let const_handle =
self.parse_const_expression_impl(token, lexer, None, ctx.types, ctx.constants)?;
let span = NagaSpan::from(self.pop_scope(lexer)); let span = NagaSpan::from(self.pop_scope(lexer));
TypedExpression::non_reference( TypedExpression::non_reference(
ctx.interrupt_emitter(crate::Expression::Constant(const_handle), span), ctx.interrupt_emitter(crate::Expression::Constant(const_handle), span),
@ -2781,11 +2745,11 @@ impl Parser {
lexer: &mut Lexer<'a>, lexer: &mut Lexer<'a>,
type_arena: &mut UniqueArena<crate::Type>, type_arena: &mut UniqueArena<crate::Type>,
const_arena: &mut Arena<crate::Constant>, const_arena: &mut Arena<crate::Constant>,
) -> Result<(&'a str, Span, Handle<crate::Type>, crate::StorageAccess), Error<'a>> { ) -> Result<(&'a str, Span, Handle<crate::Type>), Error<'a>> {
let (name, name_span) = lexer.next_ident_with_span()?; let (name, name_span) = lexer.next_ident_with_span()?;
lexer.expect(Token::Separator(':'))?; lexer.expect(Token::Separator(':'))?;
let (ty, access) = self.parse_type_decl(lexer, None, type_arena, const_arena)?; let ty = self.parse_type_decl(lexer, None, type_arena, const_arena)?;
Ok((name, name_span, ty, access)) Ok((name, name_span, ty))
} }
fn parse_variable_decl<'a>( fn parse_variable_decl<'a>(
@ -2815,7 +2779,7 @@ impl Parser {
} }
let name = lexer.next_ident()?; let name = lexer.next_ident()?;
lexer.expect(Token::Separator(':'))?; lexer.expect(Token::Separator(':'))?;
let (ty, _access) = self.parse_type_decl(lexer, None, type_arena, const_arena)?; let ty = self.parse_type_decl(lexer, None, type_arena, const_arena)?;
let init = if lexer.skip(Token::Operation('=')) { let init = if lexer.skip(Token::Operation('=')) {
let handle = self.parse_const_expression(lexer, type_arena, const_arena)?; let handle = self.parse_const_expression(lexer, type_arena, const_arena)?;
@ -2841,7 +2805,7 @@ impl Parser {
const_arena: &mut Arena<crate::Constant>, const_arena: &mut Arena<crate::Constant>,
) -> Result<(Vec<crate::StructMember>, u32), Error<'a>> { ) -> Result<(Vec<crate::StructMember>, u32), Error<'a>> {
let mut offset = 0; let mut offset = 0;
let mut alignment = Alignment::new(1).unwrap(); let mut struct_alignment = Alignment::ONE;
let mut members = Vec::new(); let mut members = Vec::new();
lexer.expect(Token::Paren('{'))?; lexer.expect(Token::Paren('{'))?;
@ -2853,30 +2817,32 @@ impl Parser {
ExpectedToken::Token(Token::Separator(',')), ExpectedToken::Token(Token::Separator(',')),
)); ));
} }
let (mut size, mut align) = (None, None); let (mut size_attr, mut align_attr) = (None, None);
self.push_scope(Scope::Attribute, lexer); self.push_scope(Scope::Attribute, lexer);
let mut bind_parser = BindingParser::default(); let mut bind_parser = BindingParser::default();
while lexer.skip(Token::Attribute) { while lexer.skip(Token::Attribute) {
match lexer.next_ident_with_span()? { match lexer.next_ident_with_span()? {
("size", _) => { ("size", _) => {
lexer.expect(Token::Paren('('))?; lexer.expect(Token::Paren('('))?;
let (value, span) = lexer let (value, span) =
.capture_span(|lexer| parse_non_negative_sint_literal(lexer, 4))?; lexer.capture_span(Self::parse_non_negative_i32_literal)?;
lexer.expect(Token::Paren(')'))?; lexer.expect(Token::Paren(')'))?;
size = Some(NonZeroU32::new(value).ok_or(Error::ZeroSizeOrAlign(span))?); size_attr = Some((value, span));
} }
("align", _) => { ("align", _) => {
lexer.expect(Token::Paren('('))?; lexer.expect(Token::Paren('('))?;
let (value, span) = lexer let (value, span) =
.capture_span(|lexer| parse_non_negative_sint_literal(lexer, 4))?; lexer.capture_span(Self::parse_non_negative_i32_literal)?;
lexer.expect(Token::Paren(')'))?; lexer.expect(Token::Paren(')'))?;
align = Some(NonZeroU32::new(value).ok_or(Error::ZeroSizeOrAlign(span))?); align_attr = Some((value, span));
} }
(word, word_span) => bind_parser.parse(lexer, word, word_span)?, (word, word_span) => bind_parser.parse(lexer, word, word_span)?,
} }
} }
let bind_span = self.pop_scope(lexer); let bind_span = self.pop_scope(lexer);
let mut binding = bind_parser.finish(bind_span)?;
let (name, span) = match lexer.next() { let (name, span) = match lexer.next() {
(Token::Word(word), span) => (word, span), (Token::Word(word), span) => (word, span),
other => return Err(Error::Unexpected(other, ExpectedToken::FieldName)), other => return Err(Error::Unexpected(other, ExpectedToken::FieldName)),
@ -2885,29 +2851,57 @@ impl Parser {
return Err(Error::ReservedKeyword(span)); return Err(Error::ReservedKeyword(span));
} }
lexer.expect(Token::Separator(':'))?; lexer.expect(Token::Separator(':'))?;
let (ty, _access) = self.parse_type_decl(lexer, None, type_arena, const_arena)?; let ty = self.parse_type_decl(lexer, None, type_arena, const_arena)?;
ready = lexer.skip(Token::Separator(',')); ready = lexer.skip(Token::Separator(','));
self.layouter.update(type_arena, const_arena).unwrap(); self.layouter.update(type_arena, const_arena).unwrap();
let (range, align) = self.layouter.member_placement(offset, ty, align, size); let member_min_size = self.layouter[ty].size;
alignment = alignment.max(align); let member_min_alignment = self.layouter[ty].alignment;
offset = range.end;
let member_size = if let Some((size, span)) = size_attr {
if size < member_min_size {
return Err(Error::SizeAttributeTooLow(span, member_min_size));
} else {
size
}
} else {
member_min_size
};
let member_alignment = if let Some((align, span)) = align_attr {
if let Some(alignment) = Alignment::new(align) {
if alignment < member_min_alignment {
return Err(Error::AlignAttributeTooLow(span, member_min_alignment));
} else {
alignment
}
} else {
return Err(Error::NonPowerOfTwoAlignAttribute(span));
}
} else {
member_min_alignment
};
offset = member_alignment.round_up(offset);
struct_alignment = struct_alignment.max(member_alignment);
let mut binding = bind_parser.finish(bind_span)?;
if let Some(ref mut binding) = binding { if let Some(ref mut binding) = binding {
binding.apply_default_interpolation(&type_arena[ty].inner); binding.apply_default_interpolation(&type_arena[ty].inner);
} }
members.push(crate::StructMember { members.push(crate::StructMember {
name: Some(name.to_owned()), name: Some(name.to_owned()),
ty, ty,
binding, binding,
offset: range.start, offset,
}); });
offset += member_size;
} }
let span = Layouter::round_up(alignment, offset); let struct_size = struct_alignment.round_up(offset);
Ok((members, span)) Ok((members, struct_size))
} }
fn parse_matrix_scalar_type<'a>( fn parse_matrix_scalar_type<'a>(
@ -3012,7 +3006,7 @@ impl Parser {
let (ident, span) = lexer.next_ident_with_span()?; let (ident, span) = lexer.next_ident_with_span()?;
let mut space = conv::map_address_space(ident, span)?; let mut space = conv::map_address_space(ident, span)?;
lexer.expect(Token::Separator(','))?; lexer.expect(Token::Separator(','))?;
let (base, _access) = self.parse_type_decl(lexer, None, type_arena, const_arena)?; let base = self.parse_type_decl(lexer, None, type_arena, const_arena)?;
if let crate::AddressSpace::Storage { ref mut access } = space { if let crate::AddressSpace::Storage { ref mut access } = space {
*access = if lexer.skip(Token::Separator(',')) { *access = if lexer.skip(Token::Separator(',')) {
lexer.next_storage_access()? lexer.next_storage_access()?
@ -3025,7 +3019,7 @@ impl Parser {
} }
"array" => { "array" => {
lexer.expect_generic_paren('<')?; lexer.expect_generic_paren('<')?;
let (base, _access) = self.parse_type_decl(lexer, None, type_arena, const_arena)?; let base = self.parse_type_decl(lexer, None, type_arena, const_arena)?;
let size = if lexer.skip(Token::Separator(',')) { let size = if lexer.skip(Token::Separator(',')) {
let const_handle = let const_handle =
self.parse_const_expression(lexer, type_arena, const_arena)?; self.parse_const_expression(lexer, type_arena, const_arena)?;
@ -3043,7 +3037,7 @@ impl Parser {
} }
"binding_array" => { "binding_array" => {
lexer.expect_generic_paren('<')?; lexer.expect_generic_paren('<')?;
let (base, _access) = self.parse_type_decl(lexer, None, type_arena, const_arena)?; let base = self.parse_type_decl(lexer, None, type_arena, const_arena)?;
let size = if lexer.skip(Token::Separator(',')) { let size = if lexer.skip(Token::Separator(',')) {
let const_handle = let const_handle =
self.parse_const_expression(lexer, type_arena, const_arena)?; self.parse_const_expression(lexer, type_arena, const_arena)?;
@ -3258,7 +3252,7 @@ impl Parser {
debug_name: Option<&'a str>, debug_name: Option<&'a str>,
type_arena: &mut UniqueArena<crate::Type>, type_arena: &mut UniqueArena<crate::Type>,
const_arena: &mut Arena<crate::Constant>, const_arena: &mut Arena<crate::Constant>,
) -> Result<(Handle<crate::Type>, crate::StorageAccess), Error<'a>> { ) -> Result<Handle<crate::Type>, Error<'a>> {
self.push_scope(Scope::TypeDecl, lexer); self.push_scope(Scope::TypeDecl, lexer);
let attribute = TypeAttributes::default(); let attribute = TypeAttributes::default();
@ -3267,7 +3261,6 @@ impl Parser {
return Err(Error::Unexpected(other, ExpectedToken::TypeAttribute)); return Err(Error::Unexpected(other, ExpectedToken::TypeAttribute));
} }
let storage_access = crate::StorageAccess::default();
let (name, name_span) = lexer.next_ident_with_span()?; let (name, name_span) = lexer.next_ident_with_span()?;
let handle = self.parse_type_decl_name( let handle = self.parse_type_decl_name(
lexer, lexer,
@ -3282,7 +3275,7 @@ impl Parser {
// Only set span if it's the first occurrence of the type. // Only set span if it's the first occurrence of the type.
// Type spans therefore should only be used for errors in type declarations; // Type spans therefore should only be used for errors in type declarations;
// use variable spans/expression spans/etc. otherwise // use variable spans/expression spans/etc. otherwise
Ok((handle, storage_access)) Ok(handle)
} }
/// Parse an assignment statement (will also parse increment and decrement statements) /// Parse an assignment statement (will also parse increment and decrement statements)
@ -3510,7 +3503,7 @@ impl Parser {
return Err(Error::ReservedKeyword(name_span)); return Err(Error::ReservedKeyword(name_span));
} }
let given_ty = if lexer.skip(Token::Separator(':')) { let given_ty = if lexer.skip(Token::Separator(':')) {
let (ty, _access) = self.parse_type_decl( let ty = self.parse_type_decl(
lexer, lexer,
None, None,
context.types, context.types,
@ -3571,7 +3564,7 @@ impl Parser {
return Err(Error::ReservedKeyword(name_span)); return Err(Error::ReservedKeyword(name_span));
} }
let given_ty = if lexer.skip(Token::Separator(':')) { let given_ty = if lexer.skip(Token::Separator(':')) {
let (ty, _access) = self.parse_type_decl( let ty = self.parse_type_decl(
lexer, lexer,
None, None,
context.types, context.types,
@ -3825,26 +3818,7 @@ impl Parser {
Some(crate::Statement::Switch { selector, cases }) Some(crate::Statement::Switch { selector, cases })
} }
"loop" => { "loop" => Some(self.parse_loop(lexer, context.reborrow(), &mut emitter)?),
let _ = lexer.next();
let mut body = crate::Block::new();
let mut continuing = crate::Block::new();
lexer.expect(Token::Paren('{'))?;
loop {
if lexer.skip(Token::Word("continuing")) {
continuing = self.parse_block(lexer, context.reborrow(), false)?;
lexer.expect(Token::Paren('}'))?;
break;
}
if lexer.skip(Token::Paren('}')) {
break;
}
self.parse_statement(lexer, context.reborrow(), &mut body, false)?;
}
Some(crate::Statement::Loop { body, continuing })
}
"while" => { "while" => {
let _ = lexer.next(); let _ = lexer.next();
let mut body = crate::Block::new(); let mut body = crate::Block::new();
@ -3877,6 +3851,7 @@ impl Parser {
Some(crate::Statement::Loop { Some(crate::Statement::Loop {
body, body,
continuing: crate::Block::new(), continuing: crate::Block::new(),
break_if: None,
}) })
} }
"for" => { "for" => {
@ -3949,10 +3924,22 @@ impl Parser {
self.parse_statement(lexer, context.reborrow(), &mut body, false)?; self.parse_statement(lexer, context.reborrow(), &mut body, false)?;
} }
Some(crate::Statement::Loop { body, continuing }) Some(crate::Statement::Loop {
body,
continuing,
break_if: None,
})
} }
"break" => { "break" => {
let _ = lexer.next(); let (_, mut span) = lexer.next();
// Check if the next token is an `if`; this indicates
// that the user tried to type out a `break if`, which
// is illegal in this position.
let (peeked_token, peeked_span) = lexer.peek();
if let Token::Word("if") = peeked_token {
span.end = peeked_span.end;
return Err(Error::InvalidBreakIf(span));
}
Some(crate::Statement::Break) Some(crate::Statement::Break)
} }
"continue" => { "continue" => {
@ -4055,6 +4042,84 @@ impl Parser {
Ok(()) Ok(())
} }
fn parse_loop<'a>(
&mut self,
lexer: &mut Lexer<'a>,
mut context: StatementContext<'a, '_, '_>,
emitter: &mut super::Emitter,
) -> Result<crate::Statement, Error<'a>> {
let _ = lexer.next();
let mut body = crate::Block::new();
let mut continuing = crate::Block::new();
let mut break_if = None;
lexer.expect(Token::Paren('{'))?;
loop {
if lexer.skip(Token::Word("continuing")) {
// Branch for the `continuing` block; this must be
// the last thing in the loop body.
// Expect an opening brace to start the continuing block
lexer.expect(Token::Paren('{'))?;
loop {
if lexer.skip(Token::Word("break")) {
// Branch for the `break if` statement; it has the form
// `break if <expr>;` and must be the last statement
// in a continuing block
// The break must be followed by an `if` to form
// the break if
lexer.expect(Token::Word("if"))?;
// Start the emitter to begin parsing an expression
emitter.start(context.expressions);
let condition = self.parse_general_expression(
lexer,
context.as_expression(&mut body, emitter),
)?;
// Add all emits to the continuing body
continuing.extend(emitter.finish(context.expressions));
// Set the condition of the break if to the newly parsed
// expression
break_if = Some(condition);
// Expect a semicolon to close the statement
lexer.expect(Token::Separator(';'))?;
// Expect a closing brace to close the continuing block,
// since the break if must be the last statement
lexer.expect(Token::Paren('}'))?;
// Stop parsing the continuing block
break;
} else if lexer.skip(Token::Paren('}')) {
// If we encounter a closing brace it means we have reached
// the end of the continuing block and should stop processing
break;
} else {
// Otherwise try to parse a statement
self.parse_statement(lexer, context.reborrow(), &mut continuing, false)?;
}
}
// Since the continuing block must be the last part of the loop body,
// we expect to see a closing brace to end the loop body
lexer.expect(Token::Paren('}'))?;
break;
}
if lexer.skip(Token::Paren('}')) {
// If we encounter a closing brace it means we have reached
// the end of the loop body and should stop processing
break;
}
// Otherwise try to parse a statement
self.parse_statement(lexer, context.reborrow(), &mut body, false)?;
}
Ok(crate::Statement::Loop {
body,
continuing,
break_if,
})
}
fn parse_block<'a>( fn parse_block<'a>(
&mut self, &mut self,
lexer: &mut Lexer<'a>, lexer: &mut Lexer<'a>,
@ -4146,7 +4211,7 @@ impl Parser {
)); ));
} }
let mut binding = self.parse_varying_binding(lexer)?; let mut binding = self.parse_varying_binding(lexer)?;
let (param_name, param_name_span, param_type, _access) = let (param_name, param_name_span, param_type) =
self.parse_variable_ident_decl(lexer, &mut module.types, &mut module.constants)?; self.parse_variable_ident_decl(lexer, &mut module.types, &mut module.constants)?;
if crate::keywords::wgsl::RESERVED.contains(&param_name) { if crate::keywords::wgsl::RESERVED.contains(&param_name) {
return Err(Error::ReservedKeyword(param_name_span)); return Err(Error::ReservedKeyword(param_name_span));
@ -4176,8 +4241,7 @@ impl Parser {
// read return type // read return type
let result = if lexer.skip(Token::Arrow) && !lexer.skip(Token::Word("void")) { let result = if lexer.skip(Token::Arrow) && !lexer.skip(Token::Word("void")) {
let mut binding = self.parse_varying_binding(lexer)?; let mut binding = self.parse_varying_binding(lexer)?;
let (ty, _access) = let ty = self.parse_type_decl(lexer, None, &mut module.types, &mut module.constants)?;
self.parse_type_decl(lexer, None, &mut module.types, &mut module.constants)?;
if let Some(ref mut binding) = binding { if let Some(ref mut binding) = binding {
binding.apply_default_interpolation(&module.types[ty].inner); binding.apply_default_interpolation(&module.types[ty].inner);
} }
@ -4244,12 +4308,12 @@ impl Parser {
match lexer.next_ident_with_span()? { match lexer.next_ident_with_span()? {
("binding", _) => { ("binding", _) => {
lexer.expect(Token::Paren('('))?; lexer.expect(Token::Paren('('))?;
bind_index = Some(parse_non_negative_sint_literal(lexer, 4)?); bind_index = Some(Self::parse_non_negative_i32_literal(lexer)?);
lexer.expect(Token::Paren(')'))?; lexer.expect(Token::Paren(')'))?;
} }
("group", _) => { ("group", _) => {
lexer.expect(Token::Paren('('))?; lexer.expect(Token::Paren('('))?;
bind_group = Some(parse_non_negative_sint_literal(lexer, 4)?); bind_group = Some(Self::parse_non_negative_i32_literal(lexer)?);
lexer.expect(Token::Paren(')'))?; lexer.expect(Token::Paren(')'))?;
} }
("vertex", _) => { ("vertex", _) => {
@ -4263,8 +4327,9 @@ impl Parser {
} }
("workgroup_size", _) => { ("workgroup_size", _) => {
lexer.expect(Token::Paren('('))?; lexer.expect(Token::Paren('('))?;
workgroup_size = [1u32; 3];
for (i, size) in workgroup_size.iter_mut().enumerate() { for (i, size) in workgroup_size.iter_mut().enumerate() {
*size = parse_generic_non_negative_int_literal(lexer, 4)?; *size = Self::parse_generic_non_negative_int_literal(lexer)?;
match lexer.next() { match lexer.next() {
(Token::Paren(')'), _) => break, (Token::Paren(')'), _) => break,
(Token::Separator(','), _) if i != 2 => (), (Token::Separator(','), _) if i != 2 => (),
@ -4276,11 +4341,6 @@ impl Parser {
} }
} }
} }
for size in workgroup_size.iter_mut() {
if *size == 0 {
*size = 1;
}
}
} }
("early_depth_test", _) => { ("early_depth_test", _) => {
let conservative = if lexer.skip(Token::Paren('(')) { let conservative = if lexer.skip(Token::Paren('(')) {
@ -4334,7 +4394,7 @@ impl Parser {
(Token::Word("type"), _) => { (Token::Word("type"), _) => {
let name = lexer.next_ident()?; let name = lexer.next_ident()?;
lexer.expect(Token::Operation('='))?; lexer.expect(Token::Operation('='))?;
let (ty, _access) = self.parse_type_decl( let ty = self.parse_type_decl(
lexer, lexer,
Some(name), Some(name),
&mut module.types, &mut module.types,
@ -4358,7 +4418,7 @@ impl Parser {
}); });
} }
let given_ty = if lexer.skip(Token::Separator(':')) { let given_ty = if lexer.skip(Token::Separator(':')) {
let (ty, _access) = self.parse_type_decl( let ty = self.parse_type_decl(
lexer, lexer,
None, None,
&mut module.types, &mut module.types,

445
third_party/rust/naga/src/front/wgsl/number.rs vendored Normal file
View file

@ -0,0 +1,445 @@
use std::borrow::Cow;
use super::{NumberError, Token};
/// When using this type, assume no Abstract Int/Float for now
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Number {
/// Abstract Int (-2^63 ≤ i < 2^63)
AbstractInt(i64),
/// Abstract Float (IEEE-754 binary64)
AbstractFloat(f64),
/// Concrete i32
I32(i32),
/// Concrete u32
U32(u32),
/// Concrete f32
F32(f32),
}
impl Number {
/// Convert abstract numbers to a plausible concrete counterpart.
///
/// Return concrete numbers unchanged. If the conversion would be
/// lossy, return an error.
fn abstract_to_concrete(self) -> Result<Number, NumberError> {
match self {
Number::AbstractInt(num) => {
use std::convert::TryFrom;
i32::try_from(num)
.map(Number::I32)
.map_err(|_| NumberError::NotRepresentable)
}
Number::AbstractFloat(num) => {
let num = num as f32;
if num.is_finite() {
Ok(Number::F32(num))
} else {
Err(NumberError::NotRepresentable)
}
}
num => Ok(num),
}
}
}
// TODO: when implementing Creation-Time Expressions, remove the ability to match the minus sign
pub(super) fn consume_number(input: &str) -> (Token<'_>, &str) {
let (result, rest) = parse(input);
(
Token::Number(result.and_then(Number::abstract_to_concrete)),
rest,
)
}
enum Kind {
Int(IntKind),
Float(FloatKind),
}
enum IntKind {
I32,
U32,
}
enum FloatKind {
F32,
F16,
}
// The following regexes (from the WGSL spec) will be matched:
// int_literal:
// | / 0 [iu]? /
// | / [1-9][0-9]* [iu]? /
// | / 0[xX][0-9a-fA-F]+ [iu]? /
// decimal_float_literal:
// | / 0 [fh] /
// | / [1-9][0-9]* [fh] /
// | / [0-9]* \.[0-9]+ ([eE][+-]?[0-9]+)? [fh]? /
// | / [0-9]+ \.[0-9]* ([eE][+-]?[0-9]+)? [fh]? /
// | / [0-9]+ [eE][+-]?[0-9]+ [fh]? /
// hex_float_literal:
// | / 0[xX][0-9a-fA-F]* \.[0-9a-fA-F]+ ([pP][+-]?[0-9]+ [fh]?)? /
// | / 0[xX][0-9a-fA-F]+ \.[0-9a-fA-F]* ([pP][+-]?[0-9]+ [fh]?)? /
// | / 0[xX][0-9a-fA-F]+ [pP][+-]?[0-9]+ [fh]? /
// You could visualize the regex below via https://debuggex.com to get a rough idea what `parse` is doing
// -?(?:0[xX](?:([0-9a-fA-F]+\.[0-9a-fA-F]*|[0-9a-fA-F]*\.[0-9a-fA-F]+)(?:([pP][+-]?[0-9]+)([fh]?))?|([0-9a-fA-F]+)([pP][+-]?[0-9]+)([fh]?)|([0-9a-fA-F]+)([iu]?))|((?:[0-9]+[eE][+-]?[0-9]+|(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:[eE][+-]?[0-9]+)?))([fh]?)|((?:[0-9]|[1-9][0-9]+))([iufh]?))
fn parse(input: &str) -> (Result<Number, NumberError>, &str) {
/// returns `true` and consumes `X` bytes from the given byte buffer
/// if the given `X` patterns are found at the start of the buffer
macro_rules! consume {
($bytes:ident, $($($pattern:pat)|*),*) => {
match $bytes {
&[$($($pattern)|*),*, ref rest @ ..] => { $bytes = rest; true },
_ => false,
}
};
}
/// consumes one byte from the given byte buffer
/// if one of the given patterns is found at the start of the buffer,
/// returning the corresponding expr for the matched pattern
macro_rules! consume_map {
($bytes:ident, [$($($pattern:pat)|* => $to:expr),*]) => {
match $bytes {
$( &[$($pattern)|*, ref rest @ ..] => { $bytes = rest; Some($to) }, )*
_ => None,
}
};
}
/// consumes all consecutive bytes matched by the `0-9` pattern from the given byte buffer
/// returning the number of consumed bytes
macro_rules! consume_dec_digits {
($bytes:ident) => {{
let start_len = $bytes.len();
while let &[b'0'..=b'9', ref rest @ ..] = $bytes {
$bytes = rest;
}
start_len - $bytes.len()
}};
}
/// consumes all consecutive bytes matched by the `0-9 | a-f | A-F` pattern from the given byte buffer
/// returning the number of consumed bytes
macro_rules! consume_hex_digits {
($bytes:ident) => {{
let start_len = $bytes.len();
while let &[b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F', ref rest @ ..] = $bytes {
$bytes = rest;
}
start_len - $bytes.len()
}};
}
/// maps the given `&[u8]` (tail of the initial `input: &str`) to a `&str`
macro_rules! rest_to_str {
($bytes:ident) => {
&input[input.len() - $bytes.len()..]
};
}
struct ExtractSubStr<'a>(&'a str);
impl<'a> ExtractSubStr<'a> {
/// given an `input` and a `start` (tail of the `input`)
/// creates a new [ExtractSubStr]
fn start(input: &'a str, start: &'a [u8]) -> Self {
let start = input.len() - start.len();
Self(&input[start..])
}
/// given an `end` (tail of the initial `input`)
/// returns a substring of `input`
fn end(&self, end: &'a [u8]) -> &'a str {
let end = self.0.len() - end.len();
&self.0[..end]
}
}
let mut bytes = input.as_bytes();
let general_extract = ExtractSubStr::start(input, bytes);
let is_negative = consume!(bytes, b'-');
if consume!(bytes, b'0', b'x' | b'X') {
let digits_extract = ExtractSubStr::start(input, bytes);
let consumed = consume_hex_digits!(bytes);
if consume!(bytes, b'.') {
let consumed_after_period = consume_hex_digits!(bytes);
if consumed + consumed_after_period == 0 {
return (Err(NumberError::Invalid), rest_to_str!(bytes));
}
let significand = general_extract.end(bytes);
if consume!(bytes, b'p' | b'P') {
consume!(bytes, b'+' | b'-');
let consumed = consume_dec_digits!(bytes);
if consumed == 0 {
return (Err(NumberError::Invalid), rest_to_str!(bytes));
}
let number = general_extract.end(bytes);
let kind = consume_map!(bytes, [b'f' => FloatKind::F32, b'h' => FloatKind::F16]);
(parse_hex_float(number, kind), rest_to_str!(bytes))
} else {
(
parse_hex_float_missing_exponent(significand, None),
rest_to_str!(bytes),
)
}
} else {
if consumed == 0 {
return (Err(NumberError::Invalid), rest_to_str!(bytes));
}
let significand = general_extract.end(bytes);
let digits = digits_extract.end(bytes);
let exp_extract = ExtractSubStr::start(input, bytes);
if consume!(bytes, b'p' | b'P') {
consume!(bytes, b'+' | b'-');
let consumed = consume_dec_digits!(bytes);
if consumed == 0 {
return (Err(NumberError::Invalid), rest_to_str!(bytes));
}
let exponent = exp_extract.end(bytes);
let kind = consume_map!(bytes, [b'f' => FloatKind::F32, b'h' => FloatKind::F16]);
(
parse_hex_float_missing_period(significand, exponent, kind),
rest_to_str!(bytes),
)
} else {
let kind = consume_map!(bytes, [b'i' => IntKind::I32, b'u' => IntKind::U32]);
(
parse_hex_int(is_negative, digits, kind),
rest_to_str!(bytes),
)
}
}
} else {
let is_first_zero = bytes.first() == Some(&b'0');
let consumed = consume_dec_digits!(bytes);
if consume!(bytes, b'.') {
let consumed_after_period = consume_dec_digits!(bytes);
if consumed + consumed_after_period == 0 {
return (Err(NumberError::Invalid), rest_to_str!(bytes));
}
if consume!(bytes, b'e' | b'E') {
consume!(bytes, b'+' | b'-');
let consumed = consume_dec_digits!(bytes);
if consumed == 0 {
return (Err(NumberError::Invalid), rest_to_str!(bytes));
}
}
let number = general_extract.end(bytes);
let kind = consume_map!(bytes, [b'f' => FloatKind::F32, b'h' => FloatKind::F16]);
(parse_dec_float(number, kind), rest_to_str!(bytes))
} else {
if consumed == 0 {
return (Err(NumberError::Invalid), rest_to_str!(bytes));
}
if consume!(bytes, b'e' | b'E') {
consume!(bytes, b'+' | b'-');
let consumed = consume_dec_digits!(bytes);
if consumed == 0 {
return (Err(NumberError::Invalid), rest_to_str!(bytes));
}
let number = general_extract.end(bytes);
let kind = consume_map!(bytes, [b'f' => FloatKind::F32, b'h' => FloatKind::F16]);
(parse_dec_float(number, kind), rest_to_str!(bytes))
} else {
// make sure the multi-digit numbers don't start with zero
if consumed > 1 && is_first_zero {
return (Err(NumberError::Invalid), rest_to_str!(bytes));
}
let digits_with_sign = general_extract.end(bytes);
let kind = consume_map!(bytes, [
b'i' => Kind::Int(IntKind::I32),
b'u' => Kind::Int(IntKind::U32),
b'f' => Kind::Float(FloatKind::F32),
b'h' => Kind::Float(FloatKind::F16)
]);
(
parse_dec(is_negative, digits_with_sign, kind),
rest_to_str!(bytes),
)
}
}
}
}
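// Illustrative sketch (not part of the vendored crate): exercise `consume_number`
// on a few of the literal forms described by the grammar comments above. The
// unconsumed tail of the input is returned alongside the token.
#[cfg(test)]
mod literal_sketch {
    use super::*;
    #[test]
    fn literal_forms() {
        assert_eq!(
            consume_number("92;"),
            (Token::Number(Ok(Number::I32(92))), ";")
        );
        assert_eq!(
            consume_number("0x2a4D2u,"),
            (Token::Number(Ok(Number::U32(0x2a4D2))), ",")
        );
        assert_eq!(
            consume_number("2.5e2 "),
            (Token::Number(Ok(Number::F32(250.0))), " ")
        );
        // multi-digit decimals with a leading zero are rejected by the grammar
        assert_eq!(
            consume_number("01"),
            (Token::Number(Err(NumberError::Invalid)), "")
        );
    }
}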
fn parse_hex_float_missing_exponent(
// format: -?0[xX] ( [0-9a-fA-F]+\.[0-9a-fA-F]* | [0-9a-fA-F]*\.[0-9a-fA-F]+ )
significand: &str,
kind: Option<FloatKind>,
) -> Result<Number, NumberError> {
let hexf_input = format!("{}{}", significand, "p0");
parse_hex_float(&hexf_input, kind)
}
fn parse_hex_float_missing_period(
// format: -?0[xX] [0-9a-fA-F]+
significand: &str,
// format: [pP][+-]?[0-9]+
exponent: &str,
kind: Option<FloatKind>,
) -> Result<Number, NumberError> {
let hexf_input = format!("{}.{}", significand, exponent);
parse_hex_float(&hexf_input, kind)
}
fn parse_hex_int(
is_negative: bool,
// format: [0-9a-fA-F]+
digits: &str,
kind: Option<IntKind>,
) -> Result<Number, NumberError> {
let digits_with_sign = if is_negative {
Cow::Owned(format!("-{}", digits))
} else {
Cow::Borrowed(digits)
};
parse_int(&digits_with_sign, kind, 16, is_negative)
}
fn parse_dec(
is_negative: bool,
// format: -? ( [0-9] | [1-9][0-9]+ )
digits_with_sign: &str,
kind: Option<Kind>,
) -> Result<Number, NumberError> {
match kind {
None => parse_int(digits_with_sign, None, 10, is_negative),
Some(Kind::Int(kind)) => parse_int(digits_with_sign, Some(kind), 10, is_negative),
Some(Kind::Float(kind)) => parse_dec_float(digits_with_sign, Some(kind)),
}
}
// Float parsing notes
// The following chapters of IEEE 754-2019 are relevant:
//
// 7.4 Overflow (largest finite number is exceeded by what would have been
// the rounded floating-point result were the exponent range unbounded)
//
// 7.5 Underflow (tiny non-zero result is detected;
// for decimal formats tininess is detected before rounding when a non-zero result
// computed as though both the exponent range and the precision were unbounded
// would lie strictly between ±2^-126)
//
// 7.6 Inexact (rounded result differs from what would have been computed
// were both exponent range and precision unbounded)
// The WGSL spec requires us to error:
// on overflow for decimal floating point literals
// on overflow and inexact for hexadecimal floating point literals
// (underflow is not mentioned)
// hexf_parse errors on overflow, underflow, inexact
// rust std lib float from str handles overflow, underflow, inexact transparently (rounds and will not error)
// Therefore we only check for overflow manually for decimal floating point literals
// input format: -?0[xX] ( [0-9a-fA-F]+\.[0-9a-fA-F]* | [0-9a-fA-F]*\.[0-9a-fA-F]+ ) [pP][+-]?[0-9]+
fn parse_hex_float(input: &str, kind: Option<FloatKind>) -> Result<Number, NumberError> {
match kind {
None => match hexf_parse::parse_hexf64(input, false) {
Ok(num) => Ok(Number::AbstractFloat(num)),
// can only be ParseHexfErrorKind::Inexact but we can't check since it's private
_ => Err(NumberError::NotRepresentable),
},
Some(FloatKind::F32) => match hexf_parse::parse_hexf32(input, false) {
Ok(num) => Ok(Number::F32(num)),
// can only be ParseHexfErrorKind::Inexact but we can't check since it's private
_ => Err(NumberError::NotRepresentable),
},
Some(FloatKind::F16) => Err(NumberError::UnimplementedF16),
}
}
// input format: -? ( [0-9]+\.[0-9]* | [0-9]*\.[0-9]+ ) ([eE][+-]?[0-9]+)?
// | -? [0-9]+ [eE][+-]?[0-9]+
fn parse_dec_float(input: &str, kind: Option<FloatKind>) -> Result<Number, NumberError> {
match kind {
None => {
let num = input.parse::<f64>().unwrap(); // will never fail
num.is_finite()
.then(|| Number::AbstractFloat(num))
.ok_or(NumberError::NotRepresentable)
}
Some(FloatKind::F32) => {
let num = input.parse::<f32>().unwrap(); // will never fail
num.is_finite()
.then(|| Number::F32(num))
.ok_or(NumberError::NotRepresentable)
}
Some(FloatKind::F16) => Err(NumberError::UnimplementedF16),
}
}
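// Illustrative sketch (not part of the vendored crate): the policy described in
// the float parsing notes above. A decimal f32 literal that overflows is rejected
// rather than rounded to infinity, and a hex float that cannot be represented
// exactly is rejected because hexf_parse errors on inexact input.
#[cfg(test)]
mod float_policy_sketch {
    use super::*;
    #[test]
    fn overflow_and_inexact_are_rejected() {
        assert_eq!(
            parse_dec_float("1e39", Some(FloatKind::F32)),
            Err(NumberError::NotRepresentable)
        );
        assert_eq!(
            parse_dec_float("1.5", Some(FloatKind::F32)),
            Ok(Number::F32(1.5))
        );
        // 0x1.000001 needs 25 significand bits, one more than f32 provides
        assert_eq!(
            parse_hex_float("0x1.000001p0", Some(FloatKind::F32)),
            Err(NumberError::NotRepresentable)
        );
    }
}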
fn parse_int(
input: &str,
kind: Option<IntKind>,
radix: u32,
is_negative: bool,
) -> Result<Number, NumberError> {
fn map_err(e: core::num::ParseIntError) -> NumberError {
match *e.kind() {
core::num::IntErrorKind::PosOverflow | core::num::IntErrorKind::NegOverflow => {
NumberError::NotRepresentable
}
_ => unreachable!(),
}
}
match kind {
None => match i64::from_str_radix(input, radix) {
Ok(num) => Ok(Number::AbstractInt(num)),
Err(e) => Err(map_err(e)),
},
Some(IntKind::I32) => match i32::from_str_radix(input, radix) {
Ok(num) => Ok(Number::I32(num)),
Err(e) => Err(map_err(e)),
},
Some(IntKind::U32) if is_negative => Err(NumberError::NotRepresentable),
Some(IntKind::U32) => match u32::from_str_radix(input, radix) {
Ok(num) => Ok(Number::U32(num)),
Err(e) => Err(map_err(e)),
},
}
}

View file

@ -1,204 +0,0 @@
use std::convert::TryFrom;
use hexf_parse::parse_hexf32;
use crate::Bytes;
use super::{
lexer::{try_skip_prefix, Lexer},
BadFloatError, BadIntError, Error, ExpectedToken, NumberType, Span, Token,
};
fn check_int_literal(word_without_minus: &str, minus: bool, hex: bool) -> Result<(), BadIntError> {
let leading_zeros = word_without_minus
.bytes()
.take_while(|&b| b == b'0')
.count();
if word_without_minus == "0" && minus {
Err(BadIntError::NegativeZero)
} else if word_without_minus != "0" && !hex && leading_zeros != 0 {
Err(BadIntError::LeadingZeros)
} else {
Ok(())
}
}
pub fn get_i32_literal(word: &str, span: Span) -> Result<i32, Error<'_>> {
let (minus, word_without_minus, _) = try_skip_prefix(word, "-");
let (hex, word_without_minus_and_0x, _) = try_skip_prefix(word_without_minus, "0x");
check_int_literal(word_without_minus, minus, hex)
.map_err(|e| Error::BadI32(span.clone(), e))?;
let parsed_val = match (hex, minus) {
(true, true) => i32::from_str_radix(&format!("-{}", word_without_minus_and_0x), 16),
(true, false) => i32::from_str_radix(word_without_minus_and_0x, 16),
(false, _) => word.parse(),
};
parsed_val.map_err(|e| Error::BadI32(span, e.into()))
}
pub fn get_u32_literal(word: &str, span: Span) -> Result<u32, Error<'_>> {
let (minus, word_without_minus, _) = try_skip_prefix(word, "-");
let (hex, word_without_minus_and_0x, _) = try_skip_prefix(word_without_minus, "0x");
check_int_literal(word_without_minus, minus, hex)
.map_err(|e| Error::BadU32(span.clone(), e))?;
// We need to add a minus here as well, since the lexer also accepts syntactically incorrect negative uints
let parsed_val = match (hex, minus) {
(true, true) => u32::from_str_radix(&format!("-{}", word_without_minus_and_0x), 16),
(true, false) => u32::from_str_radix(word_without_minus_and_0x, 16),
(false, _) => word.parse(),
};
parsed_val.map_err(|e| Error::BadU32(span, e.into()))
}
pub fn get_f32_literal(word: &str, span: Span) -> Result<f32, Error<'_>> {
let hex = word.starts_with("0x") || word.starts_with("-0x");
let parsed_val = if hex {
parse_hexf32(word, false).map_err(BadFloatError::ParseHexfError)
} else {
word.parse::<f32>().map_err(BadFloatError::ParseFloatError)
};
parsed_val.map_err(|e| Error::BadFloat(span, e))
}
pub(super) fn _parse_uint_literal<'a>(
lexer: &mut Lexer<'a>,
width: Bytes,
) -> Result<u32, Error<'a>> {
let token_span = lexer.next();
if width != 4 {
// Only 32-bit literals supported by the spec and naga for now!
return Err(Error::BadScalarWidth(token_span.1, width));
}
match token_span {
(
Token::Number {
value,
ty: NumberType::Uint,
},
span,
) => get_u32_literal(value, span),
other => Err(Error::Unexpected(
other,
ExpectedToken::Number {
ty: Some(NumberType::Uint),
width: Some(width),
},
)),
}
}
/// Parse a non-negative signed integer literal.
/// This is for attributes like `size`, `location` and others.
pub(super) fn parse_non_negative_sint_literal<'a>(
lexer: &mut Lexer<'a>,
width: Bytes,
) -> Result<u32, Error<'a>> {
let token_span = lexer.next();
if width != 4 {
// Only 32-bit literals supported by the spec and naga for now!
return Err(Error::BadScalarWidth(token_span.1, width));
}
match token_span {
(
Token::Number {
value,
ty: NumberType::Sint,
},
span,
) => {
let i32_val = get_i32_literal(value, span.clone())?;
u32::try_from(i32_val).map_err(|_| Error::NegativeInt(span))
}
other => Err(Error::Unexpected(
other,
ExpectedToken::Number {
ty: Some(NumberType::Sint),
width: Some(width),
},
)),
}
}
/// Parse a non-negative integer literal that may be either signed or unsigned.
/// This is for the `workgroup_size` attribute and array lengths.
/// Note: these values should be no larger than [`i32::MAX`], but this is not checked here.
pub(super) fn parse_generic_non_negative_int_literal<'a>(
lexer: &mut Lexer<'a>,
width: Bytes,
) -> Result<u32, Error<'a>> {
let token_span = lexer.next();
if width != 4 {
// Only 32-bit literals supported by the spec and naga for now!
return Err(Error::BadScalarWidth(token_span.1, width));
}
match token_span {
(
Token::Number {
value,
ty: NumberType::Sint,
},
span,
) => {
let i32_val = get_i32_literal(value, span.clone())?;
u32::try_from(i32_val).map_err(|_| Error::NegativeInt(span))
}
(
Token::Number {
value,
ty: NumberType::Uint,
},
span,
) => get_u32_literal(value, span),
other => Err(Error::Unexpected(
other,
ExpectedToken::Number {
ty: Some(NumberType::Sint),
width: Some(width),
},
)),
}
}
pub(super) fn _parse_float_literal<'a>(
lexer: &mut Lexer<'a>,
width: Bytes,
) -> Result<f32, Error<'a>> {
let token_span = lexer.next();
if width != 4 {
// Only 32-bit literals supported by the spec and naga for now!
return Err(Error::BadScalarWidth(token_span.1, width));
}
match token_span {
(
Token::Number {
value,
ty: NumberType::Float,
},
span,
) => get_f32_literal(value, span),
other => Err(Error::Unexpected(
other,
ExpectedToken::Number {
ty: Some(NumberType::Float),
width: Some(width),
},
)),
}
}

72
third_party/rust/naga/src/front/wgsl/tests.rs vendored
View file

@ -14,76 +14,6 @@ fn parse_comment() {
.unwrap(); .unwrap();
} }
// Regexes for the literals are taken from the working draft at
// https://www.w3.org/TR/2021/WD-WGSL-20210806/#literals
#[test]
fn parse_decimal_floats() {
// /^(-?[0-9]*\.[0-9]+|-?[0-9]+\.[0-9]*)((e|E)(\+|-)?[0-9]+)?$/
parse_str("let a : f32 = -1.;").unwrap();
parse_str("let a : f32 = -.1;").unwrap();
parse_str("let a : f32 = 42.1234;").unwrap();
parse_str("let a : f32 = -1.E3;").unwrap();
parse_str("let a : f32 = -.1e-5;").unwrap();
parse_str("let a : f32 = 2.3e+55;").unwrap();
assert!(parse_str("let a : f32 = 42.1234f;").is_err());
assert!(parse_str("let a : f32 = 42.1234f32;").is_err());
}
#[test]
fn parse_hex_floats() {
// /^-?0x([0-9a-fA-F]*\.?[0-9a-fA-F]+|[0-9a-fA-F]+\.[0-9a-fA-F]*)(p|P)(\+|-)?[0-9]+$/
parse_str("let a : f32 = -0xa.p1;").unwrap();
parse_str("let a : f32 = -0x.fp9;").unwrap();
parse_str("let a : f32 = 0x2a.4D2P4;").unwrap();
parse_str("let a : f32 = -0x.1p-5;").unwrap();
parse_str("let a : f32 = 0xC.8p+55;").unwrap();
parse_str("let a : f32 = 0x1p1;").unwrap();
assert!(parse_str("let a : f32 = 0x1p1f;").is_err());
assert!(parse_str("let a : f32 = 0x1p1f32;").is_err());
}
#[test]
fn parse_decimal_ints() {
// i32 /^-?0x[0-9a-fA-F]+|0|-?[1-9][0-9]*$/
parse_str("let a : i32 = 0;").unwrap();
parse_str("let a : i32 = 1092;").unwrap();
parse_str("let a : i32 = -9923;").unwrap();
assert!(parse_str("let a : i32 = -0;").is_err());
assert!(parse_str("let a : i32 = 01;").is_err());
assert!(parse_str("let a : i32 = 1.0;").is_err());
assert!(parse_str("let a : i32 = 1i;").is_err());
assert!(parse_str("let a : i32 = 1i32;").is_err());
// u32 /^0x[0-9a-fA-F]+u|0u|[1-9][0-9]*u$/
parse_str("let a : u32 = 0u;").unwrap();
parse_str("let a : u32 = 1092u;").unwrap();
assert!(parse_str("let a : u32 = -0u;").is_err());
assert!(parse_str("let a : u32 = 01u;").is_err());
assert!(parse_str("let a : u32 = 1.0u;").is_err());
assert!(parse_str("let a : u32 = 1u32;").is_err());
}
#[test]
fn parse_hex_ints() {
// i32 /^-?0x[0-9a-fA-F]+|0|-?[1-9][0-9]*$/
parse_str("let a : i32 = -0x0;").unwrap();
parse_str("let a : i32 = 0x2a4D2;").unwrap();
assert!(parse_str("let a : i32 = 0x2a4D2i;").is_err());
assert!(parse_str("let a : i32 = 0x2a4D2i32;").is_err());
// u32 /^0x[0-9a-fA-F]+u|0u|[1-9][0-9]*u$/
parse_str("let a : u32 = 0x0u;").unwrap();
parse_str("let a : u32 = 0x2a4D2u;").unwrap();
assert!(parse_str("let a : u32 = 0x2a4D2u32;").is_err());
}
#[test] #[test]
fn parse_types() { fn parse_types() {
parse_str("let a : i32 = 2;").unwrap(); parse_str("let a : i32 = 2;").unwrap();
@ -161,7 +91,7 @@ fn parse_struct() {
struct Bar { struct Bar {
@size(16) x: vec2<i32>, @size(16) x: vec2<i32>,
@align(16) y: f32, @align(16) y: f32,
@size(32) @align(8) z: vec3<f32>, @size(32) @align(128) z: vec3<f32>,
}; };
struct Empty {} struct Empty {}
var<storage,read_write> s: Foo; var<storage,read_write> s: Foo;

16
third_party/rust/naga/src/lib.rs vendored
View file

@ -211,7 +211,7 @@ pub mod valid;
pub use crate::arena::{Arena, Handle, Range, UniqueArena}; pub use crate::arena::{Arena, Handle, Range, UniqueArena};
pub use crate::span::{Span, SpanContext, WithSpan}; pub use crate::span::{SourceLocation, Span, SpanContext, WithSpan};
#[cfg(feature = "arbitrary")] #[cfg(feature = "arbitrary")]
use arbitrary::Arbitrary; use arbitrary::Arbitrary;
#[cfg(feature = "deserialize")] #[cfg(feature = "deserialize")]
@ -1439,11 +1439,23 @@ pub enum Statement {
/// this loop. (It may have `Break` and `Continue` statements targeting /// this loop. (It may have `Break` and `Continue` statements targeting
/// loops or switches nested within the `continuing` block.) /// loops or switches nested within the `continuing` block.)
/// ///
/// If present, `break_if` is an expression which is evaluated after the
/// continuing block. If its value is true, control continues after the
/// `Loop` statement, rather than branching back to the top of body as
/// usual. The `break_if` expression corresponds to a "break if" statement
/// in WGSL, or a loop whose back edge is an `OpBranchConditional`
/// instruction in SPIR-V.
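    ///
    /// For example (an illustrative sketch, not taken from the upstream docs),
    /// assuming a `var i: u32` is in scope, the following WGSL produces a `Loop`
    /// whose `break_if` holds the `i >= 4u` expression:
    ///
    /// ```text
    /// loop {
    ///     continuing {
    ///         i = i + 1u;
    ///         break if i >= 4u;
    ///     }
    /// }
    /// ```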
///
/// [`Break`]: Statement::Break /// [`Break`]: Statement::Break
/// [`Continue`]: Statement::Continue /// [`Continue`]: Statement::Continue
/// [`Kill`]: Statement::Kill /// [`Kill`]: Statement::Kill
/// [`Return`]: Statement::Return /// [`Return`]: Statement::Return
Loop { body: Block, continuing: Block }, /// [`break if`]: Self::Loop::break_if
Loop {
body: Block,
continuing: Block,
break_if: Option<Handle<Expression>>,
},
/// Exits the innermost enclosing [`Loop`] or [`Switch`]. /// Exits the innermost enclosing [`Loop`] or [`Switch`].
/// ///

175
third_party/rust/naga/src/proc/layouter.rs vendored
View file

@ -1,7 +1,86 @@
use crate::arena::{Arena, BadHandle, Handle, UniqueArena}; use crate::arena::{Arena, BadHandle, Handle, UniqueArena};
use std::{num::NonZeroU32, ops}; use std::{fmt::Display, num::NonZeroU32, ops};
pub type Alignment = NonZeroU32; /// A newtype struct whose only valid values are powers of 2
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct Alignment(NonZeroU32);
impl Alignment {
pub const ONE: Self = Self(unsafe { NonZeroU32::new_unchecked(1) });
pub const TWO: Self = Self(unsafe { NonZeroU32::new_unchecked(2) });
pub const FOUR: Self = Self(unsafe { NonZeroU32::new_unchecked(4) });
pub const EIGHT: Self = Self(unsafe { NonZeroU32::new_unchecked(8) });
pub const SIXTEEN: Self = Self(unsafe { NonZeroU32::new_unchecked(16) });
pub const MIN_UNIFORM: Self = Self::SIXTEEN;
pub const fn new(n: u32) -> Option<Self> {
if n.is_power_of_two() {
// SAFETY: value can't be 0 since we just checked if it's a power of 2
Some(Self(unsafe { NonZeroU32::new_unchecked(n) }))
} else {
None
}
}
/// # Panics
/// If `width` is not a power of 2
pub fn from_width(width: u8) -> Self {
Self::new(width as u32).unwrap()
}
/// Returns whether or not `n` is a multiple of this alignment.
pub const fn is_aligned(&self, n: u32) -> bool {
// equivalent to: `n % self.0.get() == 0` but much faster
n & (self.0.get() - 1) == 0
}
/// Round `n` up to the nearest alignment boundary.
pub const fn round_up(&self, n: u32) -> u32 {
// equivalent to:
// match n % self.0.get() {
// 0 => n,
// rem => n + (self.0.get() - rem),
// }
let mask = self.0.get() - 1;
(n + mask) & !mask
}
}
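// Illustrative sketch (not part of the vendored crate): the bit tricks above for
// a four-byte alignment. `is_aligned` masks off the low bits; `round_up` adds
// `alignment - 1` and then clears them.
#[cfg(test)]
mod alignment_sketch {
    use super::Alignment;
    #[test]
    fn round_up_and_is_aligned() {
        let four = Alignment::FOUR;
        assert!(four.is_aligned(12));
        assert!(!four.is_aligned(6));
        assert_eq!(four.round_up(5), 8);
        assert_eq!(four.round_up(8), 8);
    }
}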
impl Display for Alignment {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.get().fmt(f)
}
}
impl ops::Mul<u32> for Alignment {
type Output = u32;
fn mul(self, rhs: u32) -> Self::Output {
self.0.get() * rhs
}
}
impl ops::Mul for Alignment {
type Output = Alignment;
fn mul(self, rhs: Alignment) -> Self::Output {
// SAFETY: both lhs and rhs are powers of 2, so the result will be a power of 2
Self(unsafe { NonZeroU32::new_unchecked(self.0.get() * rhs.0.get()) })
}
}
impl From<crate::VectorSize> for Alignment {
fn from(size: crate::VectorSize) -> Self {
match size {
crate::VectorSize::Bi => Alignment::TWO,
crate::VectorSize::Tri => Alignment::FOUR,
crate::VectorSize::Quad => Alignment::FOUR,
}
}
}
/// Size and alignment information for a type. /// Size and alignment information for a type.
#[derive(Clone, Copy, Debug, Hash, PartialEq)] #[derive(Clone, Copy, Debug, Hash, PartialEq)]
@ -15,7 +94,7 @@ pub struct TypeLayout {
impl TypeLayout { impl TypeLayout {
/// Produce the stride as if this type is a base of an array. /// Produce the stride as if this type is a base of an array.
pub const fn to_stride(&self) -> u32 { pub const fn to_stride(&self) -> u32 {
Layouter::round_up(self.alignment, self.size) self.alignment.round_up(self.size)
} }
} }
@ -49,8 +128,8 @@ pub enum LayoutErrorInner {
InvalidArrayElementType(Handle<crate::Type>), InvalidArrayElementType(Handle<crate::Type>),
#[error("Struct member[{0}] type {1:?} doesn't exist")] #[error("Struct member[{0}] type {1:?} doesn't exist")]
InvalidStructMemberType(u32, Handle<crate::Type>), InvalidStructMemberType(u32, Handle<crate::Type>),
#[error("Zero width is not supported")] #[error("Type width must be a power of two")]
ZeroWidth, NonPowerOfTwoWidth,
#[error("Array size is a bad handle")] #[error("Array size is a bad handle")]
BadHandle(#[from] BadHandle), BadHandle(#[from] BadHandle),
} }
@ -74,40 +153,6 @@ impl Layouter {
self.layouts.clear(); self.layouts.clear();
} }
/// Round `offset` up to the nearest `alignment` boundary.
pub const fn round_up(alignment: Alignment, offset: u32) -> u32 {
match offset & (alignment.get() - 1) {
0 => offset,
other => offset + alignment.get() - other,
}
}
/// Return the offset and span of a struct member.
///
/// The member must fall at or after `offset`. The member's alignment and
/// size are `align` and `size` if given, defaulting to the values this
/// `Layouter` has previously determined for `ty`.
///
/// The return value is the range of offsets within the containing struct to
/// reserve for this member, along with the alignment used. The containing
/// struct must have sufficient space and alignment to accommodate these.
pub fn member_placement(
&self,
offset: u32,
ty: Handle<crate::Type>,
align: Option<Alignment>,
size: Option<NonZeroU32>,
) -> (ops::Range<u32>, Alignment) {
let layout = self.layouts[ty.index()];
let alignment = align.unwrap_or(layout.alignment);
let start = Self::round_up(alignment, offset);
let span = match size {
Some(size) => size.get(),
None => layout.size,
};
(start..start + span, alignment)
}
/// Extend this `Layouter` with layouts for any new entries in `types`. /// Extend this `Layouter` with layouts for any new entries in `types`.
/// ///
/// Ensure that every type in `types` has a corresponding [TypeLayout] in /// Ensure that every type in `types` has a corresponding [TypeLayout] in
@ -135,42 +180,38 @@ impl Layouter {
.try_size(constants) .try_size(constants)
.map_err(|error| LayoutErrorInner::BadHandle(error).with(ty_handle))?; .map_err(|error| LayoutErrorInner::BadHandle(error).with(ty_handle))?;
let layout = match ty.inner { let layout = match ty.inner {
Ti::Scalar { width, .. } | Ti::Atomic { width, .. } => TypeLayout { Ti::Scalar { width, .. } | Ti::Atomic { width, .. } => {
size, let alignment = Alignment::new(width as u32)
alignment: Alignment::new(width as u32) .ok_or(LayoutErrorInner::NonPowerOfTwoWidth.with(ty_handle))?;
.ok_or(LayoutErrorInner::ZeroWidth.with(ty_handle))?, TypeLayout { size, alignment }
}, }
Ti::Vector { Ti::Vector {
size: vec_size, size: vec_size,
width, width,
.. ..
} => TypeLayout { } => {
size, let alignment = Alignment::new(width as u32)
alignment: { .ok_or(LayoutErrorInner::NonPowerOfTwoWidth.with(ty_handle))?;
let count = if vec_size >= crate::VectorSize::Tri { TypeLayout {
4 size,
} else { alignment: Alignment::from(vec_size) * alignment,
2 }
}; }
Alignment::new(count * width as u32)
.ok_or(LayoutErrorInner::ZeroWidth.with(ty_handle))?
},
},
Ti::Matrix { Ti::Matrix {
columns: _, columns: _,
rows, rows,
width, width,
} => TypeLayout { } => {
size, let alignment = Alignment::new(width as u32)
alignment: { .ok_or(LayoutErrorInner::NonPowerOfTwoWidth.with(ty_handle))?;
let count = if rows >= crate::VectorSize::Tri { 4 } else { 2 }; TypeLayout {
Alignment::new(count * width as u32) size,
.ok_or(LayoutErrorInner::ZeroWidth.with(ty_handle))? alignment: Alignment::from(rows) * alignment,
}, }
}, }
Ti::Pointer { .. } | Ti::ValuePointer { .. } => TypeLayout { Ti::Pointer { .. } | Ti::ValuePointer { .. } => TypeLayout {
size, size,
alignment: Alignment::new(1).unwrap(), alignment: Alignment::ONE,
}, },
Ti::Array { Ti::Array {
base, base,
@ -185,7 +226,7 @@ impl Layouter {
}, },
}, },
Ti::Struct { span, ref members } => { Ti::Struct { span, ref members } => {
let mut alignment = Alignment::new(1).unwrap(); let mut alignment = Alignment::ONE;
for (index, member) in members.iter().enumerate() { for (index, member) in members.iter().enumerate() {
alignment = if member.ty < ty_handle { alignment = if member.ty < ty_handle {
alignment.max(self[member.ty].alignment) alignment.max(self[member.ty].alignment)
@ -204,7 +245,7 @@ impl Layouter {
} }
Ti::Image { .. } | Ti::Sampler { .. } | Ti::BindingArray { .. } => TypeLayout { Ti::Image { .. } | Ti::Sampler { .. } | Ti::BindingArray { .. } => TypeLayout {
size, size,
alignment: Alignment::new(1).unwrap(), alignment: Alignment::ONE,
}, },
}; };
debug_assert!(size <= layout.size); debug_assert!(size <= layout.size);
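
As a concrete illustration of what these rules produce (not from the patch): a vec3<f32> holds 12 bytes of data but has 16-byte alignment, so `TypeLayout::to_stride` rounds its array stride up to 16, and a mat3x3<f32> occupies three such padded columns:

// Illustration only: a local mirror of `Alignment::round_up`, i.e. `TypeLayout::to_stride`.
fn round_up(alignment: u32, n: u32) -> u32 {
    debug_assert!(alignment.is_power_of_two());
    (n + alignment - 1) & !(alignment - 1)
}

fn main() {
    // vec3<f32>: 3 * 4 bytes = 12 bytes of data, but 16-byte alignment.
    assert_eq!(round_up(16, 12), 16); // array stride of vec3<f32>
    // mat3x3<f32>: three vec3<f32> columns, each padded out to its 16-byte alignment,
    // matching the new `Alignment::from(rows) * width * columns` size formula below.
    assert_eq!(3 * round_up(16, 12), 48);
}
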

third_party/rust/naga/src/proc/mod.rs (vendored)

@ -113,10 +113,7 @@ impl super::TypeInner {
columns, columns,
rows, rows,
width, width,
} => { } => Alignment::from(rows) * width as u32 * columns as u32,
let aligned_rows = if rows > crate::VectorSize::Bi { 4 } else { 2 };
columns as u32 * aligned_rows * width as u32
}
Self::Pointer { .. } | Self::ValuePointer { .. } => POINTER_SPAN, Self::Pointer { .. } | Self::ValuePointer { .. } => POINTER_SPAN,
Self::Array { Self::Array {
base: _, base: _,

third_party/rust/naga/src/span.rs (vendored)

@ -59,6 +59,21 @@ impl Span {
pub fn is_defined(&self) -> bool { pub fn is_defined(&self) -> bool {
*self != Self::default() *self != Self::default()
} }
/// Return a [`SourceLocation`] for this span in the provided source.
pub fn location(&self, source: &str) -> SourceLocation {
let prefix = &source[..self.start as usize];
let line_number = prefix.matches('\n').count() as u32 + 1;
let line_start = prefix.rfind('\n').map(|pos| pos + 1).unwrap_or(0);
let line_position = source[line_start..self.start as usize].chars().count() as u32 + 1;
SourceLocation {
line_number,
line_position,
offset: self.start,
length: self.end - self.start,
}
}
} }
impl From<Range<usize>> for Span { impl From<Range<usize>> for Span {
@ -70,6 +85,25 @@ impl From<Range<usize>> for Span {
} }
} }
/// A human-readable representation for a span, tailored for text source.
///
/// Corresponds to the positional members of [`GPUCompilationMessage`][gcm] from
/// the WebGPU specification, except that `offset` and `length` are in bytes
/// (UTF-8 code units), instead of UTF-16 code units.
///
/// [gcm]: https://www.w3.org/TR/webgpu/#gpucompilationmessage
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct SourceLocation {
/// 1-based line number.
pub line_number: u32,
/// 1-based column of the start of this span
pub line_position: u32,
/// 0-based Offset in code units (in bytes) of the start of the span.
pub offset: u32,
/// Length in code units (in bytes) of the span.
pub length: u32,
}
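
A hedged sketch of how a consumer could turn one of these spans into a user-facing position. It assumes `Span` and `SourceLocation` are re-exported from the naga crate root, which the vendored revision appears to do; the span and source below are hypothetical:

use naga::{SourceLocation, Span};

// Illustration only: format a byte-offset span as a human-readable location.
fn describe(span: Span, source: &str) -> String {
    let SourceLocation { line_number, line_position, offset, length } = span.location(source);
    format!(
        "line {line_number}, column {line_position} (bytes {offset}..{})",
        offset + length
    )
}

fn main() {
    let source = "fn main() {\n    let x = 1\n}\n";
    // Hypothetical span covering `let x = 1` on line 2 (byte offsets 16..25).
    let span = Span::from(16..25);
    // Prints: line 2, column 5 (bytes 16..25)
    println!("{}", describe(span, source));
}
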
/// A source code span together with "context", a user-readable description of what part of the error it refers to. /// A source code span together with "context", a user-readable description of what part of the error it refers to.
pub type SpanContext = (Span, String); pub type SpanContext = (Span, String);
@ -186,6 +220,22 @@ impl<E> WithSpan<E> {
res.spans.extend(self.spans); res.spans.extend(self.spans);
res res
} }
#[cfg(feature = "span")]
/// Return a [`SourceLocation`] for our first span, if we have one.
pub fn location(&self, source: &str) -> Option<SourceLocation> {
if self.spans.is_empty() {
return None;
}
Some(self.spans[0].0.location(source))
}
#[cfg(not(feature = "span"))]
/// Return a [`SourceLocation`] for our first span, if we have one.
pub fn location(&self, _source: &str) -> Option<SourceLocation> {
None
}
} }
/// Convenience trait for [`Error`] to be able to apply spans to anything. /// Convenience trait for [`Error`] to be able to apply spans to anything.
@ -273,3 +323,107 @@ impl<T, E, E2> MapErrWithSpan<E, E2> for Result<T, WithSpan<E>> {
self.map_err(|e| e.and_then(func).into_other::<E2>()) self.map_err(|e| e.and_then(func).into_other::<E2>())
} }
} }
#[test]
fn span_location() {
let source = "12\n45\n\n89\n";
assert_eq!(
Span { start: 0, end: 1 }.location(source),
SourceLocation {
line_number: 1,
line_position: 1,
offset: 0,
length: 1
}
);
assert_eq!(
Span { start: 1, end: 2 }.location(source),
SourceLocation {
line_number: 1,
line_position: 2,
offset: 1,
length: 1
}
);
assert_eq!(
Span { start: 2, end: 3 }.location(source),
SourceLocation {
line_number: 1,
line_position: 3,
offset: 2,
length: 1
}
);
assert_eq!(
Span { start: 3, end: 5 }.location(source),
SourceLocation {
line_number: 2,
line_position: 1,
offset: 3,
length: 2
}
);
assert_eq!(
Span { start: 4, end: 6 }.location(source),
SourceLocation {
line_number: 2,
line_position: 2,
offset: 4,
length: 2
}
);
assert_eq!(
Span { start: 5, end: 6 }.location(source),
SourceLocation {
line_number: 2,
line_position: 3,
offset: 5,
length: 1
}
);
assert_eq!(
Span { start: 6, end: 7 }.location(source),
SourceLocation {
line_number: 3,
line_position: 1,
offset: 6,
length: 1
}
);
assert_eq!(
Span { start: 7, end: 8 }.location(source),
SourceLocation {
line_number: 4,
line_position: 1,
offset: 7,
length: 1
}
);
assert_eq!(
Span { start: 8, end: 9 }.location(source),
SourceLocation {
line_number: 4,
line_position: 2,
offset: 8,
length: 1
}
);
assert_eq!(
Span { start: 9, end: 10 }.location(source),
SourceLocation {
line_number: 4,
line_position: 3,
offset: 9,
length: 1
}
);
assert_eq!(
Span { start: 10, end: 11 }.location(source),
SourceLocation {
line_number: 5,
line_position: 1,
offset: 10,
length: 1
}
);
}

third_party/rust/naga/src/valid/analyzer.rs (vendored)

@ -841,6 +841,7 @@ impl FunctionInfo {
S::Loop { S::Loop {
ref body, ref body,
ref continuing, ref continuing,
break_if: _,
} => { } => {
let body_uniformity = let body_uniformity =
self.process_block(body, other_functions, disruptor, expression_arena)?; self.process_block(body, other_functions, disruptor, expression_arena)?;

third_party/rust/naga/src/valid/function.rs (vendored)

@ -86,6 +86,8 @@ pub enum FunctionError {
}, },
#[error("Argument '{name}' at index {index} has a type that can't be passed into functions.")] #[error("Argument '{name}' at index {index} has a type that can't be passed into functions.")]
InvalidArgumentType { index: usize, name: String }, InvalidArgumentType { index: usize, name: String },
#[error("The function's given return type cannot be returned from functions")]
NonConstructibleReturnType,
#[error("Argument '{name}' at index {index} is a pointer of space {space:?}, which can't be passed into functions.")] #[error("Argument '{name}' at index {index} is a pointer of space {space:?}, which can't be passed into functions.")]
InvalidArgumentPointerSpace { InvalidArgumentPointerSpace {
index: usize, index: usize,
@ -497,6 +499,7 @@ impl super::Validator {
S::Loop { S::Loop {
ref body, ref body,
ref continuing, ref continuing,
break_if,
} => { } => {
// special handling for block scoping is needed here, // special handling for block scoping is needed here,
// because the continuing{} block inherits the scope // because the continuing{} block inherits the scope
@ -518,6 +521,20 @@ impl super::Validator {
&context.with_abilities(ControlFlowAbility::empty()), &context.with_abilities(ControlFlowAbility::empty()),
)? )?
.stages; .stages;
if let Some(condition) = break_if {
match *context.resolve_type(condition, &self.valid_expression_set)? {
Ti::Scalar {
kind: crate::ScalarKind::Bool,
width: _,
} => {}
_ => {
return Err(FunctionError::InvalidIfType(condition)
.with_span_handle(condition, context.expressions))
}
}
}
for handle in self.valid_expression_list.drain(base_expression_count..) { for handle in self.valid_expression_list.drain(base_expression_count..) {
self.valid_expression_set.remove(handle.index()); self.valid_expression_set.remove(handle.index());
} }
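
The new check mirrors the existing rule for `if` conditions: a `break if` expression must resolve to a boolean scalar. A simplified standalone sketch of that shape, using hypothetical local types rather than naga's real `TypeInner`:

// Illustration only: the shape of the `break_if` type check, not the vendored validator.
enum ScalarKind { Bool, Uint }

enum TypeInner {
    Scalar(ScalarKind),
    Vector,
}

fn validate_break_if(resolved: Option<&TypeInner>) -> Result<(), &'static str> {
    match resolved {
        // A `Loop` without `break_if`, or one whose condition is a boolean scalar, is fine.
        None | Some(TypeInner::Scalar(ScalarKind::Bool)) => Ok(()),
        Some(_) => Err("`break if` condition must be a boolean scalar"),
    }
}

fn main() {
    assert!(validate_break_if(None).is_ok());
    assert!(validate_break_if(Some(&TypeInner::Scalar(ScalarKind::Bool))).is_ok());
    assert!(validate_break_if(Some(&TypeInner::Scalar(ScalarKind::Uint))).is_err());
    assert!(validate_break_if(Some(&TypeInner::Vector)).is_err());
}
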
@ -894,6 +911,17 @@ impl super::Validator {
} }
} }
#[cfg(feature = "validate")]
if let Some(ref result) = fun.result {
if !self.types[result.ty.index()]
.flags
.contains(super::TypeFlags::CONSTRUCTIBLE)
{
return Err(FunctionError::NonConstructibleReturnType
.with_span_handle(result.ty, &module.types));
}
}
self.valid_expression_set.clear(); self.valid_expression_set.clear();
self.valid_expression_list.clear(); self.valid_expression_list.clear();
for (handle, expr) in fun.expressions.iter() { for (handle, expr) in fun.expressions.iter() {

third_party/rust/naga/src/valid/interface.rs (vendored)

@ -135,6 +135,16 @@ impl VaryingContext<'_> {
} }
self.built_ins.insert(canonical); self.built_ins.insert(canonical);
let required = match built_in {
Bi::ClipDistance => Capabilities::CLIP_DISTANCE,
Bi::CullDistance => Capabilities::CULL_DISTANCE,
Bi::PrimitiveIndex => Capabilities::PRIMITIVE_INDEX,
_ => Capabilities::empty(),
};
if !self.capabilities.contains(required) {
return Err(VaryingError::UnsupportedCapability(required));
}
let width = 4; let width = 4;
let (visible, type_good) = match built_in { let (visible, type_good) = match built_in {
Bi::BaseInstance | Bi::BaseVertex | Bi::InstanceIndex | Bi::VertexIndex => ( Bi::BaseInstance | Bi::BaseVertex | Bi::InstanceIndex | Bi::VertexIndex => (
@ -206,21 +216,14 @@ impl VaryingContext<'_> {
width: crate::BOOL_WIDTH, width: crate::BOOL_WIDTH,
}, },
), ),
Bi::PrimitiveIndex => { Bi::PrimitiveIndex => (
if !self.capabilities.contains(Capabilities::PRIMITIVE_INDEX) { self.stage == St::Fragment && !self.output,
return Err(VaryingError::UnsupportedCapability( *ty_inner
Capabilities::PRIMITIVE_INDEX, == Ti::Scalar {
)); kind: Sk::Uint,
} width,
( },
self.stage == St::Fragment && !self.output, ),
*ty_inner
== Ti::Scalar {
kind: Sk::Uint,
width,
},
)
}
Bi::SampleIndex => ( Bi::SampleIndex => (
self.stage == St::Fragment && !self.output, self.stage == St::Fragment && !self.output,
*ty_inner *ty_inner

third_party/rust/naga/src/valid/mod.rs (vendored)

@ -82,11 +82,11 @@ bitflags::bitflags! {
#[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct Capabilities: u8 { pub struct Capabilities: u8 {
/// Support for `AddressSpace:PushConstant`. /// Support for [`AddressSpace:PushConstant`].
const PUSH_CONSTANT = 0x1; const PUSH_CONSTANT = 0x1;
/// Float values with width = 8. /// Float values with width = 8.
const FLOAT64 = 0x2; const FLOAT64 = 0x2;
/// Support for `Builtin:PrimitiveIndex`. /// Support for [`Builtin:PrimitiveIndex`].
const PRIMITIVE_INDEX = 0x4; const PRIMITIVE_INDEX = 0x4;
/// Support for non-uniform indexing of sampled textures and storage buffer arrays. /// Support for non-uniform indexing of sampled textures and storage buffer arrays.
const SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING = 0x8; const SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING = 0x8;
@ -94,6 +94,10 @@ bitflags::bitflags! {
const UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING = 0x10; const UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING = 0x10;
/// Support for non-uniform indexing of samplers. /// Support for non-uniform indexing of samplers.
const SAMPLER_NON_UNIFORM_INDEXING = 0x20; const SAMPLER_NON_UNIFORM_INDEXING = 0x20;
/// Support for [`Builtin::ClipDistance`].
const CLIP_DISTANCE = 0x40;
/// Support for [`Builtin::CullDistance`].
const CULL_DISTANCE = 0x80;
} }
} }
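
Callers opt into these per backend. A minimal sketch of composing the new flags on the consumer side, assuming the usual `naga::valid` entry points (`Capabilities`, `ValidationFlags`, `Validator::new`):

use naga::valid::{Capabilities, ValidationFlags, Validator};

fn main() {
    // Advertise the two new clip/cull builtins alongside an existing capability.
    let caps =
        Capabilities::CLIP_DISTANCE | Capabilities::CULL_DISTANCE | Capabilities::PRIMITIVE_INDEX;
    assert!(caps.contains(Capabilities::CLIP_DISTANCE));

    // A validator built with these capabilities accepts shaders that use those builtins.
    let _validator = Validator::new(ValidationFlags::all(), caps);
}
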

third_party/rust/naga/src/valid/type.rs (vendored)

@ -4,8 +4,6 @@ use crate::{
proc::Alignment, proc::Alignment,
}; };
const UNIFORM_MIN_ALIGNMENT: Alignment = unsafe { Alignment::new_unchecked(16) };
bitflags::bitflags! { bitflags::bitflags! {
/// Flags associated with [`Type`]s by [`Validator`]. /// Flags associated with [`Type`]s by [`Validator`].
/// ///
@ -52,20 +50,29 @@ bitflags::bitflags! {
/// This type can be passed as a function argument. /// This type can be passed as a function argument.
const ARGUMENT = 0x40; const ARGUMENT = 0x40;
/// A WGSL [constructible] type.
///
/// The constructible types are scalars, vectors, matrices, fixed-size
/// arrays of constructible types, and structs whose members are all
/// constructible.
///
/// [constructible]: https://gpuweb.github.io/gpuweb/wgsl/#constructible
const CONSTRUCTIBLE = 0x80;
} }
} }
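
The `NonConstructibleReturnType` check added in valid/function.rs above is just a containment test on this flag. A tiny sketch of that test, assuming `TypeFlags` is re-exported from `naga::valid` as in current naga releases:

use naga::valid::TypeFlags;

fn main() {
    // Runtime-sized arrays are DATA but not SIZED, so they are not constructible and
    // cannot be returned from functions; scalars and fixed-size arrays are.
    let scalar_flags =
        TypeFlags::DATA | TypeFlags::SIZED | TypeFlags::COPY | TypeFlags::CONSTRUCTIBLE;
    let runtime_array_flags = TypeFlags::DATA | TypeFlags::COPY;

    assert!(scalar_flags.contains(TypeFlags::CONSTRUCTIBLE));
    assert!(!runtime_array_flags.contains(TypeFlags::CONSTRUCTIBLE));
}
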
#[derive(Clone, Copy, Debug, thiserror::Error)] #[derive(Clone, Copy, Debug, thiserror::Error)]
pub enum Disalignment { pub enum Disalignment {
#[error("The array stride {stride} is not a multiple of the required alignment {alignment}")] #[error("The array stride {stride} is not a multiple of the required alignment {alignment}")]
ArrayStride { stride: u32, alignment: u32 }, ArrayStride { stride: u32, alignment: Alignment },
#[error("The struct span {span}, is not a multiple of the required alignment {alignment}")] #[error("The struct span {span}, is not a multiple of the required alignment {alignment}")]
StructSpan { span: u32, alignment: u32 }, StructSpan { span: u32, alignment: Alignment },
#[error("The struct member[{index}] offset {offset} is not a multiple of the required alignment {alignment}")] #[error("The struct member[{index}] offset {offset} is not a multiple of the required alignment {alignment}")]
MemberOffset { MemberOffset {
index: u32, index: u32,
offset: u32, offset: u32,
alignment: u32, alignment: Alignment,
}, },
#[error("The struct member[{index}] offset {offset} must be at least {expected}")] #[error("The struct member[{index}] offset {offset} must be at least {expected}")]
MemberOffsetAfterStruct { MemberOffsetAfterStruct {
@ -125,8 +132,8 @@ pub enum TypeError {
EmptyStruct, EmptyStruct,
} }
// Only makes sense if `flags.contains(HOST_SHARED)` // Only makes sense if `flags.contains(HOST_SHAREABLE)`
type LayoutCompatibility = Result<Option<Alignment>, (Handle<crate::Type>, Disalignment)>; type LayoutCompatibility = Result<Alignment, (Handle<crate::Type>, Disalignment)>;
fn check_member_layout( fn check_member_layout(
accum: &mut LayoutCompatibility, accum: &mut LayoutCompatibility,
@ -136,20 +143,18 @@ fn check_member_layout(
parent_handle: Handle<crate::Type>, parent_handle: Handle<crate::Type>,
) { ) {
*accum = match (*accum, member_layout) { *accum = match (*accum, member_layout) {
(Ok(cur_alignment), Ok(align)) => { (Ok(cur_alignment), Ok(alignment)) => {
let align = align.unwrap().get(); if alignment.is_aligned(member.offset) {
if member.offset % align != 0 { Ok(cur_alignment.max(alignment))
} else {
Err(( Err((
parent_handle, parent_handle,
Disalignment::MemberOffset { Disalignment::MemberOffset {
index: member_index, index: member_index,
offset: member.offset, offset: member.offset,
alignment: align, alignment,
}, },
)) ))
} else {
let combined_alignment = ((cur_alignment.unwrap().get() - 1) | (align - 1)) + 1;
Ok(Alignment::new(combined_alignment))
} }
} }
(Err(e), _) | (_, Err(e)) => Err(e), (Err(e), _) | (_, Err(e)) => Err(e),
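
The deleted code combined two alignments with a bit trick; for powers of two that expression is simply the maximum, which is what the `Alignment`-based code now computes with `max`. A quick standalone check of the equivalence (illustration only):

fn main() {
    // For power-of-two alignments a and b, ((a - 1) | (b - 1)) + 1 == max(a, b).
    for &a in &[1u32, 2, 4, 8, 16] {
        for &b in &[1u32, 2, 4, 8, 16] {
            let old_combined = ((a - 1) | (b - 1)) + 1;
            assert_eq!(old_combined, a.max(b));
        }
    }
}
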
@ -183,13 +188,12 @@ impl TypeInfo {
const fn dummy() -> Self { const fn dummy() -> Self {
TypeInfo { TypeInfo {
flags: TypeFlags::empty(), flags: TypeFlags::empty(),
uniform_layout: Ok(None), uniform_layout: Ok(Alignment::ONE),
storage_layout: Ok(None), storage_layout: Ok(Alignment::ONE),
} }
} }
const fn new(flags: TypeFlags, align: u32) -> Self { const fn new(flags: TypeFlags, alignment: Alignment) -> Self {
let alignment = Alignment::new(align);
TypeInfo { TypeInfo {
flags, flags,
uniform_layout: Ok(alignment), uniform_layout: Ok(alignment),
@ -237,8 +241,9 @@ impl super::Validator {
| TypeFlags::SIZED | TypeFlags::SIZED
| TypeFlags::COPY | TypeFlags::COPY
| TypeFlags::ARGUMENT | TypeFlags::ARGUMENT
| TypeFlags::CONSTRUCTIBLE
| shareable, | shareable,
width as u32, Alignment::from_width(width),
) )
} }
Ti::Vector { size, kind, width } => { Ti::Vector { size, kind, width } => {
@ -250,15 +255,15 @@ impl super::Validator {
} else { } else {
TypeFlags::empty() TypeFlags::empty()
}; };
let count = if size >= crate::VectorSize::Tri { 4 } else { 2 };
TypeInfo::new( TypeInfo::new(
TypeFlags::DATA TypeFlags::DATA
| TypeFlags::SIZED | TypeFlags::SIZED
| TypeFlags::COPY | TypeFlags::COPY
| TypeFlags::HOST_SHAREABLE | TypeFlags::HOST_SHAREABLE
| TypeFlags::ARGUMENT | TypeFlags::ARGUMENT
| TypeFlags::CONSTRUCTIBLE
| shareable, | shareable,
count * (width as u32), Alignment::from(size) * Alignment::from_width(width),
) )
} }
Ti::Matrix { Ti::Matrix {
@ -269,14 +274,14 @@ impl super::Validator {
if !self.check_width(crate::ScalarKind::Float, width) { if !self.check_width(crate::ScalarKind::Float, width) {
return Err(TypeError::InvalidWidth(crate::ScalarKind::Float, width)); return Err(TypeError::InvalidWidth(crate::ScalarKind::Float, width));
} }
let count = if rows >= crate::VectorSize::Tri { 4 } else { 2 };
TypeInfo::new( TypeInfo::new(
TypeFlags::DATA TypeFlags::DATA
| TypeFlags::SIZED | TypeFlags::SIZED
| TypeFlags::COPY | TypeFlags::COPY
| TypeFlags::HOST_SHAREABLE | TypeFlags::HOST_SHAREABLE
| TypeFlags::ARGUMENT, | TypeFlags::ARGUMENT
count * (width as u32), | TypeFlags::CONSTRUCTIBLE,
Alignment::from(rows) * Alignment::from_width(width),
) )
} }
Ti::Atomic { kind, width } => { Ti::Atomic { kind, width } => {
@ -289,7 +294,7 @@ impl super::Validator {
} }
TypeInfo::new( TypeInfo::new(
TypeFlags::DATA | TypeFlags::SIZED | TypeFlags::HOST_SHAREABLE, TypeFlags::DATA | TypeFlags::SIZED | TypeFlags::HOST_SHAREABLE,
width as u32, Alignment::from_width(width),
) )
} }
Ti::Pointer { base, space } => { Ti::Pointer { base, space } => {
@ -332,7 +337,10 @@ impl super::Validator {
// Pointers cannot be stored in variables, structure members, or // Pointers cannot be stored in variables, structure members, or
// array elements, so we do not mark them as `DATA`. // array elements, so we do not mark them as `DATA`.
TypeInfo::new(argument_flag | TypeFlags::SIZED | TypeFlags::COPY, 0) TypeInfo::new(
argument_flag | TypeFlags::SIZED | TypeFlags::COPY,
Alignment::ONE,
)
} }
Ti::ValuePointer { Ti::ValuePointer {
size: _, size: _,
@ -359,7 +367,10 @@ impl super::Validator {
// Pointers cannot be stored in variables, structure members, or // Pointers cannot be stored in variables, structure members, or
// array elements, so we do not mark them as `DATA`. // array elements, so we do not mark them as `DATA`.
TypeInfo::new(argument_flag | TypeFlags::SIZED | TypeFlags::COPY, 0) TypeInfo::new(
argument_flag | TypeFlags::SIZED | TypeFlags::COPY,
Alignment::ONE,
)
} }
Ti::Array { base, size, stride } => { Ti::Array { base, size, stride } => {
if base >= handle { if base >= handle {
@ -379,42 +390,27 @@ impl super::Validator {
}); });
} }
let general_alignment = base_layout.alignment.get(); let general_alignment = base_layout.alignment;
let uniform_layout = match base_info.uniform_layout { let uniform_layout = match base_info.uniform_layout {
Ok(base_alignment) => { Ok(base_alignment) => {
// combine the alignment requirements let alignment = base_alignment
let align = base_alignment
.unwrap()
.get()
.max(general_alignment) .max(general_alignment)
.max(UNIFORM_MIN_ALIGNMENT.get()); .max(Alignment::MIN_UNIFORM);
if stride % align != 0 { if alignment.is_aligned(stride) {
Err(( Ok(alignment)
handle,
Disalignment::ArrayStride {
stride,
alignment: align,
},
))
} else { } else {
Ok(Alignment::new(align)) Err((handle, Disalignment::ArrayStride { stride, alignment }))
} }
} }
Err(e) => Err(e), Err(e) => Err(e),
}; };
let storage_layout = match base_info.storage_layout { let storage_layout = match base_info.storage_layout {
Ok(base_alignment) => { Ok(base_alignment) => {
let align = base_alignment.unwrap().get().max(general_alignment); let alignment = base_alignment.max(general_alignment);
if stride % align != 0 { if alignment.is_aligned(stride) {
Err(( Ok(alignment)
handle,
Disalignment::ArrayStride {
stride,
alignment: align,
},
))
} else { } else {
Ok(Alignment::new(align)) Err((handle, Disalignment::ArrayStride { stride, alignment }))
} }
} }
Err(e) => Err(e), Err(e) => Err(e),
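
Worked example (not from the patch): for a uniform-space array of vec3<f32>, the element alignment is 16 and `MIN_UNIFORM` is also 16, so the array stride must be a multiple of 16; a tightly packed stride of 12 is rejected.

fn main() {
    // Illustration only: mirrors `alignment.is_aligned(stride)` for power-of-two alignments.
    let is_aligned = |alignment: u32, n: u32| (n & (alignment - 1)) == 0;

    let element_alignment = 16u32; // alignment of vec3<f32>
    let min_uniform = 16u32;       // Alignment::MIN_UNIFORM
    let required = element_alignment.max(min_uniform);

    assert!(!is_aligned(required, 12)); // packed stride 12 -> Disalignment::ArrayStride
    assert!(is_aligned(required, 16));  // 16 is the smallest stride the uniform rules accept
}
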
@ -467,7 +463,7 @@ impl super::Validator {
return Err(TypeError::NonPositiveArrayLength(const_handle)); return Err(TypeError::NonPositiveArrayLength(const_handle));
} }
TypeFlags::SIZED | TypeFlags::ARGUMENT TypeFlags::SIZED | TypeFlags::ARGUMENT | TypeFlags::CONSTRUCTIBLE
} }
crate::ArraySize::Dynamic => { crate::ArraySize::Dynamic => {
// Non-SIZED types may only appear as the last element of a structure. // Non-SIZED types may only appear as the last element of a structure.
@ -495,10 +491,11 @@ impl super::Validator {
| TypeFlags::COPY | TypeFlags::COPY
| TypeFlags::HOST_SHAREABLE | TypeFlags::HOST_SHAREABLE
| TypeFlags::IO_SHAREABLE | TypeFlags::IO_SHAREABLE
| TypeFlags::ARGUMENT, | TypeFlags::ARGUMENT
1, | TypeFlags::CONSTRUCTIBLE,
Alignment::ONE,
); );
ti.uniform_layout = Ok(Some(UNIFORM_MIN_ALIGNMENT)); ti.uniform_layout = Ok(Alignment::MIN_UNIFORM);
let mut min_offset = 0; let mut min_offset = 0;
@ -523,7 +520,7 @@ impl super::Validator {
ti.flags &= base_info.flags; ti.flags &= base_info.flags;
if member.offset < min_offset { if member.offset < min_offset {
//HACK: this could be nicer. We want to allow some structures // HACK: this could be nicer. We want to allow some structures
// to not bother with offsets/alignments if they are never // to not bother with offsets/alignments if they are never
// used for host sharing. // used for host sharing.
if member.offset == 0 { if member.offset == 0 {
@ -536,7 +533,6 @@ impl super::Validator {
} }
} }
//Note: `unwrap()` is fine because `Layouter` goes first and checks this
let base_size = types[member.ty].inner.size(constants); let base_size = types[member.ty].inner.size(constants);
min_offset = member.offset + base_size; min_offset = member.offset + base_size;
if min_offset > span { if min_offset > span {
@ -568,7 +564,7 @@ impl super::Validator {
// the start of any following member must be at least roundUp(16, SizeOf(S)). // the start of any following member must be at least roundUp(16, SizeOf(S)).
if let Some((span, offset)) = prev_struct_data { if let Some((span, offset)) = prev_struct_data {
let diff = member.offset - offset; let diff = member.offset - offset;
let min = crate::valid::Layouter::round_up(UNIFORM_MIN_ALIGNMENT, span); let min = Alignment::MIN_UNIFORM.round_up(span);
if diff < min { if diff < min {
ti.uniform_layout = Err(( ti.uniform_layout = Err((
handle, handle,
@ -603,16 +599,18 @@ impl super::Validator {
} }
} }
let alignment = self.layouter[handle].alignment.get(); let alignment = self.layouter[handle].alignment;
if span % alignment != 0 { if !alignment.is_aligned(span) {
ti.uniform_layout = Err((handle, Disalignment::StructSpan { span, alignment })); ti.uniform_layout = Err((handle, Disalignment::StructSpan { span, alignment }));
ti.storage_layout = Err((handle, Disalignment::StructSpan { span, alignment })); ti.storage_layout = Err((handle, Disalignment::StructSpan { span, alignment }));
} }
ti ti
} }
Ti::Image { .. } | Ti::Sampler { .. } => TypeInfo::new(TypeFlags::ARGUMENT, 0), Ti::Image { .. } | Ti::Sampler { .. } => {
Ti::BindingArray { .. } => TypeInfo::new(TypeFlags::empty(), 0), TypeInfo::new(TypeFlags::ARGUMENT, Alignment::ONE)
}
Ti::BindingArray { .. } => TypeInfo::new(TypeFlags::empty(), Alignment::ONE),
}) })
} }
} }


@ -1 +1 @@
{"files":{"Cargo.toml":"73d22ddbc04b486026d12675ef898363c6eea04ae23a9251acdd1b000c73b126","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"fedccfe06a4d75ba68233f0756de4161213c5d25851963f5b0521d8b7292b395","src/binding_model.rs":"79f024cdb136f44066d67ec7dc56bde7fdf3bf9e89874dc4db40e504099b2822","src/command/bind.rs":"309f3f1b1719d25115d385368cff0a2c85e94da825b2930141db78235901c673","src/command/bundle.rs":"6f940e6de1e84b858790e2801ba82c83f2bc0c6afbff962576b3dd64ac315de3","src/command/clear.rs":"568aaf9d0843bada18b68980b52dd8021830c28fff36551459fad5f6baea72e1","src/command/compute.rs":"b58ae86ffbd8280af27f063d514b17c5dafee3f3ddfd5637ca050135681eb764","src/command/draw.rs":"1b9b6531b7536bc0f864ab9fdeff376993de04e33554e84c7b2db7dc65e31327","src/command/memory_init.rs":"03c3267b311f389af859615ceea8a648b402a323062cc8f0fe2690a0fb390b97","src/command/mod.rs":"c0f00529bce224972d844d2fdc9f659ffa065512086315b7bcd767501961ee1a","src/command/query.rs":"34d22d33e4713ff7ca0b345b14cdbb6177236e782b5dfb38d907215c4deb6907","src/command/render.rs":"b21201c5b9574e98c066f988bcf91b1cde0d1847fc1db683291cb059a10f3dd8","src/command/transfer.rs":"7e5e13f04fef63e036291b2838c0f0097717ec497f98f420b71296b2cc691907","src/conv.rs":"87097903e86048c9110f526f7df1749186f84cb663d75d9d40a0c467befc89ea","src/device/life.rs":"857a71da94f5f6f043f304ada7dc9ab95c6a26ed0ff63f3d64a77942e28bcafe","src/device/mod.rs":"8b886c68cd2aaec9aabdbaea0f2f256fe546ae0242fe7c9b0b8a55686f215071","src/device/queue.rs":"5fe332a0d27dafff720b19e436d991a35affd2a8031f78c2a81439a49105edd6","src/device/trace.rs":"de575a8213c8ae9df711e4b6afe5736d71ac65bf141375fe044d3b6c4375e039","src/error.rs":"34a4adbb6ec669d8de22b932363506eeef1b1115c422bcc8daa3b26f62378518","src/hub.rs":"4cc404cc79578d7a6757f74ab1fbeeb357a13a4de5f0fe87affaea8895395c8d","src/id.rs":"3ec97d09f900f34f9ad38a555ddcadb77bd9977d3d39bfad030b9b34649cf502","src/init_tracker/buffer.rs":"ccdddaace101f921463bf6c62ed5aca01a6520717a850b5d4442c3551e9f1875","src/init_tracker/mod.rs":"273c6ee67a927784a617899c6fe5560e47108248ab67cabdc2eebcba53133364","src/init_tracker/texture.rs":"d02babc4f194f91853b5e9a71bd5b20d9434842cf242f29ed9d39661bfa44980","src/instance.rs":"4a19ac634a4dd22938586e3bc554ab69f079abb2d836ef932f06cee1655d9336","src/lib.rs":"f44250478f095aa7d61fb4773692037f465d1e8df9c5626000723d4e1961166e","src/pipeline.rs":"ffabdc74656717276241b1ca2ed043fabf18795662a523828193aea99d7a9ef5","src/present.rs":"5b760e252242be41d70f09cc46b95f2bfcb8258c3482755a7bec3b5a7e4bbcb6","src/resource.rs":"50021911ff214165a32129eabc2275945c2fd22bb736fad2977634ea8ef8362d","src/track/buffer.rs":"1a7400ec55f3c16bc074c46d11b9515762b558a333d36eb236d2e7d99701bbe5","src/track/mod.rs":"3a4b07c8f1ff168609ca521b441e1e2acc00c62d7e9e4dc39cb8ab83d9813d58","src/track/range.rs":"5bbfed6e103b3234d9de8e42057022da6d628c2cc1db6bb51b88f87f2d8adf8b","src/track/stateless.rs":"593ec39e01e18048100ab0e1869f430851b83b96bd0497b8e524efda38782a46","src/track/texture.rs":"de154923e4825fa120360aae61aec27370b44196464edea6468bf933976ea20c","src/validation.rs":"27c76c48eaf3ca6be111855d7b1ab8ef94c2f73ed5d5e4f758d82799099f014b"},"package":null} 
{"files":{"Cargo.toml":"d5e071d03da58ff1154b4667fd8863b802928abcf9e732fa00b6414481a8201e","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"fedccfe06a4d75ba68233f0756de4161213c5d25851963f5b0521d8b7292b395","src/binding_model.rs":"630414d5f50f18ede6b52387d49d0a3528d40b4804c2339d22e28a119b961666","src/command/bind.rs":"309f3f1b1719d25115d385368cff0a2c85e94da825b2930141db78235901c673","src/command/bundle.rs":"1d8e00a4b654e3d84bb18f311e686c9387c93d7307313b0f2797ff8e66e77439","src/command/clear.rs":"681837be0d0cda73b4e8ffb22b2e9dd8a663bd7cce191a14ff83ebb3489968cd","src/command/compute.rs":"fe6d43d09583a7c71da4de7e5a7ef0318b753d09c1081b5f3f52ef80add3a442","src/command/draw.rs":"bc9fa98a0897e82e88404433716b70ab66922e5128d524a4d5a2c63c627f78e8","src/command/memory_init.rs":"03c3267b311f389af859615ceea8a648b402a323062cc8f0fe2690a0fb390b97","src/command/mod.rs":"f096b26f9bc200a2e1c279225bf1197df82746b1b5f53174647f2f9ccb846d0b","src/command/query.rs":"34d22d33e4713ff7ca0b345b14cdbb6177236e782b5dfb38d907215c4deb6907","src/command/render.rs":"bb5670d09998f40f4e73315f8a5d59be603f8be3bba75ea5f8629ad45556a891","src/command/transfer.rs":"7e5e13f04fef63e036291b2838c0f0097717ec497f98f420b71296b2cc691907","src/conv.rs":"d2a18b98fd9361aab8d32044093e57c532140a1914b33b290eb2eb1306a801b7","src/device/life.rs":"6390be9658a59b0470288c9fd81f6a94f76a0afd34eafd80c14640064b779b6c","src/device/mod.rs":"3d7d289edaa50d06992b83c4d1494084e448fcb6895c55e394598ff93d5b3d1d","src/device/queue.rs":"d77574ceb5fa573b03026b6d2c37cc7c89e1e4e8934868bf2001a3fce737a0d5","src/device/trace.rs":"9c03f5ec06cae37f294179a2285e2af7a5497db8a5572bf1dbe50136943d69be","src/error.rs":"34a4adbb6ec669d8de22b932363506eeef1b1115c422bcc8daa3b26f62378518","src/hub.rs":"d8a904e188a9168c1552765b44944a15565f2f18dc6692c03e8eb533cdee02e6","src/id.rs":"910d5ef3403b0c476ef17bbaf27270653cf50382a0e1092206004ab775a91246","src/init_tracker/buffer.rs":"ccdddaace101f921463bf6c62ed5aca01a6520717a850b5d4442c3551e9f1875","src/init_tracker/mod.rs":"273c6ee67a927784a617899c6fe5560e47108248ab67cabdc2eebcba53133364","src/init_tracker/texture.rs":"d02babc4f194f91853b5e9a71bd5b20d9434842cf242f29ed9d39661bfa44980","src/instance.rs":"8494a66e40267370fd7a61d7c73c12549f3ef88c2dd5b2e268fdf177576024f0","src/lib.rs":"5f2803283022856ac9c2534d3096cb10e0e39f3cc7ddd7c973e8beffe9b3cc1f","src/pipeline.rs":"9b3d1ce4452a585c6a5b88201799f4bebb85ad236c62098e0ebb54c43522bdcf","src/present.rs":"89d0c226a6dec63c3e4255578634fb635a46c32412fd16065717fadd71435201","src/resource.rs":"51236b4893f1471d21e3e74cecf084f201b3fb0ac89fe1398619f6caf75a64bd","src/track/buffer.rs":"30b072df6d128d9beb3d885d01e6578d3559bfa4f15c36fd0236996975bbe596","src/track/mod.rs":"73a6bb425a28b4bf639a197a2eea9134548aaca869745bd8153e276561034984","src/track/range.rs":"5bbfed6e103b3234d9de8e42057022da6d628c2cc1db6bb51b88f87f2d8adf8b","src/track/stateless.rs":"593ec39e01e18048100ab0e1869f430851b83b96bd0497b8e524efda38782a46","src/track/texture.rs":"de154923e4825fa120360aae61aec27370b44196464edea6468bf933976ea20c","src/validation.rs":"27c76c48eaf3ca6be111855d7b1ab8ef94c2f73ed5d5e4f758d82799099f014b"},"package":null}

third_party/rust/wgpu-core/Cargo.toml (vendored)

@ -42,7 +42,7 @@ thiserror = "1"
[dependencies.naga] [dependencies.naga]
git = "https://github.com/gfx-rs/naga" git = "https://github.com/gfx-rs/naga"
rev = "571302e" rev = "27d38aae"
#version = "0.8" #version = "0.8"
features = ["span", "validate", "wgsl-in"] features = ["span", "validate", "wgsl-in"]
@ -58,6 +58,7 @@ version = "0.12"
[target.'cfg(target_arch = "wasm32")'.dependencies] [target.'cfg(target_arch = "wasm32")'.dependencies]
hal = { path = "../wgpu-hal", package = "wgpu-hal", version = "0.12", features = ["gles"] } hal = { path = "../wgpu-hal", package = "wgpu-hal", version = "0.12", features = ["gles"] }
web-sys = { version = "0.3", features = ["HtmlCanvasElement"] }
[target.'cfg(all(not(target_arch = "wasm32"), any(target_os = "ios", target_os = "macos")))'.dependencies] [target.'cfg(all(not(target_arch = "wasm32"), any(target_os = "ios", target_os = "macos")))'.dependencies]
hal = { path = "../wgpu-hal", package = "wgpu-hal", version = "0.12", features = ["metal"] } hal = { path = "../wgpu-hal", package = "wgpu-hal", version = "0.12", features = ["metal"] }


@ -24,6 +24,8 @@ use thiserror::Error;
pub enum BindGroupLayoutEntryError { pub enum BindGroupLayoutEntryError {
#[error("cube dimension is not expected for texture storage")] #[error("cube dimension is not expected for texture storage")]
StorageTextureCube, StorageTextureCube,
#[error("Read-write and read-only storage textures are not allowed by webgpu, they require the native only feature TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES")]
StorageTextureReadWrite,
#[error("arrays of bindings unsupported for this type of binding")] #[error("arrays of bindings unsupported for this type of binding")]
ArrayUnsupported, ArrayUnsupported,
#[error(transparent)] #[error(transparent)]


@ -6,6 +6,18 @@ times, on different encoders. Constructing a render bundle lets `wgpu` validate
and analyze its commands up front, so that replaying a bundle can be more and analyze its commands up front, so that replaying a bundle can be more
efficient than simply re-recording its commands each time. efficient than simply re-recording its commands each time.
Not all commands are available in bundles; for example, a render bundle may not
contain a [`RenderCommand::SetViewport`] command.
Most of `wgpu`'s backend graphics APIs have something like bundles. For example,
Vulkan calls them "secondary command buffers", and Metal calls them "indirect
command buffers". Although we plan to take advantage of these platform features
at some point in the future, for now `wgpu`'s implementation of render bundles
does not use them: at the hal level, `wgpu` render bundles just replay the
commands.
## Render Bundle Isolation
One important property of render bundles is that the draw calls in a render One important property of render bundles is that the draw calls in a render
bundle depend solely on the pipeline and state established within the render bundle depend solely on the pipeline and state established within the render
bundle itself. A draw call in a bundle will never use a vertex buffer, say, that bundle itself. A draw call in a bundle will never use a vertex buffer, say, that
@ -17,14 +29,11 @@ Render passes are also isolated from the effects of bundles. After executing a
render bundle, a render pass's pipeline, bind groups, and vertex and index render bundle, a render pass's pipeline, bind groups, and vertex and index
buffers are are unset, so the bundle cannot affect later draw calls in the pass. buffers are are unset, so the bundle cannot affect later draw calls in the pass.
Not all commands are available in bundles; for example, a render bundle may not A render pass is not fully isolated from a bundle's effects on push constant
contain a [`RenderCommand::SetViewport`] command. values. Draw calls following a bundle's execution will see whatever values the
bundle writes to push constant storage. Setting a pipeline initializes any push
Most of `wgpu`'s backend graphics APIs have something like bundles. For example, constant storage it could access to zero, and this initialization may also be
Vulkan calls them "secondary command buffers", and Metal calls them "indirect visible after bundle execution.
command buffers". However, `wgpu`'s implementation of render bundles does not
take advantage of those underlying platform features. At the hal level, `wgpu`
render bundles just replay the commands.
## Render Bundle Lifecycle ## Render Bundle Lifecycle
@ -105,7 +114,7 @@ pub struct RenderBundleEncoderDescriptor<'a> {
pub label: Label<'a>, pub label: Label<'a>,
/// The formats of the color attachments that this render bundle is capable to rendering to. This /// The formats of the color attachments that this render bundle is capable to rendering to. This
/// must match the formats of the color attachments in the renderpass this render bundle is executed in. /// must match the formats of the color attachments in the renderpass this render bundle is executed in.
pub color_formats: Cow<'a, [wgt::TextureFormat]>, pub color_formats: Cow<'a, [Option<wgt::TextureFormat>]>,
/// Information about the depth attachment that this render bundle is capable to rendering to. The format /// Information about the depth attachment that this render bundle is capable to rendering to. The format
/// must match the format of the depth attachments in the renderpass this render bundle is executed in. /// must match the format of the depth attachments in the renderpass this render bundle is executed in.
pub depth_stencil: Option<wgt::RenderBundleDepthStencil>, pub depth_stencil: Option<wgt::RenderBundleDepthStencil>,
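
The color-format list can now contain holes, letting a bundle mark individual color slots as unused. A hedged sketch of what a caller might pass, using `wgpu-types` formats (illustration only):

use wgpu_types as wgt;

fn main() {
    // Color slot 1 is intentionally unused in this bundle.
    let color_formats: Vec<Option<wgt::TextureFormat>> = vec![
        Some(wgt::TextureFormat::Rgba8Unorm),
        None,
        Some(wgt::TextureFormat::Rgba16Float),
    ];
    assert_eq!(color_formats.len(), 3);
}
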
@ -122,7 +131,8 @@ pub struct RenderBundleEncoder {
base: BasePass<RenderCommand>, base: BasePass<RenderCommand>,
parent_id: id::DeviceId, parent_id: id::DeviceId,
pub(crate) context: RenderPassContext, pub(crate) context: RenderPassContext,
pub(crate) is_ds_read_only: bool, pub(crate) is_depth_read_only: bool,
pub(crate) is_stencil_read_only: bool,
// Resource binding dedupe state. // Resource binding dedupe state.
#[cfg_attr(feature = "serial-pass", serde(skip))] #[cfg_attr(feature = "serial-pass", serde(skip))]
@ -137,6 +147,20 @@ impl RenderBundleEncoder {
parent_id: id::DeviceId, parent_id: id::DeviceId,
base: Option<BasePass<RenderCommand>>, base: Option<BasePass<RenderCommand>>,
) -> Result<Self, CreateRenderBundleError> { ) -> Result<Self, CreateRenderBundleError> {
let (is_depth_read_only, is_stencil_read_only) = match desc.depth_stencil {
Some(ds) => {
let aspects = hal::FormatAspects::from(ds.format);
(
!aspects.contains(hal::FormatAspects::DEPTH) || ds.depth_read_only,
!aspects.contains(hal::FormatAspects::STENCIL) || ds.stencil_read_only,
)
}
// There's no depth/stencil attachment, so these values just don't
// matter. Choose the most accommodating value, to simplify
// validation.
None => (true, true),
};
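
The rule being split out here: an aspect the format does not have at all counts as read-only, so a depth-only format never conflicts with a stencil-writing pipeline, and vice versa. A standalone sketch of the derivation, with plain booleans standing in for `hal::FormatAspects` (illustration only):

/// Returns (is_depth_read_only, is_stencil_read_only) for a depth/stencil attachment.
fn read_only_flags(
    has_depth: bool,
    has_stencil: bool,
    depth_read_only: bool,
    stencil_read_only: bool,
) -> (bool, bool) {
    (!has_depth || depth_read_only, !has_stencil || stencil_read_only)
}

fn main() {
    // A Depth24Plus-style format: no stencil aspect, so stencil is trivially read-only.
    assert_eq!(read_only_flags(true, false, false, false), (false, true));
    // A Depth24PlusStencil8-style format with both aspects writable.
    assert_eq!(read_only_flags(true, true, false, false), (false, false));
    // No attachment at all is treated as (true, true) by the encoder.
}
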
//TODO: validate that attachment formats are renderable, //TODO: validate that attachment formats are renderable,
// have expected aspects, support multisampling. // have expected aspects, support multisampling.
Ok(Self { Ok(Self {
@ -144,7 +168,7 @@ impl RenderBundleEncoder {
parent_id, parent_id,
context: RenderPassContext { context: RenderPassContext {
attachments: AttachmentData { attachments: AttachmentData {
colors: if desc.color_formats.len() > hal::MAX_COLOR_TARGETS { colors: if desc.color_formats.len() > hal::MAX_COLOR_ATTACHMENTS {
return Err(CreateRenderBundleError::TooManyColorAttachments); return Err(CreateRenderBundleError::TooManyColorAttachments);
} else { } else {
desc.color_formats.iter().cloned().collect() desc.color_formats.iter().cloned().collect()
@ -161,15 +185,9 @@ impl RenderBundleEncoder {
}, },
multiview: desc.multiview, multiview: desc.multiview,
}, },
is_ds_read_only: match desc.depth_stencil {
Some(ds) => {
let aspects = hal::FormatAspects::from(ds.format);
(!aspects.contains(hal::FormatAspects::DEPTH) || ds.depth_read_only)
&& (!aspects.contains(hal::FormatAspects::STENCIL) || ds.stencil_read_only)
}
None => false,
},
is_depth_read_only,
is_stencil_read_only,
current_bind_groups: BindGroupStateChange::new(), current_bind_groups: BindGroupStateChange::new(),
current_pipeline: StateChange::new(), current_pipeline: StateChange::new(),
}) })
@ -188,7 +206,8 @@ impl RenderBundleEncoder {
sample_count: 0, sample_count: 0,
multiview: None, multiview: None,
}, },
is_ds_read_only: false, is_depth_read_only: false,
is_stencil_read_only: false,
current_bind_groups: BindGroupStateChange::new(), current_bind_groups: BindGroupStateChange::new(),
current_pipeline: StateChange::new(), current_pipeline: StateChange::new(),
@ -236,18 +255,13 @@ impl RenderBundleEncoder {
&*pipeline_guard, &*pipeline_guard,
&*query_set_guard, &*query_set_guard,
), ),
index: IndexState::new(),
vertex: (0..hal::MAX_VERTEX_BUFFERS)
.map(|_| VertexState::new())
.collect(),
bind: (0..hal::MAX_BIND_GROUPS).map(|_| None).collect(),
push_constant_ranges: PushConstantState::new(),
flat_dynamic_offsets: Vec::new(),
used_bind_groups: 0,
pipeline: None, pipeline: None,
bind: (0..hal::MAX_BIND_GROUPS).map(|_| None).collect(),
vertex: (0..hal::MAX_VERTEX_BUFFERS).map(|_| None).collect(),
index: None,
flat_dynamic_offsets: Vec::new(),
}; };
let mut commands = Vec::new(); let mut commands = Vec::new();
let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>;
let mut buffer_memory_init_actions = Vec::new(); let mut buffer_memory_init_actions = Vec::new();
let mut texture_memory_init_actions = Vec::new(); let mut texture_memory_init_actions = Vec::new();
@ -328,8 +342,6 @@ impl RenderBundleEncoder {
RenderCommand::SetPipeline(pipeline_id) => { RenderCommand::SetPipeline(pipeline_id) => {
let scope = PassErrorScope::SetPipelineRender(pipeline_id); let scope = PassErrorScope::SetPipelineRender(pipeline_id);
state.pipeline = Some(pipeline_id);
let pipeline: &pipeline::RenderPipeline<A> = state let pipeline: &pipeline::RenderPipeline<A> = state
.trackers .trackers
.render_pipelines .render_pipelines
@ -344,26 +356,27 @@ impl RenderBundleEncoder {
.map_err(RenderCommandError::IncompatiblePipelineTargets) .map_err(RenderCommandError::IncompatiblePipelineTargets)
.map_pass_err(scope)?; .map_pass_err(scope)?;
if pipeline.flags.contains(PipelineFlags::WRITES_DEPTH_STENCIL) if (pipeline.flags.contains(PipelineFlags::WRITES_DEPTH)
&& self.is_ds_read_only && self.is_depth_read_only)
|| (pipeline.flags.contains(PipelineFlags::WRITES_STENCIL)
&& self.is_stencil_read_only)
{ {
return Err(RenderCommandError::IncompatiblePipelineRods) return Err(RenderCommandError::IncompatiblePipelineRods)
.map_pass_err(scope); .map_pass_err(scope);
} }
let layout = &pipeline_layout_guard[pipeline.layout_id.value]; let layout = &pipeline_layout_guard[pipeline.layout_id.value];
pipeline_layout_id = Some(pipeline.layout_id.value); let pipeline_state = PipelineState::new(pipeline_id, pipeline, layout);
state.set_pipeline(
pipeline.strip_index_format,
&pipeline.vertex_strides,
&layout.bind_group_layout_ids,
&layout.push_constant_ranges,
);
commands.push(command); commands.push(command);
if let Some(iter) = state.flush_push_constants() {
// If this pipeline uses push constants, zero out their values.
if let Some(iter) = pipeline_state.zero_push_constants() {
commands.extend(iter) commands.extend(iter)
} }
state.invalidate_bind_groups(&pipeline_state, layout);
state.pipeline = Some(pipeline_state);
} }
RenderCommand::SetIndexBuffer { RenderCommand::SetIndexBuffer {
buffer_id, buffer_id,
@ -391,8 +404,7 @@ impl RenderBundleEncoder {
offset..end, offset..end,
MemoryInitKind::NeedsInitializedMemory, MemoryInitKind::NeedsInitializedMemory,
)); ));
state.index.set_format(index_format); state.set_index_buffer(buffer_id, index_format, offset..end);
state.index.set_buffer(buffer_id, offset..end);
} }
RenderCommand::SetVertexBuffer { RenderCommand::SetVertexBuffer {
slot, slot,
@ -420,7 +432,7 @@ impl RenderBundleEncoder {
offset..end, offset..end,
MemoryInitKind::NeedsInitializedMemory, MemoryInitKind::NeedsInitializedMemory,
)); ));
state.vertex[slot as usize].set_buffer(buffer_id, offset..end); state.vertex[slot as usize] = Some(VertexState::new(buffer_id, offset..end));
} }
RenderCommand::SetPushConstant { RenderCommand::SetPushConstant {
stages, stages,
@ -431,10 +443,8 @@ impl RenderBundleEncoder {
let scope = PassErrorScope::SetPushConstant; let scope = PassErrorScope::SetPushConstant;
let end_offset = offset + size_bytes; let end_offset = offset + size_bytes;
let pipeline_layout_id = pipeline_layout_id let pipeline = state.pipeline(scope)?;
.ok_or(DrawError::MissingPipeline) let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
.map_pass_err(scope)?;
let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
pipeline_layout pipeline_layout
.validate_push_constant_ranges(stages, offset, end_offset) .validate_push_constant_ranges(stages, offset, end_offset)
@ -451,9 +461,11 @@ impl RenderBundleEncoder {
let scope = PassErrorScope::Draw { let scope = PassErrorScope::Draw {
indexed: false, indexed: false,
indirect: false, indirect: false,
pipeline: state.pipeline, pipeline: state.pipeline_id(),
}; };
let vertex_limits = state.vertex_limits(); let pipeline = state.pipeline(scope)?;
let used_bind_groups = pipeline.used_bind_groups;
let vertex_limits = state.vertex_limits(pipeline);
let last_vertex = first_vertex + vertex_count; let last_vertex = first_vertex + vertex_count;
if last_vertex > vertex_limits.vertex_limit { if last_vertex > vertex_limits.vertex_limit {
return Err(DrawError::VertexBeyondLimit { return Err(DrawError::VertexBeyondLimit {
@ -473,7 +485,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope); .map_pass_err(scope);
} }
commands.extend(state.flush_vertices()); commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(base.dynamic_offsets)); commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.push(command); commands.push(command);
} }
RenderCommand::DrawIndexed { RenderCommand::DrawIndexed {
@ -486,11 +498,17 @@ impl RenderBundleEncoder {
let scope = PassErrorScope::Draw { let scope = PassErrorScope::Draw {
indexed: true, indexed: true,
indirect: false, indirect: false,
pipeline: state.pipeline, pipeline: state.pipeline_id(),
};
let pipeline = state.pipeline(scope)?;
let used_bind_groups = pipeline.used_bind_groups;
let index = match state.index {
Some(ref index) => index,
None => return Err(DrawError::MissingIndexBuffer).map_pass_err(scope),
}; };
//TODO: validate that base_vertex + max_index() is within the provided range //TODO: validate that base_vertex + max_index() is within the provided range
let vertex_limits = state.vertex_limits(); let vertex_limits = state.vertex_limits(pipeline);
let index_limit = state.index.limit(); let index_limit = index.limit();
let last_index = first_index + index_count; let last_index = first_index + index_count;
if last_index > index_limit { if last_index > index_limit {
return Err(DrawError::IndexBeyondLimit { return Err(DrawError::IndexBeyondLimit {
@ -508,9 +526,9 @@ impl RenderBundleEncoder {
}) })
.map_pass_err(scope); .map_pass_err(scope);
} }
commands.extend(state.index.flush()); commands.extend(state.flush_index());
commands.extend(state.flush_vertices()); commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(base.dynamic_offsets)); commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.push(command); commands.push(command);
} }
RenderCommand::MultiDrawIndirect { RenderCommand::MultiDrawIndirect {
@ -522,12 +540,15 @@ impl RenderBundleEncoder {
let scope = PassErrorScope::Draw { let scope = PassErrorScope::Draw {
indexed: false, indexed: false,
indirect: true, indirect: true,
pipeline: state.pipeline, pipeline: state.pipeline_id(),
}; };
device device
.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION) .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)
.map_pass_err(scope)?; .map_pass_err(scope)?;
let pipeline = state.pipeline(scope)?;
let used_bind_groups = pipeline.used_bind_groups;
let buffer: &resource::Buffer<A> = state let buffer: &resource::Buffer<A> = state
.trackers .trackers
.buffers .buffers
@ -545,7 +566,7 @@ impl RenderBundleEncoder {
)); ));
commands.extend(state.flush_vertices()); commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(base.dynamic_offsets)); commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.push(command); commands.push(command);
} }
RenderCommand::MultiDrawIndirect { RenderCommand::MultiDrawIndirect {
@ -557,12 +578,15 @@ impl RenderBundleEncoder {
let scope = PassErrorScope::Draw { let scope = PassErrorScope::Draw {
indexed: true, indexed: true,
indirect: true, indirect: true,
pipeline: state.pipeline, pipeline: state.pipeline_id(),
}; };
device device
.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION) .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)
.map_pass_err(scope)?; .map_pass_err(scope)?;
let pipeline = state.pipeline(scope)?;
let used_bind_groups = pipeline.used_bind_groups;
let buffer: &resource::Buffer<A> = state let buffer: &resource::Buffer<A> = state
.trackers .trackers
.buffers .buffers
@ -579,9 +603,14 @@ impl RenderBundleEncoder {
MemoryInitKind::NeedsInitializedMemory, MemoryInitKind::NeedsInitializedMemory,
)); ));
commands.extend(state.index.flush()); let index = match state.index {
Some(ref mut index) => index,
None => return Err(DrawError::MissingIndexBuffer).map_pass_err(scope),
};
commands.extend(index.flush());
commands.extend(state.flush_vertices()); commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(base.dynamic_offsets)); commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.push(command); commands.push(command);
} }
RenderCommand::MultiDrawIndirect { .. } RenderCommand::MultiDrawIndirect { .. }
@ -589,7 +618,7 @@ impl RenderBundleEncoder {
RenderCommand::PushDebugGroup { color: _, len: _ } => unimplemented!(), RenderCommand::PushDebugGroup { color: _, len: _ } => unimplemented!(),
RenderCommand::InsertDebugMarker { color: _, len: _ } => unimplemented!(), RenderCommand::InsertDebugMarker { color: _, len: _ } => unimplemented!(),
RenderCommand::PopDebugGroup => unimplemented!(), RenderCommand::PopDebugGroup => unimplemented!(),
RenderCommand::WriteTimestamp { .. } RenderCommand::WriteTimestamp { .. } // Must check the WRITE_TIMESTAMP_INSIDE_PASSES feature
| RenderCommand::BeginPipelineStatisticsQuery { .. } | RenderCommand::BeginPipelineStatisticsQuery { .. }
| RenderCommand::EndPipelineStatisticsQuery => unimplemented!(), | RenderCommand::EndPipelineStatisticsQuery => unimplemented!(),
RenderCommand::ExecuteBundle(_) RenderCommand::ExecuteBundle(_)
@ -608,7 +637,8 @@ impl RenderBundleEncoder {
string_data: Vec::new(), string_data: Vec::new(),
push_constant_data: Vec::new(), push_constant_data: Vec::new(),
}, },
is_ds_read_only: self.is_ds_read_only, is_depth_read_only: self.is_depth_read_only,
is_stencil_read_only: self.is_stencil_read_only,
device_id: Stored { device_id: Stored {
value: id::Valid(self.parent_id), value: id::Valid(self.parent_id),
ref_count: device.life_guard.add_ref(), ref_count: device.life_guard.add_ref(),
@ -686,7 +716,8 @@ pub struct RenderBundle<A: HalApi> {
// Normalized command stream. It can be executed verbatim, // Normalized command stream. It can be executed verbatim,
// without re-binding anything on the pipeline change. // without re-binding anything on the pipeline change.
base: BasePass<RenderCommand>, base: BasePass<RenderCommand>,
pub(super) is_ds_read_only: bool, pub(super) is_depth_read_only: bool,
pub(super) is_stencil_read_only: bool,
pub(crate) device_id: Stored<id::DeviceId>, pub(crate) device_id: Stored<id::DeviceId>,
pub(crate) used: RenderBundleScope<A>, pub(crate) used: RenderBundleScope<A>,
pub(super) buffer_memory_init_actions: Vec<BufferInitTrackerAction>, pub(super) buffer_memory_init_actions: Vec<BufferInitTrackerAction>,
@ -907,39 +938,22 @@ impl<A: HalApi> Resource for RenderBundle<A> {
/// A render bundle's current index buffer state. /// A render bundle's current index buffer state.
/// ///
/// [`RenderBundleEncoder::finish`] uses this to drop redundant /// [`RenderBundleEncoder::finish`] records the currently set index buffer here,
/// `SetIndexBuffer` commands from the final [`RenderBundle`]. It /// and calls [`State::flush_index`] before any indexed draw command to produce
/// records index buffer state changes here, and then calls this
/// type's [`flush`] method before any indexed draw command to produce
/// a `SetIndexBuffer` command if one is necessary. /// a `SetIndexBuffer` command if one is necessary.
///
/// [`flush`]: IndexState::flush
#[derive(Debug)] #[derive(Debug)]
struct IndexState { struct IndexState {
buffer: Option<id::BufferId>, buffer: id::BufferId,
format: wgt::IndexFormat, format: wgt::IndexFormat,
pipeline_format: Option<wgt::IndexFormat>,
range: Range<wgt::BufferAddress>, range: Range<wgt::BufferAddress>,
is_dirty: bool, is_dirty: bool,
} }
impl IndexState { impl IndexState {
/// Return a fresh state: no index buffer has been set yet.
fn new() -> Self {
Self {
buffer: None,
format: wgt::IndexFormat::default(),
pipeline_format: None,
range: 0..0,
is_dirty: false,
}
}
/// Return the number of entries in the current index buffer. /// Return the number of entries in the current index buffer.
/// ///
/// Panic if no index buffer has been set. /// Panic if no index buffer has been set.
fn limit(&self) -> u32 { fn limit(&self) -> u32 {
assert!(self.buffer.is_some());
let bytes_per_index = match self.format { let bytes_per_index = match self.format {
wgt::IndexFormat::Uint16 => 2, wgt::IndexFormat::Uint16 => 2,
wgt::IndexFormat::Uint32 => 4, wgt::IndexFormat::Uint32 => 4,
@ -947,13 +961,13 @@ impl IndexState {
((self.range.end - self.range.start) / bytes_per_index) as u32 ((self.range.end - self.range.start) / bytes_per_index) as u32
} }
/// Prepare for an indexed draw, producing a `SetIndexBuffer` /// Generate a `SetIndexBuffer` command to prepare for an indexed draw
/// command if necessary. /// command, if needed.
fn flush(&mut self) -> Option<RenderCommand> { fn flush(&mut self) -> Option<RenderCommand> {
if self.is_dirty { if self.is_dirty {
self.is_dirty = false; self.is_dirty = false;
Some(RenderCommand::SetIndexBuffer { Some(RenderCommand::SetIndexBuffer {
buffer_id: self.buffer.unwrap(), buffer_id: self.buffer,
index_format: self.format, index_format: self.format,
offset: self.range.start, offset: self.range.start,
size: wgt::BufferSize::new(self.range.end - self.range.start), size: wgt::BufferSize::new(self.range.end - self.range.start),
@ -962,21 +976,6 @@ impl IndexState {
None None
} }
} }
/// Set the current index buffer's format.
fn set_format(&mut self, format: wgt::IndexFormat) {
if self.format != format {
self.format = format;
self.is_dirty = true;
}
}
/// Set the current index buffer.
fn set_buffer(&mut self, id: id::BufferId, range: Range<wgt::BufferAddress>) {
self.buffer = Some(id);
self.range = range;
self.is_dirty = true;
}
} }
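
To make the redundant-rebinding elimination described above concrete, here is a minimal standalone sketch of the same dirty-flag pattern. The types are local stand-ins (plain integers for ids), not wgpu-core's actual definitions.

```rust
use std::ops::Range;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum IndexFormat {
    Uint16,
    Uint32,
}

/// Stand-in for the bundle's simulated index-buffer state.
#[derive(Debug)]
struct IndexState {
    buffer: u32, // stand-in for id::BufferId
    format: IndexFormat,
    range: Range<u64>,
    is_dirty: bool,
}

#[derive(Debug)]
enum Command {
    SetIndexBuffer { buffer: u32, format: IndexFormat, offset: u64, size: u64 },
}

impl IndexState {
    /// Number of whole indices addressable through the bound byte range.
    fn limit(&self) -> u32 {
        let bytes_per_index: u64 = match self.format {
            IndexFormat::Uint16 => 2,
            IndexFormat::Uint32 => 4,
        };
        ((self.range.end - self.range.start) / bytes_per_index) as u32
    }

    /// Emit a `SetIndexBuffer` only if the binding changed since the last draw.
    fn flush(&mut self) -> Option<Command> {
        if self.is_dirty {
            self.is_dirty = false;
            Some(Command::SetIndexBuffer {
                buffer: self.buffer,
                format: self.format,
                offset: self.range.start,
                size: self.range.end - self.range.start,
            })
        } else {
            None
        }
    }
}

fn main() {
    let mut index = IndexState { buffer: 7, format: IndexFormat::Uint16, range: 0..256, is_dirty: true };
    assert_eq!(index.limit(), 128);   // 256 bytes / 2 bytes per Uint16 index
    assert!(index.flush().is_some()); // first indexed draw re-binds
    assert!(index.flush().is_none()); // later draws reuse the binding
}
```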
/// The state of a single vertex buffer slot during render bundle encoding. /// The state of a single vertex buffer slot during render bundle encoding.
@ -990,33 +989,20 @@ impl IndexState {
/// [`flush`]: IndexState::flush /// [`flush`]: IndexState::flush
#[derive(Debug)] #[derive(Debug)]
struct VertexState { struct VertexState {
buffer: Option<id::BufferId>, buffer: id::BufferId,
range: Range<wgt::BufferAddress>, range: Range<wgt::BufferAddress>,
stride: wgt::BufferAddress,
rate: wgt::VertexStepMode,
is_dirty: bool, is_dirty: bool,
} }
impl VertexState { impl VertexState {
/// Construct a fresh `VertexState`: no buffer has been set for fn new(buffer: id::BufferId, range: Range<wgt::BufferAddress>) -> Self {
/// this slot.
fn new() -> Self {
Self { Self {
buffer: None, buffer,
range: 0..0, range,
stride: 0, is_dirty: true,
rate: wgt::VertexStepMode::Vertex,
is_dirty: false,
} }
} }
/// Set this slot's vertex buffer.
fn set_buffer(&mut self, buffer_id: id::BufferId, range: Range<wgt::BufferAddress>) {
self.buffer = Some(buffer_id);
self.range = range;
self.is_dirty = true;
}
/// Generate a `SetVertexBuffer` command for this slot, if necessary. /// Generate a `SetVertexBuffer` command for this slot, if necessary.
/// ///
/// `slot` is the index of the vertex buffer slot that `self` tracks. /// `slot` is the index of the vertex buffer slot that `self` tracks.
@ -1025,7 +1011,7 @@ impl VertexState {
self.is_dirty = false; self.is_dirty = false;
Some(RenderCommand::SetVertexBuffer { Some(RenderCommand::SetVertexBuffer {
slot, slot,
buffer_id: self.buffer.unwrap(), buffer_id: self.buffer,
offset: self.range.start, offset: self.range.start,
size: wgt::BufferSize::new(self.range.end - self.range.start), size: wgt::BufferSize::new(self.range.end - self.range.start),
}) })
@ -1053,30 +1039,6 @@ struct BindState {
is_dirty: bool, is_dirty: bool,
} }
#[derive(Debug)]
struct PushConstantState {
ranges: ArrayVec<wgt::PushConstantRange, { SHADER_STAGE_COUNT }>,
is_dirty: bool,
}
impl PushConstantState {
fn new() -> Self {
Self {
ranges: ArrayVec::new(),
is_dirty: false,
}
}
fn set_push_constants(&mut self, new_ranges: &[wgt::PushConstantRange]) -> bool {
if &*self.ranges != new_ranges {
self.ranges = new_ranges.iter().cloned().collect();
self.is_dirty = true;
true
} else {
false
}
}
}
#[derive(Debug)] #[derive(Debug)]
struct VertexLimitState { struct VertexLimitState {
/// Length of the shortest vertex rate vertex buffer /// Length of the shortest vertex rate vertex buffer
@ -1089,6 +1051,64 @@ struct VertexLimitState {
instance_limit_slot: u32, instance_limit_slot: u32,
} }
/// The bundle's current pipeline, and some cached information needed for validation.
struct PipelineState {
/// The pipeline's id.
id: id::RenderPipelineId,
/// The id of the pipeline's layout.
layout_id: id::Valid<id::PipelineLayoutId>,
/// How this pipeline's vertex shader traverses each vertex buffer, indexed
/// by vertex buffer slot number.
steps: Vec<pipeline::VertexStep>,
/// Ranges of push constants this pipeline uses, copied from the pipeline
/// layout.
push_constant_ranges: ArrayVec<wgt::PushConstantRange, { SHADER_STAGE_COUNT }>,
/// The number of bind groups this pipeline uses.
used_bind_groups: usize,
}
impl PipelineState {
fn new<A: HalApi>(
pipeline_id: id::RenderPipelineId,
pipeline: &pipeline::RenderPipeline<A>,
layout: &binding_model::PipelineLayout<A>,
) -> Self {
Self {
id: pipeline_id,
layout_id: pipeline.layout_id.value,
steps: pipeline.vertex_steps.to_vec(),
push_constant_ranges: layout.push_constant_ranges.iter().cloned().collect(),
used_bind_groups: layout.bind_group_layout_ids.len(),
}
}
/// Return a sequence of commands to zero the push constant ranges this
/// pipeline uses. If no initialization is necessary, return `None`.
fn zero_push_constants(&self) -> Option<impl Iterator<Item = RenderCommand>> {
if !self.push_constant_ranges.is_empty() {
let nonoverlapping_ranges =
super::bind::compute_nonoverlapping_ranges(&self.push_constant_ranges);
Some(
nonoverlapping_ranges
.into_iter()
.map(|range| RenderCommand::SetPushConstant {
stages: range.stages,
offset: range.range.start,
size_bytes: range.range.end - range.range.start,
values_offset: None, // write zeros
}),
)
} else {
None
}
}
}
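
As a rough, hedged illustration of the push-constant zeroing described above (not the actual `compute_nonoverlapping_ranges` implementation), the sketch below splits overlapping per-stage ranges into non-overlapping spans so each span can be cleared exactly once; stage flags are reduced to a plain bitmask.

```rust
use std::ops::Range;

#[derive(Clone, Debug, PartialEq)]
struct PushConstantRange {
    stages: u32, // bitmask stand-in: 0b01 = VERTEX, 0b10 = FRAGMENT
    range: Range<u32>,
}

/// Splits possibly-overlapping per-stage ranges into non-overlapping spans,
/// each tagged with the union of stages covering it, so every span can be
/// zeroed with a single command.
fn nonoverlapping(ranges: &[PushConstantRange]) -> Vec<PushConstantRange> {
    let mut cuts: Vec<u32> = ranges
        .iter()
        .flat_map(|r| [r.range.start, r.range.end])
        .collect();
    cuts.sort_unstable();
    cuts.dedup();

    let mut out = Vec::new();
    for pair in cuts.windows(2) {
        let (start, end) = (pair[0], pair[1]);
        let stages = ranges
            .iter()
            .filter(|r| r.range.start < end && start < r.range.end)
            .fold(0u32, |acc, r| acc | r.stages);
        if stages != 0 {
            out.push(PushConstantRange { stages, range: start..end });
        }
    }
    out
}

fn main() {
    let ranges = [
        PushConstantRange { stages: 0b01, range: 0..16 },
        PushConstantRange { stages: 0b10, range: 8..24 },
    ];
    // 0..8 is vertex-only, 8..16 is shared, 16..24 is fragment-only.
    assert_eq!(nonoverlapping(&ranges).len(), 3);
}
```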
/// State for analyzing and cleaning up bundle command streams. /// State for analyzing and cleaning up bundle command streams.
/// ///
/// To minimize state updates, [`RenderBundleEncoder::finish`] /// To minimize state updates, [`RenderBundleEncoder::finish`]
@ -1096,21 +1116,25 @@ struct VertexLimitState {
/// [`SetIndexBuffer`] to the simulated state stored here, and then /// [`SetIndexBuffer`] to the simulated state stored here, and then
/// calls the `flush_foo` methods before draw calls to produce the /// calls the `flush_foo` methods before draw calls to produce the
/// update commands we actually need. /// update commands we actually need.
///
/// [`SetBindGroup`]: RenderCommand::SetBindGroup
/// [`SetIndexBuffer`]: RenderCommand::SetIndexBuffer
struct State<A: HalApi> { struct State<A: HalApi> {
/// Resources used by this bundle. This will become [`RenderBundle::used`]. /// Resources used by this bundle. This will become [`RenderBundle::used`].
trackers: RenderBundleScope<A>, trackers: RenderBundleScope<A>,
/// The current index buffer. We flush this state before indexed /// The currently set pipeline, if any.
/// draw commands. pipeline: Option<PipelineState>,
index: IndexState,
/// The state of each vertex buffer slot.
vertex: ArrayVec<VertexState, { hal::MAX_VERTEX_BUFFERS }>,
/// The bind group set at each index, if any. /// The bind group set at each index, if any.
bind: ArrayVec<Option<BindState>, { hal::MAX_BIND_GROUPS }>, bind: ArrayVec<Option<BindState>, { hal::MAX_BIND_GROUPS }>,
push_constant_ranges: PushConstantState, /// The state of each vertex buffer slot.
vertex: ArrayVec<Option<VertexState>, { hal::MAX_VERTEX_BUFFERS }>,
/// The current index buffer, if one has been set. We flush this state
/// before indexed draw commands.
index: Option<IndexState>,
/// Dynamic offset values used by the cleaned-up command sequence. /// Dynamic offset values used by the cleaned-up command sequence.
/// ///
@ -1119,35 +1143,31 @@ struct State<A: HalApi> {
/// ///
/// [`dynamic_offsets`]: BasePass::dynamic_offsets /// [`dynamic_offsets`]: BasePass::dynamic_offsets
flat_dynamic_offsets: Vec<wgt::DynamicOffset>, flat_dynamic_offsets: Vec<wgt::DynamicOffset>,
used_bind_groups: usize,
pipeline: Option<id::RenderPipelineId>,
} }
impl<A: HalApi> State<A> { impl<A: HalApi> State<A> {
fn vertex_limits(&self) -> VertexLimitState { fn vertex_limits(&self, pipeline: &PipelineState) -> VertexLimitState {
let mut vert_state = VertexLimitState { let mut vert_state = VertexLimitState {
vertex_limit: u32::MAX, vertex_limit: u32::MAX,
vertex_limit_slot: 0, vertex_limit_slot: 0,
instance_limit: u32::MAX, instance_limit: u32::MAX,
instance_limit_slot: 0, instance_limit_slot: 0,
}; };
for (idx, vbs) in self.vertex.iter().enumerate() { for (idx, (vbs, step)) in self.vertex.iter().zip(&pipeline.steps).enumerate() {
if vbs.stride == 0 { if let Some(ref vbs) = *vbs {
continue; let limit = ((vbs.range.end - vbs.range.start) / step.stride) as u32;
} match step.mode {
let limit = ((vbs.range.end - vbs.range.start) / vbs.stride) as u32; wgt::VertexStepMode::Vertex => {
match vbs.rate { if limit < vert_state.vertex_limit {
wgt::VertexStepMode::Vertex => { vert_state.vertex_limit = limit;
if limit < vert_state.vertex_limit { vert_state.vertex_limit_slot = idx as _;
vert_state.vertex_limit = limit; }
vert_state.vertex_limit_slot = idx as _;
} }
} wgt::VertexStepMode::Instance => {
wgt::VertexStepMode::Instance => { if limit < vert_state.instance_limit {
if limit < vert_state.instance_limit { vert_state.instance_limit = limit;
vert_state.instance_limit = limit; vert_state.instance_limit_slot = idx as _;
vert_state.instance_limit_slot = idx as _; }
} }
} }
} }
@ -1155,8 +1175,21 @@ impl<A: HalApi> State<A> {
vert_state vert_state
} }
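
A back-of-the-envelope sketch of the limit computation above, assuming a slot with a zero stride is simply skipped: each bound slot supplies `bytes / stride` elements, and the draw limit for each step mode is the minimum over the matching slots.

```rust
#[derive(Clone, Copy)]
enum StepMode {
    Vertex,
    Instance,
}

struct Slot {
    bytes_bound: u64,
    stride: u64,
    mode: StepMode,
}

/// Returns (vertex_limit, instance_limit) for a set of bound vertex buffer slots.
fn limits(slots: &[Slot]) -> (u32, u32) {
    let (mut vertex_limit, mut instance_limit) = (u32::MAX, u32::MAX);
    for slot in slots {
        if slot.stride == 0 {
            continue; // nothing is stepped through this slot
        }
        let limit = (slot.bytes_bound / slot.stride) as u32;
        match slot.mode {
            StepMode::Vertex => vertex_limit = vertex_limit.min(limit),
            StepMode::Instance => instance_limit = instance_limit.min(limit),
        }
    }
    (vertex_limit, instance_limit)
}

fn main() {
    let slots = [
        Slot { bytes_bound: 1024, stride: 16, mode: StepMode::Vertex },   // 64 vertices
        Slot { bytes_bound: 256,  stride: 32, mode: StepMode::Instance }, // 8 instances
    ];
    assert_eq!(limits(&slots), (64, 8));
}
```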
/// Return the id of the current pipeline, if any.
fn pipeline_id(&self) -> Option<id::RenderPipelineId> {
self.pipeline.as_ref().map(|p| p.id)
}
/// Return the current pipeline state. Return an error if none is set.
fn pipeline(&self, scope: PassErrorScope) -> Result<&PipelineState, RenderBundleError> {
self.pipeline
.as_ref()
.ok_or(DrawError::MissingPipeline)
.map_pass_err(scope)
}
/// Mark all non-empty bind group table entries from `index` onwards as dirty. /// Mark all non-empty bind group table entries from `index` onwards as dirty.
fn invalidate_group_from(&mut self, index: usize) { fn invalidate_bind_group_from(&mut self, index: usize) {
for contents in self.bind[index..].iter_mut().flatten() { for contents in self.bind[index..].iter_mut().flatten() {
contents.is_dirty = true; contents.is_dirty = true;
} }
@ -1190,83 +1223,106 @@ impl<A: HalApi> State<A> {
// Once we've changed the bind group at a particular index, all // Once we've changed the bind group at a particular index, all
// subsequent indices need to be rewritten. // subsequent indices need to be rewritten.
self.invalidate_group_from(slot as usize + 1); self.invalidate_bind_group_from(slot as usize + 1);
} }
fn set_pipeline( /// Determine which bind group slots need to be re-set after a pipeline change.
///
/// Given that we are switching from the current pipeline state to `new`,
/// whose layout is `layout`, mark all the bind group slots that we need to
/// emit new `SetBindGroup` commands for as dirty.
///
/// According to `wgpu_hal`'s rules:
///
/// - If the layout of any bind group slot changes, then that slot and
/// all following slots must have their bind groups re-established.
///
/// - Changing the push constant ranges at all requires re-establishing
/// all bind groups.
fn invalidate_bind_groups(
&mut self, &mut self,
index_format: Option<wgt::IndexFormat>, new: &PipelineState,
vertex_strides: &[(wgt::BufferAddress, wgt::VertexStepMode)], layout: &binding_model::PipelineLayout<A>,
layout_ids: &[id::Valid<id::BindGroupLayoutId>],
push_constant_layouts: &[wgt::PushConstantRange],
) { ) {
self.index.pipeline_format = index_format; match self.pipeline {
None => {
// Establishing entirely new pipeline state.
self.invalidate_bind_group_from(0);
}
Some(ref old) => {
if old.id == new.id {
// Everything is derived from the pipeline, so if the id has
// not changed, there's no need to consider anything else.
return;
}
for (vs, &(stride, step_mode)) in self.vertex.iter_mut().zip(vertex_strides) { // Any push constant change invalidates all groups.
if vs.stride != stride || vs.rate != step_mode { if old.push_constant_ranges != new.push_constant_ranges {
vs.stride = stride; self.invalidate_bind_group_from(0);
vs.rate = step_mode; } else {
vs.is_dirty = true; let first_changed = self
.bind
.iter()
.zip(&layout.bind_group_layout_ids)
.position(|(entry, &layout_id)| match *entry {
Some(ref contents) => contents.layout_id != layout_id,
None => false,
});
if let Some(slot) = first_changed {
self.invalidate_bind_group_from(slot);
}
}
} }
} }
let push_constants_changed = self
.push_constant_ranges
.set_push_constants(push_constant_layouts);
self.used_bind_groups = layout_ids.len();
let invalid_from = if push_constants_changed {
Some(0)
} else {
self.bind
.iter()
.zip(layout_ids)
.position(|(entry, &layout_id)| match *entry {
Some(ref contents) => contents.layout_id != layout_id,
None => false,
})
};
if let Some(slot) = invalid_from {
self.invalidate_group_from(slot);
}
} }
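
The invalidation rules spelled out above boil down to: find the first bind group slot whose layout changed and dirty everything from there on, and treat any push-constant layout change as dirtying slot 0. A small sketch follows, with layout ids reduced to plain integers purely for illustration.

```rust
/// Returns the first bind group slot that must be re-established, if any.
fn first_dirty_slot(
    old_layouts: &[u32],
    new_layouts: &[u32],
    push_constants_changed: bool,
) -> Option<usize> {
    if push_constants_changed {
        // Any change to the push constant ranges forces a full re-bind.
        return Some(0);
    }
    old_layouts
        .iter()
        .zip(new_layouts)
        .position(|(old, new)| old != new)
}

fn main() {
    // Slot 1's layout changed, so groups 1.. must be re-bound.
    assert_eq!(first_dirty_slot(&[10, 11, 12], &[10, 99, 12], false), Some(1));
    // A push constant layout change invalidates everything.
    assert_eq!(first_dirty_slot(&[10, 11], &[10, 11], true), Some(0));
    // Identical layouts leave all groups clean.
    assert_eq!(first_dirty_slot(&[10, 11], &[10, 11], false), None);
}
```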
fn flush_push_constants(&mut self) -> Option<impl Iterator<Item = RenderCommand>> { /// Set the bundle's current index buffer and its associated parameters.
let is_dirty = self.push_constant_ranges.is_dirty; fn set_index_buffer(
&mut self,
if is_dirty { buffer: id::BufferId,
let nonoverlapping_ranges = format: wgt::IndexFormat,
super::bind::compute_nonoverlapping_ranges(&self.push_constant_ranges.ranges); range: Range<wgt::BufferAddress>,
) {
Some( match self.index {
nonoverlapping_ranges Some(ref current)
.into_iter() if current.buffer == buffer
.map(|range| RenderCommand::SetPushConstant { && current.format == format
stages: range.stages, && current.range == range =>
offset: range.range.start, {
size_bytes: range.range.end - range.range.start, return
values_offset: None, }
}), _ => (),
)
} else {
None
} }
self.index = Some(IndexState {
buffer,
format,
range,
is_dirty: true,
});
}
/// Generate a `SetIndexBuffer` command to prepare for an indexed draw
/// command, if needed.
fn flush_index(&mut self) -> Option<RenderCommand> {
self.index.as_mut().and_then(|index| index.flush())
} }
fn flush_vertices(&mut self) -> impl Iterator<Item = RenderCommand> + '_ { fn flush_vertices(&mut self) -> impl Iterator<Item = RenderCommand> + '_ {
self.vertex self.vertex
.iter_mut() .iter_mut()
.enumerate() .enumerate()
.flat_map(|(i, vs)| vs.flush(i as u32)) .flat_map(|(i, vs)| vs.as_mut().and_then(|vs| vs.flush(i as u32)))
} }
/// Generate `SetBindGroup` commands for any bind groups that need to be updated. /// Generate `SetBindGroup` commands for any bind groups that need to be updated.
fn flush_binds( fn flush_binds(
&mut self, &mut self,
used_bind_groups: usize,
dynamic_offsets: &[wgt::DynamicOffset], dynamic_offsets: &[wgt::DynamicOffset],
) -> impl Iterator<Item = RenderCommand> + '_ { ) -> impl Iterator<Item = RenderCommand> + '_ {
// Append each dirty bind group's dynamic offsets to `flat_dynamic_offsets`. // Append each dirty bind group's dynamic offsets to `flat_dynamic_offsets`.
for contents in self.bind[..self.used_bind_groups].iter().flatten() { for contents in self.bind[..used_bind_groups].iter().flatten() {
if contents.is_dirty { if contents.is_dirty {
self.flat_dynamic_offsets self.flat_dynamic_offsets
.extend_from_slice(&dynamic_offsets[contents.dynamic_offsets.clone()]); .extend_from_slice(&dynamic_offsets[contents.dynamic_offsets.clone()]);
@ -1275,7 +1331,7 @@ impl<A: HalApi> State<A> {
// Then, generate `SetBindGroup` commands to update the dirty bind // Then, generate `SetBindGroup` commands to update the dirty bind
// groups. After this, all bind groups are clean. // groups. After this, all bind groups are clean.
self.bind[..self.used_bind_groups] self.bind[..used_bind_groups]
.iter_mut() .iter_mut()
.enumerate() .enumerate()
.flat_map(|(i, entry)| { .flat_map(|(i, entry)| {


@ -408,7 +408,7 @@ fn clear_texture_via_render_passes<A: hal::Api>(
for depth_or_layer in layer_or_depth_range { for depth_or_layer in layer_or_depth_range {
let color_attachments_tmp; let color_attachments_tmp;
let (color_attachments, depth_stencil_attachment) = if is_color { let (color_attachments, depth_stencil_attachment) = if is_color {
color_attachments_tmp = [hal::ColorAttachment { color_attachments_tmp = [Some(hal::ColorAttachment {
target: hal::Attachment { target: hal::Attachment {
view: dst_texture.get_clear_view(mip_level, depth_or_layer), view: dst_texture.get_clear_view(mip_level, depth_or_layer),
usage: hal::TextureUses::COLOR_TARGET, usage: hal::TextureUses::COLOR_TARGET,
@ -416,7 +416,7 @@ fn clear_texture_via_render_passes<A: hal::Api>(
resolve_target: None, resolve_target: None,
ops: hal::AttachmentOps::STORE, ops: hal::AttachmentOps::STORE,
clear_value: wgt::Color::TRANSPARENT, clear_value: wgt::Color::TRANSPARENT,
}]; })];
(&color_attachments_tmp[..], None) (&color_attachments_tmp[..], None)
} else { } else {
( (


@ -9,7 +9,7 @@ use crate::{
BasePass, BasePassRef, BindGroupStateChange, CommandBuffer, CommandEncoderError, BasePass, BasePassRef, BindGroupStateChange, CommandBuffer, CommandEncoderError,
CommandEncoderStatus, MapPassErr, PassErrorScope, QueryUseError, StateChange, CommandEncoderStatus, MapPassErr, PassErrorScope, QueryUseError, StateChange,
}, },
device::MissingDownlevelFlags, device::{MissingDownlevelFlags, MissingFeatures},
error::{ErrorFormatter, PrettyError}, error::{ErrorFormatter, PrettyError},
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token},
id, id,
@ -43,11 +43,24 @@ pub enum ComputeCommand {
bind_group_id: id::BindGroupId, bind_group_id: id::BindGroupId,
}, },
SetPipeline(id::ComputePipelineId), SetPipeline(id::ComputePipelineId),
/// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
SetPushConstant { SetPushConstant {
/// The byte offset within the push constant storage to write to. This
/// must be a multiple of four.
offset: u32, offset: u32,
/// The number of bytes to write. This must be a multiple of four.
size_bytes: u32, size_bytes: u32,
/// Index in [`BasePass::push_constant_data`] of the start of the data
/// to be written.
///
/// Note: this is not a byte offset like `offset`. Rather, it is the
/// index of the first `u32` element in `push_constant_data` to read.
values_offset: u32, values_offset: u32,
}, },
Dispatch([u32; 3]), Dispatch([u32; 3]),
DispatchIndirect { DispatchIndirect {
buffer_id: id::BufferId, buffer_id: id::BufferId,
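
To illustrate the addressing convention documented for `SetPushConstant` above (byte-based `offset`/`size_bytes` versus the word-based `values_offset` into the shared side buffer), here is a small sketch; the helper name and types are invented for the example.

```rust
/// Reads the words a `SetPushConstant`-style command refers to.
/// `size_bytes` is a byte quantity; `values_offset` indexes whole `u32` words.
fn read_push_constant_words(push_constant_data: &[u32], values_offset: u32, size_bytes: u32) -> &[u32] {
    assert_eq!(size_bytes % 4, 0, "push constant sizes are whole 4-byte words");
    let start = values_offset as usize;
    let words = (size_bytes / 4) as usize;
    &push_constant_data[start..start + words]
}

fn main() {
    let push_constant_data = [1u32, 2, 3, 4, 5];
    // size_bytes = 8 and values_offset = 2 selects the third and fourth words.
    assert_eq!(read_push_constant_words(&push_constant_data, 2, 8), &[3, 4][..]);
}
```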
@ -179,6 +192,8 @@ pub enum ComputePassErrorInner {
#[error(transparent)] #[error(transparent)]
QueryUse(#[from] QueryUseError), QueryUse(#[from] QueryUseError),
#[error(transparent)] #[error(transparent)]
MissingFeatures(#[from] MissingFeatures),
#[error(transparent)]
MissingDownlevelFlags(#[from] MissingDownlevelFlags), MissingDownlevelFlags(#[from] MissingDownlevelFlags),
} }
@ -253,6 +268,7 @@ impl<A: HalApi> State<A> {
Ok(()) Ok(())
} }
// `extra_buffer` is there to represent the indirect buffer that is also part of the usage scope.
fn flush_states( fn flush_states(
&mut self, &mut self,
raw_encoder: &mut A::CommandEncoder, raw_encoder: &mut A::CommandEncoder,
@ -260,6 +276,7 @@ impl<A: HalApi> State<A> {
bind_group_guard: &Storage<BindGroup<A>, id::BindGroupId>, bind_group_guard: &Storage<BindGroup<A>, id::BindGroupId>,
buffer_guard: &Storage<Buffer<A>, id::BufferId>, buffer_guard: &Storage<Buffer<A>, id::BufferId>,
texture_guard: &Storage<Texture<A>, id::TextureId>, texture_guard: &Storage<Texture<A>, id::TextureId>,
indirect_buffer: Option<id::Valid<id::BufferId>>,
) -> Result<(), UsageConflict> { ) -> Result<(), UsageConflict> {
for id in self.binder.list_active() { for id in self.binder.list_active() {
unsafe { unsafe {
@ -280,6 +297,13 @@ impl<A: HalApi> State<A> {
} }
} }
// Add the state of the indirect buffer if it hasn't been hit before.
unsafe {
base_trackers
.buffers
.set_and_remove_from_usage_scope_sparse(&mut self.scope.buffers, indirect_buffer);
}
log::trace!("Encoding dispatch barriers"); log::trace!("Encoding dispatch barriers");
CommandBuffer::drain_barriers(raw_encoder, base_trackers, buffer_guard, texture_guard); CommandBuffer::drain_barriers(raw_encoder, base_trackers, buffer_guard, texture_guard);
@ -569,6 +593,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&*bind_group_guard, &*bind_group_guard,
&*buffer_guard, &*buffer_guard,
&*texture_guard, &*texture_guard,
None,
) )
.map_pass_err(scope)?; .map_pass_err(scope)?;
@ -644,6 +669,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&*bind_group_guard, &*bind_group_guard,
&*buffer_guard, &*buffer_guard,
&*texture_guard, &*texture_guard,
Some(id::Valid(buffer_id)),
) )
.map_pass_err(scope)?; .map_pass_err(scope)?;
unsafe { unsafe {
@ -685,6 +711,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} => { } => {
let scope = PassErrorScope::WriteTimestamp; let scope = PassErrorScope::WriteTimestamp;
device
.require_features(wgt::Features::WRITE_TIMESTAMP_INSIDE_PASSES)
.map_pass_err(scope)?;
let query_set: &resource::QuerySet<A> = cmd_buf let query_set: &resource::QuerySet<A> = cmd_buf
.trackers .trackers
.query_sets .query_sets


@ -87,10 +87,12 @@ pub enum RenderCommandError {
MissingTextureUsage(#[from] MissingTextureUsageError), MissingTextureUsage(#[from] MissingTextureUsageError),
#[error(transparent)] #[error(transparent)]
PushConstants(#[from] PushConstantUploadError), PushConstants(#[from] PushConstantUploadError),
#[error("Invalid Viewport parameters")] #[error("Viewport width {0} and/or height {1} are less than or equal to 0")]
InvalidViewport, InvalidViewportDimension(f32, f32),
#[error("Invalid ScissorRect parameters")] #[error("Viewport minDepth {0} and/or maxDepth {1} are not in [0, 1]")]
InvalidScissorRect, InvalidViewportDepth(f32, f32),
#[error("Scissor {0:?} is not contained in the render target {1:?}")]
InvalidScissorRect(Rect<u32>, wgt::Extent3d),
#[error("Support for {0} is not implemented yet")] #[error("Support for {0} is not implemented yet")]
Unimplemented(&'static str), Unimplemented(&'static str),
} }
@ -170,13 +172,32 @@ pub enum RenderCommand {
depth_max: f32, depth_max: f32,
}, },
SetScissor(Rect<u32>), SetScissor(Rect<u32>),
/// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
///
/// See [`wgpu::RenderPass::set_push_constants`] for a detailed explanation
/// of the restrictions these commands must satisfy.
SetPushConstant { SetPushConstant {
/// Which stages we are setting push constant values for.
stages: wgt::ShaderStages, stages: wgt::ShaderStages,
/// The byte offset within the push constant storage to write to. This
/// must be a multiple of four.
offset: u32, offset: u32,
/// The number of bytes to write. This must be a multiple of four.
size_bytes: u32, size_bytes: u32,
/// None means there is no data and the data should be an array of zeros.
/// Index in [`BasePass::push_constant_data`] of the start of the data
/// to be written.
/// ///
/// Facilitates clears in renderbundles which explicitly do their clears. /// Note: this is not a byte offset like `offset`. Rather, it is the
/// index of the first `u32` element in `push_constant_data` to read.
///
/// `None` means zeros should be written to the destination range, and
/// there is no corresponding data in `push_constant_data`. This is used
/// by render bundles, which explicitly clear out any state that
/// post-bundle code might see.
values_offset: Option<u32>, values_offset: Option<u32>,
}, },
Draw { Draw {
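
A companion sketch for the render-pass variant documented above: `Some(i)` pulls whole `u32` words out of the shared side buffer, while `None` stands for "write zeros", which render bundles use to scrub state visible after the bundle. Names are illustrative, not wgpu-core's.

```rust
/// `Some(i)` reads `size_bytes / 4` words starting at word `i`;
/// `None` produces zeros (how bundles scrub push constant state).
fn resolve_values(push_constant_data: &[u32], values_offset: Option<u32>, size_bytes: u32) -> Vec<u32> {
    let words = (size_bytes / 4) as usize;
    match values_offset {
        Some(i) => push_constant_data[i as usize..i as usize + words].to_vec(),
        None => vec![0u32; words],
    }
}

fn main() {
    let data = [7u32, 8, 9];
    assert_eq!(resolve_values(&data, Some(1), 8), vec![8, 9]);
    assert_eq!(resolve_values(&data, None, 8), vec![0, 0]);
}
```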


@ -287,6 +287,10 @@ pub struct BasePass<C> {
/// instruction consumes the next `len` bytes from this vector. /// instruction consumes the next `len` bytes from this vector.
pub string_data: Vec<u8>, pub string_data: Vec<u8>,
/// Data used by `SetPushConstant` instructions.
///
/// See the documentation for [`RenderCommand::SetPushConstant`]
/// and [`ComputeCommand::SetPushConstant`] for details.
pub push_constant_data: Vec<u32>, pub push_constant_data: Vec<u32>,
} }


@ -17,7 +17,7 @@ use crate::{
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token},
id, id,
init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction}, init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction},
pipeline::PipelineFlags, pipeline::{self, PipelineFlags},
resource::{self, Buffer, Texture, TextureView}, resource::{self, Buffer, Texture, TextureView},
track::{TextureSelector, UsageConflict, UsageScope}, track::{TextureSelector, UsageConflict, UsageScope},
validation::{ validation::{
@ -131,20 +131,40 @@ pub struct RenderPassDepthStencilAttachment {
} }
impl RenderPassDepthStencilAttachment { impl RenderPassDepthStencilAttachment {
fn is_read_only(&self, aspects: hal::FormatAspects) -> Result<bool, RenderPassErrorInner> { /// Validate the given aspects' read-only flags against their load
if aspects.contains(hal::FormatAspects::DEPTH) && !self.depth.read_only { /// and store ops.
return Ok(false); ///
/// When an aspect is read-only, its load and store ops must be
/// `LoadOp::Load` and `StoreOp::Store`.
///
/// On success, return a pair `(depth, stencil)` indicating
/// whether the depth and stencil passes are read-only.
fn depth_stencil_read_only(
&self,
aspects: hal::FormatAspects,
) -> Result<(bool, bool), RenderPassErrorInner> {
let mut depth_read_only = true;
let mut stencil_read_only = true;
if aspects.contains(hal::FormatAspects::DEPTH) {
if self.depth.read_only
&& (self.depth.load_op, self.depth.store_op) != (LoadOp::Load, StoreOp::Store)
{
return Err(RenderPassErrorInner::InvalidDepthOps);
}
depth_read_only = self.depth.read_only;
} }
if (self.depth.load_op, self.depth.store_op) != (LoadOp::Load, StoreOp::Store) {
return Err(RenderPassErrorInner::InvalidDepthOps); if aspects.contains(hal::FormatAspects::STENCIL) {
if self.stencil.read_only
&& (self.stencil.load_op, self.stencil.store_op) != (LoadOp::Load, StoreOp::Store)
{
return Err(RenderPassErrorInner::InvalidStencilOps);
}
stencil_read_only = self.stencil.read_only;
} }
if aspects.contains(hal::FormatAspects::STENCIL) && !self.stencil.read_only {
return Ok(false); Ok((depth_read_only, stencil_read_only))
}
if (self.stencil.load_op, self.stencil.store_op) != (LoadOp::Load, StoreOp::Store) {
return Err(RenderPassErrorInner::InvalidStencilOps);
}
Ok(true)
} }
} }
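
The rule above can be summarized per aspect: if the aspect is flagged read-only, its ops must be `Load`/`Store`, and the check reports whether the aspect really is read-only. A hedged sketch with local enums standing in for the wgpu types:

```rust
#[derive(PartialEq)]
enum LoadOp { Load, Clear }
#[derive(PartialEq)]
enum StoreOp { Store, Discard }

/// Validates one aspect (depth or stencil) and reports whether it is read-only.
fn check_aspect(read_only: bool, load: LoadOp, store: StoreOp) -> Result<bool, &'static str> {
    if read_only && (load != LoadOp::Load || store != StoreOp::Store) {
        return Err("a read-only aspect must use LoadOp::Load and StoreOp::Store");
    }
    Ok(read_only)
}

fn main() {
    assert_eq!(check_aspect(true, LoadOp::Load, StoreOp::Store), Ok(true));
    assert!(check_aspect(true, LoadOp::Clear, StoreOp::Store).is_err());
    // A writable aspect may use any ops.
    assert_eq!(check_aspect(false, LoadOp::Clear, StoreOp::Discard), Ok(false));
}
```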
@ -153,7 +173,7 @@ impl RenderPassDepthStencilAttachment {
pub struct RenderPassDescriptor<'a> { pub struct RenderPassDescriptor<'a> {
pub label: Label<'a>, pub label: Label<'a>,
/// The color attachments of the render pass. /// The color attachments of the render pass.
pub color_attachments: Cow<'a, [RenderPassColorAttachment]>, pub color_attachments: Cow<'a, [Option<RenderPassColorAttachment>]>,
/// The depth and stencil attachment of the render pass, if any. /// The depth and stencil attachment of the render pass, if any.
pub depth_stencil_attachment: Option<&'a RenderPassDepthStencilAttachment>, pub depth_stencil_attachment: Option<&'a RenderPassDepthStencilAttachment>,
} }
@ -162,7 +182,7 @@ pub struct RenderPassDescriptor<'a> {
pub struct RenderPass { pub struct RenderPass {
base: BasePass<RenderCommand>, base: BasePass<RenderCommand>,
parent_id: id::CommandEncoderId, parent_id: id::CommandEncoderId,
color_targets: ArrayVec<RenderPassColorAttachment, { hal::MAX_COLOR_TARGETS }>, color_targets: ArrayVec<Option<RenderPassColorAttachment>, { hal::MAX_COLOR_ATTACHMENTS }>,
depth_stencil_target: Option<RenderPassDepthStencilAttachment>, depth_stencil_target: Option<RenderPassDepthStencilAttachment>,
// Resource binding dedupe state. // Resource binding dedupe state.
@ -278,16 +298,17 @@ impl IndexState {
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]
struct VertexBufferState { struct VertexBufferState {
total_size: BufferAddress, total_size: BufferAddress,
stride: BufferAddress, step: pipeline::VertexStep,
rate: VertexStepMode,
bound: bool, bound: bool,
} }
impl VertexBufferState { impl VertexBufferState {
const EMPTY: Self = Self { const EMPTY: Self = Self {
total_size: 0, total_size: 0,
stride: 0, step: pipeline::VertexStep {
rate: VertexStepMode::Vertex, stride: 0,
mode: VertexStepMode::Vertex,
},
bound: false, bound: false,
}; };
} }
@ -312,11 +333,11 @@ impl VertexState {
self.vertex_limit = u32::MAX; self.vertex_limit = u32::MAX;
self.instance_limit = u32::MAX; self.instance_limit = u32::MAX;
for (idx, vbs) in self.inputs.iter().enumerate() { for (idx, vbs) in self.inputs.iter().enumerate() {
if vbs.stride == 0 || !vbs.bound { if vbs.step.stride == 0 || !vbs.bound {
continue; continue;
} }
let limit = (vbs.total_size / vbs.stride) as u32; let limit = (vbs.total_size / vbs.step.stride) as u32;
match vbs.rate { match vbs.step.mode {
VertexStepMode::Vertex => { VertexStepMode::Vertex => {
if limit < self.vertex_limit { if limit < self.vertex_limit {
self.vertex_limit = limit; self.vertex_limit = limit;
@ -420,7 +441,7 @@ pub enum RenderPassErrorInner {
InvalidDepthStencilAttachmentFormat(wgt::TextureFormat), InvalidDepthStencilAttachmentFormat(wgt::TextureFormat),
#[error("attachment format {0:?} can not be resolved")] #[error("attachment format {0:?} can not be resolved")]
UnsupportedResolveTargetFormat(wgt::TextureFormat), UnsupportedResolveTargetFormat(wgt::TextureFormat),
#[error("necessary attachments are missing")] #[error("missing color or depth_stencil attachments, at least one is required.")]
MissingAttachments, MissingAttachments,
#[error("attachments have differing sizes: {previous:?} is followed by {mismatch:?}")] #[error("attachments have differing sizes: {previous:?} is followed by {mismatch:?}")]
AttachmentsDimensionMismatch { AttachmentsDimensionMismatch {
@ -474,8 +495,18 @@ pub enum RenderPassErrorInner {
ResourceUsageConflict(#[from] UsageConflict), ResourceUsageConflict(#[from] UsageConflict),
#[error("render bundle has incompatible targets, {0}")] #[error("render bundle has incompatible targets, {0}")]
IncompatibleBundleTargets(#[from] RenderPassCompatibilityError), IncompatibleBundleTargets(#[from] RenderPassCompatibilityError),
#[error("render bundle has an incompatible read-only depth/stencil flag: bundle is {bundle}, while the pass is {pass}")] #[error(
IncompatibleBundleRods { pass: bool, bundle: bool }, "render bundle has incompatible read-only flags: \
bundle has flags depth = {bundle_depth} and stencil = {bundle_stencil}, \
while the pass has flags depth = {pass_depth} and stencil = {pass_stencil}. \
Read-only renderpasses are only compatible with read-only bundles for that aspect."
)]
IncompatibleBundleRods {
pass_depth: bool,
pass_stencil: bool,
bundle_depth: bool,
bundle_stencil: bool,
},
#[error(transparent)] #[error(transparent)]
RenderCommand(#[from] RenderCommandError), RenderCommand(#[from] RenderCommandError),
#[error(transparent)] #[error(transparent)]
@ -558,14 +589,15 @@ impl<A: hal::Api> TextureView<A> {
} }
} }
const MAX_TOTAL_ATTACHMENTS: usize = hal::MAX_COLOR_TARGETS + hal::MAX_COLOR_TARGETS + 1; const MAX_TOTAL_ATTACHMENTS: usize = hal::MAX_COLOR_ATTACHMENTS + hal::MAX_COLOR_ATTACHMENTS + 1;
type AttachmentDataVec<T> = ArrayVec<T, MAX_TOTAL_ATTACHMENTS>; type AttachmentDataVec<T> = ArrayVec<T, MAX_TOTAL_ATTACHMENTS>;
struct RenderPassInfo<'a, A: HalApi> { struct RenderPassInfo<'a, A: HalApi> {
context: RenderPassContext, context: RenderPassContext,
usage_scope: UsageScope<A>, usage_scope: UsageScope<A>,
render_attachments: AttachmentDataVec<RenderAttachment<'a>>, // All render attachments, including depth/stencil render_attachments: AttachmentDataVec<RenderAttachment<'a>>, // All render attachments, including depth/stencil
is_ds_read_only: bool, is_depth_read_only: bool,
is_stencil_read_only: bool,
extent: wgt::Extent3d, extent: wgt::Extent3d,
_phantom: PhantomData<A>, _phantom: PhantomData<A>,
@ -614,7 +646,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
fn start( fn start(
device: &Device<A>, device: &Device<A>,
label: Option<&str>, label: Option<&str>,
color_attachments: &[RenderPassColorAttachment], color_attachments: &[Option<RenderPassColorAttachment>],
depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>, depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>,
cmd_buf: &mut CommandBuffer<A>, cmd_buf: &mut CommandBuffer<A>,
view_guard: &'a Storage<TextureView<A>, id::TextureViewId>, view_guard: &'a Storage<TextureView<A>, id::TextureViewId>,
@ -626,7 +658,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
// We default to false intentionally, even if depth-stencil isn't used at all. // We default to false intentionally, even if depth-stencil isn't used at all.
// This allows us to use the primary raw pipeline in `RenderPipeline`, // This allows us to use the primary raw pipeline in `RenderPipeline`,
// instead of the special read-only one, which would be `None`. // instead of the special read-only one, which would be `None`.
let mut is_ds_read_only = false; let mut is_depth_read_only = false;
let mut is_stencil_read_only = false;
let mut render_attachments = AttachmentDataVec::<RenderAttachment>::new(); let mut render_attachments = AttachmentDataVec::<RenderAttachment>::new();
let mut discarded_surfaces = AttachmentDataVec::new(); let mut discarded_surfaces = AttachmentDataVec::new();
@ -689,15 +722,19 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
expected: sample_count, expected: sample_count,
}); });
} }
if sample_count != 1 && sample_count != 4 {
return Err(RenderPassErrorInner::InvalidSampleCount(sample_count));
}
attachment_type_name = type_name; attachment_type_name = type_name;
Ok(()) Ok(())
}; };
let mut colors = ArrayVec::<hal::ColorAttachment<A>, { hal::MAX_COLOR_TARGETS }>::new(); let mut colors =
ArrayVec::<Option<hal::ColorAttachment<A>>, { hal::MAX_COLOR_ATTACHMENTS }>::new();
let mut depth_stencil = None; let mut depth_stencil = None;
if let Some(at) = depth_stencil_attachment { if let Some(at) = depth_stencil_attachment {
let view = cmd_buf let view: &TextureView<A> = cmd_buf
.trackers .trackers
.views .views
.add_single(&*view_guard, at.view) .add_single(&*view_guard, at.view)
@ -786,8 +823,9 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
} }
} }
let usage = if at.is_read_only(ds_aspects)? { (is_depth_read_only, is_stencil_read_only) = at.depth_stencil_read_only(ds_aspects)?;
is_ds_read_only = true;
let usage = if is_depth_read_only && is_stencil_read_only {
hal::TextureUses::DEPTH_STENCIL_READ | hal::TextureUses::RESOURCE hal::TextureUses::DEPTH_STENCIL_READ | hal::TextureUses::RESOURCE
} else { } else {
hal::TextureUses::DEPTH_STENCIL_WRITE hal::TextureUses::DEPTH_STENCIL_WRITE
@ -806,7 +844,13 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
} }
for at in color_attachments { for at in color_attachments {
let color_view = cmd_buf let at = if let Some(attachment) = at.as_ref() {
attachment
} else {
colors.push(None);
continue;
};
let color_view: &TextureView<A> = cmd_buf
.trackers .trackers
.views .views
.add_single(&*view_guard, at.view) .add_single(&*view_guard, at.view)
@ -836,7 +880,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
let mut hal_resolve_target = None; let mut hal_resolve_target = None;
if let Some(resolve_target) = at.resolve_target { if let Some(resolve_target) = at.resolve_target {
let resolve_view = cmd_buf let resolve_view: &TextureView<A> = cmd_buf
.trackers .trackers
.views .views
.add_single(&*view_guard, resolve_target) .add_single(&*view_guard, resolve_target)
@ -885,7 +929,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
}); });
} }
colors.push(hal::ColorAttachment { colors.push(Some(hal::ColorAttachment {
target: hal::Attachment { target: hal::Attachment {
view: &color_view.raw, view: &color_view.raw,
usage: hal::TextureUses::COLOR_TARGET, usage: hal::TextureUses::COLOR_TARGET,
@ -893,28 +937,30 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
resolve_target: hal_resolve_target, resolve_target: hal_resolve_target,
ops: at.channel.hal_ops(), ops: at.channel.hal_ops(),
clear_value: at.channel.clear_value, clear_value: at.channel.clear_value,
}); }));
} }
if sample_count != 1 && sample_count != 4 { let extent = extent.ok_or(RenderPassErrorInner::MissingAttachments)?;
return Err(RenderPassErrorInner::InvalidSampleCount(sample_count)); let multiview = detected_multiview.expect("Multiview was not detected, no attachments");
}
let view_data = AttachmentData { let view_data = AttachmentData {
colors: color_attachments colors: color_attachments
.iter() .iter()
.map(|at| view_guard.get(at.view).unwrap()) .map(|at| at.as_ref().map(|at| view_guard.get(at.view).unwrap()))
.collect(), .collect(),
resolves: color_attachments resolves: color_attachments
.iter() .iter()
.filter_map(|at| at.resolve_target) .filter_map(|at| match *at {
.map(|attachment| view_guard.get(attachment).unwrap()) Some(RenderPassColorAttachment {
resolve_target: Some(resolve),
..
}) => Some(view_guard.get(resolve).unwrap()),
_ => None,
})
.collect(), .collect(),
depth_stencil: depth_stencil_attachment.map(|at| view_guard.get(at.view).unwrap()), depth_stencil: depth_stencil_attachment.map(|at| view_guard.get(at.view).unwrap()),
}; };
let extent = extent.ok_or(RenderPassErrorInner::MissingAttachments)?;
let multiview = detected_multiview.expect("Multiview was not detected, no attachments");
let context = RenderPassContext { let context = RenderPassContext {
attachments: view_data.map(|view| view.desc.format), attachments: view_data.map(|view| view.desc.format),
sample_count, sample_count,
@ -937,7 +983,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
context, context,
usage_scope: UsageScope::new(buffer_guard, texture_guard), usage_scope: UsageScope::new(buffer_guard, texture_guard),
render_attachments, render_attachments,
is_ds_read_only, is_depth_read_only,
is_stencil_read_only,
extent, extent,
_phantom: PhantomData, _phantom: PhantomData,
pending_discard_init_fixups, pending_discard_init_fixups,
@ -1041,7 +1088,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self, &self,
encoder_id: id::CommandEncoderId, encoder_id: id::CommandEncoderId,
base: BasePassRef<RenderCommand>, base: BasePassRef<RenderCommand>,
color_attachments: &[RenderPassColorAttachment], color_attachments: &[Option<RenderPassColorAttachment>],
depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>, depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>,
) -> Result<(), RenderPassError> { ) -> Result<(), RenderPassError> {
profiling::scope!("run_render_pass", "CommandEncoder"); profiling::scope!("run_render_pass", "CommandEncoder");
@ -1156,7 +1203,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
); );
dynamic_offset_count += num_dynamic_offsets as usize; dynamic_offset_count += num_dynamic_offsets as usize;
let bind_group = cmd_buf let bind_group: &crate::binding_model::BindGroup<A> = cmd_buf
.trackers .trackers
.bind_groups .bind_groups
.add_single(&*bind_group_guard, bind_group_id) .add_single(&*bind_group_guard, bind_group_id)
@ -1220,7 +1267,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let scope = PassErrorScope::SetPipelineRender(pipeline_id); let scope = PassErrorScope::SetPipelineRender(pipeline_id);
state.pipeline = Some(pipeline_id); state.pipeline = Some(pipeline_id);
let pipeline = cmd_buf let pipeline: &pipeline::RenderPipeline<A> = cmd_buf
.trackers .trackers
.render_pipelines .render_pipelines
.add_single(&*render_pipeline_guard, pipeline_id) .add_single(&*render_pipeline_guard, pipeline_id)
@ -1234,8 +1281,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
state.pipeline_flags = pipeline.flags; state.pipeline_flags = pipeline.flags;
if pipeline.flags.contains(PipelineFlags::WRITES_DEPTH_STENCIL) if (pipeline.flags.contains(PipelineFlags::WRITES_DEPTH)
&& info.is_ds_read_only && info.is_depth_read_only)
|| (pipeline.flags.contains(PipelineFlags::WRITES_STENCIL)
&& info.is_stencil_read_only)
{ {
return Err(RenderCommandError::IncompatiblePipelineRods) return Err(RenderCommandError::IncompatiblePipelineRods)
.map_pass_err(scope); .map_pass_err(scope);
@ -1304,24 +1353,25 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
state.index.pipeline_format = pipeline.strip_index_format; state.index.pipeline_format = pipeline.strip_index_format;
let vertex_strides_len = pipeline.vertex_strides.len(); let vertex_steps_len = pipeline.vertex_steps.len();
state.vertex.buffers_required = vertex_strides_len as u32; state.vertex.buffers_required = vertex_steps_len as u32;
while state.vertex.inputs.len() < vertex_strides_len { // Initialize each `vertex.inputs[i].step` from
// `pipeline.vertex_steps[i]`. Enlarge `vertex.inputs`
// as necessary to accommodate all slots in the
// pipeline. If `vertex.inputs` is longer, fill the
// extra entries with default `VertexStep`s.
while state.vertex.inputs.len() < vertex_steps_len {
state.vertex.inputs.push(VertexBufferState::EMPTY); state.vertex.inputs.push(VertexBufferState::EMPTY);
} }
// Update vertex buffer limits // This is worse as a `zip`, but it's close.
for (vbs, &(stride, rate)) in let mut steps = pipeline.vertex_steps.iter();
state.vertex.inputs.iter_mut().zip(&pipeline.vertex_strides) for input in state.vertex.inputs.iter_mut() {
{ input.step = steps.next().cloned().unwrap_or_default();
vbs.stride = stride;
vbs.rate = rate;
}
for vbs in state.vertex.inputs.iter_mut().skip(vertex_strides_len) {
vbs.stride = 0;
vbs.rate = VertexStepMode::Vertex;
} }
// Update vertex buffer limits.
state.vertex.update_limits(); state.vertex.update_limits();
} }
RenderCommand::SetIndexBuffer { RenderCommand::SetIndexBuffer {
@ -1451,14 +1501,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
depth_max, depth_max,
} => { } => {
let scope = PassErrorScope::SetViewport; let scope = PassErrorScope::SetViewport;
if rect.w <= 0.0 if rect.w <= 0.0 || rect.h <= 0.0 {
|| rect.h <= 0.0 return Err(RenderCommandError::InvalidViewportDimension(
|| depth_min < 0.0 rect.w, rect.h,
|| depth_min > 1.0 ))
|| depth_max < 0.0 .map_pass_err(scope);
|| depth_max > 1.0 }
{ if !(0.0..=1.0).contains(&depth_min) || !(0.0..=1.0).contains(&depth_max) {
return Err(RenderCommandError::InvalidViewport).map_pass_err(scope); return Err(RenderCommandError::InvalidViewportDepth(
depth_min, depth_max,
))
.map_pass_err(scope);
} }
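
The reworked viewport checks split the old catch-all error into two cases. A standalone sketch of the same validation, using a plain `String` error instead of the real error enum:

```rust
/// Mirrors the two separate viewport errors introduced above.
fn validate_viewport(w: f32, h: f32, depth_min: f32, depth_max: f32) -> Result<(), String> {
    if w <= 0.0 || h <= 0.0 {
        return Err(format!("viewport width {w} and/or height {h} must be greater than 0"));
    }
    if !(0.0..=1.0).contains(&depth_min) || !(0.0..=1.0).contains(&depth_max) {
        return Err(format!("viewport depth range [{depth_min}, {depth_max}] must lie within [0, 1]"));
    }
    Ok(())
}

fn main() {
    assert!(validate_viewport(800.0, 600.0, 0.0, 1.0).is_ok());
    assert!(validate_viewport(0.0, 600.0, 0.0, 1.0).is_err());    // zero width
    assert!(validate_viewport(800.0, 600.0, -0.1, 1.0).is_err()); // minDepth out of range
}
```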
let r = hal::Rect { let r = hal::Rect {
x: rect.x, x: rect.x,
@ -1510,7 +1563,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|| rect.x + rect.w > info.extent.width || rect.x + rect.w > info.extent.width
|| rect.y + rect.h > info.extent.height || rect.y + rect.h > info.extent.height
{ {
return Err(RenderCommandError::InvalidScissorRect).map_pass_err(scope); return Err(RenderCommandError::InvalidScissorRect(*rect, info.extent))
.map_pass_err(scope);
} }
let r = hal::Rect { let r = hal::Rect {
x: rect.x, x: rect.x,
@ -1827,6 +1881,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} => { } => {
let scope = PassErrorScope::WriteTimestamp; let scope = PassErrorScope::WriteTimestamp;
device
.require_features(wgt::Features::WRITE_TIMESTAMP_INSIDE_PASSES)
.map_pass_err(scope)?;
let query_set: &resource::QuerySet<A> = cmd_buf let query_set: &resource::QuerySet<A> = cmd_buf
.trackers .trackers
.query_sets .query_sets
@ -1886,10 +1944,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_err(RenderPassErrorInner::IncompatibleBundleTargets) .map_err(RenderPassErrorInner::IncompatibleBundleTargets)
.map_pass_err(scope)?; .map_pass_err(scope)?;
if info.is_ds_read_only != bundle.is_ds_read_only { if (info.is_depth_read_only && !bundle.is_depth_read_only)
|| (info.is_stencil_read_only && !bundle.is_stencil_read_only)
{
return Err(RenderPassErrorInner::IncompatibleBundleRods { return Err(RenderPassErrorInner::IncompatibleBundleRods {
pass: info.is_ds_read_only, pass_depth: info.is_depth_read_only,
bundle: bundle.is_ds_read_only, pass_stencil: info.is_stencil_read_only,
bundle_depth: bundle.is_depth_read_only,
bundle_stencil: bundle.is_stencil_read_only,
}) })
.map_pass_err(scope); .map_pass_err(scope);
} }

third_party/rust/wgpu-core/src/conv.rs (vendored)

@ -116,14 +116,7 @@ pub fn check_texture_dimension_size(
use wgt::TextureDimension::*; use wgt::TextureDimension::*;
let (extent_limits, sample_limit) = match dimension { let (extent_limits, sample_limit) = match dimension {
D1 => ( D1 => ([limits.max_texture_dimension_1d, 1, 1], 1),
[
limits.max_texture_dimension_1d,
1,
limits.max_texture_array_layers,
],
1,
),
D2 => ( D2 => (
[ [
limits.max_texture_dimension_2d, limits.max_texture_dimension_2d,


@ -224,6 +224,8 @@ struct ActiveSubmission<A: hal::Api> {
pub enum WaitIdleError { pub enum WaitIdleError {
#[error(transparent)] #[error(transparent)]
Device(#[from] DeviceError), Device(#[from] DeviceError),
#[error("Tried to wait using a submission index from the wrong device. Submission index is from device {0:?}. Called poll on device {1:?}.")]
WrongSubmissionIndex(id::QueueId, id::DeviceId),
#[error("GPU got stuck :(")] #[error("GPU got stuck :(")]
StuckGpu, StuckGpu,
} }
@ -459,7 +461,7 @@ impl<A: hal::Api> LifetimeTracker<A> {
impl<A: HalApi> LifetimeTracker<A> { impl<A: HalApi> LifetimeTracker<A> {
/// Identify resources to free, according to `trackers` and `self.suspected_resources`. /// Identify resources to free, according to `trackers` and `self.suspected_resources`.
/// ///
/// Given `trackers`, the [`TrackerSet`] belonging to same [`Device`] as /// Given `trackers`, the [`Tracker`] belonging to same [`Device`] as
/// `self`, and `hub`, the [`Hub`] to which that `Device` belongs: /// `self`, and `hub`, the [`Hub`] to which that `Device` belongs:
/// ///
/// Remove from `trackers` each resource mentioned in /// Remove from `trackers` each resource mentioned in

third_party/rust/wgpu-core/src/device/mod.rs (vendored)

@ -52,15 +52,15 @@ pub enum HostMap {
#[derive(Clone, Debug, Hash, PartialEq)] #[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))] #[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
pub(crate) struct AttachmentData<T> { pub(crate) struct AttachmentData<T> {
pub colors: ArrayVec<T, { hal::MAX_COLOR_TARGETS }>, pub colors: ArrayVec<Option<T>, { hal::MAX_COLOR_ATTACHMENTS }>,
pub resolves: ArrayVec<T, { hal::MAX_COLOR_TARGETS }>, pub resolves: ArrayVec<T, { hal::MAX_COLOR_ATTACHMENTS }>,
pub depth_stencil: Option<T>, pub depth_stencil: Option<T>,
} }
impl<T: PartialEq> Eq for AttachmentData<T> {} impl<T: PartialEq> Eq for AttachmentData<T> {}
impl<T> AttachmentData<T> { impl<T> AttachmentData<T> {
pub(crate) fn map<U, F: Fn(&T) -> U>(&self, fun: F) -> AttachmentData<U> { pub(crate) fn map<U, F: Fn(&T) -> U>(&self, fun: F) -> AttachmentData<U> {
AttachmentData { AttachmentData {
colors: self.colors.iter().map(&fun).collect(), colors: self.colors.iter().map(|c| c.as_ref().map(&fun)).collect(),
resolves: self.resolves.iter().map(&fun).collect(), resolves: self.resolves.iter().map(&fun).collect(),
depth_stencil: self.depth_stencil.as_ref().map(&fun), depth_stencil: self.depth_stencil.as_ref().map(&fun),
} }
@ -78,8 +78,8 @@ pub(crate) struct RenderPassContext {
pub enum RenderPassCompatibilityError { pub enum RenderPassCompatibilityError {
#[error("Incompatible color attachment: the renderpass expected {0:?} but was given {1:?}")] #[error("Incompatible color attachment: the renderpass expected {0:?} but was given {1:?}")]
IncompatibleColorAttachment( IncompatibleColorAttachment(
ArrayVec<TextureFormat, { hal::MAX_COLOR_TARGETS }>, ArrayVec<Option<TextureFormat>, { hal::MAX_COLOR_ATTACHMENTS }>,
ArrayVec<TextureFormat, { hal::MAX_COLOR_TARGETS }>, ArrayVec<Option<TextureFormat>, { hal::MAX_COLOR_ATTACHMENTS }>,
), ),
#[error( #[error(
"Incompatible depth-stencil attachment: the renderpass expected {0:?} but was given {1:?}" "Incompatible depth-stencil attachment: the renderpass expected {0:?} but was given {1:?}"
@ -428,6 +428,9 @@ impl<A: HalApi> Device<A> {
/// Check this device for completed commands. /// Check this device for completed commands.
/// ///
/// The `maintain` argument tells how the maintenance function should behave, either
/// blocking or just polling the current state of the gpu.
///
/// Return a pair `(closures, queue_empty)`, where: /// Return a pair `(closures, queue_empty)`, where:
/// ///
/// - `closures` is a list of actions to take: mapping buffers, notifying the user /// - `closures` is a list of actions to take: mapping buffers, notifying the user
@ -439,7 +442,7 @@ impl<A: HalApi> Device<A> {
fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>( fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
&'this self, &'this self,
hub: &Hub<A, G>, hub: &Hub<A, G>,
force_wait: bool, maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
token: &mut Token<'token, Self>, token: &mut Token<'token, Self>,
) -> Result<(UserClosures, bool), WaitIdleError> { ) -> Result<(UserClosures, bool), WaitIdleError> {
profiling::scope!("maintain", "Device"); profiling::scope!("maintain", "Device");
@ -463,14 +466,21 @@ impl<A: HalApi> Device<A> {
); );
life_tracker.triage_mapped(hub, token); life_tracker.triage_mapped(hub, token);
let last_done_index = if force_wait { let last_done_index = if maintain.is_wait() {
let current_index = self.active_submission_index; let index_to_wait_for = match maintain {
wgt::Maintain::WaitForSubmissionIndex(submission_index) => {
// We don't need to check to see if the queue id matches
// as we already checked this from inside the poll call.
submission_index.index
}
_ => self.active_submission_index,
};
unsafe { unsafe {
self.raw self.raw
.wait(&self.fence, current_index, CLEANUP_WAIT_MS) .wait(&self.fence, index_to_wait_for, CLEANUP_WAIT_MS)
.map_err(DeviceError::from)? .map_err(DeviceError::from)?
}; };
current_index index_to_wait_for
} else { } else {
unsafe { unsafe {
self.raw self.raw
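
A rough sketch of the poll/wait decision shown above: a caller-supplied submission index takes precedence, a plain wait falls back to the device's latest submission, and a pure poll waits on nothing. The enum here is a simplified stand-in for `wgt::Maintain`.

```rust
/// Simplified stand-in for the `Maintain` argument described above.
enum Maintain {
    Poll,
    Wait,
    WaitForSubmissionIndex(u64),
}

/// Which fence value, if any, the maintain call should block on.
fn index_to_wait_for(maintain: &Maintain, active_submission_index: u64) -> Option<u64> {
    match maintain {
        Maintain::Poll => None,
        Maintain::Wait => Some(active_submission_index),
        Maintain::WaitForSubmissionIndex(index) => Some(*index),
    }
}

fn main() {
    assert_eq!(index_to_wait_for(&Maintain::Wait, 42), Some(42));
    assert_eq!(index_to_wait_for(&Maintain::WaitForSubmissionIndex(7), 42), Some(7));
    assert_eq!(index_to_wait_for(&Maintain::Poll, 42), None);
}
```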
@ -566,6 +576,14 @@ impl<A: HalApi> Device<A> {
transient: bool, transient: bool,
) -> Result<resource::Buffer<A>, resource::CreateBufferError> { ) -> Result<resource::Buffer<A>, resource::CreateBufferError> {
debug_assert_eq!(self_id.backend(), A::VARIANT); debug_assert_eq!(self_id.backend(), A::VARIANT);
if desc.size > self.limits.max_buffer_size {
return Err(resource::CreateBufferError::MaxBufferSize {
requested: desc.size,
maximum: self.limits.max_buffer_size,
});
}
let mut usage = conv::map_buffer_usage(desc.usage); let mut usage = conv::map_buffer_usage(desc.usage);
if desc.usage.is_empty() { if desc.usage.is_empty() {
@ -668,47 +686,10 @@ impl<A: HalApi> Device<A> {
adapter: &crate::instance::Adapter<A>, adapter: &crate::instance::Adapter<A>,
desc: &resource::TextureDescriptor, desc: &resource::TextureDescriptor,
) -> Result<resource::Texture<A>, resource::CreateTextureError> { ) -> Result<resource::Texture<A>, resource::CreateTextureError> {
let format_desc = desc.format.describe(); use resource::{CreateTextureError, TextureDimensionError};
if desc.dimension != wgt::TextureDimension::D2 {
// Depth textures can only be 2D
if format_desc.sample_type == wgt::TextureSampleType::Depth {
return Err(resource::CreateTextureError::InvalidDepthDimension(
desc.dimension,
desc.format,
));
}
// Renderable textures can only be 2D
if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
return Err(resource::CreateTextureError::InvalidDimensionUsages(
wgt::TextureUsages::RENDER_ATTACHMENT,
desc.dimension,
));
}
// Compressed textures can only be 2D
if format_desc.is_compressed() {
return Err(resource::CreateTextureError::InvalidCompressedDimension(
desc.dimension,
desc.format,
));
}
}
let format_features = self
.describe_format_features(adapter, desc.format)
.map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))?;
if desc.usage.is_empty() { if desc.usage.is_empty() {
return Err(resource::CreateTextureError::EmptyUsage); return Err(CreateTextureError::EmptyUsage);
}
let missing_allowed_usages = desc.usage - format_features.allowed_usages;
if !missing_allowed_usages.is_empty() {
return Err(resource::CreateTextureError::InvalidFormatUsages(
missing_allowed_usages,
desc.format,
));
} }
conv::check_texture_dimension_size( conv::check_texture_dimension_size(
@ -718,15 +699,114 @@ impl<A: HalApi> Device<A> {
&self.limits, &self.limits,
)?; )?;
let format_desc = desc.format.describe();
if desc.dimension != wgt::TextureDimension::D2 {
// Depth textures can only be 2D
if format_desc.sample_type == wgt::TextureSampleType::Depth {
return Err(CreateTextureError::InvalidDepthDimension(
desc.dimension,
desc.format,
));
}
// Renderable textures can only be 2D
if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
return Err(CreateTextureError::InvalidDimensionUsages(
wgt::TextureUsages::RENDER_ATTACHMENT,
desc.dimension,
));
}
// Compressed textures can only be 2D
if format_desc.is_compressed() {
return Err(CreateTextureError::InvalidCompressedDimension(
desc.dimension,
desc.format,
));
}
}
if format_desc.is_compressed() {
let block_width = format_desc.block_dimensions.0 as u32;
let block_height = format_desc.block_dimensions.1 as u32;
if desc.size.width % block_width != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::NotMultipleOfBlockWidth {
width: desc.size.width,
block_width,
format: desc.format,
},
));
}
if desc.size.height % block_height != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::NotMultipleOfBlockHeight {
height: desc.size.height,
block_height,
format: desc.format,
},
));
}
}
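
A minimal sketch of the block-alignment rule enforced above: for a block-compressed format (a 4x4 block is assumed purely for the example), width and height must be whole multiples of the block dimensions.

```rust
/// Width/height must be whole multiples of the compression block dimensions.
fn check_compressed_extent(width: u32, height: u32, block: (u32, u32)) -> Result<(), String> {
    if width % block.0 != 0 {
        return Err(format!("width {width} is not a multiple of the block width {}", block.0));
    }
    if height % block.1 != 0 {
        return Err(format!("height {height} is not a multiple of the block height {}", block.1));
    }
    Ok(())
}

fn main() {
    assert!(check_compressed_extent(256, 128, (4, 4)).is_ok());
    assert!(check_compressed_extent(255, 128, (4, 4)).is_err()); // 255 % 4 != 0
}
```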
if desc.sample_count > 1 {
if desc.mip_level_count != 1 {
return Err(CreateTextureError::InvalidMipLevelCount {
requested: desc.mip_level_count,
maximum: 1,
});
}
if desc.size.depth_or_array_layers != 1 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::MultisampledDepthOrArrayLayer(
desc.size.depth_or_array_layers,
),
));
}
if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) {
return Err(CreateTextureError::InvalidMultisampledStorageBinding);
}
if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
return Err(CreateTextureError::MultisampledNotRenderAttachment);
}
if !format_desc
.guaranteed_format_features
.flags
.contains(wgt::TextureFormatFeatureFlags::MULTISAMPLE)
{
return Err(CreateTextureError::InvalidMultisampledFormat(desc.format));
}
}
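
The multisampling branch above enforces several constraints at once; below is a condensed sketch of the same checks over a simplified descriptor (field names are illustrative, and the per-format MULTISAMPLE feature check is omitted).

```rust
/// Simplified texture descriptor for the sketch.
struct Desc {
    sample_count: u32,
    mip_level_count: u32,
    depth_or_array_layers: u32,
    storage_binding: bool,
    render_attachment: bool,
}

fn check_multisampled(d: &Desc) -> Result<(), &'static str> {
    if d.sample_count <= 1 {
        return Ok(()); // nothing extra to check for single-sampled textures
    }
    if d.mip_level_count != 1 {
        return Err("multisampled textures must have exactly one mip level");
    }
    if d.depth_or_array_layers != 1 {
        return Err("multisampled textures cannot be arrays or 3D");
    }
    if d.storage_binding {
        return Err("multisampled textures cannot be storage bindings");
    }
    if !d.render_attachment {
        return Err("multisampled textures must be usable as render attachments");
    }
    Ok(())
}

fn main() {
    let ok = Desc {
        sample_count: 4,
        mip_level_count: 1,
        depth_or_array_layers: 1,
        storage_binding: false,
        render_attachment: true,
    };
    assert!(check_multisampled(&ok).is_ok());
    assert!(check_multisampled(&Desc { mip_level_count: 3, ..ok }).is_err());
}
```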
let mips = desc.mip_level_count; let mips = desc.mip_level_count;
let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS); let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS);
if mips == 0 || mips > max_levels_allowed { if mips == 0 || mips > max_levels_allowed {
return Err(resource::CreateTextureError::InvalidMipLevelCount { return Err(CreateTextureError::InvalidMipLevelCount {
requested: mips, requested: mips,
maximum: max_levels_allowed, maximum: max_levels_allowed,
}); });
} }
let format_features = self
.describe_format_features(adapter, desc.format)
.map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;
let missing_allowed_usages = desc.usage - format_features.allowed_usages;
if !missing_allowed_usages.is_empty() {
return Err(CreateTextureError::InvalidFormatUsages(
missing_allowed_usages,
desc.format,
));
}
// TODO: validate missing TextureDescriptor::view_formats.
// Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we wouldn't be able to initialize the texture. // Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we wouldn't be able to initialize the texture.
let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into()) let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into())
| if format_desc.sample_type == wgt::TextureSampleType::Depth { | if format_desc.sample_type == wgt::TextureSampleType::Depth {
@ -1117,6 +1197,18 @@ impl<A: HalApi> Device<A> {
} }
pipeline::ShaderModuleSource::Naga(module) => (module, String::new()), pipeline::ShaderModuleSource::Naga(module) => (module, String::new()),
}; };
for (_, var) in module.global_variables.iter() {
match var.binding {
Some(ref br) if br.group >= self.limits.max_bind_groups => {
return Err(pipeline::CreateShaderModuleError::InvalidGroupIndex {
bind: br.clone(),
group: br.group,
limit: self.limits.max_bind_groups,
});
}
_ => continue,
};
}
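
A small sketch of the new shader-module validation above: every resource binding declared by the module must use a bind group index below the device's `max_bind_groups` limit. The types are local stand-ins for naga's binding data.

```rust
/// Stand-in for a shader resource binding (`group`, `binding`).
struct ResourceBinding {
    group: u32,
    binding: u32,
}

fn check_bind_group_indices(bindings: &[ResourceBinding], max_bind_groups: u32) -> Result<(), String> {
    for b in bindings {
        if b.group >= max_bind_groups {
            return Err(format!(
                "binding {} uses bind group {}, but the device limit is {} groups",
                b.binding, b.group, max_bind_groups
            ));
        }
    }
    Ok(())
}

fn main() {
    let bindings = [
        ResourceBinding { group: 0, binding: 0 },
        ResourceBinding { group: 4, binding: 1 },
    ];
    // Group indices are zero-based, so group 4 is out of range when the limit is 4.
    assert!(check_bind_group_indices(&bindings, 4).is_err());
    assert!(check_bind_group_indices(&bindings[..1], 4).is_ok());
}
```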
use naga::valid::Capabilities as Caps; use naga::valid::Capabilities as Caps;
profiling::scope!("naga::validate"); profiling::scope!("naga::validate");
@ -1370,6 +1462,20 @@ impl<A: HalApi> Device<A> {
} }
_ => (), _ => (),
} }
match access {
wgt::StorageTextureAccess::ReadOnly
| wgt::StorageTextureAccess::ReadWrite
if !self.features.contains(
wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
) =>
{
return Err(binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error: binding_model::BindGroupLayoutEntryError::StorageTextureReadWrite,
});
}
_ => (),
}
( (
Some( Some(
wgt::Features::TEXTURE_BINDING_ARRAY wgt::Features::TEXTURE_BINDING_ARRAY
@ -2359,9 +2465,11 @@ impl<A: HalApi> Device<A> {
.map_or(&[][..], |fragment| &fragment.targets); .map_or(&[][..], |fragment| &fragment.targets);
let depth_stencil_state = desc.depth_stencil.as_ref(); let depth_stencil_state = desc.depth_stencil.as_ref();
if !color_targets.is_empty() && { let cts: ArrayVec<_, { hal::MAX_COLOR_ATTACHMENTS }> =
let first = &color_targets[0]; color_targets.iter().filter_map(|x| x.as_ref()).collect();
color_targets[1..] if !cts.is_empty() && {
let first = &cts[0];
cts[1..]
.iter() .iter()
.any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend) .any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend)
} { } {
@ -2372,13 +2480,14 @@ impl<A: HalApi> Device<A> {
let mut io = validation::StageIo::default(); let mut io = validation::StageIo::default();
let mut validated_stages = wgt::ShaderStages::empty(); let mut validated_stages = wgt::ShaderStages::empty();
let mut vertex_strides = Vec::with_capacity(desc.vertex.buffers.len()); let mut vertex_steps = Vec::with_capacity(desc.vertex.buffers.len());
let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len()); let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len());
let mut total_attributes = 0; let mut total_attributes = 0;
for (i, vb_state) in desc.vertex.buffers.iter().enumerate() { for (i, vb_state) in desc.vertex.buffers.iter().enumerate() {
vertex_strides vertex_steps.alloc().init(pipeline::VertexStep {
.alloc() stride: vb_state.array_stride,
.init((vb_state.array_stride, vb_state.step_mode)); mode: vb_state.step_mode,
});
if vb_state.attributes.is_empty() { if vb_state.attributes.is_empty() {
continue; continue;
} }
@ -2473,29 +2582,32 @@ impl<A: HalApi> Device<A> {
} }
for (i, cs) in color_targets.iter().enumerate() { for (i, cs) in color_targets.iter().enumerate() {
let error = loop { if let Some(cs) = cs.as_ref() {
let format_features = self.describe_format_features(adapter, cs.format)?; let error = loop {
if !format_features let format_features = self.describe_format_features(adapter, cs.format)?;
.allowed_usages if !format_features
.contains(wgt::TextureUsages::RENDER_ATTACHMENT) .allowed_usages
{ .contains(wgt::TextureUsages::RENDER_ATTACHMENT)
break Some(pipeline::ColorStateError::FormatNotRenderable(cs.format)); {
} break Some(pipeline::ColorStateError::FormatNotRenderable(cs.format));
if cs.blend.is_some() && !format_features.flags.contains(Tfff::FILTERABLE) { }
break Some(pipeline::ColorStateError::FormatNotBlendable(cs.format)); if cs.blend.is_some() && !format_features.flags.contains(Tfff::FILTERABLE) {
} break Some(pipeline::ColorStateError::FormatNotBlendable(cs.format));
if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) { }
break Some(pipeline::ColorStateError::FormatNotColor(cs.format)); if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) {
} break Some(pipeline::ColorStateError::FormatNotColor(cs.format));
if desc.multisample.count > 1 && !format_features.flags.contains(Tfff::MULTISAMPLE) }
{ if desc.multisample.count > 1
break Some(pipeline::ColorStateError::FormatNotMultisampled(cs.format)); && !format_features.flags.contains(Tfff::MULTISAMPLE)
} {
break Some(pipeline::ColorStateError::FormatNotMultisampled(cs.format));
}
break None; break None;
}; };
if let Some(e) = error { if let Some(e) = error {
return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e)); return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e));
}
} }
} }
@ -2647,13 +2759,13 @@ impl<A: HalApi> Device<A> {
}; };
if validated_stages.contains(wgt::ShaderStages::FRAGMENT) { if validated_stages.contains(wgt::ShaderStages::FRAGMENT) {
for (i, state) in color_targets.iter().enumerate() { for (i, output) in io.iter() {
match io.get(&(i as wgt::ShaderLocation)) { match color_targets.get(*i as usize) {
Some(output) => { Some(&Some(ref state)) => {
validation::check_texture_format(state.format, &output.ty).map_err( validation::check_texture_format(state.format, &output.ty).map_err(
|pipeline| { |pipeline| {
pipeline::CreateRenderPipelineError::ColorState( pipeline::CreateRenderPipelineError::ColorState(
i as u8, *i as u8,
pipeline::ColorStateError::IncompatibleFormat { pipeline::ColorStateError::IncompatibleFormat {
pipeline, pipeline,
shader: output.ty, shader: output.ty,
@ -2662,11 +2774,14 @@ impl<A: HalApi> Device<A> {
}, },
)?; )?;
} }
None if state.write_mask.is_empty() => {} Some(&None) => {
None => { return Err(
log::warn!("Missing fragment output[{}], expected {:?}", i, state,); pipeline::CreateRenderPipelineError::InvalidFragmentOutputLocation(*i),
);
}
_ => {
return Err(pipeline::CreateRenderPipelineError::ColorState( return Err(pipeline::CreateRenderPipelineError::ColorState(
i as u8, *i as u8,
pipeline::ColorStateError::Missing, pipeline::ColorStateError::Missing,
)); ));
} }
@ -2700,6 +2815,14 @@ impl<A: HalApi> Device<A> {
self.require_features(wgt::Features::MULTIVIEW)?; self.require_features(wgt::Features::MULTIVIEW)?;
} }
for size in shader_binding_sizes.values() {
if size.get() % 16 != 0 {
self.require_downlevel_flags(
wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED,
)?;
}
}
let late_sized_buffer_groups = let late_sized_buffer_groups =
Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard);
@ -2735,7 +2858,10 @@ impl<A: HalApi> Device<A> {
let pass_context = RenderPassContext { let pass_context = RenderPassContext {
attachments: AttachmentData { attachments: AttachmentData {
colors: color_targets.iter().map(|state| state.format).collect(), colors: color_targets
.iter()
.map(|state| state.as_ref().map(|s| s.format))
.collect(),
resolves: ArrayVec::new(), resolves: ArrayVec::new(),
depth_stencil: depth_stencil_state.as_ref().map(|state| state.format), depth_stencil: depth_stencil_state.as_ref().map(|state| state.format),
}, },
@ -2744,7 +2870,7 @@ impl<A: HalApi> Device<A> {
}; };
let mut flags = pipeline::PipelineFlags::empty(); let mut flags = pipeline::PipelineFlags::empty();
for state in color_targets.iter() { for state in color_targets.iter().filter_map(|s| s.as_ref()) {
if let Some(ref bs) = state.blend { if let Some(ref bs) = state.blend {
if bs.color.uses_constant() | bs.alpha.uses_constant() { if bs.color.uses_constant() | bs.alpha.uses_constant() {
flags |= pipeline::PipelineFlags::BLEND_CONSTANT; flags |= pipeline::PipelineFlags::BLEND_CONSTANT;
@ -2755,8 +2881,11 @@ impl<A: HalApi> Device<A> {
if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() { if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() {
flags |= pipeline::PipelineFlags::STENCIL_REFERENCE; flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
} }
if !ds.is_read_only() { if !ds.is_depth_read_only() {
flags |= pipeline::PipelineFlags::WRITES_DEPTH_STENCIL; flags |= pipeline::PipelineFlags::WRITES_DEPTH;
}
if !ds.is_stencil_read_only() {
flags |= pipeline::PipelineFlags::WRITES_STENCIL;
} }
} }
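Splitting WRITES_DEPTH_STENCIL into WRITES_DEPTH and WRITES_STENCIL lets a pipeline be read-only for one aspect while still writing the other. A minimal sketch of a descriptor that would now set only WRITES_STENCIL, assuming the wgpu-types field names of this release (wgt = wgpu_types):

// Hedged sketch: depth writes are disabled, but the stencil face ops do
// write, so under the new flags only PipelineFlags::WRITES_STENCIL applies.
fn stencil_only_writes() -> wgt::DepthStencilState {
    wgt::DepthStencilState {
        format: wgt::TextureFormat::Depth24PlusStencil8,
        depth_write_enabled: false,
        depth_compare: wgt::CompareFunction::LessEqual,
        stencil: wgt::StencilState {
            front: wgt::StencilFaceState {
                compare: wgt::CompareFunction::Always,
                fail_op: wgt::StencilOperation::Keep,
                depth_fail_op: wgt::StencilOperation::Keep,
                pass_op: wgt::StencilOperation::Replace,
            },
            back: wgt::StencilFaceState::IGNORE,
            read_mask: 0xFF,
            write_mask: 0xFF,
        },
        bias: wgt::DepthBiasState::default(),
    }
}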
@ -2773,7 +2902,7 @@ impl<A: HalApi> Device<A> {
pass_context, pass_context,
flags, flags,
strip_index_format: desc.primitive.strip_index_format, strip_index_format: desc.primitive.strip_index_format,
vertex_strides, vertex_steps,
late_sized_buffer_groups, late_sized_buffer_groups,
life_guard: LifeGuard::new(desc.label.borrow_or_default()), life_guard: LifeGuard::new(desc.label.borrow_or_default()),
}; };
@ -3003,12 +3132,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?; .map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?;
Ok(adapter.is_surface_supported(surface)) Ok(adapter.is_surface_supported(surface))
} }
pub fn surface_get_preferred_format<A: HalApi>( pub fn surface_get_supported_formats<A: HalApi>(
&self, &self,
surface_id: id::SurfaceId, surface_id: id::SurfaceId,
adapter_id: id::AdapterId, adapter_id: id::AdapterId,
) -> Result<TextureFormat, instance::GetSurfacePreferredFormatError> { ) -> Result<Vec<TextureFormat>, instance::GetSurfacePreferredFormatError> {
profiling::scope!("surface_get_preferred_format"); profiling::scope!("Surface::get_supported_formats");
let hub = A::hub(self); let hub = A::hub(self);
let mut token = Token::root(); let mut token = Token::root();
@ -3021,7 +3150,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.get(surface_id) .get(surface_id)
.map_err(|_| instance::GetSurfacePreferredFormatError::InvalidSurface)?; .map_err(|_| instance::GetSurfacePreferredFormatError::InvalidSurface)?;
surface.get_preferred_format(adapter) surface.get_supported_formats(adapter)
} }
pub fn device_features<A: HalApi>( pub fn device_features<A: HalApi>(
@ -3221,6 +3350,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
fid.assign_error(label.borrow_or_default(), &mut token); fid.assign_error(label.borrow_or_default(), &mut token);
} }
/// Assign `id_in` an error with the given `label`.
///
/// See `create_buffer_error` for more context and explanation.
pub fn create_texture_error<A: HalApi>(&self, id_in: Input<G, id::TextureId>, label: Label) {
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.textures.prepare(id_in);
let (_, mut token) = hub.devices.read(&mut token);
fid.assign_error(label.borrow_or_default(), &mut token);
}
#[cfg(feature = "replay")] #[cfg(feature = "replay")]
pub fn device_wait_for_buffer<A: HalApi>( pub fn device_wait_for_buffer<A: HalApi>(
&self, &self,
@ -4368,7 +4509,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
desc: trace::new_render_bundle_encoder_descriptor( desc: trace::new_render_bundle_encoder_descriptor(
desc.label.clone(), desc.label.clone(),
&bundle_encoder.context, &bundle_encoder.context,
bundle_encoder.is_ds_read_only, bundle_encoder.is_depth_read_only,
bundle_encoder.is_stencil_read_only,
), ),
base: bundle_encoder.to_base_pass(), base: bundle_encoder.to_base_pass(),
}); });
@ -4957,16 +5099,25 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn device_poll<A: HalApi>( pub fn device_poll<A: HalApi>(
&self, &self,
device_id: id::DeviceId, device_id: id::DeviceId,
force_wait: bool, maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
) -> Result<bool, WaitIdleError> { ) -> Result<bool, WaitIdleError> {
let (closures, queue_empty) = { let (closures, queue_empty) = {
if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
if submission_index.queue_id != device_id {
return Err(WaitIdleError::WrongSubmissionIndex(
submission_index.queue_id,
device_id,
));
}
}
let hub = A::hub(self); let hub = A::hub(self);
let mut token = Token::root(); let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token); let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard device_guard
.get(device_id) .get(device_id)
.map_err(|_| DeviceError::Invalid)? .map_err(|_| DeviceError::Invalid)?
.maintain(hub, force_wait, &mut token)? .maintain(hub, maintain, &mut token)?
}; };
closures.fire(); closures.fire();
@ -4994,7 +5145,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (device_guard, mut token) = hub.devices.read(&mut token); let (device_guard, mut token) = hub.devices.read(&mut token);
for (id, device) in device_guard.iter(A::VARIANT) { for (id, device) in device_guard.iter(A::VARIANT) {
let (cbs, queue_empty) = device.maintain(hub, force_wait, &mut token)?; let maintain = if force_wait {
wgt::Maintain::Wait
} else {
wgt::Maintain::Poll
};
let (cbs, queue_empty) = device.maintain(hub, maintain, &mut token)?;
all_queue_empty = all_queue_empty && queue_empty; all_queue_empty = all_queue_empty && queue_empty;
// If the device's own `RefCount` clone is the only one left, and // If the device's own `RefCount` clone is the only one left, and


@ -12,7 +12,7 @@ use crate::{
id, id,
init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange}, init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
resource::{BufferAccessError, BufferMapState, TextureInner}, resource::{BufferAccessError, BufferMapState, TextureInner},
track, FastHashSet, track, FastHashSet, SubmissionIndex,
}; };
use hal::{CommandEncoder as _, Device as _, Queue as _}; use hal::{CommandEncoder as _, Device as _, Queue as _};
@ -30,8 +30,8 @@ const WRITE_COMMAND_BUFFERS_PER_POOL: usize = 64;
#[repr(C)] #[repr(C)]
pub struct SubmittedWorkDoneClosureC { pub struct SubmittedWorkDoneClosureC {
callback: unsafe extern "C" fn(user_data: *mut u8), pub callback: unsafe extern "C" fn(user_data: *mut u8),
user_data: *mut u8, pub user_data: *mut u8,
} }
unsafe impl Send for SubmittedWorkDoneClosureC {} unsafe impl Send for SubmittedWorkDoneClosureC {}
@ -79,6 +79,13 @@ impl SubmittedWorkDoneClosure {
} }
} }
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct WrappedSubmissionIndex {
pub queue_id: id::QueueId,
pub index: SubmissionIndex,
}
struct StagingData<A: hal::Api> { struct StagingData<A: hal::Api> {
buffer: A::Buffer, buffer: A::Buffer,
} }
@ -620,10 +627,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self, &self,
queue_id: id::QueueId, queue_id: id::QueueId,
command_buffer_ids: &[id::CommandBufferId], command_buffer_ids: &[id::CommandBufferId],
) -> Result<(), QueueSubmitError> { ) -> Result<WrappedSubmissionIndex, QueueSubmitError> {
profiling::scope!("submit", "Queue"); profiling::scope!("submit", "Queue");
let callbacks = { let (submit_index, callbacks) = {
let hub = A::hub(self); let hub = A::hub(self);
let mut token = Token::root(); let mut token = Token::root();
@ -958,23 +965,27 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// This will schedule destruction of all resources that are no longer needed // This will schedule destruction of all resources that are no longer needed
// by the user but used in the command stream, among other things. // by the user but used in the command stream, among other things.
let (closures, _) = match device.maintain(hub, false, &mut token) { let (closures, _) = match device.maintain(hub, wgt::Maintain::Wait, &mut token) {
Ok(closures) => closures, Ok(closures) => closures,
Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)), Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu), Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
Err(WaitIdleError::WrongSubmissionIndex(..)) => unreachable!(),
}; };
device.pending_writes.temp_resources = pending_write_resources; device.pending_writes.temp_resources = pending_write_resources;
device.temp_suspected.clear(); device.temp_suspected.clear();
device.lock_life(&mut token).post_submit(); device.lock_life(&mut token).post_submit();
closures (submit_index, closures)
}; };
// the closures should execute with nothing locked! // the closures should execute with nothing locked!
callbacks.fire(); callbacks.fire();
Ok(()) Ok(WrappedSubmissionIndex {
queue_id,
index: submit_index,
})
} }
pub fn queue_get_timestamp_period<A: HalApi>( pub fn queue_get_timestamp_period<A: HalApi>(
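At the public wgpu level this plumbing shows up as queue.submit returning a submission index that device.poll can wait on. A hedged sketch, assuming the wgpu 0.13-style wrappers over these wgpu-core entry points:

// Hedged sketch: wait for one specific submission instead of draining the
// whole queue; Maintain::WaitForSubmissionIndex is the variant added here.
fn submit_and_wait(device: &wgpu::Device, queue: &wgpu::Queue, cmd: wgpu::CommandBuffer) {
    let index = queue.submit(std::iter::once(cmd));
    device.poll(wgpu::Maintain::WaitForSubmissionIndex(index));
}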


@ -13,17 +13,17 @@ pub const FILE_NAME: &str = "trace.ron";
pub(crate) fn new_render_bundle_encoder_descriptor<'a>( pub(crate) fn new_render_bundle_encoder_descriptor<'a>(
label: crate::Label<'a>, label: crate::Label<'a>,
context: &'a super::RenderPassContext, context: &'a super::RenderPassContext,
is_ds_read_only: bool, depth_read_only: bool,
stencil_read_only: bool,
) -> crate::command::RenderBundleEncoderDescriptor<'a> { ) -> crate::command::RenderBundleEncoderDescriptor<'a> {
crate::command::RenderBundleEncoderDescriptor { crate::command::RenderBundleEncoderDescriptor {
label, label,
color_formats: Cow::Borrowed(&context.attachments.colors), color_formats: Cow::Borrowed(&context.attachments.colors),
depth_stencil: context.attachments.depth_stencil.map(|format| { depth_stencil: context.attachments.depth_stencil.map(|format| {
let aspects = hal::FormatAspects::from(format);
wgt::RenderBundleDepthStencil { wgt::RenderBundleDepthStencil {
format, format,
depth_read_only: is_ds_read_only && aspects.contains(hal::FormatAspects::DEPTH), depth_read_only,
stencil_read_only: is_ds_read_only && aspects.contains(hal::FormatAspects::STENCIL), stencil_read_only,
} }
}), }),
sample_count: context.sample_count, sample_count: context.sample_count,
@ -176,7 +176,7 @@ pub enum Command {
}, },
RunRenderPass { RunRenderPass {
base: crate::command::BasePass<crate::command::RenderCommand>, base: crate::command::BasePass<crate::command::RenderCommand>,
target_colors: Vec<crate::command::RenderPassColorAttachment>, target_colors: Vec<Option<crate::command::RenderPassColorAttachment>>,
target_depth_stencil: Option<crate::command::RenderPassDepthStencilAttachment>, target_depth_stencil: Option<crate::command::RenderPassDepthStencilAttachment>,
}, },
} }

third_party/rust/wgpu-core/src/hub.rs (vendored)

@ -930,6 +930,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal_instance_callback(hal_instance) hal_instance_callback(hal_instance)
} }
/// # Safety
///
/// - The raw handles obtained from the Instance must not be manually destroyed
pub unsafe fn from_instance(factory: G, instance: Instance) -> Self {
profiling::scope!("new", "Global");
Self {
instance,
surfaces: Registry::without_backend(&factory, "Surface"),
hubs: Hubs::new(&factory),
}
}
pub fn clear_backend<A: HalApi>(&self, _dummy: ()) { pub fn clear_backend<A: HalApi>(&self, _dummy: ()) {
let mut surface_guard = self.surfaces.data.write(); let mut surface_guard = self.surfaces.data.write();
let hub = A::hub(self); let hub = A::hub(self);

third_party/rust/wgpu-core/src/id.rs (vendored)

@ -22,6 +22,35 @@ const BACKEND_SHIFT: usize = INDEX_BITS * 2 - BACKEND_BITS;
pub const EPOCH_MASK: u32 = (1 << (EPOCH_BITS)) - 1; pub const EPOCH_MASK: u32 = (1 << (EPOCH_BITS)) - 1;
type Dummy = hal::api::Empty; type Dummy = hal::api::Empty;
/// An identifier for a wgpu object.
///
/// An `Id<T>` value identifies a value stored in a [`Global`]'s [`Hub`]'s [`Storage`].
/// `Storage` implements [`Index`] and [`IndexMut`], accepting `Id` values as indices.
///
/// ## Note on `Id` typing
///
/// You might assume that an `Id<T>` can only be used to retrieve a resource of
/// type `T`, but that is not quite the case. The id types in `wgpu-core`'s
/// public API ([`TextureId`], for example) can refer to resources belonging to
/// any backend, but the corresponding resource types ([`Texture<A>`], for
/// example) are always parameterized by a specific backend `A`.
///
/// So the `T` in `Id<T>` is usually a resource type like `Texture<Empty>`,
/// where [`Empty`] is the `wgpu_hal` dummy back end. These empty types are
/// never actually used, beyond just making sure you access each `Storage` with
/// the right kind of identifier. The members of [`Hub<A>`] pair up each
/// `X<Empty>` type with the resource type `X<A>`, for some specific backend
/// `A`.
///
/// [`Global`]: crate::hub::Global
/// [`Hub`]: crate::hub::Hub
/// [`Hub<A>`]: crate::hub::Hub
/// [`Storage`]: crate::hub::Storage
/// [`Texture<A>`]: crate::resource::Texture
/// [`Index`]: std::ops::Index
/// [`IndexMut`]: std::ops::IndexMut
/// [`Registry`]: crate::hub::Registry
/// [`Empty`]: hal::api::Empty
#[repr(transparent)] #[repr(transparent)]
#[cfg_attr(feature = "trace", derive(serde::Serialize), serde(into = "SerialId"))] #[cfg_attr(feature = "trace", derive(serde::Serialize), serde(into = "SerialId"))]
#[cfg_attr( #[cfg_attr(
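The documented pattern is essentially a phantom-typed index. A self-contained toy illustration of why the `T` in `Id<T>` only disambiguates storages and never stores data (toy types, not the actual wgpu-core implementation):

use std::marker::PhantomData;

// Toy stand-ins for Id<T>/Storage<T>: the type parameter exists purely so a
// "texture id" cannot be used to index a "buffer storage".
struct Id<T>(usize, PhantomData<T>);

struct Storage<T> {
    items: Vec<T>,
}

impl<T> Storage<T> {
    fn insert(&mut self, value: T) -> Id<T> {
        self.items.push(value);
        Id(self.items.len() - 1, PhantomData)
    }
    fn get(&self, id: Id<T>) -> &T {
        &self.items[id.0]
    }
}

fn main() {
    let mut textures = Storage::<String> { items: Vec::new() };
    let id = textures.insert("depth target".to_string());
    assert_eq!(textures.get(id), "depth target");
}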

third_party/rust/wgpu-core/src/instance.rs (vendored)

@ -23,8 +23,8 @@ pub struct HalSurface<A: hal::Api> {
#[error("Limit '{name}' value {requested} is better than allowed {allowed}")] #[error("Limit '{name}' value {requested} is better than allowed {allowed}")]
pub struct FailedLimit { pub struct FailedLimit {
name: &'static str, name: &'static str,
requested: u32, requested: u64,
allowed: u32, allowed: u64,
} }
fn check_limits(requested: &wgt::Limits, allowed: &wgt::Limits) -> Vec<FailedLimit> { fn check_limits(requested: &wgt::Limits, allowed: &wgt::Limits) -> Vec<FailedLimit> {
@ -152,23 +152,12 @@ impl crate::hub::Resource for Surface {
} }
impl Surface { impl Surface {
pub fn get_preferred_format<A: HalApi>( pub fn get_supported_formats<A: HalApi>(
&self, &self,
adapter: &Adapter<A>, adapter: &Adapter<A>,
) -> Result<wgt::TextureFormat, GetSurfacePreferredFormatError> { ) -> Result<Vec<wgt::TextureFormat>, GetSurfacePreferredFormatError> {
// Check the four formats mentioned in the WebGPU spec.
// Also, prefer sRGB over linear as it is better in
// representing perceived colors.
let preferred_formats = [
wgt::TextureFormat::Bgra8UnormSrgb,
wgt::TextureFormat::Rgba8UnormSrgb,
wgt::TextureFormat::Bgra8Unorm,
wgt::TextureFormat::Rgba8Unorm,
wgt::TextureFormat::Rgba16Float,
];
let suf = A::get_surface(self); let suf = A::get_surface(self);
let caps = unsafe { let mut caps = unsafe {
profiling::scope!("surface_capabilities"); profiling::scope!("surface_capabilities");
adapter adapter
.raw .raw
@ -177,11 +166,10 @@ impl Surface {
.ok_or(GetSurfacePreferredFormatError::UnsupportedQueueFamily)? .ok_or(GetSurfacePreferredFormatError::UnsupportedQueueFamily)?
}; };
preferred_formats // TODO: maybe remove once we support texture view changing srgb-ness
.iter() caps.formats.sort_by_key(|f| !f.describe().srgb);
.cloned()
.find(|preferred| caps.formats.contains(preferred)) Ok(caps.formats)
.ok_or(GetSurfacePreferredFormatError::NotFound)
} }
} }
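Callers now receive the full list of supported formats, with sRGB variants sorted to the front, so the old single-format behaviour becomes "take the first entry". A hedged sketch against the public wgpu wrapper of this call (names assumed from the matching wgpu release):

// Hedged sketch: the first entry plays the role of the old "preferred" format
// because sRGB formats are sorted to the front of the returned Vec.
fn choose_surface_format(
    surface: &wgpu::Surface,
    adapter: &wgpu::Adapter,
) -> Option<wgpu::TextureFormat> {
    surface.get_supported_formats(adapter).first().copied()
}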
@ -354,8 +342,6 @@ pub enum IsSurfaceSupportedError {
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
pub enum GetSurfacePreferredFormatError { pub enum GetSurfacePreferredFormatError {
#[error("no suitable format found")]
NotFound,
#[error("invalid adapter")] #[error("invalid adapter")]
InvalidAdapter, InvalidAdapter,
#[error("invalid surface")] #[error("invalid surface")]
@ -493,6 +479,52 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
id.0 id.0
} }
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
pub fn create_surface_webgl_canvas(
&self,
canvas: &web_sys::HtmlCanvasElement,
id_in: Input<G, SurfaceId>,
) -> SurfaceId {
profiling::scope!("create_surface_webgl_canvas", "Instance");
let surface = Surface {
presentation: None,
gl: self.instance.gl.as_ref().map(|inst| HalSurface {
raw: {
inst.create_surface_from_canvas(canvas)
.expect("Create surface from canvas")
},
}),
};
let mut token = Token::root();
let id = self.surfaces.prepare(id_in).assign(surface, &mut token);
id.0
}
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
pub fn create_surface_webgl_offscreen_canvas(
&self,
canvas: &web_sys::OffscreenCanvas,
id_in: Input<G, SurfaceId>,
) -> SurfaceId {
profiling::scope!("create_surface_webgl_offscreen_canvas", "Instance");
let surface = Surface {
presentation: None,
gl: self.instance.gl.as_ref().map(|inst| HalSurface {
raw: {
inst.create_surface_from_offscreen_canvas(canvas)
.expect("Create surface from offscreen canvas")
},
}),
};
let mut token = Token::root();
let id = self.surfaces.prepare(id_in).assign(surface, &mut token);
id.0
}
#[cfg(dx12)] #[cfg(dx12)]
/// # Safety /// # Safety
/// ///

third_party/rust/wgpu-core/src/lib.rs (vendored)

@ -48,7 +48,7 @@ pub mod resource;
mod track; mod track;
mod validation; mod validation;
pub use hal::{api, MAX_BIND_GROUPS, MAX_COLOR_TARGETS, MAX_VERTEX_BUFFERS}; pub use hal::{api, MAX_BIND_GROUPS, MAX_COLOR_ATTACHMENTS, MAX_VERTEX_BUFFERS};
use atomic::{AtomicUsize, Ordering}; use atomic::{AtomicUsize, Ordering};
@ -238,36 +238,50 @@ If you are running this program on native and not in a browser and wish to work
Adapter::downlevel_properties or Device::downlevel_properties to get a listing of the features the current \ Adapter::downlevel_properties or Device::downlevel_properties to get a listing of the features the current \
platform supports."; platform supports.";
/// Call a `Global` method, dispatching dynamically to the appropriate back end. /// Dispatch on an [`Id`]'s backend to a backend-generic method.
/// ///
/// Uses of this macro have the form: /// Uses of this macro have the form:
/// ///
/// ```ignore /// ```ignore
/// ///
/// gfx_select!(id => global.method(args...)) /// gfx_select!(id => value.method(args...))
/// ///
/// ``` /// ```
/// ///
/// where `id` is some [`id::Id`] resource id, `global` is a [`hub::Global`], /// This expands to an expression that calls `value.method::<A>(args...)` for
/// and `method` is any method on [`Global`] that takes a single generic /// the backend `A` selected by `id`. The expansion matches on `id.backend()`,
/// parameter that implements [`hal::Api`] (for example, /// with an arm for each backend type in [`wgpu_types::Backend`] which calls the
/// [`Global::device_create_buffer`]). /// specialization of `method` for the given backend. This allows resource
/// identifiers to select backends dynamically, even though many `wgpu_core`
/// methods are compiled and optimized for a specific back end.
/// ///
/// The `wgpu-core` crate can support multiple back ends simultaneously (Vulkan, /// This macro is typically used to call methods on [`wgpu_core::hub::Global`],
/// Metal, etc.), depending on features and availability. Each [`Id`]'s value /// many of which take a single `hal::Api` type parameter. For example, to
/// indicates which back end its resource belongs to. This macro does a switch /// create a new buffer on the device indicated by `device_id`, one would say:
/// on `id`'s back end, and calls the `Global` method specialized for that back
/// end.
/// ///
/// Internally to `wgpu-core`, most types take the back end (some type that /// ```ignore
/// implements `hal::Api`) as a generic parameter, so their methods are compiled /// gfx_select!(device_id => global.device_create_buffer(device_id, ...))
/// with full knowledge of which back end they're working with. This macro /// ```
/// serves as the boundary between dynamic `Id` values provided by `wgpu-core`'s
/// users and the crate's mostly-monomorphized implementation, selecting the
/// `hal::Api` implementation appropriate to the `Id` value's back end.
/// ///
/// [`Global`]: hub::Global /// where the `device_create_buffer` method is defined like this:
/// [`Global::device_create_buffer`]: hub::Global::device_create_buffer ///
/// ```ignore
/// impl<...> Global<...> {
/// pub fn device_create_buffer<A: hal::Api>(&self, ...) -> ...
/// { ... }
/// }
/// ```
///
/// That `gfx_select!` call uses `device_id`'s backend to select the right
/// backend type `A` for a call to `Global::device_create_buffer<A>`.
///
/// However, there's nothing about this macro that is specific to `hub::Global`.
/// For example, Firefox's embedding of `wgpu_core` defines its own types with
/// methods that take `hal::Api` type parameters. Firefox uses `gfx_select!` to
/// dynamically dispatch to the right specialization based on the resource's id.
///
/// [`wgpu_types::Backend`]: wgt::Backend
/// [`wgpu_core::hub::Global`]: crate::hub::Global
/// [`Id`]: id::Id /// [`Id`]: id::Id
#[macro_export] #[macro_export]
macro_rules! gfx_select { macro_rules! gfx_select {
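A compact, self-contained illustration of the dispatch shape the macro expands to, using a toy backend enum rather than the real wgpu_types::Backend:

// Toy sketch of the match-on-backend expansion gfx_select! performs: the real
// macro matches on id.backend() and calls the method specialized per backend.
trait Api {
    const NAME: &'static str;
}
struct Vulkan;
struct Metal;
impl Api for Vulkan {
    const NAME: &'static str = "Vulkan";
}
impl Api for Metal {
    const NAME: &'static str = "Metal";
}

enum Backend {
    Vulkan,
    Metal,
}

fn describe<A: Api>() -> &'static str {
    A::NAME
}

// One arm per compiled-in backend, mirroring gfx_select!(id => describe()).
fn dispatch(backend: Backend) -> &'static str {
    match backend {
        Backend::Vulkan => describe::<Vulkan>(),
        Backend::Metal => describe::<Metal>(),
    }
}

fn main() {
    assert_eq!(dispatch(Backend::Vulkan), "Vulkan");
}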

third_party/rust/wgpu-core/src/pipeline.rs (vendored)

@ -124,6 +124,24 @@ pub enum CreateShaderModuleError {
Validation(#[from] ShaderError<naga::WithSpan<naga::valid::ValidationError>>), Validation(#[from] ShaderError<naga::WithSpan<naga::valid::ValidationError>>),
#[error(transparent)] #[error(transparent)]
MissingFeatures(#[from] MissingFeatures), MissingFeatures(#[from] MissingFeatures),
#[error(
"shader global {bind:?} uses a group index {group} that exceeds the max_bind_groups limit of {limit}."
)]
InvalidGroupIndex {
bind: naga::ResourceBinding,
group: u32,
limit: u32,
},
}
impl CreateShaderModuleError {
pub fn location(&self, source: &str) -> Option<naga::SourceLocation> {
match *self {
CreateShaderModuleError::Parsing(ref err) => err.inner.location(source),
CreateShaderModuleError::Validation(ref err) => err.inner.location(source),
_ => None,
}
}
} }
/// Describes a programmable pipeline stage. /// Describes a programmable pipeline stage.
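The new `location` helper turns a failed shader creation into a source position that can be reported to the user. A hedged usage sketch (assumes the `wgpu_core` and `log` crates are available and that `naga::SourceLocation` is Debug-printable):

// Hedged sketch: only parse and validation errors carry a span, so `location`
// returns None for the other variants.
fn report_shader_error(err: &wgpu_core::pipeline::CreateShaderModuleError, source: &str) {
    match err.location(source) {
        Some(loc) => log::error!("shader error at {:?}: {}", loc, err),
        None => log::error!("shader error: {}", err),
    }
}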
@ -231,7 +249,7 @@ pub struct FragmentState<'a> {
/// The compiled fragment stage and its entry point. /// The compiled fragment stage and its entry point.
pub stage: ProgrammableStageDescriptor<'a>, pub stage: ProgrammableStageDescriptor<'a>,
/// The effect of draw calls on the color aspect of the output target. /// The effect of draw calls on the color aspect of the output target.
pub targets: Cow<'a, [wgt::ColorTargetState]>, pub targets: Cow<'a, [Option<wgt::ColorTargetState>]>,
} }
/// Describes a render (graphics) pipeline. /// Describes a render (graphics) pipeline.
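With `targets` now a slice of `Option`, a pipeline can leave a color slot unbound. A hedged sketch of a target list in the new style, with slot 1 intentionally left as a hole (wgt = wgpu_types):

// Hedged sketch: location 0 renders to an sRGB target, location 1 is skipped;
// the fragment shader must not write an output for the `None` slot.
fn fragment_targets() -> [Option<wgt::ColorTargetState>; 2] {
    [
        Some(wgt::ColorTargetState {
            format: wgt::TextureFormat::Rgba8UnormSrgb,
            blend: Some(wgt::BlendState::ALPHA_BLENDING),
            write_mask: wgt::ColorWrites::ALL,
        }),
        None,
    ]
}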
@ -299,6 +317,8 @@ pub enum CreateRenderPipelineError {
Device(#[from] DeviceError), Device(#[from] DeviceError),
#[error("pipeline layout is invalid")] #[error("pipeline layout is invalid")]
InvalidLayout, InvalidLayout,
#[error("fragment output @location({0}) is invalid")]
InvalidFragmentOutputLocation(u32),
#[error("unable to derive an implicit layout")] #[error("unable to derive an implicit layout")]
Implicit(#[from] ImplicitLayoutError), Implicit(#[from] ImplicitLayoutError),
#[error("color state [{0}] is invalid")] #[error("color state [{0}] is invalid")]
@ -352,7 +372,27 @@ bitflags::bitflags! {
pub struct PipelineFlags: u32 { pub struct PipelineFlags: u32 {
const BLEND_CONSTANT = 1 << 0; const BLEND_CONSTANT = 1 << 0;
const STENCIL_REFERENCE = 1 << 1; const STENCIL_REFERENCE = 1 << 1;
const WRITES_DEPTH_STENCIL = 1 << 2; const WRITES_DEPTH = 1 << 2;
const WRITES_STENCIL = 1 << 3;
}
}
/// How a render pipeline will retrieve attributes from a particular vertex buffer.
#[derive(Clone, Copy, Debug)]
pub struct VertexStep {
/// The byte stride in the buffer between one attribute value and the next.
pub stride: wgt::BufferAddress,
/// Whether the buffer is indexed by vertex number or instance number.
pub mode: wgt::VertexStepMode,
}
impl Default for VertexStep {
fn default() -> Self {
Self {
stride: 0,
mode: wgt::VertexStepMode::Vertex,
}
} }
} }
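A self-contained sketch of the arithmetic a step like this drives at draw time, using simplified local stand-ins rather than the wgpu-core types:

// Simplified stand-ins: a buffer stepped per vertex must cover roughly
// vertex_count * stride bytes, one stepped per instance must cover
// instance_count * stride bytes (a conservative bound, for illustration).
#[derive(Clone, Copy)]
enum StepMode {
    Vertex,
    Instance,
}

#[derive(Clone, Copy)]
struct VertexStep {
    stride: u64,
    mode: StepMode,
}

fn required_size(step: VertexStep, vertex_count: u64, instance_count: u64) -> u64 {
    match step.mode {
        StepMode::Vertex => vertex_count * step.stride,
        StepMode::Instance => instance_count * step.stride,
    }
}

fn main() {
    let step = VertexStep { stride: 16, mode: StepMode::Vertex };
    assert_eq!(required_size(step, 3, 1), 48);
}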
@ -364,7 +404,7 @@ pub struct RenderPipeline<A: hal::Api> {
pub(crate) pass_context: RenderPassContext, pub(crate) pass_context: RenderPassContext,
pub(crate) flags: PipelineFlags, pub(crate) flags: PipelineFlags,
pub(crate) strip_index_format: Option<wgt::IndexFormat>, pub(crate) strip_index_format: Option<wgt::IndexFormat>,
pub(crate) vertex_strides: Vec<(wgt::BufferAddress, wgt::VertexStepMode)>, pub(crate) vertex_steps: Vec<VertexStep>,
pub(crate) late_sized_buffer_groups: ArrayVec<LateSizedBufferGroup, { hal::MAX_BIND_GROUPS }>, pub(crate) late_sized_buffer_groups: ArrayVec<LateSizedBufferGroup, { hal::MAX_BIND_GROUPS }>,
pub(crate) life_guard: LifeGuard, pub(crate) life_guard: LifeGuard,
} }

third_party/rust/wgpu-core/src/present.rs (vendored)

@ -123,7 +123,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let _ = device; let _ = device;
let suf = A::get_surface_mut(surface); let suf = A::get_surface_mut(surface);
let (texture_id, status) = match unsafe { suf.raw.acquire_texture(FRAME_TIMEOUT_MS) } { let (texture_id, status) = match unsafe {
suf.raw
.acquire_texture(Some(std::time::Duration::from_millis(
FRAME_TIMEOUT_MS as u64,
)))
} {
Ok(Some(ast)) => { Ok(Some(ast)) => {
let clear_view_desc = hal::TextureViewDescriptor { let clear_view_desc = hal::TextureViewDescriptor {
label: Some("(wgpu internal) clear surface texture view"), label: Some("(wgpu internal) clear surface texture view"),

third_party/rust/wgpu-core/src/resource.rs (vendored)

@ -47,8 +47,8 @@ unsafe impl<A: hal::Api> Sync for BufferMapState<A> {}
#[repr(C)] #[repr(C)]
pub struct BufferMapCallbackC { pub struct BufferMapCallbackC {
callback: unsafe extern "C" fn(status: BufferMapAsyncStatus, user_data: *mut u8), pub callback: unsafe extern "C" fn(status: BufferMapAsyncStatus, user_data: *mut u8),
user_data: *mut u8, pub user_data: *mut u8,
} }
unsafe impl Send for BufferMapCallbackC {} unsafe impl Send for BufferMapCallbackC {}
@ -174,6 +174,8 @@ pub enum CreateBufferError {
EmptyUsage, EmptyUsage,
#[error("`MAP` usage can only be combined with the opposite `COPY`, requested {0:?}")] #[error("`MAP` usage can only be combined with the opposite `COPY`, requested {0:?}")]
UsageMismatch(wgt::BufferUsages), UsageMismatch(wgt::BufferUsages),
#[error("Buffer size {requested} is greater than the maximum buffer size ({maximum})")]
MaxBufferSize { requested: u64, maximum: u64 },
} }
impl<A: hal::Api> Resource for Buffer<A> { impl<A: hal::Api> Resource for Buffer<A> {
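The new MaxBufferSize variant pairs with the max_buffer_size field added to the limits in this update. A hedged sketch of requesting a raised buffer-size limit at device creation, assuming the public wgpu Limits mirrors the wgt field:

// Hedged sketch: ask for up to 1 GiB buffers, clamped to what the adapter
// supports; creating a larger buffer yields CreateBufferError::MaxBufferSize.
fn requested_limits(adapter_limits: &wgpu::Limits) -> wgpu::Limits {
    wgpu::Limits {
        max_buffer_size: (1u64 << 30).min(adapter_limits.max_buffer_size),
        ..wgpu::Limits::default()
    }
}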
@ -330,14 +332,28 @@ pub enum TextureErrorDimension {
pub enum TextureDimensionError { pub enum TextureDimensionError {
#[error("Dimension {0:?} is zero")] #[error("Dimension {0:?} is zero")]
Zero(TextureErrorDimension), Zero(TextureErrorDimension),
#[error("Dimension {0:?} value {given} exceeds the limit of {limit}")] #[error("Dimension {dim:?} value {given} exceeds the limit of {limit}")]
LimitExceeded { LimitExceeded {
dim: TextureErrorDimension, dim: TextureErrorDimension,
given: u32, given: u32,
limit: u32, limit: u32,
}, },
#[error("sample count {0} is invalid")] #[error("Sample count {0} is invalid")]
InvalidSampleCount(u32), InvalidSampleCount(u32),
#[error("Width {width} is not a multiple of {format:?}'s block width ({block_width})")]
NotMultipleOfBlockWidth {
width: u32,
block_width: u32,
format: wgt::TextureFormat,
},
#[error("Height {height} is not a multiple of {format:?}'s block height ({block_height})")]
NotMultipleOfBlockHeight {
height: u32,
block_height: u32,
format: wgt::TextureFormat,
},
#[error("Multisampled texture depth or array layers must be 1, got {0}")]
MultisampledDepthOrArrayLayer(u32),
} }
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
@ -360,6 +376,12 @@ pub enum CreateTextureError {
InvalidFormatUsages(wgt::TextureUsages, wgt::TextureFormat), InvalidFormatUsages(wgt::TextureUsages, wgt::TextureFormat),
#[error("Texture usages {0:?} are not allowed on a texture of dimensions {1:?}")] #[error("Texture usages {0:?} are not allowed on a texture of dimensions {1:?}")]
InvalidDimensionUsages(wgt::TextureUsages, wgt::TextureDimension), InvalidDimensionUsages(wgt::TextureUsages, wgt::TextureDimension),
#[error("Texture usage STORAGE_BINDING is not allowed for multisampled textures")]
InvalidMultisampledStorageBinding,
#[error("Format {0:?} does not support multisampling")]
InvalidMultisampledFormat(wgt::TextureFormat),
#[error("Multisampled textures must have RENDER_ATTACHMENT usage")]
MultisampledNotRenderAttachment,
#[error("Texture format {0:?} can't be used due to missing features.")] #[error("Texture format {0:?} can't be used due to missing features.")]
MissingFeatures(wgt::TextureFormat, #[source] MissingFeatures), MissingFeatures(wgt::TextureFormat, #[source] MissingFeatures),
} }


@ -467,8 +467,8 @@ impl<A: hub::HalApi> BufferTracker<A> {
/// This is a really funky method used by Compute Passes to generate /// This is a really funky method used by Compute Passes to generate
/// barriers after a call to dispatch without needing to iterate /// barriers after a call to dispatch without needing to iterate
/// over all elements in the usage scope. We use each the /// over all elements in the usage scope. We use
/// bind group as a source of which IDs to look at. The bind groups /// a given iterator of ids as a source of which IDs to look at.
/// must have first been added to the usage scope. /// All the IDs must have first been added to the usage scope.
/// ///
/// # Safety /// # Safety
/// ///
@ -477,15 +477,15 @@ impl<A: hub::HalApi> BufferTracker<A> {
pub unsafe fn set_and_remove_from_usage_scope_sparse( pub unsafe fn set_and_remove_from_usage_scope_sparse(
&mut self, &mut self,
scope: &mut BufferUsageScope<A>, scope: &mut BufferUsageScope<A>,
bind_group_state: &BufferBindGroupState<A>, id_source: impl IntoIterator<Item = Valid<BufferId>>,
) { ) {
let incoming_size = scope.state.len(); let incoming_size = scope.state.len();
if incoming_size > self.start.len() { if incoming_size > self.start.len() {
self.set_size(incoming_size); self.set_size(incoming_size);
} }
for &(id, ref ref_count, _) in bind_group_state.buffers.iter() { for id in id_source {
let (index32, epoch, _) = id.0.unzip(); let (index32, _, _) = id.0.unzip();
let index = index32 as usize; let index = index32 as usize;
scope.debug_assert_in_bounds(index); scope.debug_assert_in_bounds(index);
@ -504,9 +504,8 @@ impl<A: hub::HalApi> BufferTracker<A> {
state: &scope.state, state: &scope.state,
}, },
None, None,
ResourceMetadataProvider::Direct { ResourceMetadataProvider::Indirect {
epoch, metadata: &scope.metadata,
ref_count: Cow::Borrowed(ref_count),
}, },
&mut self.temp, &mut self.temp,
); );

third_party/rust/wgpu-core/src/track/mod.rs (vendored)

@ -690,9 +690,10 @@ impl<A: hub::HalApi> Tracker<A> {
/// the state given for those resources in the UsageScope. It also /// the state given for those resources in the UsageScope. It also
/// removes all touched resources from the usage scope. /// removes all touched resources from the usage scope.
/// ///
/// If a transition is needed to get the resources into the needed state, /// If a transition is needed to get the resources into the needed
/// those transitions are stored within the tracker. A subsequent /// state, those transitions are stored within the tracker. A
/// call to [`Self::drain`] is needed to get those transitions. /// subsequent call to [`BufferTracker::drain`] or
/// [`TextureTracker::drain`] is needed to get those transitions.
/// ///
/// This is a really funky method used by Compute Passes to generate /// This is a really funky method used by Compute Passes to generate
/// barriers after a call to dispatch without needing to iterate /// barriers after a call to dispatch without needing to iterate
@ -714,7 +715,7 @@ impl<A: hub::HalApi> Tracker<A> {
bind_group: &BindGroupStates<A>, bind_group: &BindGroupStates<A>,
) { ) {
self.buffers self.buffers
.set_and_remove_from_usage_scope_sparse(&mut scope.buffers, &bind_group.buffers); .set_and_remove_from_usage_scope_sparse(&mut scope.buffers, bind_group.buffers.used());
self.textures.set_and_remove_from_usage_scope_sparse( self.textures.set_and_remove_from_usage_scope_sparse(
textures, textures,
&mut scope.textures, &mut scope.textures,


@ -1 +1 @@
{"files":{"Cargo.toml":"61752b031b63d9ce967085f9a43eb3dbbad3b472bd264612a88faf3e7d6fcd57","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"78377f5876fafd77963eff7e3c2ba3a7e3ad5cf9201b09ed5612e49c2288eb18","examples/halmark/main.rs":"fefa4f8d16f1a40156e0c0ce7aee06569b222a7a6284b69a000adeebb34a915d","examples/halmark/shader.wgsl":"59e3628abe34c66708bf0106658e791ef24357df3cae72194d34ff07b40e8007","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"d6db84b269b934409ef85fa31914b3f4bc7e7012e40269aad3eff5454eae2a09","src/auxil/dxgi/conv.rs":"310a23866e652257e3dca55c85c78420118e6dea4e69ff907db4a52bda9ac1c5","src/auxil/dxgi/exception.rs":"f8d69d3d475e03c4d96d22778e5a6f322afd98fcfafb1414cd4a76239fa97a37","src/auxil/dxgi/factory.rs":"82451fcfcc1f73a570ae9e708c94efa9c125d269dfb7396de97da5b32f8a4090","src/auxil/dxgi/mod.rs":"63db737b48378d4843e2f7904f104790688029ff614bc80387cd9efe444f1e52","src/auxil/dxgi/result.rs":"20c8eb03d738062dff198feca6327addb9882ed0462be842c789eadf7dca0573","src/auxil/mod.rs":"f899555124ad6d44f5326ef935f4911702539fd933ec2ab07c6009badb3ea22c","src/auxil/renderdoc.rs":"3a4da908ebd6230177ca55c541c8278639e83d78badb4595a941aea30dd7f80f","src/dx11/adapter.rs":"bf123464ef748d021f2e0c40d27b3f6bdd50222c6f91cce6d25686a912eef093","src/dx11/command.rs":"cdad8dcdb800acba56c931f1726ddada652af18db0f066465af643f82a034492","src/dx11/device.rs":"76ac52095c639482adc2058509cd3acafd49cebc0694fcd64f8d9f53abc823de","src/dx11/instance.rs":"3bbf2730956472cb8023bd8fbd2d53e49f93c5e4ce3d14664112a293a165d191","src/dx11/library.rs":"0da08a780eefa7ff50f2e0998117202f26e5dd3d3a433c58b585801cff9863d2","src/dx11/mod.rs":"e4f7c6100e1bec479b41f3e3af96e01d53e6597c1c3a8fcde6f14cc9eb8537f8","src/dx12/adapter.rs":"3d830a70684c568a0b3f226beecc8e0dd311c3efd2b1be2caa629f688e98511e","src/dx12/command.rs":"e48636f686f4ff9efc1758f4e54522aeda284d27439c87c6a669a55352294d58","src/dx12/conv.rs":"e1bc82d9f0c019bb67aa7ee8d59e4677c047e56fee4ce3154ebc50e5388850cd","src/dx12/descriptor.rs":"7145d3dc6be13fae4cf6bb8bf34a1ea1749ad87e5f429b84f3cbbea7bf63c148","src/dx12/device.rs":"1dd830070de6e0a755164f96408d50e5c8a1bbfee539a1183a57c8f93c79e669","src/dx12/instance.rs":"ccc36443cb1df8ab8ed8366cf8599ec3d75fb5fefa5f9bb0f0f0b5e6fc1c5102","src/dx12/mod.rs":"e88f7396dca4aba859a6e28d3f9de64a57a0df85acd53cecd6ada3d96386062c","src/dx12/view.rs":"b7a5cb8933f30517a97b4bd767e10b4c09f686dbf493d53b9b265d2d0b16f1a6","src/empty.rs":"6bf65e405f63eff49b25d079af644b352b95a9c7edcf3a57be2e96a50307b66b","src/gles/adapter.rs":"47403c6cf736659b6c035873346e0aa1760b8b4b5763e64b9783e1358e599ba0","src/gles/command.rs":"31c85f3841131dc34553f7a66339396650ceb19763fa6c194c10fb4a5a3fc07e","src/gles/conv.rs":"1462ce906a4fe83139cc8375e385f8ce5a15d70588b81083ae8d5d9104f4457e","src/gles/device.rs":"66c30c4010f410bf3b8a03ee9d8e14753832fa2b6e17b518481281f06e3d7cd9","src/gles/egl.rs":"16516ef1ad62a976996a1b2123fd89ce6835a8468a2915841efd558516bb8b4f","src/gles/mod.rs":"75612e8ddd91735ba7b1bb7ecb58210b7b8469bde9671e437206c010600d16a2","src/gles/queue.rs":"b6dd8404ff53f1f9a8c9de87d4b78bd42468c146560d26fb585801d813919dab","src/gles/shaders/clear.frag":"aac702eed9ece5482db5ba6783a678b119a5e7802b1ecf93f4975dee8acab0b3","src/gles/shaders/clear.vert":"8f636168e1da2cac48091c466a543c3b09fb4a0dd8c60c1c9bf34cc890766740","src/gles/shaders/present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e
9230768e9f39d47f3ff5","src/gles/shaders/present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"083500c0b36d079a82754895d06b993ea8ed4393690b226c85f07cbec373a730","src/lib.rs":"dbd24a5fa263412c16cf821e6ff51ebda07608776accbd8b0bfb792435740619","src/metal/adapter.rs":"78f4a9eff186ab919e7c8900c08cd0b3325e15bf1656d5dcdae30a96a9d76f87","src/metal/command.rs":"b06983d7e11cdde526b7c9f5f0b86f1ea8faef02b7666367cb231211a8301570","src/metal/conv.rs":"2349ec6331a7a471c06615be249dc22b808742aca222e6d8861662d848b0c094","src/metal/device.rs":"dd823c8e12ba3ed69ef7cdcb543e8d995d0056d1f838516b0901068c83d8ffe2","src/metal/mod.rs":"c4f3959732f5f506fa881aa5812205a6452d6a946d661d7f81d1c7785359a10c","src/metal/surface.rs":"82836cadc751d94fb016bd590cdfec5649cbfae2f44d14599ed074dfb0a004dc","src/vulkan/adapter.rs":"90c4f57483589a09d9840c3f93efb8da66bc9eb5be975899877aa0192f86e4bd","src/vulkan/command.rs":"60d1867acd0e46c34dabecea708cd776a1f435721b6673a506b5bb8aee87ff80","src/vulkan/conv.rs":"b480f9d1cde0df92d6f9a07e8a42b86aaeb251f9b0692038286f4994caf45fec","src/vulkan/device.rs":"9b264c74f581345be889f1ed61ad6f7ab22e12e04183eb954dbfed1681c32d0c","src/vulkan/instance.rs":"c078d529f6955a662a3adc7739ffdb8a01b83dbef8dd1e2c3810d232b82cbb18","src/vulkan/mod.rs":"1ba41f2ea7650dc0757c1444ef62c95a8aa0f6671d98c73b4ea80eb4ea60f289"},"package":null} {"files":{"Cargo.toml":"187fffaaf3f370c2fac8a1060093b5c11c680711ed067b5d5a607178f93e4b30","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"78377f5876fafd77963eff7e3c2ba3a7e3ad5cf9201b09ed5612e49c2288eb18","examples/halmark/main.rs":"dc6e304e13882ba49f7cf81ed62982cb2dc1d239410e703d074a7a40db0ba362","examples/halmark/shader.wgsl":"59e3628abe34c66708bf0106658e791ef24357df3cae72194d34ff07b40e8007","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"4fcf856aac125c7de14665fb3032ff457782ba03446e96f77845e5d5623ec23e","src/auxil/dxgi/conv.rs":"310a23866e652257e3dca55c85c78420118e6dea4e69ff907db4a52bda9ac1c5","src/auxil/dxgi/exception.rs":"f8d69d3d475e03c4d96d22778e5a6f322afd98fcfafb1414cd4a76239fa97a37","src/auxil/dxgi/factory.rs":"82451fcfcc1f73a570ae9e708c94efa9c125d269dfb7396de97da5b32f8a4090","src/auxil/dxgi/mod.rs":"63db737b48378d4843e2f7904f104790688029ff614bc80387cd9efe444f1e52","src/auxil/dxgi/result.rs":"20c8eb03d738062dff198feca6327addb9882ed0462be842c789eadf7dca0573","src/auxil/mod.rs":"f899555124ad6d44f5326ef935f4911702539fd933ec2ab07c6009badb3ea22c","src/auxil/renderdoc.rs":"3a4da908ebd6230177ca55c541c8278639e83d78badb4595a941aea30dd7f80f","src/dx11/adapter.rs":"fe0c5f92e70520a9097e109ada0eeb135eb01eaa20918069bbfec56fa241bb8b","src/dx11/command.rs":"cdad8dcdb800acba56c931f1726ddada652af18db0f066465af643f82a034492","src/dx11/device.rs":"76ac52095c639482adc2058509cd3acafd49cebc0694fcd64f8d9f53abc823de","src/dx11/instance.rs":"3bbf2730956472cb8023bd8fbd2d53e49f93c5e4ce3d14664112a293a165d191","src/dx11/library.rs":"0da08a780eefa7ff50f2e0998117202f26e5dd3d3a433c58b585801cff9863d2","src/dx11/mod.rs":"41aead56ca3e00fd5e11d8f67ea3284f305247ac298089a0216dc7396b49bf57","src/dx12/adapter.rs":"cfbb6eb8b98d861457f43795e20e8c6badd12148b31722ff126ae0c065b1855e","src/dx12/command.rs":"7612cd93a97edce571c2215b3bfbbfe545daa13eaf8d83a0cedd7ca785f2e069","src/dx12/conv.rs":"996ebf03b9826ba568a4e00c088eba972c1fce706671b1d8007a8945154c7ca7","src/dx12/descriptor.rs":"714
5d3dc6be13fae4cf6bb8bf34a1ea1749ad87e5f429b84f3cbbea7bf63c148","src/dx12/device.rs":"ea715d3b31c697678e610d1696ceb36fc7a46df6647cce2dd1b7dd00e4639298","src/dx12/instance.rs":"ccc36443cb1df8ab8ed8366cf8599ec3d75fb5fefa5f9bb0f0f0b5e6fc1c5102","src/dx12/mod.rs":"3c2c1d6c2c0774a841b81b56748f7b9ecf97e24a80ec6554703903350bb027d7","src/dx12/view.rs":"b7a5cb8933f30517a97b4bd767e10b4c09f686dbf493d53b9b265d2d0b16f1a6","src/empty.rs":"389ea75882d0974c26649bb028aacdade28f4e0ea418a55970118e2de949e6fc","src/gles/adapter.rs":"0dab3dfa4c9277ba40ed8224f31aac8f3255c56f2dad39680dec9bf2d901f796","src/gles/command.rs":"306301fcf2ca75fb6a638b2c69dc0597aa50b2ebb16eaca6ec7005b4cfd84a30","src/gles/conv.rs":"1462ce906a4fe83139cc8375e385f8ce5a15d70588b81083ae8d5d9104f4457e","src/gles/device.rs":"54555c751a99c5327b21561a5ef1d2c3b4aee34db24b926d4393c2ec5cb8d8ad","src/gles/egl.rs":"e4263e63112a53001cc8cb8527ac2cc2e8004f8502272aaf5212a6e2c12de31b","src/gles/mod.rs":"6a90b7e84c699fed4b0ef2ab1a97e35337095eba36275116191678c3232601f2","src/gles/queue.rs":"3aee4a426a9ef6b62b1085780b42ea920c9a2a889894d5bccb968c98ed990879","src/gles/shaders/clear.frag":"aac702eed9ece5482db5ba6783a678b119a5e7802b1ecf93f4975dee8acab0b3","src/gles/shaders/clear.vert":"8f636168e1da2cac48091c466a543c3b09fb4a0dd8c60c1c9bf34cc890766740","src/gles/shaders/present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"24722575d4bb818dcc22b75f58af27f163a8b4d8284242e9f8ae5a27280086c7","src/lib.rs":"491c9aa88310a6dd7bb3fe6524edd7fbd88ad7e7733d42ae728ef3d74c23e62d","src/metal/adapter.rs":"bac7ba96fbea9aca823777442e72dc3fd407db7d890e2982b68926d179478770","src/metal/command.rs":"bae3efabc3bff841ebe1e584e25a8e9172ddaffdf3f3d363ba08f2aad43a68a8","src/metal/conv.rs":"2349ec6331a7a471c06615be249dc22b808742aca222e6d8861662d848b0c094","src/metal/device.rs":"a3bfbd9fd1db6eb465bc5d4732e441383c19deb19d617c74a451c8cca93ba509","src/metal/mod.rs":"c4f3959732f5f506fa881aa5812205a6452d6a946d661d7f81d1c7785359a10c","src/metal/surface.rs":"9ec61f4162b85b8a653e426bae037a36f66d62a300c62eea75129dc34f370a59","src/vulkan/adapter.rs":"617a9a5cba8139e1e4784ec2fe7833f3d04c593828aee3c960b57317a6722b90","src/vulkan/command.rs":"ddc38c2732ba34eb065d0785f923fb226cf7f95c39f5a9c8d7fcfa1c90cab38b","src/vulkan/conv.rs":"b480f9d1cde0df92d6f9a07e8a42b86aaeb251f9b0692038286f4994caf45fec","src/vulkan/device.rs":"2323b201cc594a3dd0ee1f552a655389bfd8bd3fcb6b763cfba179d255669bfe","src/vulkan/instance.rs":"27a7bed9d0a7304e4f1da57ff008457ef7904a03a952a33a70d32d8edf61de8b","src/vulkan/mod.rs":"93eb2f416669f810cd748e003a1d90cd9942f2aee365c89d01efd0a003d4e5c6"},"package":null}

third_party/rust/wgpu-hal/Cargo.toml (vendored)

@ -90,16 +90,19 @@ wasm-bindgen = { version = "0.2" }
web-sys = { version = "0.3", features = ["Window", "HtmlCanvasElement", "WebGl2RenderingContext"] } web-sys = { version = "0.3", features = ["Window", "HtmlCanvasElement", "WebGl2RenderingContext"] }
js-sys = { version = "0.3" } js-sys = { version = "0.3" }
[target.'cfg(target_os = "android")'.dependencies]
android_system_properties = "0.1.1"
[dependencies.naga] [dependencies.naga]
git = "https://github.com/gfx-rs/naga" git = "https://github.com/gfx-rs/naga"
rev = "571302e" rev = "27d38aae"
#version = "0.8" #version = "0.8"
# DEV dependencies # DEV dependencies
[dev-dependencies.naga] [dev-dependencies.naga]
git = "https://github.com/gfx-rs/naga" git = "https://github.com/gfx-rs/naga"
rev = "571302e" rev = "27d38aae"
#version = "0.8" #version = "0.8"
features = ["wgsl-in"] features = ["wgsl-in"]


@ -238,11 +238,11 @@ impl<A: hal::Api> Example<A> {
}, },
depth_stencil: None, depth_stencil: None,
multisample: wgt::MultisampleState::default(), multisample: wgt::MultisampleState::default(),
color_targets: &[wgt::ColorTargetState { color_targets: &[Some(wgt::ColorTargetState {
format: surface_config.format, format: surface_config.format,
blend: Some(wgt::BlendState::ALPHA_BLENDING), blend: Some(wgt::BlendState::ALPHA_BLENDING),
write_mask: wgt::ColorWrites::default(), write_mask: wgt::ColorWrites::default(),
}], })],
multiview: None, multiview: None,
}; };
let pipeline = unsafe { device.create_render_pipeline(&pipeline_desc).unwrap() }; let pipeline = unsafe { device.create_render_pipeline(&pipeline_desc).unwrap() };
@ -614,7 +614,7 @@ impl<A: hal::Api> Example<A> {
let ctx = &mut self.contexts[self.context_index]; let ctx = &mut self.contexts[self.context_index];
let surface_tex = unsafe { self.surface.acquire_texture(!0).unwrap().unwrap().texture }; let surface_tex = unsafe { self.surface.acquire_texture(None).unwrap().unwrap().texture };
let target_barrier0 = hal::TextureBarrier { let target_barrier0 = hal::TextureBarrier {
texture: surface_tex.borrow(), texture: surface_tex.borrow(),
@ -646,7 +646,7 @@ impl<A: hal::Api> Example<A> {
depth_or_array_layers: 1, depth_or_array_layers: 1,
}, },
sample_count: 1, sample_count: 1,
color_attachments: &[hal::ColorAttachment { color_attachments: &[Some(hal::ColorAttachment {
target: hal::Attachment { target: hal::Attachment {
view: &surface_tex_view, view: &surface_tex_view,
usage: hal::TextureUses::COLOR_TARGET, usage: hal::TextureUses::COLOR_TARGET,
@ -659,7 +659,7 @@ impl<A: hal::Api> Example<A> {
b: 0.3, b: 0.3,
a: 1.0, a: 1.0,
}, },
}], })],
depth_stencil_attachment: None, depth_stencil_attachment: None,
multiview: None, multiview: None,
}; };


@ -163,7 +163,7 @@ fn fill_screen(exposed: &hal::ExposedAdapter<hal::api::Gles>, width: u32, height
depth_or_array_layers: 1, depth_or_array_layers: 1,
}, },
sample_count: 1, sample_count: 1,
color_attachments: &[hal::ColorAttachment { color_attachments: &[Some(hal::ColorAttachment {
target: hal::Attachment { target: hal::Attachment {
view: &view, view: &view,
usage: hal::TextureUses::COLOR_TARGET, usage: hal::TextureUses::COLOR_TARGET,
@ -171,7 +171,7 @@ fn fill_screen(exposed: &hal::ExposedAdapter<hal::api::Gles>, width: u32, height
resolve_target: None, resolve_target: None,
ops: hal::AttachmentOps::STORE, ops: hal::AttachmentOps::STORE,
clear_value: wgt::Color::BLUE, clear_value: wgt::Color::BLUE,
}], })],
depth_stencil_attachment: None, depth_stencil_attachment: None,
multiview: None, multiview: None,
}; };


@ -130,6 +130,7 @@ impl super::Adapter {
if feature_level >= FL11_0 { if feature_level >= FL11_0 {
downlevel |= wgt::DownlevelFlags::INDIRECT_EXECUTION; downlevel |= wgt::DownlevelFlags::INDIRECT_EXECUTION;
downlevel |= wgt::DownlevelFlags::WEBGPU_TEXTURE_FORMAT_SUPPORT;
features |= wgt::Features::TEXTURE_COMPRESSION_BC; features |= wgt::Features::TEXTURE_COMPRESSION_BC;
} }
@ -219,6 +220,8 @@ impl super::Adapter {
max_compute_workgroup_size_y: max_workgroup_size_xy, max_compute_workgroup_size_y: max_workgroup_size_xy,
max_compute_workgroup_size_z: max_workgroup_size_z, max_compute_workgroup_size_z: max_workgroup_size_z,
max_compute_workgroups_per_dimension, max_compute_workgroups_per_dimension,
// D3D11_BUFFER_DESC represents the buffer size as a 32 bit int.
max_buffer_size: u32::MAX as u64,
}; };
// //

third_party/rust/wgpu-hal/src/dx11/mod.rs (vendored)

@ -124,7 +124,7 @@ impl crate::Surface<Api> for Surface {
unsafe fn acquire_texture( unsafe fn acquire_texture(
&mut self, &mut self,
timeout_ms: u32, _timeout: Option<std::time::Duration>,
) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> { ) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> {
todo!() todo!()
} }


@ -41,6 +41,10 @@ impl super::Adapter {
} }
} }
pub fn raw_adapter(&self) -> &native::DxgiAdapter {
&self.raw
}
#[allow(trivial_casts)] #[allow(trivial_casts)]
pub(super) fn expose( pub(super) fn expose(
adapter: native::DxgiAdapter, adapter: native::DxgiAdapter,
@ -198,6 +202,7 @@ impl super::Adapter {
| wgt::Features::VERTEX_WRITABLE_STORAGE | wgt::Features::VERTEX_WRITABLE_STORAGE
| wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES | wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
| wgt::Features::TIMESTAMP_QUERY | wgt::Features::TIMESTAMP_QUERY
| wgt::Features::WRITE_TIMESTAMP_INSIDE_PASSES
| wgt::Features::TEXTURE_COMPRESSION_BC | wgt::Features::TEXTURE_COMPRESSION_BC
| wgt::Features::CLEAR_TEXTURE | wgt::Features::CLEAR_TEXTURE
| wgt::Features::TEXTURE_FORMAT_16BIT_NORM; | wgt::Features::TEXTURE_FORMAT_16BIT_NORM;
@ -278,6 +283,7 @@ impl super::Adapter {
max_compute_workgroup_size_z: d3d12::D3D12_CS_THREAD_GROUP_MAX_Z, max_compute_workgroup_size_z: d3d12::D3D12_CS_THREAD_GROUP_MAX_Z,
max_compute_workgroups_per_dimension: max_compute_workgroups_per_dimension:
d3d12::D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION, d3d12::D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION,
max_buffer_size: u64::MAX,
}, },
alignments: crate::Alignments { alignments: crate::Alignments {
buffer_copy_offset: wgt::BufferSize::new( buffer_copy_offset: wgt::BufferSize::new(

third_party/rust/wgpu-hal/src/dx12/command.rs (vendored)

@ -579,11 +579,15 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<super::Api>) { unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<super::Api>) {
self.begin_pass(super::PassKind::Render, desc.label); self.begin_pass(super::PassKind::Render, desc.label);
let mut color_views = [native::CpuDescriptor { ptr: 0 }; crate::MAX_COLOR_ATTACHMENTS];
let mut color_views = [native::CpuDescriptor { ptr: 0 }; crate::MAX_COLOR_TARGETS];
for (rtv, cat) in color_views.iter_mut().zip(desc.color_attachments.iter()) { for (rtv, cat) in color_views.iter_mut().zip(desc.color_attachments.iter()) {
*rtv = cat.target.view.handle_rtv.unwrap().raw; if let Some(cat) = cat.as_ref() {
*rtv = cat.target.view.handle_rtv.unwrap().raw;
} else {
*rtv = self.null_rtv_handle.raw;
}
} }
let ds_view = match desc.depth_stencil_attachment { let ds_view = match desc.depth_stencil_attachment {
None => ptr::null(), None => ptr::null(),
Some(ref ds) => { Some(ref ds) => {
@ -605,23 +609,26 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
self.pass.resolves.clear(); self.pass.resolves.clear();
for (rtv, cat) in color_views.iter().zip(desc.color_attachments.iter()) { for (rtv, cat) in color_views.iter().zip(desc.color_attachments.iter()) {
if !cat.ops.contains(crate::AttachmentOps::LOAD) { if let Some(cat) = cat.as_ref() {
let value = [ if !cat.ops.contains(crate::AttachmentOps::LOAD) {
cat.clear_value.r as f32, let value = [
cat.clear_value.g as f32, cat.clear_value.r as f32,
cat.clear_value.b as f32, cat.clear_value.g as f32,
cat.clear_value.a as f32, cat.clear_value.b as f32,
]; cat.clear_value.a as f32,
list.clear_render_target_view(*rtv, value, &[]); ];
} list.clear_render_target_view(*rtv, value, &[]);
if let Some(ref target) = cat.resolve_target { }
self.pass.resolves.push(super::PassResolve { if let Some(ref target) = cat.resolve_target {
src: cat.target.view.target_base, self.pass.resolves.push(super::PassResolve {
dst: target.view.target_base, src: cat.target.view.target_base,
format: target.view.raw_format, dst: target.view.target_base,
}); format: target.view.raw_format,
});
}
} }
} }
if let Some(ref ds) = desc.depth_stencil_attachment { if let Some(ref ds) = desc.depth_stencil_attachment {
let mut flags = native::ClearFlags::empty(); let mut flags = native::ClearFlags::empty();
let aspects = ds.target.view.format_aspects; let aspects = ds.target.view.format_aspects;

third_party/rust/wgpu-hal/src/dx12/conv.rs (vendored)

@ -267,7 +267,7 @@ fn map_blend_component(
} }
pub fn map_render_targets( pub fn map_render_targets(
color_targets: &[wgt::ColorTargetState], color_targets: &[Option<wgt::ColorTargetState>],
) -> [d3d12::D3D12_RENDER_TARGET_BLEND_DESC; d3d12::D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT as usize] ) -> [d3d12::D3D12_RENDER_TARGET_BLEND_DESC; d3d12::D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT as usize]
{ {
let dummy_target = d3d12::D3D12_RENDER_TARGET_BLEND_DESC { let dummy_target = d3d12::D3D12_RENDER_TARGET_BLEND_DESC {
@ -285,17 +285,19 @@ pub fn map_render_targets(
let mut raw_targets = [dummy_target; d3d12::D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT as usize]; let mut raw_targets = [dummy_target; d3d12::D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT as usize];
for (raw, ct) in raw_targets.iter_mut().zip(color_targets.iter()) { for (raw, ct) in raw_targets.iter_mut().zip(color_targets.iter()) {
raw.RenderTargetWriteMask = ct.write_mask.bits() as u8; if let Some(ct) = ct.as_ref() {
if let Some(ref blend) = ct.blend { raw.RenderTargetWriteMask = ct.write_mask.bits() as u8;
let (color_op, color_src, color_dst) = map_blend_component(&blend.color, false); if let Some(ref blend) = ct.blend {
let (alpha_op, alpha_src, alpha_dst) = map_blend_component(&blend.alpha, true); let (color_op, color_src, color_dst) = map_blend_component(&blend.color, false);
raw.BlendEnable = 1; let (alpha_op, alpha_src, alpha_dst) = map_blend_component(&blend.alpha, true);
raw.BlendOp = color_op; raw.BlendEnable = 1;
raw.SrcBlend = color_src; raw.BlendOp = color_op;
raw.DestBlend = color_dst; raw.SrcBlend = color_src;
raw.BlendOpAlpha = alpha_op; raw.DestBlend = color_dst;
raw.SrcBlendAlpha = alpha_src; raw.BlendOpAlpha = alpha_op;
raw.DestBlendAlpha = alpha_dst; raw.SrcBlendAlpha = alpha_src;
raw.DestBlendAlpha = alpha_dst;
}
} }
} }

third_party/rust/wgpu-hal/src/dx12/device.rs

@ -125,6 +125,20 @@ impl super::Device {
)?, )?,
}; };
let mut rtv_pool = descriptor::CpuPool::new(raw, native::DescriptorHeapType::Rtv);
let null_rtv_handle = rtv_pool.alloc_handle();
// A null pResource is used to initialize a null descriptor,
// which guarantees D3D11-like null binding behavior (reading 0s, writes are discarded)
raw.create_render_target_view(
native::WeakPtr::null(),
&native::RenderTargetViewDesc::texture_2d(
winapi::shared::dxgiformat::DXGI_FORMAT_R8G8B8A8_UNORM,
0,
0,
),
null_rtv_handle.raw,
);
Ok(super::Device { Ok(super::Device {
raw, raw,
present_queue, present_queue,
@ -134,10 +148,7 @@ impl super::Device {
}, },
private_caps, private_caps,
shared: Arc::new(shared), shared: Arc::new(shared),
rtv_pool: Mutex::new(descriptor::CpuPool::new( rtv_pool: Mutex::new(rtv_pool),
raw,
native::DescriptorHeapType::Rtv,
)),
dsv_pool: Mutex::new(descriptor::CpuPool::new( dsv_pool: Mutex::new(descriptor::CpuPool::new(
raw, raw,
native::DescriptorHeapType::Dsv, native::DescriptorHeapType::Dsv,
@ -153,6 +164,7 @@ impl super::Device {
library: Arc::clone(library), library: Arc::clone(library),
#[cfg(feature = "renderdoc")] #[cfg(feature = "renderdoc")]
render_doc: Default::default(), render_doc: Default::default(),
null_rtv_handle,
}) })
} }
@ -306,6 +318,7 @@ impl super::Device {
impl crate::Device<super::Api> for super::Device { impl crate::Device<super::Api> for super::Device {
unsafe fn exit(self, queue: super::Queue) { unsafe fn exit(self, queue: super::Queue) {
self.rtv_pool.lock().free_handle(self.null_rtv_handle);
self.rtv_pool.into_inner().destroy(); self.rtv_pool.into_inner().destroy();
self.dsv_pool.into_inner().destroy(); self.dsv_pool.into_inner().destroy();
self.srv_uav_pool.into_inner().destroy(); self.srv_uav_pool.into_inner().destroy();
@ -658,6 +671,7 @@ impl crate::Device<super::Api> for super::Device {
allocator, allocator,
device: self.raw, device: self.raw,
shared: Arc::clone(&self.shared), shared: Arc::clone(&self.shared),
null_rtv_handle: self.null_rtv_handle.clone(),
list: None, list: None,
free_lists: Vec::new(), free_lists: Vec::new(),
pass: super::PassState::new(), pass: super::PassState::new(),
@ -1283,7 +1297,9 @@ impl crate::Device<super::Api> for super::Device {
let mut rtv_formats = [dxgiformat::DXGI_FORMAT_UNKNOWN; let mut rtv_formats = [dxgiformat::DXGI_FORMAT_UNKNOWN;
d3d12::D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT as usize]; d3d12::D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT as usize];
for (rtv_format, ct) in rtv_formats.iter_mut().zip(desc.color_targets) { for (rtv_format, ct) in rtv_formats.iter_mut().zip(desc.color_targets) {
*rtv_format = auxil::dxgi::conv::map_texture_format(ct.format); if let Some(ct) = ct.as_ref() {
*rtv_format = auxil::dxgi::conv::map_texture_format(ct.format);
}
} }
let bias = desc let bias = desc

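The device.rs hunk allocates the null RTV handle from the RTV pool before the pool is wrapped in a Mutex, hands a copy to every CommandEncoder, and returns it to the pool in exit() ahead of destroying the pool. A rough sketch of that allocate-early / free-before-teardown pattern follows; CpuPool and Handle are simplified stand-ins, not wgpu-hal's descriptor::CpuPool API.

#[derive(Clone, Copy, Debug, PartialEq)]
struct Handle { index: usize }

#[derive(Default)]
struct CpuPool { next: usize, free: Vec<usize> }

impl CpuPool {
    fn alloc_handle(&mut self) -> Handle {
        // Reuse a previously freed slot before growing the pool.
        let index = self.free.pop().unwrap_or_else(|| {
            let index = self.next;
            self.next += 1;
            index
        });
        Handle { index }
    }

    fn free_handle(&mut self, handle: Handle) {
        self.free.push(handle.index);
    }
}

struct Device {
    rtv_pool: std::sync::Mutex<CpuPool>,
    null_rtv_handle: Handle,
}

impl Device {
    fn new() -> Self {
        // Allocate the shared null handle before the pool goes behind the mutex,
        // mirroring the ordering in the hunk above.
        let mut rtv_pool = CpuPool::default();
        let null_rtv_handle = rtv_pool.alloc_handle();
        Device { rtv_pool: std::sync::Mutex::new(rtv_pool), null_rtv_handle }
    }

    fn exit(self) {
        // Give the handle back before the pool itself is torn down, as Device::exit does.
        self.rtv_pool.lock().unwrap().free_handle(self.null_rtv_handle);
    }
}

fn main() {
    let device = Device::new();
    assert_eq!(device.null_rtv_handle, Handle { index: 0 });
    device.exit();
}
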
third_party/rust/wgpu-hal/src/dx12/mod.rs

@ -229,6 +229,7 @@ pub struct Device {
library: Arc<native::D3D12Lib>, library: Arc<native::D3D12Lib>,
#[cfg(feature = "renderdoc")] #[cfg(feature = "renderdoc")]
render_doc: crate::auxil::renderdoc::RenderDoc, render_doc: crate::auxil::renderdoc::RenderDoc,
null_rtv_handle: descriptor::Handle,
} }
unsafe impl Send for Device {} unsafe impl Send for Device {}
@ -287,7 +288,7 @@ enum PassKind {
struct PassState { struct PassState {
has_label: bool, has_label: bool,
resolves: ArrayVec<PassResolve, { crate::MAX_COLOR_TARGETS }>, resolves: ArrayVec<PassResolve, { crate::MAX_COLOR_ATTACHMENTS }>,
layout: PipelineLayoutShared, layout: PipelineLayoutShared,
root_elements: [RootElement; MAX_ROOT_ELEMENTS], root_elements: [RootElement; MAX_ROOT_ELEMENTS],
dirty_root_elements: u64, dirty_root_elements: u64,
@ -329,6 +330,7 @@ pub struct CommandEncoder {
allocator: native::CommandAllocator, allocator: native::CommandAllocator,
device: native::Device, device: native::Device,
shared: Arc<DeviceShared>, shared: Arc<DeviceShared>,
null_rtv_handle: descriptor::Handle,
list: Option<native::GraphicsCommandList>, list: Option<native::GraphicsCommandList>,
free_lists: Vec<native::GraphicsCommandList>, free_lists: Vec<native::GraphicsCommandList>,
pass: PassState, pass: PassState,
@ -527,7 +529,14 @@ impl SwapChain {
self.raw self.raw
} }
unsafe fn wait(&mut self, timeout_ms: u32) -> Result<bool, crate::SurfaceError> { unsafe fn wait(
&mut self,
timeout: Option<std::time::Duration>,
) -> Result<bool, crate::SurfaceError> {
let timeout_ms = match timeout {
Some(duration) => duration.as_millis() as u32,
None => winbase::INFINITE,
};
match synchapi::WaitForSingleObject(self.waitable, timeout_ms) { match synchapi::WaitForSingleObject(self.waitable, timeout_ms) {
winbase::WAIT_ABANDONED | winbase::WAIT_FAILED => Err(crate::SurfaceError::Lost), winbase::WAIT_ABANDONED | winbase::WAIT_FAILED => Err(crate::SurfaceError::Lost),
winbase::WAIT_OBJECT_0 => Ok(true), winbase::WAIT_OBJECT_0 => Ok(true),
@ -690,7 +699,7 @@ impl crate::Surface<Api> for Surface {
unsafe fn unconfigure(&mut self, device: &Device) { unsafe fn unconfigure(&mut self, device: &Device) {
if let Some(mut sc) = self.swap_chain.take() { if let Some(mut sc) = self.swap_chain.take() {
let _ = sc.wait(winbase::INFINITE); let _ = sc.wait(None);
//TODO: this shouldn't be needed, //TODO: this shouldn't be needed,
// but it complains that the queue is still used otherwise // but it complains that the queue is still used otherwise
let _ = device.wait_idle(); let _ = device.wait_idle();
@ -701,11 +710,11 @@ impl crate::Surface<Api> for Surface {
unsafe fn acquire_texture( unsafe fn acquire_texture(
&mut self, &mut self,
timeout_ms: u32, timeout: Option<std::time::Duration>,
) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> { ) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> {
let sc = self.swap_chain.as_mut().unwrap(); let sc = self.swap_chain.as_mut().unwrap();
sc.wait(timeout_ms)?; sc.wait(timeout)?;
let base_index = sc.raw.GetCurrentBackBufferIndex() as usize; let base_index = sc.raw.GetCurrentBackBufferIndex() as usize;
let index = (base_index + sc.acquired_count) % sc.resources.len(); let index = (base_index + sc.acquired_count) % sc.resources.len();

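The swapchain hunks replace the raw timeout_ms: u32 parameter with timeout: Option<std::time::Duration>, where None means wait forever. A standalone sketch of the conversion used above; INFINITE is written out as u32::MAX here instead of being taken from winapi's winbase, and the truncating cast mirrors the `as u32` in the diff.

use std::time::Duration;

/// Same value as winbase::INFINITE on Windows; spelled out so this sketch has no winapi dependency.
const INFINITE: u32 = u32::MAX;

fn timeout_to_ms(timeout: Option<Duration>) -> u32 {
    match timeout {
        // Durations longer than ~49.7 days are truncated by the cast, which is
        // acceptable for a swapchain wait.
        Some(duration) => duration.as_millis() as u32,
        None => INFINITE,
    }
}

fn main() {
    assert_eq!(timeout_to_ms(Some(Duration::from_secs(1))), 1_000);
    assert_eq!(timeout_to_ms(None), INFINITE);
}
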
third_party/rust/wgpu-hal/src/empty.rs

@ -66,7 +66,7 @@ impl crate::Surface<Api> for Context {
unsafe fn acquire_texture( unsafe fn acquire_texture(
&mut self, &mut self,
timeout_ms: u32, timeout: Option<std::time::Duration>,
) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> { ) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> {
Ok(None) Ok(None)
} }

third_party/rust/wgpu-hal/src/gles/adapter.rs

@ -212,6 +212,9 @@ impl super::Adapter {
naga::back::glsl::Version::Embedded(value) naga::back::glsl::Version::Embedded(value)
}; };
// ANGLE provides renderer strings like: "ANGLE (Apple, Apple M1 Pro, OpenGL 4.1)"
let is_angle = renderer.contains("ANGLE");
let vertex_shader_storage_blocks = if supports_storage { let vertex_shader_storage_blocks = if supports_storage {
gl.get_parameter_i32(glow::MAX_VERTEX_SHADER_STORAGE_BLOCKS) as u32 gl.get_parameter_i32(glow::MAX_VERTEX_SHADER_STORAGE_BLOCKS) as u32
} else { } else {
@ -289,6 +292,12 @@ impl super::Adapter {
wgt::DownlevelFlags::ANISOTROPIC_FILTERING, wgt::DownlevelFlags::ANISOTROPIC_FILTERING,
extensions.contains("EXT_texture_filter_anisotropic"), extensions.contains("EXT_texture_filter_anisotropic"),
); );
downlevel_flags.set(
wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED,
!(cfg!(target_arch = "wasm32") || is_angle),
);
let is_ext_color_buffer_float_supported = extensions.contains("EXT_color_buffer_float");
let mut features = wgt::Features::empty() let mut features = wgt::Features::empty()
| wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES | wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
@ -459,6 +468,7 @@ impl super::Adapter {
0 0
}, },
max_compute_workgroups_per_dimension, max_compute_workgroups_per_dimension,
max_buffer_size: i32::MAX as u64,
}; };
let mut workarounds = super::Workarounds::empty(); let mut workarounds = super::Workarounds::empty();
@ -500,6 +510,7 @@ impl super::Adapter {
workarounds, workarounds,
shading_language_version, shading_language_version,
max_texture_size, max_texture_size,
is_ext_color_buffer_float_supported,
}), }),
}, },
info: Self::make_info(vendor, renderer), info: Self::make_info(vendor, renderer),
@ -619,6 +630,13 @@ impl crate::Adapter<super::Api> for super::Adapter {
unfilterable | Tfc::COLOR_ATTACHMENT | Tfc::MULTISAMPLE | Tfc::MULTISAMPLE_RESOLVE; unfilterable | Tfc::COLOR_ATTACHMENT | Tfc::MULTISAMPLE | Tfc::MULTISAMPLE_RESOLVE;
let filterable_renderable = filterable | renderable | Tfc::COLOR_ATTACHMENT_BLEND; let filterable_renderable = filterable | renderable | Tfc::COLOR_ATTACHMENT_BLEND;
let storage = Tfc::STORAGE | Tfc::STORAGE_READ_WRITE; let storage = Tfc::STORAGE | Tfc::STORAGE_READ_WRITE;
let float_renderable = if self.shared.is_ext_color_buffer_float_supported {
Tfc::COLOR_ATTACHMENT | Tfc::COLOR_ATTACHMENT_BLEND
} else {
Tfc::empty()
};
match format { match format {
Tf::R8Unorm => filterable_renderable, Tf::R8Unorm => filterable_renderable,
Tf::R8Snorm => filterable, Tf::R8Snorm => filterable,
@ -628,37 +646,37 @@ impl crate::Adapter<super::Api> for super::Adapter {
Tf::R16Sint => renderable, Tf::R16Sint => renderable,
Tf::R16Unorm => empty, Tf::R16Unorm => empty,
Tf::R16Snorm => empty, Tf::R16Snorm => empty,
Tf::R16Float => filterable, Tf::R16Float => filterable | float_renderable,
Tf::Rg8Unorm => filterable_renderable, Tf::Rg8Unorm => filterable_renderable,
Tf::Rg8Snorm => filterable, Tf::Rg8Snorm => filterable,
Tf::Rg8Uint => renderable, Tf::Rg8Uint => renderable,
Tf::Rg8Sint => renderable, Tf::Rg8Sint => renderable,
Tf::R32Uint => renderable | storage, Tf::R32Uint => renderable | storage,
Tf::R32Sint => renderable | storage, Tf::R32Sint => renderable | storage,
Tf::R32Float => unfilterable | storage, Tf::R32Float => unfilterable | storage | float_renderable,
Tf::Rg16Uint => renderable, Tf::Rg16Uint => renderable,
Tf::Rg16Sint => renderable, Tf::Rg16Sint => renderable,
Tf::Rg16Unorm => empty, Tf::Rg16Unorm => empty,
Tf::Rg16Snorm => empty, Tf::Rg16Snorm => empty,
Tf::Rg16Float => filterable, Tf::Rg16Float => filterable | float_renderable,
Tf::Rgba8Unorm | Tf::Rgba8UnormSrgb => filterable_renderable | storage, Tf::Rgba8Unorm | Tf::Rgba8UnormSrgb => filterable_renderable | storage,
Tf::Bgra8Unorm | Tf::Bgra8UnormSrgb => filterable_renderable, Tf::Bgra8Unorm | Tf::Bgra8UnormSrgb => filterable_renderable,
Tf::Rgba8Snorm => filterable, Tf::Rgba8Snorm => filterable,
Tf::Rgba8Uint => renderable | storage, Tf::Rgba8Uint => renderable | storage,
Tf::Rgba8Sint => renderable | storage, Tf::Rgba8Sint => renderable | storage,
Tf::Rgb10a2Unorm => filterable_renderable, Tf::Rgb10a2Unorm => filterable_renderable,
Tf::Rg11b10Float => filterable, Tf::Rg11b10Float => filterable | float_renderable,
Tf::Rg32Uint => renderable, Tf::Rg32Uint => renderable,
Tf::Rg32Sint => renderable, Tf::Rg32Sint => renderable,
Tf::Rg32Float => unfilterable, Tf::Rg32Float => unfilterable | float_renderable,
Tf::Rgba16Uint => renderable | storage, Tf::Rgba16Uint => renderable | storage,
Tf::Rgba16Sint => renderable | storage, Tf::Rgba16Sint => renderable | storage,
Tf::Rgba16Unorm => empty, Tf::Rgba16Unorm => empty,
Tf::Rgba16Snorm => empty, Tf::Rgba16Snorm => empty,
Tf::Rgba16Float => filterable | storage, Tf::Rgba16Float => filterable | storage | float_renderable,
Tf::Rgba32Uint => renderable | storage, Tf::Rgba32Uint => renderable | storage,
Tf::Rgba32Sint => renderable | storage, Tf::Rgba32Sint => renderable | storage,
Tf::Rgba32Float => unfilterable | storage, Tf::Rgba32Float => unfilterable | storage | float_renderable,
Tf::Depth32Float Tf::Depth32Float
| Tf::Depth32FloatStencil8 | Tf::Depth32FloatStencil8
| Tf::Depth24Plus | Tf::Depth24Plus

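In the adapter.rs hunk, float color formats (R16Float, R32Float, Rg16Float, Rg11b10Float, Rg32Float, Rgba16Float, Rgba32Float) only gain render-target capabilities when EXT_color_buffer_float is reported. A small sketch of that gating; the Tfc bitmask below is a hand-rolled stand-in for wgpu-hal's texture format capability flags.

/// Stand-in for wgpu-hal's texture format capability flags (Tfc).
#[derive(Clone, Copy, Debug, PartialEq)]
struct Tfc(u32);

impl Tfc {
    const EMPTY: Tfc = Tfc(0);
    const COLOR_ATTACHMENT: Tfc = Tfc(1 << 0);
    const COLOR_ATTACHMENT_BLEND: Tfc = Tfc(1 << 1);

    fn union(self, other: Tfc) -> Tfc {
        Tfc(self.0 | other.0)
    }
}

fn float_renderable_caps(extensions: &str) -> Tfc {
    // GLES 3.0 / WebGL 2 can sample float textures, but rendering to them
    // additionally requires EXT_color_buffer_float.
    if extensions.contains("EXT_color_buffer_float") {
        Tfc::COLOR_ATTACHMENT.union(Tfc::COLOR_ATTACHMENT_BLEND)
    } else {
        Tfc::EMPTY
    }
}

fn main() {
    let with_ext = float_renderable_caps("GL_EXT_color_buffer_float GL_OES_texture_float_linear");
    assert_eq!(with_ext, Tfc(0b11));
    assert_eq!(float_renderable_caps("GL_OES_texture_float_linear"), Tfc::EMPTY);
}
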
third_party/rust/wgpu-hal/src/gles/command.rs

@ -17,14 +17,14 @@ pub(super) struct State {
vertex_buffers: vertex_buffers:
[(super::VertexBufferDesc, Option<super::BufferBinding>); crate::MAX_VERTEX_BUFFERS], [(super::VertexBufferDesc, Option<super::BufferBinding>); crate::MAX_VERTEX_BUFFERS],
vertex_attributes: ArrayVec<super::AttributeDesc, { super::MAX_VERTEX_ATTRIBUTES }>, vertex_attributes: ArrayVec<super::AttributeDesc, { super::MAX_VERTEX_ATTRIBUTES }>,
color_targets: ArrayVec<super::ColorTargetDesc, { crate::MAX_COLOR_TARGETS }>, color_targets: ArrayVec<super::ColorTargetDesc, { crate::MAX_COLOR_ATTACHMENTS }>,
stencil: super::StencilState, stencil: super::StencilState,
depth_bias: wgt::DepthBiasState, depth_bias: wgt::DepthBiasState,
samplers: [Option<glow::Sampler>; super::MAX_SAMPLERS], samplers: [Option<glow::Sampler>; super::MAX_SAMPLERS],
texture_slots: [TextureSlotDesc; super::MAX_TEXTURE_SLOTS], texture_slots: [TextureSlotDesc; super::MAX_TEXTURE_SLOTS],
render_size: wgt::Extent3d, render_size: wgt::Extent3d,
resolve_attachments: ArrayVec<(u32, super::TextureView), { crate::MAX_COLOR_TARGETS }>, resolve_attachments: ArrayVec<(u32, super::TextureView), { crate::MAX_COLOR_ATTACHMENTS }>,
invalidate_attachments: ArrayVec<u32, { crate::MAX_COLOR_TARGETS + 2 }>, invalidate_attachments: ArrayVec<u32, { crate::MAX_COLOR_ATTACHMENTS + 2 }>,
has_pass_label: bool, has_pass_label: bool,
instance_vbuf_mask: usize, instance_vbuf_mask: usize,
dirty_vbuf_mask: usize, dirty_vbuf_mask: usize,
@ -327,6 +327,7 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
dst: dst_raw, dst: dst_raw,
dst_target, dst_target,
copy, copy,
dst_is_cubemap: dst.is_cubemap,
}) })
} }
} }
@ -428,7 +429,8 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
match desc match desc
.color_attachments .color_attachments
.first() .first()
.map(|at| &at.target.view.inner) .filter(|at| at.is_some())
.and_then(|at| at.as_ref().map(|at| &at.target.view.inner))
{ {
// default framebuffer (provided externally) // default framebuffer (provided externally)
Some(&super::TextureInner::DefaultRenderbuffer) => { Some(&super::TextureInner::DefaultRenderbuffer) => {
@ -443,18 +445,20 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
.push(C::ResetFramebuffer { is_default: false }); .push(C::ResetFramebuffer { is_default: false });
for (i, cat) in desc.color_attachments.iter().enumerate() { for (i, cat) in desc.color_attachments.iter().enumerate() {
let attachment = glow::COLOR_ATTACHMENT0 + i as u32; if let Some(cat) = cat.as_ref() {
self.cmd_buffer.commands.push(C::BindAttachment { let attachment = glow::COLOR_ATTACHMENT0 + i as u32;
attachment, self.cmd_buffer.commands.push(C::BindAttachment {
view: cat.target.view.clone(), attachment,
}); view: cat.target.view.clone(),
if let Some(ref rat) = cat.resolve_target { });
self.state if let Some(ref rat) = cat.resolve_target {
.resolve_attachments self.state
.push((attachment, rat.view.clone())); .resolve_attachments
} .push((attachment, rat.view.clone()));
if !cat.ops.contains(crate::AttachmentOps::STORE) { }
self.state.invalidate_attachments.push(attachment); if !cat.ops.contains(crate::AttachmentOps::STORE) {
self.state.invalidate_attachments.push(attachment);
}
} }
} }
if let Some(ref dsat) = desc.depth_stencil_attachment { if let Some(ref dsat) = desc.depth_stencil_attachment {
@ -504,7 +508,12 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
}); });
// issue the clears // issue the clears
for (i, cat) in desc.color_attachments.iter().enumerate() { for (i, cat) in desc
.color_attachments
.iter()
.filter_map(|at| at.as_ref())
.enumerate()
{
if !cat.ops.contains(crate::AttachmentOps::LOAD) { if !cat.ops.contains(crate::AttachmentOps::LOAD) {
let c = &cat.clear_value; let c = &cat.clear_value;
self.cmd_buffer self.cmd_buffer

third_party/rust/wgpu-hal/src/gles/device.rs

@ -144,6 +144,23 @@ impl super::Device {
.position(|ep| ep.name.as_str() == stage.entry_point) .position(|ep| ep.name.as_str() == stage.entry_point)
.ok_or(crate::PipelineError::EntryPoint(naga_stage))?; .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;
use naga::proc::BoundsCheckPolicy;
// The image bounds checks require the TEXTURE_LEVELS feature available in GL core 1.3+.
let version = gl.version();
let image_check = if !version.is_embedded && (version.major, version.minor) >= (1, 3) {
BoundsCheckPolicy::ReadZeroSkipWrite
} else {
BoundsCheckPolicy::Unchecked
};
// Other bounds check are either provided by glsl or not implemented yet.
let policies = naga::proc::BoundsCheckPolicies {
index: BoundsCheckPolicy::Unchecked,
buffer: BoundsCheckPolicy::Unchecked,
image: image_check,
binding_array: BoundsCheckPolicy::Unchecked,
};
let mut output = String::new(); let mut output = String::new();
let mut writer = glsl::Writer::new( let mut writer = glsl::Writer::new(
&mut output, &mut output,
@ -151,6 +168,7 @@ impl super::Device {
&shader.info, &shader.info,
&context.layout.naga_options, &context.layout.naga_options,
&pipeline_options, &pipeline_options,
policies,
) )
.map_err(|e| { .map_err(|e| {
let msg = format!("{}", e); let msg = format!("{}", e);
@ -528,7 +546,7 @@ impl crate::Device<super::Api> for super::Device {
depth: 1, depth: 1,
}; };
let inner = if render_usage.contains(desc.usage) let (inner, is_cubemap) = if render_usage.contains(desc.usage)
&& desc.dimension == wgt::TextureDimension::D2 && desc.dimension == wgt::TextureDimension::D2
&& desc.size.depth_or_array_layers == 1 && desc.size.depth_or_array_layers == 1
{ {
@ -559,10 +577,10 @@ impl crate::Device<super::Api> for super::Device {
} }
gl.bind_renderbuffer(glow::RENDERBUFFER, None); gl.bind_renderbuffer(glow::RENDERBUFFER, None);
super::TextureInner::Renderbuffer { raw } (super::TextureInner::Renderbuffer { raw }, false)
} else { } else {
let raw = gl.create_texture().unwrap(); let raw = gl.create_texture().unwrap();
let (target, is_3d) = match desc.dimension { let (target, is_3d, is_cubemap) = match desc.dimension {
wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => { wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => {
if desc.size.depth_or_array_layers > 1 { if desc.size.depth_or_array_layers > 1 {
//HACK: detect a cube map //HACK: detect a cube map
@ -575,17 +593,17 @@ impl crate::Device<super::Api> for super::Device {
None None
}; };
match cube_count { match cube_count {
None => (glow::TEXTURE_2D_ARRAY, true), None => (glow::TEXTURE_2D_ARRAY, true, false),
Some(1) => (glow::TEXTURE_CUBE_MAP, false), Some(1) => (glow::TEXTURE_CUBE_MAP, false, true),
Some(_) => (glow::TEXTURE_CUBE_MAP_ARRAY, true), Some(_) => (glow::TEXTURE_CUBE_MAP_ARRAY, true, true),
} }
} else { } else {
(glow::TEXTURE_2D, false) (glow::TEXTURE_2D, false, false)
} }
} }
wgt::TextureDimension::D3 => { wgt::TextureDimension::D3 => {
copy_size.depth = desc.size.depth_or_array_layers; copy_size.depth = desc.size.depth_or_array_layers;
(glow::TEXTURE_3D, true) (glow::TEXTURE_3D, true, false)
} }
}; };
@ -639,7 +657,7 @@ impl crate::Device<super::Api> for super::Device {
} }
gl.bind_texture(target, None); gl.bind_texture(target, None);
super::TextureInner::Texture { raw, target } (super::TextureInner::Texture { raw, target }, is_cubemap)
}; };
Ok(super::Texture { Ok(super::Texture {
@ -653,6 +671,7 @@ impl crate::Device<super::Api> for super::Device {
format: desc.format, format: desc.format,
format_desc, format_desc,
copy_size, copy_size,
is_cubemap,
}) })
} }
unsafe fn destroy_texture(&self, texture: super::Texture) { unsafe fn destroy_texture(&self, texture: super::Texture) {
@ -983,7 +1002,7 @@ impl crate::Device<super::Api> for super::Device {
let color_targets = { let color_targets = {
let mut targets = Vec::new(); let mut targets = Vec::new();
for ct in desc.color_targets.iter() { for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
targets.push(super::ColorTargetDesc { targets.push(super::ColorTargetDesc {
mask: ct.write_mask, mask: ct.write_mask,
blend: ct.blend.as_ref().map(conv::map_blend), blend: ct.blend.as_ref().map(conv::map_blend),

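The device.rs hunk configures Naga's bounds-check policies per shader: image accesses get ReadZeroSkipWrite only on non-embedded GL reporting a high enough version, and everything else stays Unchecked. A self-contained sketch of that selection; BoundsCheckPolicy, BoundsCheckPolicies, and GlVersion are local stand-ins for the naga and glow types.

#[derive(Clone, Copy, Debug, PartialEq)]
enum BoundsCheckPolicy {
    ReadZeroSkipWrite,
    Unchecked,
}

#[derive(Clone, Copy, Debug)]
struct BoundsCheckPolicies {
    index: BoundsCheckPolicy,
    buffer: BoundsCheckPolicy,
    image: BoundsCheckPolicy,
    binding_array: BoundsCheckPolicy,
}

struct GlVersion {
    is_embedded: bool,
    major: u32,
    minor: u32,
}

fn choose_policies(version: &GlVersion) -> BoundsCheckPolicies {
    // Only clamp image accesses; index and buffer checks are left to GLSL
    // (or are not implemented yet), mirroring the hunk above.
    let image = if !version.is_embedded && (version.major, version.minor) >= (1, 3) {
        BoundsCheckPolicy::ReadZeroSkipWrite
    } else {
        BoundsCheckPolicy::Unchecked
    };
    BoundsCheckPolicies {
        index: BoundsCheckPolicy::Unchecked,
        buffer: BoundsCheckPolicy::Unchecked,
        image,
        binding_array: BoundsCheckPolicy::Unchecked,
    }
}

fn main() {
    let desktop = GlVersion { is_embedded: false, major: 4, minor: 6 };
    let gles = GlVersion { is_embedded: true, major: 3, minor: 0 };
    assert_eq!(choose_policies(&desktop).image, BoundsCheckPolicy::ReadZeroSkipWrite);
    assert_eq!(choose_policies(&gles).image, BoundsCheckPolicy::Unchecked);
}
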
third_party/rust/wgpu-hal/src/gles/egl.rs

@ -1194,7 +1194,7 @@ impl crate::Surface<super::Api> for Surface {
unsafe fn acquire_texture( unsafe fn acquire_texture(
&mut self, &mut self,
_timeout_ms: u32, //TODO _timeout_ms: Option<Duration>, //TODO
) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> { ) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
let sc = self.swapchain.as_ref().unwrap(); let sc = self.swapchain.as_ref().unwrap();
let texture = super::Texture { let texture = super::Texture {
@ -1210,6 +1210,7 @@ impl crate::Surface<super::Api> for Surface {
height: sc.extent.height, height: sc.extent.height,
depth: 1, depth: 1,
}, },
is_cubemap: false,
}; };
Ok(Some(crate::AcquiredSurfaceTexture { Ok(Some(crate::AcquiredSurfaceTexture {
texture, texture,

third_party/rust/wgpu-hal/src/gles/mod.rs

@ -184,6 +184,7 @@ struct AdapterShared {
workarounds: Workarounds, workarounds: Workarounds,
shading_language_version: naga::back::glsl::Version, shading_language_version: naga::back::glsl::Version,
max_texture_size: u32, max_texture_size: u32,
is_ext_color_buffer_float_supported: bool,
} }
pub struct Adapter { pub struct Adapter {
@ -262,6 +263,7 @@ pub struct Texture {
#[allow(unused)] #[allow(unused)]
format_desc: TextureFormatDesc, format_desc: TextureFormatDesc,
copy_size: crate::CopyExtent, copy_size: crate::CopyExtent,
is_cubemap: bool,
} }
impl Texture { impl Texture {
@ -281,6 +283,7 @@ impl Texture {
height: 0, height: 0,
depth: 0, depth: 0,
}, },
is_cubemap: false,
} }
} }
} }
@ -564,7 +567,7 @@ struct PrimitiveState {
unclipped_depth: bool, unclipped_depth: bool,
} }
type InvalidatedAttachments = ArrayVec<u32, { crate::MAX_COLOR_TARGETS + 2 }>; type InvalidatedAttachments = ArrayVec<u32, { crate::MAX_COLOR_ATTACHMENTS + 2 }>;
#[derive(Debug)] #[derive(Debug)]
enum Command { enum Command {
@ -616,6 +619,7 @@ enum Command {
dst: glow::Texture, dst: glow::Texture,
dst_target: BindTarget, dst_target: BindTarget,
copy: crate::TextureCopy, copy: crate::TextureCopy,
dst_is_cubemap: bool,
}, },
CopyBufferToTexture { CopyBufferToTexture {
src: Buffer, src: Buffer,

third_party/rust/wgpu-hal/src/gles/queue.rs

@ -50,7 +50,7 @@ impl super::Queue {
// Reset the draw buffers to what they were before the clear // Reset the draw buffers to what they were before the clear
let indices = (0..self.draw_buffer_count as u32) let indices = (0..self.draw_buffer_count as u32)
.map(|i| glow::COLOR_ATTACHMENT0 + i) .map(|i| glow::COLOR_ATTACHMENT0 + i)
.collect::<ArrayVec<_, { crate::MAX_COLOR_TARGETS }>>(); .collect::<ArrayVec<_, { crate::MAX_COLOR_ATTACHMENTS }>>();
gl.draw_buffers(&indices); gl.draw_buffers(&indices);
} }
#[cfg(not(target_arch = "wasm32"))] #[cfg(not(target_arch = "wasm32"))]
@ -213,19 +213,39 @@ impl super::Queue {
ref range, ref range,
} => match dst.raw { } => match dst.raw {
Some(buffer) => { Some(buffer) => {
gl.bind_buffer(glow::COPY_READ_BUFFER, Some(self.zero_buffer)); // When `INDEX_BUFFER_ROLE_CHANGE` isn't available, we can't copy into the
gl.bind_buffer(dst_target, Some(buffer)); // index buffer from the zero buffer. This would fail in Chrome with the
let mut dst_offset = range.start; // following message:
while dst_offset < range.end { //
let size = (range.end - dst_offset).min(super::ZERO_BUFFER_SIZE as u64); // > Cannot copy into an element buffer destination from a non-element buffer
gl.copy_buffer_sub_data( // > source
glow::COPY_READ_BUFFER, //
dst_target, // Instead, we'll upload zeroes into the buffer.
0, let can_use_zero_buffer = self
dst_offset as i32, .shared
size as i32, .private_caps
); .contains(super::PrivateCapabilities::INDEX_BUFFER_ROLE_CHANGE)
dst_offset += size; || dst_target != glow::ELEMENT_ARRAY_BUFFER;
if can_use_zero_buffer {
gl.bind_buffer(glow::COPY_READ_BUFFER, Some(self.zero_buffer));
gl.bind_buffer(dst_target, Some(buffer));
let mut dst_offset = range.start;
while dst_offset < range.end {
let size = (range.end - dst_offset).min(super::ZERO_BUFFER_SIZE as u64);
gl.copy_buffer_sub_data(
glow::COPY_READ_BUFFER,
dst_target,
0,
dst_offset as i32,
size as i32,
);
dst_offset += size;
}
} else {
gl.bind_buffer(dst_target, Some(buffer));
let zeroes = vec![0u8; (range.end - range.start) as usize];
gl.buffer_sub_data_u8_slice(dst_target, range.start as i32, &zeroes);
} }
} }
None => { None => {
@ -308,10 +328,10 @@ impl super::Queue {
src_target, src_target,
dst, dst,
dst_target, dst_target,
dst_is_cubemap,
ref copy, ref copy,
} => { } => {
//TODO: handle 3D copies //TODO: handle 3D copies
//TODO: handle cubemap copies
gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)); gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo));
if is_layered_target(src_target) { if is_layered_target(src_target) {
//TODO: handle GLES without framebuffer_texture_3d //TODO: handle GLES without framebuffer_texture_3d
@ -333,7 +353,18 @@ impl super::Queue {
} }
gl.bind_texture(dst_target, Some(dst)); gl.bind_texture(dst_target, Some(dst));
if is_layered_target(dst_target) { if dst_is_cubemap {
gl.copy_tex_sub_image_2d(
CUBEMAP_FACES[copy.dst_base.array_layer as usize],
copy.dst_base.mip_level as i32,
copy.dst_base.origin.x as i32,
copy.dst_base.origin.y as i32,
copy.src_base.origin.x as i32,
copy.src_base.origin.y as i32,
copy.size.width as i32,
copy.size.height as i32,
);
} else if is_layered_target(dst_target) {
gl.copy_tex_sub_image_3d( gl.copy_tex_sub_image_3d(
dst_target, dst_target,
copy.dst_base.mip_level as i32, copy.dst_base.mip_level as i32,
@ -397,7 +428,7 @@ impl super::Queue {
} }
}; };
match dst_target { match dst_target {
glow::TEXTURE_3D | glow::TEXTURE_2D_ARRAY => { glow::TEXTURE_3D => {
gl.tex_sub_image_3d( gl.tex_sub_image_3d(
dst_target, dst_target,
copy.texture_base.mip_level as i32, copy.texture_base.mip_level as i32,
@ -412,6 +443,21 @@ impl super::Queue {
unpack_data, unpack_data,
); );
} }
glow::TEXTURE_2D_ARRAY => {
gl.tex_sub_image_3d(
dst_target,
copy.texture_base.mip_level as i32,
copy.texture_base.origin.x as i32,
copy.texture_base.origin.y as i32,
copy.texture_base.array_layer as i32,
copy.size.width as i32,
copy.size.height as i32,
copy.size.depth as i32,
format_desc.external,
format_desc.data_type,
unpack_data,
);
}
glow::TEXTURE_2D => { glow::TEXTURE_2D => {
gl.tex_sub_image_2d( gl.tex_sub_image_2d(
dst_target, dst_target,
@ -662,7 +708,7 @@ impl super::Queue {
None, None,
0, 0,
); );
for i in 0..crate::MAX_COLOR_TARGETS { for i in 0..crate::MAX_COLOR_ATTACHMENTS {
let target = glow::COLOR_ATTACHMENT0 + i as u32; let target = glow::COLOR_ATTACHMENT0 + i as u32;
gl.framebuffer_texture_2d( gl.framebuffer_texture_2d(
glow::DRAW_FRAMEBUFFER, glow::DRAW_FRAMEBUFFER,
@ -717,7 +763,7 @@ impl super::Queue {
self.draw_buffer_count = count; self.draw_buffer_count = count;
let indices = (0..count as u32) let indices = (0..count as u32)
.map(|i| glow::COLOR_ATTACHMENT0 + i) .map(|i| glow::COLOR_ATTACHMENT0 + i)
.collect::<ArrayVec<_, { crate::MAX_COLOR_TARGETS }>>(); .collect::<ArrayVec<_, { crate::MAX_COLOR_ATTACHMENTS }>>();
gl.draw_buffers(&indices); gl.draw_buffers(&indices);
if self if self

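The ClearBuffer hunk now has two paths: when the destination may be rebound as a copy target, it is filled from the preallocated zero buffer in ZERO_BUFFER_SIZE chunks; otherwise (element buffers on WebGL) a zeroed slice is uploaded directly. A plain-Rust sketch of just the chunking arithmetic; the closure stands in for the gl.copy_buffer_sub_data call.

/// Walk a destination range in chunks no larger than the preallocated zero
/// buffer, invoking `copy(dst_offset, size)` once per chunk.
fn clear_in_chunks(range: std::ops::Range<u64>, zero_buffer_size: u64, mut copy: impl FnMut(u64, u64)) {
    let mut dst_offset = range.start;
    while dst_offset < range.end {
        let size = (range.end - dst_offset).min(zero_buffer_size);
        copy(dst_offset, size);
        dst_offset += size;
    }
}

fn main() {
    // Clearing 10 bytes with a 4-byte zero buffer issues copies of 4, 4, and 2 bytes.
    let mut chunks = Vec::new();
    clear_in_chunks(0..10, 4, |offset, size| chunks.push((offset, size)));
    assert_eq!(chunks, vec![(0, 4), (4, 4), (8, 2)]);
}
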
third_party/rust/wgpu-hal/src/gles/web.rs

@ -5,7 +5,7 @@ use wasm_bindgen::JsCast;
use super::TextureFormatDesc; use super::TextureFormatDesc;
/// A wrapper around a [`glow::Context`] to provide a fake `lock()` api that makes it compatible /// A wrapper around a [`glow::Context`] to provide a fake `lock()` api that makes it compatible
/// with the `AdapterContext` API fromt the EGL implementation. /// with the `AdapterContext` API from the EGL implementation.
pub struct AdapterContext { pub struct AdapterContext {
pub glow_context: glow::Context, pub glow_context: glow::Context,
} }
@ -25,7 +25,62 @@ impl AdapterContext {
#[derive(Debug)] #[derive(Debug)]
pub struct Instance { pub struct Instance {
canvas: Mutex<Option<web_sys::HtmlCanvasElement>>, webgl2_context: Mutex<Option<web_sys::WebGl2RenderingContext>>,
}
impl Instance {
pub fn create_surface_from_canvas(
&self,
canvas: &web_sys::HtmlCanvasElement,
) -> Result<Surface, crate::InstanceError> {
let webgl2_context = canvas
.get_context_with_context_options("webgl2", &Self::create_context_options())
.expect("Cannot create WebGL2 context")
.and_then(|context| context.dyn_into::<web_sys::WebGl2RenderingContext>().ok())
.expect("Cannot convert into WebGL2 context");
*self.webgl2_context.lock() = Some(webgl2_context.clone());
Ok(Surface {
webgl2_context,
present_program: None,
swapchain: None,
texture: None,
presentable: true,
})
}
pub fn create_surface_from_offscreen_canvas(
&self,
canvas: &web_sys::OffscreenCanvas,
) -> Result<Surface, crate::InstanceError> {
let webgl2_context = canvas
.get_context_with_context_options("webgl2", &Self::create_context_options())
.expect("Cannot create WebGL2 context")
.and_then(|context| context.dyn_into::<web_sys::WebGl2RenderingContext>().ok())
.expect("Cannot convert into WebGL2 context");
*self.webgl2_context.lock() = Some(webgl2_context.clone());
Ok(Surface {
webgl2_context,
present_program: None,
swapchain: None,
texture: None,
presentable: true,
})
}
fn create_context_options() -> js_sys::Object {
let context_options = js_sys::Object::new();
js_sys::Reflect::set(
&context_options,
&"antialias".into(),
&wasm_bindgen::JsValue::FALSE,
)
.expect("Cannot create context options");
context_options
}
} }
// SAFE: WASM doesn't have threads // SAFE: WASM doesn't have threads
@ -35,28 +90,14 @@ unsafe impl Send for Instance {}
impl crate::Instance<super::Api> for Instance { impl crate::Instance<super::Api> for Instance {
unsafe fn init(_desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> { unsafe fn init(_desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> {
Ok(Instance { Ok(Instance {
canvas: Mutex::new(None), webgl2_context: Mutex::new(None),
}) })
} }
unsafe fn enumerate_adapters(&self) -> Vec<crate::ExposedAdapter<super::Api>> { unsafe fn enumerate_adapters(&self) -> Vec<crate::ExposedAdapter<super::Api>> {
let canvas_guard = self.canvas.lock(); let context_guard = self.webgl2_context.lock();
let gl = match *canvas_guard { let gl = match *context_guard {
Some(ref canvas) => { Some(ref webgl2_context) => glow::Context::from_webgl2_context(webgl2_context.clone()),
let context_options = js_sys::Object::new();
js_sys::Reflect::set(
&context_options,
&"antialias".into(),
&wasm_bindgen::JsValue::FALSE,
)
.expect("Cannot create context options");
let webgl2_context = canvas
.get_context_with_context_options("webgl2", &context_options)
.expect("Cannot create WebGL2 context")
.and_then(|context| context.dyn_into::<web_sys::WebGl2RenderingContext>().ok())
.expect("Cannot convert into WebGL2 context");
glow::Context::from_webgl2_context(webgl2_context)
}
None => return Vec::new(), None => return Vec::new(),
}; };
@ -79,26 +120,18 @@ impl crate::Instance<super::Api> for Instance {
.dyn_into() .dyn_into()
.expect("Failed to downcast to canvas type"); .expect("Failed to downcast to canvas type");
*self.canvas.lock() = Some(canvas.clone()); self.create_surface_from_canvas(&canvas)
Ok(Surface {
canvas,
present_program: None,
swapchain: None,
texture: None,
presentable: true,
})
} else { } else {
unreachable!() unreachable!()
} }
} }
unsafe fn destroy_surface(&self, surface: Surface) { unsafe fn destroy_surface(&self, surface: Surface) {
let mut canvas_option_ref = self.canvas.lock(); let mut context_option_ref = self.webgl2_context.lock();
if let Some(canvas) = canvas_option_ref.as_ref() { if let Some(context) = context_option_ref.as_ref() {
if canvas == &surface.canvas { if context == &surface.webgl2_context {
*canvas_option_ref = None; *context_option_ref = None;
} }
} }
} }
@ -106,7 +139,7 @@ impl crate::Instance<super::Api> for Instance {
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Surface { pub struct Surface {
canvas: web_sys::HtmlCanvasElement, webgl2_context: web_sys::WebGl2RenderingContext,
pub(super) swapchain: Option<Swapchain>, pub(super) swapchain: Option<Swapchain>,
texture: Option<glow::Texture>, texture: Option<glow::Texture>,
pub(super) presentable: bool, pub(super) presentable: bool,
@ -253,7 +286,7 @@ impl crate::Surface<super::Api> for Surface {
unsafe fn acquire_texture( unsafe fn acquire_texture(
&mut self, &mut self,
_timeout_ms: u32, _timeout_ms: Option<std::time::Duration>, //TODO
) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> { ) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
let sc = self.swapchain.as_ref().unwrap(); let sc = self.swapchain.as_ref().unwrap();
let texture = super::Texture { let texture = super::Texture {
@ -270,6 +303,7 @@ impl crate::Surface<super::Api> for Surface {
height: sc.extent.height, height: sc.extent.height,
depth: 1, depth: 1,
}, },
is_cubemap: false,
}; };
Ok(Some(crate::AcquiredSurfaceTexture { Ok(Some(crate::AcquiredSurfaceTexture {
texture, texture,

third_party/rust/wgpu-hal/src/lib.rs

@ -97,7 +97,7 @@ use thiserror::Error;
pub const MAX_ANISOTROPY: u8 = 16; pub const MAX_ANISOTROPY: u8 = 16;
pub const MAX_BIND_GROUPS: usize = 8; pub const MAX_BIND_GROUPS: usize = 8;
pub const MAX_VERTEX_BUFFERS: usize = 16; pub const MAX_VERTEX_BUFFERS: usize = 16;
pub const MAX_COLOR_TARGETS: usize = 8; pub const MAX_COLOR_ATTACHMENTS: usize = 8;
pub const MAX_MIP_LEVELS: u32 = 16; pub const MAX_MIP_LEVELS: u32 = 16;
/// Size of a single occlusion/timestamp query, when copied into a buffer, in bytes. /// Size of a single occlusion/timestamp query, when copied into a buffer, in bytes.
pub const QUERY_SIZE: wgt::BufferAddress = 8; pub const QUERY_SIZE: wgt::BufferAddress = 8;
@ -193,10 +193,19 @@ pub trait Surface<A: Api>: Send + Sync {
unsafe fn unconfigure(&mut self, device: &A::Device); unsafe fn unconfigure(&mut self, device: &A::Device);
/// Returns the next texture to be presented by the swapchain for drawing
///
/// A `timeout` of `None` means to wait indefinitely, with no timeout.
///
/// # Portability
///
/// Some backends can't support a timeout when acquiring a texture and
/// the timeout will be ignored.
///
/// Returns `None` on timing out. /// Returns `None` on timing out.
unsafe fn acquire_texture( unsafe fn acquire_texture(
&mut self, &mut self,
timeout_ms: u32, timeout: Option<std::time::Duration>,
) -> Result<Option<AcquiredSurfaceTexture<A>>, SurfaceError>; ) -> Result<Option<AcquiredSurfaceTexture<A>>, SurfaceError>;
unsafe fn discard_texture(&mut self, texture: A::SurfaceTexture); unsafe fn discard_texture(&mut self, texture: A::SurfaceTexture);
} }
@ -304,6 +313,7 @@ pub trait Device<A: Api>: Send + Sync {
unsafe fn create_fence(&self) -> Result<A::Fence, DeviceError>; unsafe fn create_fence(&self) -> Result<A::Fence, DeviceError>;
unsafe fn destroy_fence(&self, fence: A::Fence); unsafe fn destroy_fence(&self, fence: A::Fence);
unsafe fn get_fence_value(&self, fence: &A::Fence) -> Result<FenceValue, DeviceError>; unsafe fn get_fence_value(&self, fence: &A::Fence) -> Result<FenceValue, DeviceError>;
/// Calling wait with a lower value than the current fence value will immediately return.
unsafe fn wait( unsafe fn wait(
&self, &self,
fence: &A::Fence, fence: &A::Fence,
@ -1014,7 +1024,7 @@ pub struct RenderPipelineDescriptor<'a, A: Api> {
/// The fragment stage for this pipeline. /// The fragment stage for this pipeline.
pub fragment_stage: Option<ProgrammableStage<'a, A>>, pub fragment_stage: Option<ProgrammableStage<'a, A>>,
/// The effect of draw calls on the color aspect of the output target. /// The effect of draw calls on the color aspect of the output target.
pub color_targets: &'a [wgt::ColorTargetState], pub color_targets: &'a [Option<wgt::ColorTargetState>],
/// If the pipeline will be used with a multiview render pass, this indicates how many array /// If the pipeline will be used with a multiview render pass, this indicates how many array
/// layers the attachments will have. /// layers the attachments will have.
pub multiview: Option<NonZeroU32>, pub multiview: Option<NonZeroU32>,
@ -1169,7 +1179,7 @@ pub struct RenderPassDescriptor<'a, A: Api> {
pub label: Label<'a>, pub label: Label<'a>,
pub extent: wgt::Extent3d, pub extent: wgt::Extent3d,
pub sample_count: u32, pub sample_count: u32,
pub color_attachments: &'a [ColorAttachment<'a, A>], pub color_attachments: &'a [Option<ColorAttachment<'a, A>>],
pub depth_stencil_attachment: Option<DepthStencilAttachment<'a, A>>, pub depth_stencil_attachment: Option<DepthStencilAttachment<'a, A>>,
pub multiview: Option<NonZeroU32>, pub multiview: Option<NonZeroU32>,
} }

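Finally, lib.rs changes RenderPipelineDescriptor::color_targets and RenderPassDescriptor::color_attachments to slices of Option, so an unused slot keeps its index instead of shifting later targets down. A hypothetical caller-side sketch with a trimmed-down ColorTargetState stand-in (not the real wgt::ColorTargetState):

#[derive(Clone, Debug)]
struct ColorTargetState {
    format: &'static str,
    blend_enabled: bool,
}

fn main() {
    // Slots 0 and 2 are written by the fragment shader; slot 1 is unused but
    // still occupies an index, which is why the element type is Option.
    let color_targets: Vec<Option<ColorTargetState>> = vec![
        Some(ColorTargetState { format: "Rgba8Unorm", blend_enabled: true }),
        None,
        Some(ColorTargetState { format: "Rg11b10Float", blend_enabled: false }),
    ];

    for (index, target) in color_targets.iter().enumerate() {
        match target {
            Some(t) => println!("slot {index}: {t:?}"),
            None => println!("slot {index}: unused"),
        }
    }
}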