Bug 1908573 - build(webgpu): update WGPU to aeb2067e8120c1ff480625c00b9571db8d01d5a4 (`trunk` as of 2024-07-17) r=webgpu-reviewers,supply-chain-reviewers,jimb

Differential Revision: https://phabricator.services.mozilla.com/D216897
This commit is contained in:
Erich Gubler 2024-07-19 03:36:50 +00:00
Родитель e2690966f8
Коммит 55a951167b
67 изменённых файлов: 2246 добавлений и 1291 удалений

Просмотреть файл

@ -25,9 +25,9 @@ git = "https://github.com/franziskuskiefer/cose-rust"
rev = "43c22248d136c8b38fe42ea709d08da6355cf04b"
replace-with = "vendored-sources"
[source."git+https://github.com/gfx-rs/wgpu?rev=a0c185a28c232ee2ab63f72d6fd3a63a3f787309"]
[source."git+https://github.com/gfx-rs/wgpu?rev=aeb2067e8120c1ff480625c00b9571db8d01d5a4"]
git = "https://github.com/gfx-rs/wgpu"
rev = "a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
rev = "aeb2067e8120c1ff480625c00b9571db8d01d5a4"
replace-with = "vendored-sources"
[source."git+https://github.com/glandium/mio?rev=9a2ef335c366044ffe73b1c4acabe50a1daefe05"]

18
Cargo.lock сгенерированный
Просмотреть файл

@ -1240,7 +1240,7 @@ dependencies = [
[[package]]
name = "d3d12"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a0c185a28c232ee2ab63f72d6fd3a63a3f787309#a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
source = "git+https://github.com/gfx-rs/wgpu?rev=aeb2067e8120c1ff480625c00b9571db8d01d5a4#aeb2067e8120c1ff480625c00b9571db8d01d5a4"
dependencies = [
"bitflags 2.6.0",
"libloading",
@ -1525,9 +1525,9 @@ dependencies = [
[[package]]
name = "document-features"
version = "0.2.8"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef5282ad69563b5fc40319526ba27e0e7363d552a896f0297d54f767717f9b95"
checksum = "cb6969eaabd2421f8a2775cfd2471a2b634372b4a25d41e3bd647b79912850a0"
dependencies = [
"litrs",
]
@ -3660,9 +3660,9 @@ dependencies = [
[[package]]
name = "metal"
version = "0.28.0"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5637e166ea14be6063a3f8ba5ccb9a4159df7d8f6d61c02fc3d480b1f90dcfcb"
checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21"
dependencies = [
"bitflags 2.6.0",
"block",
@ -4032,7 +4032,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a0c185a28c232ee2ab63f72d6fd3a63a3f787309#a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
source = "git+https://github.com/gfx-rs/wgpu?rev=aeb2067e8120c1ff480625c00b9571db8d01d5a4#aeb2067e8120c1ff480625c00b9571db8d01d5a4"
dependencies = [
"arrayvec",
"bit-set",
@ -6770,7 +6770,7 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a0c185a28c232ee2ab63f72d6fd3a63a3f787309#a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
source = "git+https://github.com/gfx-rs/wgpu?rev=aeb2067e8120c1ff480625c00b9571db8d01d5a4#aeb2067e8120c1ff480625c00b9571db8d01d5a4"
dependencies = [
"arrayvec",
"bit-vec",
@ -6795,7 +6795,7 @@ dependencies = [
[[package]]
name = "wgpu-hal"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a0c185a28c232ee2ab63f72d6fd3a63a3f787309#a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
source = "git+https://github.com/gfx-rs/wgpu?rev=aeb2067e8120c1ff480625c00b9571db8d01d5a4#aeb2067e8120c1ff480625c00b9571db8d01d5a4"
dependencies = [
"android_system_properties",
"arrayvec",
@ -6834,7 +6834,7 @@ dependencies = [
[[package]]
name = "wgpu-types"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a0c185a28c232ee2ab63f72d6fd3a63a3f787309#a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
source = "git+https://github.com/gfx-rs/wgpu?rev=aeb2067e8120c1ff480625c00b9571db8d01d5a4#aeb2067e8120c1ff480625c00b9571db8d01d5a4"
dependencies = [
"bitflags 2.6.0",
"js-sys",

Просмотреть файл

@ -17,7 +17,7 @@ default = []
[dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
rev = "aeb2067e8120c1ff480625c00b9571db8d01d5a4"
# TODO: remove the replay feature on the next update containing https://github.com/gfx-rs/wgpu/pull/5182
features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"]
@ -26,37 +26,37 @@ features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
rev = "aeb2067e8120c1ff480625c00b9571db8d01d5a4"
features = ["metal"]
# We want the wgpu-core Direct3D backends on Windows.
[target.'cfg(windows)'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
rev = "aeb2067e8120c1ff480625c00b9571db8d01d5a4"
features = ["dx12"]
# We want the wgpu-core Vulkan backend on Linux and Windows.
[target.'cfg(any(windows, all(unix, not(any(target_os = "macos", target_os = "ios")))))'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
rev = "aeb2067e8120c1ff480625c00b9571db8d01d5a4"
features = ["vulkan"]
[dependencies.wgt]
package = "wgpu-types"
git = "https://github.com/gfx-rs/wgpu"
rev = "a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
rev = "aeb2067e8120c1ff480625c00b9571db8d01d5a4"
[dependencies.wgh]
package = "wgpu-hal"
git = "https://github.com/gfx-rs/wgpu"
rev = "a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
rev = "aeb2067e8120c1ff480625c00b9571db8d01d5a4"
features = ["windows_rs", "oom_panic", "device_lost_panic", "internal_error_panic"]
[target.'cfg(windows)'.dependencies.d3d12]
git = "https://github.com/gfx-rs/wgpu"
rev = "a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
rev = "aeb2067e8120c1ff480625c00b9571db8d01d5a4"
[target.'cfg(windows)'.dependencies]
winapi = "0.3"

Просмотреть файл

@ -20,11 +20,11 @@ origin:
# Human-readable identifier for this version/release
# Generally "version NNN", "tag SSS", "bookmark SSS"
release: commit a0c185a28c232ee2ab63f72d6fd3a63a3f787309
release: aeb2067e8120c1ff480625c00b9571db8d01d5a4 (2024-07-17T23:11:04Z).
# Revision to pull in
# Must be a long or short commit SHA (long preferred)
revision: a0c185a28c232ee2ab63f72d6fd3a63a3f787309
revision: aeb2067e8120c1ff480625c00b9571db8d01d5a4
license: ['MIT', 'Apache-2.0']

Просмотреть файл

@ -331,7 +331,7 @@ impl ImplicitLayout<'_> {
pipeline: identities.pipeline_layouts.process(backend),
bind_groups: Cow::Owned(
(0..8) // hal::MAX_BIND_GROUPS
.map(|_| Some(identities.bind_group_layouts.process(backend)))
.map(|_| identities.bind_group_layouts.process(backend))
.collect(),
),
}
@ -1276,7 +1276,7 @@ pub unsafe extern "C" fn wgpu_client_create_compute_pipeline(
let implicit = ImplicitLayout::new(identities.select(backend), backend);
ptr::write(implicit_pipeline_layout_id, Some(implicit.pipeline));
for (i, bgl_id) in implicit.bind_groups.iter().enumerate() {
*implicit_bind_group_layout_ids.add(i) = *bgl_id;
*implicit_bind_group_layout_ids.add(i) = Some(*bgl_id);
}
Some(implicit)
}
@ -1331,7 +1331,7 @@ pub unsafe extern "C" fn wgpu_client_create_render_pipeline(
let implicit = ImplicitLayout::new(identities.select(backend), backend);
ptr::write(implicit_pipeline_layout_id, Some(implicit.pipeline));
for (i, bgl_id) in implicit.bind_groups.iter().enumerate() {
*implicit_bind_group_layout_ids.add(i) = *bgl_id;
*implicit_bind_group_layout_ids.add(i) = Some(*bgl_id);
}
Some(implicit)
}

Просмотреть файл

@ -109,7 +109,7 @@ pub struct AdapterInformation<S> {
#[derive(serde::Serialize, serde::Deserialize)]
struct ImplicitLayout<'a> {
pipeline: id::PipelineLayoutId,
bind_groups: Cow<'a, [Option<id::BindGroupLayoutId>]>,
bind_groups: Cow<'a, [id::BindGroupLayoutId]>,
}
#[derive(serde::Serialize, serde::Deserialize)]

Просмотреть файл

@ -848,7 +848,7 @@ impl Global {
let implicit_ids = implicit
.as_ref()
.map(|imp| wgc::device::ImplicitPipelineIds {
root_id: Some(imp.pipeline),
root_id: imp.pipeline,
group_ids: &imp.bind_groups,
});
let (_, error) = self.device_create_compute_pipeline::<A>(
@ -865,7 +865,7 @@ impl Global {
let implicit_ids = implicit
.as_ref()
.map(|imp| wgc::device::ImplicitPipelineIds {
root_id: Some(imp.pipeline),
root_id: imp.pipeline,
group_ids: &imp.bind_groups,
});
let (_, error) =

Просмотреть файл

@ -1474,11 +1474,11 @@ delta = "0.19.0 -> 0.20.0"
[[audits.d3d12]]
who = [
"Jim Blandy <jimb@red-bean.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
delta = "0.20.0 -> 0.20.0@git:aeb2067e8120c1ff480625c00b9571db8d01d5a4"
importable = false
[[audits.darling]]
@ -3077,11 +3077,11 @@ delta = "0.19.2 -> 0.20.0"
[[audits.naga]]
who = [
"Jim Blandy <jimb@red-bean.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
delta = "0.20.0 -> 0.20.0@git:aeb2067e8120c1ff480625c00b9571db8d01d5a4"
importable = false
[[audits.net2]]
@ -5030,11 +5030,11 @@ delta = "0.19.3 -> 0.20.0"
[[audits.wgpu-core]]
who = [
"Jim Blandy <jimb@red-bean.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
delta = "0.20.0 -> 0.20.0@git:aeb2067e8120c1ff480625c00b9571db8d01d5a4"
importable = false
[[audits.wgpu-hal]]
@ -5098,11 +5098,11 @@ delta = "0.19.3 -> 0.20.0"
[[audits.wgpu-hal]]
who = [
"Jim Blandy <jimb@red-bean.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
delta = "0.20.0 -> 0.20.0@git:aeb2067e8120c1ff480625c00b9571db8d01d5a4"
importable = false
[[audits.wgpu-types]]
@ -5166,11 +5166,11 @@ delta = "0.19.2 -> 0.20.0"
[[audits.wgpu-types]]
who = [
"Jim Blandy <jimb@red-bean.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:a0c185a28c232ee2ab63f72d6fd3a63a3f787309"
delta = "0.20.0 -> 0.20.0@git:aeb2067e8120c1ff480625c00b9571db8d01d5a4"
importable = false
[[audits.whatsys]]

2
third_party/rust/d3d12/.cargo-checksum.json поставляемый
Просмотреть файл

@ -1 +1 @@
{"files":{"CHANGELOG.md":"45fa76b0e5bc51721887147000e9e78a5934cb04d1ad628e501ef2082763d353","Cargo.toml":"9938addd7ce2c7785a9ca11eb0049271317f9b05fdf0d7330d4a80f0e07ab500","README.md":"76cee3209f773a62535de6c9724b53f158406359f35b4d48b17ac3747b6c102e","src/com.rs":"cfd6556a7abf38cba57559038f9f2cf86274418448fb2745436c251a99575e05","src/command_allocator.rs":"ef01059a661749470f3772d188fe0fab0f002e1d154facdab4b9b2932f4b2d93","src/command_list.rs":"8723f3b755b721e0dbb234bd604956c1b7922a2368231197495daa3fa6548e63","src/debug.rs":"aa33b98f7c3e71cba75fc42c6ca9af72d96b45122422c16e48525e24590c57bf","src/descriptor.rs":"fea0b820de1566b54d17d8d0c67e6f5a2126eda19526397eb710ff7d6db9db9e","src/device.rs":"c1dd479aabd22bced0d407523d60629ad1da439fb47ad89fe7b48bae1c4b23e5","src/dxgi.rs":"1516186845b91bf3df813a29b4a0e00a85ca5649fb7a2755da43fba984c41a42","src/heap.rs":"dae2380684896c97e97ed022929f79ce2cc4f5418a3ec34883086f7c88f423d0","src/lib.rs":"612e2f471b84502d219da3fb86ee13f3cbd6faf17d77407bab6c84e51ec424d0","src/pso.rs":"ff819c321536695e34a3be9a6051cf3e57765049a4a2035db6ab27add5a7978a","src/query.rs":"ff61a2b76a108afc1f082724bb9b07ac8b52afbe97356e0fcf6df0ff7e53e07d","src/queue.rs":"bd32813d0b8a3bedf3223b69ade9f9c799a138a9e27d970f86435d9ce32d1557","src/resource.rs":"931c255c845eb621fc1b9e807b0effd92a2cd20e624c2beaa88506019a7a43a4","src/sync.rs":"5c287fb7498242a397eb1f08887be9cff9b48dc7cb13af5792cce5f7182b55f8"},"package":null}
{"files":{"CHANGELOG.md":"45fa76b0e5bc51721887147000e9e78a5934cb04d1ad628e501ef2082763d353","Cargo.toml":"9938addd7ce2c7785a9ca11eb0049271317f9b05fdf0d7330d4a80f0e07ab500","README.md":"76cee3209f773a62535de6c9724b53f158406359f35b4d48b17ac3747b6c102e","src/com.rs":"cfd6556a7abf38cba57559038f9f2cf86274418448fb2745436c251a99575e05","src/command_allocator.rs":"ef01059a661749470f3772d188fe0fab0f002e1d154facdab4b9b2932f4b2d93","src/command_list.rs":"8723f3b755b721e0dbb234bd604956c1b7922a2368231197495daa3fa6548e63","src/debug.rs":"aa33b98f7c3e71cba75fc42c6ca9af72d96b45122422c16e48525e24590c57bf","src/descriptor.rs":"fea0b820de1566b54d17d8d0c67e6f5a2126eda19526397eb710ff7d6db9db9e","src/device.rs":"c1dd479aabd22bced0d407523d60629ad1da439fb47ad89fe7b48bae1c4b23e5","src/dxgi.rs":"1516186845b91bf3df813a29b4a0e00a85ca5649fb7a2755da43fba984c41a42","src/heap.rs":"dae2380684896c97e97ed022929f79ce2cc4f5418a3ec34883086f7c88f423d0","src/lib.rs":"612e2f471b84502d219da3fb86ee13f3cbd6faf17d77407bab6c84e51ec424d0","src/pso.rs":"ff819c321536695e34a3be9a6051cf3e57765049a4a2035db6ab27add5a7978a","src/query.rs":"b046b922f48e817fe252d9b2f859c036f54635779e84103ca53d1b2ca9c18e02","src/queue.rs":"bd32813d0b8a3bedf3223b69ade9f9c799a138a9e27d970f86435d9ce32d1557","src/resource.rs":"931c255c845eb621fc1b9e807b0effd92a2cd20e624c2beaa88506019a7a43a4","src/sync.rs":"5c287fb7498242a397eb1f08887be9cff9b48dc7cb13af5792cce5f7182b55f8"},"package":null}

2
third_party/rust/d3d12/src/query.rs поставляемый
Просмотреть файл

@ -8,7 +8,7 @@ pub enum QueryHeapType {
Timestamp = d3d12::D3D12_QUERY_HEAP_TYPE_TIMESTAMP,
PipelineStatistics = d3d12::D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS,
SOStatistics = d3d12::D3D12_QUERY_HEAP_TYPE_SO_STATISTICS,
// VideoDecodeStatistcs = d3d12::D3D12_QUERY_HEAP_TYPE_VIDEO_DECODE_STATISTICS,
// VideoDecodeStatistics = d3d12::D3D12_QUERY_HEAP_TYPE_VIDEO_DECODE_STATISTICS,
// CopyQueueTimestamp = d3d12::D3D12_QUERY_HEAP_TYPE_COPY_QUEUE_TIMESTAMP,
}

Просмотреть файл

@ -1 +1 @@
{"files":{"CHANGELOG.md":"c1ccf4587ca168b3baa54580469c5dcc776decac0d996d3bb31d2341b47efa11","Cargo.toml":"390d32c2b791a6745c075c474e6d57c65d5f77f0e7190ff8a8c5342fbb40722a","LICENSE-APACHE":"074e6e32c86a4c0ef8b3ed25b721ca23aca83df277cd88106ef7177c354615ff","LICENSE-MIT":"aa893340d14b9844625be6a50ac644169a01b52f0211cbf81b09e1874c8cd81d","README.md":"89a83c4acc6891e5651772fc78a1d6362070774eaa6c5b5d4bfbe9e57a957be9","lib.rs":"2f4ede9d0619d85449891d9055605188db681d57b405e40e529831266e014ee5","rustfmt.toml":"f74204a6f92aa7422a16ecb2ffe2d5bae0f123b778d08b5db1a398a3c9ca4306","tests/self-doc.rs":"24bbda93f3b323c0b7c543c1df3bf45522b8026283103211805f070de66abadc"},"package":"ef5282ad69563b5fc40319526ba27e0e7363d552a896f0297d54f767717f9b95"}
{"files":{"CHANGELOG.md":"4a628703d5fc4ce692f255732a077d6d611aff64d2fddc3b2b4cd6c1bcc35bcf","Cargo.toml":"6e4dc75115db673e743d41149144c638ad8e11da7cfb5435d55e60ea61bd2016","LICENSE-APACHE":"074e6e32c86a4c0ef8b3ed25b721ca23aca83df277cd88106ef7177c354615ff","LICENSE-MIT":"aa893340d14b9844625be6a50ac644169a01b52f0211cbf81b09e1874c8cd81d","README.md":"89a83c4acc6891e5651772fc78a1d6362070774eaa6c5b5d4bfbe9e57a957be9","lib.rs":"f7915d9cd43170ce74f4551b51c39b7a91acbe244ede6ab3ca2fa1c50d505040","rustfmt.toml":"f74204a6f92aa7422a16ecb2ffe2d5bae0f123b778d08b5db1a398a3c9ca4306","tests/self-doc.rs":"24bbda93f3b323c0b7c543c1df3bf45522b8026283103211805f070de66abadc"},"package":"cb6969eaabd2421f8a2775cfd2471a2b634372b4a25d41e3bd647b79912850a0"}

Просмотреть файл

@ -1,7 +1,17 @@
# Changelog
## 0.2.10 - 2024-07-12
## 0.2.7 - 2023-12-29
* Revert parsing of multi-lines string while parsing features
(Keep parsing of multi-lines string when detecting if we need to use Cargo.toml.orig)
## 0.2.9 - 2024-07-11
* Fix parsing of multi-lines string (#25)
* Fix `out_of_scope_macro_calls` compatibility warning
* Fix documentation having too many `#` (#22)
## 0.2.8 - 2023-12-29
* Remove `\n` between features (#17)
* Don't throw an error when there is no features in Cargo.toml (#20)

Просмотреть файл

@ -12,10 +12,10 @@
[package]
edition = "2018"
name = "document-features"
version = "0.2.8"
authors = ["Slint Developers <info@slint-ui.com>"]
version = "0.2.10"
authors = ["Slint Developers <info@slint.dev>"]
description = "Extract documentation for the feature flags from comments in Cargo.toml"
homepage = "https://slint-ui.com"
homepage = "https://slint.rs"
readme = "README.md"
keywords = [
"documentation",

157
third_party/rust/document-features/lib.rs поставляемый
Просмотреть файл

@ -41,40 +41,38 @@ in where they occur. Use them to group features, for example.
## Examples:
*/
// Note: because rustdoc escapes the first `#` of a line starting with `#`,
// these docs comments have one more `#` ,
#![doc = self_test!(/**
[package]
name = "..."
## ...
# ...
[features]
default = ["foo"]
##! This comments goes on top
#! This comments goes on top
### The foo feature enables the `foo` functions
## The foo feature enables the `foo` functions
foo = []
### The bar feature enables the bar module
## The bar feature enables the bar module
bar = []
##! ### Experimental features
##! The following features are experimental
#! ### Experimental features
#! The following features are experimental
### Enable the fusion reactor
###
### Can lead to explosions
## Enable the fusion reactor
##
## Can lead to explosions
fusion = []
[dependencies]
document-features = "0.2"
##! ### Optional dependencies
#! ### Optional dependencies
### Enable this feature to implement the trait for the types from the genial crate
## Enable this feature to implement the trait for the types from the genial crate
genial = { version = "0.2", optional = true }
### This awesome dependency is specified in its own table
## This awesome dependency is specified in its own table
[dependencies.awesome]
version = "1.3.5"
optional = true
@ -256,11 +254,11 @@ fn document_features_impl(args: &Args) -> Result<TokenStream, TokenStream> {
let mut cargo_toml = std::fs::read_to_string(Path::new(&path).join("Cargo.toml"))
.map_err(|e| error(&format!("Can't open Cargo.toml: {:?}", e)))?;
if !cargo_toml.contains("\n##") && !cargo_toml.contains("\n#!") {
if !has_doc_comments(&cargo_toml) {
// On crates.io, Cargo.toml is usually "normalized" and stripped of all comments.
// The original Cargo.toml has been renamed Cargo.toml.orig
if let Ok(orig) = std::fs::read_to_string(Path::new(&path).join("Cargo.toml.orig")) {
if orig.contains("##") || orig.contains("#!") {
if has_doc_comments(&orig) {
cargo_toml = orig;
}
}
@ -270,6 +268,109 @@ fn document_features_impl(args: &Args) -> Result<TokenStream, TokenStream> {
Ok(std::iter::once(proc_macro::TokenTree::from(proc_macro::Literal::string(&result))).collect())
}
/// Check whether the given Cargo.toml text contains comments that look like
/// doc comments (lines starting with `## ` or `#! `), while ignoring matches
/// that occur inside TOML multi-line basic strings (`"""…"""`), where `##` is
/// string data rather than a comment.
fn has_doc_comments(cargo_toml: &str) -> bool {
let mut lines = cargo_toml.lines().map(str::trim);
while let Some(line) = lines.next() {
// A trimmed line beginning with `## ` or `#! ` counts as a doc comment.
if line.starts_with("## ") || line.starts_with("#! ") {
return true;
}
// Keep only the text before the first `#`: anything after it is an
// ordinary TOML comment and must not be scanned for string delimiters.
// (`before_coment` keeps the upstream spelling.)
let before_coment = line.split_once('#').map_or(line, |(before, _)| before);
if line.starts_with("#") {
// Whole line is a plain `#…` comment (no doc-comment marker): skip it.
continue;
}
// A `"""` appearing before any `#` opens a multi-line basic string;
// consume input until it closes so `##`/`#!` inside it are ignored.
if let Some((_, mut quote)) = before_coment.split_once("\"\"\"") {
loop {
// skip slashes: a backslash escapes the next character; drop the
// escaped character when it is another backslash or a quote, so an
// escaped `\"` cannot be mistaken for part of a closing `"""`.
if let Some((_, s)) = quote.split_once('\\') {
quote = s.strip_prefix('\\').or_else(|| s.strip_prefix('"')).unwrap_or(s);
continue;
}
// skip quotes: look for the closing `"""` in the remaining text.
if let Some((_, out_quote)) = quote.split_once("\"\"\"") {
// The string closed. Strip any literal quotes that immediately
// follow the closing delimiter, then drop a trailing `#` comment
// before checking whether another string opens on this line.
let out_quote = out_quote.trim_start_matches('"');
let out_quote =
out_quote.split_once('#').map_or(out_quote, |(before, _)| before);
if let Some((_, q)) = out_quote.split_once("\"\"\"") {
// Another multi-line string opens on the same line: keep going.
quote = q;
continue;
}
break;
};
// Still inside the multi-line string: advance to the next line; an
// unterminated string runs to end of input, so report no doc comment.
match lines.next() {
Some(l) => quote = l,
None => return false,
}
}
}
}
false
}
// Unit tests for `has_doc_comments`. (The function name keeps the upstream
// `coment` typo; renaming it here would diverge from the vendored crate.)
#[test]
fn test_has_doc_coment() {
// Basic cases: `## ` is a doc comment; `#comment` (no space) is not.
assert!(has_doc_comments("foo\nbar\n## comment\nddd"));
assert!(!has_doc_comments("foo\nbar\n#comment\nddd"));
// `##` lines inside multi-line strings are string data, not doc comments.
assert!(!has_doc_comments(
r#"
[[package.metadata.release.pre-release-replacements]]
exactly = 1 # not a doc comment
file = "CHANGELOG.md"
replace = """
<!-- next-header -->
## [Unreleased] - ReleaseDate
"""
search = "<!-- next-header -->"
array = ["""foo""", """
bar""", """eee
## not a comment
"""]
"#
));
// A `"""` that appears inside a `#` comment must not be treated as a string
// opener, and a real `## ` line after the strings is still detected.
assert!(has_doc_comments(
r#"
[[package.metadata.release.pre-release-replacements]]
exactly = 1 # """
file = "CHANGELOG.md"
replace = """
<!-- next-header -->
## [Unreleased] - ReleaseDate
"""
search = "<!-- next-header -->"
array = ["""foo""", """
bar""", """eee
## not a comment
"""]
## This is a comment
feature = "45"
"#
));
// Escaped quotes (`\"`) keep the string open, so the `##` lines here are
// all inside string values.
assert!(!has_doc_comments(
r#"
[[package.metadata.release.pre-release-replacements]]
value = """" string \"""
## within the string
\""""
another_string = """"" # """
## also within"""
"#
));
// Same fixture as above, but with a genuine `## ` line after every string
// has closed — it must be detected.
assert!(has_doc_comments(
r#"
[[package.metadata.release.pre-release-replacements]]
value = """" string \"""
## within the string
\""""
another_string = """"" # """
## also within"""
## out of the string
foo = bar
"#
));
}
fn process_toml(cargo_toml: &str, args: &Args) -> Result<String, String> {
// Get all lines between the "[features]" and the next block
let mut lines = cargo_toml
@ -465,14 +566,20 @@ fn test_get_balanced() {
#[doc(hidden)]
/// Helper macro for the tests. Do not use
pub fn self_test_helper(input: TokenStream) -> TokenStream {
process_toml((&input).to_string().trim_matches(|c| c == '"' || c == '#'), &Args::default())
.map_or_else(
|e| error(&e),
|r| {
std::iter::once(proc_macro::TokenTree::from(proc_macro::Literal::string(&r)))
.collect()
},
)
let mut code = String::new();
for line in (&input).to_string().trim_matches(|c| c == '"' || c == '#').lines() {
// Rustdoc removes the lines that starts with `# ` and removes one `#` from lines that starts with # followed by space.
// We need to re-add the `#` that was removed by rustdoc to get the original.
if line.strip_prefix('#').map_or(false, |x| x.is_empty() || x.starts_with(' ')) {
code += "#";
}
code += line;
code += "\n";
}
process_toml(&code, &Args::default()).map_or_else(
|e| error(&e),
|r| std::iter::once(proc_macro::TokenTree::from(proc_macro::Literal::string(&r))).collect(),
)
}
#[cfg(feature = "self-test")]
@ -507,6 +614,8 @@ macro_rules! self_test {
};
}
use self_test;
// The following struct is inserted only during generation of the documentation in order to exploit doc-tests.
// These doc-tests are used to check that invalid arguments to the `document_features!` macro cause a compile time error.
// For a more principled way of testing compilation error, maybe investigate <https://docs.rs/trybuild>.

2
third_party/rust/metal/.cargo-checksum.json поставляемый

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

1318
third_party/rust/metal/Cargo.lock сгенерированный поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

16
third_party/rust/metal/Cargo.toml поставляемый
Просмотреть файл

@ -12,7 +12,7 @@
[package]
edition = "2021"
name = "metal"
version = "0.28.0"
version = "0.29.0"
authors = ["gfx-rs developers"]
exclude = [
"guide/**/*",
@ -34,7 +34,13 @@ license = "MIT OR Apache-2.0"
repository = "https://github.com/gfx-rs/metal-rs"
[package.metadata.docs.rs]
default-target = "x86_64-apple-darwin"
targets = [
"aarch64-apple-darwin",
"aarch64-apple-ios",
"aarch64-apple-ios-sim",
"x86_64-apple-darwin",
"x86_64-apple-ios",
]
[[example]]
name = "window"
@ -116,13 +122,13 @@ version = "0.2.4"
version = "1"
[dev-dependencies.cocoa]
version = "0.24.0"
version = "0.25.0"
[dev-dependencies.cty]
version = "0.2.1"
[dev-dependencies.glam]
version = "0.22"
version = "0.27"
[dev-dependencies.png]
version = "0.17"
@ -134,7 +140,7 @@ version = "0.8"
version = "0.1.4"
[dev-dependencies.winit]
version = "0.27"
version = "0.29"
[features]
default = ["link"]

163
third_party/rust/metal/examples/circle/main.rs поставляемый
Просмотреть файл

@ -3,7 +3,7 @@ use metal::*;
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::macos::WindowExtMacOS,
raw_window_handle::{HasWindowHandle, RawWindowHandle},
};
use cocoa::{appkit::NSView, base::id as cocoa_id};
@ -33,7 +33,7 @@ pub struct AAPLVertex {
fn main() {
// Create a window for viewing the content
let event_loop = EventLoop::new();
let event_loop = EventLoop::new().unwrap();
let size = winit::dpi::LogicalSize::new(800, 600);
let window = winit::window::WindowBuilder::new()
@ -100,9 +100,11 @@ fn main() {
layer.set_presents_with_transaction(false);
unsafe {
let view = window.ns_view() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
if let Ok(RawWindowHandle::AppKit(rw)) = window.window_handle().map(|wh| wh.as_raw()) {
let view = rw.ns_view.as_ptr() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
}
}
let draw_size = window.inner_size();
@ -119,80 +121,87 @@ fn main() {
)
};
event_loop.run(move |event, _, control_flow| {
autoreleasepool(|| {
// ControlFlow::Wait pauses the event loop if no events are available to process.
// This is ideal for non-game applications that only update in response to user
// input, and uses significantly less power/CPU time than ControlFlow::Poll.
*control_flow = ControlFlow::Wait;
event_loop
.run(move |event, event_loop| {
autoreleasepool(|| {
// ControlFlow::Wait pauses the event loop if no events are available to process.
// This is ideal for non-game applications that only update in response to user
// input, and uses significantly less power/CPU time than ControlFlow::Poll.
event_loop.set_control_flow(ControlFlow::Wait);
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
println!("The close button was pressed; stopping");
*control_flow = ControlFlow::Exit
match event {
Event::AboutToWait => window.request_redraw(),
Event::WindowEvent { event, .. } => {
match event {
WindowEvent::CloseRequested => {
println!("The close button was pressed; stopping");
event_loop.exit();
}
WindowEvent::RedrawRequested => {
// It's preferrable to render in this event rather than in MainEventsCleared, since
// rendering in here allows the program to gracefully handle redraws requested
// by the OS.
let drawable = match layer.next_drawable() {
Some(drawable) => drawable,
None => return,
};
// Create a new command buffer for each render pass to the current drawable
let command_buffer = command_queue.new_command_buffer();
// Obtain a renderPassDescriptor generated from the view's drawable textures.
let render_pass_descriptor = RenderPassDescriptor::new();
handle_render_pass_color_attachment(
&render_pass_descriptor,
drawable.texture(),
);
handle_render_pass_sample_buffer_attachment(
&render_pass_descriptor,
&counter_sample_buffer,
);
// Create a render command encoder.
let encoder = command_buffer
.new_render_command_encoder(&render_pass_descriptor);
encoder.set_render_pipeline_state(&pipeline_state);
// Pass in the parameter data.
encoder.set_vertex_buffer(0, Some(&vbuf), 0);
// Draw the triangles which will eventually form the circle.
encoder.draw_primitives(MTLPrimitiveType::TriangleStrip, 0, 1080);
encoder.end_encoding();
resolve_samples_into_buffer(
&command_buffer,
&counter_sample_buffer,
&destination_buffer,
);
// Schedule a present once the framebuffer is complete using the current drawable.
command_buffer.present_drawable(&drawable);
// Finalize rendering here & push the command buffer to the GPU.
command_buffer.commit();
command_buffer.wait_until_completed();
let mut cpu_end = 0;
let mut gpu_end = 0;
device.sample_timestamps(&mut cpu_end, &mut gpu_end);
handle_timestamps(
&destination_buffer,
cpu_start,
cpu_end,
gpu_start,
gpu_end,
);
}
_ => (),
}
}
_ => (),
}
Event::MainEventsCleared => {
// Queue a RedrawRequested event.
window.request_redraw();
}
Event::RedrawRequested(_) => {
// It's preferrable to render in this event rather than in MainEventsCleared, since
// rendering in here allows the program to gracefully handle redraws requested
// by the OS.
let drawable = match layer.next_drawable() {
Some(drawable) => drawable,
None => return,
};
// Create a new command buffer for each render pass to the current drawable
let command_buffer = command_queue.new_command_buffer();
// Obtain a renderPassDescriptor generated from the view's drawable textures.
let render_pass_descriptor = RenderPassDescriptor::new();
handle_render_pass_color_attachment(
&render_pass_descriptor,
drawable.texture(),
);
handle_render_pass_sample_buffer_attachment(
&render_pass_descriptor,
&counter_sample_buffer,
);
// Create a render command encoder.
let encoder =
command_buffer.new_render_command_encoder(&render_pass_descriptor);
encoder.set_render_pipeline_state(&pipeline_state);
// Pass in the parameter data.
encoder.set_vertex_buffer(0, Some(&vbuf), 0);
// Draw the triangles which will eventually form the circle.
encoder.draw_primitives(MTLPrimitiveType::TriangleStrip, 0, 1080);
encoder.end_encoding();
resolve_samples_into_buffer(
&command_buffer,
&counter_sample_buffer,
&destination_buffer,
);
// Schedule a present once the framebuffer is complete using the current drawable.
command_buffer.present_drawable(&drawable);
// Finalize rendering here & push the command buffer to the GPU.
command_buffer.commit();
command_buffer.wait_until_completed();
let mut cpu_end = 0;
let mut gpu_end = 0;
device.sample_timestamps(&mut cpu_end, &mut gpu_end);
handle_timestamps(&destination_buffer, cpu_start, cpu_end, gpu_start, gpu_end);
}
_ => (),
}
});
});
});
})
.unwrap();
}
// If we want to draw a circle, we need to draw it out of the three primitive

Просмотреть файл

@ -6,11 +6,11 @@ use core_graphics_types::geometry::CGSize;
use metal::*;
use objc::{rc::autoreleasepool, runtime::YES};
use std::mem;
use winit::platform::macos::WindowExtMacOS;
use winit::{
event::{Event, WindowEvent},
event_loop::ControlFlow,
raw_window_handle::{HasWindowHandle, RawWindowHandle},
};
fn prepare_render_pass_descriptor(descriptor: &RenderPassDescriptorRef, texture: &TextureRef) {
@ -23,13 +23,13 @@ fn prepare_render_pass_descriptor(descriptor: &RenderPassDescriptorRef, texture:
}
fn main() {
let events_loop = winit::event_loop::EventLoop::new();
let event_loop = winit::event_loop::EventLoop::new().unwrap();
let size = winit::dpi::LogicalSize::new(800, 600);
let window = winit::window::WindowBuilder::new()
.with_inner_size(size)
.with_title("Metal Mesh Shader Example".to_string())
.build(&events_loop)
.build(&event_loop)
.unwrap();
let device = Device::system_default().expect("no device found");
@ -40,9 +40,11 @@ fn main() {
layer.set_presents_with_transaction(false);
unsafe {
let view = window.ns_view() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
if let Ok(RawWindowHandle::AppKit(rw)) = window.window_handle().map(|wh| wh.as_raw()) {
let view = rw.ns_view.as_ptr() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
}
}
let draw_size = window.inner_size();
@ -70,49 +72,57 @@ fn main() {
let command_queue = device.new_command_queue();
events_loop.run(move |event, _, control_flow| {
autoreleasepool(|| {
*control_flow = ControlFlow::Poll;
event_loop
.run(move |event, event_loop| {
autoreleasepool(|| {
event_loop.set_control_flow(ControlFlow::Poll);
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::Resized(size) => {
layer.set_drawable_size(CGSize::new(size.width as f64, size.height as f64));
match event {
Event::AboutToWait => {
window.request_redraw();
}
_ => (),
},
Event::MainEventsCleared => {
window.request_redraw();
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => event_loop.exit(),
WindowEvent::Resized(size) => {
layer.set_drawable_size(CGSize::new(
size.width as f64,
size.height as f64,
));
}
WindowEvent::RedrawRequested => {
let drawable = match layer.next_drawable() {
Some(drawable) => drawable,
None => return,
};
let render_pass_descriptor = RenderPassDescriptor::new();
prepare_render_pass_descriptor(
&render_pass_descriptor,
drawable.texture(),
);
let command_buffer = command_queue.new_command_buffer();
let encoder =
command_buffer.new_render_command_encoder(&render_pass_descriptor);
encoder.set_render_pipeline_state(&pipeline_state);
encoder.draw_mesh_threads(
MTLSize::new(1, 1, 1),
MTLSize::new(1, 1, 1),
MTLSize::new(1, 1, 1),
);
encoder.end_encoding();
command_buffer.present_drawable(&drawable);
command_buffer.commit();
}
_ => (),
},
_ => {}
}
Event::RedrawRequested(_) => {
let drawable = match layer.next_drawable() {
Some(drawable) => drawable,
None => return,
};
let render_pass_descriptor = RenderPassDescriptor::new();
prepare_render_pass_descriptor(&render_pass_descriptor, drawable.texture());
let command_buffer = command_queue.new_command_buffer();
let encoder =
command_buffer.new_render_command_encoder(&render_pass_descriptor);
encoder.set_render_pipeline_state(&pipeline_state);
encoder.draw_mesh_threads(
MTLSize::new(1, 1, 1),
MTLSize::new(1, 1, 1),
MTLSize::new(1, 1, 1),
);
encoder.end_encoding();
command_buffer.present_drawable(&drawable);
command_buffer.commit();
}
_ => {}
}
});
});
});
})
.unwrap();
}

Просмотреть файл

@ -8,7 +8,7 @@ use std::mem;
use winit::{
event::{Event, WindowEvent},
event_loop::ControlFlow,
platform::macos::WindowExtMacOS,
raw_window_handle::{HasWindowHandle, RawWindowHandle},
};
pub mod camera;
@ -31,13 +31,13 @@ fn find_raytracing_supporting_device() -> Device {
}
fn main() {
let events_loop = winit::event_loop::EventLoop::new();
let event_loop = winit::event_loop::EventLoop::new().unwrap();
let size = winit::dpi::LogicalSize::new(800, 600);
let window = winit::window::WindowBuilder::new()
.with_inner_size(size)
.with_title("Metal Raytracing Example".to_string())
.build(&events_loop)
.build(&event_loop)
.unwrap();
let device = find_raytracing_supporting_device();
@ -48,9 +48,11 @@ fn main() {
layer.set_presents_with_transaction(false);
unsafe {
let view = window.ns_view() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
if let Ok(RawWindowHandle::AppKit(rw)) = window.window_handle().map(|wh| wh.as_raw()) {
let view = rw.ns_view.as_ptr() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
}
}
let draw_size = window.inner_size();
@ -60,28 +62,28 @@ fn main() {
let mut renderer = renderer::Renderer::new(device);
renderer.window_resized(cg_size);
events_loop.run(move |event, _, control_flow| {
autoreleasepool(|| {
*control_flow = ControlFlow::Poll;
event_loop
.run(move |event, event_loop| {
autoreleasepool(|| {
event_loop.set_control_flow(ControlFlow::Poll);
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::Resized(size) => {
let size = CGSize::new(size.width as f64, size.height as f64);
layer.set_drawable_size(size);
renderer.window_resized(size);
}
_ => (),
},
Event::MainEventsCleared => {
window.request_redraw();
match event {
Event::AboutToWait => window.request_redraw(),
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => event_loop.exit(),
WindowEvent::Resized(size) => {
let size = CGSize::new(size.width as f64, size.height as f64);
layer.set_drawable_size(size);
renderer.window_resized(size);
}
WindowEvent::RedrawRequested => {
renderer.draw(&layer);
}
_ => (),
},
_ => {}
}
Event::RedrawRequested(_) => {
renderer.draw(&layer);
}
_ => {}
}
});
});
});
})
.unwrap();
}

Просмотреть файл

@ -7,7 +7,7 @@ use objc::{rc::autoreleasepool, runtime::YES};
use winit::{
event::{Event, WindowEvent},
event_loop::ControlFlow,
platform::macos::WindowExtMacOS,
raw_window_handle::{HasWindowHandle, RawWindowHandle},
};
use std::mem;
@ -43,9 +43,11 @@ impl App {
layer.set_presents_with_transaction(false);
layer.set_framebuffer_only(false);
unsafe {
let view = window.ns_view() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
if let Ok(RawWindowHandle::AppKit(rw)) = window.window_handle().map(|wh| wh.as_raw()) {
let view = rw.ns_view.as_ptr() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
}
}
let draw_size = window.inner_size();
layer.set_drawable_size(CGSize::new(draw_size.width as f64, draw_size.height as f64));
@ -141,37 +143,37 @@ impl App {
}
fn main() {
let events_loop = winit::event_loop::EventLoop::new();
let event_loop = winit::event_loop::EventLoop::new().unwrap();
let size = winit::dpi::LogicalSize::new(800, 600);
let window = winit::window::WindowBuilder::new()
.with_inner_size(size)
.with_title("Metal Shader Dylib Example".to_string())
.build(&events_loop)
.build(&event_loop)
.unwrap();
let mut app = App::new(&window);
events_loop.run(move |event, _, control_flow| {
autoreleasepool(|| {
*control_flow = ControlFlow::Poll;
event_loop
.run(move |event, event_loop| {
autoreleasepool(|| {
event_loop.set_control_flow(ControlFlow::Poll);
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::Resized(size) => {
app.resize(size.width, size.height);
}
_ => (),
},
Event::MainEventsCleared => {
window.request_redraw();
match event {
Event::AboutToWait => window.request_redraw(),
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => event_loop.exit(),
WindowEvent::Resized(size) => {
app.resize(size.width, size.height);
}
WindowEvent::RedrawRequested => {
app.draw();
}
_ => (),
},
_ => {}
}
Event::RedrawRequested(_) => {
app.draw();
}
_ => {}
}
});
});
});
})
.unwrap();
}

210
third_party/rust/metal/examples/window/main.rs поставляемый
Просмотреть файл

@ -13,11 +13,11 @@ use core_graphics_types::geometry::CGSize;
use metal::*;
use objc::{rc::autoreleasepool, runtime::YES};
use std::mem;
use winit::platform::macos::WindowExtMacOS;
use winit::{
event::{Event, WindowEvent},
event_loop::ControlFlow,
raw_window_handle::{HasWindowHandle, RawWindowHandle},
};
#[repr(C)]
@ -85,13 +85,13 @@ fn prepare_render_pass_descriptor(descriptor: &RenderPassDescriptorRef, texture:
}
fn main() {
let events_loop = winit::event_loop::EventLoop::new();
let event_loop = winit::event_loop::EventLoop::new().unwrap();
let size = winit::dpi::LogicalSize::new(800, 600);
let window = winit::window::WindowBuilder::new()
.with_inner_size(size)
.with_title("Metal Window Example".to_string())
.build(&events_loop)
.build(&event_loop)
.unwrap();
let device = Device::system_default().expect("no device found");
@ -102,9 +102,11 @@ fn main() {
layer.set_presents_with_transaction(false);
unsafe {
let view = window.ns_view() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
if let Ok(RawWindowHandle::AppKit(rw)) = window.window_handle().map(|wh| wh.as_raw()) {
let view = rw.ns_view.as_ptr() as cocoa_id;
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.as_ref()));
}
}
let draw_size = window.inner_size();
@ -161,101 +163,107 @@ fn main() {
MTLResourceOptions::CPUCacheModeDefaultCache | MTLResourceOptions::StorageModeManaged,
);
events_loop.run(move |event, _, control_flow| {
autoreleasepool(|| {
*control_flow = ControlFlow::Poll;
event_loop
.run(move |event, event_loop| {
autoreleasepool(|| {
event_loop.set_control_flow(ControlFlow::Poll);
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::Resized(size) => {
layer.set_drawable_size(CGSize::new(size.width as f64, size.height as f64));
}
_ => (),
},
Event::MainEventsCleared => {
window.request_redraw();
match event {
Event::AboutToWait => window.request_redraw(),
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => event_loop.exit(),
WindowEvent::Resized(size) => {
layer.set_drawable_size(CGSize::new(
size.width as f64,
size.height as f64,
));
}
WindowEvent::RedrawRequested => {
let p = vbuf.contents();
let vertex_data = [
0.0f32,
0.5,
1.0,
0.0,
0.0,
-0.5 + (r.cos() / 2. + 0.5),
-0.5,
0.0,
1.0,
0.0,
0.5 - (r.cos() / 2. + 0.5),
-0.5,
0.0,
0.0,
1.0,
];
unsafe {
std::ptr::copy(
vertex_data.as_ptr(),
p as *mut f32,
(vertex_data.len() * mem::size_of::<f32>()) as usize,
);
}
vbuf.did_modify_range(crate::NSRange::new(
0 as u64,
(vertex_data.len() * mem::size_of::<f32>()) as u64,
));
let drawable = match layer.next_drawable() {
Some(drawable) => drawable,
None => return,
};
let render_pass_descriptor = RenderPassDescriptor::new();
prepare_render_pass_descriptor(
&render_pass_descriptor,
drawable.texture(),
);
let command_buffer = command_queue.new_command_buffer();
let encoder =
command_buffer.new_render_command_encoder(&render_pass_descriptor);
encoder.set_scissor_rect(MTLScissorRect {
x: 20,
y: 20,
width: 100,
height: 100,
});
encoder.set_render_pipeline_state(&clear_rect_pipeline_state);
encoder.set_vertex_buffer(0, Some(&clear_rect_buffer), 0);
encoder.draw_primitives_instanced(
metal::MTLPrimitiveType::TriangleStrip,
0,
4,
1,
);
let physical_size = window.inner_size();
encoder.set_scissor_rect(MTLScissorRect {
x: 0,
y: 0,
width: physical_size.width as _,
height: physical_size.height as _,
});
encoder.set_render_pipeline_state(&triangle_pipeline_state);
encoder.set_vertex_buffer(0, Some(&vbuf), 0);
encoder.draw_primitives(MTLPrimitiveType::Triangle, 0, 3);
encoder.end_encoding();
command_buffer.present_drawable(&drawable);
command_buffer.commit();
r += 0.01f32;
}
_ => (),
},
_ => {}
}
Event::RedrawRequested(_) => {
let p = vbuf.contents();
let vertex_data = [
0.0f32,
0.5,
1.0,
0.0,
0.0,
-0.5 + (r.cos() / 2. + 0.5),
-0.5,
0.0,
1.0,
0.0,
0.5 - (r.cos() / 2. + 0.5),
-0.5,
0.0,
0.0,
1.0,
];
unsafe {
std::ptr::copy(
vertex_data.as_ptr(),
p as *mut f32,
(vertex_data.len() * mem::size_of::<f32>()) as usize,
);
}
vbuf.did_modify_range(crate::NSRange::new(
0 as u64,
(vertex_data.len() * mem::size_of::<f32>()) as u64,
));
let drawable = match layer.next_drawable() {
Some(drawable) => drawable,
None => return,
};
let render_pass_descriptor = RenderPassDescriptor::new();
prepare_render_pass_descriptor(&render_pass_descriptor, drawable.texture());
let command_buffer = command_queue.new_command_buffer();
let encoder =
command_buffer.new_render_command_encoder(&render_pass_descriptor);
encoder.set_scissor_rect(MTLScissorRect {
x: 20,
y: 20,
width: 100,
height: 100,
});
encoder.set_render_pipeline_state(&clear_rect_pipeline_state);
encoder.set_vertex_buffer(0, Some(&clear_rect_buffer), 0);
encoder.draw_primitives_instanced(
metal::MTLPrimitiveType::TriangleStrip,
0,
4,
1,
);
let physical_size = window.inner_size();
encoder.set_scissor_rect(MTLScissorRect {
x: 0,
y: 0,
width: physical_size.width as _,
height: physical_size.height as _,
});
encoder.set_render_pipeline_state(&triangle_pipeline_state);
encoder.set_vertex_buffer(0, Some(&vbuf), 0);
encoder.draw_primitives(MTLPrimitiveType::Triangle, 0, 3);
encoder.end_encoding();
command_buffer.present_drawable(&drawable);
command_buffer.commit();
r += 0.01f32;
}
_ => {}
}
});
});
});
})
.unwrap();
}

Просмотреть файл

@ -7,7 +7,7 @@
use super::*;
bitflags! {
bitflags::bitflags! {
#[derive(Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct MTLAccelerationStructureInstanceOptions: u32 {
const None = 0;
@ -26,6 +26,7 @@ pub enum MTLAccelerationStructureInstanceDescriptorType {
Default = 0,
UserID = 1,
Motion = 2,
Indirect = 3,
}
#[derive(Clone, Copy, PartialEq, Debug, Default)]
@ -49,6 +50,17 @@ pub struct MTLAccelerationStructureUserIDInstanceDescriptor {
pub user_id: u32,
}
#[derive(Clone, Copy, PartialEq, Debug, Default)]
#[repr(C)]
pub struct MTLIndirectAccelerationStructureInstanceDescriptor {
pub transformation_matrix: [[f32; 3]; 4],
pub options: MTLAccelerationStructureInstanceOptions,
pub mask: u32,
pub intersection_function_table_offset: u32,
pub user_id: u32,
pub acceleration_structure_id: u64,
}
pub enum MTLAccelerationStructureDescriptor {}
foreign_obj_type! {
@ -69,7 +81,9 @@ impl PrimitiveAccelerationStructureDescriptor {
pub fn descriptor() -> Self {
unsafe {
let class = class!(MTLPrimitiveAccelerationStructureDescriptor);
msg_send![class, descriptor]
let ptr: *mut Object = msg_send![class, descriptor];
let ptr: *mut Object = msg_send![ptr, retain];
Self::from_ptr(ptr as _)
}
}
}
@ -91,6 +105,12 @@ foreign_obj_type! {
type ParentType = Resource;
}
impl AccelerationStructureRef {
pub fn gpu_resource_id(&self) -> MTLResourceID {
unsafe { msg_send![self, gpuResourceID] }
}
}
pub enum MTLAccelerationStructureGeometryDescriptor {}
foreign_obj_type! {
@ -132,7 +152,9 @@ impl AccelerationStructureTriangleGeometryDescriptor {
pub fn descriptor() -> Self {
unsafe {
let class = class!(MTLAccelerationStructureTriangleGeometryDescriptor);
msg_send![class, descriptor]
let ptr: *mut Object = msg_send![class, descriptor];
let ptr: *mut Object = msg_send![ptr, retain];
Self::from_ptr(ptr as _)
}
}
}
@ -191,7 +213,9 @@ impl AccelerationStructureBoundingBoxGeometryDescriptor {
pub fn descriptor() -> Self {
unsafe {
let class = class!(MTLAccelerationStructureBoundingBoxGeometryDescriptor);
msg_send![class, descriptor]
let ptr: *mut Object = msg_send![class, descriptor];
let ptr: *mut Object = msg_send![ptr, retain];
Self::from_ptr(ptr as _)
}
}
}
@ -204,6 +228,14 @@ impl AccelerationStructureBoundingBoxGeometryDescriptorRef {
pub fn set_bounding_box_count(&self, count: NSUInteger) {
unsafe { msg_send![self, setBoundingBoxCount: count] }
}
pub fn set_bounding_box_stride(&self, stride: NSUInteger) {
unsafe { msg_send![self, setBoundingBoxStride: stride] }
}
pub fn set_bounding_box_buffer_offset(&self, offset: NSUInteger) {
unsafe { msg_send![self, setBoundingBoxBufferOffset: offset] }
}
}
pub enum MTLInstanceAccelerationStructureDescriptor {}
@ -218,7 +250,9 @@ impl InstanceAccelerationStructureDescriptor {
pub fn descriptor() -> Self {
unsafe {
let class = class!(MTLInstanceAccelerationStructureDescriptor);
msg_send![class, descriptor]
let ptr: *mut Object = msg_send![class, descriptor];
let ptr: *mut Object = msg_send![ptr, retain];
Self::from_ptr(ptr as _)
}
}
}
@ -252,6 +286,75 @@ impl InstanceAccelerationStructureDescriptorRef {
}
}
pub enum MTLIndirectInstanceAccelerationStructureDescriptor {}
foreign_obj_type! {
type CType = MTLIndirectInstanceAccelerationStructureDescriptor;
pub struct IndirectInstanceAccelerationStructureDescriptor;
type ParentType = AccelerationStructureDescriptor;
}
impl IndirectInstanceAccelerationStructureDescriptor {
pub fn descriptor() -> Self {
unsafe {
let class = class!(MTLIndirectInstanceAccelerationStructureDescriptor);
let ptr: *mut Object = msg_send![class, descriptor];
let ptr: *mut Object = msg_send![ptr, retain];
Self::from_ptr(ptr as _)
}
}
}
impl IndirectInstanceAccelerationStructureDescriptorRef {
pub fn set_instance_descriptor_buffer(&self, buffer: &BufferRef) {
unsafe { msg_send![self, setInstanceDescriptorBuffer: buffer] }
}
pub fn set_instance_descriptor_buffer_offset(&self, offset: NSUInteger) {
unsafe { msg_send![self, setInstanceDescriptorBufferOffset: offset] }
}
pub fn set_instance_descriptor_stride(&self, stride: NSUInteger) {
unsafe { msg_send![self, setInstanceDescriptorStride: stride] }
}
pub fn set_max_instance_count(&self, count: NSUInteger) {
unsafe { msg_send![self, setMaxInstanceCount: count] }
}
pub fn set_instance_count_buffer(&self, buffer: &BufferRef) {
unsafe { msg_send![self, setInstanceCountBuffer: buffer] }
}
pub fn set_instance_count_buffer_offset(&self, offset: NSUInteger) {
unsafe { msg_send![self, setInstanceCountBufferOffset: offset] }
}
pub fn set_instance_descriptor_type(&self, ty: MTLAccelerationStructureInstanceDescriptorType) {
unsafe { msg_send![self, setInstanceDescriptorType: ty] }
}
pub fn set_motion_transform_buffer(&self, buffer: &BufferRef) {
unsafe { msg_send![self, setMotionTransformBuffer: buffer] }
}
pub fn set_motion_transform_buffer_offset(&self, offset: NSUInteger) {
unsafe { msg_send![self, setMotionTransformBufferOffset: offset] }
}
pub fn set_max_motion_transform_count(&self, count: NSUInteger) {
unsafe { msg_send![self, setMaxMotionTransformCount: count] }
}
pub fn set_motion_transform_count_buffer(&self, buffer: &BufferRef) {
unsafe { msg_send![self, setMotionTransformCountBuffer: buffer] }
}
pub fn set_motion_transform_count_buffer_offset(&self, offset: NSUInteger) {
unsafe { msg_send![self, setMotionTransformCountBufferOffset: offset] }
}
}
pub enum MTLAccelerationStructureCommandEncoder {}
foreign_obj_type! {
@ -270,11 +373,26 @@ impl AccelerationStructureCommandEncoderRef {
) {
unsafe {
msg_send![
self,
buildAccelerationStructure: acceleration_structure
descriptor: descriptor
scratchBuffer: scratch_buffer
scratchBufferOffset: scratch_buffer_offset]
self,
buildAccelerationStructure: acceleration_structure
descriptor: descriptor
scratchBuffer: scratch_buffer
scratchBufferOffset: scratch_buffer_offset
]
}
}
pub fn copy_acceleration_structure(
&self,
source_acceleration_structure: &AccelerationStructureRef,
destination_acceleration_structure: &AccelerationStructureRef,
) {
unsafe {
msg_send![
self,
copyAccelerationStructure: source_acceleration_structure
toAccelerationStructure: destination_acceleration_structure
]
}
}
@ -294,6 +412,24 @@ impl AccelerationStructureCommandEncoderRef {
}
}
pub fn write_compacted_acceleration_structure_size_with_type(
&self,
acceleration_structure: &AccelerationStructureRef,
to_buffer: &BufferRef,
offset: NSUInteger,
size_data_type: MTLDataType,
) {
unsafe {
msg_send![
self,
writeCompactedAccelerationStructureSize: acceleration_structure
toBuffer: to_buffer
offset: offset
sizeDataType: size_data_type
]
}
}
pub fn copy_and_compact_acceleration_structure(
&self,
source: &AccelerationStructureRef,
@ -307,6 +443,81 @@ impl AccelerationStructureCommandEncoderRef {
]
}
}
pub fn refit_acceleration_structure(
&self,
source_acceleration_structure: &AccelerationStructureRef,
descriptor: &self::AccelerationStructureDescriptorRef,
destination_acceleration_structure: Option<&AccelerationStructureRef>,
scratch_buffer: &BufferRef,
scratch_buffer_offset: NSUInteger,
) {
unsafe {
msg_send![
self,
refitAccelerationStructure: source_acceleration_structure
descriptor: descriptor
destination: destination_acceleration_structure
scratchBuffer: scratch_buffer
scratchBufferOffset: scratch_buffer_offset
]
}
}
pub fn update_fence(&self, fence: &FenceRef) {
unsafe { msg_send![self, updateFence: fence] }
}
pub fn wait_for_fence(&self, fence: &FenceRef) {
unsafe { msg_send![self, waitForFence: fence] }
}
pub fn use_heap(&self, heap: &HeapRef) {
unsafe { msg_send![self, useHeap: heap] }
}
pub fn use_heaps(&self, heaps: &[&HeapRef]) {
unsafe {
msg_send![self,
useHeaps: heaps.as_ptr()
count: heaps.len() as NSUInteger
]
}
}
pub fn use_resource(&self, resource: &ResourceRef, usage: MTLResourceUsage) {
unsafe {
msg_send![self,
useResource: resource
usage: usage
]
}
}
pub fn use_resources(&self, resources: &[&ResourceRef], usage: MTLResourceUsage) {
unsafe {
msg_send![self,
useResources: resources.as_ptr()
count: resources.len() as NSUInteger
usage: usage
]
}
}
pub fn sample_counters_in_buffer(
&self,
sample_buffer: &CounterSampleBufferRef,
sample_index: NSUInteger,
with_barrier: bool,
) {
unsafe {
msg_send![self,
sampleCountersInBuffer: sample_buffer
atSampleIndex: sample_index
withBarrier: with_barrier
]
}
}
}
pub enum MTLIntersectionFunctionTableDescriptor {}
@ -345,4 +556,112 @@ impl IntersectionFunctionTableRef {
pub fn set_function(&self, function: &FunctionHandleRef, index: NSUInteger) {
unsafe { msg_send![self, setFunction: function atIndex: index] }
}
pub fn set_functions(&self, functions: &[&FunctionHandleRef], start_index: NSUInteger) {
unsafe {
msg_send![self, setFunctions: functions.as_ptr() withRange: NSRange { location: start_index, length: functions.len() as _ }]
}
}
pub fn set_buffer(&self, index: NSUInteger, buffer: Option<&BufferRef>, offset: NSUInteger) {
unsafe { msg_send![self, setBuffer:buffer offset:offset atIndex:index] }
}
pub fn set_buffers(
&self,
start_index: NSUInteger,
data: &[Option<&BufferRef>],
offsets: &[NSUInteger],
) {
debug_assert_eq!(offsets.len(), data.len());
unsafe {
msg_send![self,
setBuffers: data.as_ptr()
offsets: offsets.as_ptr()
withRange: NSRange {
location: start_index,
length: data.len() as _,
}
]
}
}
pub fn set_visible_function_table(
&self,
buffer_index: NSUInteger,
visible_function_table: Option<&VisibleFunctionTableRef>,
) {
unsafe {
msg_send![self,
setVisibleFunctionTable:visible_function_table
atBufferIndex:buffer_index]
}
}
pub fn set_visible_function_tables(
&self,
buffer_start_index: NSUInteger,
visible_function_tables: &[&VisibleFunctionTableRef],
) {
unsafe {
msg_send![self,
setVisibleFunctionTables:visible_function_tables.as_ptr()
withBufferRange: NSRange {
location: buffer_start_index,
length: visible_function_tables.len() as _,
}]
}
}
pub fn gpu_resource_id(&self) -> MTLResourceID {
unsafe { msg_send![self, gpuResourceID] }
}
}
/// See <https://developer.apple.com/documentation/metal/mtlvisiblefunctiontabledescriptor>
pub enum MTLVisibleFunctionTableDescriptor {}
foreign_obj_type! {
type CType = MTLVisibleFunctionTableDescriptor;
pub struct VisibleFunctionTableDescriptor;
type ParentType = NsObject;
}
impl VisibleFunctionTableDescriptor {
pub fn new() -> Self {
unsafe {
let class = class!(MTLVisibleFunctionTableDescriptor);
msg_send![class, new]
}
}
}
impl VisibleFunctionTableDescriptorRef {
pub fn set_function_count(&self, count: NSUInteger) {
unsafe { msg_send![self, setFunctionCount: count] }
}
}
/// See <https://developer.apple.com/documentation/metal/mtlvisiblefunctiontable>
pub enum MTLVisibleFunctionTable {}
foreign_obj_type! {
type CType = MTLVisibleFunctionTable;
pub struct VisibleFunctionTable;
type ParentType = Resource;
}
impl VisibleFunctionTableRef {
pub fn set_functions(&self, functions: &[&FunctionRef]) {
let ns_array = Array::<Function>::from_slice(functions);
unsafe { msg_send![self, setFunctions: ns_array] }
}
pub fn set_function(&self, index: NSUInteger, function: &FunctionHandleRef) {
unsafe { msg_send![self, setFunction: function atIndex: index] }
}
pub fn gpu_resource_id(&self) -> MTLResourceID {
unsafe { msg_send![self, gpuResourceID] }
}
}

24
third_party/rust/metal/src/argument.rs поставляемый
Просмотреть файл

@ -106,6 +106,30 @@ pub enum MTLDataType {
RGB10A2Unorm = 75,
RG11B10Float = 76,
RGB9E5Float = 77,
RenderPipeline = 78,
ComputePipeline = 79,
IndirectCommandBuffer = 80,
Long = 81,
Long2 = 82,
Long3 = 83,
Long4 = 84,
ULong = 85,
ULong2 = 86,
ULong3 = 87,
ULong4 = 88,
VisibleFunctionTable = 115,
IntersectionFunctionTable = 116,
PrimitiveAccelerationStructure = 117,
InstanceAccelerationStructure = 118,
BFloat = 121,
BFloat2 = 122,
BFloat3 = 123,
BFloat4 = 124,
}
/// See <https://developer.apple.com/documentation/metal/mtlargumenttype>

29
third_party/rust/metal/src/device.rs поставляемый
Просмотреть файл

@ -7,7 +7,8 @@
use super::*;
use block::{Block, ConcreteBlock};
use block::Block;
use log::warn;
use objc::runtime::{NO, YES};
use std::{ffi::CStr, os::raw::c_char, path::Path, ptr};
@ -92,7 +93,7 @@ pub enum MTLDeviceLocation {
Unspecified = u64::MAX,
}
bitflags! {
bitflags::bitflags! {
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct PixelFormatCapabilities: u32 {
const Filter = 1 << 0;
@ -1430,7 +1431,7 @@ pub enum MTLSparseTextureRegionAlignmentMode {
Inward = 1,
}
bitflags! {
bitflags::bitflags! {
/// Options that determine how Metal prepares the pipeline.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct MTLPipelineOption: NSUInteger {
@ -1471,6 +1472,8 @@ pub type dispatch_queue_t = *mut Object;
#[allow(non_camel_case_types)]
type dispatch_block_t = *const Block<(), ()>;
const DISPATCH_DATA_DESTRUCTOR_DEFAULT: dispatch_block_t = ptr::null();
#[cfg_attr(
all(feature = "link", any(target_os = "macos", target_os = "ios")),
link(name = "System", kind = "dylib")
@ -1704,12 +1707,20 @@ impl DeviceRef {
pub fn new_library_with_data(&self, library_data: &[u8]) -> Result<Library, String> {
unsafe {
let destructor_block = ConcreteBlock::new(|| {}).copy();
// SAFETY:
// `library_data` does not necessarily outlive the dispatch data
// in which it will be contained (since the dispatch data will be
// contained in the MTLLibrary returned by this function).
//
// To prevent the MTLLibrary from referencing the data outside of
// its lifetime, we use DISPATCH_DATA_DESTRUCTOR_DEFAULT as the
// destructor block, which will make `dispatch_data_create` copy
// the buffer for us automatically.
let data = dispatch_data_create(
library_data.as_ptr() as *const std::ffi::c_void,
library_data.len() as crate::c_size_t,
&_dispatch_main_q as *const _ as dispatch_queue_t,
&*destructor_block.deref(),
DISPATCH_DATA_DESTRUCTOR_DEFAULT,
);
let library: *mut MTLLibrary = try_objc! { err =>
@ -2049,6 +2060,14 @@ impl DeviceRef {
unsafe { msg_send![self, heapBufferSizeAndAlignWithLength: length options: options] }
}
/// Only available on macos(13.0), ios(16.0)
pub fn heap_acceleration_structure_size_and_align_with_size(
&self,
size: NSUInteger,
) -> MTLSizeAndAlign {
unsafe { msg_send![self, heapAccelerationStructureSizeAndAlignWithSize: size] }
}
pub fn heap_texture_size_and_align(
&self,
descriptor: &TextureDescriptorRef,

13
third_party/rust/metal/src/drawable.rs поставляемый
Просмотреть файл

@ -5,8 +5,13 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use block::Block;
use super::NSUInteger;
type DrawablePresentedHandler<'a> = Block<(&'a DrawableRef,), ()>;
type CFTimeInterval = f64;
/// See <https://developer.apple.com/documentation/metal/mtldrawable>
pub enum MTLDrawable {}
@ -23,4 +28,12 @@ impl DrawableRef {
pub fn drawable_id(&self) -> NSUInteger {
unsafe { msg_send![self, drawableID] }
}
pub fn add_presented_handler(&self, block: &DrawablePresentedHandler) {
unsafe { msg_send![self, addPresentedHandler: block] }
}
pub fn presented_time(&self) -> CFTimeInterval {
unsafe { msg_send![self, presentedTime] }
}
}

86
third_party/rust/metal/src/encoder.rs поставляемый
Просмотреть файл

@ -74,7 +74,7 @@ pub enum MTLTriangleFillMode {
Lines = 1,
}
bitflags! {
bitflags::bitflags! {
/// https://developer.apple.com/documentation/metal/mtlblitoption
#[allow(non_upper_case_globals)]
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
@ -444,6 +444,34 @@ impl RenderCommandEncoderRef {
}
}
pub fn set_vertex_visible_function_table(
&self,
buffer_index: NSUInteger,
visible_function_table: Option<&VisibleFunctionTableRef>,
) {
unsafe {
msg_send![self,
setVertexVisibleFunctionTable:visible_function_table
atBufferIndex:buffer_index]
}
}
pub fn set_vertex_visible_function_tables(
&self,
buffer_start_index: NSUInteger,
visible_function_tables: &[&VisibleFunctionTableRef],
) {
unsafe {
msg_send![self,
setVertexVisibleFunctionTables:visible_function_tables.as_ptr()
withBufferRange: NSRange {
location: buffer_start_index,
length: visible_function_tables.len() as _,
}
]
}
}
// Specifying Resources for a Object Shader Function
/// Only available in (macos(13.0), ios(16.0))
@ -866,6 +894,34 @@ impl RenderCommandEncoderRef {
}
}
pub fn set_fragment_visible_function_table(
&self,
buffer_index: NSUInteger,
visible_function_table: Option<&VisibleFunctionTableRef>,
) {
unsafe {
msg_send![self,
setFragmentVisibleFunctionTable:visible_function_table
atBufferIndex:buffer_index]
}
}
pub fn set_fragment_visible_function_tables(
&self,
buffer_start_index: NSUInteger,
visible_function_tables: &[&VisibleFunctionTableRef],
) {
unsafe {
msg_send![self,
setFragmentVisibleFunctionTables:visible_function_tables.as_ptr()
withBufferRange: NSRange {
location: buffer_start_index,
length: visible_function_tables.len() as _,
}
]
}
}
// Drawing Geometric Primitives
pub fn draw_primitives(
@ -1594,6 +1650,34 @@ impl ComputeCommandEncoderRef {
}
}
pub fn set_visible_function_table(
&self,
buffer_index: NSUInteger,
visible_function_table: Option<&VisibleFunctionTableRef>,
) {
unsafe {
msg_send![self,
setVisibleFunctionTable:visible_function_table
atBufferIndex:buffer_index]
}
}
pub fn set_visible_function_tables(
&self,
buffer_start_index: NSUInteger,
visible_function_tables: &[&VisibleFunctionTableRef],
) {
unsafe {
msg_send![self,
setVisibleFunctionTables:visible_function_tables.as_ptr()
withBufferRange: NSRange {
location: buffer_start_index,
length: visible_function_tables.len() as _,
}
]
}
}
pub fn dispatch_thread_groups(
&self,
thread_groups_count: MTLSize,

72
third_party/rust/metal/src/heap.rs поставляемый
Просмотреть файл

@ -148,6 +148,69 @@ impl HeapRef {
}
}
}
/// Only available on macOS 13.0+ & iOS 16.0+
pub fn new_acceleration_structure_with_descriptor(
&self,
descriptor: &AccelerationStructureDescriptorRef,
) -> Option<AccelerationStructure> {
unsafe {
let ptr: *mut MTLAccelerationStructure =
msg_send![self, newAccelerationStructureWithDescriptor: descriptor];
if !ptr.is_null() {
Some(AccelerationStructure::from_ptr(ptr))
} else {
None
}
}
}
/// Only available on macOS 13.0+ & iOS 16.0+
pub fn new_acceleration_structure_with_descriptor_offset(
&self,
descriptor: &AccelerationStructureDescriptorRef,
offset: u64,
) -> Option<AccelerationStructure> {
unsafe {
let ptr: *mut MTLAccelerationStructure = msg_send![self, newAccelerationStructureWithDescriptor:descriptor
offset:offset];
if !ptr.is_null() {
Some(AccelerationStructure::from_ptr(ptr))
} else {
None
}
}
}
/// Only available on macOS 13.0+ & iOS 16.0+
pub fn new_acceleration_structure_with_size(&self, size: u64) -> Option<AccelerationStructure> {
unsafe {
let ptr: *mut MTLAccelerationStructure =
msg_send![self, newAccelerationStructureWithSize:size];
if !ptr.is_null() {
Some(AccelerationStructure::from_ptr(ptr))
} else {
None
}
}
}
/// Only available on macOS 13.0+ & iOS 16.0+
pub fn new_acceleration_structure_with_size_offset(
&self,
size: u64,
offset: u64,
) -> Option<AccelerationStructure> {
unsafe {
let ptr: *mut MTLAccelerationStructure = msg_send![self, newAccelerationStructureWithSize:size
offset:offset];
if !ptr.is_null() {
Some(AccelerationStructure::from_ptr(ptr))
} else {
None
}
}
}
}
/// See <https://developer.apple.com/documentation/metal/mtlheapdescriptor/>
@ -197,6 +260,11 @@ impl HeapDescriptorRef {
unsafe { msg_send![self, hazardTrackingMode] }
}
/// Only available on macos(10.15), ios(13.0)
pub fn set_hazard_tracking_mode(&self, hazard_tracking_mode: MTLHazardTrackingMode) {
unsafe { msg_send![self, setHazardTrackingMode: hazard_tracking_mode] }
}
/// Only available on macos(10.15), ios(13.0)
pub fn resource_options(&self) -> MTLResourceOptions {
unsafe { msg_send![self, resourceOptions] }
@ -206,4 +274,8 @@ impl HeapDescriptorRef {
pub fn heap_type(&self) -> MTLHeapType {
unsafe { msg_send![self, type] }
}
/// Only available on macos(10.15), ios(13.0)
pub fn set_heap_type(&self, type_: MTLHeapType) {
unsafe { msg_send![self, setType: type_] }
}
}

Просмотреть файл

@ -1,6 +1,6 @@
use super::*;
bitflags! {
bitflags::bitflags! {
/// See <https://developer.apple.com/documentation/metal/mtlindirectcommandtype/>
#[allow(non_upper_case_globals)]
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]

4
third_party/rust/metal/src/lib.rs поставляемый
Просмотреть файл

@ -9,10 +9,6 @@
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#[macro_use]
pub extern crate bitflags;
#[macro_use]
pub extern crate log;
#[macro_use]
pub extern crate objc;
#[macro_use]

4
third_party/rust/metal/src/library.rs поставляемый
Просмотреть файл

@ -148,7 +148,7 @@ impl FunctionConstantRef {
}
}
bitflags! {
bitflags::bitflags! {
/// Only available on (macos(11.0), ios(14.0))
///
/// See <https://developer.apple.com/documentation/metal/mtlfunctionoptions/>
@ -263,8 +263,6 @@ impl FunctionHandleRef {
}
// TODO:
// MTLVisibleFunctionTableDescriptor
// MTLVisibleFunctionTable
// MTLIntersectionFunctionSignature
// MTLIntersectionFunctionTableDescriptor
// MTLIntersectionFunctionTable

4
third_party/rust/metal/src/mps.rs поставляемый
Просмотреть файл

@ -40,7 +40,7 @@ pub enum MPSRayDataType {
OriginMaskDirectionMaxDistance = 2,
}
bitflags! {
bitflags::bitflags! {
/// See <https://developer.apple.com/documentation/metalperformanceshaders/mpsraymaskoptions>
#[allow(non_upper_case_globals)]
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
@ -116,7 +116,7 @@ pub enum MPSAccelerationStructureStatus {
Built = 1,
}
bitflags! {
bitflags::bitflags! {
/// See <https://developer.apple.com/documentation/metalperformanceshaders/mpsaccelerationstructureusage>
#[allow(non_upper_case_globals)]
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]

Просмотреть файл

@ -303,8 +303,12 @@ impl ComputePipelineStateRef {
// - (nullable id <MTLComputePipelineState>)newComputePipelineStateWithAdditionalBinaryFunctions:(nonnull NSArray<id<MTLFunction>> *)functions error:(__autoreleasing NSError **)error
// API_AVAILABLE(macos(11.0), ios(14.0));
// TODO: newVisibleFunctionTableWithDescriptor
// - (nullable id<MTLVisibleFunctionTable>)newVisibleFunctionTableWithDescriptor:(MTLVisibleFunctionTableDescriptor * __nonnull)descriptor
pub fn new_visible_function_table_with_descriptor(
&self,
descriptor: &VisibleFunctionTableDescriptorRef,
) -> VisibleFunctionTable {
unsafe { msg_send![self, newVisibleFunctionTableWithDescriptor: descriptor ] }
}
/// Only available on (macos(11.0), ios(14.0))
pub fn new_intersection_function_table_with_descriptor(

45
third_party/rust/metal/src/pipeline/render.rs поставляемый
Просмотреть файл

@ -47,7 +47,7 @@ pub enum MTLBlendOperation {
Max = 4,
}
bitflags! {
bitflags::bitflags! {
/// See <https://developer.apple.com/documentation/metal/mtlcolorwritemask>
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct MTLColorWriteMask: NSUInteger {
@ -664,6 +664,16 @@ impl RenderPipelineDescriptorRef {
unsafe { msg_send![self, setBinaryArchives: ns_array] }
}
/// API_AVAILABLE(macos(11.0), ios(14.0));
pub fn fragment_linked_functions(&self) -> &LinkedFunctionsRef {
unsafe { msg_send![self, fragmentLinkedFunctions] }
}
/// API_AVAILABLE(macos(11.0), ios(14.0));
pub fn set_fragment_linked_functions(&self, functions: &LinkedFunctionsRef) {
unsafe { msg_send![self, setFragmentLinkedFunctions: functions] }
}
pub fn reset(&self) {
unsafe { msg_send![self, reset] }
}
@ -688,6 +698,39 @@ impl RenderPipelineStateRef {
crate::nsstring_as_str(label)
}
}
/// Only available on (macos(11.0), ios(14.0))
pub fn new_intersection_function_table_with_descriptor(
&self,
descriptor: &IntersectionFunctionTableDescriptorRef,
stage: MTLRenderStages,
) -> IntersectionFunctionTable {
unsafe {
msg_send![self, newIntersectionFunctionTableWithDescriptor: descriptor
stage:stage]
}
}
/// Only available on (macos(11.0), ios(14.0))
pub fn function_handle_with_function(
&self,
function: &FunctionRef,
stage: MTLRenderStages,
) -> Option<&FunctionHandleRef> {
unsafe {
msg_send![self, functionHandleWithFunction: function
stage:stage]
}
}
/// Only available on (macos(11.0), ios(14.0))
pub fn new_visible_function_table_with_descriptor(
&self,
descriptor: &VisibleFunctionTableDescriptorRef,
stage: MTLRenderStages,
) -> VisibleFunctionTable {
unsafe { msg_send![self, newVisibleFunctionTableWithDescriptor: descriptor stage:stage] }
}
}
/// See <https://developer.apple.com/documentation/metal/mtlrenderpipelinecolorattachmentdescriptorarray>

4
third_party/rust/metal/src/resource.rs поставляемый
Просмотреть файл

@ -55,7 +55,7 @@ pub const MTLResourceStorageModeMask: NSUInteger = 0xf << MTLResourceStorageMode
pub const MTLResourceHazardTrackingModeShift: NSUInteger = 8;
pub const MTLResourceHazardTrackingModeMask: NSUInteger = 0x3 << MTLResourceHazardTrackingModeShift;
bitflags! {
bitflags::bitflags! {
/// See <https://developer.apple.com/documentation/metal/mtlresourceoptions>
#[allow(non_upper_case_globals)]
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
@ -77,7 +77,7 @@ bitflags! {
}
}
bitflags! {
bitflags::bitflags! {
/// Options that describe how a graphics or compute function uses an argument buffers resource.
///
/// Enabling certain options for certain resources determines whether the Metal driver should

4
third_party/rust/metal/src/sync.rs поставляемый
Просмотреть файл

@ -9,7 +9,7 @@ use super::*;
use block::{Block, RcBlock};
use std::ptr;
#[cfg(feature = "dispatch_queue")]
#[cfg(feature = "dispatch")]
use dispatch;
/// See <https://developer.apple.com/documentation/metal/mtlsharedeventnotificationblock>
@ -136,7 +136,7 @@ impl FenceRef {
}
}
bitflags! {
bitflags::bitflags! {
/// The render stages at which a synchronization command is triggered.
///
/// Render stages provide finer control for specifying when synchronization must occur,

2
third_party/rust/metal/src/texture.rs поставляемый
Просмотреть файл

@ -33,7 +33,7 @@ pub enum MTLTextureCompressionType {
Lossy = 1,
}
bitflags! {
bitflags::bitflags! {
/// See <https://developer.apple.com/documentation/metal/mtltextureusage>
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct MTLTextureUsage: NSUInteger {

2
third_party/rust/naga/.cargo-checksum.json поставляемый

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

4
third_party/rust/naga/Cargo.toml поставляемый
Просмотреть файл

@ -47,7 +47,7 @@ bit-set = "0.6"
bitflags = "2.6"
log = "0.4"
rustc-hash = "1.1.0"
thiserror = "1.0.61"
thiserror = "1.0.62"
[dependencies.arbitrary]
version = "1.3"
@ -74,7 +74,7 @@ version = "0.2.1"
optional = true
[dependencies.serde]
version = "1.0.203"
version = "1.0.204"
features = ["derive"]
optional = true

14
third_party/rust/naga/src/back/hlsl/writer.rs поставляемый
Просмотреть файл

@ -9,7 +9,7 @@ use super::{
use crate::{
back::{self, Baked},
proc::{self, NameKey},
valid, Handle, Module, ScalarKind, ShaderStage, TypeInner,
valid, Handle, Module, Scalar, ScalarKind, ShaderStage, TypeInner,
};
use std::{fmt, mem};
@ -2013,7 +2013,11 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
// ownership of our reusable access chain buffer.
let chain = mem::take(&mut self.temp_access_chain);
let var_name = &self.names[&NameKey::GlobalVariable(var_handle)];
write!(self.out, "{var_name}.Interlocked{fun_str}(")?;
let width = match func_ctx.resolve_type(value, &module.types) {
&TypeInner::Scalar(Scalar { width: 8, .. }) => "64",
_ => "",
};
write!(self.out, "{var_name}.Interlocked{fun_str}{width}(")?;
self.write_storage_address(module, &chain, func_ctx)?;
self.temp_access_chain = chain;
}
@ -2852,7 +2856,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
let inner = func_ctx.resolve_type(expr, &module.types);
let close_paren = match convert {
Some(dst_width) => {
let scalar = crate::Scalar {
let scalar = Scalar {
kind,
width: dst_width,
};
@ -3213,7 +3217,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
// as non-32bit types are DXC only.
Function::MissingIntOverload(fun_name) => {
let scalar_kind = func_ctx.resolve_type(arg, &module.types).scalar();
if let Some(crate::Scalar {
if let Some(Scalar {
kind: ScalarKind::Sint,
width: 4,
}) = scalar_kind
@ -3231,7 +3235,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
// as non-32bit types are DXC only.
Function::MissingIntReturnType(fun_name) => {
let scalar_kind = func_ctx.resolve_type(arg, &module.types).scalar();
if let Some(crate::Scalar {
if let Some(Scalar {
kind: ScalarKind::Sint,
width: 4,
}) = scalar_kind

4
third_party/rust/naga/src/back/mod.rs поставляемый
Просмотреть файл

@ -254,7 +254,9 @@ impl crate::TypeInner {
/// Returns true if this is a handle to a type rather than the type directly.
pub const fn is_handle(&self) -> bool {
match *self {
crate::TypeInner::Image { .. } | crate::TypeInner::Sampler { .. } => true,
crate::TypeInner::Image { .. }
| crate::TypeInner::Sampler { .. }
| crate::TypeInner::AccelerationStructure { .. } => true,
_ => false,
}
}

Просмотреть файл

@ -2482,6 +2482,10 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
crate::TypeInner::Scalar(crate::Scalar { width: 8, .. })
);
let result = if is_64_bit_min_max && is_statement {
let rctx = ctx.runtime_expression_ctx(span)?;
rctx.block
.extend(rctx.emitter.finish(&rctx.function.expressions));
rctx.emitter.start(&rctx.function.expressions);
None
} else {
let ty = ctx.register_type(value)?;

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

2
third_party/rust/wgpu-core/Cargo.toml поставляемый
Просмотреть файл

@ -43,7 +43,7 @@ targets = [
arrayvec = "0.7"
bit-vec = "0.7"
bitflags = "2"
document-features = "0.2.8"
document-features = "0.2.10"
indexmap = "2"
log = "0.4"
once_cell = "1"

Просмотреть файл

@ -66,7 +66,7 @@ pub enum CreateBindGroupLayoutError {
},
#[error(transparent)]
TooManyBindings(BindingTypeMaxCountError),
#[error("Binding index {binding} is greater than the maximum index {maximum}")]
#[error("Binding index {binding} is greater than the maximum number {maximum}")]
InvalidBindingIndex { binding: u32, maximum: u32 },
#[error("Invalid visibility {0:?}")]
InvalidVisibility(wgt::ShaderStages),

Просмотреть файл

@ -321,10 +321,7 @@ impl Global {
.raw()
.flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64));
}
device
.raw()
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
device.raw().unmap_buffer(raw_buf);
}
Ok(())
@ -370,10 +367,7 @@ impl Global {
);
}
ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len());
device
.raw()
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
device.raw().unmap_buffer(raw_buf);
}
Ok(())
@ -1391,12 +1385,18 @@ impl Global {
let hub = A::hub(self);
let missing_implicit_pipeline_ids =
desc.layout.is_none() && id_in.is_some() && implicit_pipeline_ids.is_none();
let fid = hub.render_pipelines.prepare(id_in);
let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub));
let is_auto_layout = desc.layout.is_none();
let error = 'error: {
if missing_implicit_pipeline_ids {
// TODO: categorize this error as API misuse
break 'error pipeline::ImplicitLayoutError::MissingImplicitPipelineIds.into();
}
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
@ -1511,23 +1511,18 @@ impl Global {
Err(e) => break 'error e,
};
if is_auto_layout {
// TODO: categorize the errors below as API misuse
let ids = if let Some(ids) = implicit_context.as_ref() {
let group_count = pipeline.layout.bind_group_layouts.len();
if ids.group_ids.len() < group_count {
log::error!(
"Not enough bind group IDs ({}) specified for the implicit layout ({})",
ids.group_ids.len(),
group_count
);
break 'error pipeline::ImplicitLayoutError::MissingIds(group_count as _)
.into();
}
ids
} else {
break 'error pipeline::ImplicitLayoutError::MissingIds(0).into();
};
if let Some(ids) = implicit_context.as_ref() {
let group_count = pipeline.layout.bind_group_layouts.len();
if ids.group_ids.len() < group_count {
log::error!(
"Not enough bind group IDs ({}) specified for the implicit layout ({})",
ids.group_ids.len(),
group_count
);
// TODO: categorize this error as API misuse
break 'error pipeline::ImplicitLayoutError::MissingIds(group_count as _)
.into();
}
let mut pipeline_layout_guard = hub.pipeline_layouts.write();
let mut bgl_guard = hub.bind_group_layouts.write();
@ -1558,16 +1553,14 @@ impl Global {
let id = fid.assign_error();
if is_auto_layout {
// We also need to assign errors to the implicit pipeline layout and the
// implicit bind group layouts.
if let Some(ids) = implicit_context {
let mut pipeline_layout_guard = hub.pipeline_layouts.write();
let mut bgl_guard = hub.bind_group_layouts.write();
pipeline_layout_guard.insert_error(ids.root_id);
for bgl_id in ids.group_ids {
bgl_guard.insert_error(bgl_id);
}
// We also need to assign errors to the implicit pipeline layout and the
// implicit bind group layouts.
if let Some(ids) = implicit_context {
let mut pipeline_layout_guard = hub.pipeline_layouts.write();
let mut bgl_guard = hub.bind_group_layouts.write();
pipeline_layout_guard.insert_error(ids.root_id);
for bgl_id in ids.group_ids {
bgl_guard.insert_error(bgl_id);
}
}
@ -1635,12 +1628,18 @@ impl Global {
let hub = A::hub(self);
let missing_implicit_pipeline_ids =
desc.layout.is_none() && id_in.is_some() && implicit_pipeline_ids.is_none();
let fid = hub.compute_pipelines.prepare(id_in);
let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub));
let is_auto_layout = desc.layout.is_none();
let error = 'error: {
if missing_implicit_pipeline_ids {
// TODO: categorize this error as API misuse
break 'error pipeline::ImplicitLayoutError::MissingImplicitPipelineIds.into();
}
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
@ -1709,23 +1708,18 @@ impl Global {
Err(e) => break 'error e,
};
if is_auto_layout {
// TODO: categorize the errors below as API misuse
let ids = if let Some(ids) = implicit_context.as_ref() {
let group_count = pipeline.layout.bind_group_layouts.len();
if ids.group_ids.len() < group_count {
log::error!(
"Not enough bind group IDs ({}) specified for the implicit layout ({})",
ids.group_ids.len(),
group_count
);
break 'error pipeline::ImplicitLayoutError::MissingIds(group_count as _)
.into();
}
ids
} else {
break 'error pipeline::ImplicitLayoutError::MissingIds(0).into();
};
if let Some(ids) = implicit_context.as_ref() {
let group_count = pipeline.layout.bind_group_layouts.len();
if ids.group_ids.len() < group_count {
log::error!(
"Not enough bind group IDs ({}) specified for the implicit layout ({})",
ids.group_ids.len(),
group_count
);
// TODO: categorize this error as API misuse
break 'error pipeline::ImplicitLayoutError::MissingIds(group_count as _)
.into();
}
let mut pipeline_layout_guard = hub.pipeline_layouts.write();
let mut bgl_guard = hub.bind_group_layouts.write();
@ -1756,16 +1750,14 @@ impl Global {
let id = fid.assign_error();
if is_auto_layout {
// We also need to assign errors to the implicit pipeline layout and the
// implicit bind group layouts.
if let Some(ids) = implicit_context {
let mut pipeline_layout_guard = hub.pipeline_layouts.write();
let mut bgl_guard = hub.bind_group_layouts.write();
pipeline_layout_guard.insert_error(ids.root_id);
for bgl_id in ids.group_ids {
bgl_guard.insert_error(bgl_id);
}
// We also need to assign errors to the implicit pipeline layout and the
// implicit bind group layouts.
if let Some(ids) = implicit_context {
let mut pipeline_layout_guard = hub.pipeline_layouts.write();
let mut bgl_guard = hub.bind_group_layouts.write();
pipeline_layout_guard.insert_error(ids.root_id);
for bgl_id in ids.group_ids {
bgl_guard.insert_error(bgl_id);
}
}
@ -2525,7 +2517,7 @@ impl Global {
}
let map_state = &*buffer.map_state.lock();
match *map_state {
resource::BufferMapState::Init { ref ptr, .. } => {
resource::BufferMapState::Init { ref staging_buffer } => {
// offset (u64) can not be < 0, so no need to validate the lower bound
if offset + range_size > buffer.size {
return Err(BufferAccessError::OutOfBoundsOverrun {
@ -2533,12 +2525,9 @@ impl Global {
max: buffer.size,
});
}
unsafe {
Ok((
NonNull::new_unchecked(ptr.as_ptr().offset(offset as isize)),
range_size,
))
}
let ptr = unsafe { staging_buffer.ptr() };
let ptr = unsafe { NonNull::new_unchecked(ptr.as_ptr().offset(offset as isize)) };
Ok((ptr, range_size))
}
resource::BufferMapState::Active {
ref ptr, ref range, ..

Просмотреть файл

@ -95,15 +95,14 @@ pub enum WaitIdleError {
/// submission index.
///
/// 3) `handle_mapping` drains `self.ready_to_map` and actually maps the
/// buffers, collecting a list of notification closures to call. But any
/// buffers that were dropped by the user get moved to
/// `self.free_resources`.
/// buffers, collecting a list of notification closures to call.
///
/// Only calling `Global::buffer_map_async` clones a new `Arc` for the
/// buffer. This new `Arc` is only dropped by `handle_mapping`.
pub(crate) struct LifetimeTracker<A: HalApi> {
/// Resources that the user has requested be mapped, but which are used by
/// queue submissions still in flight.
/// Buffers for which a call to [`Buffer::map_async`] has succeeded, but
/// which haven't been examined by `triage_mapped` yet to decide when they
/// can be mapped.
mapped: Vec<Arc<Buffer<A>>>,
/// Resources used by queue submissions still in flight. One entry per

Просмотреть файл

@ -433,18 +433,18 @@ pub struct ImplicitPipelineContext {
}
pub struct ImplicitPipelineIds<'a> {
pub root_id: Option<PipelineLayoutId>,
pub group_ids: &'a [Option<BindGroupLayoutId>],
pub root_id: PipelineLayoutId,
pub group_ids: &'a [BindGroupLayoutId],
}
impl ImplicitPipelineIds<'_> {
fn prepare<A: HalApi>(self, hub: &Hub<A>) -> ImplicitPipelineContext {
ImplicitPipelineContext {
root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(),
root_id: hub.pipeline_layouts.prepare(Some(self.root_id)).into_id(),
group_ids: self
.group_ids
.iter()
.map(|id_in| hub.bind_group_layouts.prepare(*id_in).into_id())
.map(|id_in| hub.bind_group_layouts.prepare(Some(*id_in)).into_id())
.collect(),
}
}

143
third_party/rust/wgpu-core/src/device/queue.rs поставляемый
Просмотреть файл

@ -14,11 +14,11 @@ use crate::{
hal_label,
id::{self, QueueId},
init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
lock::{rank, Mutex, RwLockWriteGuard},
lock::RwLockWriteGuard,
resource::{
Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError,
DestroyedTexture, Labeled, ParentDevice, ResourceErrorIdent, StagingBuffer, Texture,
TextureInner, Trackable,
DestroyedTexture, FlushedStagingBuffer, Labeled, ParentDevice, ResourceErrorIdent,
StagingBuffer, Texture, TextureInner, Trackable,
},
resource_log,
track::{self, Tracker, TrackerIndex},
@ -29,8 +29,9 @@ use hal::{CommandEncoder as _, Device as _, Queue as _};
use smallvec::SmallVec;
use std::{
iter, mem,
ptr::{self, NonNull},
iter,
mem::{self},
ptr::NonNull,
sync::{atomic::Ordering, Arc},
};
use thiserror::Error;
@ -135,7 +136,7 @@ pub struct WrappedSubmissionIndex {
/// submission, to be freed when it completes
#[derive(Debug)]
pub enum TempResource<A: HalApi> {
StagingBuffer(StagingBuffer<A>),
StagingBuffer(FlushedStagingBuffer<A>),
DestroyedBuffer(DestroyedBuffer<A>),
DestroyedTexture(DestroyedTexture<A>),
}
@ -255,7 +256,7 @@ impl<A: HalApi> PendingWrites<A> {
self.temp_resources.push(resource);
}
fn consume(&mut self, buffer: StagingBuffer<A>) {
pub fn consume(&mut self, buffer: FlushedStagingBuffer<A>) {
self.temp_resources
.push(TempResource::StagingBuffer(buffer));
}
@ -312,47 +313,6 @@ impl<A: HalApi> PendingWrites<A> {
}
}
pub(crate) fn prepare_staging_buffer<A: HalApi>(
device: &Arc<Device<A>>,
size: wgt::BufferSize,
instance_flags: wgt::InstanceFlags,
) -> Result<(StagingBuffer<A>, NonNull<u8>), DeviceError> {
profiling::scope!("prepare_staging_buffer");
let stage_desc = hal::BufferDescriptor {
label: hal_label(Some("(wgpu internal) Staging"), instance_flags),
size: size.get(),
usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::COPY_SRC,
memory_flags: hal::MemoryFlags::TRANSIENT,
};
let buffer = unsafe { device.raw().create_buffer(&stage_desc)? };
let mapping = unsafe { device.raw().map_buffer(&buffer, 0..size.get()) }?;
let staging_buffer = StagingBuffer {
raw: Mutex::new(rank::STAGING_BUFFER_RAW, Some(buffer)),
device: device.clone(),
size,
is_coherent: mapping.is_coherent,
};
Ok((staging_buffer, mapping.ptr))
}
impl<A: HalApi> StagingBuffer<A> {
unsafe fn flush(&self, device: &A::Device) -> Result<(), DeviceError> {
if !self.is_coherent {
unsafe {
device.flush_mapped_ranges(
self.raw.lock().as_ref().unwrap(),
iter::once(0..self.size.get()),
)
};
}
unsafe { device.unmap_buffer(self.raw.lock().as_ref().unwrap())? };
Ok(())
}
}
#[derive(Clone, Debug, Error)]
#[error("Queue is invalid")]
pub struct InvalidQueue;
@ -445,23 +405,15 @@ impl Global {
// Platform validation requires that the staging buffer always be
// freed, even if an error occurs. All paths from here must call
// `device.pending_writes.consume`.
let (staging_buffer, staging_buffer_ptr) =
prepare_staging_buffer(device, data_size, device.instance_flags)?;
let mut staging_buffer = StagingBuffer::new(device, data_size)?;
let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
if let Err(flush_error) = unsafe {
let staging_buffer = {
profiling::scope!("copy");
ptr::copy_nonoverlapping(
data.as_ptr(),
staging_buffer_ptr.as_ptr(),
data_size.get() as usize,
);
staging_buffer.flush(device.raw())
} {
pending_writes.consume(staging_buffer);
return Err(flush_error.into());
}
staging_buffer.write(data);
staging_buffer.flush()
};
let result = self.queue_write_staging_buffer_impl(
&queue,
@ -492,14 +444,14 @@ impl Global {
let device = &queue.device;
let (staging_buffer, staging_buffer_ptr) =
prepare_staging_buffer(device, buffer_size, device.instance_flags)?;
let staging_buffer = StagingBuffer::new(device, buffer_size)?;
let ptr = unsafe { staging_buffer.ptr() };
let fid = hub.staging_buffers.prepare(id_in);
let id = fid.assign(Arc::new(staging_buffer));
resource_log!("Queue::create_staging_buffer {id:?}");
Ok((id, staging_buffer_ptr))
Ok((id, ptr))
}
pub fn queue_write_staging_buffer<A: HalApi>(
@ -532,10 +484,7 @@ impl Global {
// user. Platform validation requires that the staging buffer always
// be freed, even if an error occurs. All paths from here must call
// `device.pending_writes.consume`.
if let Err(flush_error) = unsafe { staging_buffer.flush(device.raw()) } {
pending_writes.consume(staging_buffer);
return Err(flush_error.into());
}
let staging_buffer = staging_buffer.flush();
let result = self.queue_write_staging_buffer_impl(
&queue,
@ -600,7 +549,7 @@ impl Global {
queue: &Arc<Queue<A>>,
device: &Arc<Device<A>>,
pending_writes: &mut PendingWrites<A>,
staging_buffer: &StagingBuffer<A>,
staging_buffer: &FlushedStagingBuffer<A>,
buffer_id: id::BufferId,
buffer_offset: u64,
) -> Result<(), QueueWriteError> {
@ -630,20 +579,15 @@ impl Global {
dst_offset: buffer_offset,
size: staging_buffer.size,
};
let inner_buffer = staging_buffer.raw.lock();
let barriers = iter::once(hal::BufferBarrier {
buffer: inner_buffer.as_ref().unwrap(),
buffer: staging_buffer.raw(),
usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
})
.chain(transition.map(|pending| pending.into_hal(&dst, &snatch_guard)));
let encoder = pending_writes.activate();
unsafe {
encoder.transition_buffers(barriers);
encoder.copy_buffer_to_buffer(
inner_buffer.as_ref().unwrap(),
dst_raw,
iter::once(region),
);
encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, iter::once(region));
}
pending_writes.insert_buffer(&dst);
@ -832,17 +776,17 @@ impl Global {
// Platform validation requires that the staging buffer always be
// freed, even if an error occurs. All paths from here must call
// `device.pending_writes.consume`.
let (staging_buffer, staging_buffer_ptr) =
prepare_staging_buffer(device, stage_size, device.instance_flags)?;
let mut staging_buffer = StagingBuffer::new(device, stage_size)?;
if stage_bytes_per_row == bytes_per_row {
profiling::scope!("copy aligned");
// Fast path if the data is already being aligned optimally.
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr().offset(data_layout.offset as isize),
staging_buffer_ptr.as_ptr(),
stage_size.get() as usize,
staging_buffer.write_with_offset(
data,
data_layout.offset as isize,
0,
(data.len() as u64 - data_layout.offset) as usize,
);
}
} else {
@ -851,27 +795,22 @@ impl Global {
let copy_bytes_per_row = stage_bytes_per_row.min(bytes_per_row) as usize;
for layer in 0..size.depth_or_array_layers {
let rows_offset = layer * block_rows_per_image;
for row in 0..height_blocks {
for row in rows_offset..rows_offset + height_blocks {
let src_offset = data_layout.offset as u32 + row * bytes_per_row;
let dst_offset = row * stage_bytes_per_row;
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr().offset(
data_layout.offset as isize
+ (rows_offset + row) as isize * bytes_per_row as isize,
),
staging_buffer_ptr.as_ptr().offset(
(rows_offset + row) as isize * stage_bytes_per_row as isize,
),
staging_buffer.write_with_offset(
data,
src_offset as isize,
dst_offset as isize,
copy_bytes_per_row,
);
)
}
}
}
}
if let Err(e) = unsafe { staging_buffer.flush(device.raw()) } {
pending_writes.consume(staging_buffer);
return Err(e.into());
}
let staging_buffer = staging_buffer.flush();
let regions = (0..array_layer_count).map(|rel_array_layer| {
let mut texture_base = dst_base.clone();
@ -890,9 +829,8 @@ impl Global {
});
{
let inner_buffer = staging_buffer.raw.lock();
let barrier = hal::BufferBarrier {
buffer: inner_buffer.as_ref().unwrap(),
buffer: staging_buffer.raw(),
usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
};
@ -904,7 +842,7 @@ impl Global {
unsafe {
encoder.transition_textures(transition.map(|pending| pending.into_hal(dst_raw)));
encoder.transition_buffers(iter::once(barrier));
encoder.copy_buffer_to_texture(inner_buffer.as_ref().unwrap(), dst_raw, regions);
encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, regions);
}
}
@ -1131,7 +1069,7 @@ impl Global {
let fence = fence_guard.as_mut().unwrap();
let submit_index = device
.active_submission_index
.fetch_add(1, Ordering::Relaxed)
.fetch_add(1, Ordering::SeqCst)
+ 1;
let mut active_executions = Vec::new();
@ -1454,6 +1392,11 @@ impl Global {
)
.map_err(DeviceError::from)?;
}
// Advance the successful submission index.
device
.last_successful_submission_index
.fetch_max(submit_index, Ordering::SeqCst);
}
profiling::scope!("cleanup");

Просмотреть файл

@ -22,8 +22,8 @@ use crate::{
pipeline,
pool::ResourcePool,
resource::{
self, Buffer, Labeled, ParentDevice, QuerySet, Sampler, Texture, TextureView,
TextureViewNotRenderableReason, TrackingData,
self, Buffer, Labeled, ParentDevice, QuerySet, Sampler, StagingBuffer, Texture,
TextureView, TextureViewNotRenderableReason, TrackingData,
},
resource_log,
snatch::{SnatchGuard, SnatchLock, Snatchable},
@ -88,8 +88,27 @@ pub struct Device<A: HalApi> {
label: String,
pub(crate) command_allocator: command::CommandAllocator<A>,
//Note: The submission index here corresponds to the last submission that is done.
pub(crate) active_submission_index: AtomicU64, //SubmissionIndex,
/// The index of the last command submission that was attempted.
///
/// Note that `fence` may never be signalled with this value, if the command
/// submission failed. If you need to wait for everything running on a
/// `Queue` to complete, wait for [`last_successful_submission_index`].
///
/// [`last_successful_submission_index`]: Device::last_successful_submission_index
pub(crate) active_submission_index: hal::AtomicFenceValue,
/// The index of the last successful submission to this device's
/// [`hal::Queue`].
///
/// Unlike [`active_submission_index`], which is incremented each time
/// submission is attempted, this is updated only when submission succeeds,
/// so waiting for this value won't hang waiting for work that was never
/// submitted.
///
/// [`active_submission_index`]: Device::active_submission_index
pub(crate) last_successful_submission_index: hal::AtomicFenceValue,
// NOTE: if both are needed, the `snatchable_lock` must be consistently acquired before the
// `fence` lock to avoid deadlocks.
pub(crate) fence: RwLock<Option<A::Fence>>,
@ -258,6 +277,7 @@ impl<A: HalApi> Device<A> {
label: desc.label.to_string(),
command_allocator,
active_submission_index: AtomicU64::new(0),
last_successful_submission_index: AtomicU64::new(0),
fence: RwLock::new(rank::DEVICE_FENCE, Some(fence)),
snatchable_lock: unsafe { SnatchLock::new(rank::DEVICE_SNATCHABLE_LOCK) },
valid: AtomicBool::new(true),
@ -388,37 +408,41 @@ impl<A: HalApi> Device<A> {
profiling::scope!("Device::maintain");
let fence = fence_guard.as_ref().unwrap();
let last_done_index = if maintain.is_wait() {
let index_to_wait_for = match maintain {
wgt::Maintain::WaitForSubmissionIndex(submission_index) => {
// We don't need to check to see if the queue id matches
// as we already checked this from inside the poll call.
submission_index.index
}
_ => self.active_submission_index.load(Ordering::Relaxed),
};
unsafe {
self.raw
.as_ref()
.unwrap()
.wait(fence, index_to_wait_for, CLEANUP_WAIT_MS)
.map_err(DeviceError::from)?
};
index_to_wait_for
} else {
unsafe {
// Determine which submission index `maintain` represents.
let submission_index = match maintain {
wgt::Maintain::WaitForSubmissionIndex(submission_index) => {
// We don't need to check to see if the queue id matches
// as we already checked this from inside the poll call.
submission_index.index
}
wgt::Maintain::Wait => self
.last_successful_submission_index
.load(Ordering::Acquire),
wgt::Maintain::Poll => unsafe {
self.raw
.as_ref()
.unwrap()
.get_fence_value(fence)
.map_err(DeviceError::from)?
}
},
};
log::info!("Device::maintain: last done index {last_done_index}");
// If necessary, wait for that submission to complete.
if maintain.is_wait() {
unsafe {
self.raw
.as_ref()
.unwrap()
.wait(fence, submission_index, CLEANUP_WAIT_MS)
.map_err(DeviceError::from)?
};
}
log::info!("Device::maintain: waiting for submission index {submission_index}");
let mut life_tracker = self.lock_life();
let submission_closures =
life_tracker.triage_submissions(last_done_index, &self.command_allocator);
life_tracker.triage_submissions(submission_index, &self.command_allocator);
life_tracker.triage_mapped();
@ -591,21 +615,15 @@ impl<A: HalApi> Device<A> {
};
hal::BufferUses::MAP_WRITE
} else {
let (staging_buffer, staging_buffer_ptr) = queue::prepare_staging_buffer(
self,
wgt::BufferSize::new(aligned_size).unwrap(),
self.instance_flags,
)?;
let mut staging_buffer =
StagingBuffer::new(self, wgt::BufferSize::new(aligned_size).unwrap())?;
// Zero initialize memory and then mark the buffer as initialized
// (it's guaranteed that this is the case by the time the buffer is usable)
unsafe { std::ptr::write_bytes(staging_buffer_ptr.as_ptr(), 0, aligned_size as usize) };
staging_buffer.write_zeros();
buffer.initialization_status.write().drain(0..aligned_size);
*buffer.map_state.lock() = resource::BufferMapState::Init {
staging_buffer,
ptr: staging_buffer_ptr,
};
*buffer.map_state.lock() = resource::BufferMapState::Init { staging_buffer };
hal::BufferUses::COPY_DST
};
@ -1386,7 +1404,8 @@ impl<A: HalApi> Device<A> {
tracking_data: TrackingData::new(self.tracker_indices.samplers.clone()),
comparison: desc.compare.is_some(),
filtering: desc.min_filter == wgt::FilterMode::Linear
|| desc.mag_filter == wgt::FilterMode::Linear,
|| desc.mag_filter == wgt::FilterMode::Linear
|| desc.mipmap_filter == wgt::FilterMode::Linear,
};
let sampler = Arc::new(sampler);
@ -3592,7 +3611,9 @@ impl<A: HalApi> Device<A> {
/// Wait for idle and remove resources that we can, before we die.
pub(crate) fn prepare_to_die(&self) {
self.pending_writes.lock().as_mut().unwrap().deactivate();
let current_index = self.active_submission_index.load(Ordering::Relaxed);
let current_index = self
.last_successful_submission_index
.load(Ordering::Acquire);
if let Err(error) = unsafe {
let fence = self.fence.read();
let fence = fence.as_ref().unwrap();

12
third_party/rust/wgpu-core/src/id.rs поставляемый
Просмотреть файл

@ -77,18 +77,6 @@ impl RawId {
}
}
/// Coerce a slice of identifiers into a slice of optional raw identifiers.
///
/// There's two reasons why we know this is correct:
/// * `Option<T>` is guaranteed to be niche-filled to 0's.
/// * The `T` in `Option<T>` can inhabit any representation except 0's, since
/// its underlying representation is `NonZero*`.
pub fn as_option_slice<T: Marker>(ids: &[Id<T>]) -> &[Option<Id<T>>] {
// SAFETY: Any Id<T> is repr(transparent) over `Option<RawId>`, since both
// are backed by non-zero types.
unsafe { std::slice::from_raw_parts(ids.as_ptr().cast(), ids.len()) }
}
/// An identifier for a wgpu object.
///
/// An `Id<T>` value identifies a value stored in a [`Global`]'s [`Hub`].

1
third_party/rust/wgpu-core/src/lock/rank.rs поставляемый
Просмотреть файл

@ -143,7 +143,6 @@ define_lock_ranks! {
rank RENDER_BUNDLE_SCOPE_QUERY_SETS "RenderBundleScope::query_sets" followed by { }
rank RESOURCE_POOL_INNER "ResourcePool::inner" followed by { }
rank SHARED_TRACKER_INDEX_ALLOCATOR_INNER "SharedTrackerIndexAllocator::inner" followed by { }
rank STAGING_BUFFER_RAW "StagingBuffer::raw" followed by { }
rank STATELESS_BIND_GROUP_STATE_RESOURCES "StatelessBindGroupState::resources" followed by { }
rank SURFACE_PRESENTATION "Surface::presentation" followed by { }
rank TEXTURE_BIND_GROUPS "Texture::bind_groups" followed by { }

2
third_party/rust/wgpu-core/src/pipeline.rs поставляемый
Просмотреть файл

@ -186,6 +186,8 @@ pub type ImplicitBindGroupCount = u8;
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum ImplicitLayoutError {
#[error("The implicit_pipeline_ids arg is required")]
MissingImplicitPipelineIds,
#[error("Missing IDs for deriving {0} bind groups")]
MissingIds(ImplicitBindGroupCount),
#[error("Unable to reflect the shader {0:?} interface")]

195
third_party/rust/wgpu-core/src/resource.rs поставляемый
Просмотреть файл

@ -24,13 +24,11 @@ use thiserror::Error;
use std::{
borrow::Borrow,
fmt::Debug,
iter, mem,
iter,
mem::{self, ManuallyDrop},
ops::Range,
ptr::NonNull,
sync::{
atomic::{AtomicUsize, Ordering},
Arc, Weak,
},
sync::{atomic::Ordering, Arc, Weak},
};
/// Information about the wgpu-core resource.
@ -63,7 +61,7 @@ pub(crate) struct TrackingData {
/// sequentially. Thus, when a queue submission completes, we know any
/// resources used in that submission and any lower-numbered submissions are
/// no longer in use by the GPU.
submission_index: AtomicUsize,
submission_index: hal::AtomicFenceValue,
}
impl Drop for TrackingData {
@ -77,7 +75,7 @@ impl TrackingData {
Self {
tracker_index: tracker_indices.alloc(),
tracker_indices,
submission_index: AtomicUsize::new(0),
submission_index: hal::AtomicFenceValue::new(0),
}
}
@ -88,12 +86,11 @@ impl TrackingData {
/// Record that this resource will be used by the queue submission with the
/// given index.
pub(crate) fn use_at(&self, submit_index: SubmissionIndex) {
self.submission_index
.store(submit_index as _, Ordering::Release);
self.submission_index.store(submit_index, Ordering::Release);
}
pub(crate) fn submission_index(&self) -> SubmissionIndex {
self.submission_index.load(Ordering::Acquire) as _
self.submission_index.load(Ordering::Acquire)
}
}
@ -255,10 +252,7 @@ pub enum BufferMapAsyncStatus {
#[derive(Debug)]
pub(crate) enum BufferMapState<A: HalApi> {
/// Mapped at creation.
Init {
staging_buffer: StagingBuffer<A>,
ptr: NonNull<u8>,
},
Init { staging_buffer: StagingBuffer<A> },
/// Waiting for GPU to be done before mapping
Waiting(BufferPendingMapping<A>),
/// Mapped
@ -650,15 +644,10 @@ impl<A: HalApi> Buffer<A> {
let raw_buf = self.try_raw(&snatch_guard)?;
log::debug!("{} map state -> Idle", self.error_ident());
match mem::replace(&mut *self.map_state.lock(), BufferMapState::Idle) {
BufferMapState::Init {
staging_buffer,
ptr,
} => {
BufferMapState::Init { staging_buffer } => {
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
let data = trace.make_binary("bin", unsafe {
std::slice::from_raw_parts(ptr.as_ptr(), self.size as usize)
});
let data = trace.make_binary("bin", staging_buffer.get_data());
trace.add(trace::Action::WriteBuffer {
id: buffer_id,
data,
@ -666,17 +655,11 @@ impl<A: HalApi> Buffer<A> {
queued: true,
});
}
let _ = ptr;
let raw_staging_buffer_guard = staging_buffer.raw.lock();
let raw_staging_buffer = raw_staging_buffer_guard.as_ref().unwrap();
if !staging_buffer.is_coherent {
unsafe {
device
.raw()
.flush_mapped_ranges(raw_staging_buffer, iter::once(0..self.size));
}
}
let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
let staging_buffer = staging_buffer.flush();
self.use_at(device.active_submission_index.load(Ordering::Relaxed) + 1);
let region = wgt::BufferSize::new(self.size).map(|size| hal::BufferCopy {
@ -685,15 +668,13 @@ impl<A: HalApi> Buffer<A> {
size,
});
let transition_src = hal::BufferBarrier {
buffer: raw_staging_buffer,
buffer: staging_buffer.raw(),
usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
};
let transition_dst = hal::BufferBarrier {
buffer: raw_buf,
usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST,
};
let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
let encoder = pending_writes.activate();
unsafe {
encoder.transition_buffers(
@ -701,14 +682,13 @@ impl<A: HalApi> Buffer<A> {
);
if self.size > 0 {
encoder.copy_buffer_to_buffer(
raw_staging_buffer,
staging_buffer.raw(),
raw_buf,
region.into_iter(),
);
}
}
drop(raw_staging_buffer_guard);
pending_writes.consume_temp(queue::TempResource::StagingBuffer(staging_buffer));
pending_writes.consume(staging_buffer);
pending_writes.insert_buffer(self);
}
BufferMapState::Idle => {
@ -734,12 +714,7 @@ impl<A: HalApi> Buffer<A> {
}
let _ = (ptr, range);
}
unsafe {
device
.raw()
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?
};
unsafe { device.raw().unmap_buffer(raw_buf) };
}
}
Ok(None)
@ -844,6 +819,11 @@ impl<A: HalApi> Drop for DestroyedBuffer<A> {
}
}
#[cfg(send_sync)]
unsafe impl<A: HalApi> Send for StagingBuffer<A> {}
#[cfg(send_sync)]
unsafe impl<A: HalApi> Sync for StagingBuffer<A> {}
/// A temporary buffer, consumed by the command that uses it.
///
/// A [`StagingBuffer`] is designed for one-shot uploads of data to the GPU. It
@ -865,33 +845,128 @@ impl<A: HalApi> Drop for DestroyedBuffer<A> {
/// [`Device::pending_writes`]: crate::device::Device
#[derive(Debug)]
pub struct StagingBuffer<A: HalApi> {
pub(crate) raw: Mutex<Option<A::Buffer>>,
pub(crate) device: Arc<Device<A>>,
raw: A::Buffer,
device: Arc<Device<A>>,
pub(crate) size: wgt::BufferSize,
pub(crate) is_coherent: bool,
is_coherent: bool,
ptr: NonNull<u8>,
}
impl<A: HalApi> Drop for StagingBuffer<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.lock().take() {
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_buffer(raw);
}
impl<A: HalApi> StagingBuffer<A> {
pub(crate) fn new(device: &Arc<Device<A>>, size: wgt::BufferSize) -> Result<Self, DeviceError> {
use hal::Device;
profiling::scope!("StagingBuffer::new");
let stage_desc = hal::BufferDescriptor {
label: crate::hal_label(Some("(wgpu internal) Staging"), device.instance_flags),
size: size.get(),
usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::COPY_SRC,
memory_flags: hal::MemoryFlags::TRANSIENT,
};
let raw = unsafe { device.raw().create_buffer(&stage_desc)? };
let mapping = unsafe { device.raw().map_buffer(&raw, 0..size.get()) }?;
let staging_buffer = StagingBuffer {
raw,
device: device.clone(),
size,
is_coherent: mapping.is_coherent,
ptr: mapping.ptr,
};
Ok(staging_buffer)
}
/// SAFETY: You must not call any functions of `self`
/// until you stopped using the returned pointer.
pub(crate) unsafe fn ptr(&self) -> NonNull<u8> {
self.ptr
}
#[cfg(feature = "trace")]
pub(crate) fn get_data(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.size.get() as usize) }
}
pub(crate) fn write_zeros(&mut self) {
unsafe { core::ptr::write_bytes(self.ptr.as_ptr(), 0, self.size.get() as usize) };
}
pub(crate) fn write(&mut self, data: &[u8]) {
assert!(data.len() >= self.size.get() as usize);
// SAFETY: With the assert above, all of `copy_nonoverlapping`'s
// requirements are satisfied.
unsafe {
core::ptr::copy_nonoverlapping(
data.as_ptr(),
self.ptr.as_ptr(),
self.size.get() as usize,
);
}
}
/// SAFETY: The offsets and size must be in-bounds.
pub(crate) unsafe fn write_with_offset(
&mut self,
data: &[u8],
src_offset: isize,
dst_offset: isize,
size: usize,
) {
unsafe {
core::ptr::copy_nonoverlapping(
data.as_ptr().offset(src_offset),
self.ptr.as_ptr().offset(dst_offset),
size,
);
}
}
pub(crate) fn flush(self) -> FlushedStagingBuffer<A> {
use hal::Device;
let device = self.device.raw();
if !self.is_coherent {
unsafe { device.flush_mapped_ranges(&self.raw, iter::once(0..self.size.get())) };
}
unsafe { device.unmap_buffer(&self.raw) };
let StagingBuffer {
raw, device, size, ..
} = self;
FlushedStagingBuffer {
raw: ManuallyDrop::new(raw),
device,
size,
}
}
}
crate::impl_resource_type!(StagingBuffer);
// TODO: add label
impl<A: HalApi> Labeled for StagingBuffer<A> {
fn label(&self) -> &str {
""
crate::impl_storage_item!(StagingBuffer);
#[derive(Debug)]
pub struct FlushedStagingBuffer<A: HalApi> {
raw: ManuallyDrop<A::Buffer>,
device: Arc<Device<A>>,
pub(crate) size: wgt::BufferSize,
}
impl<A: HalApi> FlushedStagingBuffer<A> {
pub(crate) fn raw(&self) -> &A::Buffer {
&self.raw
}
}
impl<A: HalApi> Drop for FlushedStagingBuffer<A> {
fn drop(&mut self) {
use hal::Device;
resource_log!("Destroy raw StagingBuffer");
// SAFETY: We are in the Drop impl and we don't use self.raw anymore after this point.
let raw = unsafe { ManuallyDrop::take(&mut self.raw) };
unsafe { self.device.raw().destroy_buffer(raw) };
}
}
crate::impl_parent_device!(StagingBuffer);
crate::impl_storage_item!(StagingBuffer);
pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>, Vec<wgt::TextureFormat>>;

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

4
third_party/rust/wgpu-hal/Cargo.toml поставляемый
Просмотреть файл

@ -81,7 +81,7 @@ package = "wgpu-types"
[dev-dependencies]
cfg-if = "1"
env_logger = "0.11"
glam = "0.27.0"
glam = "0.28"
[dev-dependencies.naga]
version = "0.20.0"
@ -165,7 +165,7 @@ version = "0.1"
optional = true
[target."cfg(any(target_os=\"macos\", target_os=\"ios\"))".dependencies.metal]
version = "0.28.0"
version = "0.29.0"
[target."cfg(not(target_arch = \"wasm32\"))".dependencies.ash]
version = "0.38.0"

Просмотреть файл

@ -301,7 +301,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
texture_data.len(),
);
device.unmap_buffer(&staging_buffer).unwrap();
device.unmap_buffer(&staging_buffer);
assert!(mapping.is_coherent);
}
@ -410,7 +410,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
mem::size_of::<Globals>(),
);
device.unmap_buffer(&buffer).unwrap();
device.unmap_buffer(&buffer);
assert!(mapping.is_coherent);
buffer
};
@ -647,7 +647,7 @@ impl<A: hal::Api> Example<A> {
size,
);
assert!(mapping.is_coherent);
self.device.unmap_buffer(&self.local_buffer).unwrap();
self.device.unmap_buffer(&self.local_buffer);
}
}
@ -814,6 +814,8 @@ fn main() {
let example_result = Example::<Api>::init(&window);
let mut example = Some(example_result.expect("Selected backend is not supported"));
println!("Press space to spawn bunnies.");
let mut last_frame_inst = Instant::now();
let (mut frame_count, mut accum_time) = (0, 0.0);

Просмотреть файл

@ -413,7 +413,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
vertices_size_in_bytes,
);
device.unmap_buffer(&vertices_buffer).unwrap();
device.unmap_buffer(&vertices_buffer);
assert!(mapping.is_coherent);
vertices_buffer
@ -438,7 +438,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
indices_size_in_bytes,
);
device.unmap_buffer(&indices_buffer).unwrap();
device.unmap_buffer(&indices_buffer);
assert!(mapping.is_coherent);
indices_buffer
@ -537,7 +537,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
uniforms_size,
);
device.unmap_buffer(&uniform_buffer).unwrap();
device.unmap_buffer(&uniform_buffer);
assert!(mapping.is_coherent);
uniform_buffer
};
@ -680,7 +680,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
instances_buffer_size,
);
device.unmap_buffer(&instances_buffer).unwrap();
device.unmap_buffer(&instances_buffer);
assert!(mapping.is_coherent);
instances_buffer
@ -848,7 +848,7 @@ impl<A: hal::Api> Example<A> {
mapping.ptr.as_ptr(),
instances_buffer_size,
);
self.device.unmap_buffer(&self.instances_buffer).unwrap();
self.device.unmap_buffer(&self.instances_buffer);
assert!(mapping.is_coherent);
}

Просмотреть файл

@ -437,9 +437,8 @@ impl crate::Device for super::Device {
})
}
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), DeviceError> {
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
unsafe { (*buffer.resource).Unmap(0, ptr::null()) };
Ok(())
}
unsafe fn flush_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {}

4
third_party/rust/wgpu-hal/src/empty.rs поставляемый
Просмотреть файл

@ -151,9 +151,7 @@ impl crate::Device for Context {
) -> DeviceResult<crate::BufferMapping> {
Err(crate::DeviceError::Lost)
}
unsafe fn unmap_buffer(&self, buffer: &Resource) -> DeviceResult<()> {
Ok(())
}
unsafe fn unmap_buffer(&self, buffer: &Resource) {}
unsafe fn flush_mapped_ranges<I>(&self, buffer: &Resource, ranges: I) {}
unsafe fn invalidate_mapped_ranges<I>(&self, buffer: &Resource, ranges: I) {}

Просмотреть файл

@ -691,7 +691,7 @@ impl crate::Device for super::Device {
is_coherent,
})
}
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
if let Some(raw) = buffer.raw {
if buffer.data.is_none() {
let gl = &self.shared.context.lock();
@ -700,7 +700,6 @@ impl crate::Device for super::Device {
unsafe { gl.bind_buffer(buffer.target, None) };
}
}
Ok(())
}
unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
where

12
third_party/rust/wgpu-hal/src/lib.rs поставляемый
Просмотреть файл

@ -294,6 +294,7 @@ pub const QUERY_SIZE: wgt::BufferAddress = 8;
pub type Label<'a> = Option<&'a str>;
pub type MemoryRange = Range<wgt::BufferAddress>;
pub type FenceValue = u64;
pub type AtomicFenceValue = std::sync::atomic::AtomicU64;
/// Drop guard to signal wgpu-hal is no longer using an externally created object.
pub type DropGuard = Box<dyn std::any::Any + Send + Sync>;
@ -714,9 +715,13 @@ pub trait Device: WasmNotSendSync {
/// be ordered, so it is meaningful to talk about what must occur
/// "between" them.
///
/// - Zero-sized mappings are not allowed.
///
/// - The returned [`BufferMapping::ptr`] must not be used after a call to
/// [`Device::unmap_buffer`].
///
/// [`MAP_READ`]: BufferUses::MAP_READ
/// [`MAP_WRITE`]: BufferUses::MAP_WRITE
//TODO: clarify if zero-sized mapping is allowed
unsafe fn map_buffer(
&self,
buffer: &<Self::A as Api>::Buffer,
@ -728,7 +733,7 @@ pub trait Device: WasmNotSendSync {
/// # Safety
///
/// - The given `buffer` must be currently mapped.
unsafe fn unmap_buffer(&self, buffer: &<Self::A as Api>::Buffer) -> Result<(), DeviceError>;
unsafe fn unmap_buffer(&self, buffer: &<Self::A as Api>::Buffer);
/// Indicate that CPU writes to mapped buffer memory should be made visible to the GPU.
///
@ -951,6 +956,9 @@ pub trait Queue: WasmNotSendSync {
/// - All calls to this function that include a given [`SurfaceTexture`][st]
/// in `surface_textures` must use the same [`Fence`].
///
/// - The [`Fence`] passed as `signal_fence.0` must remain alive until
/// all submissions that will signal it have completed.
///
/// [`Fence`]: Api::Fence
/// [cb]: Api::CommandBuffer
/// [ce]: Api::CommandEncoder

Просмотреть файл

@ -370,9 +370,7 @@ impl crate::Device for super::Device {
})
}
unsafe fn unmap_buffer(&self, _buffer: &super::Buffer) -> DeviceResult<()> {
Ok(())
}
unsafe fn unmap_buffer(&self, _buffer: &super::Buffer) {}
unsafe fn flush_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {}
unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {}

Просмотреть файл

@ -428,12 +428,14 @@ impl PhysicalDeviceFeatures {
shader_atomic_int64: if device_api_version >= vk::API_VERSION_1_2
|| enabled_extensions.contains(&khr::shader_atomic_int64::NAME)
{
let needed = requested_features.intersects(
wgt::Features::SHADER_INT64_ATOMIC_ALL_OPS
| wgt::Features::SHADER_INT64_ATOMIC_MIN_MAX,
);
Some(
vk::PhysicalDeviceShaderAtomicInt64Features::default()
.shader_buffer_int64_atomics(requested_features.intersects(
wgt::Features::SHADER_INT64_ATOMIC_ALL_OPS
| wgt::Features::SHADER_INT64_ATOMIC_MIN_MAX,
)),
.shader_buffer_int64_atomics(needed)
.shader_shared_int64_atomics(needed),
)
} else {
None
@ -1231,6 +1233,17 @@ impl super::InstanceShared {
features2 = features2.push_next(next);
}
// `VK_KHR_shader_atomic_int64` is promoted to 1.2, but has no
// changes, so we can keep using the extension unconditionally.
if capabilities.device_api_version >= vk::API_VERSION_1_2
|| capabilities.supports_extension(khr::shader_atomic_int64::NAME)
{
let next = features
.shader_atomic_int64
.insert(vk::PhysicalDeviceShaderAtomicInt64Features::default());
features2 = features2.push_next(next);
}
if capabilities.supports_extension(ext::image_robustness::NAME) {
let next = features
.image_robustness

Просмотреть файл

@ -951,12 +951,10 @@ impl crate::Device for super::Device {
Err(crate::DeviceError::OutOfMemory)
}
}
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
// We can only unmap the buffer if it was already mapped successfully.
if let Some(ref block) = buffer.block {
unsafe { block.lock().unmap(&*self.shared) };
Ok(())
} else {
Err(crate::DeviceError::OutOfMemory)
}
}

Просмотреть файл

@ -1 +1 @@
{"files":{"Cargo.toml":"9e0e1fdb813669f5ae21d090c115e11cc57ca0991928633a0b009aa1b5267291","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/counters.rs":"50184b482ae5a725c9bdedf4d21b737430eec07d3c0aaa5ca0c02617e649fc29","src/lib.rs":"9c333ed4794d1f8b184872f0fa016b87ce08472c71b7965aee18d0ce24686fda","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}
{"files":{"Cargo.toml":"38e52f274586168e438609dce611fbc639397c5860a0766cf2c1fdf9873a05cb","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/counters.rs":"50184b482ae5a725c9bdedf4d21b737430eec07d3c0aaa5ca0c02617e649fc29","src/lib.rs":"9c333ed4794d1f8b184872f0fa016b87ce08472c71b7965aee18d0ce24686fda","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}

2
third_party/rust/wgpu-types/Cargo.toml поставляемый
Просмотреть файл

@ -45,7 +45,7 @@ features = ["derive"]
optional = true
[dev-dependencies]
serde_json = "1.0.119"
serde_json = "1.0.120"
[dev-dependencies.serde]
version = "1"