From e8a60d2eabf33bae9caa62fe9d3b95f636bdaca5 Mon Sep 17 00:00:00 2001 From: Dzmitry Malyshau Date: Mon, 13 Apr 2020 13:42:15 +0000 Subject: [PATCH] Bug 1624174 - Update wgpu to get the coordinate spaces right r=jgilbert The updated wgpu has the coordinate space fixes. Depends on - https://phabricator.services.mozilla.com/D70421 - https://phabricator.services.mozilla.com/D70432 - https://phabricator.services.mozilla.com/D70646 Differential Revision: https://phabricator.services.mozilla.com/D70140 --HG-- rename : third_party/rust/rendy-descriptor/Cargo.toml => third_party/rust/gfx-descriptor/Cargo.toml rename : third_party/rust/rendy-memory/Cargo.toml => third_party/rust/gfx-memory/Cargo.toml rename : third_party/rust/rendy-memory/src/allocator/dynamic.rs => third_party/rust/gfx-memory/src/allocator/general.rs rename : third_party/rust/rendy-memory/src/heaps/heap.rs => third_party/rust/gfx-memory/src/heaps/heap.rs rename : third_party/rust/rendy-memory/src/utilization.rs => third_party/rust/gfx-memory/src/stats.rs extra : moz-landing-system : lando --- .cargo/config.in | 7 +- Cargo.lock | 214 +- Cargo.toml | 2 +- dom/webgpu/RenderPassEncoder.cpp | 2 +- dom/webgpu/ipc/PWebGPU.ipdl | 4 +- dom/webgpu/ipc/WebGPUChild.cpp | 26 +- dom/webgpu/ipc/WebGPUParent.cpp | 22 +- dom/webgpu/ipc/WebGPUParent.h | 5 +- dom/webgpu/ipc/WebGPUSerialize.h | 7 +- dom/webgpu/ipc/WebGPUTypes.h | 11 + gfx/wgpu/Cargo.lock | 227 +- gfx/wgpu/README.md | 4 +- gfx/wgpu/examples/compute/main.c | 8 +- gfx/wgpu/examples/triangle/main.c | 99 +- gfx/wgpu/ffi/wgpu-remote.h | 1396 +- gfx/wgpu/ffi/wgpu.h | 82 +- gfx/wgpu/wgpu-core/Cargo.toml | 26 +- gfx/wgpu/wgpu-core/src/binding_model.rs | 17 +- gfx/wgpu/wgpu-core/src/command/allocator.rs | 7 +- gfx/wgpu/wgpu-core/src/command/compute.rs | 37 +- gfx/wgpu/wgpu-core/src/command/mod.rs | 59 +- gfx/wgpu/wgpu-core/src/command/render.rs | 142 +- gfx/wgpu/wgpu-core/src/command/transfer.rs | 4 +- gfx/wgpu/wgpu-core/src/conv.rs | 122 +- 
gfx/wgpu/wgpu-core/src/device/life.rs | 118 +- gfx/wgpu/wgpu-core/src/device/mod.rs | 363 +- gfx/wgpu/wgpu-core/src/hub.rs | 27 +- gfx/wgpu/wgpu-core/src/id.rs | 42 +- gfx/wgpu/wgpu-core/src/instance.rs | 87 +- gfx/wgpu/wgpu-core/src/lib.rs | 37 +- gfx/wgpu/wgpu-core/src/pipeline.rs | 17 + gfx/wgpu/wgpu-core/src/resource.rs | 136 +- gfx/wgpu/wgpu-core/src/swap_chain.rs | 8 +- gfx/wgpu/wgpu-core/src/track/buffer.rs | 2 +- gfx/wgpu/wgpu-core/src/track/mod.rs | 20 +- gfx/wgpu/wgpu-native/cbindgen.toml | 8 +- gfx/wgpu/wgpu-native/src/command.rs | 8 +- gfx/wgpu/wgpu-native/src/device.rs | 39 +- gfx/wgpu/wgpu-remote/cbindgen.toml | 7 +- gfx/wgpu/wgpu-remote/src/identity.rs | 30 +- gfx/wgpu/wgpu-remote/src/server.rs | 43 +- gfx/wgpu/wgpu-types/Cargo.toml | 2 +- gfx/wgpu/wgpu-types/src/lib.rs | 265 +- third_party/rust/ash/.cargo-checksum.json | 2 +- third_party/rust/ash/Cargo.toml | 7 +- third_party/rust/ash/output | 0 third_party/rust/ash/src/device.rs | 527 +- third_party/rust/ash/src/entry.rs | 105 +- .../ash/src/extensions/experimental/amd.rs | 4 +- .../ash/src/extensions/ext/debug_marker.rs | 22 +- .../ash/src/extensions/ext/debug_report.rs | 22 +- .../ash/src/extensions/ext/debug_utils.rs | 38 +- .../ash/src/extensions/khr/android_surface.rs | 20 +- .../rust/ash/src/extensions/khr/display.rs | 203 + .../src/extensions/khr/display_swapchain.rs | 20 +- .../src/extensions/khr/external_memory_fd.rs | 66 + .../rust/ash/src/extensions/khr/mod.rs | 10 + .../ash/src/extensions/khr/push_descriptor.rs | 75 + .../ash/src/extensions/khr/ray_tracing.rs | 369 + .../rust/ash/src/extensions/khr/surface.rs | 42 +- .../rust/ash/src/extensions/khr/swapchain.rs | 32 +- .../src/extensions/khr/timeline_semaphore.rs | 87 + .../ash/src/extensions/khr/wayland_surface.rs | 38 +- .../ash/src/extensions/khr/win32_surface.rs | 36 +- .../ash/src/extensions/khr/xcb_surface.rs | 40 +- .../ash/src/extensions/khr/xlib_surface.rs | 40 +- .../ash/src/extensions/mvk/ios_surface.rs | 20 +- 
.../ash/src/extensions/mvk/macos_surface.rs | 20 +- .../rust/ash/src/extensions/nv/mesh_shader.rs | 14 +- .../rust/ash/src/extensions/nv/ray_tracing.rs | 46 +- third_party/rust/ash/src/instance.rs | 106 +- third_party/rust/ash/src/lib.rs | 19 +- third_party/rust/ash/src/prelude.rs | 2 +- third_party/rust/ash/src/util.rs | 8 +- third_party/rust/ash/src/version.rs | 6 +- third_party/rust/ash/src/vk.rs | 29374 +++++++++++++--- .../rust/ash/tests/constant_size_arrays.rs | 15 +- third_party/rust/ash/tests/display.rs | 1 - .../rust/colorful/.cargo-checksum.json | 1 - third_party/rust/colorful/Cargo.toml | 26 - third_party/rust/colorful/CodeOfConduct.md | 74 - third_party/rust/colorful/README.md | 196 - third_party/rust/colorful/license | 9 - third_party/rust/colorful/rustfmt.toml | 5 - .../rust/colorful/src/core/color_string.rs | 134 - third_party/rust/colorful/src/core/colors.rs | 847 - third_party/rust/colorful/src/core/hsl.rs | 105 - third_party/rust/colorful/src/core/mod.rs | 36 - third_party/rust/colorful/src/core/rgb.rs | 90 - third_party/rust/colorful/src/core/style.rs | 24 - third_party/rust/colorful/src/core/symbols.rs | 55 - third_party/rust/colorful/src/lib.rs | 316 - .../rust/colorful/tests/test_all_color.rs | 280 - .../rust/colorful/tests/test_animation.rs | 31 - third_party/rust/colorful/tests/test_basic.rs | 53 - third_party/rust/colorful/tests/test_extra.rs | 10 - .../rust/colorful/tests/test_gradient.rs | 16 - third_party/rust/colorful/tests/test_hsl.rs | 10 - .../rust/gfx-auxil/.cargo-checksum.json | 2 +- third_party/rust/gfx-auxil/Cargo.toml | 7 +- third_party/rust/gfx-auxil/src/lib.rs | 105 +- .../gfx-backend-dx11/.cargo-checksum.json | 2 +- third_party/rust/gfx-backend-dx11/Cargo.toml | 15 +- third_party/rust/gfx-backend-dx11/README.md | 26 +- .../rust/gfx-backend-dx11/shaders/blit.hlsl | 126 +- .../rust/gfx-backend-dx11/shaders/clear.hlsl | 44 +- .../rust/gfx-backend-dx11/shaders/copy.hlsl | 1034 +- third_party/rust/gfx-backend-dx11/src/conv.rs | 
1651 +- .../rust/gfx-backend-dx11/src/debug.rs | 186 +- .../rust/gfx-backend-dx11/src/device.rs | 4701 +-- third_party/rust/gfx-backend-dx11/src/dxgi.rs | 437 +- .../rust/gfx-backend-dx11/src/internal.rs | 2620 +- third_party/rust/gfx-backend-dx11/src/lib.rs | 6869 ++-- .../rust/gfx-backend-dx11/src/shader.rs | 588 +- .../gfx-backend-dx12/.cargo-checksum.json | 2 +- third_party/rust/gfx-backend-dx12/Cargo.toml | 24 +- .../rust/gfx-backend-dx12/src/command.rs | 131 +- third_party/rust/gfx-backend-dx12/src/conv.rs | 92 +- .../gfx-backend-dx12/src/descriptors_cpu.rs | 11 +- .../rust/gfx-backend-dx12/src/device.rs | 690 +- .../rust/gfx-backend-dx12/src/internal.rs | 28 +- third_party/rust/gfx-backend-dx12/src/lib.rs | 98 +- third_party/rust/gfx-backend-dx12/src/pool.rs | 7 +- .../rust/gfx-backend-dx12/src/resource.rs | 118 +- .../gfx-backend-dx12/src/root_constants.rs | 4 +- .../rust/gfx-backend-dx12/src/window.rs | 23 +- .../gfx-backend-empty/.cargo-checksum.json | 2 +- third_party/rust/gfx-backend-empty/Cargo.toml | 4 +- third_party/rust/gfx-backend-empty/src/lib.rs | 2043 +- .../gfx-backend-metal/.cargo-checksum.json | 2 +- third_party/rust/gfx-backend-metal/Cargo.toml | 13 +- .../rust/gfx-backend-metal/src/command.rs | 115 +- .../rust/gfx-backend-metal/src/conversions.rs | 11 +- .../rust/gfx-backend-metal/src/device.rs | 284 +- .../rust/gfx-backend-metal/src/internal.rs | 9 +- third_party/rust/gfx-backend-metal/src/lib.rs | 39 +- .../rust/gfx-backend-metal/src/native.rs | 62 +- .../rust/gfx-backend-metal/src/soft.rs | 1 - .../rust/gfx-backend-metal/src/window.rs | 15 +- .../gfx-backend-vulkan/.cargo-checksum.json | 2 +- .../rust/gfx-backend-vulkan/Cargo.toml | 10 +- third_party/rust/gfx-backend-vulkan/README.md | 26 +- .../rust/gfx-backend-vulkan/src/command.rs | 1974 +- .../rust/gfx-backend-vulkan/src/conv.rs | 1196 +- .../rust/gfx-backend-vulkan/src/device.rs | 4611 ++- .../rust/gfx-backend-vulkan/src/info.rs | 10 +- .../rust/gfx-backend-vulkan/src/lib.rs | 2946 
+- .../rust/gfx-backend-vulkan/src/native.rs | 360 +- .../rust/gfx-backend-vulkan/src/pool.rs | 124 +- .../rust/gfx-backend-vulkan/src/window.rs | 1209 +- .../rust/gfx-descriptor/.cargo-checksum.json | 1 + .../Cargo.toml | 29 +- .../rust/gfx-descriptor/src/allocator.rs | 364 + third_party/rust/gfx-descriptor/src/counts.rs | 241 + third_party/rust/gfx-descriptor/src/lib.rs | 4 + third_party/rust/gfx-hal/.cargo-checksum.json | 2 +- third_party/rust/gfx-hal/Cargo.toml | 7 +- third_party/rust/gfx-hal/src/adapter.rs | 316 +- third_party/rust/gfx-hal/src/buffer.rs | 382 +- third_party/rust/gfx-hal/src/command/clear.rs | 140 +- third_party/rust/gfx-hal/src/command/mod.rs | 1132 +- .../rust/gfx-hal/src/command/structs.rs | 172 +- third_party/rust/gfx-hal/src/device.rs | 1900 +- third_party/rust/gfx-hal/src/format.rs | 1247 +- third_party/rust/gfx-hal/src/image.rs | 1390 +- third_party/rust/gfx-hal/src/lib.rs | 943 +- third_party/rust/gfx-hal/src/memory.rs | 222 +- third_party/rust/gfx-hal/src/pass.rs | 365 +- third_party/rust/gfx-hal/src/pool.rs | 90 +- third_party/rust/gfx-hal/src/pso/compute.rs | 62 +- .../rust/gfx-hal/src/pso/descriptor.rs | 561 +- third_party/rust/gfx-hal/src/pso/graphics.rs | 584 +- .../rust/gfx-hal/src/pso/input_assembler.rs | 291 +- third_party/rust/gfx-hal/src/pso/mod.rs | 612 +- .../rust/gfx-hal/src/pso/output_merger.rs | 721 +- .../rust/gfx-hal/src/pso/specialization.rs | 264 +- third_party/rust/gfx-hal/src/query.rs | 232 +- third_party/rust/gfx-hal/src/queue/family.rs | 104 +- third_party/rust/gfx-hal/src/queue/mod.rs | 298 +- third_party/rust/gfx-hal/src/range.rs | 59 - third_party/rust/gfx-hal/src/window.rs | 1248 +- .../rust/gfx-memory/.cargo-checksum.json | 1 + .../{rendy-memory => gfx-memory}/Cargo.toml | 36 +- .../gfx-memory/src/allocator/dedicated.rs | 171 + .../src/allocator/general.rs} | 1317 +- .../rust/gfx-memory/src/allocator/linear.rs | 277 + .../rust/gfx-memory/src/allocator/mod.rs | 79 + third_party/rust/gfx-memory/src/block.rs | 
25 + .../src/heaps/heap.rs | 19 +- .../rust/gfx-memory/src/heaps/memory_type.rs | 131 + third_party/rust/gfx-memory/src/heaps/mod.rs | 277 + third_party/rust/gfx-memory/src/lib.rs | 75 + third_party/rust/gfx-memory/src/mapping.rs | 203 + third_party/rust/gfx-memory/src/memory.rs | 63 + .../src/stats.rs} | 277 +- third_party/rust/gfx-memory/src/usage.rs | 64 + .../rust/libloading/.cargo-checksum.json | 2 +- third_party/rust/libloading/Cargo.toml | 4 +- third_party/rust/libloading/build.rs | 1 + third_party/rust/libloading/src/changelog.rs | 14 +- third_party/rust/libloading/src/lib.rs | 2 +- .../rust/libloading/src/os/unix/mod.rs | 32 + .../rust/libloading/src/os/windows/mod.rs | 28 +- third_party/rust/libloading/src/util.rs | 2 +- .../peek-poke-derive/.cargo-checksum.json | 2 +- third_party/rust/peek-poke-derive/COPYRIGHT | 14 - third_party/rust/peek-poke-derive/Cargo.toml | 39 +- third_party/rust/peek-poke-derive/README.md | 3 +- third_party/rust/peek-poke-derive/src/lib.rs | 281 +- .../peek-poke-derive/src/max_size_expr.rs | 87 - .../peek-poke-derive/src/peek_from_expr.rs | 206 - .../rust/peek-poke-derive/src/peek_poke.rs | 154 - .../peek-poke-derive/src/poke_into_expr.rs | 127 - .../rust/peek-poke/.cargo-checksum.json | 2 +- third_party/rust/peek-poke/COPYRIGHT | 14 - third_party/rust/peek-poke/Cargo.toml | 42 +- third_party/rust/peek-poke/README.md | 3 +- .../rust/peek-poke/benches/versus_bincode.rs | 186 - .../rust/peek-poke/examples/webrender.rs | 111 - third_party/rust/peek-poke/src/euclid.rs | 170 + third_party/rust/peek-poke/src/lib.rs | 210 +- third_party/rust/peek-poke/src/slice_ext.rs | 19 + third_party/rust/peek-poke/src/vec_ext.rs | 26 + third_party/rust/peek-poke/tests/max_size.rs | 17 +- .../rust/peek-poke/tests/round_trip.rs | 42 +- .../rust/relevant/.cargo-checksum.json | 1 - third_party/rust/relevant/Cargo.toml | 42 - third_party/rust/relevant/LICENSE-APACHE | 201 - third_party/rust/relevant/LICENSE-MIT | 25 - third_party/rust/relevant/README.md 
| 54 - third_party/rust/relevant/src/lib.rs | 89 - .../rendy-descriptor/.cargo-checksum.json | 1 - .../rust/rendy-descriptor/src/allocator.rs | 398 - third_party/rust/rendy-descriptor/src/lib.rs | 4 - .../rust/rendy-descriptor/src/ranges.rs | 187 - .../rust/rendy-memory/.cargo-checksum.json | 1 - .../rendy-memory/src/allocator/dedicated.rs | 188 - .../rust/rendy-memory/src/allocator/linear.rs | 325 - .../rust/rendy-memory/src/allocator/mod.rs | 50 - third_party/rust/rendy-memory/src/block.rs | 36 - .../rendy-memory/src/heaps/memory_type.rs | 157 - .../rust/rendy-memory/src/heaps/mod.rs | 324 - third_party/rust/rendy-memory/src/lib.rs | 31 - .../rust/rendy-memory/src/mapping/mod.rs | 288 - .../rust/rendy-memory/src/mapping/range.rs | 101 - .../rust/rendy-memory/src/mapping/write.rs | 73 - third_party/rust/rendy-memory/src/memory.rs | 82 - third_party/rust/rendy-memory/src/usage.rs | 210 - third_party/rust/rendy-memory/src/util.rs | 125 - .../rust/shared_library/.cargo-checksum.json | 1 - third_party/rust/shared_library/Cargo.toml | 24 - .../rust/shared_library/LICENSE-APACHE | 201 - third_party/rust/shared_library/LICENSE-MIT | 25 - .../shared_library/src/dynamic_library.rs | 410 - third_party/rust/shared_library/src/lib.rs | 175 - .../rust/smallvec-0.6.10/.cargo-checksum.json | 1 - third_party/rust/smallvec-0.6.10/Cargo.toml | 39 - .../rust/smallvec-0.6.10/LICENSE-APACHE | 201 - third_party/rust/smallvec-0.6.10/LICENSE-MIT | 25 - third_party/rust/smallvec-0.6.10/README.md | 8 - .../rust/smallvec-0.6.10/benches/bench.rs | 295 - third_party/rust/smallvec-0.6.10/lib.rs | 2360 -- .../rust/smallvec/.cargo-checksum.json | 2 +- third_party/rust/smallvec/Cargo.toml | 3 +- third_party/rust/smallvec/lib.rs | 67 +- .../spirv-cross-internal/.cargo-checksum.json | 2 +- .../rust/spirv-cross-internal/Cargo.toml | 4 + .../rust/spirv-cross-internal/build.rs | 8 +- .../src/bindings_native.rs | 3450 +- .../spirv-cross-internal/src/bindings_wasm.rs | 3030 +- 
.../src/bindings_wasm_functions.rs | 37 + .../rust/spirv-cross-internal/src/compiler.rs | 20 +- .../rust/spirv-cross-internal/src/msl.rs | 103 +- .../rust/spirv-cross-internal/src/spirv.rs | 10 +- .../src/vendor/SPIRV-Cross/.travis.yml | 72 + .../src/vendor/SPIRV-Cross/CMakeLists.txt | 22 +- .../src/vendor/SPIRV-Cross/CODE_OF_CONDUCT.md | 1 + .../src/vendor/SPIRV-Cross/README.md | 493 + .../src/vendor/SPIRV-Cross/appveyor.yml | 31 + .../vendor/SPIRV-Cross/cmake/gitversion.in.h | 6 - .../src/vendor/SPIRV-Cross/format_all.sh | 8 - .../src/vendor/SPIRV-Cross/gn/BUILD.gn | 63 - .../src/vendor/SPIRV-Cross/main.cpp | 27 + .../src/vendor/SPIRV-Cross/spirv_cross_c.cpp | 49 + .../src/vendor/SPIRV-Cross/spirv_cross_c.h | 29 +- .../src/vendor/SPIRV-Cross/spirv_glsl.cpp | 29 +- .../src/vendor/SPIRV-Cross/spirv_glsl.hpp | 3 +- .../src/vendor/SPIRV-Cross/spirv_hlsl.cpp | 81 +- .../src/vendor/SPIRV-Cross/spirv_hlsl.hpp | 32 +- .../src/vendor/SPIRV-Cross/spirv_msl.cpp | 16 +- .../rust/spirv-cross-internal/src/wrapper.cpp | 29 +- .../rust/spirv-cross-internal/src/wrapper.hpp | 6 +- .../tests/shaders/multiple_entry_points.cl | 2 + .../shaders/multiple_entry_points.cl.spv | Bin 0 -> 432 bytes .../tests/shaders/two_ubo.vert | 20 + .../tests/shaders/two_ubo.vert.spv | Bin 0 -> 1480 bytes .../spirv-cross-internal/tests/spirv_tests.rs | 70 +- .../rust/spirv_cross/.cargo-checksum.json | 2 +- third_party/rust/spirv_cross/Cargo.toml | 2 +- 299 files changed, 61867 insertions(+), 43810 deletions(-) delete mode 100644 third_party/rust/ash/output mode change 100644 => 100755 third_party/rust/ash/src/extensions/ext/debug_marker.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/ext/debug_report.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/ext/debug_utils.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/khr/android_surface.rs create mode 100755 third_party/rust/ash/src/extensions/khr/display.rs mode change 100644 => 100755 
third_party/rust/ash/src/extensions/khr/display_swapchain.rs create mode 100644 third_party/rust/ash/src/extensions/khr/external_memory_fd.rs create mode 100644 third_party/rust/ash/src/extensions/khr/push_descriptor.rs create mode 100644 third_party/rust/ash/src/extensions/khr/ray_tracing.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/khr/surface.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/khr/swapchain.rs create mode 100644 third_party/rust/ash/src/extensions/khr/timeline_semaphore.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/khr/wayland_surface.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/khr/win32_surface.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/khr/xcb_surface.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/khr/xlib_surface.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/mvk/ios_surface.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/mvk/macos_surface.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/nv/mesh_shader.rs mode change 100644 => 100755 third_party/rust/ash/src/extensions/nv/ray_tracing.rs delete mode 100644 third_party/rust/colorful/.cargo-checksum.json delete mode 100644 third_party/rust/colorful/Cargo.toml delete mode 100644 third_party/rust/colorful/CodeOfConduct.md delete mode 100644 third_party/rust/colorful/README.md delete mode 100644 third_party/rust/colorful/license delete mode 100644 third_party/rust/colorful/rustfmt.toml delete mode 100644 third_party/rust/colorful/src/core/color_string.rs delete mode 100644 third_party/rust/colorful/src/core/colors.rs delete mode 100644 third_party/rust/colorful/src/core/hsl.rs delete mode 100644 third_party/rust/colorful/src/core/mod.rs delete mode 100644 third_party/rust/colorful/src/core/rgb.rs delete mode 100644 third_party/rust/colorful/src/core/style.rs delete mode 100644 
third_party/rust/colorful/src/core/symbols.rs delete mode 100644 third_party/rust/colorful/src/lib.rs delete mode 100644 third_party/rust/colorful/tests/test_all_color.rs delete mode 100644 third_party/rust/colorful/tests/test_animation.rs delete mode 100644 third_party/rust/colorful/tests/test_basic.rs delete mode 100644 third_party/rust/colorful/tests/test_extra.rs delete mode 100644 third_party/rust/colorful/tests/test_gradient.rs delete mode 100644 third_party/rust/colorful/tests/test_hsl.rs mode change 100755 => 100644 third_party/rust/gfx-auxil/src/lib.rs mode change 100755 => 100644 third_party/rust/gfx-backend-empty/src/lib.rs create mode 100644 third_party/rust/gfx-descriptor/.cargo-checksum.json rename third_party/rust/{rendy-descriptor => gfx-descriptor}/Cargo.toml (60%) create mode 100644 third_party/rust/gfx-descriptor/src/allocator.rs create mode 100644 third_party/rust/gfx-descriptor/src/counts.rs create mode 100644 third_party/rust/gfx-descriptor/src/lib.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/adapter.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/buffer.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/command/clear.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/command/mod.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/command/structs.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/device.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/format.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/image.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/lib.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/memory.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/pass.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/pool.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/pso/compute.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/pso/descriptor.rs mode change 100755 => 100644 
third_party/rust/gfx-hal/src/pso/graphics.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/pso/input_assembler.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/pso/mod.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/pso/output_merger.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/pso/specialization.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/query.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/queue/family.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/queue/mod.rs delete mode 100755 third_party/rust/gfx-hal/src/range.rs mode change 100755 => 100644 third_party/rust/gfx-hal/src/window.rs create mode 100644 third_party/rust/gfx-memory/.cargo-checksum.json rename third_party/rust/{rendy-memory => gfx-memory}/Cargo.toml (62%) create mode 100644 third_party/rust/gfx-memory/src/allocator/dedicated.rs rename third_party/rust/{rendy-memory/src/allocator/dynamic.rs => gfx-memory/src/allocator/general.rs} (50%) create mode 100644 third_party/rust/gfx-memory/src/allocator/linear.rs create mode 100644 third_party/rust/gfx-memory/src/allocator/mod.rs create mode 100644 third_party/rust/gfx-memory/src/block.rs rename third_party/rust/{rendy-memory => gfx-memory}/src/heaps/heap.rs (70%) create mode 100644 third_party/rust/gfx-memory/src/heaps/memory_type.rs create mode 100644 third_party/rust/gfx-memory/src/heaps/mod.rs create mode 100644 third_party/rust/gfx-memory/src/lib.rs create mode 100644 third_party/rust/gfx-memory/src/mapping.rs create mode 100644 third_party/rust/gfx-memory/src/memory.rs rename third_party/rust/{rendy-memory/src/utilization.rs => gfx-memory/src/stats.rs} (89%) create mode 100644 third_party/rust/gfx-memory/src/usage.rs delete mode 100644 third_party/rust/peek-poke-derive/COPYRIGHT delete mode 100644 third_party/rust/peek-poke-derive/src/max_size_expr.rs delete mode 100644 third_party/rust/peek-poke-derive/src/peek_from_expr.rs delete mode 100644 
third_party/rust/peek-poke-derive/src/peek_poke.rs delete mode 100644 third_party/rust/peek-poke-derive/src/poke_into_expr.rs delete mode 100644 third_party/rust/peek-poke/COPYRIGHT delete mode 100644 third_party/rust/peek-poke/benches/versus_bincode.rs delete mode 100644 third_party/rust/peek-poke/examples/webrender.rs create mode 100644 third_party/rust/peek-poke/src/euclid.rs create mode 100644 third_party/rust/peek-poke/src/slice_ext.rs create mode 100644 third_party/rust/peek-poke/src/vec_ext.rs delete mode 100644 third_party/rust/relevant/.cargo-checksum.json delete mode 100644 third_party/rust/relevant/Cargo.toml delete mode 100644 third_party/rust/relevant/LICENSE-APACHE delete mode 100644 third_party/rust/relevant/LICENSE-MIT delete mode 100644 third_party/rust/relevant/README.md delete mode 100644 third_party/rust/relevant/src/lib.rs delete mode 100644 third_party/rust/rendy-descriptor/.cargo-checksum.json delete mode 100644 third_party/rust/rendy-descriptor/src/allocator.rs delete mode 100644 third_party/rust/rendy-descriptor/src/lib.rs delete mode 100644 third_party/rust/rendy-descriptor/src/ranges.rs delete mode 100644 third_party/rust/rendy-memory/.cargo-checksum.json delete mode 100644 third_party/rust/rendy-memory/src/allocator/dedicated.rs delete mode 100644 third_party/rust/rendy-memory/src/allocator/linear.rs delete mode 100644 third_party/rust/rendy-memory/src/allocator/mod.rs delete mode 100644 third_party/rust/rendy-memory/src/block.rs delete mode 100644 third_party/rust/rendy-memory/src/heaps/memory_type.rs delete mode 100644 third_party/rust/rendy-memory/src/heaps/mod.rs delete mode 100644 third_party/rust/rendy-memory/src/lib.rs delete mode 100644 third_party/rust/rendy-memory/src/mapping/mod.rs delete mode 100644 third_party/rust/rendy-memory/src/mapping/range.rs delete mode 100644 third_party/rust/rendy-memory/src/mapping/write.rs delete mode 100644 third_party/rust/rendy-memory/src/memory.rs delete mode 100644 
third_party/rust/rendy-memory/src/usage.rs delete mode 100644 third_party/rust/rendy-memory/src/util.rs delete mode 100644 third_party/rust/shared_library/.cargo-checksum.json delete mode 100644 third_party/rust/shared_library/Cargo.toml delete mode 100644 third_party/rust/shared_library/LICENSE-APACHE delete mode 100644 third_party/rust/shared_library/LICENSE-MIT delete mode 100644 third_party/rust/shared_library/src/dynamic_library.rs delete mode 100644 third_party/rust/shared_library/src/lib.rs delete mode 100644 third_party/rust/smallvec-0.6.10/.cargo-checksum.json delete mode 100644 third_party/rust/smallvec-0.6.10/Cargo.toml delete mode 100644 third_party/rust/smallvec-0.6.10/LICENSE-APACHE delete mode 100644 third_party/rust/smallvec-0.6.10/LICENSE-MIT delete mode 100644 third_party/rust/smallvec-0.6.10/README.md delete mode 100644 third_party/rust/smallvec-0.6.10/benches/bench.rs delete mode 100644 third_party/rust/smallvec-0.6.10/lib.rs create mode 100644 third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/.travis.yml create mode 100644 third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/CODE_OF_CONDUCT.md create mode 100644 third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/README.md create mode 100644 third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/appveyor.yml delete mode 100644 third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/cmake/gitversion.in.h delete mode 100755 third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/format_all.sh delete mode 100644 third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/gn/BUILD.gn create mode 100644 third_party/rust/spirv-cross-internal/tests/shaders/multiple_entry_points.cl create mode 100644 third_party/rust/spirv-cross-internal/tests/shaders/multiple_entry_points.cl.spv create mode 100644 third_party/rust/spirv-cross-internal/tests/shaders/two_ubo.vert create mode 100644 third_party/rust/spirv-cross-internal/tests/shaders/two_ubo.vert.spv diff 
--git a/.cargo/config.in b/.cargo/config.in index 6eed3457a55b..929d6d0fb201 100644 --- a/.cargo/config.in +++ b/.cargo/config.in @@ -28,15 +28,10 @@ replace-with = "vendored-sources" rev = "60952724c192a54f8d46c0f481f4bd8967c9bb6a" [source."https://github.com/kvark/spirv_cross"] -branch = "wgpu" +branch = "wgpu2" git = "https://github.com/kvark/spirv_cross" replace-with = "vendored-sources" -[source."https://github.com/kvark/peek-poke"] -git = "https://github.com/kvark/peek-poke" -replace-with = "vendored-sources" -rev = "969bd7fe2be1a83f87916dc8b388c63cfd457075" - [source."https://github.com/jfkthame/mapped_hyph.git"] git = "https://github.com/jfkthame/mapped_hyph.git" replace-with = "vendored-sources" diff --git a/Cargo.lock b/Cargo.lock index 95c4a203fc6c..10573319eaf3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -54,11 +54,11 @@ checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "ash" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "003d1fb2eb12eb06d4a03dbe02eea67a9fac910fa97932ab9e3a75b96a1ea5e5" +checksum = "69daec0742947f33a85931fa3cb0ce5f07929159dcbd1f0cbb5b2912e2978509" dependencies = [ - "shared_library", + "libloading", ] [[package]] @@ -201,7 +201,7 @@ dependencies = [ "cranelift-wasm", "env_logger", "log", - "smallvec 1.2.0", + "smallvec", ] [[package]] @@ -595,12 +595,6 @@ dependencies = [ "objc", ] -[[package]] -name = "colorful" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bca1619ff57dd7a56b58a8e25ef4199f123e78e503fe1653410350a1b98ae65" - [[package]] name = "comedy" version = "0.1.0" @@ -728,7 +722,7 @@ dependencies = [ "cranelift-codegen-shared", "cranelift-entity 0.62.0", "log", - "smallvec 1.2.0", + "smallvec", "target-lexicon 0.10.0", "thiserror", ] @@ -764,7 +758,7 @@ source = "git+https://github.com/bytecodealliance/wasmtime?rev=6a68130d5b0296379 dependencies = [ 
"cranelift-codegen", "log", - "smallvec 1.2.0", + "smallvec", "target-lexicon 0.10.0", ] @@ -857,7 +851,7 @@ dependencies = [ "phf", "proc-macro2", "quote", - "smallvec 1.2.0", + "smallvec", "syn", ] @@ -1232,7 +1226,7 @@ name = "fallible" version = "0.0.1" dependencies = [ "hashglobe", - "smallvec 1.2.0", + "smallvec", ] [[package]] @@ -1310,7 +1304,7 @@ dependencies = [ "intl-memoizer", "intl_pluralrules", "rental", - "smallvec 1.2.0", + "smallvec", "unic-langid", ] @@ -1496,7 +1490,7 @@ dependencies = [ "parking_lot", "selectors", "servo_arc", - "smallvec 1.2.0", + "smallvec", "style", "style_traits", "to_shmem", @@ -1525,9 +1519,9 @@ dependencies = [ [[package]] name = "gfx-auxil" -version = "0.1.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572eee952a9a23c99cfe3e4fd95d277784058a89ac3c77ff6fa3d80a4e321919" +checksum = "3b46e6f0031330a0be08d17820f2dcaaa91cb36710a97a9500cb4f1c36e785c8" dependencies = [ "fxhash", "gfx-hal", @@ -1536,9 +1530,9 @@ dependencies = [ [[package]] name = "gfx-backend-dx11" -version = "0.4.6" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7527cfcd7d1eec6b99f81891293bdd2a41d044ace009717264e5f3b10ce5b86" +checksum = "b148219292624126f78245e50a9720d95ea149a415ce8ce73ab7014205301b88" dependencies = [ "bitflags", "gfx-auxil", @@ -1548,7 +1542,7 @@ dependencies = [ "parking_lot", "range-alloc", "raw-window-handle", - "smallvec 0.6.10", + "smallvec", "spirv_cross", "winapi 0.3.7", "wio", @@ -1556,9 +1550,9 @@ dependencies = [ [[package]] name = "gfx-backend-dx12" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e913cc800fb12eaba2c420091a02aca9aafbefd672600dfc5b52654343d341" +checksum = "a0e526746379e974501551b08958947e67a81b5ea8cdc717a000cdd72577da05" dependencies = [ "bitflags", "d3d12", @@ -1567,16 +1561,16 @@ dependencies = [ "log", "range-alloc", "raw-window-handle", - 
"smallvec 0.6.10", + "smallvec", "spirv_cross", "winapi 0.3.7", ] [[package]] name = "gfx-backend-empty" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d383e6bc48867cb37d298a20139fd1eec298f8f6d594690cd1c50ef25470cc7" +checksum = "b67bd2d7bc022b257ddbdabc5fa3b10c29c292372c3409f2b6a6e3f4e11cdb85" dependencies = [ "gfx-hal", "raw-window-handle", @@ -1584,9 +1578,9 @@ dependencies = [ [[package]] name = "gfx-backend-metal" -version = "0.4.5" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b6130b9a72129ebb5c91d3d75a142a7fa54dcc112603231582e3fdc0b84247" +checksum = "cfe128c29675b5afc8acdda1dfe096d6abd5e3528059ab0b98bda8215d8beed9" dependencies = [ "arrayvec", "bitflags", @@ -1604,16 +1598,16 @@ dependencies = [ "parking_lot", "range-alloc", "raw-window-handle", - "smallvec 0.6.10", + "smallvec", "spirv_cross", "storage-map", ] [[package]] name = "gfx-backend-vulkan" -version = "0.4.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b8d901941d1734d307dacd8e5f00c89ee8fb8e78b4dab3edd91248150b26b4" +checksum = "ebd1dee09bd8d8f1ba52c5ba22d1f70c7ffa990c5eb245eb3ef2d0206f631673" dependencies = [ "arrayvec", "ash", @@ -1624,20 +1618,43 @@ dependencies = [ "log", "objc", "raw-window-handle", - "smallvec 0.6.10", + "smallvec", "winapi 0.3.7", "x11", ] [[package]] -name = "gfx-hal" -version = "0.4.1" +name = "gfx-descriptor" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c88981665c780447bb08eb099e1ded330754a7246719bab927ee4a949c0ba7f" +checksum = "1bf35f5d66d1bc56e63e68d7528441453f25992bd954b84309d23c659df2c5da" +dependencies = [ + "fxhash", + "gfx-hal", + "log", +] + +[[package]] +name = "gfx-hal" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc96180204064c9493e0fe4a9efeb721e0ac59fe8e1906d0c659142a93114fb1" 
dependencies = [ "bitflags", "raw-window-handle", - "smallvec 0.6.10", +] + +[[package]] +name = "gfx-memory" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2eed6cda674d9cd4d92229102dbd544292124533d236904f987e9afab456137" +dependencies = [ + "fxhash", + "gfx-hal", + "hibitset", + "log", + "slab", ] [[package]] @@ -2236,9 +2253,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2" +checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" dependencies = [ "cc", "winapi 0.3.7", @@ -2446,7 +2463,7 @@ dependencies = [ "selectors", "servo_arc", "smallbitvec", - "smallvec 1.2.0", + "smallvec", "thin-slice", "void", ] @@ -2826,7 +2843,7 @@ dependencies = [ "neqo-qpack", "neqo-transport", "num-traits", - "smallvec 1.2.0", + "smallvec", ] [[package]] @@ -2851,7 +2868,7 @@ dependencies = [ "log", "neqo-common", "neqo-crypto", - "smallvec 1.2.0", + "smallvec", ] [[package]] @@ -3129,18 +3146,10 @@ dependencies = [ "cloudabi", "libc", "redox_syscall", - "smallvec 1.2.0", + "smallvec", "winapi 0.3.7", ] -[[package]] -name = "peek-poke" -version = "0.2.0" -source = "git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075#969bd7fe2be1a83f87916dc8b388c63cfd457075" -dependencies = [ - "peek-poke-derive 0.2.0", -] - [[package]] name = "peek-poke" version = "0.2.0" @@ -3150,13 +3159,12 @@ dependencies = [ ] [[package]] -name = "peek-poke-derive" +name = "peek-poke" version = "0.2.0" -source = "git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075#969bd7fe2be1a83f87916dc8b388c63cfd457075" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93fd6a575ebf1ac2668d08443c97a22872cfb463fd8b7ddd141e9f6be59af2f" dependencies = [ - "proc-macro2", - "quote", - 
"syn", + "peek-poke-derive 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3170,6 +3178,19 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "peek-poke-derive" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb44a25c5bba983be0fc8592dfaf3e6d0935ce8be0c6b15b2a39507af34a926" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", + "unicode-xid", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -3533,16 +3554,6 @@ version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" -[[package]] -name = "relevant" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbc232e13d37f4547f5b9b42a5efc380cabe5dbc1807f8b893580640b2ab0308" -dependencies = [ - "cfg-if", - "log", -] - [[package]] name = "remote" version = "0.1.0" @@ -3565,33 +3576,6 @@ dependencies = [ "winapi 0.3.7", ] -[[package]] -name = "rendy-descriptor" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f475bcc0505946e998590f1f0545c52ef4b559174a1b353a7ce6638def8b621e" -dependencies = [ - "gfx-hal", - "log", - "relevant", - "smallvec 0.6.10", -] - -[[package]] -name = "rendy-memory" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f99de535d9e48d9cfab780b521702cc0d7183d354872d223967b75abae1199" -dependencies = [ - "colorful", - "gfx-hal", - "hibitset", - "log", - "relevant", - "slab", - "smallvec 0.6.10", -] - [[package]] name = "rental" version = "0.5.5" @@ -3833,7 +3817,7 @@ dependencies = [ "phf_codegen", "precomputed-hash", "servo_arc", - "smallvec 1.2.0", + "smallvec", "thin-slice", "to_shmem", "to_shmem_derive", @@ -3961,16 +3945,6 @@ dependencies = [ "opaque-debug", ] -[[package]] -name = "shared_library" -version = "0.1.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11" -dependencies = [ - "lazy_static", - "libc", -] - [[package]] name = "shift_or_euc" version = "0.1.0" @@ -4021,15 +3995,9 @@ checksum = "1764fe2b30ee783bfe3b9b37b2649d8d590b3148bb12e0079715d4d5c673562e" [[package]] name = "smallvec" -version = "0.6.10" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7" - -[[package]] -name = "smallvec" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" +checksum = "05720e22615919e4734f6a99ceae50d00226c3c5aca406e102ebc33298214e0a" dependencies = [ "serde", ] @@ -4059,15 +4027,15 @@ dependencies = [ [[package]] name = "spirv-cross-internal" version = "0.1.0" -source = "git+https://github.com/kvark/spirv_cross?branch=wgpu#9cb4de489bf80b2b23efffe5a79afb7db6247ba4" +source = "git+https://github.com/kvark/spirv_cross?branch=wgpu2#f0c322e8a17d4957fbb5b7cd3b56d520ad72fbf2" dependencies = [ "cc", ] [[package]] name = "spirv_cross" -version = "0.16.0" -source = "git+https://github.com/kvark/spirv_cross?branch=wgpu#9cb4de489bf80b2b23efffe5a79afb7db6247ba4" +version = "0.18.0" +source = "git+https://github.com/kvark/spirv_cross?branch=wgpu2#f0c322e8a17d4957fbb5b7cd3b56d520ad72fbf2" dependencies = [ "spirv-cross-internal", ] @@ -4169,7 +4137,7 @@ dependencies = [ "serde", "servo_arc", "smallbitvec", - "smallvec 1.2.0", + "smallvec", "static_prefs", "style_derive", "style_traits", @@ -4230,7 +4198,7 @@ dependencies = [ "num-traits", "selectors", "size_of_test", - "smallvec 1.2.0", + "smallvec", "style", "style_traits", "to_shmem", @@ -4438,7 +4406,7 @@ dependencies = [ "cssparser", "servo_arc", "smallbitvec", - "smallvec 1.2.0", + "smallvec", "thin-slice", ] @@ -4968,7 +4936,7 @@ dependencies 
= [ "ron", "serde", "serde_json", - "smallvec 1.2.0", + "smallvec", "svg_fmt", "time", "tracy-rs", @@ -5064,14 +5032,14 @@ dependencies = [ "gfx-backend-empty", "gfx-backend-metal", "gfx-backend-vulkan", + "gfx-descriptor", "gfx-hal", + "gfx-memory", "log", "parking_lot", - "peek-poke 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)", - "rendy-descriptor", - "rendy-memory", + "peek-poke 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde", - "smallvec 1.2.0", + "smallvec", "vec_map", "wgpu-types", ] @@ -5091,7 +5059,7 @@ name = "wgpu-types" version = "0.1.0" dependencies = [ "bitflags", - "peek-poke 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)", + "peek-poke 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde", ] diff --git a/Cargo.toml b/Cargo.toml index 5017a5d8f682..a7fd52c79959 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,7 +69,7 @@ libudev-sys = { path = "dom/webauthn/libudev-sys" } packed_simd = { git = "https://github.com/hsivonen/packed_simd", rev="3541e3818fdc7c2a24f87e3459151a4ce955a67a" } rlbox_lucet_sandbox = { git = "https://github.com/PLSysSec/rlbox_lucet_sandbox/", rev="d510da5999a744c563b0acd18056069d1698273f" } nix = { git = "https://github.com/shravanrn/nix/", branch = "r0.13.1", rev="4af6c367603869a30fddb5ffb0aba2b9477ba92e" } -spirv_cross = { git = "https://github.com/kvark/spirv_cross", branch = "wgpu", rev = "9cb4de489bf80b2b23efffe5a79afb7db6247ba4" } +spirv_cross = { git = "https://github.com/kvark/spirv_cross", branch = "wgpu2", rev = "9cb4de489bf80b2b23efffe5a79afb7db6247ba4" } # failure's backtrace feature might break our builds, see bug 1608157. 
failure = { git = "https://github.com/badboy/failure", rev = "64af847bc5fdcb6d2438bec8a6030812a80519a5" } failure_derive = { git = "https://github.com/badboy/failure", rev = "64af847bc5fdcb6d2438bec8a6030812a80519a5" } diff --git a/dom/webgpu/RenderPassEncoder.cpp b/dom/webgpu/RenderPassEncoder.cpp index a1dfec934feb..e6364260cad1 100644 --- a/dom/webgpu/RenderPassEncoder.cpp +++ b/dom/webgpu/RenderPassEncoder.cpp @@ -86,7 +86,7 @@ ffi::WGPURawPass BeginRenderPass(RawId aEncoderId, cd.store_op = ConvertStoreOp(ca.mStoreOp); if (ca.mResolveTarget.WasPassed()) { - cd.resolve_target = &ca.mResolveTarget.Value().mId; + cd.resolve_target = ca.mResolveTarget.Value().mId; } if (ca.mLoadValue.IsGPULoadOp()) { cd.load_op = ConvertLoadOp(ca.mLoadValue.GetAsGPULoadOp()); diff --git a/dom/webgpu/ipc/PWebGPU.ipdl b/dom/webgpu/ipc/PWebGPU.ipdl index f2707a413513..079677498f68 100644 --- a/dom/webgpu/ipc/PWebGPU.ipdl +++ b/dom/webgpu/ipc/PWebGPU.ipdl @@ -14,6 +14,7 @@ using SerialPipelineLayoutDescriptor from "mozilla/webgpu/WebGPUTypes.h"; using SerialBindGroupDescriptor from "mozilla/webgpu/WebGPUTypes.h"; using SerialComputePipelineDescriptor from "mozilla/webgpu/WebGPUTypes.h"; using SerialRenderPipelineDescriptor from "mozilla/webgpu/WebGPUTypes.h"; +using SerialTextureDescriptor from "mozilla/webgpu/WebGPUTypes.h"; using dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h"; using dom::GPUDeviceDescriptor from "mozilla/dom/WebGPUBinding.h"; using dom::GPUBufferDescriptor from "mozilla/dom/WebGPUBinding.h"; @@ -21,7 +22,6 @@ using dom::GPUSamplerDescriptor from "mozilla/dom/WebGPUBinding.h"; using dom::GPUCommandEncoderDescriptor from "mozilla/dom/WebGPUBinding.h"; using dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h"; using dom::GPUPipelineLayoutDescriptor from "mozilla/dom/WebGPUBinding.h"; -using webgpu::ffi::WGPUTextureDescriptor from "mozilla/webgpu/ffi/wgpu.h"; using webgpu::ffi::WGPUTextureViewDescriptor from "mozilla/webgpu/ffi/wgpu.h"; 
include "mozilla/webgpu/WebGPUSerialize.h"; @@ -48,7 +48,7 @@ parent: async DeviceUnmapBuffer(RawId selfId, RawId bufferId, Shmem shmem); async BufferMapRead(RawId selfId, Shmem shmem) returns (Shmem sm); async BufferDestroy(RawId selfId); - async DeviceCreateTexture(RawId selfId, WGPUTextureDescriptor desc, RawId newId); + async DeviceCreateTexture(RawId selfId, SerialTextureDescriptor desc, RawId newId); async TextureCreateView(RawId selfId, WGPUTextureViewDescriptor desc, RawId newId); async TextureDestroy(RawId selfId); async TextureViewDestroy(RawId selfId); diff --git a/dom/webgpu/ipc/WebGPUChild.cpp b/dom/webgpu/ipc/WebGPUChild.cpp index 616facea71dd..48e3554f485a 100644 --- a/dom/webgpu/ipc/WebGPUChild.cpp +++ b/dom/webgpu/ipc/WebGPUChild.cpp @@ -99,26 +99,26 @@ UniquePtr WebGPUChild::GetDefaultViewDescriptor( RawId WebGPUChild::DeviceCreateTexture(RawId aSelfId, const dom::GPUTextureDescriptor& aDesc) { - ffi::WGPUTextureDescriptor desc = {}; + SerialTextureDescriptor desc = {}; if (aDesc.mSize.IsUnsignedLongSequence()) { const auto& seq = aDesc.mSize.GetAsUnsignedLongSequence(); - desc.size.width = seq.Length() > 0 ? seq[0] : 1; - desc.size.height = seq.Length() > 1 ? seq[1] : 1; - desc.size.depth = seq.Length() > 2 ? seq[2] : 1; + desc.mSize.width = seq.Length() > 0 ? seq[0] : 1; + desc.mSize.height = seq.Length() > 1 ? seq[1] : 1; + desc.mSize.depth = seq.Length() > 2 ? 
seq[2] : 1; } else if (aDesc.mSize.IsGPUExtent3DDict()) { const auto& dict = aDesc.mSize.GetAsGPUExtent3DDict(); - desc.size.width = dict.mWidth; - desc.size.height = dict.mHeight; - desc.size.depth = dict.mDepth; + desc.mSize.width = dict.mWidth; + desc.mSize.height = dict.mHeight; + desc.mSize.depth = dict.mDepth; } else { MOZ_CRASH("Unexpected union"); } - desc.array_layer_count = aDesc.mArrayLayerCount; - desc.mip_level_count = aDesc.mMipLevelCount; - desc.sample_count = aDesc.mSampleCount; - desc.dimension = ffi::WGPUTextureDimension(aDesc.mDimension); - desc.format = ffi::WGPUTextureFormat(aDesc.mFormat); - desc.usage = aDesc.mUsage; + desc.mArrayLayerCount = aDesc.mArrayLayerCount; + desc.mMipLevelCount = aDesc.mMipLevelCount; + desc.mSampleCount = aDesc.mSampleCount; + desc.mDimension = ffi::WGPUTextureDimension(aDesc.mDimension); + desc.mFormat = ffi::WGPUTextureFormat(aDesc.mFormat); + desc.mUsage = aDesc.mUsage; RawId id = ffi::wgpu_client_make_texture_id(mClient, aSelfId); if (!SendDeviceCreateTexture(aSelfId, desc, id)) { diff --git a/dom/webgpu/ipc/WebGPUParent.cpp b/dom/webgpu/ipc/WebGPUParent.cpp index 1cd32292602d..b5bac1809b7a 100644 --- a/dom/webgpu/ipc/WebGPUParent.cpp +++ b/dom/webgpu/ipc/WebGPUParent.cpp @@ -251,8 +251,16 @@ ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aSelfId) { } ipc::IPCResult WebGPUParent::RecvDeviceCreateTexture( - RawId aSelfId, const ffi::WGPUTextureDescriptor& aDesc, RawId aNewId) { - ffi::wgpu_server_device_create_texture(mContext, aSelfId, &aDesc, aNewId); + RawId aSelfId, const SerialTextureDescriptor& aDesc, RawId aNewId) { + ffi::WGPUTextureDescriptor desc = {}; + desc.size = aDesc.mSize; + desc.array_layer_count = aDesc.mArrayLayerCount; + desc.mip_level_count = aDesc.mMipLevelCount; + desc.sample_count = aDesc.mSampleCount; + desc.dimension = aDesc.mDimension; + desc.format = aDesc.mFormat; + desc.usage = aDesc.mUsage; + ffi::wgpu_server_device_create_texture(mContext, aSelfId, &desc, aNewId); return 
IPC_OK(); } @@ -286,7 +294,7 @@ ipc::IPCResult WebGPUParent::RecvDeviceCreateSampler( ffi::WGPUCompareFunction compare; if (aDesc.mCompare.WasPassed()) { compare = ffi::WGPUCompareFunction(aDesc.mCompare.Value()); - desc.compare = &compare; + desc.compare = compare; } ffi::wgpu_server_device_create_sampler(mContext, aSelfId, &desc, aNewId); @@ -605,6 +613,7 @@ ipc::IPCResult WebGPUParent::RecvSwapChainPresent( data->mBuffersLock.Lock(); if (!data->mAvailableBufferIds.empty()) { bufferId = data->mAvailableBufferIds.back(); + wgpu_server_buffer_unmap(mContext, bufferId); data->mAvailableBufferIds.pop_back(); } else if (!data->mUnassignedBufferIds.empty()) { bufferId = data->mUnassignedBufferIds.back(); @@ -612,10 +621,9 @@ ipc::IPCResult WebGPUParent::RecvSwapChainPresent( ffi::WGPUBufferUsage usage = WGPUBufferUsage_COPY_DST | WGPUBufferUsage_MAP_READ; - const ffi::WGPUBufferDescriptor desc = { - bufferSize, - usage, - }; + ffi::WGPUBufferDescriptor desc = {}; + desc.size = bufferSize; + desc.usage = usage; ffi::wgpu_server_device_create_buffer(mContext, data->mDeviceId, &desc, bufferId); } else { diff --git a/dom/webgpu/ipc/WebGPUParent.h b/dom/webgpu/ipc/WebGPUParent.h index 6536f6ff78d8..ab7bb09994b9 100644 --- a/dom/webgpu/ipc/WebGPUParent.h +++ b/dom/webgpu/ipc/WebGPUParent.h @@ -38,8 +38,9 @@ class WebGPUParent final : public PWebGPUParent { ipc::IPCResult RecvBufferMapRead(RawId aSelfId, Shmem&& shmem, BufferMapReadResolver&& resolver); ipc::IPCResult RecvBufferDestroy(RawId aSelfId); - ipc::IPCResult RecvDeviceCreateTexture( - RawId aSelfId, const ffi::WGPUTextureDescriptor& aDesc, RawId aNewId); + ipc::IPCResult RecvDeviceCreateTexture(RawId aSelfId, + const SerialTextureDescriptor& aDesc, + RawId aNewId); ipc::IPCResult RecvTextureCreateView( RawId aSelfId, const ffi::WGPUTextureViewDescriptor& aDesc, RawId aNewId); ipc::IPCResult RecvTextureDestroy(RawId aSelfId); diff --git a/dom/webgpu/ipc/WebGPUSerialize.h b/dom/webgpu/ipc/WebGPUSerialize.h index 
860b3b4a7528..6e67c6822d52 100644 --- a/dom/webgpu/ipc/WebGPUSerialize.h +++ b/dom/webgpu/ipc/WebGPUSerialize.h @@ -64,9 +64,6 @@ DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUSamplerDescriptor, mLodMinClamp, mLodMaxClamp, mCompare); DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUExtent3d, width, height, depth); -DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureDescriptor, - size, array_layer_count, mip_level_count, - sample_count, dimension, format, usage); DEFINE_IPC_SERIALIZER_WITH_FIELDS( mozilla::webgpu::ffi::WGPUTextureViewDescriptor, format, dimension, aspect, base_mip_level, level_count, base_array_layer, array_layer_count); @@ -117,6 +114,10 @@ DEFINE_IPC_SERIALIZER_WITH_FIELDS( mFragmentStage, mPrimitiveTopology, mRasterizationState, mColorStates, mDepthStencilState, mVertexState, mSampleCount, mSampleMask, mAlphaToCoverageEnabled); +DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::SerialTextureDescriptor, + mLabel, mSize, mArrayLayerCount, + mMipLevelCount, mSampleCount, mDimension, + mFormat, mUsage); #undef DEFINE_IPC_SERIALIZER_FFI_ENUM #undef DEFINE_IPC_SERIALIZER_DOM_ENUM diff --git a/dom/webgpu/ipc/WebGPUTypes.h b/dom/webgpu/ipc/WebGPUTypes.h index 91b60465d23c..06118d8959be 100644 --- a/dom/webgpu/ipc/WebGPUTypes.h +++ b/dom/webgpu/ipc/WebGPUTypes.h @@ -80,6 +80,17 @@ struct SerialRenderPipelineDescriptor { bool mAlphaToCoverageEnabled; }; +struct SerialTextureDescriptor { + nsString mLabel; + struct ffi::WGPUExtent3d mSize; + uint32_t mArrayLayerCount; + uint32_t mMipLevelCount; + uint32_t mSampleCount; + enum ffi::WGPUTextureDimension mDimension; + enum ffi::WGPUTextureFormat mFormat; + ffi::WGPUTextureUsage mUsage; +}; + } // namespace webgpu } // namespace mozilla diff --git a/gfx/wgpu/Cargo.lock b/gfx/wgpu/Cargo.lock index 2d30a17cbbec..7b61ff46a9c1 100644 --- a/gfx/wgpu/Cargo.lock +++ b/gfx/wgpu/Cargo.lock @@ -7,10 +7,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name 
= "ash" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "shared_library 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -91,11 +91,6 @@ dependencies = [ "objc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "colorful" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "copyless" version = "0.1.4" @@ -129,17 +124,6 @@ name = "core-foundation-sys" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "core-graphics" -version = "0.17.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "core-graphics" version = "0.19.0" @@ -184,62 +168,62 @@ dependencies = [ [[package]] name = "gfx-auxil" -version = "0.1.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "spirv_cross 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "spirv_cross 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "gfx-backend-dx11" -version = "0.4.6" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "gfx-auxil 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-auxil 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", "range-alloc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "raw-window-handle 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "spirv_cross 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "spirv_cross 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "wio 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "gfx-backend-dx12" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "d3d12 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-auxil 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-auxil 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "range-alloc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "raw-window-handle 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "spirv_cross 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "spirv_cross 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "gfx-backend-empty" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "raw-window-handle 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "gfx-backend-metal" -version = "0.4.5" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -249,8 +233,8 @@ dependencies = [ "copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "core-graphics 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-auxil 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-auxil 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "metal 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -258,38 +242,59 @@ dependencies = [ "parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", "range-alloc 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", "raw-window-handle 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "spirv_cross 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "spirv_cross 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", "storage-map 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "gfx-backend-vulkan" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "ash 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ash 0.30.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "core-graphics 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "objc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", "raw-window-handle 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "x11 2.18.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "gfx-descriptor" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "gfx-hal" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "raw-window-handle 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gfx-memory" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hibitset 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -364,11 +369,6 @@ dependencies = [ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "metal" version = "0.18.0" @@ -445,19 +445,21 @@ dependencies = [ [[package]] name = "peek-poke" version = "0.2.0" -source = "git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075#969bd7fe2be1a83f87916dc8b388c63cfd457075" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "peek-poke-derive 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)", + "peek-poke-derive 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] 
[[package]] name = "peek-poke-derive" -version = "0.2.0" -source = "git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075#969bd7fe2be1a83f87916dc8b388c63cfd457075" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -499,40 +501,6 @@ name = "redox_syscall" version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "relevant" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rendy-descriptor" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "relevant 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rendy-memory" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "colorful 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hibitset 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "relevant 
0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "scopeguard" version = "1.0.0" @@ -556,28 +524,11 @@ dependencies = [ "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "shared_library" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "slab" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "smallvec" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "smallvec" version = "1.0.0" @@ -585,7 +536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "spirv_cross" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", @@ -611,6 +562,17 @@ dependencies = [ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "synstructure" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "typenum" version = "1.11.2" @@ -698,17 +660,17 @@ 
dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-backend-dx11 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-backend-dx12 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-backend-empty 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-backend-metal 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-backend-vulkan 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-backend-dx11 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-backend-dx12 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-backend-empty 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-backend-metal 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-backend-vulkan 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-descriptor 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "gfx-memory 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", - "peek-poke 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)", - "rendy-descriptor 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rendy-memory 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "peek-poke 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 1.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -744,7 +706,7 @@ name = "wgpu-types" version = "0.1.0" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "peek-poke 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)", + "peek-poke 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -786,7 +748,7 @@ dependencies = [ [metadata] "checksum arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" -"checksum ash 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)" = "003d1fb2eb12eb06d4a03dbe02eea67a9fac910fa97932ab9e3a75b96a1ea5e5" +"checksum ash 0.30.0 (registry+https://github.com/rust-lang/crates.io-index)" = "69daec0742947f33a85931fa3cb0ce5f07929159dcbd1f0cbb5b2912e2978509" "checksum atom 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3c86699c3f02778ec07158376991c8f783dd1f2f95c579ffaf0738dc984b2fe2" "checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" "checksum battery 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)" = "36a698e449024a5d18994a815998bf5e2e4bc1883e35a7d7ba95b6b69ee45907" @@ -798,25 +760,25 @@ dependencies = [ "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" "checksum cocoa 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0a4736c86d51bd878b474400d9ec888156f4037015f5d09794fab9f26eab1ad4" -"checksum colorful 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "0bca1619ff57dd7a56b58a8e25ef4199f123e78e503fe1653410350a1b98ae65" "checksum copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6ff9c56c9fb2a49c05ef0e431485a22400af20d33226dc0764d891d09e724127" "checksum core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d" "checksum core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" "checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" "checksum core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" -"checksum core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)" = "56790968ab1c8a1202a102e6de05fc6e1ec87da99e4e93e9a7d13efbfc1e95a9" "checksum core-graphics 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "59e78b2e0aaf43f08e7ae0d6bc96895ef72ff0921c7d4ff4762201b2dba376dd" "checksum d3d12 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc7ed48e89905e5e146bcc1951cc3facb9e44aea9adf5dc01078cda1bd24b662" "checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" "checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" "checksum fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -"checksum gfx-auxil 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"572eee952a9a23c99cfe3e4fd95d277784058a89ac3c77ff6fa3d80a4e321919" -"checksum gfx-backend-dx11 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "d7527cfcd7d1eec6b99f81891293bdd2a41d044ace009717264e5f3b10ce5b86" -"checksum gfx-backend-dx12 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6e913cc800fb12eaba2c420091a02aca9aafbefd672600dfc5b52654343d341" -"checksum gfx-backend-empty 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d383e6bc48867cb37d298a20139fd1eec298f8f6d594690cd1c50ef25470cc7" -"checksum gfx-backend-metal 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "05b6130b9a72129ebb5c91d3d75a142a7fa54dcc112603231582e3fdc0b84247" -"checksum gfx-backend-vulkan 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "62538fedd66a78968a162e8e1a29d085ffbc97f8782634684b2f7da7aea59207" -"checksum gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7c88981665c780447bb08eb099e1ded330754a7246719bab927ee4a949c0ba7f" +"checksum gfx-auxil 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3b46e6f0031330a0be08d17820f2dcaaa91cb36710a97a9500cb4f1c36e785c8" +"checksum gfx-backend-dx11 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b148219292624126f78245e50a9720d95ea149a415ce8ce73ab7014205301b88" +"checksum gfx-backend-dx12 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a0e526746379e974501551b08958947e67a81b5ea8cdc717a000cdd72577da05" +"checksum gfx-backend-empty 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b67bd2d7bc022b257ddbdabc5fa3b10c29c292372c3409f2b6a6e3f4e11cdb85" +"checksum gfx-backend-metal 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cfe128c29675b5afc8acdda1dfe096d6abd5e3528059ab0b98bda8215d8beed9" +"checksum gfx-backend-vulkan 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ebd1dee09bd8d8f1ba52c5ba22d1f70c7ffa990c5eb245eb3ef2d0206f631673" +"checksum 
gfx-descriptor 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1bf35f5d66d1bc56e63e68d7528441453f25992bd954b84309d23c659df2c5da" +"checksum gfx-hal 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc96180204064c9493e0fe4a9efeb721e0ac59fe8e1906d0c659142a93114fb1" +"checksum gfx-memory 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c2eed6cda674d9cd4d92229102dbd544292124533d236904f987e9afab456137" "checksum hibitset 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "47e7292fd9f7fe89fa35c98048f2d0a69b79ed243604234d18f6f8a1aa6f408d" "checksum js-sys 0.3.32 (registry+https://github.com/rust-lang/crates.io-index)" = "1c840fdb2167497b0bd0db43d6dfe61e91637fa72f9d061f8bd17ddc44ba6414" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" @@ -827,7 +789,6 @@ dependencies = [ "checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" "checksum mach 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "86dd2487cdfea56def77b88438a2c915fb45113c5319bfe7e14306ca4cd0b0e1" "checksum malloc_buf 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" -"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" "checksum metal 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e198a0ee42bdbe9ef2c09d0b9426f3b2b47d90d93a4a9b0395c4cea605e92dc0" "checksum nix 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3b2e0b4f3320ed72aaedb9a5ac838690a8047c7b275da22711fddff4f8a14229" "checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" @@ 
-835,27 +796,23 @@ dependencies = [ "checksum objc_exception 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ad970fb455818ad6cba4c122ad012fae53ae8b4795f86378bce65e4f6bab2ca4" "checksum parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" "checksum parking_lot_core 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb" -"checksum peek-poke 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)" = "" -"checksum peek-poke-derive 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)" = "" +"checksum peek-poke 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d93fd6a575ebf1ac2668d08443c97a22872cfb463fd8b7ddd141e9f6be59af2f" +"checksum peek-poke-derive 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6fb44a25c5bba983be0fc8592dfaf3e6d0935ce8be0c6b15b2a39507af34a926" "checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" "checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" "checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" "checksum range-alloc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd5927936723a9e8b715d37d7e4b390455087c4bdf25b9f702309460577b14f9" "checksum raw-window-handle 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0a441a7a6c80ad6473bd4b74ec1c9a4c951794285bf941c2126f607c72e48211" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" 
-"checksum relevant 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bbc232e13d37f4547f5b9b42a5efc380cabe5dbc1807f8b893580640b2ab0308" -"checksum rendy-descriptor 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f475bcc0505946e998590f1f0545c52ef4b559174a1b353a7ce6638def8b621e" -"checksum rendy-memory 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ed492161a819feae7f27f418bb16035276ac20649c60d756699152cb5c1960ec" "checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" "checksum serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "1217f97ab8e8904b57dd22eb61cde455fa7446a9c1cf43966066da047c1f3702" "checksum serde_derive 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "a8c6faef9a2e64b0064f48570289b4bf8823b7581f1d6157c1b52152306651d0" -"checksum shared_library 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11" "checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -"checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" "checksum smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ecf3b85f68e8abaa7555aa5abdb1153079387e60b718283d732f03897fcfc86" -"checksum spirv_cross 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fbbe441b3ac8ec0ae6a4f05234239bd372a241ce15793eef694e8b24afc267bb" +"checksum spirv_cross 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "946216f8793f7199e3ea5b995ee8dc20a0ace1fcf46293a0ef4c17e1d046dbde" "checksum storage-map 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"fd0a4829a5c591dc24a944a736d6b1e4053e51339a79fd5d4702c4c999a9c45e" "checksum syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)" = "dff0acdb207ae2fe6d5976617f887eb1e35a2ba52c13c7234c790960cdad9238" +"checksum synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" "checksum typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" "checksum uom 0.26.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4cec796ec5f7ac557631709079168286056205c51c60aac33f51764bdc7b8dc4" diff --git a/gfx/wgpu/README.md b/gfx/wgpu/README.md index 4cd4749cdd36..585211491d7e 100644 --- a/gfx/wgpu/README.md +++ b/gfx/wgpu/README.md @@ -9,7 +9,7 @@ This is an active GitHub mirror of the WebGPU implementation in Rust, which now [![Crates.io](https://img.shields.io/crates/v/wgpu-core.svg?label=wgpu-core)](https://crates.io/crates/wgpu-core) [![Crates.io](https://img.shields.io/crates/v/wgpu-native.svg?label=wgpu-native)](https://crates.io/crates/wgpu-native) -This is an experimental [WebGPU](https://www.w3.org/community/gpu/) implementation, exposing both Rust and C interfaces as a native static library. It's written in Rust and is based on [gfx-hal](https://github.com/gfx-rs/gfx) and a few [Rendy](https://github.com/amethyst/rendy) bits. See the upstream [WebGPU specification](https://gpuweb.github.io/gpuweb/) (work in progress). +This is an experimental [WebGPU](https://www.w3.org/community/gpu/) implementation, exposing both Rust and C interfaces as a native static library. It's written in Rust and is based on [gfx-hal](https://github.com/gfx-rs/gfx) with help of [gfx-extras](https://github.com/gfx-rs/gfx-extras). 
See the upstream [WebGPU specification](https://gpuweb.github.io/gpuweb/) (work in progress). The implementation consists of the following parts: @@ -35,6 +35,8 @@ The implementation consists of the following parts: Vulkan | :heavy_check_mark: | :heavy_check_mark: | | Metal | | | :heavy_check_mark: | OpenGL | :construction: | :construction: | :construction: | + +:heavy_check_mark: = Primary support — :white_check_mark: = Secondary support — :construction: = Unsupported, but support in progress ## Usage diff --git a/gfx/wgpu/examples/compute/main.c b/gfx/wgpu/examples/compute/main.c index 60b7747ceb4a..4c2624679d94 100644 --- a/gfx/wgpu/examples/compute/main.c +++ b/gfx/wgpu/examples/compute/main.c @@ -69,6 +69,7 @@ int main( WGPUBufferId buffer = wgpu_device_create_buffer_mapped(device, &(WGPUBufferDescriptor){ + .label = "buffer", .size = size, .usage = WGPUBufferUsage_STORAGE | WGPUBufferUsage_MAP_READ}, &staging_memory); @@ -80,6 +81,7 @@ int main( WGPUBindGroupLayoutId bind_group_layout = wgpu_device_create_bind_group_layout(device, &(WGPUBindGroupLayoutDescriptor){ + .label = "bind group layout", .entries = &(WGPUBindGroupLayoutEntry){ .binding = 0, .visibility = WGPUShaderStage_COMPUTE, @@ -94,7 +96,9 @@ int main( .offset = 0}}}; WGPUBindGroupId bind_group = wgpu_device_create_bind_group(device, - &(WGPUBindGroupDescriptor){.layout = bind_group_layout, + &(WGPUBindGroupDescriptor){ + .label = "bind group", + .layout = bind_group_layout, .entries = &(WGPUBindGroupEntry){ .binding = 0, .resource = resource}, @@ -124,7 +128,7 @@ int main( WGPUCommandEncoderId encoder = wgpu_device_create_command_encoder( device, &(WGPUCommandEncoderDescriptor){ - .todo = 0 + .label = "command encoder", }); WGPUComputePassId command_pass = diff --git a/gfx/wgpu/examples/triangle/main.c b/gfx/wgpu/examples/triangle/main.c index 152743d9099a..695335914896 100644 --- a/gfx/wgpu/examples/triangle/main.c +++ b/gfx/wgpu/examples/triangle/main.c @@ -43,10 +43,58 @@ void 
request_adapter_callback(WGPUAdapterId received, void *userdata) { } int main() { + if (!glfwInit()) { + printf("Cannot initialize glfw"); + return 1; + } + + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + GLFWwindow *window = + glfwCreateWindow(640, 480, "wgpu with glfw", NULL, NULL); + + if (!window) { + printf("Cannot create window"); + return 1; + } + + WGPUSurfaceId surface; + +#if WGPU_TARGET == WGPU_TARGET_MACOS + { + id metal_layer = NULL; + NSWindow *ns_window = glfwGetCocoaWindow(window); + [ns_window.contentView setWantsLayer:YES]; + metal_layer = [CAMetalLayer layer]; + [ns_window.contentView setLayer:metal_layer]; + surface = wgpu_create_surface_from_metal_layer(metal_layer); + } +#elif WGPU_TARGET == WGPU_TARGET_LINUX_X11 + { + Display *x11_display = glfwGetX11Display(); + Window x11_window = glfwGetX11Window(window); + surface = wgpu_create_surface_from_xlib((const void **)x11_display, x11_window); + } +#elif WGPU_TARGET == WGPU_TARGET_LINUX_WAYLAND + { + struct wl_display *wayland_display = glfwGetWaylandDisplay(); + struct wl_surface *wayland_surface = glfwGetWaylandWindow(window); + surface = wgpu_create_surface_from_wayland(wayland_surface, wayland_display); + } +#elif WGPU_TARGET == WGPU_TARGET_WINDOWS + { + HWND hwnd = glfwGetWin32Window(window); + HINSTANCE hinstance = GetModuleHandle(NULL); + surface = wgpu_create_surface_from_windows_hwnd(hinstance, hwnd); + } +#else + #error "Unsupported WGPU_TARGET" +#endif + WGPUAdapterId adapter = { 0 }; wgpu_request_adapter_async( &(WGPURequestAdapterOptions){ .power_preference = WGPUPowerPreference_LowPower, + .compatible_surface = surface, }, 2 | 4 | 8, request_adapter_callback, @@ -79,12 +127,14 @@ int main() { WGPUBindGroupLayoutId bind_group_layout = wgpu_device_create_bind_group_layout(device, &(WGPUBindGroupLayoutDescriptor){ + .label = "bind group layout", .entries = NULL, .entries_length = 0, }); WGPUBindGroupId bind_group = wgpu_device_create_bind_group(device, &(WGPUBindGroupDescriptor){ + 
.label = "bind group", .layout = bind_group_layout, .entries = NULL, .entries_length = 0, @@ -151,53 +201,6 @@ int main() { .sample_count = 1, }); - if (!glfwInit()) { - printf("Cannot initialize glfw"); - return 1; - } - - glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); - GLFWwindow *window = - glfwCreateWindow(640, 480, "wgpu with glfw", NULL, NULL); - - if (!window) { - printf("Cannot create window"); - return 1; - } - - WGPUSurfaceId surface; - -#if WGPU_TARGET == WGPU_TARGET_MACOS - { - id metal_layer = NULL; - NSWindow *ns_window = glfwGetCocoaWindow(window); - [ns_window.contentView setWantsLayer:YES]; - metal_layer = [CAMetalLayer layer]; - [ns_window.contentView setLayer:metal_layer]; - surface = wgpu_create_surface_from_metal_layer(metal_layer); - } -#elif WGPU_TARGET == WGPU_TARGET_LINUX_X11 - { - Display *x11_display = glfwGetX11Display(); - Window x11_window = glfwGetX11Window(window); - surface = wgpu_create_surface_from_xlib((const void **)x11_display, x11_window); - } -#elif WGPU_TARGET == WGPU_TARGET_LINUX_WAYLAND - { - struct wl_display *wayland_display = glfwGetWaylandDisplay(); - struct wl_surface *wayland_surface = glfwGetWaylandWindow(window); - surface = wgpu_create_surface_from_wayland(wayland_surface, wayland_display); - } -#elif WGPU_TARGET == WGPU_TARGET_WINDOWS - { - HWND hwnd = glfwGetWin32Window(window); - HINSTANCE hinstance = GetModuleHandle(NULL); - surface = wgpu_create_surface_from_windows_hwnd(hinstance, hwnd); - } -#else - #error "Unsupported WGPU_TARGET" -#endif - int prev_width = 0; int prev_height = 0; glfwGetWindowSize(window, &prev_width, &prev_height); @@ -237,7 +240,7 @@ int main() { } WGPUCommandEncoderId cmd_encoder = wgpu_device_create_command_encoder( - device, &(WGPUCommandEncoderDescriptor){.todo = 0}); + device, &(WGPUCommandEncoderDescriptor){.label = "command encoder"}); WGPURenderPassColorAttachmentDescriptor color_attachments[ATTACHMENTS_LENGTH] = { diff --git a/gfx/wgpu/ffi/wgpu-remote.h 
b/gfx/wgpu/ffi/wgpu-remote.h index 22223daba0b5..07aa16da3478 100644 --- a/gfx/wgpu/ffi/wgpu-remote.h +++ b/gfx/wgpu/ffi/wgpu-remote.h @@ -2,7 +2,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -/* Generated with cbindgen:0.9.1 */ +/* Generated with cbindgen:0.14.0 */ /* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen. * To generate this file: @@ -11,7 +11,10 @@ * 2. Run `rustup run nightly cbindgen toolkit/library/rust/ --lockfile Cargo.lock --crate wgpu-remote -o dom/webgpu/ffi/wgpu_ffi_generated.h` */ -typedef void WGPUEmpty; +typedef uint64_t WGPUNonZeroU64; +typedef uint64_t WGPUOption_AdapterId; +typedef uint64_t WGPUOption_SurfaceId; +typedef uint64_t WGPUOption_TextureViewId; #include @@ -19,103 +22,1432 @@ typedef void WGPUEmpty; #include #include -typedef enum { +/** + * Bound uniform/storage buffer offsets must be aligned to this number. + */ +#define WGPUBIND_BUFFER_ALIGNMENT 256 + +#define WGPUDEFAULT_BIND_GROUPS 4 + +#define WGPUDESIRED_NUM_FRAMES 3 + +#define WGPUMAX_BIND_GROUPS 4 + +#define WGPUMAX_COLOR_TARGETS 4 + +#define WGPUMAX_MIP_LEVELS 16 + +#define WGPUMAX_VERTEX_BUFFERS 8 + +enum WGPUAddressMode { + WGPUAddressMode_ClampToEdge = 0, + WGPUAddressMode_Repeat = 1, + WGPUAddressMode_MirrorRepeat = 2, + /** + * Must be last for serialization purposes + */ + WGPUAddressMode_Sentinel, +}; + +enum WGPUBindingType { + WGPUBindingType_UniformBuffer = 0, + WGPUBindingType_StorageBuffer = 1, + WGPUBindingType_ReadonlyStorageBuffer = 2, + WGPUBindingType_Sampler = 3, + WGPUBindingType_ComparisonSampler = 4, + WGPUBindingType_SampledTexture = 5, + WGPUBindingType_ReadonlyStorageTexture = 6, + WGPUBindingType_WriteonlyStorageTexture = 7, + /** + * Must be last for serialization purposes + */ + WGPUBindingType_Sentinel, +}; + +enum WGPUBlendFactor { + WGPUBlendFactor_Zero = 0, + WGPUBlendFactor_One = 1, + WGPUBlendFactor_SrcColor = 2, + 
WGPUBlendFactor_OneMinusSrcColor = 3, + WGPUBlendFactor_SrcAlpha = 4, + WGPUBlendFactor_OneMinusSrcAlpha = 5, + WGPUBlendFactor_DstColor = 6, + WGPUBlendFactor_OneMinusDstColor = 7, + WGPUBlendFactor_DstAlpha = 8, + WGPUBlendFactor_OneMinusDstAlpha = 9, + WGPUBlendFactor_SrcAlphaSaturated = 10, + WGPUBlendFactor_BlendColor = 11, + WGPUBlendFactor_OneMinusBlendColor = 12, + /** + * Must be last for serialization purposes + */ + WGPUBlendFactor_Sentinel, +}; + +enum WGPUBlendOperation { + WGPUBlendOperation_Add = 0, + WGPUBlendOperation_Subtract = 1, + WGPUBlendOperation_ReverseSubtract = 2, + WGPUBlendOperation_Min = 3, + WGPUBlendOperation_Max = 4, + /** + * Must be last for serialization purposes + */ + WGPUBlendOperation_Sentinel, +}; + +enum WGPUBufferMapAsyncStatus { + WGPUBufferMapAsyncStatus_Success, + WGPUBufferMapAsyncStatus_Error, + WGPUBufferMapAsyncStatus_Unknown, + WGPUBufferMapAsyncStatus_ContextLost, + /** + * Must be last for serialization purposes + */ + WGPUBufferMapAsyncStatus_Sentinel, +}; + +enum WGPUCompareFunction { + WGPUCompareFunction_Undefined = 0, + WGPUCompareFunction_Never = 1, + WGPUCompareFunction_Less = 2, + WGPUCompareFunction_Equal = 3, + WGPUCompareFunction_LessEqual = 4, + WGPUCompareFunction_Greater = 5, + WGPUCompareFunction_NotEqual = 6, + WGPUCompareFunction_GreaterEqual = 7, + WGPUCompareFunction_Always = 8, + /** + * Must be last for serialization purposes + */ + WGPUCompareFunction_Sentinel, +}; + +enum WGPUCullMode { + WGPUCullMode_None = 0, + WGPUCullMode_Front = 1, + WGPUCullMode_Back = 2, + /** + * Must be last for serialization purposes + */ + WGPUCullMode_Sentinel, +}; + +enum WGPUFilterMode { + WGPUFilterMode_Nearest = 0, + WGPUFilterMode_Linear = 1, + /** + * Must be last for serialization purposes + */ + WGPUFilterMode_Sentinel, +}; + +enum WGPUFrontFace { + WGPUFrontFace_Ccw = 0, + WGPUFrontFace_Cw = 1, + /** + * Must be last for serialization purposes + */ + WGPUFrontFace_Sentinel, +}; + +enum WGPUIndexFormat { 
+ WGPUIndexFormat_Uint16 = 0, + WGPUIndexFormat_Uint32 = 1, + /** + * Must be last for serialization purposes + */ + WGPUIndexFormat_Sentinel, +}; + +enum WGPUInputStepMode { + WGPUInputStepMode_Vertex = 0, + WGPUInputStepMode_Instance = 1, + /** + * Must be last for serialization purposes + */ + WGPUInputStepMode_Sentinel, +}; + +enum WGPULoadOp { + WGPULoadOp_Clear = 0, + WGPULoadOp_Load = 1, + /** + * Must be last for serialization purposes + */ + WGPULoadOp_Sentinel, +}; + +enum WGPUPowerPreference { WGPUPowerPreference_Default = 0, WGPUPowerPreference_LowPower = 1, WGPUPowerPreference_HighPerformance = 2, -} WGPUPowerPreference; + /** + * Must be last for serialization purposes + */ + WGPUPowerPreference_Sentinel, +}; -typedef struct WGPUClient WGPUClient; +enum WGPUPrimitiveTopology { + WGPUPrimitiveTopology_PointList = 0, + WGPUPrimitiveTopology_LineList = 1, + WGPUPrimitiveTopology_LineStrip = 2, + WGPUPrimitiveTopology_TriangleList = 3, + WGPUPrimitiveTopology_TriangleStrip = 4, + /** + * Must be last for serialization purposes + */ + WGPUPrimitiveTopology_Sentinel, +}; -typedef uint64_t WGPUId_Adapter_Dummy; +enum WGPUStencilOperation { + WGPUStencilOperation_Keep = 0, + WGPUStencilOperation_Zero = 1, + WGPUStencilOperation_Replace = 2, + WGPUStencilOperation_Invert = 3, + WGPUStencilOperation_IncrementClamp = 4, + WGPUStencilOperation_DecrementClamp = 5, + WGPUStencilOperation_IncrementWrap = 6, + WGPUStencilOperation_DecrementWrap = 7, + /** + * Must be last for serialization purposes + */ + WGPUStencilOperation_Sentinel, +}; + +enum WGPUStoreOp { + WGPUStoreOp_Clear = 0, + WGPUStoreOp_Store = 1, + /** + * Must be last for serialization purposes + */ + WGPUStoreOp_Sentinel, +}; + +enum WGPUTextureAspect { + WGPUTextureAspect_All, + WGPUTextureAspect_StencilOnly, + WGPUTextureAspect_DepthOnly, + /** + * Must be last for serialization purposes + */ + WGPUTextureAspect_Sentinel, +}; + +enum WGPUTextureComponentType { + WGPUTextureComponentType_Float, + 
WGPUTextureComponentType_Sint, + WGPUTextureComponentType_Uint, + /** + * Must be last for serialization purposes + */ + WGPUTextureComponentType_Sentinel, +}; + +enum WGPUTextureDimension { + WGPUTextureDimension_D1, + WGPUTextureDimension_D2, + WGPUTextureDimension_D3, + /** + * Must be last for serialization purposes + */ + WGPUTextureDimension_Sentinel, +}; + +enum WGPUTextureFormat { + WGPUTextureFormat_R8Unorm = 0, + WGPUTextureFormat_R8Snorm = 1, + WGPUTextureFormat_R8Uint = 2, + WGPUTextureFormat_R8Sint = 3, + WGPUTextureFormat_R16Uint = 4, + WGPUTextureFormat_R16Sint = 5, + WGPUTextureFormat_R16Float = 6, + WGPUTextureFormat_Rg8Unorm = 7, + WGPUTextureFormat_Rg8Snorm = 8, + WGPUTextureFormat_Rg8Uint = 9, + WGPUTextureFormat_Rg8Sint = 10, + WGPUTextureFormat_R32Uint = 11, + WGPUTextureFormat_R32Sint = 12, + WGPUTextureFormat_R32Float = 13, + WGPUTextureFormat_Rg16Uint = 14, + WGPUTextureFormat_Rg16Sint = 15, + WGPUTextureFormat_Rg16Float = 16, + WGPUTextureFormat_Rgba8Unorm = 17, + WGPUTextureFormat_Rgba8UnormSrgb = 18, + WGPUTextureFormat_Rgba8Snorm = 19, + WGPUTextureFormat_Rgba8Uint = 20, + WGPUTextureFormat_Rgba8Sint = 21, + WGPUTextureFormat_Bgra8Unorm = 22, + WGPUTextureFormat_Bgra8UnormSrgb = 23, + WGPUTextureFormat_Rgb10a2Unorm = 24, + WGPUTextureFormat_Rg11b10Float = 25, + WGPUTextureFormat_Rg32Uint = 26, + WGPUTextureFormat_Rg32Sint = 27, + WGPUTextureFormat_Rg32Float = 28, + WGPUTextureFormat_Rgba16Uint = 29, + WGPUTextureFormat_Rgba16Sint = 30, + WGPUTextureFormat_Rgba16Float = 31, + WGPUTextureFormat_Rgba32Uint = 32, + WGPUTextureFormat_Rgba32Sint = 33, + WGPUTextureFormat_Rgba32Float = 34, + WGPUTextureFormat_Depth32Float = 35, + WGPUTextureFormat_Depth24Plus = 36, + WGPUTextureFormat_Depth24PlusStencil8 = 37, + /** + * Must be last for serialization purposes + */ + WGPUTextureFormat_Sentinel, +}; + +enum WGPUTextureViewDimension { + WGPUTextureViewDimension_D1, + WGPUTextureViewDimension_D2, + WGPUTextureViewDimension_D2Array, + 
WGPUTextureViewDimension_Cube, + WGPUTextureViewDimension_CubeArray, + WGPUTextureViewDimension_D3, + /** + * Must be last for serialization purposes + */ + WGPUTextureViewDimension_Sentinel, +}; + +enum WGPUVertexFormat { + WGPUVertexFormat_Uchar2 = 1, + WGPUVertexFormat_Uchar4 = 3, + WGPUVertexFormat_Char2 = 5, + WGPUVertexFormat_Char4 = 7, + WGPUVertexFormat_Uchar2Norm = 9, + WGPUVertexFormat_Uchar4Norm = 11, + WGPUVertexFormat_Char2Norm = 14, + WGPUVertexFormat_Char4Norm = 16, + WGPUVertexFormat_Ushort2 = 18, + WGPUVertexFormat_Ushort4 = 20, + WGPUVertexFormat_Short2 = 22, + WGPUVertexFormat_Short4 = 24, + WGPUVertexFormat_Ushort2Norm = 26, + WGPUVertexFormat_Ushort4Norm = 28, + WGPUVertexFormat_Short2Norm = 30, + WGPUVertexFormat_Short4Norm = 32, + WGPUVertexFormat_Half2 = 34, + WGPUVertexFormat_Half4 = 36, + WGPUVertexFormat_Float = 37, + WGPUVertexFormat_Float2 = 38, + WGPUVertexFormat_Float3 = 39, + WGPUVertexFormat_Float4 = 40, + WGPUVertexFormat_Uint = 41, + WGPUVertexFormat_Uint2 = 42, + WGPUVertexFormat_Uint3 = 43, + WGPUVertexFormat_Uint4 = 44, + WGPUVertexFormat_Int = 45, + WGPUVertexFormat_Int2 = 46, + WGPUVertexFormat_Int3 = 47, + WGPUVertexFormat_Int4 = 48, + /** + * Must be last for serialization purposes + */ + WGPUVertexFormat_Sentinel, +}; + +struct WGPUClient; + +struct WGPUGlobal_IdentityRecyclerFactory; + +typedef WGPUNonZeroU64 WGPUId_Adapter_Dummy; typedef WGPUId_Adapter_Dummy WGPUAdapterId; -typedef uint64_t WGPUId_Device_Dummy; +typedef WGPUNonZeroU64 WGPUId_BindGroup_Dummy; + +typedef WGPUId_BindGroup_Dummy WGPUBindGroupId; + +typedef WGPUNonZeroU64 WGPUId_BindGroupLayout_Dummy; + +typedef WGPUId_BindGroupLayout_Dummy WGPUBindGroupLayoutId; + +typedef WGPUNonZeroU64 WGPUId_Buffer_Dummy; + +typedef WGPUId_Buffer_Dummy WGPUBufferId; + +typedef WGPUNonZeroU64 WGPUId_ComputePipeline_Dummy; + +typedef WGPUId_ComputePipeline_Dummy WGPUComputePipelineId; + +typedef WGPUNonZeroU64 WGPUId_Device_Dummy; typedef WGPUId_Device_Dummy WGPUDeviceId; 
-typedef struct { - WGPUClient *client; +typedef WGPUNonZeroU64 WGPUId_CommandBuffer_Dummy; + +typedef WGPUId_CommandBuffer_Dummy WGPUCommandBufferId; + +typedef WGPUCommandBufferId WGPUCommandEncoderId; + +typedef WGPUNonZeroU64 WGPUId_PipelineLayout_Dummy; + +typedef WGPUId_PipelineLayout_Dummy WGPUPipelineLayoutId; + +typedef WGPUNonZeroU64 WGPUId_RenderPipeline_Dummy; + +typedef WGPUId_RenderPipeline_Dummy WGPURenderPipelineId; + +typedef WGPUNonZeroU64 WGPUId_Sampler_Dummy; + +typedef WGPUId_Sampler_Dummy WGPUSamplerId; + +typedef WGPUNonZeroU64 WGPUId_ShaderModule_Dummy; + +typedef WGPUId_ShaderModule_Dummy WGPUShaderModuleId; + +typedef WGPUNonZeroU64 WGPUId_Texture_Dummy; + +typedef WGPUId_Texture_Dummy WGPUTextureId; + +typedef WGPUNonZeroU64 WGPUId_TextureView_Dummy; + +typedef WGPUId_TextureView_Dummy WGPUTextureViewId; + +struct WGPUInfrastructure { + struct WGPUClient *client; const uint8_t *error; -} WGPUInfrastructure; +}; -typedef struct { +struct WGPURawPass { + uint8_t *data; + uint8_t *base; + uintptr_t capacity; + WGPUCommandEncoderId parent; +}; + +struct WGPUComputePassDescriptor { + uint32_t todo; +}; + +struct WGPUColor { + double r; + double g; + double b; + double a; +}; +#define WGPUColor_TRANSPARENT (WGPUColor){ .r = 0.0, .g = 0.0, .b = 0.0, .a = 0.0 } +#define WGPUColor_BLACK (WGPUColor){ .r = 0.0, .g = 0.0, .b = 0.0, .a = 1.0 } +#define WGPUColor_WHITE (WGPUColor){ .r = 1.0, .g = 1.0, .b = 1.0, .a = 1.0 } +#define WGPUColor_RED (WGPUColor){ .r = 1.0, .g = 0.0, .b = 0.0, .a = 1.0 } +#define WGPUColor_GREEN (WGPUColor){ .r = 0.0, .g = 1.0, .b = 0.0, .a = 1.0 } +#define WGPUColor_BLUE (WGPUColor){ .r = 0.0, .g = 0.0, .b = 1.0, .a = 1.0 } + +struct WGPURenderPassColorAttachmentDescriptorBase_TextureViewId { + WGPUTextureViewId attachment; + WGPUOption_TextureViewId resolve_target; + enum WGPULoadOp load_op; + enum WGPUStoreOp store_op; + struct WGPUColor clear_color; +}; + +typedef struct 
WGPURenderPassColorAttachmentDescriptorBase_TextureViewId WGPURenderPassColorAttachmentDescriptor; + +struct WGPURenderPassDepthStencilAttachmentDescriptorBase_TextureViewId { + WGPUTextureViewId attachment; + enum WGPULoadOp depth_load_op; + enum WGPUStoreOp depth_store_op; + float clear_depth; + enum WGPULoadOp stencil_load_op; + enum WGPUStoreOp stencil_store_op; + uint32_t clear_stencil; +}; + +typedef struct WGPURenderPassDepthStencilAttachmentDescriptorBase_TextureViewId WGPURenderPassDepthStencilAttachmentDescriptor; + +struct WGPURenderPassDescriptor { + const WGPURenderPassColorAttachmentDescriptor *color_attachments; + uintptr_t color_attachments_length; + const WGPURenderPassDepthStencilAttachmentDescriptor *depth_stencil_attachment; +}; + +typedef uint64_t WGPUBufferAddress; + +typedef const char *WGPURawString; + +typedef uint32_t WGPUDynamicOffset; + +typedef WGPUNonZeroU64 WGPUId_RenderBundle_Dummy; + +typedef WGPUId_RenderBundle_Dummy WGPURenderBundleId; + +typedef struct WGPUGlobal_IdentityRecyclerFactory WGPUGlobal; + +struct WGPUExtensions { bool anisotropic_filtering; -} WGPUExtensions; +}; -typedef struct { +struct WGPULimits { uint32_t max_bind_groups; -} WGPULimits; +}; -typedef struct { - WGPUExtensions extensions; - WGPULimits limits; -} WGPUDeviceDescriptor; +struct WGPUDeviceDescriptor { + struct WGPUExtensions extensions; + struct WGPULimits limits; +}; -typedef struct { - WGPUPowerPreference power_preference; -} WGPURequestAdapterOptions; +typedef void (*WGPUBufferMapReadCallback)(enum WGPUBufferMapAsyncStatus status, const uint8_t *data, uint8_t *userdata); +struct WGPUBufferBinding { + WGPUBufferId buffer; + WGPUBufferAddress offset; + WGPUBufferAddress size; +}; + +enum WGPUBindingResource_Tag { + WGPUBindingResource_Buffer, + WGPUBindingResource_Sampler, + WGPUBindingResource_TextureView, + /** + * Must be last for serialization purposes + */ + WGPUBindingResource_Sentinel, +}; + +struct WGPUBindingResource_WGPUBuffer_Body { + 
struct WGPUBufferBinding _0; +}; + +struct WGPUBindingResource_WGPUSampler_Body { + WGPUSamplerId _0; +}; + +struct WGPUBindingResource_WGPUTextureView_Body { + WGPUTextureViewId _0; +}; + +struct WGPUBindingResource { + enum WGPUBindingResource_Tag tag; + union { + struct WGPUBindingResource_WGPUBuffer_Body buffer; + struct WGPUBindingResource_WGPUSampler_Body sampler; + struct WGPUBindingResource_WGPUTextureView_Body texture_view; + }; +}; + +struct WGPUBindGroupEntry { + uint32_t binding; + struct WGPUBindingResource resource; +}; + +struct WGPUBindGroupDescriptor { + const char *label; + WGPUBindGroupLayoutId layout; + const struct WGPUBindGroupEntry *entries; + uintptr_t entries_length; +}; + +typedef uint32_t WGPUShaderStage; +#define WGPUShaderStage_NONE 0 +#define WGPUShaderStage_VERTEX 1 +#define WGPUShaderStage_FRAGMENT 2 +#define WGPUShaderStage_COMPUTE 4 + +struct WGPUBindGroupLayoutEntry { + uint32_t binding; + WGPUShaderStage visibility; + enum WGPUBindingType ty; + bool multisampled; + bool has_dynamic_offset; + enum WGPUTextureViewDimension view_dimension; + enum WGPUTextureComponentType texture_component_type; + enum WGPUTextureFormat storage_texture_format; +}; + +struct WGPUBindGroupLayoutDescriptor { + const char *label; + const struct WGPUBindGroupLayoutEntry *entries; + uintptr_t entries_length; +}; + +typedef uint32_t WGPUBufferUsage; +#define WGPUBufferUsage_MAP_READ 1 +#define WGPUBufferUsage_MAP_WRITE 2 +#define WGPUBufferUsage_COPY_SRC 4 +#define WGPUBufferUsage_COPY_DST 8 +#define WGPUBufferUsage_INDEX 16 +#define WGPUBufferUsage_VERTEX 32 +#define WGPUBufferUsage_UNIFORM 64 +#define WGPUBufferUsage_STORAGE 128 +#define WGPUBufferUsage_INDIRECT 256 +#define WGPUBufferUsage_STORAGE_READ 512 +#define WGPUBufferUsage_NONE 0 + +struct WGPUBufferDescriptor { + const char *label; + WGPUBufferAddress size; + WGPUBufferUsage usage; +}; + +struct WGPUProgrammableStageDescriptor { + WGPUShaderModuleId module; + WGPURawString entry_point; +}; + 
+struct WGPUComputePipelineDescriptor { + WGPUPipelineLayoutId layout; + struct WGPUProgrammableStageDescriptor compute_stage; +}; + +struct WGPUCommandEncoderDescriptor { + const char *label; +}; + +struct WGPUPipelineLayoutDescriptor { + const WGPUBindGroupLayoutId *bind_group_layouts; + uintptr_t bind_group_layouts_length; +}; + +struct WGPURasterizationStateDescriptor { + enum WGPUFrontFace front_face; + enum WGPUCullMode cull_mode; + int32_t depth_bias; + float depth_bias_slope_scale; + float depth_bias_clamp; +}; + +struct WGPUBlendDescriptor { + enum WGPUBlendFactor src_factor; + enum WGPUBlendFactor dst_factor; + enum WGPUBlendOperation operation; +}; + +typedef uint32_t WGPUColorWrite; +#define WGPUColorWrite_RED 1 +#define WGPUColorWrite_GREEN 2 +#define WGPUColorWrite_BLUE 4 +#define WGPUColorWrite_ALPHA 8 +#define WGPUColorWrite_COLOR 7 +#define WGPUColorWrite_ALL 15 + +struct WGPUColorStateDescriptor { + enum WGPUTextureFormat format; + struct WGPUBlendDescriptor alpha_blend; + struct WGPUBlendDescriptor color_blend; + WGPUColorWrite write_mask; +}; + +struct WGPUStencilStateFaceDescriptor { + enum WGPUCompareFunction compare; + enum WGPUStencilOperation fail_op; + enum WGPUStencilOperation depth_fail_op; + enum WGPUStencilOperation pass_op; +}; + +struct WGPUDepthStencilStateDescriptor { + enum WGPUTextureFormat format; + bool depth_write_enabled; + enum WGPUCompareFunction depth_compare; + struct WGPUStencilStateFaceDescriptor stencil_front; + struct WGPUStencilStateFaceDescriptor stencil_back; + uint32_t stencil_read_mask; + uint32_t stencil_write_mask; +}; + +typedef uint32_t WGPUShaderLocation; + +struct WGPUVertexAttributeDescriptor { + WGPUBufferAddress offset; + enum WGPUVertexFormat format; + WGPUShaderLocation shader_location; +}; + +struct WGPUVertexBufferLayoutDescriptor { + WGPUBufferAddress array_stride; + enum WGPUInputStepMode step_mode; + const struct WGPUVertexAttributeDescriptor *attributes; + uintptr_t attributes_length; +}; + 
+struct WGPUVertexStateDescriptor { + enum WGPUIndexFormat index_format; + const struct WGPUVertexBufferLayoutDescriptor *vertex_buffers; + uintptr_t vertex_buffers_length; +}; + +struct WGPURenderPipelineDescriptor { + WGPUPipelineLayoutId layout; + struct WGPUProgrammableStageDescriptor vertex_stage; + const struct WGPUProgrammableStageDescriptor *fragment_stage; + enum WGPUPrimitiveTopology primitive_topology; + const struct WGPURasterizationStateDescriptor *rasterization_state; + const struct WGPUColorStateDescriptor *color_states; + uintptr_t color_states_length; + const struct WGPUDepthStencilStateDescriptor *depth_stencil_state; + struct WGPUVertexStateDescriptor vertex_state; + uint32_t sample_count; + uint32_t sample_mask; + bool alpha_to_coverage_enabled; +}; + +struct WGPUSamplerDescriptor { + enum WGPUAddressMode address_mode_u; + enum WGPUAddressMode address_mode_v; + enum WGPUAddressMode address_mode_w; + enum WGPUFilterMode mag_filter; + enum WGPUFilterMode min_filter; + enum WGPUFilterMode mipmap_filter; + float lod_min_clamp; + float lod_max_clamp; + enum WGPUCompareFunction compare; +}; + +struct WGPUU32Array { + const uint32_t *bytes; + uintptr_t length; +}; + +struct WGPUShaderModuleDescriptor { + struct WGPUU32Array code; +}; + +struct WGPUExtent3d { + uint32_t width; + uint32_t height; + uint32_t depth; +}; + +typedef uint32_t WGPUTextureUsage; +#define WGPUTextureUsage_COPY_SRC 1 +#define WGPUTextureUsage_COPY_DST 2 +#define WGPUTextureUsage_SAMPLED 4 +#define WGPUTextureUsage_STORAGE 8 +#define WGPUTextureUsage_OUTPUT_ATTACHMENT 16 +#define WGPUTextureUsage_NONE 0 +#define WGPUTextureUsage_UNINITIALIZED 65535 + +struct WGPUTextureDescriptor { + const char *label; + struct WGPUExtent3d size; + uint32_t array_layer_count; + uint32_t mip_level_count; + uint32_t sample_count; + enum WGPUTextureDimension dimension; + enum WGPUTextureFormat format; + WGPUTextureUsage usage; +}; + +struct WGPUBufferCopyView { + WGPUBufferId buffer; + 
WGPUBufferAddress offset; + uint32_t bytes_per_row; + uint32_t rows_per_image; +}; + +struct WGPUOrigin3d { + uint32_t x; + uint32_t y; + uint32_t z; +}; +#define WGPUOrigin3d_ZERO (WGPUOrigin3d){ .x = 0, .y = 0, .z = 0 } + +struct WGPUTextureCopyView { + WGPUTextureId texture; + uint32_t mip_level; + uint32_t array_layer; + struct WGPUOrigin3d origin; +}; + +struct WGPUCommandBufferDescriptor { + uint32_t todo; +}; + +struct WGPURequestAdapterOptions { + enum WGPUPowerPreference power_preference; + WGPUOption_SurfaceId compatible_surface; +}; + +typedef void *WGPUFactoryParam; + +typedef WGPUNonZeroU64 WGPUId_SwapChain_Dummy; + +typedef WGPUId_SwapChain_Dummy WGPUSwapChainId; + +typedef WGPUNonZeroU64 WGPUId_Surface; + +typedef WGPUId_Surface WGPUSurfaceId; + +struct WGPUIdentityRecyclerFactory { + WGPUFactoryParam param; + void (*free_adapter)(WGPUAdapterId, WGPUFactoryParam); + void (*free_device)(WGPUDeviceId, WGPUFactoryParam); + void (*free_swap_chain)(WGPUSwapChainId, WGPUFactoryParam); + void (*free_pipeline_layout)(WGPUPipelineLayoutId, WGPUFactoryParam); + void (*free_shader_module)(WGPUShaderModuleId, WGPUFactoryParam); + void (*free_bind_group_layout)(WGPUBindGroupLayoutId, WGPUFactoryParam); + void (*free_bind_group)(WGPUBindGroupId, WGPUFactoryParam); + void (*free_command_buffer)(WGPUCommandBufferId, WGPUFactoryParam); + void (*free_render_pipeline)(WGPURenderPipelineId, WGPUFactoryParam); + void (*free_compute_pipeline)(WGPUComputePipelineId, WGPUFactoryParam); + void (*free_buffer)(WGPUBufferId, WGPUFactoryParam); + void (*free_texture)(WGPUTextureId, WGPUFactoryParam); + void (*free_texture_view)(WGPUTextureViewId, WGPUFactoryParam); + void (*free_sampler)(WGPUSamplerId, WGPUFactoryParam); + void (*free_surface)(WGPUSurfaceId, WGPUFactoryParam); +}; + +typedef WGPUDeviceId WGPUQueueId; + +struct WGPUTextureViewDescriptor { + enum WGPUTextureFormat format; + enum WGPUTextureViewDimension dimension; + enum WGPUTextureAspect aspect; + uint32_t 
base_mip_level; + uint32_t level_count; + uint32_t base_array_layer; + uint32_t array_layer_count; +}; + +/** + * # Safety + * + * This function is unsafe because improper use may lead to memory + * problems. For example, a double-free may occur if the function is called + * twice on the same raw pointer. + */ WGPU_INLINE -void wgpu_client_delete(WGPUClient *aClient) +void wgpu_client_delete(struct WGPUClient *aClient) WGPU_FUNC; WGPU_INLINE -void wgpu_client_kill_adapter_ids(const WGPUClient *aClient, - const WGPUAdapterId *aIds, - uintptr_t aIdLength) +void wgpu_client_kill_adapter_id(const struct WGPUClient *aClient, + WGPUAdapterId aId) WGPU_FUNC; WGPU_INLINE -void wgpu_client_kill_device_id(const WGPUClient *aClient, +void wgpu_client_kill_bind_group_id(const struct WGPUClient *aClient, + WGPUBindGroupId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_bind_group_layout_id(const struct WGPUClient *aClient, + WGPUBindGroupLayoutId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_buffer_id(const struct WGPUClient *aClient, + WGPUBufferId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_compute_pipeline_id(const struct WGPUClient *aClient, + WGPUComputePipelineId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_device_id(const struct WGPUClient *aClient, WGPUDeviceId aId) WGPU_FUNC; WGPU_INLINE -uintptr_t wgpu_client_make_adapter_ids(const WGPUClient *aClient, +void wgpu_client_kill_encoder_id(const struct WGPUClient *aClient, + WGPUCommandEncoderId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_pipeline_layout_id(const struct WGPUClient *aClient, + WGPUPipelineLayoutId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_render_pipeline_id(const struct WGPUClient *aClient, + WGPURenderPipelineId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_sampler_id(const struct WGPUClient *aClient, + WGPUSamplerId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_shader_module_id(const struct WGPUClient *aClient, + 
WGPUShaderModuleId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_texture_id(const struct WGPUClient *aClient, + WGPUTextureId aId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_client_kill_texture_view_id(const struct WGPUClient *aClient, + WGPUTextureViewId aId) +WGPU_FUNC; + +/** + * # Safety + * + * This function is unsafe as there is no guarantee that the given pointer is + * valid for `id_length` elements. + */ +WGPU_INLINE +uintptr_t wgpu_client_make_adapter_ids(const struct WGPUClient *aClient, WGPUAdapterId *aIds, uintptr_t aIdLength) WGPU_FUNC; WGPU_INLINE -WGPUDeviceId wgpu_client_make_device_id(const WGPUClient *aClient, +WGPUBindGroupId wgpu_client_make_bind_group_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPUBindGroupLayoutId wgpu_client_make_bind_group_layout_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPUBufferId wgpu_client_make_buffer_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPUComputePipelineId wgpu_client_make_compute_pipeline_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPUDeviceId wgpu_client_make_device_id(const struct WGPUClient *aClient, WGPUAdapterId aAdapterId) WGPU_FUNC; WGPU_INLINE -WGPUInfrastructure wgpu_client_new(void) +WGPUCommandEncoderId wgpu_client_make_encoder_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPUPipelineLayoutId wgpu_client_make_pipeline_layout_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPURenderPipelineId wgpu_client_make_render_pipeline_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPUSamplerId wgpu_client_make_sampler_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPUShaderModuleId wgpu_client_make_shader_module_id(const struct WGPUClient 
*aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPUTextureId wgpu_client_make_texture_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +WGPUTextureViewId wgpu_client_make_texture_view_id(const struct WGPUClient *aClient, + WGPUDeviceId aDeviceId) +WGPU_FUNC; + +WGPU_INLINE +struct WGPUInfrastructure wgpu_client_new(void) +WGPU_FUNC; + +WGPU_INLINE +struct WGPURawPass wgpu_command_encoder_begin_compute_pass(WGPUCommandEncoderId aEncoderId, + const struct WGPUComputePassDescriptor *aDesc) +WGPU_FUNC; + +WGPU_INLINE +struct WGPURawPass wgpu_command_encoder_begin_render_pass(WGPUCommandEncoderId aEncoderId, + const struct WGPURenderPassDescriptor *aDesc) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_compute_pass_destroy(struct WGPURawPass aPass) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_compute_pass_dispatch(struct WGPURawPass *aPass, + uint32_t aGroupsX, + uint32_t aGroupsY, + uint32_t aGroupsZ) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_compute_pass_dispatch_indirect(struct WGPURawPass *aPass, + WGPUBufferId aBufferId, + WGPUBufferAddress aOffset) +WGPU_FUNC; + +WGPU_INLINE +const uint8_t *wgpu_compute_pass_finish(struct WGPURawPass *aPass, + uintptr_t *aLength) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_compute_pass_insert_debug_marker(struct WGPURawPass *aPass, + WGPURawString aLabel) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_compute_pass_pop_debug_group(struct WGPURawPass *aPass) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_compute_pass_push_debug_group(struct WGPURawPass *aPass, + WGPURawString aLabel) +WGPU_FUNC; + +/** + * # Safety + * + * This function is unsafe as there is no guarantee that the given pointer is + * valid for `offset_length` elements. 
+ */ +WGPU_INLINE +void wgpu_compute_pass_set_bind_group(struct WGPURawPass *aPass, + uint32_t aIndex, + WGPUBindGroupId aBindGroupId, + const WGPUDynamicOffset *aOffsets, + uintptr_t aOffsetLength) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_compute_pass_set_pipeline(struct WGPURawPass *aPass, + WGPUComputePipelineId aPipelineId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_destroy(struct WGPURawPass aPass) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_draw(struct WGPURawPass *aPass, + uint32_t aVertexCount, + uint32_t aInstanceCount, + uint32_t aFirstVertex, + uint32_t aFirstInstance) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_draw_indexed(struct WGPURawPass *aPass, + uint32_t aIndexCount, + uint32_t aInstanceCount, + uint32_t aFirstIndex, + int32_t aBaseVertex, + uint32_t aFirstInstance) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_draw_indexed_indirect(struct WGPURawPass *aPass, + WGPUBufferId aBufferId, + WGPUBufferAddress aOffset) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_draw_indirect(struct WGPURawPass *aPass, + WGPUBufferId aBufferId, + WGPUBufferAddress aOffset) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_execute_bundles(struct WGPURawPass *aPass, + const WGPURenderBundleId *aBundles, + uintptr_t aBundlesLength) +WGPU_FUNC; + +WGPU_INLINE +const uint8_t *wgpu_render_pass_finish(struct WGPURawPass *aPass, + uintptr_t *aLength) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_insert_debug_marker(struct WGPURawPass *aPass, + WGPURawString aLabel) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_pop_debug_group(struct WGPURawPass *aPass) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_push_debug_group(struct WGPURawPass *aPass, + WGPURawString aLabel) +WGPU_FUNC; + +/** + * # Safety + * + * This function is unsafe as there is no guarantee that the given pointer is + * valid for `offset_length` elements. 
+ */ +WGPU_INLINE +void wgpu_render_pass_set_bind_group(struct WGPURawPass *aPass, + uint32_t aIndex, + WGPUBindGroupId aBindGroupId, + const WGPUDynamicOffset *aOffsets, + uintptr_t aOffsetLength) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_set_blend_color(struct WGPURawPass *aPass, + const struct WGPUColor *aColor) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_set_index_buffer(struct WGPURawPass *aPass, + WGPUBufferId aBufferId, + WGPUBufferAddress aOffset, + WGPUBufferAddress aSize) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_set_pipeline(struct WGPURawPass *aPass, + WGPURenderPipelineId aPipelineId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_set_scissor_rect(struct WGPURawPass *aPass, + uint32_t aX, + uint32_t aY, + uint32_t aW, + uint32_t aH) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_set_stencil_reference(struct WGPURawPass *aPass, + uint32_t aValue) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_set_vertex_buffer(struct WGPURawPass *aPass, + uint32_t aSlot, + WGPUBufferId aBufferId, + WGPUBufferAddress aOffset, + WGPUBufferAddress aSize) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_render_pass_set_viewport(struct WGPURawPass *aPass, + float aX, + float aY, + float aW, + float aH, + float aDepthMin, + float aDepthMax) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_adapter_destroy(const WGPUGlobal *aGlobal, + WGPUAdapterId aAdapterId) WGPU_FUNC; WGPU_INLINE void wgpu_server_adapter_request_device(const WGPUGlobal *aGlobal, WGPUAdapterId aSelfId, - const WGPUDeviceDescriptor *aDesc, + const struct WGPUDeviceDescriptor *aDesc, WGPUDeviceId aNewId) WGPU_FUNC; +WGPU_INLINE +void wgpu_server_bind_group_destroy(const WGPUGlobal *aGlobal, + WGPUBindGroupId aSelfId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_bind_group_layout_destroy(const WGPUGlobal *aGlobal, + WGPUBindGroupLayoutId aSelfId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_buffer_destroy(const WGPUGlobal *aGlobal, + WGPUBufferId aSelfId) +WGPU_FUNC; + +/** + * # Safety + * + * 
This function is unsafe as there is no guarantee that the given pointer is + * valid for `size` elements. + */ +WGPU_INLINE +void wgpu_server_buffer_map_read(const WGPUGlobal *aGlobal, + WGPUBufferId aBufferId, + WGPUBufferAddress aStart, + WGPUBufferAddress aSize, + WGPUBufferMapReadCallback aCallback, + uint8_t *aUserdata) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_buffer_unmap(const WGPUGlobal *aGlobal, + WGPUBufferId aBufferId) +WGPU_FUNC; + +/** + * # Safety + * + * This function is unsafe as there is no guarantee that the given pointer is + * valid for `byte_length` elements. + */ +WGPU_INLINE +void wgpu_server_command_buffer_destroy(const WGPUGlobal *aGlobal, + WGPUCommandBufferId aSelfId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_compute_pipeline_destroy(const WGPUGlobal *aGlobal, + WGPUComputePipelineId aSelfId) +WGPU_FUNC; + +/** + * # Safety + * + * This function is unsafe because improper use may lead to memory + * problems. For example, a double-free may occur if the function is called + * twice on the same raw pointer. 
+ */ WGPU_INLINE void wgpu_server_delete(WGPUGlobal *aGlobal) WGPU_FUNC; +WGPU_INLINE +void wgpu_server_device_create_bind_group(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const struct WGPUBindGroupDescriptor *aDesc, + WGPUBindGroupId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_device_create_bind_group_layout(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const struct WGPUBindGroupLayoutDescriptor *aDesc, + WGPUBindGroupLayoutId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_device_create_buffer(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const struct WGPUBufferDescriptor *aDesc, + WGPUBufferId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_device_create_compute_pipeline(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const struct WGPUComputePipelineDescriptor *aDesc, + WGPUComputePipelineId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_device_create_encoder(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const struct WGPUCommandEncoderDescriptor *aDesc, + WGPUCommandEncoderId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_device_create_pipeline_layout(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const struct WGPUPipelineLayoutDescriptor *aDesc, + WGPUPipelineLayoutId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_device_create_render_pipeline(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const struct WGPURenderPipelineDescriptor *aDesc, + WGPURenderPipelineId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_device_create_sampler(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const struct WGPUSamplerDescriptor *aDesc, + WGPUSamplerId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_device_create_shader_module(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const struct WGPUShaderModuleDescriptor *aDesc, + WGPUShaderModuleId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_device_create_texture(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + const 
struct WGPUTextureDescriptor *aDesc, + WGPUTextureId aNewId) +WGPU_FUNC; + WGPU_INLINE void wgpu_server_device_destroy(const WGPUGlobal *aGlobal, WGPUDeviceId aSelfId) WGPU_FUNC; +/** + * # Safety + * + * This function is unsafe as there is no guarantee that the given pointer is + * valid for `size` elements. + */ +WGPU_INLINE +void wgpu_server_device_set_buffer_sub_data(const WGPUGlobal *aGlobal, + WGPUDeviceId aSelfId, + WGPUBufferId aBufferId, + WGPUBufferAddress aOffset, + const uint8_t *aData, + WGPUBufferAddress aSize) +WGPU_FUNC; + +/** + * # Safety + * + * This function is unsafe as there is no guarantee that the given pointers are + * valid for `color_attachments_length` and `command_length` elements, + * respectively. + */ +WGPU_INLINE +void wgpu_server_encode_compute_pass(const WGPUGlobal *aGlobal, + WGPUCommandEncoderId aSelfId, + const uint8_t *aBytes, + uintptr_t aByteLength) +WGPU_FUNC; + +/** + * # Safety + * + * This function is unsafe as there is no guarantee that the given pointers are + * valid for `color_attachments_length` and `command_length` elements, + * respectively. 
+ */ +WGPU_INLINE +void wgpu_server_encode_render_pass(const WGPUGlobal *aGlobal, + WGPUCommandEncoderId aSelfId, + const uint8_t *aCommands, + uintptr_t aCommandLength) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_encoder_copy_buffer_to_buffer(const WGPUGlobal *aGlobal, + WGPUCommandEncoderId aSelfId, + WGPUBufferId aSourceId, + WGPUBufferAddress aSourceOffset, + WGPUBufferId aDestinationId, + WGPUBufferAddress aDestinationOffset, + WGPUBufferAddress aSize) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_encoder_copy_buffer_to_texture(const WGPUGlobal *aGlobal, + WGPUCommandEncoderId aSelfId, + const struct WGPUBufferCopyView *aSource, + const struct WGPUTextureCopyView *aDestination, + struct WGPUExtent3d aSize) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_encoder_copy_texture_to_buffer(const WGPUGlobal *aGlobal, + WGPUCommandEncoderId aSelfId, + const struct WGPUTextureCopyView *aSource, + const struct WGPUBufferCopyView *aDestination, + struct WGPUExtent3d aSize) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_encoder_copy_texture_to_texture(const WGPUGlobal *aGlobal, + WGPUCommandEncoderId aSelfId, + const struct WGPUTextureCopyView *aSource, + const struct WGPUTextureCopyView *aDestination, + struct WGPUExtent3d aSize) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_encoder_destroy(const WGPUGlobal *aGlobal, + WGPUCommandEncoderId aSelfId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_encoder_finish(const WGPUGlobal *aGlobal, + WGPUCommandEncoderId aSelfId, + const struct WGPUCommandBufferDescriptor *aDesc) +WGPU_FUNC; + /** * Request an adapter according to the specified options. * Provide the list of IDs to pick from. * * Returns the index in this list, or -1 if unable to pick. + * + * # Safety + * + * This function is unsafe as there is no guarantee that the given pointer is + * valid for `id_length` elements. 
*/ WGPU_INLINE int8_t wgpu_server_instance_request_adapter(const WGPUGlobal *aGlobal, - const WGPURequestAdapterOptions *aDesc, + const struct WGPURequestAdapterOptions *aDesc, const WGPUAdapterId *aIds, uintptr_t aIdLength) WGPU_FUNC; WGPU_INLINE -WGPUGlobal *wgpu_server_new(void) +WGPUGlobal *wgpu_server_new(struct WGPUIdentityRecyclerFactory aFactory) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_pipeline_layout_destroy(const WGPUGlobal *aGlobal, + WGPUPipelineLayoutId aSelfId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_poll_all_devices(const WGPUGlobal *aGlobal, + bool aForceWait) +WGPU_FUNC; + +/** + * # Safety + * + * This function is unsafe as there is no guarantee that the given pointer is + * valid for `command_buffer_id_length` elements. + */ +WGPU_INLINE +void wgpu_server_queue_submit(const WGPUGlobal *aGlobal, + WGPUQueueId aSelfId, + const WGPUCommandBufferId *aCommandBufferIds, + uintptr_t aCommandBufferIdLength) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_render_pipeline_destroy(const WGPUGlobal *aGlobal, + WGPURenderPipelineId aSelfId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_sampler_destroy(const WGPUGlobal *aGlobal, + WGPUSamplerId aSelfId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_shader_module_destroy(const WGPUGlobal *aGlobal, + WGPUShaderModuleId aSelfId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_texture_create_view(const WGPUGlobal *aGlobal, + WGPUTextureId aSelfId, + const struct WGPUTextureViewDescriptor *aDesc, + WGPUTextureViewId aNewId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_texture_destroy(const WGPUGlobal *aGlobal, + WGPUTextureId aSelfId) +WGPU_FUNC; + +WGPU_INLINE +void wgpu_server_texture_view_destroy(const WGPUGlobal *aGlobal, + WGPUTextureViewId aSelfId) WGPU_FUNC; diff --git a/gfx/wgpu/ffi/wgpu.h b/gfx/wgpu/ffi/wgpu.h index ec6d92bc520b..7d7e7bfe5271 100644 --- a/gfx/wgpu/ffi/wgpu.h +++ b/gfx/wgpu/ffi/wgpu.h @@ -2,7 +2,7 @@ * License, v. 2.0. 
If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -/* Generated with cbindgen:0.13.1 */ +/* Generated with cbindgen:0.14.0 */ /* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen. * To generate this file: @@ -11,7 +11,10 @@ * 2. Run `rustup run nightly cbindgen toolkit/library/rust/ --lockfile Cargo.lock --crate wgpu-remote -o dom/webgpu/ffi/wgpu_ffi_generated.h` */ -#define WGPU_LOCAL +typedef unsigned long long WGPUNonZeroU64; +typedef unsigned long long WGPUOption_AdapterId; +typedef unsigned long long WGPUOption_SurfaceId; +typedef unsigned long long WGPUOption_TextureViewId; #include @@ -19,6 +22,11 @@ #include #include +/** + * Bound uniform/storage buffer offsets must be aligned to this number. + */ +#define WGPUBIND_BUFFER_ALIGNMENT 256 + #define WGPUDEFAULT_BIND_GROUPS 4 #define WGPUDESIRED_NUM_FRAMES 3 @@ -80,14 +88,15 @@ typedef enum { } WGPUBufferMapAsyncStatus; typedef enum { - WGPUCompareFunction_Never = 0, - WGPUCompareFunction_Less = 1, - WGPUCompareFunction_Equal = 2, - WGPUCompareFunction_LessEqual = 3, - WGPUCompareFunction_Greater = 4, - WGPUCompareFunction_NotEqual = 5, - WGPUCompareFunction_GreaterEqual = 6, - WGPUCompareFunction_Always = 7, + WGPUCompareFunction_Undefined = 0, + WGPUCompareFunction_Never = 1, + WGPUCompareFunction_Less = 2, + WGPUCompareFunction_Equal = 3, + WGPUCompareFunction_LessEqual = 4, + WGPUCompareFunction_Greater = 5, + WGPUCompareFunction_NotEqual = 6, + WGPUCompareFunction_GreaterEqual = 7, + WGPUCompareFunction_Always = 8, } WGPUCompareFunction; typedef enum { @@ -275,11 +284,11 @@ typedef enum { WGPUVertexFormat_Int4 = 48, } WGPUVertexFormat; -typedef uint64_t WGPUId_Adapter_Dummy; +typedef WGPUNonZeroU64 WGPUId_Adapter_Dummy; typedef WGPUId_Adapter_Dummy WGPUAdapterId; -typedef uint64_t WGPUId_Device_Dummy; +typedef WGPUNonZeroU64 WGPUId_Device_Dummy; typedef WGPUId_Device_Dummy WGPUDeviceId; @@ -296,11 +305,11 @@ typedef struct { 
WGPULimits limits; } WGPUDeviceDescriptor; -typedef uint64_t WGPUId_BindGroup_Dummy; +typedef WGPUNonZeroU64 WGPUId_BindGroup_Dummy; typedef WGPUId_BindGroup_Dummy WGPUBindGroupId; -typedef uint64_t WGPUId_Buffer_Dummy; +typedef WGPUNonZeroU64 WGPUId_Buffer_Dummy; typedef WGPUId_Buffer_Dummy WGPUBufferId; @@ -310,7 +319,7 @@ typedef void (*WGPUBufferMapReadCallback)(WGPUBufferMapAsyncStatus status, const typedef void (*WGPUBufferMapWriteCallback)(WGPUBufferMapAsyncStatus status, uint8_t *data, uint8_t *userdata); -typedef uint64_t WGPUId_CommandBuffer_Dummy; +typedef WGPUNonZeroU64 WGPUId_CommandBuffer_Dummy; typedef WGPUId_CommandBuffer_Dummy WGPUCommandBufferId; @@ -327,12 +336,10 @@ typedef struct { uint32_t todo; } WGPUComputePassDescriptor; -typedef uint64_t WGPUId_TextureView_Dummy; +typedef WGPUNonZeroU64 WGPUId_TextureView_Dummy; typedef WGPUId_TextureView_Dummy WGPUTextureViewId; -typedef const WGPUTextureViewId *WGPUOptionRef_TextureViewId; - typedef struct { double r; double g; @@ -348,13 +355,13 @@ typedef struct { typedef struct { WGPUTextureViewId attachment; - WGPUOptionRef_TextureViewId resolve_target; + WGPUOption_TextureViewId resolve_target; WGPULoadOp load_op; WGPUStoreOp store_op; WGPUColor clear_color; -} WGPURenderPassColorAttachmentDescriptorBase_TextureViewId__OptionRef_TextureViewId; +} WGPURenderPassColorAttachmentDescriptorBase_TextureViewId; -typedef WGPURenderPassColorAttachmentDescriptorBase_TextureViewId__OptionRef_TextureViewId WGPURenderPassColorAttachmentDescriptor; +typedef WGPURenderPassColorAttachmentDescriptorBase_TextureViewId WGPURenderPassColorAttachmentDescriptor; typedef struct { WGPUTextureViewId attachment; @@ -381,7 +388,7 @@ typedef struct { uint32_t rows_per_image; } WGPUBufferCopyView; -typedef uint64_t WGPUId_Texture_Dummy; +typedef WGPUNonZeroU64 WGPUId_Texture_Dummy; typedef WGPUId_Texture_Dummy WGPUTextureId; @@ -415,15 +422,15 @@ typedef const char *WGPURawString; typedef uint32_t WGPUDynamicOffset; -typedef 
uint64_t WGPUId_ComputePipeline_Dummy; +typedef WGPUNonZeroU64 WGPUId_ComputePipeline_Dummy; typedef WGPUId_ComputePipeline_Dummy WGPUComputePipelineId; -typedef uint64_t WGPUId_Surface; +typedef WGPUNonZeroU64 WGPUId_Surface; typedef WGPUId_Surface WGPUSurfaceId; -typedef uint64_t WGPUId_BindGroupLayout_Dummy; +typedef WGPUNonZeroU64 WGPUId_BindGroupLayout_Dummy; typedef WGPUId_BindGroupLayout_Dummy WGPUBindGroupLayoutId; @@ -433,7 +440,7 @@ typedef struct { WGPUBufferAddress size; } WGPUBufferBinding; -typedef uint64_t WGPUId_Sampler_Dummy; +typedef WGPUNonZeroU64 WGPUId_Sampler_Dummy; typedef WGPUId_Sampler_Dummy WGPUSamplerId; @@ -470,6 +477,7 @@ typedef struct { } WGPUBindGroupEntry; typedef struct { + const char *label; WGPUBindGroupLayoutId layout; const WGPUBindGroupEntry *entries; uintptr_t entries_length; @@ -493,6 +501,7 @@ typedef struct { } WGPUBindGroupLayoutEntry; typedef struct { + const char *label; const WGPUBindGroupLayoutEntry *entries; uintptr_t entries_length; } WGPUBindGroupLayoutDescriptor; @@ -511,19 +520,20 @@ typedef uint32_t WGPUBufferUsage; #define WGPUBufferUsage_NONE 0 typedef struct { + const char *label; WGPUBufferAddress size; WGPUBufferUsage usage; } WGPUBufferDescriptor; typedef struct { - uint32_t todo; + const char *label; } WGPUCommandEncoderDescriptor; -typedef uint64_t WGPUId_PipelineLayout_Dummy; +typedef WGPUNonZeroU64 WGPUId_PipelineLayout_Dummy; typedef WGPUId_PipelineLayout_Dummy WGPUPipelineLayoutId; -typedef uint64_t WGPUId_ShaderModule_Dummy; +typedef WGPUNonZeroU64 WGPUId_ShaderModule_Dummy; typedef WGPUId_ShaderModule_Dummy WGPUShaderModuleId; @@ -542,7 +552,7 @@ typedef struct { uintptr_t bind_group_layouts_length; } WGPUPipelineLayoutDescriptor; -typedef uint64_t WGPUId_RenderPipeline_Dummy; +typedef WGPUNonZeroU64 WGPUId_RenderPipeline_Dummy; typedef WGPUId_RenderPipeline_Dummy WGPURenderPipelineId; @@ -637,7 +647,7 @@ typedef struct { WGPUFilterMode mipmap_filter; float lod_min_clamp; float lod_max_clamp; - 
const WGPUCompareFunction *compare; + WGPUCompareFunction compare; } WGPUSamplerDescriptor; typedef struct { @@ -649,7 +659,7 @@ typedef struct { WGPUU32Array code; } WGPUShaderModuleDescriptor; -typedef uint64_t WGPUId_SwapChain_Dummy; +typedef WGPUNonZeroU64 WGPUId_SwapChain_Dummy; typedef WGPUId_SwapChain_Dummy WGPUSwapChainId; @@ -671,6 +681,7 @@ typedef struct { } WGPUSwapChainDescriptor; typedef struct { + const char *label; WGPUExtent3d size; uint32_t array_layer_count; uint32_t mip_level_count; @@ -684,20 +695,21 @@ typedef WGPUDeviceId WGPUQueueId; typedef WGPURawPass *WGPURenderPassId; -typedef uint64_t WGPUId_RenderBundle_Dummy; +typedef WGPUNonZeroU64 WGPUId_RenderBundle_Dummy; typedef WGPUId_RenderBundle_Dummy WGPURenderBundleId; typedef struct { WGPUPowerPreference power_preference; + WGPUOption_SurfaceId compatible_surface; } WGPURequestAdapterOptions; typedef uint32_t WGPUBackendBit; -typedef void (*WGPURequestAdapterCallback)(WGPUAdapterId id, void *userdata); +typedef void (*WGPURequestAdapterCallback)(WGPUOption_AdapterId id, void *userdata); typedef struct { - WGPUTextureViewId view_id; + WGPUOption_TextureViewId view_id; } WGPUSwapChainOutput; typedef struct { diff --git a/gfx/wgpu/wgpu-core/Cargo.toml b/gfx/wgpu/wgpu-core/Cargo.toml index 29cdfe373d57..c26fb55da61e 100644 --- a/gfx/wgpu/wgpu-core/Cargo.toml +++ b/gfx/wgpu/wgpu-core/Cargo.toml @@ -6,7 +6,7 @@ authors = [ "Joshua Groves ", ] edition = "2018" -description = "WebGPU core logic on gfx-hal/rendy" +description = "WebGPU core logic on gfx-hal" homepage = "https://github.com/gfx-rs/wgpu" repository = "https://github.com/gfx-rs/wgpu" keywords = ["graphics"] @@ -27,13 +27,13 @@ bitflags = "1.0" copyless = "0.1" fxhash = "0.2" log = "0.4" -hal = { package = "gfx-hal", version = "0.4" } -gfx-backend-empty = { version = "0.4" } +hal = { package = "gfx-hal", version = "0.5" } +gfx-backend-empty = "0.5" +gfx-descriptor = "0.1" +gfx-memory = "0.1" parking_lot = "0.10" -peek-poke = { git = 
"https://github.com/kvark/peek-poke", rev = "969bd7fe2be1a83f87916dc8b388c63cfd457075" } -rendy-memory = "0.5" -rendy-descriptor = "0.5" -smallvec = "1.0" +peek-poke = "0.2" +smallvec = "1" vec_map = "0.8" [dependencies.serde_crate] @@ -49,16 +49,16 @@ version = "0.1" features = ["peek-poke"] [target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies] -gfx-backend-metal = { version = "0.4" } -gfx-backend-vulkan = { version = "0.4", optional = true } +gfx-backend-metal = { version = "0.5" } +gfx-backend-vulkan = { version = "0.5", optional = true } [target.'cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies] -gfx-backend-vulkan = { version = "0.4", features = ["x11"] } +gfx-backend-vulkan = { version = "0.5", features = ["x11"] } [target.'cfg(windows)'.dependencies] -gfx-backend-dx12 = { version = "0.4.1" } -gfx-backend-dx11 = { version = "0.4" } -gfx-backend-vulkan = { version = "0.4" } +gfx-backend-dx12 = { version = "0.5" } +gfx-backend-dx11 = { version = "0.5" } +gfx-backend-vulkan = { version = "0.5" } [target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "dragonfly", target_os = "freebsd"))'.dependencies] battery = { version = "0.7", optional = true } diff --git a/gfx/wgpu/wgpu-core/src/binding_model.rs b/gfx/wgpu/wgpu-core/src/binding_model.rs index 051ee31ccfda..a919b1bf981d 100644 --- a/gfx/wgpu/wgpu-core/src/binding_model.rs +++ b/gfx/wgpu/wgpu-core/src/binding_model.rs @@ -11,9 +11,9 @@ use crate::{ Stored, }; -use wgt::BufferAddress; +use wgt::{BufferAddress, TextureComponentType}; use arrayvec::ArrayVec; -use rendy_descriptor::{DescriptorRanges, DescriptorSet}; +use gfx_descriptor::{DescriptorCounts, DescriptorSet}; #[cfg(feature = "serde")] use serde_crate::{Deserialize, Serialize}; @@ -33,15 +33,6 @@ pub enum BindingType { WriteonlyStorageTexture = 7, } -#[repr(C)] -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, 
Deserialize), serde(crate="serde_crate"))] -pub enum TextureComponentType { - Float, - Sint, - Uint, -} - #[repr(C)] #[derive(Clone, Debug, Hash, PartialEq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))] @@ -59,6 +50,7 @@ pub struct BindGroupLayoutEntry { #[repr(C)] #[derive(Debug)] pub struct BindGroupLayoutDescriptor { + pub label: *const std::os::raw::c_char, pub entries: *const BindGroupLayoutEntry, pub entries_length: usize, } @@ -68,7 +60,7 @@ pub struct BindGroupLayout { pub(crate) raw: B::DescriptorSetLayout, pub(crate) device_id: Stored, pub(crate) entries: FastHashMap, - pub(crate) desc_ranges: DescriptorRanges, + pub(crate) desc_counts: DescriptorCounts, pub(crate) dynamic_count: usize, } @@ -115,6 +107,7 @@ pub struct BindGroupEntry { #[repr(C)] #[derive(Debug)] pub struct BindGroupDescriptor { + pub label: *const std::os::raw::c_char, pub layout: BindGroupLayoutId, pub entries: *const BindGroupEntry, pub entries_length: usize, diff --git a/gfx/wgpu/wgpu-core/src/command/allocator.rs b/gfx/wgpu/wgpu-core/src/command/allocator.rs index 5a30d303ae77..13fd7dff7528 100644 --- a/gfx/wgpu/wgpu-core/src/command/allocator.rs +++ b/gfx/wgpu/wgpu-core/src/command/allocator.rs @@ -55,10 +55,8 @@ impl CommandPool { fn allocate(&mut self) -> B::CommandBuffer { if self.available.is_empty() { - let extra = unsafe { self.raw.allocate_vec(20, hal::command::Level::Primary) }; - self.available.extend(extra); + unsafe { self.raw.allocate(20, hal::command::Level::Primary, &mut self.available) }; } - self.available.pop().unwrap() } } @@ -133,8 +131,7 @@ impl CommandAllocator { let pool = inner.pools.get_mut(&cmd_buf.recorded_thread_id).unwrap(); if pool.available.is_empty() { - let extra = unsafe { pool.raw.allocate_vec(20, hal::command::Level::Primary) }; - pool.available.extend(extra); + unsafe { pool.raw.allocate(20, hal::command::Level::Primary, &mut pool.available) }; } pool.available.pop().unwrap() diff --git 
a/gfx/wgpu/wgpu-core/src/command/compute.rs b/gfx/wgpu/wgpu-core/src/command/compute.rs index 29a35526729d..879006455821 100644 --- a/gfx/wgpu/wgpu-core/src/command/compute.rs +++ b/gfx/wgpu/wgpu-core/src/command/compute.rs @@ -8,19 +8,24 @@ use crate::{ CommandBuffer, PhantomSlice, }, - device::{all_buffer_stages, BIND_BUFFER_ALIGNMENT}, + device::all_buffer_stages, hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token}, id, }; -use wgt::{BufferAddress, BufferUsage, DynamicOffset}; +use wgt::{BufferAddress, BufferUsage, DynamicOffset, BIND_BUFFER_ALIGNMENT}; use hal::command::CommandBuffer as _; -use peek_poke::{Peek, PeekCopy, Poke}; +use peek_poke::{Peek, PeekPoke, Poke}; use std::iter; +#[derive(Debug, PartialEq)] +enum PipelineState { + Required, + Set, +} -#[derive(Clone, Copy, Debug, PeekCopy, Poke)] +#[derive(Clone, Copy, Debug, PeekPoke)] enum ComputeCommand { SetBindGroup { index: u8, @@ -37,6 +42,12 @@ enum ComputeCommand { End, } +impl Default for ComputeCommand { + fn default() -> Self { + ComputeCommand::End + } +} + impl super::RawPass { pub unsafe fn new_compute(parent: id::CommandEncoderId) -> Self { Self::from_vec(Vec::::with_capacity(1), parent) @@ -76,6 +87,8 @@ impl Global { let (buffer_guard, mut token) = hub.buffers.read(&mut token); let (texture_guard, _) = hub.textures.read(&mut token); + let mut pipeline_state = PipelineState::Required; + let mut peeker = raw_data.as_ptr(); let raw_data_end = unsafe { raw_data.as_ptr().add(raw_data.len()) @@ -83,7 +96,7 @@ impl Global { let mut command = ComputeCommand::Dispatch([0; 3]); // dummy loop { assert!(unsafe { peeker.add(ComputeCommand::max_size()) } <= raw_data_end); - peeker = unsafe { command.peek_from(peeker) }; + peeker = unsafe { ComputeCommand::peek_from(peeker, &mut command) }; match command { ComputeCommand::SetBindGroup { index, num_dynamic_offsets, bind_group_id, phantom_offsets } => { let (new_peeker, offsets) = unsafe { @@ -136,13 +149,17 @@ impl Global { offsets .iter() 
.chain(follow_ups.flat_map(|(_, offsets)| offsets)) - .map(|&off| off as hal::command::DescriptorSetOffset), + .cloned(), ); } } } ComputeCommand::SetPipeline(pipeline_id) => { - let pipeline = &pipeline_guard[pipeline_id]; + pipeline_state = PipelineState::Set; + let pipeline = cmb.trackers + .compute_pipes + .use_extend(&*pipeline_guard, pipeline_id, (), ()) + .unwrap(); unsafe { raw.bind_compute_pipeline(&pipeline.raw); @@ -170,7 +187,7 @@ impl Global { &pipeline_layout.raw, index, iter::once(desc_set), - offsets.iter().map(|offset| *offset as u32), + offsets.iter().cloned(), ); } } @@ -183,11 +200,13 @@ impl Global { } } ComputeCommand::Dispatch(groups) => { + assert_eq!(pipeline_state, PipelineState::Set, "Dispatch error: Pipeline is missing"); unsafe { raw.dispatch(groups); } } ComputeCommand::DispatchIndirect { buffer_id, offset } => { + assert_eq!(pipeline_state, PipelineState::Set, "Dispatch error: Pipeline is missing"); let (src_buffer, src_pending) = cmb.trackers.buffers.use_replace( &*buffer_guard, buffer_id, @@ -243,7 +262,7 @@ use wgt::{BufferAddress, DynamicOffset}; index: index.try_into().unwrap(), num_dynamic_offsets: offset_length.try_into().unwrap(), bind_group_id, - phantom_offsets: PhantomSlice::new(), + phantom_offsets: PhantomSlice::default(), }); pass.encode_slice( slice::from_raw_parts(offsets, offset_length), diff --git a/gfx/wgpu/wgpu-core/src/command/mod.rs b/gfx/wgpu/wgpu-core/src/command/mod.rs index 2e36672619ad..623f2a0c037b 100644 --- a/gfx/wgpu/wgpu-core/src/command/mod.rs +++ b/gfx/wgpu/wgpu-core/src/command/mod.rs @@ -28,6 +28,8 @@ use crate::{ Stored, }; +use peek_poke::PeekPoke; + use std::{ marker::PhantomData, mem, @@ -35,17 +37,18 @@ use std::{ slice, thread::ThreadId, }; -use wgt::RenderPassColorAttachmentDescriptorBase; -#[derive(Clone, Copy, Debug, peek_poke::PeekCopy, peek_poke::Poke)] +#[derive(Clone, Copy, Debug, PeekPoke)] struct PhantomSlice(PhantomData); -impl PhantomSlice { - fn new() -> Self { +impl Default for 
PhantomSlice { + fn default() -> Self { PhantomSlice(PhantomData) } +} +impl PhantomSlice { unsafe fn decode_unaligned<'a>( self, pointer: *const u8, count: usize, bound: *const u8 ) -> (*const u8, &'a [T]) { @@ -183,6 +186,8 @@ impl CommandBuffer { base.views.merge_extend(&head.views).unwrap(); base.bind_groups.merge_extend(&head.bind_groups).unwrap(); base.samplers.merge_extend(&head.samplers).unwrap(); + base.compute_pipes.merge_extend(&head.compute_pipes).unwrap(); + base.render_pipes.merge_extend(&head.render_pipes).unwrap(); let stages = all_buffer_stages() | all_image_stages(); unsafe { @@ -196,26 +201,52 @@ impl CommandBuffer { } #[repr(C)] -#[derive(Clone, Debug, Default)] -pub struct CommandBufferDescriptor { - pub todo: u32, +#[derive(PeekPoke)] +struct PassComponent { + load_op: wgt::LoadOp, + store_op: wgt::StoreOp, + clear_value: T, } -pub type RawRenderPassColorAttachmentDescriptor = - RenderPassColorAttachmentDescriptorBase; +// required for PeekPoke +impl Default for PassComponent { + fn default() -> Self { + PassComponent { + load_op: wgt::LoadOp::Clear, + store_op: wgt::StoreOp::Clear, + clear_value: T::default(), + } + } +} #[repr(C)] -#[derive(peek_poke::PeekCopy, peek_poke::Poke)] -pub struct RawRenderTargets { - pub colors: [RawRenderPassColorAttachmentDescriptor; MAX_COLOR_TARGETS], - pub depth_stencil: RenderPassDepthStencilAttachmentDescriptor, +#[derive(Default, PeekPoke)] +struct RawRenderPassColorAttachmentDescriptor { + attachment: u64, + resolve_target: u64, + component: PassComponent, +} + +#[repr(C)] +#[derive(Default, PeekPoke)] +struct RawRenderPassDepthStencilAttachmentDescriptor { + attachment: u64, + depth: PassComponent, + stencil: PassComponent, +} + +#[repr(C)] +#[derive(Default, PeekPoke)] +struct RawRenderTargets { + colors: [RawRenderPassColorAttachmentDescriptor; MAX_COLOR_TARGETS], + depth_stencil: RawRenderPassDepthStencilAttachmentDescriptor, } impl Global { pub fn command_encoder_finish( &self, encoder_id: 
id::CommandEncoderId, - _desc: &CommandBufferDescriptor, + _desc: &wgt::CommandBufferDescriptor, ) -> id::CommandBufferId { let hub = B::hub(self); let mut token = Token::root(); diff --git a/gfx/wgpu/wgpu-core/src/command/render.rs b/gfx/wgpu/wgpu-core/src/command/render.rs index ed7805ad14d3..b771e888c396 100644 --- a/gfx/wgpu/wgpu-core/src/command/render.rs +++ b/gfx/wgpu/wgpu-core/src/command/render.rs @@ -5,8 +5,10 @@ use crate::{ command::{ bind::{Binder, LayoutChange}, + PassComponent, PhantomSlice, RawRenderPassColorAttachmentDescriptor, + RawRenderPassDepthStencilAttachmentDescriptor, RawRenderTargets, }, conv, @@ -14,7 +16,6 @@ use crate::{ FramebufferKey, RenderPassContext, RenderPassKey, - BIND_BUFFER_ALIGNMENT, MAX_VERTEX_BUFFERS, MAX_COLOR_TARGETS, }, @@ -37,10 +38,11 @@ use wgt::{ RenderPassColorAttachmentDescriptorBase, RenderPassDepthStencilAttachmentDescriptorBase, TextureUsage, + BIND_BUFFER_ALIGNMENT }; use arrayvec::ArrayVec; use hal::command::CommandBuffer as _; -use peek_poke::{Peek, PeekCopy, Poke}; +use peek_poke::{Peek, PeekPoke, Poke}; use std::{ borrow::Borrow, @@ -52,23 +54,20 @@ use std::{ slice, }; -//Note: this could look better if `cbindgen` wasn't confused by &T used in place of -// a generic parameter, it's not able to manage -pub type OptionRef<'a, T> = Option<&'a T>; -pub type RenderPassColorAttachmentDescriptor<'a> = - RenderPassColorAttachmentDescriptorBase>; +pub type RenderPassColorAttachmentDescriptor = + RenderPassColorAttachmentDescriptorBase; pub type RenderPassDepthStencilAttachmentDescriptor = RenderPassDepthStencilAttachmentDescriptorBase; #[repr(C)] #[derive(Debug)] pub struct RenderPassDescriptor<'a> { - pub color_attachments: *const RenderPassColorAttachmentDescriptor<'a>, + pub color_attachments: *const RenderPassColorAttachmentDescriptor, pub color_attachments_length: usize, pub depth_stencil_attachment: Option<&'a RenderPassDepthStencilAttachmentDescriptor>, } -#[derive(Clone, Copy, Debug, PeekCopy, Poke)] 
+#[derive(Clone, Copy, Debug, Default, PeekPoke)] pub struct Rect { pub x: T, pub y: T, @@ -76,7 +75,7 @@ pub struct Rect { pub h: T, } -#[derive(Clone, Copy, Debug, PeekCopy, Poke)] +#[derive(Clone, Copy, Debug, PeekPoke)] enum RenderCommand { SetBindGroup { index: u8, @@ -129,26 +128,46 @@ enum RenderCommand { End, } +// required for PeekPoke +impl Default for RenderCommand { + fn default() -> Self { + RenderCommand::End + } +} + impl super::RawPass { pub unsafe fn new_render(parent_id: id::CommandEncoderId, desc: &RenderPassDescriptor) -> Self { let mut pass = Self::from_vec(Vec::::with_capacity(1), parent_id); - let mut targets = RawRenderTargets { - depth_stencil: desc.depth_stencil_attachment - .cloned() - .unwrap_or_else(|| mem::zeroed()), - colors: mem::zeroed(), - }; + let mut targets: RawRenderTargets = mem::zeroed(); + if let Some(ds) = desc.depth_stencil_attachment { + targets.depth_stencil = RawRenderPassDepthStencilAttachmentDescriptor { + attachment: ds.attachment.into_raw(), + depth: PassComponent { + load_op: ds.depth_load_op, + store_op: ds.depth_store_op, + clear_value: ds.clear_depth, + }, + stencil: PassComponent { + load_op: ds.stencil_load_op, + store_op: ds.stencil_store_op, + clear_value: ds.clear_stencil, + }, + }; + } + for (color, at) in targets.colors .iter_mut() .zip(slice::from_raw_parts(desc.color_attachments, desc.color_attachments_length)) { *color = RawRenderPassColorAttachmentDescriptor { - attachment: at.attachment, - resolve_target: at.resolve_target.map_or(id::TextureViewId::ERROR, |rt| *rt), - load_op: at.load_op, - store_op: at.store_op, - clear_color: at.clear_color, + attachment: at.attachment.into_raw(), + resolve_target: at.resolve_target.map_or(0, |id| id.into_raw()), + component: PassComponent { + load_op: at.load_op, + store_op: at.store_op, + clear_value: at.clear_color, + }, }; } @@ -182,6 +201,7 @@ impl OptionalState { enum DrawError { MissingBlendColor, MissingStencilReference, + MissingPipeline, 
IncompatibleBindGroup { index: u32, //expected: BindGroupLayoutId, @@ -255,6 +275,7 @@ struct State { binder: Binder, blend_color: OptionalState, stencil_reference: OptionalState, + pipeline: OptionalState, index: IndexState, vertex: VertexState, } @@ -266,9 +287,12 @@ impl State { if bind_mask != 0 { //let (expected, provided) = self.binder.entries[index as usize].info(); return Err(DrawError::IncompatibleBindGroup { - index: bind_mask.trailing_zeros() as u32, + index: bind_mask.trailing_zeros(), }); } + if self.pipeline == OptionalState::Required { + return Err(DrawError::MissingPipeline); + } if self.blend_color == OptionalState::Required { return Err(DrawError::MissingBlendColor); } @@ -317,29 +341,36 @@ impl Global { let mut targets: RawRenderTargets = unsafe { mem::zeroed() }; assert!(unsafe { peeker.add(RawRenderTargets::max_size()) <= raw_data_end }); - peeker = unsafe { targets.peek_from(peeker) }; + peeker = unsafe { RawRenderTargets::peek_from(peeker, &mut targets) }; let color_attachments = targets.colors .iter() - .take_while(|at| at.attachment != id::TextureViewId::ERROR) + .take_while(|at| at.attachment != 0) .map(|at| { RenderPassColorAttachmentDescriptor { - attachment: at.attachment, - resolve_target: if at.resolve_target == id::TextureViewId::ERROR { - None - } else { - Some(&at.resolve_target) - }, - load_op: at.load_op, - store_op: at.store_op, - clear_color: at.clear_color, + attachment: id::TextureViewId::from_raw(at.attachment).unwrap(), + resolve_target: id::TextureViewId::from_raw(at.resolve_target), + load_op: at.component.load_op, + store_op: at.component.store_op, + clear_color: at.component.clear_value, } }) - .collect::>(); - let depth_stencil_attachment = if targets.depth_stencil.attachment == id::TextureViewId::ERROR { + .collect::>(); + let depth_stencil_attachment_body; + let depth_stencil_attachment = if targets.depth_stencil.attachment == 0 { None } else { - Some(&targets.depth_stencil) + let at = &targets.depth_stencil; + 
depth_stencil_attachment_body = RenderPassDepthStencilAttachmentDescriptor { + attachment: id::TextureViewId::from_raw(at.attachment).unwrap(), + depth_load_op: at.depth.load_op, + depth_store_op: at.depth.store_op, + clear_depth: at.depth.clear_value, + stencil_load_op: at.stencil.load_op, + stencil_store_op: at.stencil.store_op, + clear_stencil: at.stencil.clear_value, + }; + Some(&depth_stencil_attachment_body) }; let (context, sample_count) = { @@ -485,7 +516,7 @@ impl Global { }); } - for &resolve_target in color_attachments + for resolve_target in color_attachments .iter() .flat_map(|at| at.resolve_target) { @@ -602,7 +633,8 @@ impl Global { } else { let sample_count_check = view_guard[color_attachments[i].attachment].samples; - assert!(sample_count_check > 1, "RenderPassColorAttachmentDescriptor with a resolve_target must have an attachment with sample_count > 1"); + assert!(sample_count_check > 1, + "RenderPassColorAttachmentDescriptor with a resolve_target must have an attachment with sample_count > 1"); resolve_ids.push(( attachment_index, hal::image::Layout::ColorAttachmentOptimal, @@ -641,7 +673,6 @@ impl Global { resolves: color_attachments .iter() .filter_map(|at| at.resolve_target) - .cloned() .collect(), depth_stencil: depth_stencil_attachment.map(|at| at.attachment), }; @@ -773,7 +804,7 @@ impl Global { resolves: color_attachments .iter() .filter_map(|at| at.resolve_target) - .map(|resolve| view_guard[*resolve].format) + .map(|resolve| view_guard[resolve].format) .collect(), depth_stencil: depth_stencil_attachment.map(|at| view_guard[at.attachment].format), }; @@ -784,6 +815,7 @@ impl Global { binder: Binder::new(cmb.features.max_bind_groups), blend_color: OptionalState::Unused, stencil_reference: OptionalState::Unused, + pipeline: OptionalState::Required, index: IndexState { bound_buffer_view: None, format: IndexFormat::Uint16, @@ -804,7 +836,7 @@ impl Global { }; loop { assert!(unsafe { peeker.add(RenderCommand::max_size()) } <= raw_data_end); - 
peeker = unsafe { command.peek_from(peeker) }; + peeker = unsafe { RenderCommand::peek_from(peeker, &mut command) }; match command { RenderCommand::SetBindGroup { index, num_dynamic_offsets, bind_group_id, phantom_offsets } => { let (new_peeker, offsets) = unsafe { @@ -845,13 +877,17 @@ impl Global { offsets .iter() .chain(follow_ups.flat_map(|(_, offsets)| offsets)) - .map(|&off| off as hal::command::DescriptorSetOffset), + .cloned() ); } }; } RenderCommand::SetPipeline(pipeline_id) => { - let pipeline = &pipeline_guard[pipeline_id]; + state.pipeline = OptionalState::Set; + let pipeline = trackers + .render_pipes + .use_extend(&*pipeline_guard, pipeline_id, (), ()) + .unwrap(); assert!( context.compatible(&pipeline.pass_context), @@ -894,7 +930,7 @@ impl Global { &pipeline_layout.raw, index, iter::once(desc_set), - offsets.iter().map(|offset| *offset as u32), + offsets.iter().cloned(), ); } } @@ -919,7 +955,10 @@ impl Global { let view = hal::buffer::IndexBufferView { buffer: &buffer.raw, - offset: range.start, + range: hal::buffer::SubRange { + offset: range.start, + size: Some(range.end - range.start), + }, index_type: conv::map_index_format(state.index.format), }; @@ -957,7 +996,10 @@ impl Global { let view = hal::buffer::IndexBufferView { buffer: &buffer.raw, - offset, + range: hal::buffer::SubRange { + offset, + size: Some(end - offset), + }, index_type: conv::map_index_format(state.index.format), }; @@ -977,8 +1019,12 @@ impl Global { buffer.size - offset }; + let range = hal::buffer::SubRange { + offset, + size: if size != 0 { Some(size) } else { None }, + }; unsafe { - raw.bind_vertex_buffers(slot, iter::once((&buffer.raw, offset))); + raw.bind_vertex_buffers(slot, iter::once((&buffer.raw, range))); } state.vertex.update_limits(); } @@ -1151,7 +1197,7 @@ pub mod render_ffi { index: index.try_into().unwrap(), num_dynamic_offsets: offset_length.try_into().unwrap(), bind_group_id, - phantom_offsets: PhantomSlice::new(), + phantom_offsets: 
PhantomSlice::default(), }); pass.encode_slice( slice::from_raw_parts(offsets, offset_length), diff --git a/gfx/wgpu/wgpu-core/src/command/transfer.rs b/gfx/wgpu/wgpu-core/src/command/transfer.rs index 6705c621b3bb..c38bb66906f0 100644 --- a/gfx/wgpu/wgpu-core/src/command/transfer.rs +++ b/gfx/wgpu/wgpu-core/src/command/transfer.rs @@ -7,11 +7,9 @@ use crate::{ device::{all_buffer_stages, all_image_stages}, hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token}, id::{BufferId, CommandEncoderId, TextureId}, - Extent3d, - Origin3d, }; -use wgt::{BufferAddress, BufferUsage, TextureUsage}; +use wgt::{BufferAddress, BufferUsage, Extent3d, Origin3d, TextureUsage}; use hal::command::CommandBuffer as _; use std::iter; diff --git a/gfx/wgpu/wgpu-core/src/conv.rs b/gfx/wgpu/wgpu-core/src/conv.rs index 1380a3f685d1..1f5daf2768dd 100644 --- a/gfx/wgpu/wgpu-core/src/conv.rs +++ b/gfx/wgpu/wgpu-core/src/conv.rs @@ -2,7 +2,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ -use crate::{binding_model, resource, Extent3d, Features, Origin3d}; +use crate::{binding_model, Features}; use wgt::{ BlendDescriptor, BlendFactor, @@ -12,13 +12,15 @@ use wgt::{ CompareFunction, CullMode, DepthStencilStateDescriptor, + Extent3d, FrontFace, IndexFormat, + Origin3d, PrimitiveTopology, + RasterizationStateDescriptor, StencilOperation, StencilStateFaceDescriptor, TextureFormat, - RasterizationStateDescriptor, VertexFormat, }; @@ -99,28 +101,61 @@ pub fn map_binding_type( binding: &binding_model::BindGroupLayoutEntry, ) -> hal::pso::DescriptorType { use crate::binding_model::BindingType as Bt; - use hal::pso::DescriptorType as H; + use hal::pso; match binding.ty { Bt::UniformBuffer => { - if binding.has_dynamic_offset { - H::UniformBufferDynamic - } else { - H::UniformBuffer + pso::DescriptorType::Buffer { + ty: pso::BufferDescriptorType::Uniform, + format: pso::BufferDescriptorFormat::Structured { + dynamic_offset: binding.has_dynamic_offset, + }, + } + } + Bt::StorageBuffer => { + pso::DescriptorType::Buffer { + ty: pso::BufferDescriptorType::Storage { + read_only: false, + }, + format: pso::BufferDescriptorFormat::Structured { + dynamic_offset: binding.has_dynamic_offset, + }, } } - Bt::StorageBuffer | Bt::ReadonlyStorageBuffer => { - if binding.has_dynamic_offset { - H::StorageBufferDynamic - } else { - H::StorageBuffer + pso::DescriptorType::Buffer { + ty: pso::BufferDescriptorType::Storage { + read_only: true, + }, + format: pso::BufferDescriptorFormat::Structured { + dynamic_offset: binding.has_dynamic_offset, + }, } } Bt::Sampler | - Bt::ComparisonSampler => H::Sampler, - Bt::SampledTexture => H::SampledImage, - Bt::ReadonlyStorageTexture | - Bt::WriteonlyStorageTexture => H::StorageImage, + Bt::ComparisonSampler => { + pso::DescriptorType::Sampler + } + Bt::SampledTexture => { + pso::DescriptorType::Image { + ty: pso::ImageDescriptorType::Sampled { + with_sampler: false, + }, + } + } + Bt::ReadonlyStorageTexture => { + 
pso::DescriptorType::Image { + ty: pso::ImageDescriptorType::Storage { + read_only: true, + }, + } + } + Bt::WriteonlyStorageTexture => { + pso::DescriptorType::Image { + ty: pso::ImageDescriptorType::Storage { + read_only: false, + }, + } + } } } @@ -262,7 +297,7 @@ pub fn map_depth_stencil_state_descriptor( || desc.depth_compare != CompareFunction::Always { Some(hal::pso::DepthTest { - fun: map_compare_function(desc.depth_compare), + fun: map_compare_function(desc.depth_compare).expect("DepthStencilStateDescriptor has undefined compare function"), write: desc.depth_write_enabled, }) } else { @@ -297,25 +332,26 @@ fn map_stencil_face( stencil_state_face_desc: &StencilStateFaceDescriptor, ) -> hal::pso::StencilFace { hal::pso::StencilFace { - fun: map_compare_function(stencil_state_face_desc.compare), + fun: map_compare_function(stencil_state_face_desc.compare).expect("StencilStateFaceDescriptor has undefined compare function"), op_fail: map_stencil_operation(stencil_state_face_desc.fail_op), op_depth_fail: map_stencil_operation(stencil_state_face_desc.depth_fail_op), op_pass: map_stencil_operation(stencil_state_face_desc.pass_op), } } -pub fn map_compare_function(compare_function: CompareFunction) -> hal::pso::Comparison { +pub fn map_compare_function(compare_function: CompareFunction) -> Option { use wgt::CompareFunction as Cf; use hal::pso::Comparison as H; match compare_function { - Cf::Never => H::Never, - Cf::Less => H::Less, - Cf::Equal => H::Equal, - Cf::LessEqual => H::LessEqual, - Cf::Greater => H::Greater, - Cf::NotEqual => H::NotEqual, - Cf::GreaterEqual => H::GreaterEqual, - Cf::Always => H::Always, + Cf::Undefined => None, + Cf::Never => Some(H::Never), + Cf::Less => Some(H::Less), + Cf::Equal => Some(H::Equal), + Cf::LessEqual => Some(H::LessEqual), + Cf::Greater => Some(H::Greater), + Cf::NotEqual => Some(H::NotEqual), + Cf::GreaterEqual => Some(H::GreaterEqual), + Cf::Always => Some(H::Always), } } @@ -450,7 +486,7 @@ fn checked_u32_as_u16(value: 
u32) -> u16 { } pub fn map_texture_dimension_size( - dimension: resource::TextureDimension, + dimension: wgt::TextureDimension, Extent3d { width, height, @@ -459,7 +495,7 @@ pub fn map_texture_dimension_size( array_size: u32, sample_size: u32, ) -> hal::image::Kind { - use crate::resource::TextureDimension::*; + use wgt::TextureDimension::*; use hal::image::Kind as H; match dimension { D1 => { @@ -624,15 +660,15 @@ pub fn map_color_u32(color: &Color) -> [u32; 4] { ] } -pub fn map_filter(filter: resource::FilterMode) -> hal::image::Filter { +pub fn map_filter(filter: wgt::FilterMode) -> hal::image::Filter { match filter { - resource::FilterMode::Nearest => hal::image::Filter::Nearest, - resource::FilterMode::Linear => hal::image::Filter::Linear, + wgt::FilterMode::Nearest => hal::image::Filter::Nearest, + wgt::FilterMode::Linear => hal::image::Filter::Linear, } } -pub fn map_wrap(address: resource::AddressMode) -> hal::image::WrapMode { - use crate::resource::AddressMode as Am; +pub fn map_wrap(address: wgt::AddressMode) -> hal::image::WrapMode { + use wgt::AddressMode as Am; use hal::image::WrapMode as W; match address { Am::ClampToEdge => W::Clamp, @@ -644,23 +680,24 @@ pub fn map_wrap(address: resource::AddressMode) -> hal::image::WrapMode { pub fn map_rasterization_state_descriptor( desc: &RasterizationStateDescriptor, ) -> hal::pso::Rasterizer { - hal::pso::Rasterizer { + use hal::pso; + pso::Rasterizer { depth_clamping: false, - polygon_mode: hal::pso::PolygonMode::Fill, + polygon_mode: pso::PolygonMode::Fill, cull_face: match desc.cull_mode { - CullMode::None => hal::pso::Face::empty(), - CullMode::Front => hal::pso::Face::FRONT, - CullMode::Back => hal::pso::Face::BACK, + CullMode::None => pso::Face::empty(), + CullMode::Front => pso::Face::FRONT, + CullMode::Back => pso::Face::BACK, }, front_face: match desc.front_face { - FrontFace::Ccw => hal::pso::FrontFace::CounterClockwise, - FrontFace::Cw => hal::pso::FrontFace::Clockwise, + FrontFace::Ccw => 
pso::FrontFace::CounterClockwise, + FrontFace::Cw => pso::FrontFace::Clockwise, }, depth_bias: if desc.depth_bias != 0 || desc.depth_bias_slope_scale != 0.0 || desc.depth_bias_clamp != 0.0 { - Some(hal::pso::State::Static(hal::pso::DepthBias { + Some(pso::State::Static(pso::DepthBias { const_factor: desc.depth_bias as f32, slope_factor: desc.depth_bias_slope_scale, clamp: desc.depth_bias_clamp, @@ -669,6 +706,7 @@ pub fn map_rasterization_state_descriptor( None }, conservative: false, + line_width: pso::State::Static(1.0), } } diff --git a/gfx/wgpu/wgpu-core/src/device/life.rs b/gfx/wgpu/wgpu-core/src/device/life.rs index de3f646a986e..ffa2869cf6b8 100644 --- a/gfx/wgpu/wgpu-core/src/device/life.rs +++ b/gfx/wgpu/wgpu-core/src/device/life.rs @@ -16,8 +16,8 @@ use crate::{ use copyless::VecHelper as _; use hal::device::Device as _; use parking_lot::Mutex; -use rendy_descriptor::{DescriptorAllocator, DescriptorSet}; -use rendy_memory::{Heaps, MemoryBlock}; +use gfx_descriptor::{DescriptorAllocator, DescriptorSet}; +use gfx_memory::{Heaps, MemoryBlock}; use std::{ sync::atomic::Ordering, @@ -34,6 +34,8 @@ pub struct SuspectedResources { pub(crate) texture_views: Vec, pub(crate) samplers: Vec, pub(crate) bind_groups: Vec, + pub(crate) compute_pipelines: Vec, + pub(crate) render_pipelines: Vec, } impl SuspectedResources { @@ -43,6 +45,8 @@ impl SuspectedResources { self.texture_views.clear(); self.samplers.clear(); self.bind_groups.clear(); + self.compute_pipelines.clear(); + self.render_pipelines.clear(); } pub fn extend(&mut self, other: &Self) { @@ -51,6 +55,8 @@ impl SuspectedResources { self.texture_views.extend_from_slice(&other.texture_views); self.samplers.extend_from_slice(&other.samplers); self.bind_groups.extend_from_slice(&other.bind_groups); + self.compute_pipelines.extend_from_slice(&other.compute_pipelines); + self.render_pipelines.extend_from_slice(&other.render_pipelines); } } @@ -65,6 +71,8 @@ struct NonReferencedResources { samplers: Vec, 
framebuffers: Vec, desc_sets: Vec>, + compute_pipes: Vec, + graphics_pipes: Vec, } impl NonReferencedResources { @@ -76,6 +84,8 @@ impl NonReferencedResources { samplers: Vec::new(), framebuffers: Vec::new(), desc_sets: Vec::new(), + compute_pipes: Vec::new(), + graphics_pipes: Vec::new(), } } @@ -86,6 +96,8 @@ impl NonReferencedResources { self.samplers.extend(other.samplers); self.framebuffers.extend(other.framebuffers); self.desc_sets.extend(other.desc_sets); + self.compute_pipes.extend(other.compute_pipes); + self.graphics_pipes.extend(other.graphics_pipes); } unsafe fn clean( @@ -97,6 +109,7 @@ impl NonReferencedResources { if !self.buffers.is_empty() { let mut heaps = heaps_mutex.lock(); for (raw, memory) in self.buffers.drain(..) { + log::trace!("Buffer {:?} is destroyed with memory {:?}", raw, memory); device.destroy_buffer(raw); heaps.free(device, memory); } @@ -124,6 +137,13 @@ impl NonReferencedResources { .lock() .free(self.desc_sets.drain(..)); } + + for raw in self.compute_pipes.drain(..) { + device.destroy_compute_pipeline(raw); + } + for raw in self.graphics_pipes.drain(..) { + device.destroy_graphics_pipeline(raw); + } } } @@ -218,10 +238,14 @@ impl LifetimeTracker { } /// Returns the last submission index that is done. 
- fn check_last_done( + pub fn triage_submissions( &mut self, device: &B::Device, + force_wait: bool, ) -> SubmissionIndex { + if force_wait { + self.wait_idle(device); + } //TODO: enable when `is_sorted_by_key` is stable //debug_assert!(self.active.is_sorted_by_key(|a| a.index)); let done_count = self @@ -250,22 +274,19 @@ impl LifetimeTracker { pub fn cleanup( &mut self, device: &B::Device, - force_wait: bool, heaps_mutex: &Mutex>, descriptor_allocator_mutex: &Mutex>, - ) -> SubmissionIndex { - if force_wait { - self.wait_idle(device); - } - let last_done = self.check_last_done(device); + ) { unsafe { self.free_resources.clean( device, heaps_mutex, descriptor_allocator_mutex, ); + descriptor_allocator_mutex + .lock() + .cleanup(device); } - last_done } } @@ -376,6 +397,7 @@ impl LifetimeTracker { if trackers.buffers.remove_abandoned(id) { hub.buffers.free_id(id); let res = guard.remove(id).unwrap(); + log::debug!("Buffer {:?} is detached", id); let submit_index = res.life_guard.submission_index.load(Ordering::Acquire); self.active @@ -386,6 +408,44 @@ impl LifetimeTracker { } } } + + if !self.suspected_resources.compute_pipelines.is_empty() { + let mut trackers = trackers.lock(); + let (mut guard, _) = hub.compute_pipelines.write(token); + + for id in self.suspected_resources.compute_pipelines.drain(..) { + if trackers.compute_pipes.remove_abandoned(id) { + hub.compute_pipelines.free_id(id); + let res = guard.remove(id).unwrap(); + + let submit_index = res.life_guard.submission_index.load(Ordering::Acquire); + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .compute_pipes.push(res.raw); + } + } + } + + if !self.suspected_resources.render_pipelines.is_empty() { + let mut trackers = trackers.lock(); + let (mut guard, _) = hub.render_pipelines.write(token); + + for id in self.suspected_resources.render_pipelines.drain(..) 
{ + if trackers.render_pipes.remove_abandoned(id) { + hub.render_pipelines.free_id(id); + let res = guard.remove(id).unwrap(); + + let submit_index = res.life_guard.submission_index.load(Ordering::Acquire); + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .graphics_pipes.push(res.raw); + } + } + } } pub(crate) fn triage_mapped( @@ -485,27 +545,41 @@ impl LifetimeTracker { &mut self, global: &Global, raw: &B::Device, + trackers: &Mutex, token: &mut Token>, ) -> Vec { if self.ready_to_map.is_empty() { return Vec::new(); } + let hub = B::hub(global); let (mut buffer_guard, _) = B::hub(global).buffers.write(token); - self.ready_to_map - .drain(..) - .map(|buffer_id| { - let buffer = &mut buffer_guard[buffer_id]; - let mapping = buffer.pending_mapping.take().unwrap(); + let mut pending_callbacks: Vec = Vec::with_capacity(self.ready_to_map.len()); + let mut trackers = trackers.lock(); + for buffer_id in self.ready_to_map.drain(..) { + let buffer = &mut buffer_guard[buffer_id]; + if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id) { + buffer.map_state = resource::BufferMapState::Idle; + log::debug!("Mapping request is dropped because the buffer is destroyed."); + hub.buffers.free_id(buffer_id); + let buffer = buffer_guard.remove(buffer_id).unwrap(); + self.free_resources.buffers.push((buffer.raw, buffer.memory)); + } else { + let mapping = match std::mem::replace(&mut buffer.map_state, resource::BufferMapState::Active) { + resource::BufferMapState::Waiting(pending_mapping) => pending_mapping, + _ => panic!("No pending mapping."), + }; + log::debug!("Buffer {:?} map state -> Active", buffer_id); let result = match mapping.op { - resource::BufferMapOperation::Read(..) => { - super::map_buffer(raw, buffer, mapping.range, super::HostMap::Read) + resource::BufferMapOperation::Read { .. 
} => { + super::map_buffer(raw, buffer, mapping.sub_range, super::HostMap::Read) } - resource::BufferMapOperation::Write(..) => { - super::map_buffer(raw, buffer, mapping.range, super::HostMap::Write) + resource::BufferMapOperation::Write { .. } => { + super::map_buffer(raw, buffer, mapping.sub_range, super::HostMap::Write) } }; - (mapping.op, result) - }) - .collect() + pending_callbacks.push((mapping.op, result)); + } + } + pending_callbacks } } diff --git a/gfx/wgpu/wgpu-core/src/device/mod.rs b/gfx/wgpu/wgpu-core/src/device/mod.rs index d1f516907e07..449ff62e22ea 100644 --- a/gfx/wgpu/wgpu-core/src/device/mod.rs +++ b/gfx/wgpu/wgpu-core/src/device/mod.rs @@ -18,9 +18,11 @@ use crate::{ Stored, }; -use wgt::{BufferAddress, InputStepMode, TextureFormat}; +use wgt::{BufferAddress, InputStepMode, TextureDimension, TextureFormat, BIND_BUFFER_ALIGNMENT}; use arrayvec::ArrayVec; use copyless::VecHelper as _; +use gfx_descriptor::DescriptorAllocator; +use gfx_memory::{Block, Heaps}; use hal::{ self, command::CommandBuffer as _, @@ -29,8 +31,6 @@ use hal::{ window::{PresentationSurface as _, Surface as _}, }; use parking_lot::{Mutex, MutexGuard}; -use rendy_descriptor::{DescriptorAllocator, DescriptorRanges}; -use rendy_memory::{Block, Heaps}; use smallvec::SmallVec; use std::{ @@ -38,7 +38,6 @@ use std::{ ffi, iter, marker::PhantomData, - ops, ptr, slice, sync::atomic::Ordering, @@ -50,9 +49,6 @@ pub const MAX_COLOR_TARGETS: usize = 4; pub const MAX_MIP_LEVELS: usize = 16; pub const MAX_VERTEX_BUFFERS: usize = 8; -/// Bound uniform/storage buffer offsets must be aligned to this number. 
-pub const BIND_BUFFER_ALIGNMENT: hal::buffer::Offset = 256; - pub fn all_buffer_stages() -> hal::pso::PipelineStage { use hal::pso::PipelineStage as Ps; Ps::DRAW_INDIRECT @@ -120,33 +116,41 @@ pub type BufferMapWriteCallback = fn map_buffer( raw: &B::Device, buffer: &mut resource::Buffer, - buffer_range: ops::Range, + sub_range: hal::buffer::SubRange, kind: HostMap, ) -> BufferMapResult { - let is_coherent = buffer - .memory - .properties() - .contains(hal::memory::Properties::COHERENT); - let (ptr, mapped_range) = { - let mapped = buffer.memory.map(raw, buffer_range)?; - (mapped.ptr(), mapped.range()) + let (ptr, sync_range) = { + let segment = hal::memory::Segment { + offset: sub_range.offset, + size: sub_range.size, + }; + let mapped = buffer.memory.map(raw, segment)?; + let sync_range = if mapped.is_coherent() { + None + } else { + Some(mapped.range()) + }; + (mapped.ptr(), sync_range) }; - if !is_coherent { + if let Some(range) = sync_range { + let segment = hal::memory::Segment { + offset: range.start, + size: Some(range.end - range.start), + }; match kind { HostMap::Read => unsafe { raw.invalidate_mapped_memory_ranges(iter::once(( buffer.memory.memory(), - mapped_range, + segment, ))) .unwrap(); }, HostMap::Write => { - buffer.mapped_write_ranges.push(mapped_range); + buffer.mapped_write_segments.push(segment); } } } - Ok(ptr.as_ptr()) } @@ -154,21 +158,27 @@ fn unmap_buffer( raw: &B::Device, buffer: &mut resource::Buffer, ) { - if !buffer.mapped_write_ranges.is_empty() { + match buffer.map_state { + resource::BufferMapState::Idle => { + log::error!("Buffer already unmapped"); + return; + }, + _ => buffer.map_state = resource::BufferMapState::Idle, + } + + if !buffer.mapped_write_segments.is_empty() { unsafe { raw .flush_mapped_memory_ranges( buffer - .mapped_write_ranges + .mapped_write_segments .iter() .map(|r| (buffer.memory.memory(), r.clone())), ) .unwrap() }; - buffer.mapped_write_ranges.clear(); + buffer.mapped_write_segments.clear(); } - - 
buffer.memory.unmap(raw); } //Note: this logic is specifically moved out of `handle_mapping()` in order to @@ -183,11 +193,11 @@ fn fire_map_callbacks>(callback } }; match operation { - resource::BufferMapOperation::Read(on_read) => { - on_read(status, ptr) + resource::BufferMapOperation::Read { callback, userdata } => unsafe { + callback(status, ptr, userdata) } - resource::BufferMapOperation::Write(on_write) => { - on_write(status, ptr) + resource::BufferMapOperation::Write { callback, userdata } => unsafe { + callback(status, ptr, userdata) } } } @@ -218,6 +228,7 @@ impl Device { adapter_id: id::AdapterId, queue_group: hal::queue::QueueGroup, mem_props: hal::adapter::MemoryProperties, + non_coherent_atom_size: u64, supports_texture_d24_s8: bool, max_bind_groups: u32, ) -> Self { @@ -225,26 +236,19 @@ impl Device { let life_guard = LifeGuard::new(); life_guard.submission_index.fetch_add(1, Ordering::Relaxed); - let heaps = { - let types = mem_props.memory_types.iter().map(|mt| { - use rendy_memory::{DynamicConfig, HeapsConfig, LinearConfig}; - let config = HeapsConfig { - linear: if mt.properties.contains(hal::memory::Properties::CPU_VISIBLE) { - Some(LinearConfig { - linear_size: 0x10_00_00, - }) - } else { - None - }, - dynamic: Some(DynamicConfig { - block_size_granularity: 0x1_00, - max_chunk_size: 0x1_00_00_00, - min_device_allocation: 0x1_00_00, - }), - }; - (mt.properties, mt.heap_index as u32, config) - }); - unsafe { Heaps::new(types, mem_props.memory_heaps.iter().cloned()) } + let heaps = unsafe { + Heaps::new( + &mem_props, + gfx_memory::GeneralConfig { + block_size_granularity: 0x100, + max_chunk_size: 0x100_0000, + min_device_allocation: 0x1_0000, + }, + gfx_memory::LinearConfig { + linear_size: 0x10_0000, + }, + non_coherent_atom_size, + ) }; Device { @@ -284,18 +288,13 @@ impl Device { life_tracker.triage_suspected(global, &self.trackers, token); life_tracker.triage_mapped(global, token); life_tracker.triage_framebuffers(global, &mut 
*self.framebuffers.lock(), token); + let _last_done = life_tracker.triage_submissions(&self.raw, force_wait); + let callbacks = life_tracker.handle_mapping(global, &self.raw, &self.trackers, token); life_tracker.cleanup( &self.raw, - force_wait, &self.mem_allocator, &self.desc_allocator, ); - let callbacks = life_tracker.handle_mapping(global, &self.raw, token); - - unsafe { - self.desc_allocator.lock().cleanup(&self.raw); - } - callbacks } @@ -304,25 +303,31 @@ impl Device { self_id: id::DeviceId, desc: &wgt::BufferDescriptor, ) -> resource::Buffer { + use gfx_memory::{Kind, MemoryUsage}; + debug_assert_eq!(self_id.backend(), B::VARIANT); let (usage, _memory_properties) = conv::map_buffer_usage(desc.usage); - - let rendy_usage = { - use rendy_memory::MemoryUsageValue as Muv; + let (kind, mem_usage) = { use wgt::BufferUsage as Bu; if !desc.usage.intersects(Bu::MAP_READ | Bu::MAP_WRITE) { - Muv::Data + (Kind::General, MemoryUsage::Private) } else if (Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage) { - Muv::Upload + (Kind::Linear, MemoryUsage::Staging { read_back: false }) } else if (Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage) { - Muv::Download + (Kind::Linear, MemoryUsage::Staging { read_back: true }) } else { - Muv::Dynamic + (Kind::General, MemoryUsage::Dynamic { sparse_updates: false }) } }; let mut buffer = unsafe { self.raw.create_buffer(desc.size, usage).unwrap() }; + if !desc.label.is_null() { + unsafe { + let label = ffi::CStr::from_ptr(desc.label).to_string_lossy(); + self.raw.set_buffer_name(&mut buffer, &label) + }; + } let requirements = unsafe { self.raw.get_buffer_requirements(&buffer) }; let memory = self .mem_allocator @@ -330,7 +335,8 @@ impl Device { .allocate( &self.raw, requirements.type_mask as u32, - rendy_usage, + mem_usage, + kind, requirements.size, requirements.alignment, ) @@ -338,7 +344,11 @@ impl Device { unsafe { self.raw - .bind_buffer_memory(memory.memory(), memory.range().start, &mut buffer) + .bind_buffer_memory( + 
memory.memory(), + memory.segment().offset, + &mut buffer, + ) .unwrap() }; @@ -352,8 +362,8 @@ impl Device { memory, size: desc.size, full_range: (), - mapped_write_ranges: Vec::new(), - pending_mapping: None, + mapped_write_segments: Vec::new(), + map_state: resource::BufferMapState::Idle, life_guard: LifeGuard::new(), } } @@ -361,7 +371,7 @@ impl Device { fn create_texture( &self, self_id: id::DeviceId, - desc: &resource::TextureDescriptor, + desc: &wgt::TextureDescriptor, ) -> resource::Texture { debug_assert_eq!(self_id.backend(), B::VARIANT); @@ -390,23 +400,27 @@ impl Device { // 2D textures with array layer counts that are multiples of 6 could be cubemaps // Following gpuweb/gpuweb#68 always add the hint in that case - if desc.dimension == resource::TextureDimension::D2 && desc.array_layer_count % 6 == 0 { + if desc.dimension == TextureDimension::D2 && desc.array_layer_count % 6 == 0 { view_capabilities |= hal::image::ViewCapabilities::KIND_CUBE; }; // TODO: 2D arrays, cubemap arrays let mut image = unsafe { - self.raw.create_image( + let mut image = self.raw.create_image( kind, desc.mip_level_count as hal::image::Level, format, hal::image::Tiling::Optimal, usage, view_capabilities, - ) - } - .unwrap(); + ).unwrap(); + if !desc.label.is_null() { + let label = ffi::CStr::from_ptr(desc.label).to_string_lossy(); + self.raw.set_image_name(&mut image, &label); + } + image + }; let requirements = unsafe { self.raw.get_image_requirements(&image) }; let memory = self @@ -415,7 +429,8 @@ impl Device { .allocate( &self.raw, requirements.type_mask as u32, - rendy_memory::Data, + gfx_memory::MemoryUsage::Private, + gfx_memory::Kind::General, requirements.size, requirements.alignment, ) @@ -423,7 +438,11 @@ impl Device { unsafe { self.raw - .bind_image_memory(memory.memory(), memory.range().start, &mut image) + .bind_image_memory( + memory.memory(), + memory.segment().offset, + &mut image, + ) .unwrap() }; @@ -469,18 +488,18 @@ impl Device { } pub(crate) fn 
dispose(self) { + self.life_tracker.lock().triage_submissions(&self.raw, true); self.life_tracker.lock().cleanup( &self.raw, - true, &self.mem_allocator, &self.desc_allocator, ); self.com_allocator.destroy(&self.raw); - let desc_alloc = self.desc_allocator.into_inner(); - let mem_alloc = self.mem_allocator.into_inner(); + let mut desc_alloc = self.desc_allocator.into_inner(); + let mut mem_alloc = self.mem_allocator.into_inner(); unsafe { - desc_alloc.dispose(&self.raw); - mem_alloc.dispose(&self.raw); + desc_alloc.clear(&self.raw); + mem_alloc.clear(&self.raw); for (_, rp) in self.render_passes.lock().drain() { self.raw.destroy_render_pass(rp); } @@ -510,6 +529,7 @@ impl Global { let ref_count = buffer.life_guard.add_ref(); let id = hub.buffers.register_identity(id_in, buffer, &mut token); + log::info!("Created buffer {:?} with {:?}", id, desc); device .trackers .lock() @@ -539,8 +559,16 @@ impl Global { let mut buffer = device.create_buffer(device_id, &desc); let ref_count = buffer.life_guard.add_ref(); - let pointer = match map_buffer(&device.raw, &mut buffer, 0 .. desc.size, HostMap::Write) { - Ok(ptr) => ptr, + let pointer = match map_buffer( + &device.raw, + &mut buffer, + hal::buffer::SubRange::WHOLE, + HostMap::Write, + ) { + Ok(ptr) => { + buffer.map_state = resource::BufferMapState::Active; + ptr + }, Err(e) => { log::error!("failed to create buffer in a mapped state: {:?}", e); ptr::null_mut() @@ -548,6 +576,7 @@ impl Global { }; let id = hub.buffers.register_identity(id_in, buffer, &mut token); + log::info!("Created mapped buffer {:?} with {:?}", id, desc); device.trackers .lock() .buffers.init( @@ -580,7 +609,7 @@ impl Global { match map_buffer( &device.raw, &mut buffer, - offset .. offset + data.len() as BufferAddress, + hal::buffer::SubRange { offset, size: Some(data.len() as BufferAddress) }, HostMap::Write, ) { Ok(ptr) => unsafe { @@ -615,7 +644,7 @@ impl Global { match map_buffer( &device.raw, &mut buffer, - offset .. 
offset + data.len() as BufferAddress, + hal::buffer::SubRange { offset, size: Some(data.len() as BufferAddress) }, HostMap::Read, ) { Ok(ptr) => unsafe { @@ -634,6 +663,7 @@ impl Global { let hub = B::hub(self); let mut token = Token::root(); + log::info!("Buffer {:?} is dropped", buffer_id); let device_id = { let (mut buffer_guard, _) = hub.buffers.write(&mut token); let buffer = &mut buffer_guard[buffer_id]; @@ -650,7 +680,7 @@ impl Global { pub fn device_create_texture( &self, device_id: id::DeviceId, - desc: &resource::TextureDescriptor, + desc: &wgt::TextureDescriptor, id_in: Input, ) -> id::TextureId { let hub = B::hub(self); @@ -694,7 +724,7 @@ impl Global { pub fn texture_create_view( &self, texture_id: id::TextureId, - desc: Option<&resource::TextureViewDescriptor>, + desc: Option<&wgt::TextureViewDescriptor>, id_in: Input, ) -> id::TextureViewId { let hub = B::hub(self); @@ -803,7 +833,7 @@ impl Global { pub fn device_create_sampler( &self, device_id: id::DeviceId, - desc: &resource::SamplerDescriptor, + desc: &wgt::SamplerDescriptor, id_in: Input, ) -> id::SamplerId { let hub = B::hub(self); @@ -822,10 +852,10 @@ impl Global { ), lod_bias: hal::image::Lod(0.0), lod_range: hal::image::Lod(desc.lod_min_clamp) .. 
hal::image::Lod(desc.lod_max_clamp), - comparison: desc.compare.cloned().map(conv::map_compare_function), + comparison: conv::map_compare_function(desc.compare), border: hal::image::PackedColor(0), normalized: true, - anisotropic: hal::image::Anisotropic::Off, //TODO + anisotropy_clamp: None, //TODO }; let sampler = resource::Sampler { @@ -906,10 +936,15 @@ impl Global { let (device_guard, mut token) = hub.devices.read(&mut token); let device = &device_guard[device_id]; let raw = unsafe { - device + let mut raw_layout = device .raw .create_descriptor_set_layout(&raw_bindings, &[]) - .unwrap() + .unwrap(); + if !desc.label.is_null() { + let label = ffi::CStr::from_ptr(desc.label).to_string_lossy(); + device.raw.set_descriptor_set_layout_name(&mut raw_layout, &label); + } + raw_layout }; let layout = binding_model::BindGroupLayout { @@ -919,7 +954,7 @@ impl Global { ref_count: device.life_guard.add_ref(), }, entries: entry_map, - desc_ranges: DescriptorRanges::from_bindings(&raw_bindings), + desc_counts: raw_bindings.iter().cloned().collect(), dynamic_count: entries.iter().filter(|b| b.has_dynamic_offset).count(), }; @@ -1005,7 +1040,7 @@ impl Global { let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token); let bind_group_layout = &bind_group_layout_guard[desc.layout]; let entries = - unsafe { slice::from_raw_parts(desc.entries, desc.entries_length as usize) }; + unsafe { slice::from_raw_parts(desc.entries, desc.entries_length) }; assert_eq!(entries.len(), bind_group_layout.entries.len()); let desc_set = unsafe { @@ -1016,7 +1051,7 @@ impl Global { .allocate( &device.raw, &bind_group_layout.raw, - bind_group_layout.desc_ranges, + &bind_group_layout.desc_counts, 1, &mut desc_sets, ) @@ -1024,6 +1059,14 @@ impl Global { desc_sets.pop().unwrap() }; + if !desc.label.is_null() { + //TODO: https://github.com/gfx-rs/gfx-extras/pull/5 + //unsafe { + // let label = ffi::CStr::from_ptr(desc.label).to_string_lossy(); + // 
device.raw.set_descriptor_set_name(desc_set.raw_mut(), &label); + //} + } + // fill out the descriptors let mut used = TrackerSet::new(B::VARIANT); { @@ -1058,7 +1101,7 @@ impl Global { } }; assert_eq!( - bb.offset as hal::buffer::Offset % alignment, + bb.offset % alignment, 0, "Misaligned buffer offset {}", bb.offset @@ -1073,21 +1116,20 @@ impl Global { usage ); - let end = if bb.size == 0 { - None - } else { - let end = bb.offset + bb.size; - assert!( - end <= buffer.size, - "Bound buffer range {:?} does not fit in buffer size {}", - bb.offset .. end, - buffer.size - ); - Some(end) + let sub_range = hal::buffer::SubRange { + offset: bb.offset, + size: if bb.size == 0 { None } else { + let end = bb.offset + bb.size; + assert!( + end <= buffer.size, + "Bound buffer range {:?} does not fit in buffer size {}", + bb.offset .. end, + buffer.size + ); + Some(bb.size) + }, }; - - let range = Some(bb.offset) .. end; - hal::pso::Descriptor::Buffer(&buffer.raw, range) + hal::pso::Descriptor::Buffer(&buffer.raw, sub_range) } binding_model::BindingResource::Sampler(id) => { match decl.ty { @@ -1245,7 +1287,7 @@ impl Global { pub fn device_create_command_encoder( &self, device_id: id::DeviceId, - _desc: &wgt::CommandEncoderDescriptor, + desc: &wgt::CommandEncoderDescriptor, id_in: Input, ) -> id::CommandEncoderId { let hub = B::hub(self); @@ -1263,17 +1305,22 @@ impl Global { .lock_life(&mut token) .lowest_active_submission(); - let mut comb = device + let mut command_buffer = device .com_allocator .allocate(dev_stored, &device.raw, device.features, lowest_active_index); unsafe { - comb.raw.last_mut().unwrap().begin_primary( + let raw_command_buffer = command_buffer.raw.last_mut().unwrap(); + if !desc.label.is_null() { + let label = ffi::CStr::from_ptr(desc.label).to_string_lossy(); + device.raw.set_command_buffer_name(raw_command_buffer, &label); + } + raw_command_buffer.begin_primary( hal::command::CommandBufferFlags::ONE_TIME_SUBMIT, ); } hub.command_buffers - 
.register_identity(id_in, comb, &mut token) + .register_identity(id_in, command_buffer, &mut token) } pub fn command_encoder_destroy( @@ -1294,6 +1341,8 @@ impl Global { // that it references for destruction in the next GC pass. { let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); + let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token); + let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token); let (buffer_guard, mut token) = hub.buffers.read(&mut token); let (texture_guard, mut token) = hub.textures.read(&mut token); let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); @@ -1324,6 +1373,16 @@ impl Global { device.temp_suspected.samplers.push(id); } } + for id in comb.trackers.compute_pipes.used() { + if compute_pipe_guard[id].life_guard.ref_count.is_none() { + device.temp_suspected.compute_pipelines.push(id); + } + } + for id in comb.trackers.render_pipes.used() { + if render_pipe_guard[id].life_guard.ref_count.is_none() { + device.temp_suspected.render_pipelines.push(id); + } + } } device @@ -1359,7 +1418,9 @@ impl Global { let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token); let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token); let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); + let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token); + let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token); + let (mut buffer_guard, mut token) = hub.buffers.write(&mut token); let (texture_guard, mut token) = hub.textures.read(&mut token); let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); let (sampler_guard, _) = hub.samplers.read(&mut token); @@ -1392,8 +1453,13 @@ impl Global { // update submission IDs for id in comb.trackers.buffers.used() { - assert!(buffer_guard[id].pending_mapping.is_none()); + if 
let resource::BufferMapState::Waiting(_) = buffer_guard[id].map_state { + panic!("Buffer has a pending mapping."); + } if !buffer_guard[id].life_guard.use_at(submit_index) { + if let resource::BufferMapState::Active = buffer_guard[id].map_state { + unmap_buffer(&device.raw, &mut buffer_guard[id]); + } device.temp_suspected.buffers.push(id); } } @@ -1417,6 +1483,16 @@ impl Global { device.temp_suspected.samplers.push(id); } } + for id in comb.trackers.compute_pipes.used() { + if !compute_pipe_guard[id].life_guard.use_at(submit_index) { + device.temp_suspected.compute_pipelines.push(id); + } + } + for id in comb.trackers.render_pipes.used() { + if !render_pipe_guard[id].life_guard.use_at(submit_index) { + device.temp_suspected.render_pipelines.push(id); + } + } // execute resource transitions let mut transit = device.com_allocator.extend(comb); @@ -1747,6 +1823,7 @@ impl Global { index_format: desc.vertex_state.index_format, vertex_strides, sample_count: sc, + life_guard: LifeGuard::new(), }; hub.render_pipelines @@ -1756,9 +1833,18 @@ impl Global { pub fn render_pipeline_destroy(&self, render_pipeline_id: id::RenderPipelineId) { let hub = B::hub(self); let mut token = Token::root(); - let (_, mut token) = hub.devices.read(&mut token); - //TODO: track usage by GPU - hub.render_pipelines.unregister(render_pipeline_id, &mut token); + let (device_guard, mut token) = hub.devices.read(&mut token); + + let device_id = { + let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token); + let pipeline = &mut pipeline_guard[render_pipeline_id]; + pipeline.life_guard.ref_count.take(); + pipeline.device_id.value + }; + + device_guard[device_id] + .lock_life(&mut token) + .suspected_resources.render_pipelines.push(render_pipeline_id); } pub fn device_create_compute_pipeline( @@ -1814,6 +1900,7 @@ impl Global { value: device_id, ref_count: device.life_guard.add_ref(), }, + life_guard: LifeGuard::new(), }; hub.compute_pipelines .register_identity(id_in, pipeline, &mut token) 
@@ -1822,9 +1909,19 @@ impl Global { pub fn compute_pipeline_destroy(&self, compute_pipeline_id: id::ComputePipelineId) { let hub = B::hub(self); let mut token = Token::root(); - let (_, mut token) = hub.devices.read(&mut token); - //TODO: track usage by GPU - hub.compute_pipelines.unregister(compute_pipeline_id, &mut token); + let (device_guard, mut token) = hub.devices.read(&mut token); + + let device_id = { + let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token); + let pipeline = &mut pipeline_guard[compute_pipeline_id]; + pipeline.life_guard.ref_count.take(); + pipeline.device_id.value + }; + + device_guard[device_id] + .lock_life(&mut token) + .suspected_resources.compute_pipelines.push(compute_pipeline_id); + } pub fn device_create_swap_chain( @@ -1997,16 +2094,23 @@ impl Global { assert!(buffer.usage.contains(wgt::BufferUsage::MAP_WRITE)); } - if buffer.pending_mapping.is_some() { - operation.call_error(); - return; - } + buffer.map_state = match buffer.map_state { + resource::BufferMapState::Active => panic!("Buffer already mapped"), + resource::BufferMapState::Waiting(_) => { + operation.call_error(); + return; + } + resource::BufferMapState::Idle => resource::BufferMapState::Waiting(resource::BufferPendingMapping { + sub_range: hal::buffer::SubRange { + offset: range.start, + size: Some(range.end - range.start), + }, + op: operation, + parent_ref_count: buffer.life_guard.add_ref(), + }), + }; + log::debug!("Buffer {:?} map state -> Waiting", buffer_id); - buffer.pending_mapping = Some(resource::BufferPendingMapping { - range, - op: operation, - parent_ref_count: buffer.life_guard.add_ref(), - }); (buffer.device_id.value, buffer.life_guard.add_ref()) }; @@ -2031,6 +2135,7 @@ impl Global { let (mut buffer_guard, _) = hub.buffers.write(&mut token); let buffer = &mut buffer_guard[buffer_id]; + log::debug!("Buffer {:?} map state -> Idle", buffer_id); unmap_buffer( &device_guard[buffer.device_id.value].raw, buffer, diff --git 
a/gfx/wgpu/wgpu-core/src/hub.rs b/gfx/wgpu/wgpu-core/src/hub.rs index 9c4abe38b878..63e42d128666 100644 --- a/gfx/wgpu/wgpu-core/src/hub.rs +++ b/gfx/wgpu/wgpu-core/src/hub.rs @@ -39,7 +39,7 @@ use vec_map::VecMap; #[cfg(debug_assertions)] use std::cell::Cell; -use std::{fmt::Debug, marker::PhantomData, ops}; +use std::{fmt::Debug, iter, marker::PhantomData, ops}; /// A simple structure to manage identities of objects. @@ -184,6 +184,7 @@ impl Access> for Device {} impl Access> for BindGroup {} impl Access> for Device {} impl Access> for BindGroup {} +impl Access> for ComputePipeline {} impl Access> for Device {} impl Access> for PipelineLayout {} impl Access> for Root {} @@ -270,7 +271,7 @@ impl IdentityHandler for Mutex { pub trait IdentityHandlerFactory { type Filter: IdentityHandler; - fn spawn(&self) -> Self::Filter; + fn spawn(&self, min_index: Index) -> Self::Filter; } #[derive(Debug)] @@ -278,8 +279,11 @@ pub struct IdentityManagerFactory; impl IdentityHandlerFactory for IdentityManagerFactory { type Filter = Mutex; - fn spawn(&self) -> Self::Filter { - Mutex::new(IdentityManager::default()) + fn spawn(&self, min_index: Index) -> Self::Filter { + let mut man = IdentityManager::default(); + man.free.extend(0 .. 
min_index); + man.epochs.extend(iter::repeat(1).take(min_index as usize)); + Mutex::new(man) } } @@ -316,7 +320,7 @@ pub struct Registry> { impl> Registry { fn new(backend: Backend, factory: &F) -> Self { Registry { - identity: factory.spawn(), + identity: factory.spawn(0), data: RwLock::new(Storage { map: VecMap::new(), _phantom: PhantomData, @@ -324,6 +328,17 @@ impl> Registry { backend, } } + + fn without_backend(factory: &F) -> Self { + Registry { + identity: factory.spawn(1), + data: RwLock::new(Storage { + map: VecMap::new(), + _phantom: PhantomData, + }), + backend: Backend::Empty, + } + } } impl> Registry { @@ -544,7 +559,7 @@ impl Global { pub fn new(name: &str, factory: G) -> Self { Global { instance: Instance::new(name, 1), - surfaces: Registry::new(Backend::Empty, &factory), + surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), } } diff --git a/gfx/wgpu/wgpu-core/src/id.rs b/gfx/wgpu/wgpu-core/src/id.rs index 68c4e4ce5617..fcda07b00dde 100644 --- a/gfx/wgpu/wgpu-core/src/id.rs +++ b/gfx/wgpu/wgpu-core/src/id.rs @@ -6,7 +6,7 @@ use crate::{Epoch, Index}; #[cfg(feature = "serde")] use serde_crate::{Deserialize, Serialize}; use wgt::Backend; -use std::{fmt, marker::PhantomData, mem}; +use std::{fmt, marker::PhantomData, mem, num::NonZeroU64}; const BACKEND_BITS: usize = 3; const EPOCH_MASK: u32 = (1 << (32 - BACKEND_BITS)) - 1; @@ -14,13 +14,18 @@ type Dummy = crate::backend::Empty; #[repr(transparent)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))] -pub struct Id(u64, PhantomData); +pub struct Id(NonZeroU64, PhantomData); + +// required for PeekPoke +impl Default for Id { + fn default() -> Self { + Id(unsafe { NonZeroU64::new_unchecked(!0) }, PhantomData) + } +} impl Id { - pub const ERROR: Self = Self(0, PhantomData); - pub fn backend(self) -> Backend { - match self.0 >> (64 - BACKEND_BITS) as u8 { + match self.0.get() >> (64 - BACKEND_BITS) as u8 { 0 => Backend::Empty, 1 => 
Backend::Vulkan, 2 => Backend::Metal, @@ -30,6 +35,14 @@ impl Id { _ => unreachable!(), } } + + pub(crate) fn into_raw(self) -> u64 { + self.0.get() + } + + pub(crate) fn from_raw(value: u64) -> Option { + NonZeroU64::new(value).map(|nz| Id(nz, PhantomData)) + } } impl Copy for Id {} @@ -58,18 +71,23 @@ impl PartialEq for Id { } } +impl Eq for Id {} + unsafe impl peek_poke::Poke for Id { fn max_size() -> usize { mem::size_of::() } unsafe fn poke_into(&self, data: *mut u8) -> *mut u8 { - self.0.poke_into(data) + self.0.get().poke_into(data) } } impl peek_poke::Peek for Id { - unsafe fn peek_from(&mut self, data: *const u8) -> *const u8 { - self.0.peek_from(data) + unsafe fn peek_from(mut data: *const u8, this: *mut Self) -> *const u8 { + let mut v = 0u64; + data = u64::peek_from(data, &mut v); + (*this).0 = NonZeroU64::new(v).unwrap(); + data } } @@ -82,13 +100,13 @@ impl TypedId for Id { fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self { assert_eq!(0, epoch >> (32 - BACKEND_BITS)); let v = index as u64 | ((epoch as u64) << 32) | ((backend as u64) << (64 - BACKEND_BITS)); - Id(v, PhantomData) + Id(NonZeroU64::new(v).unwrap(), PhantomData) } fn unzip(self) -> (Index, Epoch, Backend) { ( - self.0 as u32, - (self.0 >> 32) as u32 & EPOCH_MASK, + self.0.get() as u32, + (self.0.get() >> 32) as u32 & EPOCH_MASK, self.backend(), ) } @@ -145,7 +163,7 @@ fn test_id_backend() { Backend::Dx11, Backend::Gl, ] { - let id: Id<()> = Id::zip(0, 0, b); + let id: Id<()> = Id::zip(1, 0, b); assert_eq!(id.backend(), b); } } diff --git a/gfx/wgpu/wgpu-core/src/instance.rs b/gfx/wgpu/wgpu-core/src/instance.rs index d0265ab0c502..51ea99129b3c 100644 --- a/gfx/wgpu/wgpu-core/src/instance.rs +++ b/gfx/wgpu/wgpu-core/src/instance.rs @@ -4,25 +4,47 @@ use crate::{ backend, - device::{Device, BIND_BUFFER_ALIGNMENT}, + device::Device, hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token}, - id::{AdapterId, DeviceId}, + id::{AdapterId, DeviceId, SurfaceId}, power, }; 
-use wgt::{Backend, BackendBit, DeviceDescriptor, PowerPreference, RequestAdapterOptions}; +use wgt::{Backend, BackendBit, DeviceDescriptor, PowerPreference, BIND_BUFFER_ALIGNMENT}; #[cfg(feature = "serde")] use serde_crate::{Deserialize, Serialize}; use hal::{ self, - adapter::{AdapterInfo as HalAdapterInfo, DeviceType as HalDeviceType, PhysicalDevice as _}, + adapter::{ + AdapterInfo as HalAdapterInfo, + DeviceType as HalDeviceType, + PhysicalDevice as _, + }, queue::QueueFamily as _, + window::Surface as _, Instance as _, }; +#[repr(C)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(crate="serde_crate"))] +pub struct RequestAdapterOptions { + pub power_preference: PowerPreference, + pub compatible_surface: Option, +} + +impl Default for RequestAdapterOptions { + fn default() -> Self { + RequestAdapterOptions { + power_preference: PowerPreference::Default, + compatible_surface: None, + } + } +} + #[derive(Debug)] pub struct Instance { #[cfg(any( @@ -261,6 +283,9 @@ impl Global { inputs: AdapterInputs>, ) -> Option { let instance = &self.instance; + let mut token = Token::root(); + let (surface_guard, mut token) = self.surfaces.read(&mut token); + let compatible_surface = desc.compatible_surface.map(|id| &surface_guard[id]); let mut device_types = Vec::new(); let id_vulkan = inputs.find(Backend::Vulkan); @@ -274,7 +299,15 @@ impl Global { ))] let mut adapters_vk = match instance.vulkan { Some(ref inst) if id_vulkan.is_some() => { - let adapters = inst.enumerate_adapters(); + let mut adapters = inst.enumerate_adapters(); + if let Some(&Surface { vulkan: Some(ref surface), .. 
}) = compatible_surface { + adapters.retain(|a| + a.queue_families + .iter() + .find(|qf| qf.queue_type().supports_graphics()) + .map_or(false, |qf| surface.supports_queue_family(qf)) + ); + } device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone())); adapters } @@ -282,7 +315,15 @@ impl Global { }; #[cfg(any(target_os = "ios", target_os = "macos"))] let mut adapters_mtl = if id_metal.is_some() { - let adapters = instance.metal.enumerate_adapters(); + let mut adapters = instance.metal.enumerate_adapters(); + if let Some(surface) = compatible_surface { + adapters.retain(|a| + a.queue_families + .iter() + .find(|qf| qf.queue_type().supports_graphics()) + .map_or(false, |qf| surface.metal.supports_queue_family(qf)) + ); + } device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone())); adapters } else { @@ -291,7 +332,15 @@ impl Global { #[cfg(windows)] let mut adapters_dx12 = match instance.dx12 { Some(ref inst) if id_dx12.is_some() => { - let adapters = inst.enumerate_adapters(); + let mut adapters = inst.enumerate_adapters(); + if let Some(&Surface { dx12: Some(ref surface), .. 
}) = compatible_surface { + adapters.retain(|a| + a.queue_families + .iter() + .find(|qf| qf.queue_type().supports_graphics()) + .map_or(false, |qf| surface.supports_queue_family(qf)) + ); + } device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone())); adapters } @@ -299,7 +348,15 @@ impl Global { }; #[cfg(windows)] let mut adapters_dx11 = if id_dx11.is_some() { - let adapters = instance.dx11.enumerate_adapters(); + let mut adapters = instance.dx11.enumerate_adapters(); + if let Some(surface) = compatible_surface { + adapters.retain(|a| + a.queue_families + .iter() + .find(|qf| qf.queue_type().supports_graphics()) + .map_or(false, |qf| surface.dx11.supports_queue_family(qf)) + ); + } device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone())); adapters } else { @@ -345,7 +402,6 @@ impl Global { PowerPreference::HighPerformance => discrete.or(other).or(integrated).or(virt), }; - let mut token = Token::root(); let mut selected = preferred_gpu.unwrap_or(0); #[cfg(any( not(any(target_os = "ios", target_os = "macos")), @@ -446,7 +502,12 @@ impl Global { let adapter = &adapter_guard[adapter_id].raw; let wishful_features = hal::Features::VERTEX_STORES_AND_ATOMICS | - hal::Features::FRAGMENT_STORES_AND_ATOMICS; + hal::Features::FRAGMENT_STORES_AND_ATOMICS | + hal::Features::NDC_Y_UP; + let enabled_features = adapter.physical_device.features() & wishful_features; + if enabled_features != wishful_features { + log::warn!("Missing features: {:?}", wishful_features - enabled_features); + } let family = adapter .queue_families @@ -456,10 +517,7 @@ impl Global { let mut gpu = unsafe { adapter .physical_device - .open( - &[(family, &[1.0])], - adapter.physical_device.features() & wishful_features, - ) + .open(&[(family, &[1.0])], enabled_features) .unwrap() }; @@ -496,6 +554,7 @@ impl Global { adapter_id, gpu.queue_groups.swap_remove(0), mem_props, + limits.non_coherent_atom_size as u64, supports_texture_d24_s8, desc.limits.max_bind_groups, ) diff 
--git a/gfx/wgpu/wgpu-core/src/lib.rs b/gfx/wgpu/wgpu-core/src/lib.rs index 2cd1dcfdf10e..6b13cae72e3a 100644 --- a/gfx/wgpu/wgpu-core/src/lib.rs +++ b/gfx/wgpu/wgpu-core/src/lib.rs @@ -2,6 +2,13 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#![warn( + trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_qualifications +)] + pub mod backend { #[cfg(windows)] pub use gfx_backend_dx11::Backend as Dx11; @@ -108,36 +115,6 @@ struct Stored { ref_count: RefCount, } -#[repr(C)] -#[derive(Clone, Copy, Debug)] -pub struct Origin3d { - pub x: u32, - pub y: u32, - pub z: u32, -} - -impl Origin3d { - pub const ZERO: Self = Origin3d { - x: 0, - y: 0, - z: 0, - }; -} - -impl Default for Origin3d { - fn default() -> Self { - Origin3d::ZERO - } -} - -#[repr(C)] -#[derive(Clone, Copy, Debug)] -pub struct Extent3d { - pub width: u32, - pub height: u32, - pub depth: u32, -} - #[repr(C)] #[derive(Debug)] pub struct U32Array { diff --git a/gfx/wgpu/wgpu-core/src/pipeline.rs b/gfx/wgpu/wgpu-core/src/pipeline.rs index 274bbe2d0139..7b5997f089c4 100644 --- a/gfx/wgpu/wgpu-core/src/pipeline.rs +++ b/gfx/wgpu/wgpu-core/src/pipeline.rs @@ -5,11 +5,14 @@ use crate::{ device::RenderPassContext, id::{DeviceId, PipelineLayoutId, ShaderModuleId}, + LifeGuard, RawString, + RefCount, Stored, U32Array }; use wgt::{BufferAddress, ColorStateDescriptor, DepthStencilStateDescriptor, IndexFormat, InputStepMode, PrimitiveTopology, RasterizationStateDescriptor, VertexAttributeDescriptor}; +use std::borrow::Borrow; #[repr(C)] #[derive(Debug)] @@ -59,6 +62,13 @@ pub struct ComputePipeline { pub(crate) raw: B::ComputePipeline, pub(crate) layout_id: PipelineLayoutId, pub(crate) device_id: Stored, + pub(crate) life_guard: LifeGuard, +} + +impl Borrow for ComputePipeline { + fn borrow(&self) -> &RefCount { + self.life_guard.ref_count.as_ref().unwrap() + } } #[repr(C)] @@ -96,4 +106,11 @@ pub struct 
RenderPipeline { pub(crate) index_format: IndexFormat, pub(crate) sample_count: u8, pub(crate) vertex_strides: Vec<(BufferAddress, InputStepMode)>, + pub(crate) life_guard: LifeGuard, +} + +impl Borrow for RenderPipeline { + fn borrow(&self) -> &RefCount { + self.life_guard.ref_count.as_ref().unwrap() + } } diff --git a/gfx/wgpu/wgpu-core/src/resource.rs b/gfx/wgpu/wgpu-core/src/resource.rs index 4a923e8bf937..069e0cd5e762 100644 --- a/gfx/wgpu/wgpu-core/src/resource.rs +++ b/gfx/wgpu/wgpu-core/src/resource.rs @@ -5,15 +5,18 @@ use crate::{ id::{DeviceId, SwapChainId, TextureId}, track::DUMMY_SELECTOR, - Extent3d, LifeGuard, RefCount, Stored, }; -use wgt::{BufferAddress, BufferUsage, CompareFunction, TextureFormat, TextureUsage}; -use hal; -use rendy_memory::MemoryBlock; +use wgt::{ + BufferAddress, + BufferUsage, + TextureFormat, + TextureUsage, +}; +use gfx_memory::MemoryBlock; use std::{borrow::Borrow, fmt}; @@ -26,9 +29,25 @@ pub enum BufferMapAsyncStatus { ContextLost, } +#[derive(Debug)] +pub enum BufferMapState { + /// Waiting for GPU to be done before mapping + Waiting(BufferPendingMapping), + /// Mapped + Active, + /// Not mapped + Idle, +} + pub enum BufferMapOperation { - Read(Box), - Write(Box), + Read { + callback: crate::device::BufferMapReadCallback, + userdata: *mut u8, + }, + Write { + callback: crate::device::BufferMapWriteCallback, + userdata: *mut u8, + } } //TODO: clarify if/why this is needed here @@ -38,8 +57,8 @@ unsafe impl Sync for BufferMapOperation {} impl fmt::Debug for BufferMapOperation { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let op = match *self { - BufferMapOperation::Read(_) => "read", - BufferMapOperation::Write(_) => "write", + BufferMapOperation::Read { .. } => "read", + BufferMapOperation::Write { .. 
} => "write", }; write!(fmt, "BufferMapOperation <{}>", op) } @@ -48,13 +67,13 @@ impl fmt::Debug for BufferMapOperation { impl BufferMapOperation { pub(crate) fn call_error(self) { match self { - BufferMapOperation::Read(callback) => { + BufferMapOperation::Read { callback, userdata } => { log::error!("wgpu_buffer_map_read_async failed: buffer mapping is pending"); - callback(BufferMapAsyncStatus::Error, std::ptr::null()); + unsafe { callback(BufferMapAsyncStatus::Error, std::ptr::null(), userdata); } } - BufferMapOperation::Write(callback) => { + BufferMapOperation::Write { callback, userdata } => { log::error!("wgpu_buffer_map_write_async failed: buffer mapping is pending"); - callback(BufferMapAsyncStatus::Error, std::ptr::null_mut()); + unsafe { callback(BufferMapAsyncStatus::Error, std::ptr::null_mut(), userdata); } } } } @@ -62,7 +81,7 @@ impl BufferMapOperation { #[derive(Debug)] pub struct BufferPendingMapping { - pub range: std::ops::Range, + pub sub_range: hal::buffer::SubRange, pub op: BufferMapOperation, // hold the parent alive while the mapping is active pub parent_ref_count: RefCount, @@ -76,9 +95,9 @@ pub struct Buffer { pub(crate) memory: MemoryBlock, pub(crate) size: BufferAddress, pub(crate) full_range: (), - pub(crate) mapped_write_ranges: Vec>, - pub(crate) pending_mapping: Option, + pub(crate) mapped_write_segments: Vec, pub(crate) life_guard: LifeGuard, + pub(crate) map_state: BufferMapState, } impl Borrow for Buffer { @@ -93,26 +112,6 @@ impl Borrow<()> for Buffer { } } -#[repr(C)] -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -pub enum TextureDimension { - D1, - D2, - D3, -} - -#[repr(C)] -#[derive(Debug)] -pub struct TextureDescriptor { - pub size: Extent3d, - pub array_layer_count: u32, - pub mip_level_count: u32, - pub sample_count: u32, - pub dimension: TextureDimension, - pub format: TextureFormat, - pub usage: TextureUsage, -} - #[derive(Debug)] pub struct Texture { pub(crate) raw: B::Image, @@ -137,32 +136,6 @@ impl Borrow for 
Texture { } } -#[repr(C)] -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -pub enum TextureAspect { - All, - StencilOnly, - DepthOnly, -} - -impl Default for TextureAspect { - fn default() -> Self { - TextureAspect::All - } -} - -#[repr(C)] -#[derive(Debug)] -pub struct TextureViewDescriptor { - pub format: TextureFormat, - pub dimension: wgt::TextureViewDimension, - pub aspect: TextureAspect, - pub base_mip_level: u32, - pub level_count: u32, - pub base_array_layer: u32, - pub array_layer_count: u32, -} - #[derive(Debug)] pub(crate) enum TextureViewInner { Native { @@ -198,47 +171,6 @@ impl Borrow<()> for TextureView { } } -#[repr(C)] -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -pub enum AddressMode { - ClampToEdge = 0, - Repeat = 1, - MirrorRepeat = 2, -} - -impl Default for AddressMode { - fn default() -> Self { - AddressMode::ClampToEdge - } -} - -#[repr(C)] -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -pub enum FilterMode { - Nearest = 0, - Linear = 1, -} - -impl Default for FilterMode { - fn default() -> Self { - FilterMode::Nearest - } -} - -#[repr(C)] -#[derive(Debug)] -pub struct SamplerDescriptor<'a> { - pub address_mode_u: AddressMode, - pub address_mode_v: AddressMode, - pub address_mode_w: AddressMode, - pub mag_filter: FilterMode, - pub min_filter: FilterMode, - pub mipmap_filter: FilterMode, - pub lod_min_clamp: f32, - pub lod_max_clamp: f32, - pub compare: Option<&'a CompareFunction>, -} - #[derive(Debug)] pub struct Sampler { pub(crate) raw: B::Sampler, diff --git a/gfx/wgpu/wgpu-core/src/swap_chain.rs b/gfx/wgpu/wgpu-core/src/swap_chain.rs index aca821eceea9..118b5899a0ff 100644 --- a/gfx/wgpu/wgpu-core/src/swap_chain.rs +++ b/gfx/wgpu/wgpu-core/src/swap_chain.rs @@ -85,7 +85,7 @@ pub(crate) fn swap_chain_descriptor_to_hal( #[repr(C)] #[derive(Debug)] pub struct SwapChainOutput { - pub view_id: TextureViewId, + pub view_id: Option, } #[derive(Debug)] @@ -150,7 +150,7 @@ impl Global { life_guard: LifeGuard::new(), }; let 
ref_count = view.life_guard.add_ref(); - let view_id = hub + let id = hub .texture_views .register_identity(view_id_in, view, &mut token); @@ -159,11 +159,11 @@ impl Global { "Swap chain image is already acquired" ); sc.acquired_view_id = Some(Stored { - value: view_id, + value: id, ref_count, }); - Ok(SwapChainOutput { view_id }) + Ok(SwapChainOutput { view_id: Some(id) }) } pub fn swap_chain_present(&self, swap_chain_id: SwapChainId) { diff --git a/gfx/wgpu/wgpu-core/src/track/buffer.rs b/gfx/wgpu/wgpu-core/src/track/buffer.rs index 6ddcf5994db1..0b86bb5b4032 100644 --- a/gfx/wgpu/wgpu-core/src/track/buffer.rs +++ b/gfx/wgpu/wgpu-core/src/track/buffer.rs @@ -122,7 +122,7 @@ mod test { first: Some(BufferUsage::INDEX), last: BufferUsage::STORAGE, }; - let id = TypedId::zip(0, 0, wgt::Backend::Empty); + let id = TypedId::zip(1, 0, wgt::Backend::Empty); assert!(bs.change(id, (), BufferUsage::VERTEX, None).is_err()); bs.change(id, (), BufferUsage::VERTEX, Some(&mut Vec::new())) .unwrap(); diff --git a/gfx/wgpu/wgpu-core/src/track/mod.rs b/gfx/wgpu/wgpu-core/src/track/mod.rs index b453d593026a..076fd60f3e82 100644 --- a/gfx/wgpu/wgpu-core/src/track/mod.rs +++ b/gfx/wgpu/wgpu-core/src/track/mod.rs @@ -9,7 +9,7 @@ mod texture; use crate::{ conv, hub::Storage, - id::{BindGroupId, SamplerId, TextureViewId, TypedId}, + id::{self, TypedId}, resource, Epoch, FastHashMap, @@ -138,7 +138,7 @@ impl PendingTransition { hal::memory::Barrier::Buffer { states: conv::map_buffer_state(self.usage.start) .. conv::map_buffer_state(self.usage.end), target: &buf.raw, - range: None .. 
None, + range: hal::buffer::SubRange::WHOLE, families: None, } } @@ -464,9 +464,11 @@ pub const DUMMY_SELECTOR: () = (); pub struct TrackerSet { pub buffers: ResourceTracker, pub textures: ResourceTracker, - pub views: ResourceTracker>, - pub bind_groups: ResourceTracker>, - pub samplers: ResourceTracker>, + pub views: ResourceTracker>, + pub bind_groups: ResourceTracker>, + pub samplers: ResourceTracker>, + pub compute_pipes: ResourceTracker>, + pub render_pipes: ResourceTracker>, } impl TrackerSet { @@ -478,6 +480,8 @@ impl TrackerSet { views: ResourceTracker::new(backend), bind_groups: ResourceTracker::new(backend), samplers: ResourceTracker::new(backend), + compute_pipes: ResourceTracker::new(backend), + render_pipes: ResourceTracker::new(backend), } } @@ -488,6 +492,8 @@ impl TrackerSet { self.views.clear(); self.bind_groups.clear(); self.samplers.clear(); + self.compute_pipes.clear(); + self.render_pipes.clear(); } /// Try to optimize the tracking representation. @@ -497,6 +503,8 @@ impl TrackerSet { self.views.optimize(); self.bind_groups.optimize(); self.samplers.optimize(); + self.compute_pipes.optimize(); + self.render_pipes.optimize(); } /// Merge all the trackers of another instance by extending @@ -507,6 +515,8 @@ impl TrackerSet { self.views.merge_extend(&other.views).unwrap(); self.bind_groups.merge_extend(&other.bind_groups).unwrap(); self.samplers.merge_extend(&other.samplers).unwrap(); + self.compute_pipes.merge_extend(&other.compute_pipes).unwrap(); + self.render_pipes.merge_extend(&other.render_pipes).unwrap(); } pub fn backend(&self) -> wgt::Backend { diff --git a/gfx/wgpu/wgpu-native/cbindgen.toml b/gfx/wgpu/wgpu-native/cbindgen.toml index 0d86ceaefc93..25289299f7ec 100644 --- a/gfx/wgpu/wgpu-native/cbindgen.toml +++ b/gfx/wgpu/wgpu-native/cbindgen.toml @@ -8,7 +8,10 @@ autogen_warning = """/* DO NOT MODIFY THIS MANUALLY! This file was generated usi * 2. 
Run `rustup run nightly cbindgen toolkit/library/rust/ --lockfile Cargo.lock --crate wgpu-remote -o dom/webgpu/ffi/wgpu_ffi_generated.h` */ -#define WGPU_LOCAL +typedef unsigned long long WGPUNonZeroU64; +typedef unsigned long long WGPUOption_AdapterId; +typedef unsigned long long WGPUOption_SurfaceId; +typedef unsigned long long WGPUOption_TextureViewId; """ include_version = true braces = "SameLine" @@ -18,8 +21,7 @@ language = "C" [export] prefix = "WGPU" -#TODO: figure out why cbindgen even tries to export a private type... -exclude = ["BufferMapResult"] +exclude = ["Option_AdapterId", "Option_SurfaceId", "Option_TextureViewId"] [parse] parse_deps = true diff --git a/gfx/wgpu/wgpu-native/src/command.rs b/gfx/wgpu/wgpu-native/src/command.rs index e1b3a3132f83..8001b4caa76d 100644 --- a/gfx/wgpu/wgpu-native/src/command.rs +++ b/gfx/wgpu/wgpu-native/src/command.rs @@ -15,7 +15,7 @@ use core::{gfx_select, id}; #[no_mangle] pub extern "C" fn wgpu_command_encoder_finish( encoder_id: id::CommandEncoderId, - desc: Option<&core::command::CommandBufferDescriptor>, + desc: Option<&wgt::CommandBufferDescriptor>, ) -> id::CommandBufferId { let desc = &desc.cloned().unwrap_or_default(); gfx_select!(encoder_id => GLOBAL.command_encoder_finish(encoder_id, desc)) @@ -43,7 +43,7 @@ pub extern "C" fn wgpu_command_encoder_copy_buffer_to_texture( command_encoder_id: id::CommandEncoderId, source: &core::command::BufferCopyView, destination: &core::command::TextureCopyView, - copy_size: core::Extent3d, + copy_size: wgt::Extent3d, ) { gfx_select!(command_encoder_id => GLOBAL.command_encoder_copy_buffer_to_texture( command_encoder_id, @@ -57,7 +57,7 @@ pub extern "C" fn wgpu_command_encoder_copy_texture_to_buffer( command_encoder_id: id::CommandEncoderId, source: &core::command::TextureCopyView, destination: &core::command::BufferCopyView, - copy_size: core::Extent3d, + copy_size: wgt::Extent3d, ) { gfx_select!(command_encoder_id => GLOBAL.command_encoder_copy_texture_to_buffer( 
command_encoder_id, @@ -71,7 +71,7 @@ pub extern "C" fn wgpu_command_encoder_copy_texture_to_texture( command_encoder_id: id::CommandEncoderId, source: &core::command::TextureCopyView, destination: &core::command::TextureCopyView, - copy_size: core::Extent3d, + copy_size: wgt::Extent3d, ) { gfx_select!(command_encoder_id => GLOBAL.command_encoder_copy_texture_to_texture( command_encoder_id, diff --git a/gfx/wgpu/wgpu-native/src/device.rs b/gfx/wgpu/wgpu-native/src/device.rs index 975ff314b7c3..16471f97ad9a 100644 --- a/gfx/wgpu/wgpu-native/src/device.rs +++ b/gfx/wgpu/wgpu-native/src/device.rs @@ -4,7 +4,7 @@ use crate::GLOBAL; -use wgt::{BackendBit, DeviceDescriptor, Limits, RequestAdapterOptions}; +use wgt::{BackendBit, DeviceDescriptor, Limits}; use core::{gfx_select, hub::Token, id}; use std::{marker::PhantomData, slice}; @@ -13,7 +13,7 @@ use std::{marker::PhantomData, slice}; use objc::{msg_send, runtime::Object, sel, sel_impl}; pub type RequestAdapterCallback = - unsafe extern "C" fn(id: id::AdapterId, userdata: *mut std::ffi::c_void); + unsafe extern "C" fn(id: Option, userdata: *mut std::ffi::c_void); pub fn wgpu_create_surface(raw_handle: raw_window_handle::RawWindowHandle) -> id::SurfaceId { use raw_window_handle::RawWindowHandle as Rwh; @@ -153,7 +153,7 @@ pub fn wgpu_enumerate_adapters(mask: BackendBit) -> Vec { /// This function is unsafe as it calls an unsafe extern callback. 
#[no_mangle] pub unsafe extern "C" fn wgpu_request_adapter_async( - desc: Option<&RequestAdapterOptions>, + desc: Option<&core::instance::RequestAdapterOptions>, mask: BackendBit, callback: RequestAdapterCallback, userdata: *mut std::ffi::c_void, @@ -162,10 +162,7 @@ pub unsafe extern "C" fn wgpu_request_adapter_async( &desc.cloned().unwrap_or_default(), core::instance::AdapterInputs::Mask(mask, || PhantomData), ); - callback( - id.unwrap_or(id::AdapterId::ERROR), - userdata, - ); + callback(id, userdata); } #[no_mangle] @@ -225,7 +222,7 @@ pub extern "C" fn wgpu_buffer_destroy(buffer_id: id::BufferId) { #[no_mangle] pub extern "C" fn wgpu_device_create_texture( device_id: id::DeviceId, - desc: &core::resource::TextureDescriptor, + desc: &wgt::TextureDescriptor, ) -> id::TextureId { gfx_select!(device_id => GLOBAL.device_create_texture(device_id, desc, PhantomData)) } @@ -238,7 +235,7 @@ pub extern "C" fn wgpu_texture_destroy(texture_id: id::TextureId) { #[no_mangle] pub extern "C" fn wgpu_texture_create_view( texture_id: id::TextureId, - desc: Option<&core::resource::TextureViewDescriptor>, + desc: Option<&wgt::TextureViewDescriptor>, ) -> id::TextureViewId { gfx_select!(texture_id => GLOBAL.texture_create_view(texture_id, desc, PhantomData)) } @@ -251,7 +248,7 @@ pub extern "C" fn wgpu_texture_view_destroy(texture_view_id: id::TextureViewId) #[no_mangle] pub extern "C" fn wgpu_device_create_sampler( device_id: id::DeviceId, - desc: &core::resource::SamplerDescriptor, + desc: &wgt::SamplerDescriptor, ) -> id::SamplerId { gfx_select!(device_id => GLOBAL.device_create_sampler(device_id, desc, PhantomData)) } @@ -380,11 +377,11 @@ pub extern "C" fn wgpu_buffer_map_read_async( callback: core::device::BufferMapReadCallback, userdata: *mut u8, ) { - let operation = core::resource::BufferMapOperation::Read( - Box::new(move |status, data| unsafe { - callback(status, data, userdata) - }), - ); + let operation = core::resource::BufferMapOperation::Read { + callback, + 
userdata, + }; + gfx_select!(buffer_id => GLOBAL.buffer_map_async(buffer_id, wgt::BufferUsage::MAP_READ, start .. start + size, operation)) } @@ -396,11 +393,11 @@ pub extern "C" fn wgpu_buffer_map_write_async( callback: core::device::BufferMapWriteCallback, userdata: *mut u8, ) { - let operation = core::resource::BufferMapOperation::Write( - Box::new(move |status, data| unsafe { - callback(status, data, userdata) - }), - ); + let operation = core::resource::BufferMapOperation::Write { + callback, + userdata, + }; + gfx_select!(buffer_id => GLOBAL.buffer_map_async(buffer_id, wgt::BufferUsage::MAP_WRITE, start .. start + size, operation)) } @@ -415,7 +412,7 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture( ) -> core::swap_chain::SwapChainOutput { gfx_select!(swap_chain_id => GLOBAL.swap_chain_get_next_texture(swap_chain_id, PhantomData)) .unwrap_or(core::swap_chain::SwapChainOutput { - view_id: id::TextureViewId::ERROR, + view_id: None, }) } diff --git a/gfx/wgpu/wgpu-remote/cbindgen.toml b/gfx/wgpu/wgpu-remote/cbindgen.toml index af6001149bd2..45199448504b 100644 --- a/gfx/wgpu/wgpu-remote/cbindgen.toml +++ b/gfx/wgpu/wgpu-remote/cbindgen.toml @@ -8,7 +8,10 @@ autogen_warning = """/* DO NOT MODIFY THIS MANUALLY! This file was generated usi * 2. 
Run `rustup run nightly cbindgen toolkit/library/rust/ --lockfile Cargo.lock --crate wgpu-remote -o dom/webgpu/ffi/wgpu_ffi_generated.h` */ -typedef void WGPUEmpty; +typedef uint64_t WGPUNonZeroU64; +typedef uint64_t WGPUOption_AdapterId; +typedef uint64_t WGPUOption_SurfaceId; +typedef uint64_t WGPUOption_TextureViewId; """ include_version = true braces = "SameLine" @@ -19,7 +22,7 @@ style = "tag" [export] prefix = "WGPU" -exclude = ["BufferMapResult"] +exclude = ["Option_AdapterId", "Option_SurfaceId", "Option_TextureViewId"] [parse] parse_deps = true diff --git a/gfx/wgpu/wgpu-remote/src/identity.rs b/gfx/wgpu/wgpu-remote/src/identity.rs index 6a208e392b3b..e2e4c22df406 100644 --- a/gfx/wgpu/wgpu-remote/src/identity.rs +++ b/gfx/wgpu/wgpu-remote/src/identity.rs @@ -49,7 +49,7 @@ pub struct IdentityRecyclerFactory { impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_adapter, param: self.param, @@ -59,7 +59,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactor } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_device, param: self.param, @@ -69,7 +69,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_swap_chain, param: self.param, @@ -79,7 +79,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecyclerFact } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, 
_min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_pipeline_layout, param: self.param, @@ -89,7 +89,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecycle } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_shader_module, param: self.param, @@ -99,7 +99,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecyclerF } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_bind_group_layout, param: self.param, @@ -109,7 +109,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecycl } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_bind_group, param: self.param, @@ -119,7 +119,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecyclerFact } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_command_buffer, param: self.param, @@ -129,7 +129,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecycler } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_render_pipeline, param: self.param, @@ -139,7 +139,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecycle } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn 
spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_compute_pipeline, param: self.param, @@ -149,7 +149,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecycl } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_buffer, param: self.param, @@ -159,7 +159,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_texture, param: self.param, @@ -169,7 +169,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactor } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_texture_view, param: self.param, @@ -179,7 +179,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecyclerFa } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_sampler, param: self.param, @@ -189,7 +189,7 @@ impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactor } impl core::hub::IdentityHandlerFactory for IdentityRecyclerFactory { type Filter = IdentityRecycler; - fn spawn(&self) -> Self::Filter { + fn spawn(&self, _min_index: u32) -> Self::Filter { IdentityRecycler { fun: self.free_surface, param: self.param, diff --git a/gfx/wgpu/wgpu-remote/src/server.rs b/gfx/wgpu/wgpu-remote/src/server.rs index 2226a2ac3067..2860e2caeaac 100644 --- 
a/gfx/wgpu/wgpu-remote/src/server.rs +++ b/gfx/wgpu/wgpu-remote/src/server.rs @@ -46,7 +46,7 @@ pub extern "C" fn wgpu_server_poll_all_devices(global: &Global, force_wait: bool #[no_mangle] pub unsafe extern "C" fn wgpu_server_instance_request_adapter( global: &Global, - desc: &wgt::RequestAdapterOptions, + desc: &core::instance::RequestAdapterOptions, ids: *const id::AdapterId, id_length: usize, ) -> i8 { @@ -123,11 +123,11 @@ pub extern "C" fn wgpu_server_buffer_map_read( callback: core::device::BufferMapReadCallback, userdata: *mut u8, ) { - let operation = core::resource::BufferMapOperation::Read( - Box::new(move |status, data| unsafe { - callback(status, data, userdata) - }), - ); + let operation = core::resource::BufferMapOperation::Read { + callback, + userdata, + }; + gfx_select!(buffer_id => global.buffer_map_async( buffer_id, wgt::BufferUsage::MAP_READ, @@ -136,6 +136,14 @@ pub extern "C" fn wgpu_server_buffer_map_read( )); } +#[no_mangle] +pub extern "C" fn wgpu_server_buffer_unmap( + global: &Global, + buffer_id: id::BufferId, +) { + gfx_select!(buffer_id => global.buffer_unmap(buffer_id)); +} + #[no_mangle] pub extern "C" fn wgpu_server_buffer_destroy(global: &Global, self_id: id::BufferId) { gfx_select!(self_id => global.buffer_destroy(self_id)); @@ -155,7 +163,7 @@ pub extern "C" fn wgpu_server_device_create_encoder( pub extern "C" fn wgpu_server_encoder_finish( global: &Global, self_id: id::CommandEncoderId, - desc: &core::command::CommandBufferDescriptor, + desc: &wgt::CommandBufferDescriptor, ) { gfx_select!(self_id => global.command_encoder_finish(self_id, desc)); } @@ -199,7 +207,7 @@ pub unsafe extern "C" fn wgpu_server_encoder_copy_texture_to_buffer( self_id: id::CommandEncoderId, source: &core::command::TextureCopyView, destination: &core::command::BufferCopyView, - size: core::Extent3d, + size: wgt::Extent3d, ) { gfx_select!(self_id => global.command_encoder_copy_texture_to_buffer(self_id, source, destination, size)); } @@ -210,11 +218,22 @@ 
pub unsafe extern "C" fn wgpu_server_encoder_copy_buffer_to_texture( self_id: id::CommandEncoderId, source: &core::command::BufferCopyView, destination: &core::command::TextureCopyView, - size: core::Extent3d, + size: wgt::Extent3d, ) { gfx_select!(self_id => global.command_encoder_copy_buffer_to_texture(self_id, source, destination, size)); } +#[no_mangle] +pub unsafe extern "C" fn wgpu_server_encoder_copy_texture_to_texture( + global: &Global, + self_id: id::CommandEncoderId, + source: &core::command::TextureCopyView, + destination: &core::command::TextureCopyView, + size: wgt::Extent3d, +) { + gfx_select!(self_id => global.command_encoder_copy_texture_to_texture(self_id, source, destination, size)); +} + /// # Safety /// /// This function is unsafe as there is no guarantee that the given pointers are @@ -374,7 +393,7 @@ pub extern "C" fn wgpu_server_render_pipeline_destroy( pub extern "C" fn wgpu_server_device_create_texture( global: &Global, self_id: id::DeviceId, - desc: &core::resource::TextureDescriptor, + desc: &wgt::TextureDescriptor, new_id: id::TextureId, ) { gfx_select!(self_id => global.device_create_texture(self_id, desc, new_id)); @@ -384,7 +403,7 @@ pub extern "C" fn wgpu_server_device_create_texture( pub extern "C" fn wgpu_server_texture_create_view( global: &Global, self_id: id::TextureId, - desc: Option<&core::resource::TextureViewDescriptor>, + desc: Option<&wgt::TextureViewDescriptor>, new_id: id::TextureViewId, ) { gfx_select!(self_id => global.texture_create_view(self_id, desc, new_id)); @@ -410,7 +429,7 @@ pub extern "C" fn wgpu_server_texture_view_destroy( pub extern "C" fn wgpu_server_device_create_sampler( global: &Global, self_id: id::DeviceId, - desc: &core::resource::SamplerDescriptor, + desc: &wgt::SamplerDescriptor, new_id: id::SamplerId, ) { gfx_select!(self_id => global.device_create_sampler(self_id, desc, new_id)); diff --git a/gfx/wgpu/wgpu-types/Cargo.toml b/gfx/wgpu/wgpu-types/Cargo.toml index 068fb1ade99a..2c485c7f5add 100644 
--- a/gfx/wgpu/wgpu-types/Cargo.toml +++ b/gfx/wgpu/wgpu-types/Cargo.toml @@ -17,4 +17,4 @@ license = "MPL-2.0" [dependencies] bitflags = "1.0" serde = { version = "1.0", features = ["serde_derive"], optional = true } -peek-poke = { git = "https://github.com/kvark/peek-poke", rev = "969bd7fe2be1a83f87916dc8b388c63cfd457075", optional = true } +peek-poke = { version = "0.2", optional = true } diff --git a/gfx/wgpu/wgpu-types/src/lib.rs b/gfx/wgpu/wgpu-types/src/lib.rs index 3e931520bdd1..527be983d3e2 100644 --- a/gfx/wgpu/wgpu-types/src/lib.rs +++ b/gfx/wgpu/wgpu-types/src/lib.rs @@ -2,14 +2,14 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -use std::{io, slice}; +use std::{io, slice, ptr}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; #[cfg(feature = "peek-poke")] -use peek_poke::{PeekCopy, Poke}; +use peek_poke::{PeekPoke}; #[repr(u8)] -#[derive(Clone, Copy, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum Backend { Empty = 0, @@ -22,7 +22,7 @@ pub enum Backend { } #[repr(C)] -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum PowerPreference { Default = 0, @@ -30,21 +30,6 @@ pub enum PowerPreference { HighPerformance = 2, } -#[repr(C)] -#[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct RequestAdapterOptions { - pub power_preference: PowerPreference, -} - -impl Default for RequestAdapterOptions { - fn default() -> Self { - RequestAdapterOptions { - power_preference: PowerPreference::Default, - } - } -} - bitflags::bitflags! 
{ #[repr(transparent)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -72,14 +57,14 @@ impl From for BackendBit { } #[repr(C)] -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct Extensions { pub anisotropic_filtering: bool, } #[repr(C)] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct Limits { pub max_bind_groups: u32, @@ -170,6 +155,7 @@ pub type BufferAddress = u64; #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum BlendFactor { Zero = 0, One = 1, @@ -188,6 +174,7 @@ pub enum BlendFactor { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum BlendOperation { Add = 0, Subtract = 1, @@ -203,7 +190,8 @@ impl Default for BlendOperation { } #[repr(C)] -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct BlendDescriptor { pub src_factor: BlendFactor, pub dst_factor: BlendFactor, @@ -235,7 +223,8 @@ impl Default for BlendDescriptor { } #[repr(C)] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ColorStateDescriptor { pub format: TextureFormat, pub alpha_blend: BlendDescriptor, @@ -245,6 +234,7 @@ pub struct ColorStateDescriptor { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum PrimitiveTopology { PointList = 0, LineList = 1, @@ -255,6 +245,7 @@ pub enum PrimitiveTopology { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub 
enum FrontFace { Ccw = 0, Cw = 1, @@ -268,6 +259,7 @@ impl Default for FrontFace { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum CullMode { None = 0, Front = 1, @@ -281,7 +273,8 @@ impl Default for CullMode { } #[repr(C)] -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug, Default, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct RasterizationStateDescriptor { pub front_face: FrontFace, pub cull_mode: CullMode, @@ -349,6 +342,7 @@ pub enum TextureFormat { bitflags::bitflags! { #[repr(transparent)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ColorWrite: u32 { const RED = 1; const GREEN = 2; @@ -366,7 +360,8 @@ impl Default for ColorWrite { } #[repr(C)] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct DepthStencilStateDescriptor { pub format: TextureFormat, pub depth_write_enabled: bool, @@ -385,6 +380,7 @@ impl DepthStencilStateDescriptor { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum IndexFormat { Uint16 = 0, Uint32 = 1, @@ -392,6 +388,7 @@ pub enum IndexFormat { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum StencilOperation { Keep = 0, Zero = 1, @@ -410,7 +407,8 @@ impl Default for StencilOperation { } #[repr(C)] -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct StencilStateFaceDescriptor { pub compare: CompareFunction, pub fail_op: StencilOperation, @@ -435,15 +433,17 @@ impl Default for StencilStateFaceDescriptor { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, 
Deserialize))] pub enum CompareFunction { - Never = 0, - Less = 1, - Equal = 2, - LessEqual = 3, - Greater = 4, - NotEqual = 5, - GreaterEqual = 6, - Always = 7, + Undefined = 0, + Never = 1, + Less = 2, + Equal = 3, + LessEqual = 4, + Greater = 5, + NotEqual = 6, + GreaterEqual = 7, + Always = 8, } impl CompareFunction { @@ -459,13 +459,15 @@ pub type ShaderLocation = u32; #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum InputStepMode { Vertex = 0, Instance = 1, } #[repr(C)] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct VertexAttributeDescriptor { pub offset: BufferAddress, pub format: VertexFormat, @@ -474,6 +476,7 @@ pub struct VertexAttributeDescriptor { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum VertexFormat { Uchar2 = 1, Uchar4 = 3, @@ -536,25 +539,35 @@ bitflags::bitflags! 
{ } #[repr(C)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct BufferDescriptor { + pub label: *const std::os::raw::c_char, pub size: BufferAddress, pub usage: BufferUsage, } #[repr(C)] -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct CommandEncoderDescriptor { // MSVC doesn't allow zero-sized structs // We can remove this when we actually have a field - pub todo: u32, + // pub todo: u32, + pub label: *const std::os::raw::c_char, +} + +impl Default for CommandEncoderDescriptor { + fn default() -> CommandEncoderDescriptor { + CommandEncoderDescriptor { + label: ptr::null(), + } + } } pub type DynamicOffset = u32; #[repr(C)] -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum PresentMode { /// The presentation engine does **not** wait for a vertical blanking period and /// the request is presented immediately. This is a low-latency presentation mode, @@ -574,6 +587,7 @@ pub enum PresentMode { bitflags::bitflags! { #[repr(transparent)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct TextureUsage: u32 { const COPY_SRC = 1; const COPY_DST = 2; @@ -594,7 +608,8 @@ bitflags::bitflags! 
{ } #[repr(C)] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct SwapChainDescriptor { pub usage: TextureUsage, pub format: TextureFormat, @@ -605,7 +620,8 @@ pub struct SwapChainDescriptor { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "peek-poke", derive(PeekCopy, Poke))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "peek-poke", derive(PeekPoke))] pub enum LoadOp { Clear = 0, Load = 1, @@ -613,18 +629,20 @@ pub enum LoadOp { #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "peek-poke", derive(PeekCopy, Poke))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "peek-poke", derive(PeekPoke))] pub enum StoreOp { Clear = 0, Store = 1, } #[repr(C)] -#[derive(Debug)] -#[cfg_attr(feature = "peek-poke", derive(PeekCopy, Poke))] -pub struct RenderPassColorAttachmentDescriptorBase { +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "peek-poke", derive(PeekPoke))] +pub struct RenderPassColorAttachmentDescriptorBase { pub attachment: T, - pub resolve_target: R, + pub resolve_target: Option, pub load_op: LoadOp, pub store_op: StoreOp, pub clear_color: Color, @@ -632,7 +650,8 @@ pub struct RenderPassColorAttachmentDescriptorBase { #[repr(C)] #[derive(Clone, Debug)] -#[cfg_attr(feature = "peek-poke", derive(PeekCopy, Poke))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "peek-poke", derive(PeekPoke))] pub struct RenderPassDepthStencilAttachmentDescriptorBase { pub attachment: T, pub depth_load_op: LoadOp, @@ -644,8 +663,9 @@ pub struct RenderPassDepthStencilAttachmentDescriptorBase { } #[repr(C)] -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "peek-poke", derive(PeekCopy, Poke))] +#[derive(Clone, Copy, Debug, Default, PartialEq)] 
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "peek-poke", derive(PeekPoke))] pub struct Color { pub r: f64, pub g: f64, @@ -691,3 +711,148 @@ impl Color { a: 1.0, }; } + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum TextureDimension { + D1, + D2, + D3, +} + +#[repr(C)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Origin3d { + pub x: u32, + pub y: u32, + pub z: u32, +} + +impl Origin3d { + pub const ZERO: Self = Origin3d { + x: 0, + y: 0, + z: 0, + }; +} + +impl Default for Origin3d { + fn default() -> Self { + Origin3d::ZERO + } +} + +#[repr(C)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Extent3d { + pub width: u32, + pub height: u32, + pub depth: u32, +} + +#[repr(C)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct TextureDescriptor { + pub label: *const std::os::raw::c_char, + pub size: Extent3d, + pub array_layer_count: u32, + pub mip_level_count: u32, + pub sample_count: u32, + pub dimension: TextureDimension, + pub format: TextureFormat, + pub usage: TextureUsage, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum TextureAspect { + All, + StencilOnly, + DepthOnly, +} + +impl Default for TextureAspect { + fn default() -> Self { + TextureAspect::All + } +} + +#[repr(C)] +#[derive(Clone, Debug, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct TextureViewDescriptor { + pub format: TextureFormat, + pub dimension: TextureViewDimension, + pub aspect: TextureAspect, + pub base_mip_level: u32, + pub level_count: u32, + pub base_array_layer: u32, + pub array_layer_count: u32, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, 
PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum AddressMode { + ClampToEdge = 0, + Repeat = 1, + MirrorRepeat = 2, +} + +impl Default for AddressMode { + fn default() -> Self { + AddressMode::ClampToEdge + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum FilterMode { + Nearest = 0, + Linear = 1, +} + +impl Default for FilterMode { + fn default() -> Self { + FilterMode::Nearest + } +} + +#[repr(C)] +#[derive(Clone, Debug, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SamplerDescriptor { + pub address_mode_u: AddressMode, + pub address_mode_v: AddressMode, + pub address_mode_w: AddressMode, + pub mag_filter: FilterMode, + pub min_filter: FilterMode, + pub mipmap_filter: FilterMode, + pub lod_min_clamp: f32, + pub lod_max_clamp: f32, + pub compare: CompareFunction, +} + +#[repr(C)] +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct CommandBufferDescriptor { + pub todo: u32, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum TextureComponentType { + Float, + Sint, + Uint, +} + +/// Bound uniform/storage buffer offsets must be aligned to this number. 
+pub const BIND_BUFFER_ALIGNMENT: u64 = 256; diff --git a/third_party/rust/ash/.cargo-checksum.json b/third_party/rust/ash/.cargo-checksum.json index 71ffb13a4e4d..0477c4fed5d0 100644 --- a/third_party/rust/ash/.cargo-checksum.json +++ b/third_party/rust/ash/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"c1c67981635d76cb25b8e2d93bbf5ed521f2a436c8a3eb5c11673a1d6373cbbb","output":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","src/allocator.rs":"8defd2b41207b2049c2fdae62564148c969d92d5a724487bbc189e748b27fd5c","src/device.rs":"7349761540893b33a41eff1e922f846db494b8b9e6bf0dcfd48ce0bec00e5870","src/entry.rs":"2491a6f350f1c04bcb39e4f585d8549e2cf4bbd3e8a792145a6b0218f52b6e9b","src/extensions/experimental/amd.rs":"70652e2216811f0fcf2f0e748d0cf5c52c6eabfd613e3bd6da28cb1100cfd620","src/extensions/experimental/mod.rs":"41a5366e1c8bd0e1fa47e9cf6fddc8111ed0a6946813be4eefca81da969d1ee9","src/extensions/ext/debug_marker.rs":"2221980d611c8e9bdc9ca5186bf363ccbda22e6b5ea52572f3b7ed4ec5a752a3","src/extensions/ext/debug_report.rs":"affff85cefb68313a6d91489fba4c58e4adffca00d571e24a6818e72f94f4983","src/extensions/ext/debug_utils.rs":"8592be4c7dfbf13d4d224d79d16e4fc6ab746d1fa761178a781dc03caa63a53a","src/extensions/ext/mod.rs":"ccd7b9471c4bb356fc2fa309d58a847f9aff393b77fc08752123e19c801cbc65","src/extensions/khr/android_surface.rs":"5f9ff04add0661637258b32eea95c1eefcf86ab8686088e28a7369ab77df9456","src/extensions/khr/display_swapchain.rs":"cfd551cc2bb29d8e998938880de49d0142c1af6561360282820f6e32c1f9bc42","src/extensions/khr/mod.rs":"12a32c91a4b13972660dc997b59b38383b8059e6ff457d594731e828db6f2e1d","src/extensions/khr/surface.rs":"2e5a08e6a3f8903f40e643476eb48b0626054de606dfeed1e1e6ee3b8098c743","src/extensions/khr/swapchain.rs":"4dd73298a5d3e55c83d649a81aaf42384625ef53b7f13b24ad326a08627cf794","src/extensions/khr/wayland_surface.rs":"63233a95aa5f4c693f7322b6cf70789a9ac304a90bc3157a0855ce71872cf6e9","src/extensions/khr/win32_surface.rs":"4e27aaf236eba179eb0
d2ad3a29a54ace21d7c4b5210ac36bc328e3d57cc8616","src/extensions/khr/xcb_surface.rs":"328e57312e261f55f13ed78a7c3bd8dcaab7d94d481910a6483b962d0f4da40d","src/extensions/khr/xlib_surface.rs":"44ee06032f0d3fe7f330c6542cbe81636523123355f8c10844abf7893bcb2503","src/extensions/mod.rs":"4a394c468a0fc824671b36c1390f6c34173d073ed0918a528a84f48667756d65","src/extensions/mvk/ios_surface.rs":"3c58810506841142a781df7ab76fe95a2eac5d7dc95ae6345ae93220d2647b7b","src/extensions/mvk/macos_surface.rs":"fcf3a34c164f0251293a50222f944e74fff4eeb797ad8521678031e69a26956c","src/extensions/mvk/mod.rs":"d03ac1a0144d1aca9ed1d0ce0c14b099f1fedb03b8108028b780a34f64de604c","src/extensions/nv/mesh_shader.rs":"c0450955eb36344b7e49acc58a021d04926dd918685b9fc6a655cd29a39afc72","src/extensions/nv/mod.rs":"175512de8528c3a90000cf9509c683761e9536dcb448877b7c7772b695aad258","src/extensions/nv/ray_tracing.rs":"a241936debf78f219de647b8392dc18c0542a82393eace4d25aaa49afef36b82","src/instance.rs":"fab133b311506eb38d8a3faa7f3e60a9e13b84760e08ad830e616262a6b46228","src/lib.rs":"801481c0cd8415f7f90ba1022128b440cc951cbd572a82f30cc1a142d34af405","src/prelude.rs":"ed6ee8e74131c232af2e3a780abe13f0c65acba1e6de61e3d1eec7f7aec7467a","src/util.rs":"bb50e11bc75058fb474bda5e34aa8978cb585ce3532ae2921c93692a13a25187","src/version.rs":"6f2d52ac2edd6f54c899763825954ac8b4c944aa9168d00885cf3955b5e4e454","src/vk.rs":"f946223870190a0060cf7b3c5baacae9ef1e4bcd12bc2d860344dc5c1567cf3d","tests/constant_size_arrays.rs":"6577f5c8d9810f9aea1d47862243e4d41a297d43e744be04fdb34d08021bac48","tests/display.rs":"13f341053efcfc104e6dae48c19e6092ffc2acf6ff3cbc4ed37dd1a03875cb17"},"package":"003d1fb2eb12eb06d4a03dbe02eea67a9fac910fa97932ab9e3a75b96a1ea5e5"} \ No newline at end of file 
+{"files":{"Cargo.toml":"8672467a4061ab042e1680a880e7dc96bff020880af1309f40ec5b3a92d98758","src/allocator.rs":"8defd2b41207b2049c2fdae62564148c969d92d5a724487bbc189e748b27fd5c","src/device.rs":"012bb9dbd4c557ac0309b8f39d911c2d28800c5258931e6c4166a56366b8f769","src/entry.rs":"e6cb5bd68abd37442fea9faa6a307210398c8f9798c19691d99ea8355d21aa2d","src/extensions/experimental/amd.rs":"39bef9d347d3abda78599a2363944612b3b52ea4ed90dcc36afc57162f5bdc87","src/extensions/experimental/mod.rs":"41a5366e1c8bd0e1fa47e9cf6fddc8111ed0a6946813be4eefca81da969d1ee9","src/extensions/ext/debug_marker.rs":"ddb7629fa10180fbe9e8ef55ce82bec08210a102baf7c66208697a1d6c2240d3","src/extensions/ext/debug_report.rs":"8d97690f144940beee44e198a81eae60c9aba2b549ac0c4cfedd1f81a8927cad","src/extensions/ext/debug_utils.rs":"a1064627573a2d552deecd210a8b93672f6b71f22ba7ba2ff08865e58a9fa354","src/extensions/ext/mod.rs":"ccd7b9471c4bb356fc2fa309d58a847f9aff393b77fc08752123e19c801cbc65","src/extensions/khr/android_surface.rs":"24f741651204c91d2f02c2fcb70bc30982c811434b32451590ffeb012931ebfc","src/extensions/khr/display.rs":"0358c47b5d22f8abc04141c121d1c810bd5714445532c67634d396cfdff71d2c","src/extensions/khr/display_swapchain.rs":"e18ca144054d99980c256a665e6a55dfbb33caf7c6d2d3261a6dcf02a88b16c1","src/extensions/khr/external_memory_fd.rs":"19d77c876693762c36a9bc3c5ba920515590fc7485c45f0acc10b9e6160b6536","src/extensions/khr/mod.rs":"b9416eb2158938c2887b06a3e157350da74853e0737c965091833edba55e21f2","src/extensions/khr/push_descriptor.rs":"b4976428308eb1399e565954ba487c2715d9bf46388c95ddd2155c9e9cb37af5","src/extensions/khr/ray_tracing.rs":"6f4f7c59c3812b87b66b87c5a15168f2d930fd55740f65ea157846d59eba1afd","src/extensions/khr/surface.rs":"09dd9b3489ad696ab931ed91d03e56d804e43444d99fe101a607aa3f7f330759","src/extensions/khr/swapchain.rs":"8dcd3561fd562013b8f3c3020497d149ff275bdb4725916380419ebb6d6c9903","src/extensions/khr/timeline_semaphore.rs":"28b9b68902ac0867d875261cfe6548539cb4446e57c7d7a9093ad3b0b93fc18e","src
/extensions/khr/wayland_surface.rs":"67d73dfaae92c824c82e8c52e409388e25d5e9eed9a0546e12b3fe4bb9c0a7a3","src/extensions/khr/win32_surface.rs":"15f7c58b536fb4a278022c8e8b723d1c520ac4cd416e205794a6ca091a08b33d","src/extensions/khr/xcb_surface.rs":"3967715468823765c239023b4e3cec895a3b039d13c8e4126655bbf2d4a441cd","src/extensions/khr/xlib_surface.rs":"f057d3314aeb4646d10075fd71e6d4244ff98baf2cd0f29787fdf0aab76c530a","src/extensions/mod.rs":"4a394c468a0fc824671b36c1390f6c34173d073ed0918a528a84f48667756d65","src/extensions/mvk/ios_surface.rs":"2734555f36a2faf43bdf955caadd186484a50370b2a359b59f46e212a6cae953","src/extensions/mvk/macos_surface.rs":"a0aaa9027ecc0a005c8db5a8ff282866b0578bbf47da5d1d2483e2aabf606d89","src/extensions/mvk/mod.rs":"d03ac1a0144d1aca9ed1d0ce0c14b099f1fedb03b8108028b780a34f64de604c","src/extensions/nv/mesh_shader.rs":"d2d675a60047e2312fe72287a69c07ae4761a8980f8b9987efcd96b2fc3913c5","src/extensions/nv/mod.rs":"175512de8528c3a90000cf9509c683761e9536dcb448877b7c7772b695aad258","src/extensions/nv/ray_tracing.rs":"40112de7c6e27d4e763ac36488f6b01a5c313bca5e4f8d63a00e5c6fcb0320f4","src/instance.rs":"8dfb706126608f91c788e8d9984ac94f1894fbf52eaa784242234bff42672265","src/lib.rs":"e53d93789398bd2ea17d6ab056bc18d66373d7b87dcb912441426bcd70f05beb","src/prelude.rs":"68b66cab3ca52ef50943eca68c4482f919342f51e21fb52a66e9b0625d5ff79e","src/util.rs":"4f966bb489398813ae062d906ccb8348e8d6a77fdc48c0f8a0ad7871780f264e","src/version.rs":"2d46dc6a5cc2358f70e1561bf17fffeccf11649e67011b816b9e114d8ecebffb","src/vk.rs":"12a419254a1879d4c0fef855de973eb1f1a3e97fcf1a61587704cb6af139c37c","tests/constant_size_arrays.rs":"aff59222e42a0920ac1045f8eb34b1ff815fa0b19cd10c63194632e2ff0d362e","tests/display.rs":"d294396bb5b2210432724cccc0a56e105bab8743e180d4ad7cc675a200c09539"},"package":"69daec0742947f33a85931fa3cb0ce5f07929159dcbd1f0cbb5b2912e2978509"} \ No newline at end of file diff --git a/third_party/rust/ash/Cargo.toml b/third_party/rust/ash/Cargo.toml index 
a745e43b96b8..1a2b1c66c92f 100644 --- a/third_party/rust/ash/Cargo.toml +++ b/third_party/rust/ash/Cargo.toml @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "ash" -version = "0.29.0" +version = "0.30.0" authors = ["maik klein "] description = "Vulkan bindings for Rust" documentation = "https://docs.rs/ash" @@ -22,8 +23,8 @@ license = "MIT" repository = "https://github.com/MaikKlein/ash" [package.metadata.release] no-dev-version = true -[dependencies.shared_library] -version = "0.1.9" +[dependencies.libloading] +version = "0.5.2" [features] default = [] diff --git a/third_party/rust/ash/output b/third_party/rust/ash/output deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/third_party/rust/ash/src/device.rs b/third_party/rust/ash/src/device.rs index 5684678b99b4..a2eeab7a7613 100644 --- a/third_party/rust/ash/src/device.rs +++ b/third_party/rust/ash/src/device.rs @@ -1,16 +1,203 @@ -#![allow(dead_code)] -use prelude::*; +#![allow(clippy::trivially_copy_pass_by_ref)] +use crate::prelude::*; +use crate::vk; +use crate::RawPtr; use std::mem; use std::os::raw::c_void; use std::ptr; -use vk; -use RawPtr; + +#[allow(non_camel_case_types)] +pub trait DeviceV1_2: DeviceV1_1 { + fn fp_v1_2(&self) -> &vk::DeviceFnV1_2; + + #[doc = ""] + unsafe fn cmd_draw_indirect_count( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + count_buffer: vk::Buffer, + count_buffer_offset: vk::DeviceSize, + max_draw_count: u32, + stride: u32, + ) { + self.fp_v1_2().cmd_draw_indirect_count( + command_buffer, + buffer, + offset, + count_buffer, + count_buffer_offset, + max_draw_count, + stride, + ); + } + + #[doc = ""] + unsafe fn cmd_draw_indexed_indirect_count( + &self, + command_buffer: vk::CommandBuffer, + buffer: vk::Buffer, + offset: vk::DeviceSize, + count_buffer: vk::Buffer, + count_buffer_offset: vk::DeviceSize, + max_draw_count: u32, + stride: u32, + ) { + 
self.fp_v1_2().cmd_draw_indexed_indirect_count( + command_buffer, + buffer, + offset, + count_buffer, + count_buffer_offset, + max_draw_count, + stride, + ); + } + + #[doc = ""] + unsafe fn create_render_pass2( + &self, + create_info: &vk::RenderPassCreateInfo2, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut renderpass = mem::zeroed(); + let err_code = self.fp_v1_2().create_render_pass2( + self.handle(), + create_info, + allocation_callbacks.as_raw_ptr(), + &mut renderpass, + ); + match err_code { + vk::Result::SUCCESS => Ok(renderpass), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn cmd_begin_render_pass2( + &self, + command_buffer: vk::CommandBuffer, + render_pass_begin_info: &vk::RenderPassBeginInfo, + subpass_begin_info: &vk::SubpassBeginInfo, + ) { + self.fp_v1_2().cmd_begin_render_pass2( + command_buffer, + render_pass_begin_info, + subpass_begin_info, + ); + } + + #[doc = ""] + unsafe fn cmd_next_subpass2( + &self, + command_buffer: vk::CommandBuffer, + subpass_begin_info: &vk::SubpassBeginInfo, + subpass_end_info: &vk::SubpassEndInfo, + ) { + self.fp_v1_2() + .cmd_next_subpass2(command_buffer, subpass_begin_info, subpass_end_info); + } + + #[doc = ""] + unsafe fn cmd_end_render_pass2( + &self, + command_buffer: vk::CommandBuffer, + subpass_end_info: &vk::SubpassEndInfo, + ) { + self.fp_v1_2() + .cmd_end_render_pass2(command_buffer, subpass_end_info); + } + + #[doc = ""] + unsafe fn reset_query_pool( + &self, + device: vk::Device, + query_pool: vk::QueryPool, + first_query: u32, + query_count: u32, + ) { + self.fp_v1_2() + .reset_query_pool(device, query_pool, first_query, query_count); + } + + #[doc = ""] + unsafe fn get_semaphore_counter_value( + &self, + device: vk::Device, + semaphore: vk::Semaphore, + ) -> VkResult { + let mut value = 0; + let err_code = self + .fp_v1_2() + .get_semaphore_counter_value(device, semaphore, &mut value); + match err_code { + vk::Result::SUCCESS => Ok(value), + _ => 
Err(err_code), + } + } + + #[doc = ""] + unsafe fn wait_semaphores( + &self, + device: vk::Device, + wait_info: &vk::SemaphoreWaitInfo, + timeout: u64, + ) -> VkResult<()> { + let err_code = self.fp_v1_2().wait_semaphores(device, wait_info, timeout); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn signal_semaphore( + &self, + device: vk::Device, + signal_info: &vk::SemaphoreSignalInfo, + ) -> VkResult<()> { + let err_code = self.fp_v1_2().signal_semaphore(device, signal_info); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + unsafe fn get_buffer_device_address( + &self, + device: vk::Device, + info: &vk::BufferDeviceAddressInfo, + ) -> vk::DeviceAddress { + self.fp_v1_2().get_buffer_device_address(device, info) + } + + #[doc = ""] + unsafe fn get_buffer_opaque_capture_address( + &self, + device: vk::Device, + info: &vk::BufferDeviceAddressInfo, + ) -> u64 { + self.fp_v1_2() + .get_buffer_opaque_capture_address(device, info) + } + + #[doc = ""] + unsafe fn get_device_memory_opaque_capture_address( + &self, + device: vk::Device, + info: &vk::DeviceMemoryOpaqueCaptureAddressInfo, + ) -> u64 { + self.fp_v1_2() + .get_device_memory_opaque_capture_address(device, info) + } +} #[allow(non_camel_case_types)] pub trait DeviceV1_1: DeviceV1_0 { fn fp_v1_1(&self) -> &vk::DeviceFnV1_1; - #[doc = ""] + #[doc = ""] unsafe fn bind_buffer_memory2(&self, bind_infos: &[vk::BindBufferMemoryInfo]) -> VkResult<()> { let err_code = self.fp_v1_1().bind_buffer_memory2( self.handle(), @@ -23,7 +210,7 @@ pub trait DeviceV1_1: DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn bind_image_memory2(&self, bind_infos: &[vk::BindImageMemoryInfo]) -> VkResult<()> { let err_code = self.fp_v1_1().bind_image_memory2( self.handle(), @@ -36,14 +223,14 @@ pub trait DeviceV1_1: DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn get_device_group_peer_memory_features( &self, heap_index: 
u32, local_device_index: u32, remote_device_index: u32, ) -> vk::PeerMemoryFeatureFlags { - let mut peer_memory_features = mem::uninitialized(); + let mut peer_memory_features = mem::zeroed(); self.fp_v1_1().get_device_group_peer_memory_features( self.handle(), heap_index, @@ -54,13 +241,13 @@ pub trait DeviceV1_1: DeviceV1_0 { peer_memory_features } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_device_mask(&self, command_buffer: vk::CommandBuffer, device_mask: u32) { self.fp_v1_1() .cmd_set_device_mask(command_buffer, device_mask); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_dispatch_base( &self, command_buffer: vk::CommandBuffer, @@ -82,7 +269,7 @@ pub trait DeviceV1_1: DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn get_image_memory_requirements2( &self, info: &vk::ImageMemoryRequirementsInfo2, @@ -92,7 +279,7 @@ pub trait DeviceV1_1: DeviceV1_0 { .get_image_memory_requirements2(self.handle(), info, out); } - #[doc = ""] + #[doc = ""] unsafe fn get_buffer_memory_requirements2( &self, info: &vk::BufferMemoryRequirementsInfo2, @@ -106,7 +293,7 @@ pub trait DeviceV1_1: DeviceV1_0 { &self, info: &vk::ImageSparseMemoryRequirementsInfo2, ) -> usize { - let mut count = mem::uninitialized(); + let mut count = mem::zeroed(); self.fp_v1_1().get_image_sparse_memory_requirements2( self.handle(), info, @@ -116,7 +303,7 @@ pub trait DeviceV1_1: DeviceV1_0 { count as usize } - #[doc = ""] + #[doc = ""] unsafe fn get_image_sparse_memory_requirements2( &self, info: &vk::ImageSparseMemoryRequirementsInfo2, @@ -131,7 +318,7 @@ pub trait DeviceV1_1: DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn trim_command_pool( &self, command_pool: vk::CommandPool, @@ -141,13 +328,13 @@ pub trait DeviceV1_1: DeviceV1_0 { .trim_command_pool(self.handle(), command_pool, flags); } - #[doc = ""] + #[doc = ""] unsafe fn create_sampler_ycbcr_conversion( &self, create_info: &vk::SamplerYcbcrConversionCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - 
let mut ycbcr_conversion = mem::uninitialized(); + let mut ycbcr_conversion = mem::zeroed(); let err_code = self.fp_v1_1().create_sampler_ycbcr_conversion( self.handle(), create_info, @@ -160,7 +347,7 @@ pub trait DeviceV1_1: DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn destroy_sampler_ycbcr_conversion( &self, ycbcr_conversion: vk::SamplerYcbcrConversion, @@ -173,13 +360,13 @@ pub trait DeviceV1_1: DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn create_descriptor_update_template( &self, create_info: &vk::DescriptorUpdateTemplateCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut descriptor_update_template = mem::uninitialized(); + let mut descriptor_update_template = mem::zeroed(); let err_code = self.fp_v1_1().create_descriptor_update_template( self.handle(), create_info, @@ -192,7 +379,7 @@ pub trait DeviceV1_1: DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn destroy_descriptor_update_template( &self, descriptor_update_template: vk::DescriptorUpdateTemplate, @@ -205,7 +392,7 @@ pub trait DeviceV1_1: DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn update_descriptor_set_with_template( &self, descriptor_set: vk::DescriptorSet, @@ -220,7 +407,7 @@ pub trait DeviceV1_1: DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn get_descriptor_set_layout_support( &self, create_info: &vk::DescriptorSetLayoutCreateInfo, @@ -235,13 +422,13 @@ pub trait DeviceV1_1: DeviceV1_0 { pub trait DeviceV1_0 { fn handle(&self) -> vk::Device; fn fp_v1_0(&self) -> &vk::DeviceFnV1_0; - #[doc = ""] + #[doc = ""] unsafe fn destroy_device(&self, allocation_callbacks: Option<&vk::AllocationCallbacks>) { self.fp_v1_0() .destroy_device(self.handle(), allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_sampler( &self, sampler: vk::Sampler, @@ -251,7 +438,7 @@ pub trait DeviceV1_0 { .destroy_sampler(self.handle(), sampler, allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] 
unsafe fn free_memory( &self, memory: vk::DeviceMemory, @@ -261,7 +448,7 @@ pub trait DeviceV1_0 { .free_memory(self.handle(), memory, allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn free_command_buffers( &self, command_pool: vk::CommandPool, @@ -275,13 +462,13 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn create_event( &self, create_info: &vk::EventCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut event = mem::uninitialized(); + let mut event = mem::zeroed(); let err_code = self.fp_v1_0().create_event( self.handle(), create_info, @@ -296,7 +483,7 @@ pub trait DeviceV1_0 { /// Returns true if the event was set, and false if the event was reset, otherwise it will /// return the error code. - #[doc = ""] + #[doc = ""] unsafe fn get_event_status(&self, event: vk::Event) -> VkResult { let err_code = self.fp_v1_0().get_event_status(self.handle(), event); match err_code { @@ -306,7 +493,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn set_event(&self, event: vk::Event) -> VkResult<()> { let err_code = self.fp_v1_0().set_event(self.handle(), event); match err_code { @@ -315,7 +502,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn reset_event(&self, event: vk::Event) -> VkResult<()> { let err_code = self.fp_v1_0().reset_event(self.handle(), event); match err_code { @@ -323,7 +510,7 @@ pub trait DeviceV1_0 { _ => Err(err_code), } } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_event( &self, command_buffer: vk::CommandBuffer, @@ -333,7 +520,7 @@ pub trait DeviceV1_0 { self.fp_v1_0() .cmd_set_event(command_buffer, event, stage_mask); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_reset_event( &self, command_buffer: vk::CommandBuffer, @@ -344,7 +531,7 @@ pub trait DeviceV1_0 { .cmd_reset_event(command_buffer, event, stage_mask); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_wait_events( &self, command_buffer: vk::CommandBuffer, @@ -370,7 +557,7 @@ 
pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_fence( &self, fence: vk::Fence, @@ -380,7 +567,7 @@ pub trait DeviceV1_0 { .destroy_fence(self.handle(), fence, allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_event( &self, event: vk::Event, @@ -390,7 +577,7 @@ pub trait DeviceV1_0 { .destroy_event(self.handle(), event, allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_image( &self, image: vk::Image, @@ -400,7 +587,7 @@ pub trait DeviceV1_0 { .destroy_image(self.handle(), image, allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_command_pool( &self, pool: vk::CommandPool, @@ -410,7 +597,7 @@ pub trait DeviceV1_0 { .destroy_command_pool(self.handle(), pool, allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_image_view( &self, image_view: vk::ImageView, @@ -423,7 +610,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_render_pass( &self, renderpass: vk::RenderPass, @@ -436,7 +623,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_framebuffer( &self, framebuffer: vk::Framebuffer, @@ -449,7 +636,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_pipeline_layout( &self, pipeline_layout: vk::PipelineLayout, @@ -462,7 +649,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_pipeline_cache( &self, pipeline_cache: vk::PipelineCache, @@ -475,7 +662,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_buffer( &self, buffer: vk::Buffer, @@ -485,7 +672,7 @@ pub trait DeviceV1_0 { .destroy_buffer(self.handle(), buffer, allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_shader_module( &self, shader: vk::ShaderModule, @@ -498,7 +685,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_pipeline( &self, pipeline: vk::Pipeline, @@ 
-508,7 +695,7 @@ pub trait DeviceV1_0 { .destroy_pipeline(self.handle(), pipeline, allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_semaphore( &self, semaphore: vk::Semaphore, @@ -521,7 +708,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_descriptor_pool( &self, pool: vk::DescriptorPool, @@ -534,7 +721,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_query_pool( &self, pool: vk::QueryPool, @@ -544,7 +731,7 @@ pub trait DeviceV1_0 { .destroy_query_pool(self.handle(), pool, allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn destroy_descriptor_set_layout( &self, layout: vk::DescriptorSetLayout, @@ -557,7 +744,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn free_descriptor_sets( &self, pool: vk::DescriptorPool, @@ -571,7 +758,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn update_descriptor_sets( &self, descriptor_writes: &[vk::WriteDescriptorSet], @@ -586,13 +773,13 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn create_sampler( &self, create_info: &vk::SamplerCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut sampler = mem::uninitialized(); + let mut sampler = mem::zeroed(); let err_code = self.fp_v1_0().create_sampler( self.handle(), create_info, @@ -605,7 +792,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn cmd_blit_image( &self, command_buffer: vk::CommandBuffer, @@ -628,7 +815,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_resolve_image( &self, command_buffer: vk::CommandBuffer, @@ -649,7 +836,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_fill_buffer( &self, command_buffer: vk::CommandBuffer, @@ -662,7 +849,7 @@ pub trait DeviceV1_0 { .cmd_fill_buffer(command_buffer, buffer, offset, size, data); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_update_buffer( 
&self, command_buffer: vk::CommandBuffer, @@ -679,7 +866,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_copy_buffer( &self, command_buffer: vk::CommandBuffer, @@ -696,7 +883,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_copy_image_to_buffer( &self, command_buffer: vk::CommandBuffer, @@ -715,7 +902,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_copy_buffer_to_image( &self, command_buffer: vk::CommandBuffer, @@ -734,7 +921,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_copy_image( &self, command_buffer: vk::CommandBuffer, @@ -755,7 +942,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn allocate_descriptor_sets( &self, create_info: &vk::DescriptorSetAllocateInfo, @@ -774,13 +961,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_descriptor_set_layout( &self, create_info: &vk::DescriptorSetLayoutCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut layout = mem::uninitialized(); + let mut layout = mem::zeroed(); let err_code = self.fp_v1_0().create_descriptor_set_layout( self.handle(), create_info, @@ -793,7 +980,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn device_wait_idle(&self) -> VkResult<()> { let err_code = self.fp_v1_0().device_wait_idle(self.handle()); match err_code { @@ -802,13 +989,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_descriptor_pool( &self, create_info: &vk::DescriptorPoolCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut pool = mem::uninitialized(); + let mut pool = mem::zeroed(); let err_code = self.fp_v1_0().create_descriptor_pool( self.handle(), create_info, @@ -821,7 +1008,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn reset_descriptor_pool( &self, pool: vk::DescriptorPool, @@ -836,7 +1023,7 @@ pub trait DeviceV1_0 { } } - 
#[doc = ""] + #[doc = ""] unsafe fn reset_command_pool( &self, command_pool: vk::CommandPool, @@ -851,7 +1038,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn reset_command_buffer( &self, command_buffer: vk::CommandBuffer, @@ -864,7 +1051,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn reset_fences(&self, fences: &[vk::Fence]) -> VkResult<()> { let err_code = self.fp_v1_0() @@ -875,7 +1062,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn cmd_bind_index_buffer( &self, command_buffer: vk::CommandBuffer, @@ -887,7 +1074,7 @@ pub trait DeviceV1_0 { .cmd_bind_index_buffer(command_buffer, buffer, offset, index_type); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_clear_color_image( &self, command_buffer: vk::CommandBuffer, @@ -906,7 +1093,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_clear_depth_stencil_image( &self, command_buffer: vk::CommandBuffer, @@ -925,7 +1112,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_clear_attachments( &self, command_buffer: vk::CommandBuffer, @@ -941,7 +1128,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_draw_indexed( &self, command_buffer: vk::CommandBuffer, @@ -961,7 +1148,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_draw_indexed_indirect( &self, command_buffer: vk::CommandBuffer, @@ -979,7 +1166,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_execute_commands( &self, primary_command_buffer: vk::CommandBuffer, @@ -992,7 +1179,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_bind_descriptor_sets( &self, command_buffer: vk::CommandBuffer, @@ -1014,7 +1201,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_copy_query_pool_results( &self, command_buffer: vk::CommandBuffer, @@ -1038,7 +1225,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_push_constants( &self, 
command_buffer: vk::CommandBuffer, @@ -1057,7 +1244,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_begin_render_pass( &self, command_buffer: vk::CommandBuffer, @@ -1068,7 +1255,7 @@ pub trait DeviceV1_0 { .cmd_begin_render_pass(command_buffer, create_info, contents); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_next_subpass( &self, command_buffer: vk::CommandBuffer, @@ -1077,7 +1264,7 @@ pub trait DeviceV1_0 { self.fp_v1_0().cmd_next_subpass(command_buffer, contents); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_bind_pipeline( &self, command_buffer: vk::CommandBuffer, @@ -1088,7 +1275,7 @@ pub trait DeviceV1_0 { .cmd_bind_pipeline(command_buffer, pipeline_bind_point, pipeline); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_scissor( &self, command_buffer: vk::CommandBuffer, @@ -1103,13 +1290,13 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_line_width(&self, command_buffer: vk::CommandBuffer, line_width: f32) { self.fp_v1_0() .cmd_set_line_width(command_buffer, line_width); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_bind_vertex_buffers( &self, command_buffer: vk::CommandBuffer, @@ -1127,12 +1314,12 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_end_render_pass(&self, command_buffer: vk::CommandBuffer) { self.fp_v1_0().cmd_end_render_pass(command_buffer); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_draw( &self, command_buffer: vk::CommandBuffer, @@ -1150,7 +1337,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_draw_indirect( &self, command_buffer: vk::CommandBuffer, @@ -1163,7 +1350,7 @@ pub trait DeviceV1_0 { .cmd_draw_indirect(command_buffer, buffer, offset, draw_count, stride); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_dispatch( &self, command_buffer: vk::CommandBuffer, @@ -1175,7 +1362,7 @@ pub trait DeviceV1_0 { .cmd_dispatch(command_buffer, group_count_x, group_count_y, group_count_z); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_dispatch_indirect( 
&self, command_buffer: vk::CommandBuffer, @@ -1186,7 +1373,7 @@ pub trait DeviceV1_0 { .cmd_dispatch_indirect(command_buffer, buffer, offset); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_viewport( &self, command_buffer: vk::CommandBuffer, @@ -1201,7 +1388,7 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_depth_bias( &self, command_buffer: vk::CommandBuffer, @@ -1213,7 +1400,7 @@ pub trait DeviceV1_0 { .cmd_set_depth_bias(command_buffer, constant_factor, clamp, slope_factor); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_blend_constants( &self, command_buffer: vk::CommandBuffer, @@ -1223,7 +1410,7 @@ pub trait DeviceV1_0 { .cmd_set_blend_constants(command_buffer, blend_constants); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_depth_bounds( &self, command_buffer: vk::CommandBuffer, @@ -1234,7 +1421,7 @@ pub trait DeviceV1_0 { .cmd_set_depth_bounds(command_buffer, min_depth_bounds, max_depth_bounds); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_stencil_compare_mask( &self, command_buffer: vk::CommandBuffer, @@ -1245,7 +1432,7 @@ pub trait DeviceV1_0 { .cmd_set_stencil_compare_mask(command_buffer, face_mask, compare_mask); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_stencil_write_mask( &self, command_buffer: vk::CommandBuffer, @@ -1256,7 +1443,7 @@ pub trait DeviceV1_0 { .cmd_set_stencil_write_mask(command_buffer, face_mask, write_mask); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_set_stencil_reference( &self, command_buffer: vk::CommandBuffer, @@ -1267,7 +1454,7 @@ pub trait DeviceV1_0 { .cmd_set_stencil_reference(command_buffer, face_mask, reference); } - #[doc = ""] + #[doc = ""] unsafe fn get_query_pool_results( &self, query_pool: vk::QueryPool, @@ -1303,7 +1490,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn cmd_begin_query( &self, command_buffer: vk::CommandBuffer, @@ -1315,7 +1502,7 @@ pub trait DeviceV1_0 { .cmd_begin_query(command_buffer, query_pool, query, flags); } - #[doc = ""] + #[doc = ""] 
unsafe fn cmd_end_query( &self, command_buffer: vk::CommandBuffer, @@ -1326,7 +1513,7 @@ pub trait DeviceV1_0 { .cmd_end_query(command_buffer, query_pool, query); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_reset_query_pool( &self, command_buffer: vk::CommandBuffer, @@ -1338,7 +1525,7 @@ pub trait DeviceV1_0 { .cmd_reset_query_pool(command_buffer, pool, first_query, query_count); } - #[doc = ""] + #[doc = ""] unsafe fn cmd_write_timestamp( &self, command_buffer: vk::CommandBuffer, @@ -1350,13 +1537,13 @@ pub trait DeviceV1_0 { .cmd_write_timestamp(command_buffer, pipeline_stage, query_pool, query); } - #[doc = ""] + #[doc = ""] unsafe fn create_semaphore( &self, create_info: &vk::SemaphoreCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut semaphore = mem::uninitialized(); + let mut semaphore = mem::zeroed(); let err_code = self.fp_v1_0().create_semaphore( self.handle(), create_info, @@ -1369,7 +1556,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_graphics_pipelines( &self, pipeline_cache: vk::PipelineCache, @@ -1392,7 +1579,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_compute_pipelines( &self, pipeline_cache: vk::PipelineCache, @@ -1415,13 +1602,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_buffer( &self, create_info: &vk::BufferCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut buffer = mem::uninitialized(); + let mut buffer = mem::zeroed(); let err_code = self.fp_v1_0().create_buffer( self.handle(), create_info, @@ -1434,13 +1621,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_pipeline_layout( &self, create_info: &vk::PipelineLayoutCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut pipeline_layout = mem::uninitialized(); + let mut pipeline_layout = mem::zeroed(); let err_code = self.fp_v1_0().create_pipeline_layout( 
self.handle(), create_info, @@ -1453,13 +1640,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_pipeline_cache( &self, create_info: &vk::PipelineCacheCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut pipeline_cache = mem::uninitialized(); + let mut pipeline_cache = mem::zeroed(); let err_code = self.fp_v1_0().create_pipeline_cache( self.handle(), create_info, @@ -1473,7 +1660,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn get_pipeline_cache_data( &self, pipeline_cache: vk::PipelineCache, @@ -1502,7 +1689,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn map_memory( &self, memory: vk::DeviceMemory, @@ -1510,7 +1697,7 @@ pub trait DeviceV1_0 { size: vk::DeviceSize, flags: vk::MemoryMapFlags, ) -> VkResult<*mut c_void> { - let mut data: *mut c_void = mem::uninitialized(); + let mut data: *mut c_void = ptr::null_mut(); let err_code = self.fp_v1_0() .map_memory(self.handle(), memory, offset, size, flags, &mut data); @@ -1520,12 +1707,12 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn unmap_memory(&self, memory: vk::DeviceMemory) { self.fp_v1_0().unmap_memory(self.handle(), memory); } - #[doc = ""] + #[doc = ""] unsafe fn invalidate_mapped_memory_ranges( &self, ranges: &[vk::MappedMemoryRange], @@ -1541,7 +1728,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn flush_mapped_memory_ranges(&self, ranges: &[vk::MappedMemoryRange]) -> VkResult<()> { let err_code = self.fp_v1_0().flush_mapped_memory_ranges( self.handle(), @@ -1554,13 +1741,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_framebuffer( &self, create_info: &vk::FramebufferCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut framebuffer = mem::uninitialized(); + let mut framebuffer = mem::zeroed(); let err_code = self.fp_v1_0().create_framebuffer( self.handle(), create_info, @@ -1573,15 
+1760,15 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn get_device_queue(&self, queue_family_index: u32, queue_index: u32) -> vk::Queue { - let mut queue = mem::uninitialized(); + let mut queue = mem::zeroed(); self.fp_v1_0() .get_device_queue(self.handle(), queue_family_index, queue_index, &mut queue); queue } - #[doc = ""] + #[doc = ""] unsafe fn cmd_pipeline_barrier( &self, command_buffer: vk::CommandBuffer, @@ -1606,13 +1793,13 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn create_render_pass( &self, create_info: &vk::RenderPassCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut renderpass = mem::uninitialized(); + let mut renderpass = mem::zeroed(); let err_code = self.fp_v1_0().create_render_pass( self.handle(), create_info, @@ -1625,7 +1812,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn begin_command_buffer( &self, command_buffer: vk::CommandBuffer, @@ -1640,7 +1827,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn end_command_buffer(&self, command_buffer: vk::CommandBuffer) -> VkResult<()> { let err_code = self.fp_v1_0().end_command_buffer(command_buffer); match err_code { @@ -1649,7 +1836,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn wait_for_fences( &self, fences: &[vk::Fence], @@ -1669,16 +1856,17 @@ pub trait DeviceV1_0 { } } - #[doc = ""] - unsafe fn get_fence_status(&self, fence: vk::Fence) -> VkResult<()> { + #[doc = ""] + unsafe fn get_fence_status(&self, fence: vk::Fence) -> VkResult { let err_code = self.fp_v1_0().get_fence_status(self.handle(), fence); match err_code { - vk::Result::SUCCESS => Ok(()), + vk::Result::SUCCESS => Ok(true), + vk::Result::NOT_READY => Ok(false), _ => Err(err_code), } } - #[doc = ""] + #[doc = ""] unsafe fn queue_wait_idle(&self, queue: vk::Queue) -> VkResult<()> { let err_code = self.fp_v1_0().queue_wait_idle(queue); match err_code { @@ -1687,7 +1875,7 @@ pub trait 
DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn queue_submit( &self, queue: vk::Queue, @@ -1703,13 +1891,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_buffer_view( &self, create_info: &vk::BufferViewCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut buffer_view = mem::uninitialized(); + let mut buffer_view = mem::zeroed(); let err_code = self.fp_v1_0().create_buffer_view( self.handle(), create_info, @@ -1722,7 +1910,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn destroy_buffer_view( &self, buffer_view: vk::BufferView, @@ -1735,13 +1923,13 @@ pub trait DeviceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn create_image_view( &self, create_info: &vk::ImageViewCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut image_view = mem::uninitialized(); + let mut image_view = mem::zeroed(); let err_code = self.fp_v1_0().create_image_view( self.handle(), create_info, @@ -1754,7 +1942,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn allocate_command_buffers( &self, create_info: &vk::CommandBufferAllocateInfo, @@ -1772,13 +1960,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_command_pool( &self, create_info: &vk::CommandPoolCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut pool = mem::uninitialized(); + let mut pool = mem::zeroed(); let err_code = self.fp_v1_0().create_command_pool( self.handle(), create_info, @@ -1791,13 +1979,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_query_pool( &self, create_info: &vk::QueryPoolCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut pool = mem::uninitialized(); + let mut pool = mem::zeroed(); let err_code = self.fp_v1_0().create_query_pool( self.handle(), create_info, @@ -1810,13 +1998,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] 
+ #[doc = ""] unsafe fn create_image( &self, create_info: &vk::ImageCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut image = mem::uninitialized(); + let mut image = mem::zeroed(); let err_code = self.fp_v1_0().create_image( self.handle(), create_info, @@ -1829,13 +2017,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn get_image_subresource_layout( &self, image: vk::Image, subresource: vk::ImageSubresource, ) -> vk::SubresourceLayout { - let mut layout = mem::uninitialized(); + let mut layout = mem::zeroed(); self.fp_v1_0().get_image_subresource_layout( self.handle(), image, @@ -1845,29 +2033,29 @@ pub trait DeviceV1_0 { layout } - #[doc = ""] + #[doc = ""] unsafe fn get_image_memory_requirements(&self, image: vk::Image) -> vk::MemoryRequirements { - let mut mem_req = mem::uninitialized(); + let mut mem_req = mem::zeroed(); self.fp_v1_0() .get_image_memory_requirements(self.handle(), image, &mut mem_req); mem_req } - #[doc = ""] + #[doc = ""] unsafe fn get_buffer_memory_requirements(&self, buffer: vk::Buffer) -> vk::MemoryRequirements { - let mut mem_req = mem::uninitialized(); + let mut mem_req = mem::zeroed(); self.fp_v1_0() .get_buffer_memory_requirements(self.handle(), buffer, &mut mem_req); mem_req } - #[doc = ""] + #[doc = ""] unsafe fn allocate_memory( &self, create_info: &vk::MemoryAllocateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut memory = mem::uninitialized(); + let mut memory = mem::zeroed(); let err_code = self.fp_v1_0().allocate_memory( self.handle(), create_info, @@ -1880,13 +2068,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_shader_module( &self, create_info: &vk::ShaderModuleCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut shader = mem::uninitialized(); + let mut shader = mem::zeroed(); let err_code = self.fp_v1_0().create_shader_module( self.handle(), create_info, 
@@ -1899,13 +2087,13 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn create_fence( &self, create_info: &vk::FenceCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut fence = mem::uninitialized(); + let mut fence = mem::zeroed(); let err_code = self.fp_v1_0().create_fence( self.handle(), create_info, @@ -1918,7 +2106,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn bind_buffer_memory( &self, buffer: vk::Buffer, @@ -1934,7 +2122,7 @@ pub trait DeviceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn bind_image_memory( &self, image: vk::Image, @@ -1951,12 +2139,13 @@ pub trait DeviceV1_0 { } } -#[doc = ""] +#[doc = ""] #[derive(Clone)] pub struct Device { handle: vk::Device, device_fn_1_0: vk::DeviceFnV1_0, device_fn_1_1: vk::DeviceFnV1_1, + device_fn_1_2: vk::DeviceFnV1_2, } impl Device { pub unsafe fn load(instance_fn: &vk::InstanceFnV1_0, device: vk::Device) -> Self { @@ -1966,10 +2155,14 @@ impl Device { let device_fn_1_1 = vk::DeviceFnV1_1::load(|name| { mem::transmute(instance_fn.get_device_proc_addr(device, name.as_ptr())) }); + let device_fn_1_2 = vk::DeviceFnV1_2::load(|name| { + mem::transmute(instance_fn.get_device_proc_addr(device, name.as_ptr())) + }); Device { handle: device, device_fn_1_0, device_fn_1_1, + device_fn_1_2, } } } @@ -1990,6 +2183,12 @@ impl DeviceV1_1 for Device { } } +impl DeviceV1_2 for Device { + fn fp_v1_2(&self) -> &vk::DeviceFnV1_2 { + &self.device_fn_1_2 + } +} + impl Device { pub fn handle(&self) -> vk::Device { self.handle diff --git a/third_party/rust/ash/src/entry.rs b/third_party/rust/ash/src/entry.rs index d31dd8c49db7..006b1c03fa05 100644 --- a/third_party/rust/ash/src/entry.rs +++ b/third_party/rust/ash/src/entry.rs @@ -1,34 +1,34 @@ -use instance::Instance; -use prelude::*; -use shared_library::dynamic_library::DynamicLibrary; +use crate::instance::Instance; +use crate::prelude::*; +use crate::vk; +use crate::RawPtr; +use libloading::Library; 
use std::error::Error; use std::fmt; +use std::io; use std::mem; use std::os::raw::c_char; use std::os::raw::c_void; -use std::path::Path; use std::ptr; use std::sync::Arc; -use vk; -use RawPtr; #[cfg(windows)] -const LIB_PATH: &'static str = "vulkan-1.dll"; +const LIB_PATH: &str = "vulkan-1.dll"; #[cfg(all( unix, not(any(target_os = "macos", target_os = "ios", target_os = "android")) ))] -const LIB_PATH: &'static str = "libvulkan.so.1"; +const LIB_PATH: &str = "libvulkan.so.1"; #[cfg(target_os = "android")] -const LIB_PATH: &'static str = "libvulkan.so"; +const LIB_PATH: &str = "libvulkan.so"; #[cfg(any(target_os = "macos", target_os = "ios"))] -const LIB_PATH: &'static str = "libvulkan.dylib"; +const LIB_PATH: &str = "libvulkan.dylib"; /// Function loader -pub type Entry = EntryCustom>; +pub type Entry = EntryCustom>; /// Function loader #[derive(Clone)] @@ -36,12 +36,13 @@ pub struct EntryCustom { static_fn: vk::StaticFn, entry_fn_1_0: vk::EntryFnV1_0, entry_fn_1_1: vk::EntryFnV1_1, + entry_fn_1_2: vk::EntryFnV1_2, lib: L, } #[derive(Debug)] pub enum LoadingError { - LibraryLoadError(String), + LibraryLoadError(io::Error), } impl fmt::Display for LoadingError { @@ -54,7 +55,7 @@ impl fmt::Display for LoadingError { impl Error for LoadingError {} -#[derive(Debug)] +#[derive(Clone, Debug)] pub enum InstanceError { LoadError(Vec<&'static str>), VkError(vk::Result), @@ -76,14 +77,19 @@ pub trait EntryV1_0 { type Instance; fn fp_v1_0(&self) -> &vk::EntryFnV1_0; fn static_fn(&self) -> &vk::StaticFn; - #[doc = ""] + #[doc = ""] + /// + /// # Safety + /// In order for the created `Instance` to be valid for the duration of its + /// usage, the `Entry` this was called on must be dropped later than the + /// resulting `Instance`. 
unsafe fn create_instance( &self, create_info: &vk::InstanceCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> Result; - #[doc = ""] + #[doc = ""] fn enumerate_instance_layer_properties(&self) -> VkResult> { unsafe { let mut num = 0; @@ -102,7 +108,7 @@ pub trait EntryV1_0 { } } - #[doc = ""] + #[doc = ""] fn enumerate_instance_extension_properties(&self) -> VkResult> { unsafe { let mut num = 0; @@ -125,25 +131,30 @@ pub trait EntryV1_0 { } } - #[doc = ""] - fn get_instance_proc_addr( + #[doc = ""] + unsafe fn get_instance_proc_addr( &self, instance: vk::Instance, p_name: *const c_char, ) -> vk::PFN_vkVoidFunction { - unsafe { self.static_fn().get_instance_proc_addr(instance, p_name) } + self.static_fn().get_instance_proc_addr(instance, p_name) } } impl EntryV1_0 for EntryCustom { type Instance = Instance; - #[doc = ""] + #[doc = ""] + /// + /// # Safety + /// In order for the created `Instance` to be valid for the duration of its + /// usage, the `Entry` this was called on must be dropped later than the + /// resulting `Instance`. 
unsafe fn create_instance( &self, create_info: &vk::InstanceCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> Result { - let mut instance: vk::Instance = mem::uninitialized(); + let mut instance: vk::Instance = mem::zeroed(); let err_code = self.fp_v1_0().create_instance( create_info, allocation_callbacks.as_raw_ptr(), @@ -166,7 +177,8 @@ impl EntryV1_0 for EntryCustom { pub trait EntryV1_1: EntryV1_0 { fn fp_v1_1(&self) -> &vk::EntryFnV1_1; - #[doc = ""] + #[deprecated = "This function is unavailable and therefore panics on Vulkan 1.0, please use `try_enumerate_instance_version` instead"] + #[doc = ""] fn enumerate_instance_version(&self) -> VkResult { unsafe { let mut api_version = 0; @@ -179,15 +191,30 @@ pub trait EntryV1_1: EntryV1_0 { } } -impl EntryCustom> { +impl EntryV1_1 for EntryCustom { + fn fp_v1_1(&self) -> &vk::EntryFnV1_1 { + &self.entry_fn_1_1 + } +} + +#[allow(non_camel_case_types)] +pub trait EntryV1_2: EntryV1_1 { + fn fp_v1_2(&self) -> &vk::EntryFnV1_2; +} + +impl EntryV1_2 for EntryCustom { + fn fp_v1_2(&self) -> &vk::EntryFnV1_2 { + &self.entry_fn_1_2 + } +} + +impl EntryCustom> { /// ```rust,no_run - /// # #[macro_use] - /// # extern crate ash; /// use ash::{vk, Entry, version::EntryV1_0}; /// # fn main() -> Result<(), Box> { /// let entry = Entry::new()?; /// let app_info = vk::ApplicationInfo { - /// api_version: vk_make_version!(1, 0, 0), + /// api_version: vk::make_version(1, 0, 0), /// ..Default::default() /// }; /// let create_info = vk::InstanceCreateInfo { @@ -200,13 +227,14 @@ impl EntryCustom> { pub fn new() -> Result { Self::new_custom( || { - DynamicLibrary::open(Some(&Path::new(LIB_PATH))) - .map_err(|err| LoadingError::LibraryLoadError(err.clone())) - .map(|dl| Arc::new(dl)) + Library::new(&LIB_PATH) + .map_err(LoadingError::LibraryLoadError) + .map(Arc::new) }, |vk_lib, name| unsafe { vk_lib - .symbol(&*name.to_string_lossy()) + .get(name.to_bytes_with_nul()) + .map(|symbol| *symbol) 
.unwrap_or(ptr::null_mut()) }, ) @@ -230,27 +258,30 @@ impl EntryCustom { mem::transmute(static_fn.get_instance_proc_addr(vk::Instance::null(), name.as_ptr())) }); + let entry_fn_1_2 = vk::EntryFnV1_2::load(|name| unsafe { + mem::transmute(static_fn.get_instance_proc_addr(vk::Instance::null(), name.as_ptr())) + }); + Ok(EntryCustom { static_fn, entry_fn_1_0, entry_fn_1_1, + entry_fn_1_2, lib, }) } - #[doc = ""] + #[doc = ""] /// ```rust,no_run - /// # #[macro_use] - /// # extern crate ash; - /// # use ash::Entry; + /// # use ash::{Entry, vk}; /// # fn main() -> Result<(), Box> { /// let entry = Entry::new()?; /// match entry.try_enumerate_instance_version()? { /// // Vulkan 1.1+ /// Some(version) => { - /// let major = vk_version_major!(version); - /// let minor = vk_version_minor!(version); - /// let patch = vk_version_patch!(version); + /// let major = vk::version_major(version); + /// let minor = vk::version_minor(version); + /// let patch = vk::version_patch(version); /// }, /// // Vulkan 1.0 /// None => {}, diff --git a/third_party/rust/ash/src/extensions/experimental/amd.rs b/third_party/rust/ash/src/extensions/experimental/amd.rs index a68f0578d4c5..b01ef48d30e4 100644 --- a/third_party/rust/ash/src/extensions/experimental/amd.rs +++ b/third_party/rust/ash/src/extensions/experimental/amd.rs @@ -1,3 +1,5 @@ +#![allow(clippy::unreadable_literal)] + /* *********************************************************************************************************************** * @@ -23,9 +25,9 @@ * **********************************************************************************************************************/ +use crate::vk::*; use std::fmt; use std::os::raw::*; -use vk::*; // Extension: `VK_AMD_gpa_interface` diff --git a/third_party/rust/ash/src/extensions/ext/debug_marker.rs b/third_party/rust/ash/src/extensions/ext/debug_marker.rs old mode 100644 new mode 100755 index 11f6ce6aee2c..626f23ab506f --- a/third_party/rust/ash/src/extensions/ext/debug_marker.rs 
+++ b/third_party/rust/ash/src/extensions/ext/debug_marker.rs @@ -1,9 +1,9 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{DeviceV1_0, InstanceV1_0}; +use crate::vk; use std::ffi::CStr; use std::mem; -use version::{DeviceV1_0, InstanceV1_0}; -use vk; #[derive(Clone)] pub struct DebugMarker { @@ -15,16 +15,14 @@ impl DebugMarker { let debug_marker_fn = vk::ExtDebugMarkerFn::load(|name| unsafe { mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr())) }); - DebugMarker { - debug_marker_fn: debug_marker_fn, - } + DebugMarker { debug_marker_fn } } pub fn name() -> &'static CStr { vk::ExtDebugMarkerFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn debug_marker_set_object_name( &self, device: vk::Device, @@ -39,7 +37,7 @@ impl DebugMarker { } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_debug_marker_begin( &self, command_buffer: vk::CommandBuffer, @@ -49,13 +47,13 @@ impl DebugMarker { .cmd_debug_marker_begin_ext(command_buffer, marker_info); } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_debug_marker_end(&self, command_buffer: vk::CommandBuffer) { self.debug_marker_fn .cmd_debug_marker_end_ext(command_buffer); } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_debug_marker_insert( &self, command_buffer: vk::CommandBuffer, @@ -64,4 +62,8 @@ impl DebugMarker { self.debug_marker_fn .cmd_debug_marker_insert_ext(command_buffer, marker_info); } + + pub fn fp(&self) -> &vk::ExtDebugMarkerFn { + &self.debug_marker_fn + } } diff --git a/third_party/rust/ash/src/extensions/ext/debug_report.rs b/third_party/rust/ash/src/extensions/ext/debug_report.rs old mode 100644 new mode 100755 index 1d5011750d28..56dce805599d --- a/third_party/rust/ash/src/extensions/ext/debug_report.rs +++ b/third_party/rust/ash/src/extensions/ext/debug_report.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; 
use std::mem; -use version::{EntryV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct DebugReport { @@ -27,7 +27,7 @@ impl DebugReport { vk::ExtDebugReportFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_debug_report_callback( &self, debug: vk::DebugReportCallbackEXT, @@ -40,13 +40,13 @@ impl DebugReport { ); } - #[doc = ""] + #[doc = ""] pub unsafe fn create_debug_report_callback( &self, create_info: &vk::DebugReportCallbackCreateInfoEXT, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut debug_cb = mem::uninitialized(); + let mut debug_cb = mem::zeroed(); let err_code = self.debug_report_fn.create_debug_report_callback_ext( self.handle, create_info, @@ -58,4 +58,12 @@ impl DebugReport { _ => Err(err_code), } } + + pub fn fp(&self) -> &vk::ExtDebugReportFn { + &self.debug_report_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/ext/debug_utils.rs b/third_party/rust/ash/src/extensions/ext/debug_utils.rs old mode 100644 new mode 100755 index a8acffdf6139..fdafd3d2fc11 --- a/third_party/rust/ash/src/extensions/ext/debug_utils.rs +++ b/third_party/rust/ash/src/extensions/ext/debug_utils.rs @@ -1,9 +1,9 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::{vk, RawPtr}; use std::ffi::CStr; use std::mem; -use version::{EntryV1_0, InstanceV1_0}; -use {vk, RawPtr}; #[derive(Clone)] pub struct DebugUtils { @@ -26,7 +26,7 @@ impl DebugUtils { vk::ExtDebugUtilsFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn debug_utils_set_object_name( &self, device: vk::Device, @@ -41,7 +41,7 @@ impl DebugUtils { } } - #[doc = ""] + #[doc = ""] pub unsafe fn debug_utils_set_object_tag( &self, device: vk::Device, @@ -56,7 +56,7 @@ impl DebugUtils { } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_begin_debug_utils_label( &self, command_buffer: vk::CommandBuffer, @@ -66,13 +66,13 @@ 
impl DebugUtils { .cmd_begin_debug_utils_label_ext(command_buffer, label); } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_end_debug_utils_label(&self, command_buffer: vk::CommandBuffer) { self.debug_utils_fn .cmd_end_debug_utils_label_ext(command_buffer); } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_insert_debug_utils_label( &self, command_buffer: vk::CommandBuffer, @@ -82,7 +82,7 @@ impl DebugUtils { .cmd_insert_debug_utils_label_ext(command_buffer, label); } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_begin_debug_utils_label( &self, queue: vk::Queue, @@ -92,12 +92,12 @@ impl DebugUtils { .queue_begin_debug_utils_label_ext(queue, label); } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_end_debug_utils_label(&self, queue: vk::Queue) { self.debug_utils_fn.queue_end_debug_utils_label_ext(queue); } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_insert_debug_utils_label( &self, queue: vk::Queue, @@ -107,13 +107,13 @@ impl DebugUtils { .queue_insert_debug_utils_label_ext(queue, label); } - #[doc = ""] + #[doc = ""] pub unsafe fn create_debug_utils_messenger( &self, create_info: &vk::DebugUtilsMessengerCreateInfoEXT, allocator: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut messenger = mem::uninitialized(); + let mut messenger = mem::zeroed(); let err_code = self.debug_utils_fn.create_debug_utils_messenger_ext( self.handle, create_info, @@ -126,7 +126,7 @@ impl DebugUtils { } } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_debug_utils_messenger( &self, messenger: vk::DebugUtilsMessengerEXT, @@ -139,7 +139,7 @@ impl DebugUtils { ); } - #[doc = ""] + #[doc = ""] pub unsafe fn submit_debug_utils_message( &self, instance: vk::Instance, @@ -154,4 +154,12 @@ impl DebugUtils { callback_data, ); } + + pub fn fp(&self) -> &vk::ExtDebugUtilsFn { + &self.debug_utils_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/khr/android_surface.rs 
b/third_party/rust/ash/src/extensions/khr/android_surface.rs old mode 100644 new mode 100755 index d9d343d67bc7..0a22d47d5bb7 --- a/third_party/rust/ash/src/extensions/khr/android_surface.rs +++ b/third_party/rust/ash/src/extensions/khr/android_surface.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; -use version::{EntryV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct AndroidSurface { @@ -27,13 +27,13 @@ impl AndroidSurface { vk::KhrAndroidSurfaceFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn create_android_surface( &self, create_info: &vk::AndroidSurfaceCreateInfoKHR, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut surface = mem::uninitialized(); + let mut surface = mem::zeroed(); let err_code = self.android_surface_fn.create_android_surface_khr( self.handle, create_info, @@ -45,4 +45,12 @@ impl AndroidSurface { _ => Err(err_code), } } + + pub fn fp(&self) -> &vk::KhrAndroidSurfaceFn { + &self.android_surface_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/khr/display.rs b/third_party/rust/ash/src/extensions/khr/display.rs new file mode 100755 index 000000000000..8ea5e62572f1 --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/display.rs @@ -0,0 +1,203 @@ +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; +use std::ffi::CStr; +use std::mem; +use std::ptr; + +#[derive(Clone)] +pub struct Display { + handle: vk::Instance, + display_fn: vk::KhrDisplayFn, +} + +impl Display { + pub fn new(entry: &E, instance: &I) -> Display { + let display_fn = vk::KhrDisplayFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + Display { + handle: instance.handle(), + 
display_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrDisplayFn::name() + } + + #[doc = ""] + pub unsafe fn get_physical_device_display_properties( + &self, + physical_device: vk::PhysicalDevice, + ) -> VkResult> { + let mut count = 0; + self.display_fn.get_physical_device_display_properties_khr( + physical_device, + &mut count, + ptr::null_mut(), + ); + let mut v = Vec::with_capacity(count as usize); + let err_code = self.display_fn.get_physical_device_display_properties_khr( + physical_device, + &mut count, + v.as_mut_ptr(), + ); + v.set_len(count as usize); + match err_code { + vk::Result::SUCCESS => Ok(v), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_physical_device_display_plane_properties( + &self, + physical_device: vk::PhysicalDevice, + ) -> VkResult> { + let mut count = 0; + self.display_fn + .get_physical_device_display_plane_properties_khr( + physical_device, + &mut count, + ptr::null_mut(), + ); + let mut v = Vec::with_capacity(count as usize); + let err_code = self + .display_fn + .get_physical_device_display_plane_properties_khr( + physical_device, + &mut count, + v.as_mut_ptr(), + ); + v.set_len(count as usize); + match err_code { + vk::Result::SUCCESS => Ok(v), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_display_plane_supported_displays( + &self, + physical_device: vk::PhysicalDevice, + plane_index: u32, + ) -> VkResult> { + let mut count = 0; + self.display_fn.get_display_plane_supported_displays_khr( + physical_device, + plane_index, + &mut count, + ptr::null_mut(), + ); + let mut v = Vec::with_capacity(count as usize); + let err_code = self.display_fn.get_display_plane_supported_displays_khr( + physical_device, + plane_index, + &mut count, + v.as_mut_ptr(), + ); + v.set_len(count as usize); + match err_code { + vk::Result::SUCCESS => Ok(v), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_display_mode_properties( + &self, + physical_device: vk::PhysicalDevice, + display: 
vk::DisplayKHR, + ) -> VkResult> { + let mut count = 0; + self.display_fn.get_display_mode_properties_khr( + physical_device, + display, + &mut count, + ptr::null_mut(), + ); + let mut v = Vec::with_capacity(count as usize); + let err_code = self.display_fn.get_display_mode_properties_khr( + physical_device, + display, + &mut count, + v.as_mut_ptr(), + ); + v.set_len(count as usize); + match err_code { + vk::Result::SUCCESS => Ok(v), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn create_display_mode( + &self, + physical_device: vk::PhysicalDevice, + display: vk::DisplayKHR, + create_info: &vk::DisplayModeCreateInfoKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut display_mode = mem::MaybeUninit::zeroed(); + let err_code = self.display_fn.create_display_mode_khr( + physical_device, + display, + create_info, + allocation_callbacks.as_raw_ptr(), + display_mode.as_mut_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(display_mode.assume_init()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_display_plane_capabilities( + &self, + physical_device: vk::PhysicalDevice, + mode: vk::DisplayModeKHR, + plane_index: u32, + ) -> VkResult { + let mut display_plane_capabilities = mem::MaybeUninit::zeroed(); + let err_code = self.display_fn.get_display_plane_capabilities_khr( + physical_device, + mode, + plane_index, + display_plane_capabilities.as_mut_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(display_plane_capabilities.assume_init()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn create_display_plane_surface( + &self, + create_info: &vk::DisplaySurfaceCreateInfoKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut surface = mem::MaybeUninit::zeroed(); + let err_code = self.display_fn.create_display_plane_surface_khr( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + surface.as_mut_ptr(), + ); + match err_code 
{ + vk::Result::SUCCESS => Ok(surface.assume_init()), + _ => Err(err_code), + } + } + + pub fn fp(&self) -> &vk::KhrDisplayFn { + &self.display_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } +} diff --git a/third_party/rust/ash/src/extensions/khr/display_swapchain.rs b/third_party/rust/ash/src/extensions/khr/display_swapchain.rs old mode 100644 new mode 100755 index 29c3b5dfb348..bb4cfe289186 --- a/third_party/rust/ash/src/extensions/khr/display_swapchain.rs +++ b/third_party/rust/ash/src/extensions/khr/display_swapchain.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{DeviceV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; -use version::{DeviceV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct DisplaySwapchain { @@ -19,7 +19,7 @@ impl DisplaySwapchain { }); DisplaySwapchain { handle: device.handle(), - swapchain_fn: swapchain_fn, + swapchain_fn, } } @@ -27,7 +27,7 @@ impl DisplaySwapchain { vk::KhrDisplaySwapchainFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn create_shared_swapchains( &self, create_infos: &[vk::SwapchainCreateInfoKHR], @@ -47,4 +47,12 @@ impl DisplaySwapchain { _ => Err(err_code), } } + + pub fn fp(&self) -> &vk::KhrDisplaySwapchainFn { + &self.swapchain_fn + } + + pub fn device(&self) -> vk::Device { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/khr/external_memory_fd.rs b/third_party/rust/ash/src/extensions/khr/external_memory_fd.rs new file mode 100644 index 000000000000..070b33a1900d --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/external_memory_fd.rs @@ -0,0 +1,66 @@ +use crate::prelude::*; +use crate::version::{DeviceV1_0, InstanceV1_0}; +use crate::vk; +use std::ffi::CStr; +use std::mem; + +#[derive(Clone)] +pub struct ExternalMemoryFd { + handle: vk::Device, + external_memory_fd_fn: vk::KhrExternalMemoryFdFn, +} + +impl ExternalMemoryFd { + pub fn 
new(instance: &I, device: &D) -> Self { + let external_memory_fd_fn = vk::KhrExternalMemoryFdFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr())) + }); + Self { + handle: device.handle(), + external_memory_fd_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrExternalMemoryFdFn::name() + } + + #[doc = ""] + pub unsafe fn get_memory_fd(&self, create_info: &vk::MemoryGetFdInfoKHR) -> VkResult { + let mut fd = -1; + let err_code = + self.external_memory_fd_fn + .get_memory_fd_khr(self.handle, create_info, &mut fd); + match err_code { + vk::Result::SUCCESS => Ok(fd), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_memory_fd_properties_khr( + &self, + handle_type: vk::ExternalMemoryHandleTypeFlags, + fd: i32, + ) -> VkResult { + let mut memory_fd_properties = mem::zeroed(); + let err_code = self.external_memory_fd_fn.get_memory_fd_properties_khr( + self.handle, + handle_type, + fd, + &mut memory_fd_properties, + ); + match err_code { + vk::Result::SUCCESS => Ok(memory_fd_properties), + _ => Err(err_code), + } + } + + pub fn fp(&self) -> &vk::KhrExternalMemoryFdFn { + &self.external_memory_fd_fn + } + + pub fn device(&self) -> vk::Device { + self.handle + } +} diff --git a/third_party/rust/ash/src/extensions/khr/mod.rs b/third_party/rust/ash/src/extensions/khr/mod.rs index 88126740cff1..31a40a714575 100644 --- a/third_party/rust/ash/src/extensions/khr/mod.rs +++ b/third_party/rust/ash/src/extensions/khr/mod.rs @@ -1,16 +1,26 @@ pub use self::android_surface::AndroidSurface; +pub use self::display::Display; pub use self::display_swapchain::DisplaySwapchain; +pub use self::external_memory_fd::ExternalMemoryFd; +pub use self::push_descriptor::PushDescriptor; +pub use self::ray_tracing::RayTracing; pub use self::surface::Surface; pub use self::swapchain::Swapchain; +pub use self::timeline_semaphore::TimelineSemaphore; pub use self::wayland_surface::WaylandSurface; pub use 
self::win32_surface::Win32Surface; pub use self::xcb_surface::XcbSurface; pub use self::xlib_surface::XlibSurface; mod android_surface; +mod display; mod display_swapchain; +mod external_memory_fd; +mod push_descriptor; +mod ray_tracing; mod surface; mod swapchain; +mod timeline_semaphore; mod wayland_surface; mod win32_surface; mod xcb_surface; diff --git a/third_party/rust/ash/src/extensions/khr/push_descriptor.rs b/third_party/rust/ash/src/extensions/khr/push_descriptor.rs new file mode 100644 index 000000000000..152568e84fba --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/push_descriptor.rs @@ -0,0 +1,75 @@ +#![allow(dead_code)] +use crate::version::{DeviceV1_0, InstanceV1_0}; +use crate::vk; +use std::ffi::c_void; +use std::ffi::CStr; +use std::mem; + +#[derive(Clone)] +pub struct PushDescriptor { + handle: vk::Instance, + push_descriptors_fn: vk::KhrPushDescriptorFn, +} + +impl PushDescriptor { + pub fn new(instance: &I, device: &D) -> PushDescriptor { + let push_descriptors_fn = vk::KhrPushDescriptorFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr())) + }); + + PushDescriptor { + handle: instance.handle(), + push_descriptors_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrPushDescriptorFn::name() + } + + #[doc = ""] + pub unsafe fn cmd_push_descriptor_set( + &self, + command_buffer: vk::CommandBuffer, + pipeline_bind_point: vk::PipelineBindPoint, + layout: vk::PipelineLayout, + set: u32, + descriptor_writes: &[vk::WriteDescriptorSet], + ) { + self.push_descriptors_fn.cmd_push_descriptor_set_khr( + command_buffer, + pipeline_bind_point, + layout, + set, + descriptor_writes.len() as u32, + descriptor_writes.as_ptr(), + ); + } + + #[doc = ""] + pub unsafe fn cmd_push_descriptor_set_with_template( + &self, + command_buffer: vk::CommandBuffer, + descriptor_update_template: vk::DescriptorUpdateTemplate, + layout: vk::PipelineLayout, + set: u32, + p_data: *const c_void, + ) { + 
self.push_descriptors_fn + .cmd_push_descriptor_set_with_template_khr( + command_buffer, + descriptor_update_template, + layout, + set, + p_data, + ); + } + + pub fn fp(&self) -> &vk::KhrPushDescriptorFn { + &self.push_descriptors_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } +} diff --git a/third_party/rust/ash/src/extensions/khr/ray_tracing.rs b/third_party/rust/ash/src/extensions/khr/ray_tracing.rs new file mode 100644 index 000000000000..c94e7ab14b98 --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/ray_tracing.rs @@ -0,0 +1,369 @@ +#![allow(dead_code)] +use crate::prelude::*; +use crate::version::{DeviceV1_0, InstanceV1_0, InstanceV1_1}; +use crate::vk; +use crate::RawPtr; +use std::ffi::CStr; +use std::mem; + +#[derive(Clone)] +pub struct RayTracing { + handle: vk::Device, + ray_tracing_fn: vk::KhrRayTracingFn, +} + +impl RayTracing { + pub fn new(instance: &I, device: &D) -> RayTracing { + let ray_tracing_fn = vk::KhrRayTracingFn::load(|name| unsafe { + mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr())) + }); + RayTracing { + handle: device.handle(), + ray_tracing_fn, + } + } + + pub unsafe fn get_properties( + instance: &I, + pdevice: vk::PhysicalDevice, + ) -> vk::PhysicalDeviceRayTracingPropertiesKHR { + let mut props_rt = vk::PhysicalDeviceRayTracingPropertiesKHR::default(); + { + let mut props = vk::PhysicalDeviceProperties2::builder().push_next(&mut props_rt); + instance.get_physical_device_properties2(pdevice, &mut props); + } + props_rt + } + + #[doc = ""] + pub unsafe fn create_acceleration_structure( + &self, + create_info: &vk::AccelerationStructureCreateInfoKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult { + let mut accel_struct = mem::zeroed(); + let err_code = self.ray_tracing_fn.create_acceleration_structure_khr( + self.handle, + create_info, + allocation_callbacks.as_raw_ptr(), + &mut accel_struct, + ); + match err_code { + vk::Result::SUCCESS => 
Ok(accel_struct), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn destroy_acceleration_structure( + &self, + accel_struct: vk::AccelerationStructureKHR, + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) { + self.ray_tracing_fn.destroy_acceleration_structure_khr( + self.handle, + accel_struct, + allocation_callbacks.as_raw_ptr(), + ); + } + + #[doc = ""] + pub unsafe fn get_acceleration_structure_memory_requirements( + &self, + info: &vk::AccelerationStructureMemoryRequirementsInfoKHR, + ) -> vk::MemoryRequirements2KHR { + let mut requirements = mem::zeroed(); + self.ray_tracing_fn + .get_acceleration_structure_memory_requirements_khr( + self.handle, + info, + &mut requirements, + ); + requirements + } + + #[doc = ""] + pub unsafe fn bind_acceleration_structure_memory( + &self, + bind_info: &[vk::BindAccelerationStructureMemoryInfoKHR], + ) -> VkResult<()> { + let err_code = self.ray_tracing_fn.bind_acceleration_structure_memory_khr( + self.handle, + bind_info.len() as u32, + bind_info.as_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn cmd_build_acceleration_structure( + &self, + command_buffer: vk::CommandBuffer, + infos: &[vk::AccelerationStructureBuildGeometryInfoKHR], + offset_infos: &[&[vk::AccelerationStructureBuildOffsetInfoKHR]], + ) { + let offset_info_ptr = offset_infos + .iter() + .map(|slice| slice.as_ptr()) + .collect::>(); + + self.ray_tracing_fn.cmd_build_acceleration_structure_khr( + command_buffer, + infos.len() as u32, + infos.as_ptr(), + offset_info_ptr.as_ptr(), + ); + } + + #[doc = ""] + pub unsafe fn cmd_copy_acceleration_structure( + &self, + command_buffer: vk::CommandBuffer, + info: &vk::CopyAccelerationStructureInfoKHR, + ) { + self.ray_tracing_fn + .cmd_copy_acceleration_structure_khr(command_buffer, info); + } + + #[doc = ""] + pub unsafe fn cmd_trace_rays( + &self, + command_buffer: vk::CommandBuffer, + raygen_shader_binding_tables: 
&[vk::StridedBufferRegionKHR], + miss_shader_binding_tables: &[vk::StridedBufferRegionKHR], + hit_shader_binding_tables: &[vk::StridedBufferRegionKHR], + callable_shader_binding_tables: &[vk::StridedBufferRegionKHR], + width: u32, + height: u32, + depth: u32, + ) { + self.ray_tracing_fn.cmd_trace_rays_khr( + command_buffer, + raygen_shader_binding_tables.as_ptr(), + miss_shader_binding_tables.as_ptr(), + hit_shader_binding_tables.as_ptr(), + callable_shader_binding_tables.as_ptr(), + width, + height, + depth, + ); + } + + #[doc = ""] + pub unsafe fn create_ray_tracing_pipelines( + &self, + pipeline_cache: vk::PipelineCache, + create_info: &[vk::RayTracingPipelineCreateInfoKHR], + allocation_callbacks: Option<&vk::AllocationCallbacks>, + ) -> VkResult> { + let mut pipelines = vec![mem::zeroed(); create_info.len()]; + let err_code = self.ray_tracing_fn.create_ray_tracing_pipelines_khr( + self.handle, + pipeline_cache, + create_info.len() as u32, + create_info.as_ptr(), + allocation_callbacks.as_raw_ptr(), + pipelines.as_mut_ptr(), + ); + match err_code { + vk::Result::SUCCESS => Ok(pipelines), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_ray_tracing_shader_group_handles( + &self, + pipeline: vk::Pipeline, + first_group: u32, + group_count: u32, + data: &mut [u8], + ) -> VkResult<()> { + let err_code = self + .ray_tracing_fn + .get_ray_tracing_shader_group_handles_khr( + self.handle, + pipeline, + first_group, + group_count, + data.len(), + data.as_mut_ptr() as *mut std::ffi::c_void, + ); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn get_acceleration_structure_device_address( + &self, + info: &vk::AccelerationStructureDeviceAddressInfoKHR, + ) -> vk::DeviceAddress { + self.ray_tracing_fn + .get_acceleration_structure_device_address_khr(self.handle, info) + } + + #[doc = ""] + pub unsafe fn cmd_write_acceleration_structures_properties( + &self, + command_buffer: vk::CommandBuffer, 
+ structures: &[vk::AccelerationStructureKHR], + query_type: vk::QueryType, + query_pool: vk::QueryPool, + first_query: u32, + ) { + self.ray_tracing_fn + .cmd_write_acceleration_structures_properties_khr( + command_buffer, + structures.len() as u32, + structures.as_ptr(), + query_type, + query_pool, + first_query, + ); + } + + pub unsafe fn cmd_build_acceleration_structure_indirect( + &self, + command_buffer: vk::CommandBuffer, + info: &vk::AccelerationStructureBuildGeometryInfoKHR, + indirect_buffer: vk::Buffer, + indirect_offset: vk::DeviceSize, + indirect_stride: u32, + ) { + self.ray_tracing_fn + .cmd_build_acceleration_structure_indirect_khr( + command_buffer, + info, + indirect_buffer, + indirect_offset, + indirect_stride, + ); + } + + pub unsafe fn copy_acceleration_structure_to_memory( + &self, + device: vk::Device, + info: &vk::CopyAccelerationStructureToMemoryInfoKHR, + ) -> VkResult<()> { + let err_code = self + .ray_tracing_fn + .copy_acceleration_structure_to_memory_khr(device, info); + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + pub unsafe fn copy_memory_to_acceleration_structure( + &self, + device: vk::Device, + info: &vk::CopyMemoryToAccelerationStructureInfoKHR, + ) -> VkResult<()> { + let err_code = self + .ray_tracing_fn + .copy_memory_to_acceleration_structure_khr(device, info); + + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + pub unsafe fn cmd_copy_acceleration_structure_to_memory( + &self, + command_buffer: vk::CommandBuffer, + info: &vk::CopyAccelerationStructureToMemoryInfoKHR, + ) { + self.ray_tracing_fn + .cmd_copy_acceleration_structure_to_memory_khr(command_buffer, info); + } + + pub unsafe fn cmd_copy_memory_to_acceleration_structure( + &self, + command_buffer: vk::CommandBuffer, + info: &vk::CopyMemoryToAccelerationStructureInfoKHR, + ) { + self.ray_tracing_fn + .cmd_copy_memory_to_acceleration_structure_khr(command_buffer, info); + } + + pub unsafe fn 
get_ray_tracing_capture_replay_shader_group_handles( + &self, + device: vk::Device, + pipeline: vk::Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + ) -> VkResult> { + let mut data: Vec = Vec::with_capacity(data_size); + + let err_code = self + .ray_tracing_fn + .get_ray_tracing_capture_replay_shader_group_handles_khr( + device, + pipeline, + first_group, + group_count, + data_size, + data.as_mut_ptr() as *mut _, + ); + + match err_code { + vk::Result::SUCCESS => Ok(data), + _ => Err(err_code), + } + } + + pub unsafe fn cmd_trace_rays_indirect( + &self, + command_buffer: vk::CommandBuffer, + raygen_shader_binding_table: &[vk::StridedBufferRegionKHR], + miss_shader_binding_table: &[vk::StridedBufferRegionKHR], + hit_shader_binding_table: &[vk::StridedBufferRegionKHR], + callable_shader_binding_table: &[vk::StridedBufferRegionKHR], + buffer: vk::Buffer, + offset: vk::DeviceSize, + ) { + self.ray_tracing_fn.cmd_trace_rays_indirect_khr( + command_buffer, + raygen_shader_binding_table.as_ptr(), + miss_shader_binding_table.as_ptr(), + hit_shader_binding_table.as_ptr(), + callable_shader_binding_table.as_ptr(), + buffer, + offset, + ); + } + + pub unsafe fn get_device_acceleration_structure_compatibility( + &self, + device: vk::Device, + version: &vk::AccelerationStructureVersionKHR, + ) -> VkResult<()> { + let err_code = self + .ray_tracing_fn + .get_device_acceleration_structure_compatibility_khr(device, version); + + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + pub fn name() -> &'static CStr { + vk::KhrRayTracingFn::name() + } + + pub fn fp(&self) -> &vk::KhrRayTracingFn { + &self.ray_tracing_fn + } + + pub fn device(&self) -> vk::Device { + self.handle + } +} diff --git a/third_party/rust/ash/src/extensions/khr/surface.rs b/third_party/rust/ash/src/extensions/khr/surface.rs old mode 100644 new mode 100755 index a0ca5ad1694c..7cde5aa9f90d --- a/third_party/rust/ash/src/extensions/khr/surface.rs +++ 
b/third_party/rust/ash/src/extensions/khr/surface.rs @@ -1,11 +1,11 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; use std::ptr; -use version::{EntryV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct Surface { @@ -20,7 +20,7 @@ impl Surface { }); Surface { handle: instance.handle(), - surface_fn: surface_fn, + surface_fn, } } @@ -28,24 +28,28 @@ impl Surface { vk::KhrSurfaceFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_support( &self, physical_device: vk::PhysicalDevice, queue_index: u32, surface: vk::SurfaceKHR, - ) -> bool { - let mut b = mem::uninitialized(); - self.surface_fn.get_physical_device_surface_support_khr( + ) -> VkResult { + let mut b = mem::zeroed(); + let err_code = self.surface_fn.get_physical_device_surface_support_khr( physical_device, queue_index, surface, &mut b, ); - b > 0 + + match err_code { + vk::Result::SUCCESS => Ok(b > 0), + _ => Err(err_code), + } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_present_modes( &self, physical_device: vk::PhysicalDevice, @@ -75,13 +79,13 @@ impl Surface { } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_capabilities( &self, physical_device: vk::PhysicalDevice, surface: vk::SurfaceKHR, ) -> VkResult { - let mut surface_capabilities = mem::uninitialized(); + let mut surface_capabilities = mem::zeroed(); let err_code = self .surface_fn .get_physical_device_surface_capabilities_khr( @@ -95,7 +99,7 @@ impl Surface { } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_formats( &self, physical_device: vk::PhysicalDevice, @@ -122,7 +126,7 @@ impl Surface { } } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_surface( &self, surface: vk::SurfaceKHR, @@ -134,4 +138,12 @@ impl Surface { allocation_callbacks.as_raw_ptr(), ); } + + pub fn fp(&self) -> 
&vk::KhrSurfaceFn { + &self.surface_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/khr/swapchain.rs b/third_party/rust/ash/src/extensions/khr/swapchain.rs old mode 100644 new mode 100755 index ade5d0470c9a..266b9875e44d --- a/third_party/rust/ash/src/extensions/khr/swapchain.rs +++ b/third_party/rust/ash/src/extensions/khr/swapchain.rs @@ -1,11 +1,11 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{DeviceV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; use std::ptr; -use version::{DeviceV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct Swapchain { @@ -20,7 +20,7 @@ impl Swapchain { }); Swapchain { handle: device.handle(), - swapchain_fn: swapchain_fn, + swapchain_fn, } } @@ -28,7 +28,7 @@ impl Swapchain { vk::KhrSwapchainFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_swapchain( &self, swapchain: vk::SwapchainKHR, @@ -42,7 +42,7 @@ impl Swapchain { } /// On success, returns the next image's index and whether the swapchain is suboptimal for the surface. 
- #[doc = ""] + #[doc = ""] pub unsafe fn acquire_next_image( &self, swapchain: vk::SwapchainKHR, @@ -50,7 +50,7 @@ impl Swapchain { semaphore: vk::Semaphore, fence: vk::Fence, ) -> VkResult<(u32, bool)> { - let mut index = mem::uninitialized(); + let mut index = mem::zeroed(); let err_code = self.swapchain_fn.acquire_next_image_khr( self.handle, swapchain, @@ -66,13 +66,13 @@ impl Swapchain { } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_swapchain( &self, create_info: &vk::SwapchainCreateInfoKHR, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut swapchain = mem::uninitialized(); + let mut swapchain = mem::zeroed(); let err_code = self.swapchain_fn.create_swapchain_khr( self.handle, create_info, @@ -86,7 +86,7 @@ impl Swapchain { } /// On success, returns whether the swapchain is suboptimal for the surface. - #[doc = ""] + #[doc = ""] pub unsafe fn queue_present( &self, queue: vk::Queue, @@ -100,7 +100,7 @@ impl Swapchain { } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_swapchain_images( &self, swapchain: vk::SwapchainKHR, @@ -126,4 +126,12 @@ impl Swapchain { _ => Err(err_code), } } + + pub fn fp(&self) -> &vk::KhrSwapchainFn { + &self.swapchain_fn + } + + pub fn device(&self) -> vk::Device { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/khr/timeline_semaphore.rs b/third_party/rust/ash/src/extensions/khr/timeline_semaphore.rs new file mode 100644 index 000000000000..09190ea8c6c8 --- /dev/null +++ b/third_party/rust/ash/src/extensions/khr/timeline_semaphore.rs @@ -0,0 +1,87 @@ +#![allow(dead_code)] +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use std::ffi::CStr; +use std::mem; + +#[derive(Clone)] +pub struct TimelineSemaphore { + handle: vk::Instance, + timeline_semaphore_fn: vk::KhrTimelineSemaphoreFn, +} + +impl TimelineSemaphore { + pub fn new(entry: &E, instance: &I) -> TimelineSemaphore { + let timeline_semaphore_fn = 
vk::KhrTimelineSemaphoreFn::load(|name| unsafe { + mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr())) + }); + + TimelineSemaphore { + handle: instance.handle(), + timeline_semaphore_fn, + } + } + + pub fn name() -> &'static CStr { + vk::KhrTimelineSemaphoreFn::name() + } + + #[doc = ""] + pub unsafe fn get_semaphore_counter_value( + &self, + device: vk::Device, + semaphore: vk::Semaphore, + ) -> VkResult { + let mut value = 0; + let err_code = self + .timeline_semaphore_fn + .get_semaphore_counter_value_khr(device, semaphore, &mut value); + + match err_code { + vk::Result::SUCCESS => Ok(value), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn wait_semaphores( + &self, + device: vk::Device, + wait_info: &vk::SemaphoreWaitInfo, + timeout: u64, + ) -> VkResult<()> { + let err_code = self + .timeline_semaphore_fn + .wait_semaphores_khr(device, wait_info, timeout); + + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + #[doc = ""] + pub unsafe fn signal_semaphore( + &self, + device: vk::Device, + signal_info: &vk::SemaphoreSignalInfo, + ) -> VkResult<()> { + let err_code = self + .timeline_semaphore_fn + .signal_semaphore_khr(device, signal_info); + + match err_code { + vk::Result::SUCCESS => Ok(()), + _ => Err(err_code), + } + } + + pub fn fp(&self) -> &vk::KhrTimelineSemaphoreFn { + &self.timeline_semaphore_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } +} diff --git a/third_party/rust/ash/src/extensions/khr/wayland_surface.rs b/third_party/rust/ash/src/extensions/khr/wayland_surface.rs old mode 100644 new mode 100755 index ec5480d96a1f..d705f3803055 --- a/third_party/rust/ash/src/extensions/khr/wayland_surface.rs +++ b/third_party/rust/ash/src/extensions/khr/wayland_surface.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; 
-use version::{EntryV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct WaylandSurface { @@ -27,13 +27,13 @@ impl WaylandSurface { vk::KhrWaylandSurfaceFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn create_wayland_surface( &self, create_info: &vk::WaylandSurfaceCreateInfoKHR, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut surface = mem::uninitialized(); + let mut surface = mem::zeroed(); let err_code = self.wayland_surface_fn.create_wayland_surface_khr( self.handle, create_info, @@ -45,4 +45,30 @@ impl WaylandSurface { _ => Err(err_code), } } + + #[doc = " bool { + let b = self + .wayland_surface_fn + .get_physical_device_wayland_presentation_support_khr( + physical_device, + queue_family_index, + wl_display, + ); + + b > 0 + } + + pub fn fp(&self) -> &vk::KhrWaylandSurfaceFn { + &self.wayland_surface_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/khr/win32_surface.rs b/third_party/rust/ash/src/extensions/khr/win32_surface.rs old mode 100644 new mode 100755 index 72ae7efce0b7..ae604436d0d6 --- a/third_party/rust/ash/src/extensions/khr/win32_surface.rs +++ b/third_party/rust/ash/src/extensions/khr/win32_surface.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; -use version::{EntryV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct Win32Surface { @@ -27,13 +27,13 @@ impl Win32Surface { vk::KhrWin32SurfaceFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn create_win32_surface( &self, create_info: &vk::Win32SurfaceCreateInfoKHR, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut surface = mem::uninitialized(); + let mut surface = mem::zeroed(); let err_code = self.win32_surface_fn.create_win32_surface_khr( self.handle, create_info, @@ 
-45,4 +45,28 @@ impl Win32Surface { _ => Err(err_code), } } + + #[doc = " bool { + let b = self + .win32_surface_fn + .get_physical_device_win32_presentation_support_khr( + physical_device, + queue_family_index, + ); + + b > 0 + } + + pub fn fp(&self) -> &vk::KhrWin32SurfaceFn { + &self.win32_surface_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/khr/xcb_surface.rs b/third_party/rust/ash/src/extensions/khr/xcb_surface.rs old mode 100644 new mode 100755 index dbd05e8a7f54..4785a37e0910 --- a/third_party/rust/ash/src/extensions/khr/xcb_surface.rs +++ b/third_party/rust/ash/src/extensions/khr/xcb_surface.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; -use version::{EntryV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct XcbSurface { @@ -27,13 +27,13 @@ impl XcbSurface { vk::KhrXcbSurfaceFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn create_xcb_surface( &self, create_info: &vk::XcbSurfaceCreateInfoKHR, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut surface = mem::uninitialized(); + let mut surface = mem::zeroed(); let err_code = self.xcb_surface_fn.create_xcb_surface_khr( self.handle, create_info, @@ -45,4 +45,32 @@ impl XcbSurface { _ => Err(err_code), } } + + #[doc = " bool { + let b = self + .xcb_surface_fn + .get_physical_device_xcb_presentation_support_khr( + physical_device, + queue_family_index, + connection, + visual_id, + ); + + b > 0 + } + + pub fn fp(&self) -> &vk::KhrXcbSurfaceFn { + &self.xcb_surface_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/khr/xlib_surface.rs b/third_party/rust/ash/src/extensions/khr/xlib_surface.rs old mode 100644 new mode 100755 index aed6ca4efd01..0569006edd1f --- 
a/third_party/rust/ash/src/extensions/khr/xlib_surface.rs +++ b/third_party/rust/ash/src/extensions/khr/xlib_surface.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; -use version::{EntryV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct XlibSurface { @@ -27,13 +27,13 @@ impl XlibSurface { vk::KhrXlibSurfaceFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn create_xlib_surface( &self, create_info: &vk::XlibSurfaceCreateInfoKHR, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut surface = mem::uninitialized(); + let mut surface = mem::zeroed(); let err_code = self.xlib_surface_fn.create_xlib_surface_khr( self.handle, create_info, @@ -45,4 +45,32 @@ impl XlibSurface { _ => Err(err_code), } } + + #[doc = " bool { + let b = self + .xlib_surface_fn + .get_physical_device_xlib_presentation_support_khr( + physical_device, + queue_family_index, + display, + visual_id, + ); + + b > 0 + } + + pub fn fp(&self) -> &vk::KhrXlibSurfaceFn { + &self.xlib_surface_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/mvk/ios_surface.rs b/third_party/rust/ash/src/extensions/mvk/ios_surface.rs old mode 100644 new mode 100755 index 4aac1c50478f..76b339f0fe42 --- a/third_party/rust/ash/src/extensions/mvk/ios_surface.rs +++ b/third_party/rust/ash/src/extensions/mvk/ios_surface.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; -use version::{EntryV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct IOSSurface { @@ -27,13 +27,13 @@ impl IOSSurface { vk::MvkIosSurfaceFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn create_ios_surface_mvk( &self, 
create_info: &vk::IOSSurfaceCreateInfoMVK, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut surface = mem::uninitialized(); + let mut surface = mem::zeroed(); let err_code = self.ios_surface_fn.create_ios_surface_mvk( self.handle, create_info, @@ -45,4 +45,12 @@ impl IOSSurface { _ => Err(err_code), } } + + pub fn fp(&self) -> &vk::MvkIosSurfaceFn { + &self.ios_surface_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/mvk/macos_surface.rs b/third_party/rust/ash/src/extensions/mvk/macos_surface.rs old mode 100644 new mode 100755 index 9dbf44b60da1..6f2aea696651 --- a/third_party/rust/ash/src/extensions/mvk/macos_surface.rs +++ b/third_party/rust/ash/src/extensions/mvk/macos_surface.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{EntryV1_0, InstanceV1_0}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; -use version::{EntryV1_0, InstanceV1_0}; -use vk; -use RawPtr; #[derive(Clone)] pub struct MacOSSurface { @@ -27,13 +27,13 @@ impl MacOSSurface { vk::MvkMacosSurfaceFn::name() } - #[doc = ""] + #[doc = ""] pub unsafe fn create_mac_os_surface_mvk( &self, create_info: &vk::MacOSSurfaceCreateInfoMVK, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut surface = mem::uninitialized(); + let mut surface = mem::zeroed(); let err_code = self.macos_surface_fn.create_mac_os_surface_mvk( self.handle, create_info, @@ -45,4 +45,12 @@ impl MacOSSurface { _ => Err(err_code), } } + + pub fn fp(&self) -> &vk::MvkMacosSurfaceFn { + &self.macos_surface_fn + } + + pub fn instance(&self) -> vk::Instance { + self.handle + } } diff --git a/third_party/rust/ash/src/extensions/nv/mesh_shader.rs b/third_party/rust/ash/src/extensions/nv/mesh_shader.rs old mode 100644 new mode 100755 index c9829514ebdf..61822b3989af --- a/third_party/rust/ash/src/extensions/nv/mesh_shader.rs +++ 
b/third_party/rust/ash/src/extensions/nv/mesh_shader.rs @@ -1,8 +1,8 @@ #![allow(dead_code)] +use crate::version::{DeviceV1_0, InstanceV1_0}; +use crate::vk; use std::ffi::CStr; use std::mem; -use version::{DeviceV1_0, InstanceV1_0}; -use vk; #[derive(Clone)] pub struct MeshShader { @@ -16,7 +16,7 @@ impl MeshShader { }); MeshShader { mesh_shader_fn } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_mesh_tasks( &self, command_buffer: vk::CommandBuffer, @@ -26,7 +26,7 @@ impl MeshShader { self.mesh_shader_fn .cmd_draw_mesh_tasks_nv(command_buffer, task_count, first_task); } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_mesh_tasks_indirect( &self, command_buffer: vk::CommandBuffer, @@ -43,7 +43,7 @@ impl MeshShader { stride, ); } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_mesh_tasks_indirect_count( &self, command_buffer: vk::CommandBuffer, @@ -67,4 +67,8 @@ impl MeshShader { pub fn name() -> &'static CStr { vk::NvMeshShaderFn::name() } + + pub fn fp(&self) -> &vk::NvMeshShaderFn { + &self.mesh_shader_fn + } } diff --git a/third_party/rust/ash/src/extensions/nv/ray_tracing.rs b/third_party/rust/ash/src/extensions/nv/ray_tracing.rs old mode 100644 new mode 100755 index 02aa71f7ad72..af55f6bed7d4 --- a/third_party/rust/ash/src/extensions/nv/ray_tracing.rs +++ b/third_party/rust/ash/src/extensions/nv/ray_tracing.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use prelude::*; +use crate::prelude::*; +use crate::version::{DeviceV1_0, InstanceV1_0, InstanceV1_1}; +use crate::vk; +use crate::RawPtr; use std::ffi::CStr; use std::mem; -use version::{DeviceV1_0, InstanceV1_0, InstanceV1_1}; -use vk; -use RawPtr; #[derive(Clone)] pub struct RayTracing { @@ -35,13 +35,13 @@ impl RayTracing { props_rt } - #[doc = ""] + #[doc = ""] pub unsafe fn create_acceleration_structure( &self, create_info: &vk::AccelerationStructureCreateInfoNV, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult { - let mut accel_struct = mem::uninitialized(); + let mut 
accel_struct = mem::zeroed(); let err_code = self.ray_tracing_fn.create_acceleration_structure_nv( self.handle, create_info, @@ -54,7 +54,7 @@ impl RayTracing { } } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_acceleration_structure( &self, accel_struct: vk::AccelerationStructureNV, @@ -67,12 +67,12 @@ impl RayTracing { ); } - #[doc = ""] + #[doc = ""] pub unsafe fn get_acceleration_structure_memory_requirements( &self, info: &vk::AccelerationStructureMemoryRequirementsInfoNV, ) -> vk::MemoryRequirements2KHR { - let mut requirements = mem::uninitialized(); + let mut requirements = mem::zeroed(); self.ray_tracing_fn .get_acceleration_structure_memory_requirements_nv( self.handle, @@ -82,7 +82,7 @@ impl RayTracing { requirements } - #[doc = ""] + #[doc = ""] pub unsafe fn bind_acceleration_structure_memory( &self, bind_info: &[vk::BindAccelerationStructureMemoryInfoNV], @@ -98,7 +98,7 @@ impl RayTracing { } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_build_acceleration_structure( &self, command_buffer: vk::CommandBuffer, @@ -124,7 +124,7 @@ impl RayTracing { ); } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_copy_acceleration_structure( &self, command_buffer: vk::CommandBuffer, @@ -136,7 +136,7 @@ impl RayTracing { .cmd_copy_acceleration_structure_nv(command_buffer, dst, src, mode); } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_trace_rays( &self, command_buffer: vk::CommandBuffer, @@ -174,14 +174,14 @@ impl RayTracing { ); } - #[doc = ""] + #[doc = ""] pub unsafe fn create_ray_tracing_pipelines( &self, pipeline_cache: vk::PipelineCache, create_info: &[vk::RayTracingPipelineCreateInfoNV], allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> VkResult> { - let mut pipelines = vec![mem::uninitialized(); create_info.len()]; + let mut pipelines = vec![mem::zeroed(); create_info.len()]; let err_code = self.ray_tracing_fn.create_ray_tracing_pipelines_nv( self.handle, pipeline_cache, @@ -196,7 +196,7 @@ impl RayTracing { } } - #[doc = ""] + #[doc = ""] pub 
unsafe fn get_ray_tracing_shader_group_handles( &self, pipeline: vk::Pipeline, @@ -218,7 +218,7 @@ impl RayTracing { } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_acceleration_structure_handle( &self, accel_struct: vk::AccelerationStructureNV, @@ -237,7 +237,7 @@ impl RayTracing { } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_write_acceleration_structures_properties( &self, command_buffer: vk::CommandBuffer, @@ -257,7 +257,7 @@ impl RayTracing { ); } - #[doc = ""] + #[doc = ""] pub unsafe fn compile_deferred(&self, pipeline: vk::Pipeline, shader: u32) -> VkResult<()> { let err_code = self .ray_tracing_fn @@ -271,4 +271,12 @@ impl RayTracing { pub fn name() -> &'static CStr { vk::NvRayTracingFn::name() } + + pub fn fp(&self) -> &vk::NvRayTracingFn { + &self.ray_tracing_fn + } + + pub fn device(&self) -> vk::Device { + self.handle + } } diff --git a/third_party/rust/ash/src/instance.rs b/third_party/rust/ash/src/instance.rs index 4db5ceb0dc34..669eed47b571 100644 --- a/third_party/rust/ash/src/instance.rs +++ b/third_party/rust/ash/src/instance.rs @@ -1,18 +1,19 @@ #![allow(dead_code)] -use device::Device; -use prelude::*; +use crate::device::Device; +use crate::prelude::*; +use crate::vk; +use crate::RawPtr; use std::mem; use std::os::raw::c_char; use std::ptr; -use vk; -use RawPtr; -#[doc = ""] +#[doc = ""] #[derive(Clone)] pub struct Instance { handle: vk::Instance, instance_fn_1_0: vk::InstanceFnV1_0, instance_fn_1_1: vk::InstanceFnV1_1, + instance_fn_1_2: vk::InstanceFnV1_2, } impl Instance { pub unsafe fn load(static_fn: &vk::StaticFn, instance: vk::Instance) -> Self { @@ -22,25 +23,34 @@ impl Instance { let instance_fn_1_1 = vk::InstanceFnV1_1::load(|name| { mem::transmute(static_fn.get_instance_proc_addr(instance, name.as_ptr())) }); + let instance_fn_1_2 = vk::InstanceFnV1_2::load(|name| { + mem::transmute(static_fn.get_instance_proc_addr(instance, name.as_ptr())) + }); Instance { handle: instance, instance_fn_1_0, instance_fn_1_1, + instance_fn_1_2, 
} } } impl InstanceV1_0 for Instance { type Device = Device; - #[doc = ""] + #[doc = ""] + /// + /// # Safety + /// In order for the created `Device` to be valid for the duration of its + /// usage, the `Instance` this was called on must be dropped later than the + /// resulting `Device`. unsafe fn create_device( &self, physical_device: vk::PhysicalDevice, create_info: &vk::DeviceCreateInfo, allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> Result { - let mut device: vk::Device = mem::uninitialized(); + let mut device: vk::Device = mem::zeroed(); let err_code = self.fp_v1_0().create_device( physical_device, create_info, @@ -67,12 +77,23 @@ impl InstanceV1_1 for Instance { } } +impl InstanceV1_2 for Instance { + fn fp_v1_2(&self) -> &vk::InstanceFnV1_2 { + &self.instance_fn_1_2 + } +} + +#[allow(non_camel_case_types)] +pub trait InstanceV1_2: InstanceV1_1 { + fn fp_v1_2(&self) -> &vk::InstanceFnV1_2; +} + #[allow(non_camel_case_types)] pub trait InstanceV1_1: InstanceV1_0 { fn fp_v1_1(&self) -> &vk::InstanceFnV1_1; unsafe fn enumerate_physical_device_groups_len(&self) -> usize { - let mut group_count = mem::uninitialized(); + let mut group_count = mem::zeroed(); self.fp_v1_1().enumerate_physical_device_groups( self.handle(), &mut group_count, @@ -81,7 +102,7 @@ pub trait InstanceV1_1: InstanceV1_0 { group_count as usize } - #[doc = ""] + #[doc = ""] fn enumerate_physical_device_groups( &self, out: &mut [vk::PhysicalDeviceGroupProperties], @@ -101,7 +122,17 @@ pub trait InstanceV1_1: InstanceV1_0 { } } - #[doc = ""] + #[doc = ""] + unsafe fn get_physical_device_features2( + &self, + physical_device: vk::PhysicalDevice, + features: &mut vk::PhysicalDeviceFeatures2, + ) { + self.fp_v1_1() + .get_physical_device_features2(physical_device, features); + } + + #[doc = ""] unsafe fn get_physical_device_properties2( &self, physical_device: vk::PhysicalDevice, @@ -111,7 +142,7 @@ pub trait InstanceV1_1: InstanceV1_0 { 
.get_physical_device_properties2(physical_device, prop); } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_format_properties2( &self, physical_device: vk::PhysicalDevice, @@ -122,7 +153,7 @@ pub trait InstanceV1_1: InstanceV1_0 { .get_physical_device_format_properties2(physical_device, format, out); } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_image_format_properties2( &self, physical_device: vk::PhysicalDevice, @@ -154,7 +185,7 @@ pub trait InstanceV1_1: InstanceV1_0 { queue_count as usize } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_queue_family_properties2( &self, physical_device: vk::PhysicalDevice, @@ -168,7 +199,7 @@ pub trait InstanceV1_1: InstanceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_memory_properties2( &self, physical_device: vk::PhysicalDevice, @@ -194,7 +225,7 @@ pub trait InstanceV1_1: InstanceV1_0 { format_count as usize } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_sparse_image_format_properties2( &self, physical_device: vk::PhysicalDevice, @@ -211,7 +242,7 @@ pub trait InstanceV1_1: InstanceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_external_buffer_properties( &self, physical_device: vk::PhysicalDevice, @@ -226,7 +257,7 @@ pub trait InstanceV1_1: InstanceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_external_fence_properties( &self, physical_device: vk::PhysicalDevice, @@ -241,7 +272,7 @@ pub trait InstanceV1_1: InstanceV1_0 { ); } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_external_semaphore_properties( &self, physical_device: vk::PhysicalDevice, @@ -262,7 +293,12 @@ pub trait InstanceV1_0 { type Device; fn handle(&self) -> vk::Instance; fn fp_v1_0(&self) -> &vk::InstanceFnV1_0; - #[doc = ""] + #[doc = ""] + /// + /// # Safety + /// In order for the created `Device` to be valid for the duration of its + /// usage, the `Instance` this was called on must be dropped later than the + /// resulting 
`Device`. unsafe fn create_device( &self, physical_device: vk::PhysicalDevice, @@ -270,7 +306,7 @@ pub trait InstanceV1_0 { allocation_callbacks: Option<&vk::AllocationCallbacks>, ) -> Result; - #[doc = ""] + #[doc = ""] unsafe fn get_device_proc_addr( &self, device: vk::Device, @@ -279,19 +315,19 @@ pub trait InstanceV1_0 { self.fp_v1_0().get_device_proc_addr(device, p_name) } - #[doc = ""] + #[doc = ""] unsafe fn destroy_instance(&self, allocation_callbacks: Option<&vk::AllocationCallbacks>) { self.fp_v1_0() .destroy_instance(self.handle(), allocation_callbacks.as_raw_ptr()); } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_format_properties( &self, physical_device: vk::PhysicalDevice, format: vk::Format, ) -> vk::FormatProperties { - let mut format_prop = mem::uninitialized(); + let mut format_prop = mem::zeroed(); self.fp_v1_0().get_physical_device_format_properties( physical_device, format, @@ -300,7 +336,7 @@ pub trait InstanceV1_0 { format_prop } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_image_format_properties( &self, physical_device: vk::PhysicalDevice, @@ -310,7 +346,7 @@ pub trait InstanceV1_0 { usage: vk::ImageUsageFlags, flags: vk::ImageCreateFlags, ) -> VkResult { - let mut image_format_prop = mem::uninitialized(); + let mut image_format_prop = mem::zeroed(); let err_code = self.fp_v1_0().get_physical_device_image_format_properties( physical_device, format, @@ -327,29 +363,29 @@ pub trait InstanceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_memory_properties( &self, physical_device: vk::PhysicalDevice, ) -> vk::PhysicalDeviceMemoryProperties { - let mut memory_prop = mem::uninitialized(); + let mut memory_prop = mem::zeroed(); self.fp_v1_0() .get_physical_device_memory_properties(physical_device, &mut memory_prop); memory_prop } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_properties( &self, physical_device: vk::PhysicalDevice, ) -> vk::PhysicalDeviceProperties { - let mut prop = 
mem::uninitialized(); + let mut prop = mem::zeroed(); self.fp_v1_0() .get_physical_device_properties(physical_device, &mut prop); prop } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_queue_family_properties( &self, physical_device: vk::PhysicalDevice, @@ -370,20 +406,20 @@ pub trait InstanceV1_0 { queue_families_vec } - #[doc = ""] + #[doc = ""] unsafe fn get_physical_device_features( &self, physical_device: vk::PhysicalDevice, ) -> vk::PhysicalDeviceFeatures { - let mut prop = mem::uninitialized(); + let mut prop = mem::zeroed(); self.fp_v1_0() .get_physical_device_features(physical_device, &mut prop); prop } - #[doc = ""] + #[doc = ""] unsafe fn enumerate_physical_devices(&self) -> VkResult> { - let mut num = mem::uninitialized(); + let mut num = mem::zeroed(); self.fp_v1_0() .enumerate_physical_devices(self.handle(), &mut num, ptr::null_mut()); let mut physical_devices = Vec::::with_capacity(num as usize); @@ -399,7 +435,7 @@ pub trait InstanceV1_0 { } } - #[doc = ""] + #[doc = ""] unsafe fn enumerate_device_extension_properties( &self, device: vk::PhysicalDevice, diff --git a/third_party/rust/ash/src/lib.rs b/third_party/rust/ash/src/lib.rs index d6e5fc29fb59..3b7f72dab55f 100644 --- a/third_party/rust/ash/src/lib.rs +++ b/third_party/rust/ash/src/lib.rs @@ -1,17 +1,16 @@ +#![allow(clippy::too_many_arguments, clippy::missing_safety_doc)] //! # Vulkan API //! -//! +//! //! //! ## Examples //! //! ```rust,no_run -//! # #[macro_use] -//! # extern crate ash; //! use ash::{vk, Entry, version::EntryV1_0}; //! # fn main() -> Result<(), Box> { //! let entry = Entry::new()?; //! let app_info = vk::ApplicationInfo { -//! api_version: vk_make_version!(1, 0, 0), +//! api_version: vk::make_version(1, 0, 0), //! ..Default::default() //! }; //! let create_info = vk::InstanceCreateInfo { @@ -23,11 +22,9 @@ //! ``` //! 
-extern crate shared_library; - -pub use device::Device; -pub use entry::{Entry, EntryCustom, InstanceError, LoadingError}; -pub use instance::Instance; +pub use crate::device::Device; +pub use crate::entry::{Entry, EntryCustom, InstanceError, LoadingError}; +pub use crate::instance::Instance; mod device; mod entry; @@ -47,8 +44,8 @@ pub trait RawPtr { impl<'r, T> RawPtr for Option<&'r T> { fn as_raw_ptr(&self) -> *const T { - match self { - &Some(inner) => inner as *const T, + match *self { + Some(inner) => inner as *const T, _ => ::std::ptr::null(), } diff --git a/third_party/rust/ash/src/prelude.rs b/third_party/rust/ash/src/prelude.rs index a01406c9bc3c..b4a888cd554e 100644 --- a/third_party/rust/ash/src/prelude.rs +++ b/third_party/rust/ash/src/prelude.rs @@ -1,2 +1,2 @@ -use vk; +use crate::vk; pub type VkResult = Result; diff --git a/third_party/rust/ash/src/util.rs b/third_party/rust/ash/src/util.rs index 178da36f2fdc..48591d85cd8b 100644 --- a/third_party/rust/ash/src/util.rs +++ b/third_party/rust/ash/src/util.rs @@ -1,9 +1,9 @@ +use crate::vk; use std::iter::Iterator; use std::marker::PhantomData; use std::mem::size_of; use std::os::raw::c_void; use std::{io, slice}; -use vk; /// `Align` handles dynamic alignment. The is useful for dynamic uniform buffers where /// the alignment might be different. 
For example a 4x4 f32 matrix has a size of 64 bytes @@ -122,13 +122,13 @@ pub fn read_spv(x: &mut R) -> io::Result> { ))?; result.set_len(words); } - const MAGIC_NUMBER: u32 = 0x07230203; - if result.len() > 0 && result[0] == MAGIC_NUMBER.swap_bytes() { + const MAGIC_NUMBER: u32 = 0x0723_0203; + if !result.is_empty() && result[0] == MAGIC_NUMBER.swap_bytes() { for word in &mut result { *word = word.swap_bytes(); } } - if result.len() == 0 || result[0] != MAGIC_NUMBER { + if result.is_empty() || result[0] != MAGIC_NUMBER { return Err(io::Error::new( io::ErrorKind::InvalidData, "input missing SPIR-V magic number", diff --git a/third_party/rust/ash/src/version.rs b/third_party/rust/ash/src/version.rs index 922f6bff0d00..18dd2c9d7597 100644 --- a/third_party/rust/ash/src/version.rs +++ b/third_party/rust/ash/src/version.rs @@ -1,3 +1,3 @@ -pub use device::{DeviceV1_0, DeviceV1_1}; -pub use entry::{EntryV1_0, EntryV1_1}; -pub use instance::{InstanceV1_0, InstanceV1_1}; +pub use crate::device::{DeviceV1_0, DeviceV1_1, DeviceV1_2}; +pub use crate::entry::{EntryV1_0, EntryV1_1, EntryV1_2}; +pub use crate::instance::{InstanceV1_0, InstanceV1_1, InstanceV1_2}; diff --git a/third_party/rust/ash/src/vk.rs b/third_party/rust/ash/src/vk.rs index bcd41b8e0f59..39e3ef010fee 100644 --- a/third_party/rust/ash/src/vk.rs +++ b/third_party/rust/ash/src/vk.rs @@ -1,12 +1,11 @@ -use std::fmt; +# ! [ allow ( clippy :: too_many_arguments , clippy :: cognitive_complexity , clippy :: wrong_self_convention ) ]use std::fmt; use std::os::raw::*; #[doc = r" Iterates through the pointer chain. 
Includes the item that is passed into the function."] #[doc = r" Stops at the last `BaseOutStructure` that has a null `p_next` field."] pub(crate) unsafe fn ptr_chain_iter(ptr: &mut T) -> impl Iterator { - use std::ptr::null_mut; let ptr: *mut BaseOutStructure = ptr as *mut T as _; (0..).scan(ptr, |p_ptr, _| { - if *p_ptr == null_mut() { + if p_ptr.is_null() { return None; } let n_ptr = (**p_ptr).p_next as *mut BaseOutStructure; @@ -18,46 +17,34 @@ pub(crate) unsafe fn ptr_chain_iter(ptr: &mut T) -> impl Iterator u64; - fn from_raw(u64) -> Self; + fn from_raw(_: u64) -> Self; } -#[doc = ""] -#[macro_export] -macro_rules! vk_make_version { - ( $ major : expr , $ minor : expr , $ patch : expr ) => { - (($major as u32) << 22) | (($minor as u32) << 12) | $patch as u32 - }; +#[doc = ""] +pub const fn make_version(major: u32, minor: u32, patch: u32) -> u32 { + (major << 22) | (minor << 12) | patch } -#[doc = ""] -#[macro_export] -macro_rules! vk_version_major { - ( $ major : expr ) => { - ($major as u32) >> 22 - }; +#[doc = ""] +pub const fn version_major(version: u32) -> u32 { + version >> 22 } -#[doc = ""] -#[macro_export] -macro_rules! vk_version_minor { - ( $ minor : expr ) => { - (($minor as u32) >> 12) & 0x3ff - }; +#[doc = ""] +pub const fn version_minor(version: u32) -> u32 { + (version >> 12) & 0x3ff } -#[doc = ""] -#[macro_export] -macro_rules! 
vk_version_patch { - ( $ minor : expr ) => { - ($minor as u32) & 0xfff - }; +#[doc = ""] +pub const fn version_patch(version: u32) -> u32 { + version & 0xfff } pub type RROutput = c_ulong; pub type VisualID = c_uint; pub type Display = *const c_void; pub type Window = c_ulong; #[allow(non_camel_case_types)] -pub type xcb_connection_t = *const c_void; +pub type xcb_connection_t = c_void; #[allow(non_camel_case_types)] pub type xcb_window_t = u32; #[allow(non_camel_case_types)] -pub type xcb_visualid_t = *const c_void; +pub type xcb_visualid_t = u32; pub type MirConnection = *const c_void; pub type MirSurface = *const c_void; pub type HINSTANCE = *const c_void; @@ -67,6 +54,7 @@ pub type wl_display = c_void; #[allow(non_camel_case_types)] pub type wl_surface = c_void; pub type HANDLE = *mut c_void; +pub type HMONITOR = HANDLE; pub type DWORD = c_ulong; pub type LPCWSTR = *const u16; #[allow(non_camel_case_types)] @@ -75,6 +63,11 @@ pub type zx_handle_t = u32; pub type SECURITY_ATTRIBUTES = (); pub type ANativeWindow = c_void; pub type AHardwareBuffer = c_void; +#[doc = r" This definition is experimental and won't adhere to semver rules."] +pub type GgpStreamDescriptor = u32; +#[doc = r" This definition is experimental and won't adhere to semver rules."] +pub type GgpFrameToken = u32; +pub type CAMetalLayer = c_void; #[macro_export] macro_rules! vk_bitflags_wrapped { ( $ name : ident , $ all : expr , $ flag_type : ty ) => { @@ -85,19 +78,19 @@ macro_rules! vk_bitflags_wrapped { } impl $name { #[inline] - pub fn empty() -> $name { + pub const fn empty() -> $name { $name(0) } #[inline] - pub fn all() -> $name { + pub const fn all() -> $name { $name($all) } #[inline] - pub fn from_raw(x: $flag_type) -> Self { + pub const fn from_raw(x: $flag_type) -> Self { $name(x) } #[inline] - pub fn as_raw(self) -> $flag_type { + pub const fn as_raw(self) -> $flag_type { self.0 } #[inline] @@ -199,7 +192,7 @@ macro_rules! 
handle_nondispatchable { } } impl $name { - pub fn null() -> $name { + pub const fn null() -> $name { $name(0) } } @@ -242,7 +235,7 @@ macro_rules! define_handle { unsafe impl Send for $name {} unsafe impl Sync for $name {} impl $name { - pub fn null() -> Self { + pub const fn null() -> Self { $name(::std::ptr::null_mut()) } } @@ -301,7 +294,7 @@ impl StaticFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_instance_proc_addr( &self, instance: Instance, @@ -416,7 +409,7 @@ impl EntryFnV1_0 { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_instance( &self, p_create_info: *const InstanceCreateInfo, @@ -425,7 +418,7 @@ impl EntryFnV1_0 { ) -> Result { (self.create_instance)(p_create_info, p_allocator, p_instance) } - #[doc = ""] + #[doc = ""] pub unsafe fn enumerate_instance_extension_properties( &self, p_layer_name: *const c_char, @@ -434,7 +427,7 @@ impl EntryFnV1_0 { ) -> Result { (self.enumerate_instance_extension_properties)(p_layer_name, p_property_count, p_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn enumerate_instance_layer_properties( &self, p_property_count: *mut u32, @@ -881,7 +874,7 @@ impl InstanceFnV1_0 { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_instance( &self, instance: Instance, @@ -889,7 +882,7 @@ impl InstanceFnV1_0 { ) -> c_void { (self.destroy_instance)(instance, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn enumerate_physical_devices( &self, instance: Instance, @@ -898,7 +891,7 @@ impl InstanceFnV1_0 { ) -> Result { (self.enumerate_physical_devices)(instance, p_physical_device_count, p_physical_devices) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_features( &self, physical_device: PhysicalDevice, @@ -906,7 +899,7 @@ impl InstanceFnV1_0 { ) -> c_void { (self.get_physical_device_features)(physical_device, p_features) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_format_properties( &self, physical_device: PhysicalDevice, @@ -915,7 +908,7 @@ impl InstanceFnV1_0 
{ ) -> c_void { (self.get_physical_device_format_properties)(physical_device, format, p_format_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_image_format_properties( &self, physical_device: PhysicalDevice, @@ -936,7 +929,7 @@ impl InstanceFnV1_0 { p_image_format_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_properties( &self, physical_device: PhysicalDevice, @@ -944,7 +937,7 @@ impl InstanceFnV1_0 { ) -> c_void { (self.get_physical_device_properties)(physical_device, p_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_queue_family_properties( &self, physical_device: PhysicalDevice, @@ -957,7 +950,7 @@ impl InstanceFnV1_0 { p_queue_family_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_memory_properties( &self, physical_device: PhysicalDevice, @@ -965,7 +958,7 @@ impl InstanceFnV1_0 { ) -> c_void { (self.get_physical_device_memory_properties)(physical_device, p_memory_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_device_proc_addr( &self, device: Device, @@ -973,7 +966,7 @@ impl InstanceFnV1_0 { ) -> PFN_vkVoidFunction { (self.get_device_proc_addr)(device, p_name) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_device( &self, physical_device: PhysicalDevice, @@ -983,7 +976,7 @@ impl InstanceFnV1_0 { ) -> Result { (self.create_device)(physical_device, p_create_info, p_allocator, p_device) } - #[doc = ""] + #[doc = ""] pub unsafe fn enumerate_device_extension_properties( &self, physical_device: PhysicalDevice, @@ -998,7 +991,7 @@ impl InstanceFnV1_0 { p_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn enumerate_device_layer_properties( &self, physical_device: PhysicalDevice, @@ -1007,7 +1000,7 @@ impl InstanceFnV1_0 { ) -> Result { (self.enumerate_device_layer_properties)(physical_device, p_property_count, p_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_sparse_image_format_properties( &self, 
physical_device: PhysicalDevice, @@ -4878,7 +4871,7 @@ impl DeviceFnV1_0 { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_device( &self, device: Device, @@ -4886,7 +4879,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_device)(device, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_device_queue( &self, device: Device, @@ -4896,7 +4889,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.get_device_queue)(device, queue_family_index, queue_index, p_queue) } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_submit( &self, queue: Queue, @@ -4906,15 +4899,15 @@ impl DeviceFnV1_0 { ) -> Result { (self.queue_submit)(queue, submit_count, p_submits, fence) } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_wait_idle(&self, queue: Queue) -> Result { (self.queue_wait_idle)(queue) } - #[doc = ""] + #[doc = ""] pub unsafe fn device_wait_idle(&self, device: Device) -> Result { (self.device_wait_idle)(device) } - #[doc = ""] + #[doc = ""] pub unsafe fn allocate_memory( &self, device: Device, @@ -4924,7 +4917,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.allocate_memory)(device, p_allocate_info, p_allocator, p_memory) } - #[doc = ""] + #[doc = ""] pub unsafe fn free_memory( &self, device: Device, @@ -4933,7 +4926,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.free_memory)(device, memory, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn map_memory( &self, device: Device, @@ -4945,11 +4938,11 @@ impl DeviceFnV1_0 { ) -> Result { (self.map_memory)(device, memory, offset, size, flags, pp_data) } - #[doc = ""] + #[doc = ""] pub unsafe fn unmap_memory(&self, device: Device, memory: DeviceMemory) -> c_void { (self.unmap_memory)(device, memory) } - #[doc = ""] + #[doc = ""] pub unsafe fn flush_mapped_memory_ranges( &self, device: Device, @@ -4958,7 +4951,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.flush_mapped_memory_ranges)(device, memory_range_count, p_memory_ranges) } - #[doc = ""] + #[doc = ""] pub unsafe fn invalidate_mapped_memory_ranges( &self, device: Device, 
@@ -4967,7 +4960,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.invalidate_mapped_memory_ranges)(device, memory_range_count, p_memory_ranges) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_device_memory_commitment( &self, device: Device, @@ -4976,7 +4969,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.get_device_memory_commitment)(device, memory, p_committed_memory_in_bytes) } - #[doc = ""] + #[doc = ""] pub unsafe fn bind_buffer_memory( &self, device: Device, @@ -4986,7 +4979,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.bind_buffer_memory)(device, buffer, memory, memory_offset) } - #[doc = ""] + #[doc = ""] pub unsafe fn bind_image_memory( &self, device: Device, @@ -4996,7 +4989,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.bind_image_memory)(device, image, memory, memory_offset) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_buffer_memory_requirements( &self, device: Device, @@ -5005,7 +4998,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.get_buffer_memory_requirements)(device, buffer, p_memory_requirements) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_image_memory_requirements( &self, device: Device, @@ -5014,7 +5007,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.get_image_memory_requirements)(device, image, p_memory_requirements) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_image_sparse_memory_requirements( &self, device: Device, @@ -5029,7 +5022,7 @@ impl DeviceFnV1_0 { p_sparse_memory_requirements, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_bind_sparse( &self, queue: Queue, @@ -5039,7 +5032,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.queue_bind_sparse)(queue, bind_info_count, p_bind_info, fence) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_fence( &self, device: Device, @@ -5049,7 +5042,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_fence)(device, p_create_info, p_allocator, p_fence) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_fence( &self, device: Device, @@ -5058,7 +5051,7 @@ impl DeviceFnV1_0 { ) -> c_void { 
(self.destroy_fence)(device, fence, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn reset_fences( &self, device: Device, @@ -5067,11 +5060,11 @@ impl DeviceFnV1_0 { ) -> Result { (self.reset_fences)(device, fence_count, p_fences) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_fence_status(&self, device: Device, fence: Fence) -> Result { (self.get_fence_status)(device, fence) } - #[doc = ""] + #[doc = ""] pub unsafe fn wait_for_fences( &self, device: Device, @@ -5082,7 +5075,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.wait_for_fences)(device, fence_count, p_fences, wait_all, timeout) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_semaphore( &self, device: Device, @@ -5092,7 +5085,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_semaphore)(device, p_create_info, p_allocator, p_semaphore) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_semaphore( &self, device: Device, @@ -5101,7 +5094,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_semaphore)(device, semaphore, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_event( &self, device: Device, @@ -5111,7 +5104,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_event)(device, p_create_info, p_allocator, p_event) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_event( &self, device: Device, @@ -5120,19 +5113,19 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_event)(device, event, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_event_status(&self, device: Device, event: Event) -> Result { (self.get_event_status)(device, event) } - #[doc = ""] + #[doc = ""] pub unsafe fn set_event(&self, device: Device, event: Event) -> Result { (self.set_event)(device, event) } - #[doc = ""] + #[doc = ""] pub unsafe fn reset_event(&self, device: Device, event: Event) -> Result { (self.reset_event)(device, event) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_query_pool( &self, device: Device, @@ -5142,7 +5135,7 @@ impl DeviceFnV1_0 { ) -> Result { 
(self.create_query_pool)(device, p_create_info, p_allocator, p_query_pool) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_query_pool( &self, device: Device, @@ -5151,7 +5144,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_query_pool)(device, query_pool, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_query_pool_results( &self, device: Device, @@ -5174,7 +5167,7 @@ impl DeviceFnV1_0 { flags, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_buffer( &self, device: Device, @@ -5184,7 +5177,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_buffer)(device, p_create_info, p_allocator, p_buffer) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_buffer( &self, device: Device, @@ -5193,7 +5186,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_buffer)(device, buffer, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_buffer_view( &self, device: Device, @@ -5203,7 +5196,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_buffer_view)(device, p_create_info, p_allocator, p_view) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_buffer_view( &self, device: Device, @@ -5212,7 +5205,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_buffer_view)(device, buffer_view, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_image( &self, device: Device, @@ -5222,7 +5215,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_image)(device, p_create_info, p_allocator, p_image) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_image( &self, device: Device, @@ -5231,7 +5224,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_image)(device, image, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_image_subresource_layout( &self, device: Device, @@ -5241,7 +5234,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.get_image_subresource_layout)(device, image, p_subresource, p_layout) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_image_view( &self, device: Device, @@ -5251,7 +5244,7 @@ impl DeviceFnV1_0 { ) -> Result { 
(self.create_image_view)(device, p_create_info, p_allocator, p_view) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_image_view( &self, device: Device, @@ -5260,7 +5253,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_image_view)(device, image_view, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_shader_module( &self, device: Device, @@ -5270,7 +5263,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_shader_module)(device, p_create_info, p_allocator, p_shader_module) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_shader_module( &self, device: Device, @@ -5279,7 +5272,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_shader_module)(device, shader_module, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_pipeline_cache( &self, device: Device, @@ -5289,7 +5282,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_pipeline_cache)(device, p_create_info, p_allocator, p_pipeline_cache) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_pipeline_cache( &self, device: Device, @@ -5298,7 +5291,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_pipeline_cache)(device, pipeline_cache, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_pipeline_cache_data( &self, device: Device, @@ -5308,7 +5301,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.get_pipeline_cache_data)(device, pipeline_cache, p_data_size, p_data) } - #[doc = ""] + #[doc = ""] pub unsafe fn merge_pipeline_caches( &self, device: Device, @@ -5318,7 +5311,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.merge_pipeline_caches)(device, dst_cache, src_cache_count, p_src_caches) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_graphics_pipelines( &self, device: Device, @@ -5337,7 +5330,7 @@ impl DeviceFnV1_0 { p_pipelines, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_compute_pipelines( &self, device: Device, @@ -5356,7 +5349,7 @@ impl DeviceFnV1_0 { p_pipelines, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_pipeline( &self, device: Device, @@ -5365,7 
+5358,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_pipeline)(device, pipeline, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_pipeline_layout( &self, device: Device, @@ -5375,7 +5368,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_pipeline_layout)(device, p_create_info, p_allocator, p_pipeline_layout) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_pipeline_layout( &self, device: Device, @@ -5384,7 +5377,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_pipeline_layout)(device, pipeline_layout, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_sampler( &self, device: Device, @@ -5394,7 +5387,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_sampler)(device, p_create_info, p_allocator, p_sampler) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_sampler( &self, device: Device, @@ -5403,7 +5396,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_sampler)(device, sampler, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_descriptor_set_layout( &self, device: Device, @@ -5413,7 +5406,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_descriptor_set_layout)(device, p_create_info, p_allocator, p_set_layout) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_descriptor_set_layout( &self, device: Device, @@ -5422,7 +5415,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_descriptor_set_layout)(device, descriptor_set_layout, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_descriptor_pool( &self, device: Device, @@ -5432,7 +5425,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_descriptor_pool)(device, p_create_info, p_allocator, p_descriptor_pool) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_descriptor_pool( &self, device: Device, @@ -5441,7 +5434,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_descriptor_pool)(device, descriptor_pool, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn reset_descriptor_pool( &self, device: Device, @@ -5450,7 +5443,7 @@ impl 
DeviceFnV1_0 { ) -> Result { (self.reset_descriptor_pool)(device, descriptor_pool, flags) } - #[doc = ""] + #[doc = ""] pub unsafe fn allocate_descriptor_sets( &self, device: Device, @@ -5459,7 +5452,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.allocate_descriptor_sets)(device, p_allocate_info, p_descriptor_sets) } - #[doc = ""] + #[doc = ""] pub unsafe fn free_descriptor_sets( &self, device: Device, @@ -5474,7 +5467,7 @@ impl DeviceFnV1_0 { p_descriptor_sets, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn update_descriptor_sets( &self, device: Device, @@ -5491,7 +5484,7 @@ impl DeviceFnV1_0 { p_descriptor_copies, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_framebuffer( &self, device: Device, @@ -5501,7 +5494,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_framebuffer)(device, p_create_info, p_allocator, p_framebuffer) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_framebuffer( &self, device: Device, @@ -5510,7 +5503,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_framebuffer)(device, framebuffer, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_render_pass( &self, device: Device, @@ -5520,7 +5513,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_render_pass)(device, p_create_info, p_allocator, p_render_pass) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_render_pass( &self, device: Device, @@ -5529,7 +5522,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_render_pass)(device, render_pass, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_render_area_granularity( &self, device: Device, @@ -5538,7 +5531,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.get_render_area_granularity)(device, render_pass, p_granularity) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_command_pool( &self, device: Device, @@ -5548,7 +5541,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.create_command_pool)(device, p_create_info, p_allocator, p_command_pool) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_command_pool( &self, device: 
Device, @@ -5557,7 +5550,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.destroy_command_pool)(device, command_pool, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn reset_command_pool( &self, device: Device, @@ -5566,7 +5559,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.reset_command_pool)(device, command_pool, flags) } - #[doc = ""] + #[doc = ""] pub unsafe fn allocate_command_buffers( &self, device: Device, @@ -5575,7 +5568,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.allocate_command_buffers)(device, p_allocate_info, p_command_buffers) } - #[doc = ""] + #[doc = ""] pub unsafe fn free_command_buffers( &self, device: Device, @@ -5590,7 +5583,7 @@ impl DeviceFnV1_0 { p_command_buffers, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn begin_command_buffer( &self, command_buffer: CommandBuffer, @@ -5598,11 +5591,11 @@ impl DeviceFnV1_0 { ) -> Result { (self.begin_command_buffer)(command_buffer, p_begin_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn end_command_buffer(&self, command_buffer: CommandBuffer) -> Result { (self.end_command_buffer)(command_buffer) } - #[doc = ""] + #[doc = ""] pub unsafe fn reset_command_buffer( &self, command_buffer: CommandBuffer, @@ -5610,7 +5603,7 @@ impl DeviceFnV1_0 { ) -> Result { (self.reset_command_buffer)(command_buffer, flags) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_bind_pipeline( &self, command_buffer: CommandBuffer, @@ -5619,7 +5612,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_bind_pipeline)(command_buffer, pipeline_bind_point, pipeline) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_viewport( &self, command_buffer: CommandBuffer, @@ -5629,7 +5622,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_set_viewport)(command_buffer, first_viewport, viewport_count, p_viewports) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_scissor( &self, command_buffer: CommandBuffer, @@ -5639,7 +5632,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_set_scissor)(command_buffer, first_scissor, scissor_count, p_scissors) } - 
#[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_line_width( &self, command_buffer: CommandBuffer, @@ -5647,7 +5640,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_set_line_width)(command_buffer, line_width) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_depth_bias( &self, command_buffer: CommandBuffer, @@ -5662,7 +5655,7 @@ impl DeviceFnV1_0 { depth_bias_slope_factor, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_blend_constants( &self, command_buffer: CommandBuffer, @@ -5670,7 +5663,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_set_blend_constants)(command_buffer, blend_constants) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_depth_bounds( &self, command_buffer: CommandBuffer, @@ -5679,7 +5672,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_set_depth_bounds)(command_buffer, min_depth_bounds, max_depth_bounds) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_stencil_compare_mask( &self, command_buffer: CommandBuffer, @@ -5688,7 +5681,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_set_stencil_compare_mask)(command_buffer, face_mask, compare_mask) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_stencil_write_mask( &self, command_buffer: CommandBuffer, @@ -5697,7 +5690,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_set_stencil_write_mask)(command_buffer, face_mask, write_mask) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_stencil_reference( &self, command_buffer: CommandBuffer, @@ -5706,7 +5699,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_set_stencil_reference)(command_buffer, face_mask, reference) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_bind_descriptor_sets( &self, command_buffer: CommandBuffer, @@ -5729,7 +5722,7 @@ impl DeviceFnV1_0 { p_dynamic_offsets, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_bind_index_buffer( &self, command_buffer: CommandBuffer, @@ -5739,7 +5732,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_bind_index_buffer)(command_buffer, buffer, offset, index_type) } - #[doc = ""] + #[doc = 
""] pub unsafe fn cmd_bind_vertex_buffers( &self, command_buffer: CommandBuffer, @@ -5756,7 +5749,7 @@ impl DeviceFnV1_0 { p_offsets, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw( &self, command_buffer: CommandBuffer, @@ -5773,7 +5766,7 @@ impl DeviceFnV1_0 { first_instance, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_indexed( &self, command_buffer: CommandBuffer, @@ -5792,7 +5785,7 @@ impl DeviceFnV1_0 { first_instance, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_indirect( &self, command_buffer: CommandBuffer, @@ -5803,7 +5796,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_draw_indirect)(command_buffer, buffer, offset, draw_count, stride) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_indexed_indirect( &self, command_buffer: CommandBuffer, @@ -5814,7 +5807,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_draw_indexed_indirect)(command_buffer, buffer, offset, draw_count, stride) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_dispatch( &self, command_buffer: CommandBuffer, @@ -5824,7 +5817,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_dispatch)(command_buffer, group_count_x, group_count_y, group_count_z) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_dispatch_indirect( &self, command_buffer: CommandBuffer, @@ -5833,7 +5826,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_dispatch_indirect)(command_buffer, buffer, offset) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_copy_buffer( &self, command_buffer: CommandBuffer, @@ -5850,7 +5843,7 @@ impl DeviceFnV1_0 { p_regions, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_copy_image( &self, command_buffer: CommandBuffer, @@ -5871,7 +5864,7 @@ impl DeviceFnV1_0 { p_regions, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_blit_image( &self, command_buffer: CommandBuffer, @@ -5894,7 +5887,7 @@ impl DeviceFnV1_0 { filter, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_copy_buffer_to_image( &self, command_buffer: CommandBuffer, @@ -5913,7 +5906,7 @@ impl DeviceFnV1_0 { 
p_regions, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_copy_image_to_buffer( &self, command_buffer: CommandBuffer, @@ -5932,7 +5925,7 @@ impl DeviceFnV1_0 { p_regions, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_update_buffer( &self, command_buffer: CommandBuffer, @@ -5943,7 +5936,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_update_buffer)(command_buffer, dst_buffer, dst_offset, data_size, p_data) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_fill_buffer( &self, command_buffer: CommandBuffer, @@ -5954,7 +5947,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_fill_buffer)(command_buffer, dst_buffer, dst_offset, size, data) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_clear_color_image( &self, command_buffer: CommandBuffer, @@ -5973,7 +5966,7 @@ impl DeviceFnV1_0 { p_ranges, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_clear_depth_stencil_image( &self, command_buffer: CommandBuffer, @@ -5992,7 +5985,7 @@ impl DeviceFnV1_0 { p_ranges, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_clear_attachments( &self, command_buffer: CommandBuffer, @@ -6009,7 +6002,7 @@ impl DeviceFnV1_0 { p_rects, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_resolve_image( &self, command_buffer: CommandBuffer, @@ -6030,7 +6023,7 @@ impl DeviceFnV1_0 { p_regions, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_event( &self, command_buffer: CommandBuffer, @@ -6039,7 +6032,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_set_event)(command_buffer, event, stage_mask) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_reset_event( &self, command_buffer: CommandBuffer, @@ -6048,7 +6041,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_reset_event)(command_buffer, event, stage_mask) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_wait_events( &self, command_buffer: CommandBuffer, @@ -6077,7 +6070,7 @@ impl DeviceFnV1_0 { p_image_memory_barriers, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_pipeline_barrier( &self, command_buffer: CommandBuffer, @@ -6104,7 +6097,7 
@@ impl DeviceFnV1_0 { p_image_memory_barriers, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_begin_query( &self, command_buffer: CommandBuffer, @@ -6114,7 +6107,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_begin_query)(command_buffer, query_pool, query, flags) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_end_query( &self, command_buffer: CommandBuffer, @@ -6123,7 +6116,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_end_query)(command_buffer, query_pool, query) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_reset_query_pool( &self, command_buffer: CommandBuffer, @@ -6133,7 +6126,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_reset_query_pool)(command_buffer, query_pool, first_query, query_count) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_write_timestamp( &self, command_buffer: CommandBuffer, @@ -6143,7 +6136,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_write_timestamp)(command_buffer, pipeline_stage, query_pool, query) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_copy_query_pool_results( &self, command_buffer: CommandBuffer, @@ -6166,7 +6159,7 @@ impl DeviceFnV1_0 { flags, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_push_constants( &self, command_buffer: CommandBuffer, @@ -6178,7 +6171,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_push_constants)(command_buffer, layout, stage_flags, offset, size, p_values) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_begin_render_pass( &self, command_buffer: CommandBuffer, @@ -6187,7 +6180,7 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_begin_render_pass)(command_buffer, p_render_pass_begin, contents) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_next_subpass( &self, command_buffer: CommandBuffer, @@ -6195,11 +6188,11 @@ impl DeviceFnV1_0 { ) -> c_void { (self.cmd_next_subpass)(command_buffer, contents) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_end_render_pass(&self, command_buffer: CommandBuffer) -> c_void { (self.cmd_end_render_pass)(command_buffer) } - #[doc = ""] + #[doc = ""] 
pub unsafe fn cmd_execute_commands( &self, command_buffer: CommandBuffer, @@ -6247,75 +6240,11 @@ impl EntryFnV1_1 { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn enumerate_instance_version(&self, p_api_version: *mut u32) -> Result { (self.enumerate_instance_version)(p_api_version) } } -#[allow(non_camel_case_types)] -pub type PFN_vkEnumeratePhysicalDeviceGroups = extern "system" fn( - instance: Instance, - p_physical_device_group_count: *mut u32, - p_physical_device_group_properties: *mut PhysicalDeviceGroupProperties, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceFeatures2 = extern "system" fn( - physical_device: PhysicalDevice, - p_features: *mut PhysicalDeviceFeatures2, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceProperties2 = extern "system" fn( - physical_device: PhysicalDevice, - p_properties: *mut PhysicalDeviceProperties2, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceFormatProperties2 = extern "system" fn( - physical_device: PhysicalDevice, - format: Format, - p_format_properties: *mut FormatProperties2, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceImageFormatProperties2 = extern "system" fn( - physical_device: PhysicalDevice, - p_image_format_info: *const PhysicalDeviceImageFormatInfo2, - p_image_format_properties: *mut ImageFormatProperties2, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceQueueFamilyProperties2 = extern "system" fn( - physical_device: PhysicalDevice, - p_queue_family_property_count: *mut u32, - p_queue_family_properties: *mut QueueFamilyProperties2, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceMemoryProperties2 = extern "system" fn( - physical_device: PhysicalDevice, - p_memory_properties: *mut PhysicalDeviceMemoryProperties2, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 = 
extern "system" fn( - physical_device: PhysicalDevice, - p_format_info: *const PhysicalDeviceSparseImageFormatInfo2, - p_property_count: *mut u32, - p_properties: *mut SparseImageFormatProperties2, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceExternalBufferProperties = extern "system" fn( - physical_device: PhysicalDevice, - p_external_buffer_info: *const PhysicalDeviceExternalBufferInfo, - p_external_buffer_properties: *mut ExternalBufferProperties, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceExternalFenceProperties = extern "system" fn( - physical_device: PhysicalDevice, - p_external_fence_info: *const PhysicalDeviceExternalFenceInfo, - p_external_fence_properties: *mut ExternalFenceProperties, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceExternalSemaphoreProperties = extern "system" fn( - physical_device: PhysicalDevice, - p_external_semaphore_info: *const PhysicalDeviceExternalSemaphoreInfo, - p_external_semaphore_properties: *mut ExternalSemaphoreProperties, -) -> c_void; pub struct InstanceFnV1_1 { pub enumerate_physical_device_groups: extern "system" fn( instance: Instance, @@ -6622,7 +6551,7 @@ impl InstanceFnV1_1 { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn enumerate_physical_device_groups( &self, instance: Instance, @@ -6635,7 +6564,7 @@ impl InstanceFnV1_1 { p_physical_device_group_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_features2( &self, physical_device: PhysicalDevice, @@ -6643,7 +6572,7 @@ impl InstanceFnV1_1 { ) -> c_void { (self.get_physical_device_features2)(physical_device, p_features) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_properties2( &self, physical_device: PhysicalDevice, @@ -6651,7 +6580,7 @@ impl InstanceFnV1_1 { ) -> c_void { (self.get_physical_device_properties2)(physical_device, p_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn 
get_physical_device_format_properties2( &self, physical_device: PhysicalDevice, @@ -6660,7 +6589,7 @@ impl InstanceFnV1_1 { ) -> c_void { (self.get_physical_device_format_properties2)(physical_device, format, p_format_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_image_format_properties2( &self, physical_device: PhysicalDevice, @@ -6673,7 +6602,7 @@ impl InstanceFnV1_1 { p_image_format_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_queue_family_properties2( &self, physical_device: PhysicalDevice, @@ -6686,7 +6615,7 @@ impl InstanceFnV1_1 { p_queue_family_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_memory_properties2( &self, physical_device: PhysicalDevice, @@ -6694,7 +6623,7 @@ impl InstanceFnV1_1 { ) -> c_void { (self.get_physical_device_memory_properties2)(physical_device, p_memory_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_sparse_image_format_properties2( &self, physical_device: PhysicalDevice, @@ -6709,7 +6638,7 @@ impl InstanceFnV1_1 { p_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_external_buffer_properties( &self, physical_device: PhysicalDevice, @@ -6722,7 +6651,7 @@ impl InstanceFnV1_1 { p_external_buffer_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_external_fence_properties( &self, physical_device: PhysicalDevice, @@ -6735,7 +6664,7 @@ impl InstanceFnV1_1 { p_external_fence_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_external_semaphore_properties( &self, physical_device: PhysicalDevice, @@ -6750,108 +6679,11 @@ impl InstanceFnV1_1 { } } #[allow(non_camel_case_types)] -pub type PFN_vkBindBufferMemory2 = extern "system" fn( +pub type PFN_vkGetDeviceQueue2 = extern "system" fn( device: Device, bind_info_count: u32, p_bind_infos: *const BindBufferMemoryInfo, ) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkBindImageMemory2 
= extern "system" fn( - device: Device, - bind_info_count: u32, - p_bind_infos: *const BindImageMemoryInfo, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkGetDeviceGroupPeerMemoryFeatures = extern "system" fn( - device: Device, - heap_index: u32, - local_device_index: u32, - remote_device_index: u32, - p_peer_memory_features: *mut PeerMemoryFeatureFlags, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkCmdSetDeviceMask = - extern "system" fn(command_buffer: CommandBuffer, device_mask: u32) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkCmdDispatchBase = extern "system" fn( - command_buffer: CommandBuffer, - base_group_x: u32, - base_group_y: u32, - base_group_z: u32, - group_count_x: u32, - group_count_y: u32, - group_count_z: u32, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetImageMemoryRequirements2 = extern "system" fn( - device: Device, - p_info: *const ImageMemoryRequirementsInfo2, - p_memory_requirements: *mut MemoryRequirements2, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetBufferMemoryRequirements2 = extern "system" fn( - device: Device, - p_info: *const BufferMemoryRequirementsInfo2, - p_memory_requirements: *mut MemoryRequirements2, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetImageSparseMemoryRequirements2 = extern "system" fn( - device: Device, - p_info: *const ImageSparseMemoryRequirementsInfo2, - p_sparse_memory_requirement_count: *mut u32, - p_sparse_memory_requirements: *mut SparseImageMemoryRequirements2, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkTrimCommandPool = extern "system" fn( - device: Device, - command_pool: CommandPool, - flags: CommandPoolTrimFlags, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetDeviceQueue2 = extern "system" fn( - device: Device, - p_queue_info: *const DeviceQueueInfo2, - p_queue: *mut Queue, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkCreateSamplerYcbcrConversion = 
extern "system" fn( - device: Device, - p_create_info: *const SamplerYcbcrConversionCreateInfo, - p_allocator: *const AllocationCallbacks, - p_ycbcr_conversion: *mut SamplerYcbcrConversion, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkDestroySamplerYcbcrConversion = extern "system" fn( - device: Device, - ycbcr_conversion: SamplerYcbcrConversion, - p_allocator: *const AllocationCallbacks, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkCreateDescriptorUpdateTemplate = extern "system" fn( - device: Device, - p_create_info: *const DescriptorUpdateTemplateCreateInfo, - p_allocator: *const AllocationCallbacks, - p_descriptor_update_template: *mut DescriptorUpdateTemplate, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkDestroyDescriptorUpdateTemplate = extern "system" fn( - device: Device, - descriptor_update_template: DescriptorUpdateTemplate, - p_allocator: *const AllocationCallbacks, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkUpdateDescriptorSetWithTemplate = extern "system" fn( - device: Device, - descriptor_set: DescriptorSet, - descriptor_update_template: DescriptorUpdateTemplate, - p_data: *const c_void, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkGetDescriptorSetLayoutSupport = extern "system" fn( - device: Device, - p_create_info: *const DescriptorSetLayoutCreateInfo, - p_support: *mut DescriptorSetLayoutSupport, -) -> c_void; pub struct DeviceFnV1_1 { pub bind_buffer_memory2: extern "system" fn( device: Device, @@ -7284,7 +7116,7 @@ impl DeviceFnV1_1 { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn bind_buffer_memory2( &self, device: Device, @@ -7293,7 +7125,7 @@ impl DeviceFnV1_1 { ) -> Result { (self.bind_buffer_memory2)(device, bind_info_count, p_bind_infos) } - #[doc = ""] + #[doc = ""] pub unsafe fn bind_image_memory2( &self, device: Device, @@ -7302,7 +7134,7 @@ impl DeviceFnV1_1 { ) -> Result { (self.bind_image_memory2)(device, bind_info_count, p_bind_infos) } - #[doc = 
""] + #[doc = ""] pub unsafe fn get_device_group_peer_memory_features( &self, device: Device, @@ -7319,7 +7151,7 @@ impl DeviceFnV1_1 { p_peer_memory_features, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_device_mask( &self, command_buffer: CommandBuffer, @@ -7327,7 +7159,7 @@ impl DeviceFnV1_1 { ) -> c_void { (self.cmd_set_device_mask)(command_buffer, device_mask) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_dispatch_base( &self, command_buffer: CommandBuffer, @@ -7348,7 +7180,7 @@ impl DeviceFnV1_1 { group_count_z, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_image_memory_requirements2( &self, device: Device, @@ -7357,7 +7189,7 @@ impl DeviceFnV1_1 { ) -> c_void { (self.get_image_memory_requirements2)(device, p_info, p_memory_requirements) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_buffer_memory_requirements2( &self, device: Device, @@ -7366,7 +7198,7 @@ impl DeviceFnV1_1 { ) -> c_void { (self.get_buffer_memory_requirements2)(device, p_info, p_memory_requirements) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_image_sparse_memory_requirements2( &self, device: Device, @@ -7381,7 +7213,7 @@ impl DeviceFnV1_1 { p_sparse_memory_requirements, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn trim_command_pool( &self, device: Device, @@ -7390,7 +7222,7 @@ impl DeviceFnV1_1 { ) -> c_void { (self.trim_command_pool)(device, command_pool, flags) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_device_queue2( &self, device: Device, @@ -7399,7 +7231,7 @@ impl DeviceFnV1_1 { ) -> c_void { (self.get_device_queue2)(device, p_queue_info, p_queue) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_sampler_ycbcr_conversion( &self, device: Device, @@ -7414,7 +7246,7 @@ impl DeviceFnV1_1 { p_ycbcr_conversion, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_sampler_ycbcr_conversion( &self, device: Device, @@ -7423,7 +7255,7 @@ impl DeviceFnV1_1 { ) -> c_void { (self.destroy_sampler_ycbcr_conversion)(device, ycbcr_conversion, p_allocator) } - #[doc = ""] + 
#[doc = ""] pub unsafe fn create_descriptor_update_template( &self, device: Device, @@ -7438,7 +7270,7 @@ impl DeviceFnV1_1 { p_descriptor_update_template, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_descriptor_update_template( &self, device: Device, @@ -7447,7 +7279,7 @@ impl DeviceFnV1_1 { ) -> c_void { (self.destroy_descriptor_update_template)(device, descriptor_update_template, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn update_descriptor_set_with_template( &self, device: Device, @@ -7462,7 +7294,7 @@ impl DeviceFnV1_1 { p_data, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_descriptor_set_layout_support( &self, device: Device, @@ -7472,229 +7304,734 @@ impl DeviceFnV1_1 { (self.get_descriptor_set_layout_support)(device, p_create_info, p_support) } } -#[doc = ""] +pub struct EntryFnV1_2 {} +unsafe impl Send for EntryFnV1_2 {} +unsafe impl Sync for EntryFnV1_2 {} +impl ::std::clone::Clone for EntryFnV1_2 { + fn clone(&self) -> Self { + EntryFnV1_2 {} + } +} +impl EntryFnV1_2 { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + EntryFnV1_2 {} + } +} +pub struct InstanceFnV1_2 {} +unsafe impl Send for InstanceFnV1_2 {} +unsafe impl Sync for InstanceFnV1_2 {} +impl ::std::clone::Clone for InstanceFnV1_2 { + fn clone(&self) -> Self { + InstanceFnV1_2 {} + } +} +impl InstanceFnV1_2 { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + InstanceFnV1_2 {} + } +} +pub struct DeviceFnV1_2 { + pub cmd_draw_indirect_count: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void, + pub cmd_draw_indexed_indirect_count: extern "system" fn( + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void, 
+ pub create_render_pass2: extern "system" fn( + device: Device, + p_create_info: *const RenderPassCreateInfo2, + p_allocator: *const AllocationCallbacks, + p_render_pass: *mut RenderPass, + ) -> Result, + pub cmd_begin_render_pass2: extern "system" fn( + command_buffer: CommandBuffer, + p_render_pass_begin: *const RenderPassBeginInfo, + p_subpass_begin_info: *const SubpassBeginInfo, + ) -> c_void, + pub cmd_next_subpass2: extern "system" fn( + command_buffer: CommandBuffer, + p_subpass_begin_info: *const SubpassBeginInfo, + p_subpass_end_info: *const SubpassEndInfo, + ) -> c_void, + pub cmd_end_render_pass2: extern "system" fn( + command_buffer: CommandBuffer, + p_subpass_end_info: *const SubpassEndInfo, + ) -> c_void, + pub reset_query_pool: extern "system" fn( + device: Device, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + ) -> c_void, + pub get_semaphore_counter_value: + extern "system" fn(device: Device, semaphore: Semaphore, p_value: *mut u64) -> Result, + pub wait_semaphores: extern "system" fn( + device: Device, + p_wait_info: *const SemaphoreWaitInfo, + timeout: u64, + ) -> Result, + pub signal_semaphore: + extern "system" fn(device: Device, p_signal_info: *const SemaphoreSignalInfo) -> Result, + pub get_buffer_device_address: + extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfo) -> DeviceAddress, + pub get_buffer_opaque_capture_address: + extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfo) -> u64, + pub get_device_memory_opaque_capture_address: extern "system" fn( + device: Device, + p_info: *const DeviceMemoryOpaqueCaptureAddressInfo, + ) -> u64, +} +unsafe impl Send for DeviceFnV1_2 {} +unsafe impl Sync for DeviceFnV1_2 {} +impl ::std::clone::Clone for DeviceFnV1_2 { + fn clone(&self) -> Self { + DeviceFnV1_2 { + cmd_draw_indirect_count: self.cmd_draw_indirect_count, + cmd_draw_indexed_indirect_count: self.cmd_draw_indexed_indirect_count, + create_render_pass2: self.create_render_pass2, 
+ cmd_begin_render_pass2: self.cmd_begin_render_pass2, + cmd_next_subpass2: self.cmd_next_subpass2, + cmd_end_render_pass2: self.cmd_end_render_pass2, + reset_query_pool: self.reset_query_pool, + get_semaphore_counter_value: self.get_semaphore_counter_value, + wait_semaphores: self.wait_semaphores, + signal_semaphore: self.signal_semaphore, + get_buffer_device_address: self.get_buffer_device_address, + get_buffer_opaque_capture_address: self.get_buffer_opaque_capture_address, + get_device_memory_opaque_capture_address: self.get_device_memory_opaque_capture_address, + } + } +} +impl DeviceFnV1_2 { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + DeviceFnV1_2 { + cmd_draw_indirect_count: unsafe { + extern "system" fn cmd_draw_indirect_count( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + _count_buffer: Buffer, + _count_buffer_offset: DeviceSize, + _max_draw_count: u32, + _stride: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_indirect_count) + )) + } + let raw_name = stringify!(vkCmdDrawIndirectCount); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indirect_count + } else { + ::std::mem::transmute(val) + } + }, + cmd_draw_indexed_indirect_count: unsafe { + extern "system" fn cmd_draw_indexed_indirect_count( + _command_buffer: CommandBuffer, + _buffer: Buffer, + _offset: DeviceSize, + _count_buffer: Buffer, + _count_buffer_offset: DeviceSize, + _max_draw_count: u32, + _stride: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_draw_indexed_indirect_count) + )) + } + let raw_name = stringify!(vkCmdDrawIndexedIndirectCount); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_draw_indexed_indirect_count + } else { + ::std::mem::transmute(val) + } + }, + create_render_pass2: unsafe { + extern "system" fn 
create_render_pass2( + _device: Device, + _p_create_info: *const RenderPassCreateInfo2, + _p_allocator: *const AllocationCallbacks, + _p_render_pass: *mut RenderPass, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(create_render_pass2))) + } + let raw_name = stringify!(vkCreateRenderPass2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_render_pass2 + } else { + ::std::mem::transmute(val) + } + }, + cmd_begin_render_pass2: unsafe { + extern "system" fn cmd_begin_render_pass2( + _command_buffer: CommandBuffer, + _p_render_pass_begin: *const RenderPassBeginInfo, + _p_subpass_begin_info: *const SubpassBeginInfo, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_begin_render_pass2) + )) + } + let raw_name = stringify!(vkCmdBeginRenderPass2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_begin_render_pass2 + } else { + ::std::mem::transmute(val) + } + }, + cmd_next_subpass2: unsafe { + extern "system" fn cmd_next_subpass2( + _command_buffer: CommandBuffer, + _p_subpass_begin_info: *const SubpassBeginInfo, + _p_subpass_end_info: *const SubpassEndInfo, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_next_subpass2))) + } + let raw_name = stringify!(vkCmdNextSubpass2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_next_subpass2 + } else { + ::std::mem::transmute(val) + } + }, + cmd_end_render_pass2: unsafe { + extern "system" fn cmd_end_render_pass2( + _command_buffer: CommandBuffer, + _p_subpass_end_info: *const SubpassEndInfo, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_end_render_pass2))) + } + let raw_name = stringify!(vkCmdEndRenderPass2); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_end_render_pass2 + } else { + 
::std::mem::transmute(val) + } + }, + reset_query_pool: unsafe { + extern "system" fn reset_query_pool( + _device: Device, + _query_pool: QueryPool, + _first_query: u32, + _query_count: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(reset_query_pool))) + } + let raw_name = stringify!(vkResetQueryPool); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + reset_query_pool + } else { + ::std::mem::transmute(val) + } + }, + get_semaphore_counter_value: unsafe { + extern "system" fn get_semaphore_counter_value( + _device: Device, + _semaphore: Semaphore, + _p_value: *mut u64, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_semaphore_counter_value) + )) + } + let raw_name = stringify!(vkGetSemaphoreCounterValue); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_semaphore_counter_value + } else { + ::std::mem::transmute(val) + } + }, + wait_semaphores: unsafe { + extern "system" fn wait_semaphores( + _device: Device, + _p_wait_info: *const SemaphoreWaitInfo, + _timeout: u64, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(wait_semaphores))) + } + let raw_name = stringify!(vkWaitSemaphores); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + wait_semaphores + } else { + ::std::mem::transmute(val) + } + }, + signal_semaphore: unsafe { + extern "system" fn signal_semaphore( + _device: Device, + _p_signal_info: *const SemaphoreSignalInfo, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(signal_semaphore))) + } + let raw_name = stringify!(vkSignalSemaphore); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + signal_semaphore + } else { + ::std::mem::transmute(val) + } + }, + get_buffer_device_address: unsafe { + extern "system" fn get_buffer_device_address( + _device: Device, + 
_p_info: *const BufferDeviceAddressInfo, + ) -> DeviceAddress { + panic!(concat!( + "Unable to load ", + stringify!(get_buffer_device_address) + )) + } + let raw_name = stringify!(vkGetBufferDeviceAddress); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_buffer_device_address + } else { + ::std::mem::transmute(val) + } + }, + get_buffer_opaque_capture_address: unsafe { + extern "system" fn get_buffer_opaque_capture_address( + _device: Device, + _p_info: *const BufferDeviceAddressInfo, + ) -> u64 { + panic!(concat!( + "Unable to load ", + stringify!(get_buffer_opaque_capture_address) + )) + } + let raw_name = stringify!(vkGetBufferOpaqueCaptureAddress); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_buffer_opaque_capture_address + } else { + ::std::mem::transmute(val) + } + }, + get_device_memory_opaque_capture_address: unsafe { + extern "system" fn get_device_memory_opaque_capture_address( + _device: Device, + _p_info: *const DeviceMemoryOpaqueCaptureAddressInfo, + ) -> u64 { + panic!(concat!( + "Unable to load ", + stringify!(get_device_memory_opaque_capture_address) + )) + } + let raw_name = stringify!(vkGetDeviceMemoryOpaqueCaptureAddress); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_memory_opaque_capture_address + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_draw_indirect_count( + &self, + command_buffer: CommandBuffer, + buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void { + (self.cmd_draw_indirect_count)( + command_buffer, + buffer, + offset, + count_buffer, + count_buffer_offset, + max_draw_count, + stride, + ) + } + #[doc = ""] + pub unsafe fn cmd_draw_indexed_indirect_count( + &self, + command_buffer: CommandBuffer, + 
buffer: Buffer, + offset: DeviceSize, + count_buffer: Buffer, + count_buffer_offset: DeviceSize, + max_draw_count: u32, + stride: u32, + ) -> c_void { + (self.cmd_draw_indexed_indirect_count)( + command_buffer, + buffer, + offset, + count_buffer, + count_buffer_offset, + max_draw_count, + stride, + ) + } + #[doc = ""] + pub unsafe fn create_render_pass2( + &self, + device: Device, + p_create_info: *const RenderPassCreateInfo2, + p_allocator: *const AllocationCallbacks, + p_render_pass: *mut RenderPass, + ) -> Result { + (self.create_render_pass2)(device, p_create_info, p_allocator, p_render_pass) + } + #[doc = ""] + pub unsafe fn cmd_begin_render_pass2( + &self, + command_buffer: CommandBuffer, + p_render_pass_begin: *const RenderPassBeginInfo, + p_subpass_begin_info: *const SubpassBeginInfo, + ) -> c_void { + (self.cmd_begin_render_pass2)(command_buffer, p_render_pass_begin, p_subpass_begin_info) + } + #[doc = ""] + pub unsafe fn cmd_next_subpass2( + &self, + command_buffer: CommandBuffer, + p_subpass_begin_info: *const SubpassBeginInfo, + p_subpass_end_info: *const SubpassEndInfo, + ) -> c_void { + (self.cmd_next_subpass2)(command_buffer, p_subpass_begin_info, p_subpass_end_info) + } + #[doc = ""] + pub unsafe fn cmd_end_render_pass2( + &self, + command_buffer: CommandBuffer, + p_subpass_end_info: *const SubpassEndInfo, + ) -> c_void { + (self.cmd_end_render_pass2)(command_buffer, p_subpass_end_info) + } + #[doc = ""] + pub unsafe fn reset_query_pool( + &self, + device: Device, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + ) -> c_void { + (self.reset_query_pool)(device, query_pool, first_query, query_count) + } + #[doc = ""] + pub unsafe fn get_semaphore_counter_value( + &self, + device: Device, + semaphore: Semaphore, + p_value: *mut u64, + ) -> Result { + (self.get_semaphore_counter_value)(device, semaphore, p_value) + } + #[doc = ""] + pub unsafe fn wait_semaphores( + &self, + device: Device, + p_wait_info: *const SemaphoreWaitInfo, + 
timeout: u64, + ) -> Result { + (self.wait_semaphores)(device, p_wait_info, timeout) + } + #[doc = ""] + pub unsafe fn signal_semaphore( + &self, + device: Device, + p_signal_info: *const SemaphoreSignalInfo, + ) -> Result { + (self.signal_semaphore)(device, p_signal_info) + } + #[doc = ""] + pub unsafe fn get_buffer_device_address( + &self, + device: Device, + p_info: *const BufferDeviceAddressInfo, + ) -> DeviceAddress { + (self.get_buffer_device_address)(device, p_info) + } + #[doc = ""] + pub unsafe fn get_buffer_opaque_capture_address( + &self, + device: Device, + p_info: *const BufferDeviceAddressInfo, + ) -> u64 { + (self.get_buffer_opaque_capture_address)(device, p_info) + } + #[doc = ""] + pub unsafe fn get_device_memory_opaque_capture_address( + &self, + device: Device, + p_info: *const DeviceMemoryOpaqueCaptureAddressInfo, + ) -> u64 { + (self.get_device_memory_opaque_capture_address)(device, p_info) + } +} +#[doc = ""] pub type SampleMask = u32; -#[doc = ""] +#[doc = ""] pub type Bool32 = u32; -#[doc = ""] +#[doc = ""] pub type Flags = u32; -#[doc = ""] +#[doc = ""] pub type DeviceSize = u64; -#[doc = ""] +#[doc = ""] pub type DeviceAddress = u64; #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct FramebufferCreateFlags(Flags); -vk_bitflags_wrapped!(FramebufferCreateFlags, 0b0, Flags); -#[repr(transparent)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct QueryPoolCreateFlags(Flags); vk_bitflags_wrapped!(QueryPoolCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineLayoutCreateFlags(Flags); vk_bitflags_wrapped!(PipelineLayoutCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct PipelineCacheCreateFlags(Flags); -vk_bitflags_wrapped!(PipelineCacheCreateFlags, 0b0, Flags); 
-#[repr(transparent)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineDepthStencilStateCreateFlags(Flags); vk_bitflags_wrapped!(PipelineDepthStencilStateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineDynamicStateCreateFlags(Flags); vk_bitflags_wrapped!(PipelineDynamicStateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineColorBlendStateCreateFlags(Flags); vk_bitflags_wrapped!(PipelineColorBlendStateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineMultisampleStateCreateFlags(Flags); vk_bitflags_wrapped!(PipelineMultisampleStateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineRasterizationStateCreateFlags(Flags); vk_bitflags_wrapped!(PipelineRasterizationStateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineViewportStateCreateFlags(Flags); vk_bitflags_wrapped!(PipelineViewportStateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineTessellationStateCreateFlags(Flags); vk_bitflags_wrapped!(PipelineTessellationStateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineInputAssemblyStateCreateFlags(Flags); vk_bitflags_wrapped!(PipelineInputAssemblyStateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct 
PipelineVertexInputStateCreateFlags(Flags); vk_bitflags_wrapped!(PipelineVertexInputStateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct PipelineShaderStageCreateFlags(Flags); -vk_bitflags_wrapped!(PipelineShaderStageCreateFlags, 0b0, Flags); -#[repr(transparent)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct BufferViewCreateFlags(Flags); vk_bitflags_wrapped!(BufferViewCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct InstanceCreateFlags(Flags); vk_bitflags_wrapped!(InstanceCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DeviceCreateFlags(Flags); vk_bitflags_wrapped!(DeviceCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct SemaphoreCreateFlags(Flags); -vk_bitflags_wrapped!(SemaphoreCreateFlags, 0b0, Flags); -#[repr(transparent)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct ShaderModuleCreateFlags(Flags); -vk_bitflags_wrapped!(ShaderModuleCreateFlags, 0b0, Flags); -#[repr(transparent)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct EventCreateFlags(Flags); vk_bitflags_wrapped!(EventCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct MemoryMapFlags(Flags); vk_bitflags_wrapped!(MemoryMapFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DescriptorPoolResetFlags(Flags); vk_bitflags_wrapped!(DescriptorPoolResetFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, 
PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DescriptorUpdateTemplateCreateFlags(Flags); vk_bitflags_wrapped!(DescriptorUpdateTemplateCreateFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DisplayModeCreateFlagsKHR(Flags); vk_bitflags_wrapped!(DisplayModeCreateFlagsKHR, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DisplaySurfaceCreateFlagsKHR(Flags); vk_bitflags_wrapped!(DisplaySurfaceCreateFlagsKHR, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct AndroidSurfaceCreateFlagsKHR(Flags); vk_bitflags_wrapped!(AndroidSurfaceCreateFlagsKHR, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ViSurfaceCreateFlagsNN(Flags); vk_bitflags_wrapped!(ViSurfaceCreateFlagsNN, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct WaylandSurfaceCreateFlagsKHR(Flags); vk_bitflags_wrapped!(WaylandSurfaceCreateFlagsKHR, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct Win32SurfaceCreateFlagsKHR(Flags); vk_bitflags_wrapped!(Win32SurfaceCreateFlagsKHR, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct XlibSurfaceCreateFlagsKHR(Flags); vk_bitflags_wrapped!(XlibSurfaceCreateFlagsKHR, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct XcbSurfaceCreateFlagsKHR(Flags); vk_bitflags_wrapped!(XcbSurfaceCreateFlagsKHR, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] 
+#[doc = ""] pub struct IOSSurfaceCreateFlagsMVK(Flags); vk_bitflags_wrapped!(IOSSurfaceCreateFlagsMVK, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct MacOSSurfaceCreateFlagsMVK(Flags); vk_bitflags_wrapped!(MacOSSurfaceCreateFlagsMVK, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] +pub struct MetalSurfaceCreateFlagsEXT(Flags); +vk_bitflags_wrapped!(MetalSurfaceCreateFlagsEXT, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] pub struct ImagePipeSurfaceCreateFlagsFUCHSIA(Flags); vk_bitflags_wrapped!(ImagePipeSurfaceCreateFlagsFUCHSIA, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] +pub struct StreamDescriptorSurfaceCreateFlagsGGP(Flags); +vk_bitflags_wrapped!(StreamDescriptorSurfaceCreateFlagsGGP, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct HeadlessSurfaceCreateFlagsEXT(Flags); +vk_bitflags_wrapped!(HeadlessSurfaceCreateFlagsEXT, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] pub struct CommandPoolTrimFlags(Flags); vk_bitflags_wrapped!(CommandPoolTrimFlags, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineViewportSwizzleStateCreateFlagsNV(Flags); vk_bitflags_wrapped!(PipelineViewportSwizzleStateCreateFlagsNV, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineDiscardRectangleStateCreateFlagsEXT(Flags); vk_bitflags_wrapped!(PipelineDiscardRectangleStateCreateFlagsEXT, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] 
-#[doc = ""] +#[doc = ""] pub struct PipelineCoverageToColorStateCreateFlagsNV(Flags); vk_bitflags_wrapped!(PipelineCoverageToColorStateCreateFlagsNV, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineCoverageModulationStateCreateFlagsNV(Flags); vk_bitflags_wrapped!(PipelineCoverageModulationStateCreateFlagsNV, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] +pub struct PipelineCoverageReductionStateCreateFlagsNV(Flags); +vk_bitflags_wrapped!(PipelineCoverageReductionStateCreateFlagsNV, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] pub struct ValidationCacheCreateFlagsEXT(Flags); vk_bitflags_wrapped!(ValidationCacheCreateFlagsEXT, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DebugUtilsMessengerCreateFlagsEXT(Flags); vk_bitflags_wrapped!(DebugUtilsMessengerCreateFlagsEXT, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DebugUtilsMessengerCallbackDataFlagsEXT(Flags); vk_bitflags_wrapped!(DebugUtilsMessengerCallbackDataFlagsEXT, 0b0, Flags); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineRasterizationConservativeStateCreateFlagsEXT(Flags); vk_bitflags_wrapped!( PipelineRasterizationConservativeStateCreateFlagsEXT, @@ -7703,119 +8040,129 @@ vk_bitflags_wrapped!( ); #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineRasterizationStateStreamCreateFlagsEXT(Flags); vk_bitflags_wrapped!(PipelineRasterizationStateStreamCreateFlagsEXT, 0b0, Flags); +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] 
+#[doc = ""] +pub struct PipelineRasterizationDepthClipStateCreateFlagsEXT(Flags); +vk_bitflags_wrapped!( + PipelineRasterizationDepthClipStateCreateFlagsEXT, + 0b0, + Flags +); define_handle!( Instance, INSTANCE, - doc = "" + doc = "" ); -define_handle ! ( PhysicalDevice , PHYSICAL_DEVICE , doc = "" ) ; +define_handle ! ( PhysicalDevice , PHYSICAL_DEVICE , doc = "" ) ; define_handle!( Device, DEVICE, - doc = "" + doc = "" ); define_handle!( Queue, QUEUE, - doc = "" + doc = "" ); -define_handle ! ( CommandBuffer , COMMAND_BUFFER , doc = "" ) ; -handle_nondispatchable ! ( DeviceMemory , DEVICE_MEMORY , doc = "" ) ; -handle_nondispatchable ! ( CommandPool , COMMAND_POOL , doc = "" ) ; +define_handle ! ( CommandBuffer , COMMAND_BUFFER , doc = "" ) ; +handle_nondispatchable ! ( DeviceMemory , DEVICE_MEMORY , doc = "" ) ; +handle_nondispatchable ! ( CommandPool , COMMAND_POOL , doc = "" ) ; handle_nondispatchable!( Buffer, BUFFER, - doc = "" + doc = "" ); handle_nondispatchable!( BufferView, BUFFER_VIEW, doc = - "" + "" ); handle_nondispatchable!( Image, IMAGE, - doc = "" + doc = "" ); handle_nondispatchable!( ImageView, IMAGE_VIEW, doc = - "" + "" ); -handle_nondispatchable ! ( ShaderModule , SHADER_MODULE , doc = "" ) ; +handle_nondispatchable ! ( ShaderModule , SHADER_MODULE , doc = "" ) ; handle_nondispatchable!( Pipeline, PIPELINE, - doc = "" + doc = "" ); -handle_nondispatchable ! ( PipelineLayout , PIPELINE_LAYOUT , doc = "" ) ; +handle_nondispatchable ! ( PipelineLayout , PIPELINE_LAYOUT , doc = "" ) ; handle_nondispatchable!( Sampler, SAMPLER, - doc = "" + doc = "" ); -handle_nondispatchable ! ( DescriptorSet , DESCRIPTOR_SET , doc = "" ) ; -handle_nondispatchable ! ( DescriptorSetLayout , DESCRIPTOR_SET_LAYOUT , doc = "" ) ; -handle_nondispatchable ! ( DescriptorPool , DESCRIPTOR_POOL , doc = "" ) ; +handle_nondispatchable ! ( DescriptorSet , DESCRIPTOR_SET , doc = "" ) ; +handle_nondispatchable ! 
( DescriptorSetLayout , DESCRIPTOR_SET_LAYOUT , doc = "" ) ; +handle_nondispatchable ! ( DescriptorPool , DESCRIPTOR_POOL , doc = "" ) ; handle_nondispatchable!( Fence, FENCE, - doc = "" + doc = "" ); handle_nondispatchable!( Semaphore, SEMAPHORE, doc = - "" + "" ); handle_nondispatchable!( Event, EVENT, - doc = "" + doc = "" ); handle_nondispatchable!( QueryPool, QUERY_POOL, doc = - "" + "" ); -handle_nondispatchable ! ( Framebuffer , FRAMEBUFFER , doc = "" ) ; +handle_nondispatchable ! ( Framebuffer , FRAMEBUFFER , doc = "" ) ; handle_nondispatchable!( RenderPass, RENDER_PASS, doc = - "" + "" ); -handle_nondispatchable ! ( PipelineCache , PIPELINE_CACHE , doc = "" ) ; -handle_nondispatchable ! ( ObjectTableNVX , OBJECT_TABLE_NVX , doc = "" ) ; -handle_nondispatchable ! ( IndirectCommandsLayoutNVX , INDIRECT_COMMANDS_LAYOUT_NVX , doc = "" ) ; -handle_nondispatchable ! ( DescriptorUpdateTemplate , DESCRIPTOR_UPDATE_TEMPLATE , doc = "" ) ; -handle_nondispatchable ! ( SamplerYcbcrConversion , SAMPLER_YCBCR_CONVERSION , doc = "" ) ; -handle_nondispatchable ! ( ValidationCacheEXT , VALIDATION_CACHE_EXT , doc = "" ) ; -handle_nondispatchable ! ( AccelerationStructureNV , ACCELERATION_STRUCTURE_NV , doc = "" ) ; +handle_nondispatchable ! ( PipelineCache , PIPELINE_CACHE , doc = "" ) ; +handle_nondispatchable ! ( IndirectCommandsLayoutNV , INDIRECT_COMMANDS_LAYOUT_NV , doc = "" ) ; +handle_nondispatchable ! ( DescriptorUpdateTemplate , DESCRIPTOR_UPDATE_TEMPLATE , doc = "" ) ; +handle_nondispatchable ! ( SamplerYcbcrConversion , SAMPLER_YCBCR_CONVERSION , doc = "" ) ; +handle_nondispatchable ! ( ValidationCacheEXT , VALIDATION_CACHE_EXT , doc = "" ) ; +handle_nondispatchable ! ( AccelerationStructureKHR , ACCELERATION_STRUCTURE_KHR , doc = "" ) ; +handle_nondispatchable ! ( PerformanceConfigurationINTEL , PERFORMANCE_CONFIGURATION_INTEL , doc = "" ) ; +handle_nondispatchable ! 
( DeferredOperationKHR , DEFERRED_OPERATION_KHR , doc = "" ) ; handle_nondispatchable!( DisplayKHR, DISPLAY_KHR, doc = - "" + "" ); -handle_nondispatchable ! ( DisplayModeKHR , DISPLAY_MODE_KHR , doc = "" ) ; +handle_nondispatchable ! ( DisplayModeKHR , DISPLAY_MODE_KHR , doc = "" ) ; handle_nondispatchable!( SurfaceKHR, SURFACE_KHR, doc = - "" + "" ); -handle_nondispatchable ! ( SwapchainKHR , SWAPCHAIN_KHR , doc = "" ) ; -handle_nondispatchable ! ( DebugReportCallbackEXT , DEBUG_REPORT_CALLBACK_EXT , doc = "" ) ; -handle_nondispatchable ! ( DebugUtilsMessengerEXT , DEBUG_UTILS_MESSENGER_EXT , doc = "" ) ; +handle_nondispatchable ! ( SwapchainKHR , SWAPCHAIN_KHR , doc = "" ) ; +handle_nondispatchable ! ( DebugReportCallbackEXT , DEBUG_REPORT_CALLBACK_EXT , doc = "" ) ; +handle_nondispatchable ! ( DebugUtilsMessengerEXT , DEBUG_UTILS_MESSENGER_EXT , doc = "" ) ; #[allow(non_camel_case_types)] -#[doc = ""] +#[doc = ""] pub type PFN_vkInternalAllocationNotification = Option< unsafe extern "system" fn( p_user_data: *mut c_void, @@ -7825,7 +8172,7 @@ pub type PFN_vkInternalAllocationNotification = Option< ) -> c_void, >; #[allow(non_camel_case_types)] -#[doc = ""] +#[doc = ""] pub type PFN_vkInternalFreeNotification = Option< unsafe extern "system" fn( p_user_data: *mut c_void, @@ -7835,7 +8182,7 @@ pub type PFN_vkInternalFreeNotification = Option< ) -> c_void, >; #[allow(non_camel_case_types)] -#[doc = ""] +#[doc = ""] pub type PFN_vkReallocationFunction = Option< unsafe extern "system" fn( p_user_data: *mut c_void, @@ -7846,7 +8193,7 @@ pub type PFN_vkReallocationFunction = Option< ) -> *mut c_void, >; #[allow(non_camel_case_types)] -#[doc = ""] +#[doc = ""] pub type PFN_vkAllocationFunction = Option< unsafe extern "system" fn( p_user_data: *mut c_void, @@ -7856,14 +8203,14 @@ pub type PFN_vkAllocationFunction = Option< ) -> *mut c_void, >; #[allow(non_camel_case_types)] -#[doc = ""] +#[doc = ""] pub type PFN_vkFreeFunction = Option c_void>; 
#[allow(non_camel_case_types)] -#[doc = ""] +#[doc = ""] pub type PFN_vkVoidFunction = Option c_void>; #[allow(non_camel_case_types)] -#[doc = ""] +#[doc = ""] pub type PFN_vkDebugReportCallbackEXT = Option< unsafe extern "system" fn( flags: DebugReportFlagsEXT, @@ -7877,7 +8224,7 @@ pub type PFN_vkDebugReportCallbackEXT = Option< ) -> Bool32, >; #[allow(non_camel_case_types)] -#[doc = ""] +#[doc = ""] pub type PFN_vkDebugUtilsMessengerCallbackEXT = Option< unsafe extern "system" fn( message_severity: DebugUtilsMessageSeverityFlagsEXT, @@ -7888,7 +8235,7 @@ pub type PFN_vkDebugUtilsMessengerCallbackEXT = Option< >; #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BaseOutStructure { pub s_type: StructureType, pub p_next: *mut BaseOutStructure, @@ -7903,7 +8250,7 @@ impl ::std::default::Default for BaseOutStructure { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BaseInStructure { pub s_type: StructureType, pub p_next: *const BaseInStructure, @@ -7917,8 +8264,8 @@ impl ::std::default::Default for BaseInStructure { } } #[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] +#[doc = ""] pub struct Offset2D { pub x: i32, pub y: i32, @@ -7964,8 +8311,8 @@ impl<'a> Offset2DBuilder<'a> { } } #[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] +#[doc = ""] pub struct Offset3D { pub x: i32, pub y: i32, @@ -8016,8 +8363,8 @@ impl<'a> Offset3DBuilder<'a> { } } #[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] +#[doc = ""] pub struct Extent2D { pub width: u32, pub height: u32, @@ -8064,7 +8411,7 @@ impl<'a> Extent2DBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] -#[doc = ""] +#[doc = ""] pub struct Extent3D { pub width: u32, pub height: u32, @@ -8116,7 +8463,7 @@ 
impl<'a> Extent3DBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct Viewport { pub x: f32, pub y: f32, @@ -8182,8 +8529,8 @@ impl<'a> ViewportBuilder<'a> { } } #[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] +#[doc = ""] pub struct Rect2D { pub offset: Offset2D, pub extent: Extent2D, @@ -8229,8 +8576,8 @@ impl<'a> Rect2DBuilder<'a> { } } #[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] +#[doc = ""] pub struct ClearRect { pub rect: Rect2D, pub base_array_layer: u32, @@ -8282,7 +8629,7 @@ impl<'a> ClearRectBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ComponentMapping { pub r: ComponentSwizzle, pub g: ComponentSwizzle, @@ -8339,7 +8686,7 @@ impl<'a> ComponentMappingBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceProperties { pub api_version: u32, pub driver_version: u32, @@ -8465,7 +8812,7 @@ impl<'a> PhysicalDevicePropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] +#[doc = ""] pub struct ExtensionProperties { pub extension_name: [c_char; MAX_EXTENSION_NAME_SIZE], pub spec_version: u32, @@ -8533,7 +8880,7 @@ impl<'a> ExtensionPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] +#[doc = ""] pub struct LayerProperties { pub layer_name: [c_char; MAX_EXTENSION_NAME_SIZE], pub spec_version: u32, @@ -8623,7 +8970,7 @@ impl<'a> LayerPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ApplicationInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -8721,7 +9068,7 @@ impl<'a> ApplicationInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] +#[doc = ""] pub struct AllocationCallbacks { pub p_user_data: *mut c_void, pub pfn_allocation: 
PFN_vkAllocationFunction, @@ -8836,7 +9183,7 @@ impl<'a> AllocationCallbacksBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceQueueCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -8928,7 +9275,7 @@ impl<'a> DeviceQueueCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -9044,7 +9391,7 @@ impl<'a> DeviceCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct InstanceCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -9148,7 +9495,7 @@ impl<'a> InstanceCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct QueueFamilyProperties { pub queue_flags: QueueFlags, pub queue_count: u32, @@ -9211,7 +9558,7 @@ impl<'a> QueueFamilyPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceMemoryProperties { pub memory_type_count: u32, pub memory_types: [MemoryType; MAX_MEMORY_TYPES], @@ -9290,7 +9637,7 @@ impl<'a> PhysicalDeviceMemoryPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryAllocateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -9367,7 +9714,7 @@ impl<'a> MemoryAllocateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryRequirements { pub size: DeviceSize, pub alignment: DeviceSize, @@ -9419,7 +9766,7 @@ impl<'a> MemoryRequirementsBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct SparseImageFormatProperties { pub aspect_mask: ImageAspectFlags, pub image_granularity: Extent3D, @@ -9480,7 +9827,7 @@ impl<'a> SparseImageFormatPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc 
= ""] +#[doc = ""] pub struct SparseImageMemoryRequirements { pub format_properties: SparseImageFormatProperties, pub image_mip_tail_first_lod: u32, @@ -9557,7 +9904,7 @@ impl<'a> SparseImageMemoryRequirementsBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryType { pub property_flags: MemoryPropertyFlags, pub heap_index: u32, @@ -9604,7 +9951,7 @@ impl<'a> MemoryTypeBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryHeap { pub size: DeviceSize, pub flags: MemoryHeapFlags, @@ -9651,7 +9998,7 @@ impl<'a> MemoryHeapBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MappedMemoryRange { pub s_type: StructureType, pub p_next: *const c_void, @@ -9734,7 +10081,7 @@ impl<'a> MappedMemoryRangeBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct FormatProperties { pub linear_tiling_features: FormatFeatureFlags, pub optimal_tiling_features: FormatFeatureFlags, @@ -9795,7 +10142,7 @@ impl<'a> FormatPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageFormatProperties { pub max_extent: Extent3D, pub max_mip_levels: u32, @@ -9863,7 +10210,7 @@ impl<'a> ImageFormatPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DescriptorBufferInfo { pub buffer: Buffer, pub offset: DeviceSize, @@ -9915,7 +10262,7 @@ impl<'a> DescriptorBufferInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DescriptorImageInfo { pub sampler: Sampler, pub image_view: ImageView, @@ -9967,7 +10314,7 @@ impl<'a> DescriptorImageInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct WriteDescriptorSet { pub s_type: StructureType, pub p_next: *const c_void, @@ -10091,7 +10438,7 @@ impl<'a> 
WriteDescriptorSetBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct CopyDescriptorSet { pub s_type: StructureType, pub p_next: *const c_void, @@ -10198,7 +10545,7 @@ impl<'a> CopyDescriptorSetBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BufferCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -10299,7 +10646,7 @@ impl<'a> BufferCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BufferViewCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -10394,7 +10741,7 @@ impl<'a> BufferViewCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageSubresource { pub aspect_mask: ImageAspectFlags, pub mip_level: u32, @@ -10446,7 +10793,7 @@ impl<'a> ImageSubresourceBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageSubresourceLayers { pub aspect_mask: ImageAspectFlags, pub mip_level: u32, @@ -10506,7 +10853,7 @@ impl<'a> ImageSubresourceLayersBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageSubresourceRange { pub aspect_mask: ImageAspectFlags, pub base_mip_level: u32, @@ -10571,7 +10918,7 @@ impl<'a> ImageSubresourceRangeBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryBarrier { pub s_type: StructureType, pub p_next: *const c_void, @@ -10648,7 +10995,7 @@ impl<'a> MemoryBarrierBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BufferMemoryBarrier { pub s_type: StructureType, pub p_next: *const c_void, @@ -10767,7 +11114,7 @@ impl<'a> BufferMemoryBarrierBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageMemoryBarrier { pub s_type: StructureType, pub p_next: *const c_void, @@ -10895,7 
+11242,7 @@ impl<'a> ImageMemoryBarrierBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -11038,7 +11385,7 @@ impl<'a> ImageCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct SubresourceLayout { pub offset: DeviceSize, pub size: DeviceSize, @@ -11100,7 +11447,7 @@ impl<'a> SubresourceLayoutBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageViewCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -11204,7 +11551,7 @@ impl<'a> ImageViewCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct BufferCopy { pub src_offset: DeviceSize, pub dst_offset: DeviceSize, @@ -11256,7 +11603,7 @@ impl<'a> BufferCopyBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct SparseMemoryBind { pub resource_offset: DeviceSize, pub size: DeviceSize, @@ -11318,7 +11665,7 @@ impl<'a> SparseMemoryBindBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct SparseImageMemoryBind { pub subresource: ImageSubresource, pub offset: Offset3D, @@ -11388,7 +11735,7 @@ impl<'a> SparseImageMemoryBindBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SparseBufferMemoryBindInfo { pub buffer: Buffer, pub bind_count: u32, @@ -11446,7 +11793,7 @@ impl<'a> SparseBufferMemoryBindInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SparseImageOpaqueMemoryBindInfo { pub image: Image, pub bind_count: u32, @@ -11507,7 +11854,7 @@ impl<'a> SparseImageOpaqueMemoryBindInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SparseImageMemoryBindInfo { pub image: Image, pub bind_count: u32, @@ -11568,7 
+11915,7 @@ impl<'a> SparseImageMemoryBindInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BindSparseInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -11693,7 +12040,7 @@ impl<'a> BindSparseInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageCopy { pub src_subresource: ImageSubresourceLayers, pub src_offset: Offset3D, @@ -11761,7 +12108,7 @@ impl<'a> ImageCopyBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageBlit { pub src_subresource: ImageSubresourceLayers, pub src_offsets: [Offset3D; 2], @@ -11834,7 +12181,7 @@ impl<'a> ImageBlitBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct BufferImageCopy { pub buffer_offset: DeviceSize, pub buffer_row_length: u32, @@ -11904,7 +12251,7 @@ impl<'a> BufferImageCopyBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageResolve { pub src_subresource: ImageSubresourceLayers, pub src_offset: Offset3D, @@ -11972,7 +12319,7 @@ impl<'a> ImageResolveBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ShaderModuleCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -12052,7 +12399,7 @@ impl<'a> ShaderModuleCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DescriptorSetLayoutBinding { pub binding: u32, pub descriptor_type: DescriptorType, @@ -12138,7 +12485,7 @@ impl<'a> DescriptorSetLayoutBindingBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DescriptorSetLayoutCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -12224,7 +12571,7 @@ impl<'a> DescriptorSetLayoutCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct 
DescriptorPoolSize { pub ty: DescriptorType, pub descriptor_count: u32, @@ -12271,7 +12618,7 @@ impl<'a> DescriptorPoolSizeBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DescriptorPoolCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -12363,7 +12710,7 @@ impl<'a> DescriptorPoolCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DescriptorSetAllocateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -12449,7 +12796,7 @@ impl<'a> DescriptorSetAllocateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct SpecializationMapEntry { pub constant_id: u32, pub offset: u32, @@ -12501,7 +12848,7 @@ impl<'a> SpecializationMapEntryBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SpecializationInfo { pub map_entry_count: u32, pub p_map_entries: *const SpecializationMapEntry, @@ -12565,7 +12912,7 @@ impl<'a> SpecializationInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineShaderStageCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -12666,7 +13013,7 @@ impl<'a> PipelineShaderStageCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ComputePipelineCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -12770,7 +13117,7 @@ impl<'a> ComputePipelineCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct VertexInputBindingDescription { pub binding: u32, pub stride: u32, @@ -12825,7 +13172,7 @@ impl<'a> VertexInputBindingDescriptionBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct VertexInputAttributeDescription { pub location: u32, pub binding: u32, @@ -12882,7 +13229,7 @@ impl<'a> 
VertexInputAttributeDescriptionBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineVertexInputStateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -12980,7 +13327,7 @@ impl<'a> PipelineVertexInputStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineInputAssemblyStateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -13072,7 +13419,7 @@ impl<'a> PipelineInputAssemblyStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineTessellationStateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -13155,7 +13502,7 @@ impl<'a> PipelineTessellationStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineViewportStateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -13267,7 +13614,7 @@ impl<'a> PipelineViewportStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineRasterizationStateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -13431,7 +13778,7 @@ impl<'a> PipelineRasterizationStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineMultisampleStateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -13559,7 +13906,7 @@ impl<'a> PipelineMultisampleStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineColorBlendAttachmentState { pub blend_enable: Bool32, pub src_color_blend_factor: BlendFactor, @@ -13660,7 +14007,7 @@ impl<'a> PipelineColorBlendAttachmentStateBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineColorBlendStateCreateInfo { pub s_type: StructureType, pub p_next: *const 
c_void, @@ -13770,7 +14117,7 @@ impl<'a> PipelineColorBlendStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineDynamicStateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -13856,7 +14203,7 @@ impl<'a> PipelineDynamicStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct StencilOpState { pub fail_op: StencilOp, pub pass_op: StencilOp, @@ -13928,7 +14275,7 @@ impl<'a> StencilOpStateBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineDepthStencilStateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -14080,7 +14427,7 @@ impl<'a> PipelineDepthStencilStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct GraphicsPipelineCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -14280,7 +14627,7 @@ impl<'a> GraphicsPipelineCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineCacheCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -14360,7 +14707,7 @@ impl<'a> PipelineCacheCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct PushConstantRange { pub stage_flags: ShaderStageFlags, pub offset: u32, @@ -14412,7 +14759,7 @@ impl<'a> PushConstantRangeBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineLayoutCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -14510,7 +14857,7 @@ impl<'a> PipelineLayoutCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SamplerCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -14683,7 +15030,7 @@ impl<'a> SamplerCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] 
+#[doc = ""] pub struct CommandPoolCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -14763,7 +15110,7 @@ impl<'a> CommandPoolCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct CommandBufferAllocateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -14852,7 +15199,7 @@ impl<'a> CommandBufferAllocateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct CommandBufferInheritanceInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -14968,7 +15315,7 @@ impl<'a> CommandBufferInheritanceInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct CommandBufferBeginInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -15048,7 +15395,7 @@ impl<'a> CommandBufferBeginInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] +#[doc = ""] pub struct RenderPassBeginInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -15156,7 +15503,7 @@ impl<'a> RenderPassBeginInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] +#[doc = ""] pub union ClearColorValue { pub float32: [f32; 4], pub int32: [i32; 4], @@ -15169,7 +15516,7 @@ impl ::std::default::Default for ClearColorValue { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ClearDepthStencilValue { pub depth: f32, pub stencil: u32, @@ -15216,7 +15563,7 @@ impl<'a> ClearDepthStencilValueBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] +#[doc = ""] pub union ClearValue { pub color: ClearColorValue, pub depth_stencil: ClearDepthStencilValue, @@ -15228,7 +15575,7 @@ impl ::std::default::Default for ClearValue { } #[repr(C)] #[derive(Copy, Clone, Default)] -#[doc = ""] +#[doc = ""] pub struct ClearAttachment { pub aspect_mask: ImageAspectFlags, pub color_attachment: u32, @@ -15289,7 +15636,7 @@ impl<'a> ClearAttachmentBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, 
Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct AttachmentDescription { pub flags: AttachmentDescriptionFlags, pub format: Format, @@ -15380,7 +15727,7 @@ impl<'a> AttachmentDescriptionBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct AttachmentReference { pub attachment: u32, pub layout: ImageLayout, @@ -15427,7 +15774,7 @@ impl<'a> AttachmentReferenceBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SubpassDescription { pub flags: SubpassDescriptionFlags, pub pipeline_bind_point: PipelineBindPoint, @@ -15540,7 +15887,7 @@ impl<'a> SubpassDescriptionBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct SubpassDependency { pub src_subpass: u32, pub dst_subpass: u32, @@ -15621,7 +15968,7 @@ impl<'a> SubpassDependencyBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct RenderPassCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -15728,7 +16075,7 @@ impl<'a> RenderPassCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct EventCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -15799,7 +16146,7 @@ impl<'a> EventCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct FenceCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -15870,7 +16217,7 @@ impl<'a> FenceCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceFeatures { pub robust_buffer_access: Bool32, pub full_draw_index_uint32: Bool32, @@ -16310,7 +16657,7 @@ impl<'a> PhysicalDeviceFeaturesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceSparseProperties { pub residency_standard2_d_block_shape: Bool32, pub 
residency_standard2_d_multisample_block_shape: Bool32, @@ -16388,7 +16735,7 @@ impl<'a> PhysicalDeviceSparsePropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceLimits { pub max_image_dimension1_d: u32, pub max_image_dimension2_d: u32, @@ -17388,7 +17735,7 @@ impl<'a> PhysicalDeviceLimitsBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SemaphoreCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -17459,7 +17806,7 @@ impl<'a> SemaphoreCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct QueryPoolCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -17551,7 +17898,7 @@ impl<'a> QueryPoolCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct FramebufferCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -17655,7 +18002,7 @@ impl<'a> FramebufferCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DrawIndirectCommand { pub vertex_count: u32, pub instance_count: u32, @@ -17712,7 +18059,7 @@ impl<'a> DrawIndirectCommandBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DrawIndexedIndirectCommand { pub index_count: u32, pub instance_count: u32, @@ -17774,7 +18121,7 @@ impl<'a> DrawIndexedIndirectCommandBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DispatchIndirectCommand { pub x: u32, pub y: u32, @@ -17826,7 +18173,7 @@ impl<'a> DispatchIndirectCommandBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SubmitInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -17931,7 +18278,7 @@ impl<'a> SubmitInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct 
DisplayPropertiesKHR { pub display: DisplayKHR, pub display_name: *const c_char, @@ -18031,7 +18378,7 @@ impl<'a> DisplayPropertiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayPlanePropertiesKHR { pub current_display: DisplayKHR, pub current_stack_index: u32, @@ -18084,7 +18431,7 @@ impl<'a> DisplayPlanePropertiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayModeParametersKHR { pub visible_region: Extent2D, pub refresh_rate: u32, @@ -18134,7 +18481,7 @@ impl<'a> DisplayModeParametersKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayModePropertiesKHR { pub display_mode: DisplayModeKHR, pub parameters: DisplayModeParametersKHR, @@ -18187,7 +18534,7 @@ impl<'a> DisplayModePropertiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayModeCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -18270,7 +18617,7 @@ impl<'a> DisplayModeCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayPlaneCapabilitiesKHR { pub supported_alpha: DisplayPlaneAlphaFlagsKHR, pub min_src_position: Offset2D, @@ -18379,7 +18726,7 @@ impl<'a> DisplayPlaneCapabilitiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplaySurfaceCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -18510,7 +18857,7 @@ impl<'a> DisplaySurfaceCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayPresentInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -18577,7 +18924,7 @@ impl<'a> DisplayPresentInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct SurfaceCapabilitiesKHR { pub 
min_image_count: u32, pub max_image_count: u32, @@ -18685,7 +19032,7 @@ impl<'a> SurfaceCapabilitiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct AndroidSurfaceCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -18765,7 +19112,7 @@ impl<'a> AndroidSurfaceCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ViSurfaceCreateInfoNN { pub s_type: StructureType, pub p_next: *const c_void, @@ -18842,7 +19189,7 @@ impl<'a> ViSurfaceCreateInfoNNBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct WaylandSurfaceCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -18928,7 +19275,7 @@ impl<'a> WaylandSurfaceCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct Win32SurfaceCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -19014,7 +19361,7 @@ impl<'a> Win32SurfaceCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct XlibSurfaceCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -19100,7 +19447,7 @@ impl<'a> XlibSurfaceCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct XcbSurfaceCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -19186,7 +19533,7 @@ impl<'a> XcbSurfaceCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImagePipeSurfaceCreateInfoFUCHSIA { pub s_type: StructureType, pub p_next: *const c_void, @@ -19268,8 +19615,91 @@ impl<'a> ImagePipeSurfaceCreateInfoFUCHSIABuilder<'a> { } } #[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct StreamDescriptorSurfaceCreateInfoGGP { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: 
StreamDescriptorSurfaceCreateFlagsGGP, + pub stream_descriptor: GgpStreamDescriptor, +} +impl ::std::default::Default for StreamDescriptorSurfaceCreateInfoGGP { + fn default() -> StreamDescriptorSurfaceCreateInfoGGP { + StreamDescriptorSurfaceCreateInfoGGP { + s_type: StructureType::STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP, + p_next: ::std::ptr::null(), + flags: StreamDescriptorSurfaceCreateFlagsGGP::default(), + stream_descriptor: GgpStreamDescriptor::default(), + } + } +} +impl StreamDescriptorSurfaceCreateInfoGGP { + pub fn builder<'a>() -> StreamDescriptorSurfaceCreateInfoGGPBuilder<'a> { + StreamDescriptorSurfaceCreateInfoGGPBuilder { + inner: StreamDescriptorSurfaceCreateInfoGGP::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct StreamDescriptorSurfaceCreateInfoGGPBuilder<'a> { + inner: StreamDescriptorSurfaceCreateInfoGGP, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsStreamDescriptorSurfaceCreateInfoGGP {} +impl<'a> ::std::ops::Deref for StreamDescriptorSurfaceCreateInfoGGPBuilder<'a> { + type Target = StreamDescriptorSurfaceCreateInfoGGP; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for StreamDescriptorSurfaceCreateInfoGGPBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> StreamDescriptorSurfaceCreateInfoGGPBuilder<'a> { + pub fn flags( + mut self, + flags: StreamDescriptorSurfaceCreateFlagsGGP, + ) -> StreamDescriptorSurfaceCreateInfoGGPBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn stream_descriptor( + mut self, + stream_descriptor: GgpStreamDescriptor, + ) -> StreamDescriptorSurfaceCreateInfoGGPBuilder<'a> { + self.inner.stream_descriptor = stream_descriptor; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> StreamDescriptorSurfaceCreateInfoGGPBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> StreamDescriptorSurfaceCreateInfoGGP { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] +#[doc = ""] pub struct SurfaceFormatKHR { pub format: Format, pub color_space: ColorSpaceKHR, @@ -19316,7 +19746,7 @@ impl<'a> SurfaceFormatKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SwapchainCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -19501,7 +19931,7 @@ impl<'a> SwapchainCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PresentInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -19601,7 +20031,7 @@ impl<'a> PresentInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] +#[doc = ""] pub struct DebugReportCallbackCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -19688,7 +20118,7 @@ impl<'a> DebugReportCallbackCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ValidationFlagsEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ 
-19749,7 +20179,7 @@ impl<'a> ValidationFlagsEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ValidationFeaturesEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -19822,7 +20252,7 @@ impl<'a> ValidationFeaturesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineRasterizationStateRasterizationOrderAMD { pub s_type: StructureType, pub p_next: *const c_void, @@ -19886,7 +20316,7 @@ impl<'a> PipelineRasterizationStateRasterizationOrderAMDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DebugMarkerObjectNameInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -19975,7 +20405,7 @@ impl<'a> DebugMarkerObjectNameInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DebugMarkerObjectTagInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -20070,7 +20500,7 @@ impl<'a> DebugMarkerObjectTagInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DebugMarkerMarkerInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -20150,7 +20580,7 @@ impl<'a> DebugMarkerMarkerInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DedicatedAllocationImageCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -20208,7 +20638,7 @@ impl<'a> DedicatedAllocationImageCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DedicatedAllocationBufferCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -20266,7 +20696,7 @@ impl<'a> DedicatedAllocationBufferCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DedicatedAllocationMemoryAllocateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -20327,7 +20757,7 @@ impl<'a> 
DedicatedAllocationMemoryAllocateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalImageFormatPropertiesNV { pub image_format_properties: ImageFormatProperties, pub external_memory_features: ExternalMemoryFeatureFlagsNV, @@ -20396,7 +20826,7 @@ impl<'a> ExternalImageFormatPropertiesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalMemoryImageCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -20454,7 +20884,7 @@ impl<'a> ExternalMemoryImageCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExportMemoryAllocateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -20512,7 +20942,7 @@ impl<'a> ExportMemoryAllocateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImportMemoryWin32HandleInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -20576,7 +21006,7 @@ impl<'a> ImportMemoryWin32HandleInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExportMemoryWin32HandleInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -20640,7 +21070,7 @@ impl<'a> ExportMemoryWin32HandleInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct Win32KeyedMutexAcquireReleaseInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -20743,166 +21173,267 @@ impl<'a> Win32KeyedMutexAcquireReleaseInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct DeviceGeneratedCommandsFeaturesNVX { +#[doc = ""] +pub struct PhysicalDeviceDeviceGeneratedCommandsFeaturesNV { pub s_type: StructureType, - pub p_next: *const c_void, - pub compute_binding_point_support: Bool32, + pub p_next: *mut c_void, + pub device_generated_commands: Bool32, } -impl ::std::default::Default for 
DeviceGeneratedCommandsFeaturesNVX { - fn default() -> DeviceGeneratedCommandsFeaturesNVX { - DeviceGeneratedCommandsFeaturesNVX { - s_type: StructureType::DEVICE_GENERATED_COMMANDS_FEATURES_NVX, - p_next: ::std::ptr::null(), - compute_binding_point_support: Bool32::default(), +impl ::std::default::Default for PhysicalDeviceDeviceGeneratedCommandsFeaturesNV { + fn default() -> PhysicalDeviceDeviceGeneratedCommandsFeaturesNV { + PhysicalDeviceDeviceGeneratedCommandsFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + device_generated_commands: Bool32::default(), } } } -impl DeviceGeneratedCommandsFeaturesNVX { - pub fn builder<'a>() -> DeviceGeneratedCommandsFeaturesNVXBuilder<'a> { - DeviceGeneratedCommandsFeaturesNVXBuilder { - inner: DeviceGeneratedCommandsFeaturesNVX::default(), +impl PhysicalDeviceDeviceGeneratedCommandsFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceDeviceGeneratedCommandsFeaturesNVBuilder<'a> { + PhysicalDeviceDeviceGeneratedCommandsFeaturesNVBuilder { + inner: PhysicalDeviceDeviceGeneratedCommandsFeaturesNV::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct DeviceGeneratedCommandsFeaturesNVXBuilder<'a> { - inner: DeviceGeneratedCommandsFeaturesNVX, +pub struct PhysicalDeviceDeviceGeneratedCommandsFeaturesNVBuilder<'a> { + inner: PhysicalDeviceDeviceGeneratedCommandsFeaturesNV, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsDeviceGeneratedCommandsFeaturesNVX {} -impl<'a> ::std::ops::Deref for DeviceGeneratedCommandsFeaturesNVXBuilder<'a> { - type Target = DeviceGeneratedCommandsFeaturesNVX; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDeviceGeneratedCommandsFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDeviceGeneratedCommandsFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDeviceGeneratedCommandsFeaturesNVBuilder<'a> { + type Target = 
PhysicalDeviceDeviceGeneratedCommandsFeaturesNV; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for DeviceGeneratedCommandsFeaturesNVXBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDeviceGeneratedCommandsFeaturesNVBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> DeviceGeneratedCommandsFeaturesNVXBuilder<'a> { - pub fn compute_binding_point_support( +impl<'a> PhysicalDeviceDeviceGeneratedCommandsFeaturesNVBuilder<'a> { + pub fn device_generated_commands( mut self, - compute_binding_point_support: bool, - ) -> DeviceGeneratedCommandsFeaturesNVXBuilder<'a> { - self.inner.compute_binding_point_support = compute_binding_point_support.into(); - self - } - #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] - #[doc = r" method only exists on structs that can be passed to a function directly. Only"] - #[doc = r" valid extension structs can be pushed into the chain."] - #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] - #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( - mut self, - next: &'a mut T, - ) -> DeviceGeneratedCommandsFeaturesNVXBuilder<'a> { - unsafe { - let next_ptr = next as *mut T as *mut BaseOutStructure; - let last_next = ptr_chain_iter(next).last().unwrap(); - (*last_next).p_next = self.inner.p_next as _; - self.inner.p_next = next_ptr as _; - } + device_generated_commands: bool, + ) -> PhysicalDeviceDeviceGeneratedCommandsFeaturesNVBuilder<'a> { + self.inner.device_generated_commands = device_generated_commands.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> DeviceGeneratedCommandsFeaturesNVX { + pub fn build(self) -> PhysicalDeviceDeviceGeneratedCommandsFeaturesNV { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct DeviceGeneratedCommandsLimitsNVX { +#[doc = ""] +pub struct PhysicalDeviceDeviceGeneratedCommandsPropertiesNV { pub s_type: StructureType, pub p_next: *const c_void, - pub max_indirect_commands_layout_token_count: u32, - pub max_object_entry_counts: u32, - pub min_sequence_count_buffer_offset_alignment: u32, - pub min_sequence_index_buffer_offset_alignment: u32, - pub min_commands_token_buffer_offset_alignment: u32, + pub max_graphics_shader_group_count: u32, + pub max_indirect_sequence_count: u32, + pub max_indirect_commands_token_count: u32, + pub max_indirect_commands_stream_count: u32, + pub max_indirect_commands_token_offset: u32, + pub max_indirect_commands_stream_stride: u32, + pub min_sequences_count_buffer_offset_alignment: u32, + pub min_sequences_index_buffer_offset_alignment: u32, + pub min_indirect_commands_buffer_offset_alignment: u32, } -impl ::std::default::Default for DeviceGeneratedCommandsLimitsNVX { - fn default() -> DeviceGeneratedCommandsLimitsNVX { - DeviceGeneratedCommandsLimitsNVX { - s_type: StructureType::DEVICE_GENERATED_COMMANDS_LIMITS_NVX, +impl ::std::default::Default for PhysicalDeviceDeviceGeneratedCommandsPropertiesNV { + fn default() -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNV { + PhysicalDeviceDeviceGeneratedCommandsPropertiesNV { + s_type: StructureType::PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV, p_next: ::std::ptr::null(), - max_indirect_commands_layout_token_count: u32::default(), - max_object_entry_counts: u32::default(), - min_sequence_count_buffer_offset_alignment: u32::default(), - min_sequence_index_buffer_offset_alignment: 
u32::default(), - min_commands_token_buffer_offset_alignment: u32::default(), + max_graphics_shader_group_count: u32::default(), + max_indirect_sequence_count: u32::default(), + max_indirect_commands_token_count: u32::default(), + max_indirect_commands_stream_count: u32::default(), + max_indirect_commands_token_offset: u32::default(), + max_indirect_commands_stream_stride: u32::default(), + min_sequences_count_buffer_offset_alignment: u32::default(), + min_sequences_index_buffer_offset_alignment: u32::default(), + min_indirect_commands_buffer_offset_alignment: u32::default(), } } } -impl DeviceGeneratedCommandsLimitsNVX { - pub fn builder<'a>() -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { - DeviceGeneratedCommandsLimitsNVXBuilder { - inner: DeviceGeneratedCommandsLimitsNVX::default(), +impl PhysicalDeviceDeviceGeneratedCommandsPropertiesNV { + pub fn builder<'a>() -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder { + inner: PhysicalDeviceDeviceGeneratedCommandsPropertiesNV::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct DeviceGeneratedCommandsLimitsNVXBuilder<'a> { - inner: DeviceGeneratedCommandsLimitsNVX, +pub struct PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + inner: PhysicalDeviceDeviceGeneratedCommandsPropertiesNV, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsDeviceGeneratedCommandsLimitsNVX {} -impl<'a> ::std::ops::Deref for DeviceGeneratedCommandsLimitsNVXBuilder<'a> { - type Target = DeviceGeneratedCommandsLimitsNVX; +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDeviceGeneratedCommandsPropertiesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + type Target = PhysicalDeviceDeviceGeneratedCommandsPropertiesNV; fn 
deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for DeviceGeneratedCommandsLimitsNVXBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { - pub fn max_indirect_commands_layout_token_count( +impl<'a> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + pub fn max_graphics_shader_group_count( mut self, - max_indirect_commands_layout_token_count: u32, - ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { - self.inner.max_indirect_commands_layout_token_count = - max_indirect_commands_layout_token_count; + max_graphics_shader_group_count: u32, + ) -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + self.inner.max_graphics_shader_group_count = max_graphics_shader_group_count; self } - pub fn max_object_entry_counts( + pub fn max_indirect_sequence_count( mut self, - max_object_entry_counts: u32, - ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { - self.inner.max_object_entry_counts = max_object_entry_counts; + max_indirect_sequence_count: u32, + ) -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + self.inner.max_indirect_sequence_count = max_indirect_sequence_count; self } - pub fn min_sequence_count_buffer_offset_alignment( + pub fn max_indirect_commands_token_count( mut self, - min_sequence_count_buffer_offset_alignment: u32, - ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { - self.inner.min_sequence_count_buffer_offset_alignment = - min_sequence_count_buffer_offset_alignment; + max_indirect_commands_token_count: u32, + ) -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + self.inner.max_indirect_commands_token_count = max_indirect_commands_token_count; self } - pub fn min_sequence_index_buffer_offset_alignment( + pub fn max_indirect_commands_stream_count( mut self, - 
min_sequence_index_buffer_offset_alignment: u32, - ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { - self.inner.min_sequence_index_buffer_offset_alignment = - min_sequence_index_buffer_offset_alignment; + max_indirect_commands_stream_count: u32, + ) -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + self.inner.max_indirect_commands_stream_count = max_indirect_commands_stream_count; self } - pub fn min_commands_token_buffer_offset_alignment( + pub fn max_indirect_commands_token_offset( mut self, - min_commands_token_buffer_offset_alignment: u32, - ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { - self.inner.min_commands_token_buffer_offset_alignment = - min_commands_token_buffer_offset_alignment; + max_indirect_commands_token_offset: u32, + ) -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + self.inner.max_indirect_commands_token_offset = max_indirect_commands_token_offset; + self + } + pub fn max_indirect_commands_stream_stride( + mut self, + max_indirect_commands_stream_stride: u32, + ) -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + self.inner.max_indirect_commands_stream_stride = max_indirect_commands_stream_stride; + self + } + pub fn min_sequences_count_buffer_offset_alignment( + mut self, + min_sequences_count_buffer_offset_alignment: u32, + ) -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + self.inner.min_sequences_count_buffer_offset_alignment = + min_sequences_count_buffer_offset_alignment; + self + } + pub fn min_sequences_index_buffer_offset_alignment( + mut self, + min_sequences_index_buffer_offset_alignment: u32, + ) -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + self.inner.min_sequences_index_buffer_offset_alignment = + min_sequences_index_buffer_offset_alignment; + self + } + pub fn min_indirect_commands_buffer_offset_alignment( + mut self, + min_indirect_commands_buffer_offset_alignment: u32, + ) -> 
PhysicalDeviceDeviceGeneratedCommandsPropertiesNVBuilder<'a> { + self.inner.min_indirect_commands_buffer_offset_alignment = + min_indirect_commands_buffer_offset_alignment; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDeviceGeneratedCommandsPropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct GraphicsShaderGroupCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub stage_count: u32, + pub p_stages: *const PipelineShaderStageCreateInfo, + pub p_vertex_input_state: *const PipelineVertexInputStateCreateInfo, + pub p_tessellation_state: *const PipelineTessellationStateCreateInfo, +} +impl ::std::default::Default for GraphicsShaderGroupCreateInfoNV { + fn default() -> GraphicsShaderGroupCreateInfoNV { + GraphicsShaderGroupCreateInfoNV { + s_type: StructureType::GRAPHICS_SHADER_GROUP_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + stage_count: u32::default(), + p_stages: ::std::ptr::null(), + p_vertex_input_state: ::std::ptr::null(), + p_tessellation_state: ::std::ptr::null(), + } + } +} +impl GraphicsShaderGroupCreateInfoNV { + pub fn builder<'a>() -> GraphicsShaderGroupCreateInfoNVBuilder<'a> { + GraphicsShaderGroupCreateInfoNVBuilder { + inner: GraphicsShaderGroupCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct GraphicsShaderGroupCreateInfoNVBuilder<'a> { + inner: GraphicsShaderGroupCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsGraphicsShaderGroupCreateInfoNV {} +impl<'a> ::std::ops::Deref for GraphicsShaderGroupCreateInfoNVBuilder<'a> { + type Target = GraphicsShaderGroupCreateInfoNV; + fn deref(&self) -> &Self::Target 
{ + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for GraphicsShaderGroupCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> GraphicsShaderGroupCreateInfoNVBuilder<'a> { + pub fn stages( + mut self, + stages: &'a [PipelineShaderStageCreateInfo], + ) -> GraphicsShaderGroupCreateInfoNVBuilder<'a> { + self.inner.stage_count = stages.len() as _; + self.inner.p_stages = stages.as_ptr(); + self + } + pub fn vertex_input_state( + mut self, + vertex_input_state: &'a PipelineVertexInputStateCreateInfo, + ) -> GraphicsShaderGroupCreateInfoNVBuilder<'a> { + self.inner.p_vertex_input_state = vertex_input_state; + self + } + pub fn tessellation_state( + mut self, + tessellation_state: &'a PipelineTessellationStateCreateInfo, + ) -> GraphicsShaderGroupCreateInfoNVBuilder<'a> { + self.inner.p_tessellation_state = tessellation_state; self } #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] @@ -20910,10 +21441,10 @@ impl<'a> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { + ) -> GraphicsShaderGroupCreateInfoNVBuilder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -20925,197 +21456,475 @@ impl<'a> DeviceGeneratedCommandsLimitsNVXBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> DeviceGeneratedCommandsLimitsNVX { + pub fn build(self) -> GraphicsShaderGroupCreateInfoNV { self.inner } } #[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] -pub struct IndirectCommandsTokenNVX { - pub token_type: IndirectCommandsTokenTypeNVX, - pub buffer: Buffer, - pub offset: DeviceSize, +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct GraphicsPipelineShaderGroupsCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub group_count: u32, + pub p_groups: *const GraphicsShaderGroupCreateInfoNV, + pub pipeline_count: u32, + pub p_pipelines: *const Pipeline, } -impl IndirectCommandsTokenNVX { - pub fn builder<'a>() -> IndirectCommandsTokenNVXBuilder<'a> { - IndirectCommandsTokenNVXBuilder { - inner: IndirectCommandsTokenNVX::default(), +impl ::std::default::Default for GraphicsPipelineShaderGroupsCreateInfoNV { + fn default() -> GraphicsPipelineShaderGroupsCreateInfoNV { + GraphicsPipelineShaderGroupsCreateInfoNV { + s_type: StructureType::GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + group_count: u32::default(), + p_groups: ::std::ptr::null(), + pipeline_count: u32::default(), + p_pipelines: ::std::ptr::null(), + } + } +} +impl GraphicsPipelineShaderGroupsCreateInfoNV { + pub fn builder<'a>() -> GraphicsPipelineShaderGroupsCreateInfoNVBuilder<'a> { + GraphicsPipelineShaderGroupsCreateInfoNVBuilder { + inner: GraphicsPipelineShaderGroupsCreateInfoNV::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct IndirectCommandsTokenNVXBuilder<'a> { - inner: IndirectCommandsTokenNVX, +pub struct GraphicsPipelineShaderGroupsCreateInfoNVBuilder<'a> { + inner: GraphicsPipelineShaderGroupsCreateInfoNV, marker: ::std::marker::PhantomData<&'a ()>, } -impl<'a> ::std::ops::Deref for 
IndirectCommandsTokenNVXBuilder<'a> { - type Target = IndirectCommandsTokenNVX; +unsafe impl ExtendsGraphicsPipelineCreateInfo + for GraphicsPipelineShaderGroupsCreateInfoNVBuilder<'_> +{ +} +unsafe impl ExtendsGraphicsPipelineCreateInfo for GraphicsPipelineShaderGroupsCreateInfoNV {} +impl<'a> ::std::ops::Deref for GraphicsPipelineShaderGroupsCreateInfoNVBuilder<'a> { + type Target = GraphicsPipelineShaderGroupsCreateInfoNV; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for IndirectCommandsTokenNVXBuilder<'a> { +impl<'a> ::std::ops::DerefMut for GraphicsPipelineShaderGroupsCreateInfoNVBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> IndirectCommandsTokenNVXBuilder<'a> { - pub fn token_type( +impl<'a> GraphicsPipelineShaderGroupsCreateInfoNVBuilder<'a> { + pub fn groups( mut self, - token_type: IndirectCommandsTokenTypeNVX, - ) -> IndirectCommandsTokenNVXBuilder<'a> { - self.inner.token_type = token_type; + groups: &'a [GraphicsShaderGroupCreateInfoNV], + ) -> GraphicsPipelineShaderGroupsCreateInfoNVBuilder<'a> { + self.inner.group_count = groups.len() as _; + self.inner.p_groups = groups.as_ptr(); self } - pub fn buffer(mut self, buffer: Buffer) -> IndirectCommandsTokenNVXBuilder<'a> { + pub fn pipelines( + mut self, + pipelines: &'a [Pipeline], + ) -> GraphicsPipelineShaderGroupsCreateInfoNVBuilder<'a> { + self.inner.pipeline_count = pipelines.len() as _; + self.inner.p_pipelines = pipelines.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> GraphicsPipelineShaderGroupsCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct BindShaderGroupIndirectCommandNV { + pub group_index: u32, +} +impl BindShaderGroupIndirectCommandNV { + pub fn builder<'a>() -> BindShaderGroupIndirectCommandNVBuilder<'a> { + BindShaderGroupIndirectCommandNVBuilder { + inner: BindShaderGroupIndirectCommandNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindShaderGroupIndirectCommandNVBuilder<'a> { + inner: BindShaderGroupIndirectCommandNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for BindShaderGroupIndirectCommandNVBuilder<'a> { + type Target = BindShaderGroupIndirectCommandNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindShaderGroupIndirectCommandNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindShaderGroupIndirectCommandNVBuilder<'a> { + pub fn group_index(mut self, group_index: u32) -> BindShaderGroupIndirectCommandNVBuilder<'a> { + self.inner.group_index = group_index; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BindShaderGroupIndirectCommandNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct BindIndexBufferIndirectCommandNV { + pub buffer_address: DeviceAddress, + pub size: u32, + pub index_type: IndexType, +} +impl BindIndexBufferIndirectCommandNV { + pub fn builder<'a>() -> BindIndexBufferIndirectCommandNVBuilder<'a> { + BindIndexBufferIndirectCommandNVBuilder { + inner: BindIndexBufferIndirectCommandNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindIndexBufferIndirectCommandNVBuilder<'a> { + inner: BindIndexBufferIndirectCommandNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for BindIndexBufferIndirectCommandNVBuilder<'a> { + type Target = BindIndexBufferIndirectCommandNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindIndexBufferIndirectCommandNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindIndexBufferIndirectCommandNVBuilder<'a> { + pub fn buffer_address( + mut self, + buffer_address: DeviceAddress, + ) -> BindIndexBufferIndirectCommandNVBuilder<'a> { + self.inner.buffer_address = buffer_address; + self + } + pub fn size(mut self, size: u32) -> BindIndexBufferIndirectCommandNVBuilder<'a> { + self.inner.size = size; + self + } + pub fn index_type( + mut self, + index_type: IndexType, + ) -> BindIndexBufferIndirectCommandNVBuilder<'a> { + self.inner.index_type = index_type; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BindIndexBufferIndirectCommandNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct BindVertexBufferIndirectCommandNV { + pub buffer_address: DeviceAddress, + pub size: u32, + pub stride: u32, +} +impl BindVertexBufferIndirectCommandNV { + pub fn builder<'a>() -> BindVertexBufferIndirectCommandNVBuilder<'a> { + BindVertexBufferIndirectCommandNVBuilder { + inner: BindVertexBufferIndirectCommandNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BindVertexBufferIndirectCommandNVBuilder<'a> { + inner: BindVertexBufferIndirectCommandNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for BindVertexBufferIndirectCommandNVBuilder<'a> { + type Target = BindVertexBufferIndirectCommandNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BindVertexBufferIndirectCommandNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BindVertexBufferIndirectCommandNVBuilder<'a> { + pub fn buffer_address( + mut self, + buffer_address: DeviceAddress, + ) -> BindVertexBufferIndirectCommandNVBuilder<'a> { + self.inner.buffer_address = buffer_address; + self + } + pub fn size(mut self, size: u32) -> BindVertexBufferIndirectCommandNVBuilder<'a> { + self.inner.size = size; + self + } + pub fn stride(mut self, stride: u32) -> BindVertexBufferIndirectCommandNVBuilder<'a> { + self.inner.stride = stride; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BindVertexBufferIndirectCommandNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct SetStateFlagsIndirectCommandNV { + pub data: u32, +} +impl SetStateFlagsIndirectCommandNV { + pub fn builder<'a>() -> SetStateFlagsIndirectCommandNVBuilder<'a> { + SetStateFlagsIndirectCommandNVBuilder { + inner: SetStateFlagsIndirectCommandNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SetStateFlagsIndirectCommandNVBuilder<'a> { + inner: SetStateFlagsIndirectCommandNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for SetStateFlagsIndirectCommandNVBuilder<'a> { + type Target = SetStateFlagsIndirectCommandNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SetStateFlagsIndirectCommandNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SetStateFlagsIndirectCommandNVBuilder<'a> { + pub fn data(mut self, data: u32) -> SetStateFlagsIndirectCommandNVBuilder<'a> { + self.inner.data = data; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SetStateFlagsIndirectCommandNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct IndirectCommandsStreamNV { + pub buffer: Buffer, + pub offset: DeviceSize, +} +impl IndirectCommandsStreamNV { + pub fn builder<'a>() -> IndirectCommandsStreamNVBuilder<'a> { + IndirectCommandsStreamNVBuilder { + inner: IndirectCommandsStreamNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct IndirectCommandsStreamNVBuilder<'a> { + inner: IndirectCommandsStreamNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for IndirectCommandsStreamNVBuilder<'a> { + type Target = IndirectCommandsStreamNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for IndirectCommandsStreamNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> IndirectCommandsStreamNVBuilder<'a> { + pub fn buffer(mut self, buffer: Buffer) -> IndirectCommandsStreamNVBuilder<'a> { self.inner.buffer = buffer; self } - pub fn offset(mut self, offset: DeviceSize) -> IndirectCommandsTokenNVXBuilder<'a> { + pub fn offset(mut self, offset: DeviceSize) -> IndirectCommandsStreamNVBuilder<'a> { self.inner.offset = offset; self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> IndirectCommandsTokenNVX { - self.inner - } -} -#[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] -pub struct IndirectCommandsLayoutTokenNVX { - pub token_type: IndirectCommandsTokenTypeNVX, - pub binding_unit: u32, - pub dynamic_count: u32, - pub divisor: u32, -} -impl IndirectCommandsLayoutTokenNVX { - pub fn builder<'a>() -> IndirectCommandsLayoutTokenNVXBuilder<'a> { - IndirectCommandsLayoutTokenNVXBuilder { - inner: IndirectCommandsLayoutTokenNVX::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct IndirectCommandsLayoutTokenNVXBuilder<'a> { - inner: IndirectCommandsLayoutTokenNVX, - marker: ::std::marker::PhantomData<&'a ()>, -} -impl<'a> ::std::ops::Deref for IndirectCommandsLayoutTokenNVXBuilder<'a> { - type Target = IndirectCommandsLayoutTokenNVX; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for IndirectCommandsLayoutTokenNVXBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> IndirectCommandsLayoutTokenNVXBuilder<'a> { - pub fn token_type( - mut self, - token_type: IndirectCommandsTokenTypeNVX, - ) -> IndirectCommandsLayoutTokenNVXBuilder<'a> { - self.inner.token_type = token_type; - self - } - pub fn binding_unit(mut self, binding_unit: u32) -> IndirectCommandsLayoutTokenNVXBuilder<'a> { - self.inner.binding_unit = binding_unit; - self - } - pub fn dynamic_count( - mut self, - dynamic_count: u32, - ) -> IndirectCommandsLayoutTokenNVXBuilder<'a> { - self.inner.dynamic_count = dynamic_count; - self - } - pub fn divisor(mut self, divisor: u32) -> IndirectCommandsLayoutTokenNVXBuilder<'a> { - self.inner.divisor = divisor; - self - } - #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] - #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> IndirectCommandsLayoutTokenNVX { + pub fn build(self) -> IndirectCommandsStreamNV { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct IndirectCommandsLayoutCreateInfoNVX { +#[doc = ""] +pub struct IndirectCommandsLayoutTokenNV { pub s_type: StructureType, pub p_next: *const c_void, - pub pipeline_bind_point: PipelineBindPoint, - pub flags: IndirectCommandsLayoutUsageFlagsNVX, - pub token_count: u32, - pub p_tokens: *const IndirectCommandsLayoutTokenNVX, + pub token_type: IndirectCommandsTokenTypeNV, + pub stream: u32, + pub offset: u32, + pub vertex_binding_unit: u32, + pub vertex_dynamic_stride: Bool32, + pub pushconstant_pipeline_layout: PipelineLayout, + pub pushconstant_shader_stage_flags: ShaderStageFlags, + pub pushconstant_offset: u32, + pub pushconstant_size: u32, + pub indirect_state_flags: IndirectStateFlagsNV, + pub index_type_count: u32, + pub p_index_types: *const IndexType, + pub p_index_type_values: *const u32, } -impl ::std::default::Default for IndirectCommandsLayoutCreateInfoNVX { - fn default() -> IndirectCommandsLayoutCreateInfoNVX { - IndirectCommandsLayoutCreateInfoNVX { - s_type: StructureType::INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX, +impl ::std::default::Default for IndirectCommandsLayoutTokenNV { + fn default() -> IndirectCommandsLayoutTokenNV { + IndirectCommandsLayoutTokenNV { + s_type: StructureType::INDIRECT_COMMANDS_LAYOUT_TOKEN_NV, p_next: ::std::ptr::null(), - pipeline_bind_point: PipelineBindPoint::default(), - flags: IndirectCommandsLayoutUsageFlagsNVX::default(), - token_count: u32::default(), - p_tokens: ::std::ptr::null(), + token_type: IndirectCommandsTokenTypeNV::default(), + stream: u32::default(), + offset: u32::default(), + vertex_binding_unit: u32::default(), + 
vertex_dynamic_stride: Bool32::default(), + pushconstant_pipeline_layout: PipelineLayout::default(), + pushconstant_shader_stage_flags: ShaderStageFlags::default(), + pushconstant_offset: u32::default(), + pushconstant_size: u32::default(), + indirect_state_flags: IndirectStateFlagsNV::default(), + index_type_count: u32::default(), + p_index_types: ::std::ptr::null(), + p_index_type_values: ::std::ptr::null(), } } } -impl IndirectCommandsLayoutCreateInfoNVX { - pub fn builder<'a>() -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { - IndirectCommandsLayoutCreateInfoNVXBuilder { - inner: IndirectCommandsLayoutCreateInfoNVX::default(), +impl IndirectCommandsLayoutTokenNV { + pub fn builder<'a>() -> IndirectCommandsLayoutTokenNVBuilder<'a> { + IndirectCommandsLayoutTokenNVBuilder { + inner: IndirectCommandsLayoutTokenNV::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { - inner: IndirectCommandsLayoutCreateInfoNVX, +pub struct IndirectCommandsLayoutTokenNVBuilder<'a> { + inner: IndirectCommandsLayoutTokenNV, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsIndirectCommandsLayoutCreateInfoNVX {} -impl<'a> ::std::ops::Deref for IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { - type Target = IndirectCommandsLayoutCreateInfoNVX; +pub unsafe trait ExtendsIndirectCommandsLayoutTokenNV {} +impl<'a> ::std::ops::Deref for IndirectCommandsLayoutTokenNVBuilder<'a> { + type Target = IndirectCommandsLayoutTokenNV; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { +impl<'a> ::std::ops::DerefMut for IndirectCommandsLayoutTokenNVBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { - pub fn pipeline_bind_point( +impl<'a> IndirectCommandsLayoutTokenNVBuilder<'a> { + pub fn token_type( mut self, - 
pipeline_bind_point: PipelineBindPoint, - ) -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { - self.inner.pipeline_bind_point = pipeline_bind_point; + token_type: IndirectCommandsTokenTypeNV, + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.token_type = token_type; self } - pub fn flags( - mut self, - flags: IndirectCommandsLayoutUsageFlagsNVX, - ) -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { - self.inner.flags = flags; + pub fn stream(mut self, stream: u32) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.stream = stream; self } - pub fn tokens( + pub fn offset(mut self, offset: u32) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn vertex_binding_unit( mut self, - tokens: &'a [IndirectCommandsLayoutTokenNVX], - ) -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { - self.inner.token_count = tokens.len() as _; - self.inner.p_tokens = tokens.as_ptr(); + vertex_binding_unit: u32, + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.vertex_binding_unit = vertex_binding_unit; + self + } + pub fn vertex_dynamic_stride( + mut self, + vertex_dynamic_stride: bool, + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.vertex_dynamic_stride = vertex_dynamic_stride.into(); + self + } + pub fn pushconstant_pipeline_layout( + mut self, + pushconstant_pipeline_layout: PipelineLayout, + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.pushconstant_pipeline_layout = pushconstant_pipeline_layout; + self + } + pub fn pushconstant_shader_stage_flags( + mut self, + pushconstant_shader_stage_flags: ShaderStageFlags, + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.pushconstant_shader_stage_flags = pushconstant_shader_stage_flags; + self + } + pub fn pushconstant_offset( + mut self, + pushconstant_offset: u32, + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.pushconstant_offset = pushconstant_offset; + self + } + pub fn pushconstant_size( + mut 
self, + pushconstant_size: u32, + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.pushconstant_size = pushconstant_size; + self + } + pub fn indirect_state_flags( + mut self, + indirect_state_flags: IndirectStateFlagsNV, + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.indirect_state_flags = indirect_state_flags; + self + } + pub fn index_types( + mut self, + index_types: &'a [IndexType], + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.index_type_count = index_types.len() as _; + self.inner.p_index_types = index_types.as_ptr(); + self + } + pub fn index_type_values( + mut self, + index_type_values: &'a [u32], + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { + self.inner.index_type_count = index_type_values.len() as _; + self.inner.p_index_type_values = index_type_values.as_ptr(); self } #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] @@ -21123,10 +21932,10 @@ impl<'a> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { + ) -> IndirectCommandsLayoutTokenNVBuilder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -21138,38 +21947,151 @@ impl<'a> IndirectCommandsLayoutCreateInfoNVXBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> IndirectCommandsLayoutCreateInfoNVX { + pub fn build(self) -> IndirectCommandsLayoutTokenNV { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct CmdProcessCommandsInfoNVX { +#[doc = ""] +pub struct IndirectCommandsLayoutCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, - pub object_table: ObjectTableNVX, - pub indirect_commands_layout: IndirectCommandsLayoutNVX, - pub indirect_commands_token_count: u32, - pub p_indirect_commands_tokens: *const IndirectCommandsTokenNVX, - pub max_sequences_count: u32, - pub target_command_buffer: CommandBuffer, + pub flags: IndirectCommandsLayoutUsageFlagsNV, + pub pipeline_bind_point: PipelineBindPoint, + pub token_count: u32, + pub p_tokens: *const IndirectCommandsLayoutTokenNV, + pub stream_count: u32, + pub p_stream_strides: *const u32, +} +impl ::std::default::Default for IndirectCommandsLayoutCreateInfoNV { + fn default() -> IndirectCommandsLayoutCreateInfoNV { + IndirectCommandsLayoutCreateInfoNV { + s_type: StructureType::INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + flags: IndirectCommandsLayoutUsageFlagsNV::default(), + pipeline_bind_point: PipelineBindPoint::default(), + token_count: u32::default(), + p_tokens: ::std::ptr::null(), + stream_count: u32::default(), + p_stream_strides: ::std::ptr::null(), + } + } +} +impl IndirectCommandsLayoutCreateInfoNV { + pub fn builder<'a>() -> IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + IndirectCommandsLayoutCreateInfoNVBuilder { + inner: IndirectCommandsLayoutCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + inner: IndirectCommandsLayoutCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait 
ExtendsIndirectCommandsLayoutCreateInfoNV {} +impl<'a> ::std::ops::Deref for IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + type Target = IndirectCommandsLayoutCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + pub fn flags( + mut self, + flags: IndirectCommandsLayoutUsageFlagsNV, + ) -> IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn pipeline_bind_point( + mut self, + pipeline_bind_point: PipelineBindPoint, + ) -> IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + self.inner.pipeline_bind_point = pipeline_bind_point; + self + } + pub fn tokens( + mut self, + tokens: &'a [IndirectCommandsLayoutTokenNV], + ) -> IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + self.inner.token_count = tokens.len() as _; + self.inner.p_tokens = tokens.as_ptr(); + self + } + pub fn stream_strides( + mut self, + stream_strides: &'a [u32], + ) -> IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + self.inner.stream_count = stream_strides.len() as _; + self.inner.p_stream_strides = stream_strides.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> IndirectCommandsLayoutCreateInfoNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> IndirectCommandsLayoutCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct GeneratedCommandsInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub pipeline_bind_point: PipelineBindPoint, + pub pipeline: Pipeline, + pub indirect_commands_layout: IndirectCommandsLayoutNV, + pub stream_count: u32, + pub p_streams: *const IndirectCommandsStreamNV, + pub sequences_count: u32, + pub preprocess_buffer: Buffer, + pub preprocess_offset: DeviceSize, + pub preprocess_size: DeviceSize, pub sequences_count_buffer: Buffer, pub sequences_count_offset: DeviceSize, pub sequences_index_buffer: Buffer, pub sequences_index_offset: DeviceSize, } -impl ::std::default::Default for CmdProcessCommandsInfoNVX { - fn default() -> CmdProcessCommandsInfoNVX { - CmdProcessCommandsInfoNVX { - s_type: StructureType::CMD_PROCESS_COMMANDS_INFO_NVX, +impl ::std::default::Default for GeneratedCommandsInfoNV { + fn default() -> GeneratedCommandsInfoNV { + GeneratedCommandsInfoNV { + s_type: StructureType::GENERATED_COMMANDS_INFO_NV, p_next: ::std::ptr::null(), - object_table: 
ObjectTableNVX::default(), - indirect_commands_layout: IndirectCommandsLayoutNVX::default(), - indirect_commands_token_count: u32::default(), - p_indirect_commands_tokens: ::std::ptr::null(), - max_sequences_count: u32::default(), - target_command_buffer: CommandBuffer::default(), + pipeline_bind_point: PipelineBindPoint::default(), + pipeline: Pipeline::default(), + indirect_commands_layout: IndirectCommandsLayoutNV::default(), + stream_count: u32::default(), + p_streams: ::std::ptr::null(), + sequences_count: u32::default(), + preprocess_buffer: Buffer::default(), + preprocess_offset: DeviceSize::default(), + preprocess_size: DeviceSize::default(), sequences_count_buffer: Buffer::default(), sequences_count_offset: DeviceSize::default(), sequences_index_buffer: Buffer::default(), @@ -21177,93 +22099,108 @@ impl ::std::default::Default for CmdProcessCommandsInfoNVX { } } } -impl CmdProcessCommandsInfoNVX { - pub fn builder<'a>() -> CmdProcessCommandsInfoNVXBuilder<'a> { - CmdProcessCommandsInfoNVXBuilder { - inner: CmdProcessCommandsInfoNVX::default(), +impl GeneratedCommandsInfoNV { + pub fn builder<'a>() -> GeneratedCommandsInfoNVBuilder<'a> { + GeneratedCommandsInfoNVBuilder { + inner: GeneratedCommandsInfoNV::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct CmdProcessCommandsInfoNVXBuilder<'a> { - inner: CmdProcessCommandsInfoNVX, +pub struct GeneratedCommandsInfoNVBuilder<'a> { + inner: GeneratedCommandsInfoNV, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsCmdProcessCommandsInfoNVX {} -impl<'a> ::std::ops::Deref for CmdProcessCommandsInfoNVXBuilder<'a> { - type Target = CmdProcessCommandsInfoNVX; +pub unsafe trait ExtendsGeneratedCommandsInfoNV {} +impl<'a> ::std::ops::Deref for GeneratedCommandsInfoNVBuilder<'a> { + type Target = GeneratedCommandsInfoNV; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for CmdProcessCommandsInfoNVXBuilder<'a> { +impl<'a> 
::std::ops::DerefMut for GeneratedCommandsInfoNVBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> CmdProcessCommandsInfoNVXBuilder<'a> { - pub fn object_table( +impl<'a> GeneratedCommandsInfoNVBuilder<'a> { + pub fn pipeline_bind_point( mut self, - object_table: ObjectTableNVX, - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { - self.inner.object_table = object_table; + pipeline_bind_point: PipelineBindPoint, + ) -> GeneratedCommandsInfoNVBuilder<'a> { + self.inner.pipeline_bind_point = pipeline_bind_point; + self + } + pub fn pipeline(mut self, pipeline: Pipeline) -> GeneratedCommandsInfoNVBuilder<'a> { + self.inner.pipeline = pipeline; self } pub fn indirect_commands_layout( mut self, - indirect_commands_layout: IndirectCommandsLayoutNVX, - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + indirect_commands_layout: IndirectCommandsLayoutNV, + ) -> GeneratedCommandsInfoNVBuilder<'a> { self.inner.indirect_commands_layout = indirect_commands_layout; self } - pub fn indirect_commands_tokens( + pub fn streams( mut self, - indirect_commands_tokens: &'a [IndirectCommandsTokenNVX], - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { - self.inner.indirect_commands_token_count = indirect_commands_tokens.len() as _; - self.inner.p_indirect_commands_tokens = indirect_commands_tokens.as_ptr(); + streams: &'a [IndirectCommandsStreamNV], + ) -> GeneratedCommandsInfoNVBuilder<'a> { + self.inner.stream_count = streams.len() as _; + self.inner.p_streams = streams.as_ptr(); self } - pub fn max_sequences_count( - mut self, - max_sequences_count: u32, - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { - self.inner.max_sequences_count = max_sequences_count; + pub fn sequences_count(mut self, sequences_count: u32) -> GeneratedCommandsInfoNVBuilder<'a> { + self.inner.sequences_count = sequences_count; self } - pub fn target_command_buffer( + pub fn preprocess_buffer( mut self, - target_command_buffer: CommandBuffer, - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { - 
self.inner.target_command_buffer = target_command_buffer; + preprocess_buffer: Buffer, + ) -> GeneratedCommandsInfoNVBuilder<'a> { + self.inner.preprocess_buffer = preprocess_buffer; + self + } + pub fn preprocess_offset( + mut self, + preprocess_offset: DeviceSize, + ) -> GeneratedCommandsInfoNVBuilder<'a> { + self.inner.preprocess_offset = preprocess_offset; + self + } + pub fn preprocess_size( + mut self, + preprocess_size: DeviceSize, + ) -> GeneratedCommandsInfoNVBuilder<'a> { + self.inner.preprocess_size = preprocess_size; self } pub fn sequences_count_buffer( mut self, sequences_count_buffer: Buffer, - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + ) -> GeneratedCommandsInfoNVBuilder<'a> { self.inner.sequences_count_buffer = sequences_count_buffer; self } pub fn sequences_count_offset( mut self, sequences_count_offset: DeviceSize, - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + ) -> GeneratedCommandsInfoNVBuilder<'a> { self.inner.sequences_count_offset = sequences_count_offset; self } pub fn sequences_index_buffer( mut self, sequences_index_buffer: Buffer, - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + ) -> GeneratedCommandsInfoNVBuilder<'a> { self.inner.sequences_index_buffer = sequences_index_buffer; self } pub fn sequences_index_offset( mut self, sequences_index_offset: DeviceSize, - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + ) -> GeneratedCommandsInfoNVBuilder<'a> { self.inner.sequences_index_offset = sequences_index_offset; self } @@ -21272,10 +22209,10 @@ impl<'a> CmdProcessCommandsInfoNVXBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> CmdProcessCommandsInfoNVXBuilder<'a> { + ) -> GeneratedCommandsInfoNVBuilder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let 
last_next = ptr_chain_iter(next).last().unwrap(); @@ -21287,75 +22224,84 @@ impl<'a> CmdProcessCommandsInfoNVXBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> CmdProcessCommandsInfoNVX { + pub fn build(self) -> GeneratedCommandsInfoNV { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct CmdReserveSpaceForCommandsInfoNVX { +#[doc = ""] +pub struct GeneratedCommandsMemoryRequirementsInfoNV { pub s_type: StructureType, pub p_next: *const c_void, - pub object_table: ObjectTableNVX, - pub indirect_commands_layout: IndirectCommandsLayoutNVX, + pub pipeline_bind_point: PipelineBindPoint, + pub pipeline: Pipeline, + pub indirect_commands_layout: IndirectCommandsLayoutNV, pub max_sequences_count: u32, } -impl ::std::default::Default for CmdReserveSpaceForCommandsInfoNVX { - fn default() -> CmdReserveSpaceForCommandsInfoNVX { - CmdReserveSpaceForCommandsInfoNVX { - s_type: StructureType::CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX, +impl ::std::default::Default for GeneratedCommandsMemoryRequirementsInfoNV { + fn default() -> GeneratedCommandsMemoryRequirementsInfoNV { + GeneratedCommandsMemoryRequirementsInfoNV { + s_type: StructureType::GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV, p_next: ::std::ptr::null(), - object_table: ObjectTableNVX::default(), - indirect_commands_layout: IndirectCommandsLayoutNVX::default(), + pipeline_bind_point: PipelineBindPoint::default(), + pipeline: Pipeline::default(), + indirect_commands_layout: IndirectCommandsLayoutNV::default(), max_sequences_count: u32::default(), } } } -impl CmdReserveSpaceForCommandsInfoNVX { - pub fn builder<'a>() -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { - CmdReserveSpaceForCommandsInfoNVXBuilder { - inner: 
CmdReserveSpaceForCommandsInfoNVX::default(), +impl GeneratedCommandsMemoryRequirementsInfoNV { + pub fn builder<'a>() -> GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { + GeneratedCommandsMemoryRequirementsInfoNVBuilder { + inner: GeneratedCommandsMemoryRequirementsInfoNV::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { - inner: CmdReserveSpaceForCommandsInfoNVX, +pub struct GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { + inner: GeneratedCommandsMemoryRequirementsInfoNV, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsCmdReserveSpaceForCommandsInfoNVX {} -impl<'a> ::std::ops::Deref for CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { - type Target = CmdReserveSpaceForCommandsInfoNVX; +pub unsafe trait ExtendsGeneratedCommandsMemoryRequirementsInfoNV {} +impl<'a> ::std::ops::Deref for GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { + type Target = GeneratedCommandsMemoryRequirementsInfoNV; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { +impl<'a> ::std::ops::DerefMut for GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { - pub fn object_table( +impl<'a> GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { + pub fn pipeline_bind_point( mut self, - object_table: ObjectTableNVX, - ) -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { - self.inner.object_table = object_table; + pipeline_bind_point: PipelineBindPoint, + ) -> GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { + self.inner.pipeline_bind_point = pipeline_bind_point; + self + } + pub fn pipeline( + mut self, + pipeline: Pipeline, + ) -> GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { + self.inner.pipeline = pipeline; self } pub fn indirect_commands_layout( mut 
self, - indirect_commands_layout: IndirectCommandsLayoutNVX, - ) -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { + indirect_commands_layout: IndirectCommandsLayoutNV, + ) -> GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { self.inner.indirect_commands_layout = indirect_commands_layout; self } pub fn max_sequences_count( mut self, max_sequences_count: u32, - ) -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { + ) -> GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { self.inner.max_sequences_count = max_sequences_count; self } @@ -21364,10 +22310,10 @@ impl<'a> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { + ) -> GeneratedCommandsMemoryRequirementsInfoNVBuilder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -21379,507 +22325,13 @@ impl<'a> CmdReserveSpaceForCommandsInfoNVXBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> CmdReserveSpaceForCommandsInfoNVX { + pub fn build(self) -> GeneratedCommandsMemoryRequirementsInfoNV { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct ObjectTableCreateInfoNVX { - pub s_type: StructureType, - pub p_next: *const c_void, - pub object_count: u32, - pub p_object_entry_types: *const ObjectEntryTypeNVX, - pub p_object_entry_counts: *const u32, - pub p_object_entry_usage_flags: *const ObjectEntryUsageFlagsNVX, - pub max_uniform_buffers_per_descriptor: u32, - pub max_storage_buffers_per_descriptor: u32, - pub max_storage_images_per_descriptor: u32, - pub max_sampled_images_per_descriptor: u32, - pub max_pipeline_layouts: u32, -} -impl ::std::default::Default for ObjectTableCreateInfoNVX { - fn default() -> ObjectTableCreateInfoNVX { - ObjectTableCreateInfoNVX { - s_type: StructureType::OBJECT_TABLE_CREATE_INFO_NVX, - p_next: ::std::ptr::null(), - object_count: u32::default(), - p_object_entry_types: ::std::ptr::null(), - p_object_entry_counts: ::std::ptr::null(), - p_object_entry_usage_flags: ::std::ptr::null(), - max_uniform_buffers_per_descriptor: u32::default(), - max_storage_buffers_per_descriptor: u32::default(), - max_storage_images_per_descriptor: u32::default(), - max_sampled_images_per_descriptor: u32::default(), - max_pipeline_layouts: u32::default(), - } - } -} -impl ObjectTableCreateInfoNVX { - pub fn builder<'a>() -> ObjectTableCreateInfoNVXBuilder<'a> { - ObjectTableCreateInfoNVXBuilder { - inner: ObjectTableCreateInfoNVX::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct ObjectTableCreateInfoNVXBuilder<'a> { - inner: ObjectTableCreateInfoNVX, - marker: ::std::marker::PhantomData<&'a ()>, -} -pub unsafe trait ExtendsObjectTableCreateInfoNVX {} -impl<'a> ::std::ops::Deref for 
ObjectTableCreateInfoNVXBuilder<'a> { - type Target = ObjectTableCreateInfoNVX; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for ObjectTableCreateInfoNVXBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> ObjectTableCreateInfoNVXBuilder<'a> { - pub fn object_entry_types( - mut self, - object_entry_types: &'a [ObjectEntryTypeNVX], - ) -> ObjectTableCreateInfoNVXBuilder<'a> { - self.inner.object_count = object_entry_types.len() as _; - self.inner.p_object_entry_types = object_entry_types.as_ptr(); - self - } - pub fn object_entry_counts( - mut self, - object_entry_counts: &'a [u32], - ) -> ObjectTableCreateInfoNVXBuilder<'a> { - self.inner.object_count = object_entry_counts.len() as _; - self.inner.p_object_entry_counts = object_entry_counts.as_ptr(); - self - } - pub fn object_entry_usage_flags( - mut self, - object_entry_usage_flags: &'a [ObjectEntryUsageFlagsNVX], - ) -> ObjectTableCreateInfoNVXBuilder<'a> { - self.inner.object_count = object_entry_usage_flags.len() as _; - self.inner.p_object_entry_usage_flags = object_entry_usage_flags.as_ptr(); - self - } - pub fn max_uniform_buffers_per_descriptor( - mut self, - max_uniform_buffers_per_descriptor: u32, - ) -> ObjectTableCreateInfoNVXBuilder<'a> { - self.inner.max_uniform_buffers_per_descriptor = max_uniform_buffers_per_descriptor; - self - } - pub fn max_storage_buffers_per_descriptor( - mut self, - max_storage_buffers_per_descriptor: u32, - ) -> ObjectTableCreateInfoNVXBuilder<'a> { - self.inner.max_storage_buffers_per_descriptor = max_storage_buffers_per_descriptor; - self - } - pub fn max_storage_images_per_descriptor( - mut self, - max_storage_images_per_descriptor: u32, - ) -> ObjectTableCreateInfoNVXBuilder<'a> { - self.inner.max_storage_images_per_descriptor = max_storage_images_per_descriptor; - self - } - pub fn max_sampled_images_per_descriptor( - mut self, - max_sampled_images_per_descriptor: u32, - ) -> 
ObjectTableCreateInfoNVXBuilder<'a> { - self.inner.max_sampled_images_per_descriptor = max_sampled_images_per_descriptor; - self - } - pub fn max_pipeline_layouts( - mut self, - max_pipeline_layouts: u32, - ) -> ObjectTableCreateInfoNVXBuilder<'a> { - self.inner.max_pipeline_layouts = max_pipeline_layouts; - self - } - #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] - #[doc = r" method only exists on structs that can be passed to a function directly. Only"] - #[doc = r" valid extension structs can be pushed into the chain."] - #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] - #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( - mut self, - next: &'a mut T, - ) -> ObjectTableCreateInfoNVXBuilder<'a> { - unsafe { - let next_ptr = next as *mut T as *mut BaseOutStructure; - let last_next = ptr_chain_iter(next).last().unwrap(); - (*last_next).p_next = self.inner.p_next as _; - self.inner.p_next = next_ptr as _; - } - self - } - #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ObjectTableCreateInfoNVX { - self.inner - } -} -#[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] -pub struct ObjectTableEntryNVX { - pub ty: ObjectEntryTypeNVX, - pub flags: ObjectEntryUsageFlagsNVX, -} -impl ObjectTableEntryNVX { - pub fn builder<'a>() -> ObjectTableEntryNVXBuilder<'a> { - ObjectTableEntryNVXBuilder { - inner: ObjectTableEntryNVX::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct ObjectTableEntryNVXBuilder<'a> { - inner: ObjectTableEntryNVX, - marker: ::std::marker::PhantomData<&'a ()>, -} -impl<'a> ::std::ops::Deref for ObjectTableEntryNVXBuilder<'a> { - type Target = ObjectTableEntryNVX; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for ObjectTableEntryNVXBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> ObjectTableEntryNVXBuilder<'a> { - pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTableEntryNVXBuilder<'a> { - self.inner.ty = ty; - self - } - pub fn flags(mut self, flags: ObjectEntryUsageFlagsNVX) -> ObjectTableEntryNVXBuilder<'a> { - self.inner.flags = flags; - self - } - #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ObjectTableEntryNVX { - self.inner - } -} -#[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] -pub struct ObjectTablePipelineEntryNVX { - pub ty: ObjectEntryTypeNVX, - pub flags: ObjectEntryUsageFlagsNVX, - pub pipeline: Pipeline, -} -impl ObjectTablePipelineEntryNVX { - pub fn builder<'a>() -> ObjectTablePipelineEntryNVXBuilder<'a> { - ObjectTablePipelineEntryNVXBuilder { - inner: ObjectTablePipelineEntryNVX::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct ObjectTablePipelineEntryNVXBuilder<'a> { - inner: ObjectTablePipelineEntryNVX, - marker: ::std::marker::PhantomData<&'a ()>, -} -impl<'a> ::std::ops::Deref for ObjectTablePipelineEntryNVXBuilder<'a> { - type Target = ObjectTablePipelineEntryNVX; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for ObjectTablePipelineEntryNVXBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> ObjectTablePipelineEntryNVXBuilder<'a> { - pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTablePipelineEntryNVXBuilder<'a> { - self.inner.ty = ty; - self - } - pub fn flags( - mut self, - flags: ObjectEntryUsageFlagsNVX, - ) -> ObjectTablePipelineEntryNVXBuilder<'a> { - self.inner.flags = flags; - self - } - pub fn pipeline(mut self, pipeline: Pipeline) -> ObjectTablePipelineEntryNVXBuilder<'a> { - self.inner.pipeline = pipeline; - self - } - #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ObjectTablePipelineEntryNVX { - self.inner - } -} -#[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] -pub struct ObjectTableDescriptorSetEntryNVX { - pub ty: ObjectEntryTypeNVX, - pub flags: ObjectEntryUsageFlagsNVX, - pub pipeline_layout: PipelineLayout, - pub descriptor_set: DescriptorSet, -} -impl ObjectTableDescriptorSetEntryNVX { - pub fn builder<'a>() -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { - ObjectTableDescriptorSetEntryNVXBuilder { - inner: ObjectTableDescriptorSetEntryNVX::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct ObjectTableDescriptorSetEntryNVXBuilder<'a> { - inner: ObjectTableDescriptorSetEntryNVX, - marker: ::std::marker::PhantomData<&'a ()>, -} -impl<'a> ::std::ops::Deref for ObjectTableDescriptorSetEntryNVXBuilder<'a> { - type Target = ObjectTableDescriptorSetEntryNVX; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for ObjectTableDescriptorSetEntryNVXBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> ObjectTableDescriptorSetEntryNVXBuilder<'a> { - pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { - self.inner.ty = ty; - self - } - pub fn flags( - mut self, - flags: ObjectEntryUsageFlagsNVX, - ) -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { - self.inner.flags = flags; - self - } - pub fn pipeline_layout( - mut self, - pipeline_layout: PipelineLayout, - ) -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { - self.inner.pipeline_layout = pipeline_layout; - self - } - pub fn descriptor_set( - mut self, - descriptor_set: DescriptorSet, - ) -> ObjectTableDescriptorSetEntryNVXBuilder<'a> { - self.inner.descriptor_set = descriptor_set; - self - } - #[doc = r" Calling build 
will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ObjectTableDescriptorSetEntryNVX { - self.inner - } -} -#[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] -pub struct ObjectTableVertexBufferEntryNVX { - pub ty: ObjectEntryTypeNVX, - pub flags: ObjectEntryUsageFlagsNVX, - pub buffer: Buffer, -} -impl ObjectTableVertexBufferEntryNVX { - pub fn builder<'a>() -> ObjectTableVertexBufferEntryNVXBuilder<'a> { - ObjectTableVertexBufferEntryNVXBuilder { - inner: ObjectTableVertexBufferEntryNVX::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct ObjectTableVertexBufferEntryNVXBuilder<'a> { - inner: ObjectTableVertexBufferEntryNVX, - marker: ::std::marker::PhantomData<&'a ()>, -} -impl<'a> ::std::ops::Deref for ObjectTableVertexBufferEntryNVXBuilder<'a> { - type Target = ObjectTableVertexBufferEntryNVX; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for ObjectTableVertexBufferEntryNVXBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> ObjectTableVertexBufferEntryNVXBuilder<'a> { - pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTableVertexBufferEntryNVXBuilder<'a> { - self.inner.ty = ty; - self - } - pub fn flags( - mut self, - flags: ObjectEntryUsageFlagsNVX, - ) -> ObjectTableVertexBufferEntryNVXBuilder<'a> { - self.inner.flags = flags; - self - } - pub fn buffer(mut self, buffer: Buffer) -> ObjectTableVertexBufferEntryNVXBuilder<'a> { - self.inner.buffer = buffer; - self - } - #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ObjectTableVertexBufferEntryNVX { - self.inner - } -} -#[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] -pub struct ObjectTableIndexBufferEntryNVX { - pub ty: ObjectEntryTypeNVX, - pub flags: ObjectEntryUsageFlagsNVX, - pub buffer: Buffer, - pub index_type: IndexType, -} -impl ObjectTableIndexBufferEntryNVX { - pub fn builder<'a>() -> ObjectTableIndexBufferEntryNVXBuilder<'a> { - ObjectTableIndexBufferEntryNVXBuilder { - inner: ObjectTableIndexBufferEntryNVX::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct ObjectTableIndexBufferEntryNVXBuilder<'a> { - inner: ObjectTableIndexBufferEntryNVX, - marker: ::std::marker::PhantomData<&'a ()>, -} -impl<'a> ::std::ops::Deref for ObjectTableIndexBufferEntryNVXBuilder<'a> { - type Target = ObjectTableIndexBufferEntryNVX; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for ObjectTableIndexBufferEntryNVXBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> ObjectTableIndexBufferEntryNVXBuilder<'a> { - pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTableIndexBufferEntryNVXBuilder<'a> { - self.inner.ty = ty; - self - } - pub fn flags( - mut self, - flags: ObjectEntryUsageFlagsNVX, - ) -> ObjectTableIndexBufferEntryNVXBuilder<'a> { - self.inner.flags = flags; - self - } - pub fn buffer(mut self, buffer: Buffer) -> ObjectTableIndexBufferEntryNVXBuilder<'a> { - self.inner.buffer = buffer; - self - } - pub fn index_type( - mut self, - index_type: IndexType, - ) -> ObjectTableIndexBufferEntryNVXBuilder<'a> { - self.inner.index_type = index_type; - self - } - #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] - #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ObjectTableIndexBufferEntryNVX { - self.inner - } -} -#[repr(C)] -#[derive(Copy, Clone, Default, Debug)] -#[doc = ""] -pub struct ObjectTablePushConstantEntryNVX { - pub ty: ObjectEntryTypeNVX, - pub flags: ObjectEntryUsageFlagsNVX, - pub pipeline_layout: PipelineLayout, - pub stage_flags: ShaderStageFlags, -} -impl ObjectTablePushConstantEntryNVX { - pub fn builder<'a>() -> ObjectTablePushConstantEntryNVXBuilder<'a> { - ObjectTablePushConstantEntryNVXBuilder { - inner: ObjectTablePushConstantEntryNVX::default(), - marker: ::std::marker::PhantomData, - } - } -} -#[repr(transparent)] -pub struct ObjectTablePushConstantEntryNVXBuilder<'a> { - inner: ObjectTablePushConstantEntryNVX, - marker: ::std::marker::PhantomData<&'a ()>, -} -impl<'a> ::std::ops::Deref for ObjectTablePushConstantEntryNVXBuilder<'a> { - type Target = ObjectTablePushConstantEntryNVX; - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'a> ::std::ops::DerefMut for ObjectTablePushConstantEntryNVXBuilder<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} -impl<'a> ObjectTablePushConstantEntryNVXBuilder<'a> { - pub fn ty(mut self, ty: ObjectEntryTypeNVX) -> ObjectTablePushConstantEntryNVXBuilder<'a> { - self.inner.ty = ty; - self - } - pub fn flags( - mut self, - flags: ObjectEntryUsageFlagsNVX, - ) -> ObjectTablePushConstantEntryNVXBuilder<'a> { - self.inner.flags = flags; - self - } - pub fn pipeline_layout( - mut self, - pipeline_layout: PipelineLayout, - ) -> ObjectTablePushConstantEntryNVXBuilder<'a> { - self.inner.pipeline_layout = pipeline_layout; - self - } - pub fn stage_flags( - mut self, - stage_flags: ShaderStageFlags, - ) -> ObjectTablePushConstantEntryNVXBuilder<'a> { - self.inner.stage_flags = stage_flags; - self - } - #[doc = r" Calling build will **discard** all 
the lifetime information. Only call this if"] - #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] - #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ObjectTablePushConstantEntryNVX { - self.inner - } -} -#[repr(C)] -#[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceFeatures2 { pub s_type: StructureType, pub p_next: *mut c_void, @@ -21937,7 +22389,7 @@ impl<'a> PhysicalDeviceFeatures2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceProperties2 { pub s_type: StructureType, pub p_next: *mut c_void, @@ -22011,7 +22463,7 @@ impl<'a> PhysicalDeviceProperties2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct FormatProperties2 { pub s_type: StructureType, pub p_next: *mut c_void, @@ -22085,7 +22537,7 @@ impl<'a> FormatProperties2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageFormatProperties2 { pub s_type: StructureType, pub p_next: *mut c_void, @@ -22159,7 +22611,7 @@ impl<'a> ImageFormatProperties2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceImageFormatInfo2 { pub s_type: StructureType, pub p_next: *const c_void, @@ -22254,7 +22706,7 @@ impl<'a> PhysicalDeviceImageFormatInfo2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct QueueFamilyProperties2 { pub s_type: StructureType, pub p_next: *mut c_void, @@ -22328,7 +22780,7 @@ impl<'a> QueueFamilyProperties2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceMemoryProperties2 { pub s_type: StructureType, pub p_next: *mut c_void, @@ -22402,7 +22854,7 @@ impl<'a> PhysicalDeviceMemoryProperties2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub 
struct SparseImageFormatProperties2 { pub s_type: StructureType, pub p_next: *mut c_void, @@ -22476,7 +22928,7 @@ impl<'a> SparseImageFormatProperties2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceSparseImageFormatInfo2 { pub s_type: StructureType, pub p_next: *const c_void, @@ -22580,7 +23032,7 @@ impl<'a> PhysicalDeviceSparseImageFormatInfo2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDevicePushDescriptorPropertiesKHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -22641,75 +23093,75 @@ impl<'a> PhysicalDevicePushDescriptorPropertiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] -pub struct ConformanceVersionKHR { +#[doc = ""] +pub struct ConformanceVersion { pub major: u8, pub minor: u8, pub subminor: u8, pub patch: u8, } -impl ConformanceVersionKHR { - pub fn builder<'a>() -> ConformanceVersionKHRBuilder<'a> { - ConformanceVersionKHRBuilder { - inner: ConformanceVersionKHR::default(), +impl ConformanceVersion { + pub fn builder<'a>() -> ConformanceVersionBuilder<'a> { + ConformanceVersionBuilder { + inner: ConformanceVersion::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct ConformanceVersionKHRBuilder<'a> { - inner: ConformanceVersionKHR, +pub struct ConformanceVersionBuilder<'a> { + inner: ConformanceVersion, marker: ::std::marker::PhantomData<&'a ()>, } -impl<'a> ::std::ops::Deref for ConformanceVersionKHRBuilder<'a> { - type Target = ConformanceVersionKHR; +impl<'a> ::std::ops::Deref for ConformanceVersionBuilder<'a> { + type Target = ConformanceVersion; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for ConformanceVersionKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for ConformanceVersionBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> ConformanceVersionKHRBuilder<'a> { - pub fn 
major(mut self, major: u8) -> ConformanceVersionKHRBuilder<'a> { +impl<'a> ConformanceVersionBuilder<'a> { + pub fn major(mut self, major: u8) -> ConformanceVersionBuilder<'a> { self.inner.major = major; self } - pub fn minor(mut self, minor: u8) -> ConformanceVersionKHRBuilder<'a> { + pub fn minor(mut self, minor: u8) -> ConformanceVersionBuilder<'a> { self.inner.minor = minor; self } - pub fn subminor(mut self, subminor: u8) -> ConformanceVersionKHRBuilder<'a> { + pub fn subminor(mut self, subminor: u8) -> ConformanceVersionBuilder<'a> { self.inner.subminor = subminor; self } - pub fn patch(mut self, patch: u8) -> ConformanceVersionKHRBuilder<'a> { + pub fn patch(mut self, patch: u8) -> ConformanceVersionBuilder<'a> { self.inner.patch = patch; self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ConformanceVersionKHR { + pub fn build(self) -> ConformanceVersion { self.inner } } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] -pub struct PhysicalDeviceDriverPropertiesKHR { +#[doc = ""] +pub struct PhysicalDeviceDriverProperties { pub s_type: StructureType, pub p_next: *mut c_void, - pub driver_id: DriverIdKHR, - pub driver_name: [c_char; MAX_DRIVER_NAME_SIZE_KHR], - pub driver_info: [c_char; MAX_DRIVER_INFO_SIZE_KHR], - pub conformance_version: ConformanceVersionKHR, + pub driver_id: DriverId, + pub driver_name: [c_char; MAX_DRIVER_NAME_SIZE], + pub driver_info: [c_char; MAX_DRIVER_INFO_SIZE], + pub conformance_version: ConformanceVersion, } -impl fmt::Debug for PhysicalDeviceDriverPropertiesKHR { +impl fmt::Debug for PhysicalDeviceDriverProperties { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("PhysicalDeviceDriverPropertiesKHR") + fmt.debug_struct("PhysicalDeviceDriverProperties") 
.field("s_type", &self.s_type) .field("p_next", &self.p_next) .field("driver_id", &self.driver_id) @@ -22723,83 +23175,80 @@ impl fmt::Debug for PhysicalDeviceDriverPropertiesKHR { .finish() } } -impl ::std::default::Default for PhysicalDeviceDriverPropertiesKHR { - fn default() -> PhysicalDeviceDriverPropertiesKHR { - PhysicalDeviceDriverPropertiesKHR { - s_type: StructureType::PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR, +impl ::std::default::Default for PhysicalDeviceDriverProperties { + fn default() -> PhysicalDeviceDriverProperties { + PhysicalDeviceDriverProperties { + s_type: StructureType::PHYSICAL_DEVICE_DRIVER_PROPERTIES, p_next: ::std::ptr::null_mut(), - driver_id: DriverIdKHR::default(), + driver_id: DriverId::default(), driver_name: unsafe { ::std::mem::zeroed() }, driver_info: unsafe { ::std::mem::zeroed() }, - conformance_version: ConformanceVersionKHR::default(), + conformance_version: ConformanceVersion::default(), } } } -impl PhysicalDeviceDriverPropertiesKHR { - pub fn builder<'a>() -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { - PhysicalDeviceDriverPropertiesKHRBuilder { - inner: PhysicalDeviceDriverPropertiesKHR::default(), +impl PhysicalDeviceDriverProperties { + pub fn builder<'a>() -> PhysicalDeviceDriverPropertiesBuilder<'a> { + PhysicalDeviceDriverPropertiesBuilder { + inner: PhysicalDeviceDriverProperties::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceDriverPropertiesKHRBuilder<'a> { - inner: PhysicalDeviceDriverPropertiesKHR, +pub struct PhysicalDeviceDriverPropertiesBuilder<'a> { + inner: PhysicalDeviceDriverProperties, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDriverPropertiesKHRBuilder<'_> {} -unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDriverPropertiesKHR {} -impl<'a> ::std::ops::Deref for PhysicalDeviceDriverPropertiesKHRBuilder<'a> { - type Target = PhysicalDeviceDriverPropertiesKHR; +unsafe 
impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDriverPropertiesBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDriverProperties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDriverPropertiesBuilder<'a> { + type Target = PhysicalDeviceDriverProperties; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceDriverPropertiesKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDriverPropertiesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { - pub fn driver_id( - mut self, - driver_id: DriverIdKHR, - ) -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { +impl<'a> PhysicalDeviceDriverPropertiesBuilder<'a> { + pub fn driver_id(mut self, driver_id: DriverId) -> PhysicalDeviceDriverPropertiesBuilder<'a> { self.inner.driver_id = driver_id; self } pub fn driver_name( mut self, - driver_name: [c_char; MAX_DRIVER_NAME_SIZE_KHR], - ) -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + driver_name: [c_char; MAX_DRIVER_NAME_SIZE], + ) -> PhysicalDeviceDriverPropertiesBuilder<'a> { self.inner.driver_name = driver_name; self } pub fn driver_info( mut self, - driver_info: [c_char; MAX_DRIVER_INFO_SIZE_KHR], - ) -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + driver_info: [c_char; MAX_DRIVER_INFO_SIZE], + ) -> PhysicalDeviceDriverPropertiesBuilder<'a> { self.inner.driver_info = driver_info; self } pub fn conformance_version( mut self, - conformance_version: ConformanceVersionKHR, - ) -> PhysicalDeviceDriverPropertiesKHRBuilder<'a> { + conformance_version: ConformanceVersion, + ) -> PhysicalDeviceDriverPropertiesBuilder<'a> { self.inner.conformance_version = conformance_version; self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceDriverPropertiesKHR { + pub fn build(self) -> PhysicalDeviceDriverProperties { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PresentRegionsKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -22857,7 +23306,7 @@ impl<'a> PresentRegionsKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PresentRegionKHR { pub rectangle_count: u32, pub p_rectangles: *const RectLayerKHR, @@ -22909,7 +23358,7 @@ impl<'a> PresentRegionKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct RectLayerKHR { pub offset: Offset2D, pub extent: Extent2D, @@ -22961,74 +23410,74 @@ impl<'a> RectLayerKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceVariablePointerFeatures { +#[doc = ""] +pub struct PhysicalDeviceVariablePointersFeatures { pub s_type: StructureType, pub p_next: *mut c_void, pub variable_pointers_storage_buffer: Bool32, pub variable_pointers: Bool32, } -impl ::std::default::Default for PhysicalDeviceVariablePointerFeatures { - fn default() -> PhysicalDeviceVariablePointerFeatures { - PhysicalDeviceVariablePointerFeatures { - s_type: StructureType::PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES, +impl ::std::default::Default for PhysicalDeviceVariablePointersFeatures { + fn default() -> PhysicalDeviceVariablePointersFeatures { + PhysicalDeviceVariablePointersFeatures { + s_type: StructureType::PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, p_next: ::std::ptr::null_mut(), variable_pointers_storage_buffer: Bool32::default(), variable_pointers: Bool32::default(), } } } -impl PhysicalDeviceVariablePointerFeatures { - pub fn builder<'a>() -> PhysicalDeviceVariablePointerFeaturesBuilder<'a> { - 
PhysicalDeviceVariablePointerFeaturesBuilder { - inner: PhysicalDeviceVariablePointerFeatures::default(), +impl PhysicalDeviceVariablePointersFeatures { + pub fn builder<'a>() -> PhysicalDeviceVariablePointersFeaturesBuilder<'a> { + PhysicalDeviceVariablePointersFeaturesBuilder { + inner: PhysicalDeviceVariablePointersFeatures::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceVariablePointerFeaturesBuilder<'a> { - inner: PhysicalDeviceVariablePointerFeatures, +pub struct PhysicalDeviceVariablePointersFeaturesBuilder<'a> { + inner: PhysicalDeviceVariablePointersFeatures, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVariablePointerFeaturesBuilder<'_> {} -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVariablePointerFeatures {} -impl<'a> ::std::ops::Deref for PhysicalDeviceVariablePointerFeaturesBuilder<'a> { - type Target = PhysicalDeviceVariablePointerFeatures; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVariablePointersFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVariablePointersFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVariablePointersFeaturesBuilder<'a> { + type Target = PhysicalDeviceVariablePointersFeatures; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceVariablePointerFeaturesBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceVariablePointersFeaturesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceVariablePointerFeaturesBuilder<'a> { +impl<'a> PhysicalDeviceVariablePointersFeaturesBuilder<'a> { pub fn variable_pointers_storage_buffer( mut self, variable_pointers_storage_buffer: bool, - ) -> PhysicalDeviceVariablePointerFeaturesBuilder<'a> { + ) -> PhysicalDeviceVariablePointersFeaturesBuilder<'a> { self.inner.variable_pointers_storage_buffer = 
variable_pointers_storage_buffer.into(); self } pub fn variable_pointers( mut self, variable_pointers: bool, - ) -> PhysicalDeviceVariablePointerFeaturesBuilder<'a> { + ) -> PhysicalDeviceVariablePointersFeaturesBuilder<'a> { self.inner.variable_pointers = variable_pointers.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceVariablePointerFeatures { + pub fn build(self) -> PhysicalDeviceVariablePointersFeatures { self.inner } } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalMemoryProperties { pub external_memory_features: ExternalMemoryFeatureFlags, pub export_from_imported_handle_types: ExternalMemoryHandleTypeFlags, @@ -23089,7 +23538,7 @@ impl<'a> ExternalMemoryPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceExternalImageFormatInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -23150,7 +23599,7 @@ impl<'a> PhysicalDeviceExternalImageFormatInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalImageFormatProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -23208,7 +23657,7 @@ impl<'a> ExternalImageFormatPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceExternalBufferInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -23297,7 +23746,7 @@ impl<'a> PhysicalDeviceExternalBufferInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalBufferProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -23371,7 +23820,7 @@ impl<'a> ExternalBufferPropertiesBuilder<'a> { } 
#[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceIDProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -23465,7 +23914,7 @@ impl<'a> PhysicalDeviceIDPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalMemoryImageCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -23523,7 +23972,7 @@ impl<'a> ExternalMemoryImageCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalMemoryBufferCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -23581,7 +24030,7 @@ impl<'a> ExternalMemoryBufferCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExportMemoryAllocateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -23639,7 +24088,7 @@ impl<'a> ExportMemoryAllocateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImportMemoryWin32HandleInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -23709,7 +24158,7 @@ impl<'a> ImportMemoryWin32HandleInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExportMemoryWin32HandleInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -23779,7 +24228,7 @@ impl<'a> ExportMemoryWin32HandleInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryWin32HandlePropertiesKHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -23853,7 +24302,7 @@ impl<'a> MemoryWin32HandlePropertiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryGetWin32HandleInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -23933,7 +24382,7 @@ impl<'a> MemoryGetWin32HandleInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct 
ImportMemoryFdInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -23997,7 +24446,7 @@ impl<'a> ImportMemoryFdInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryFdPropertiesKHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -24068,7 +24517,7 @@ impl<'a> MemoryFdPropertiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryGetFdInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -24148,7 +24597,7 @@ impl<'a> MemoryGetFdInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct Win32KeyedMutexAcquireReleaseInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -24251,7 +24700,7 @@ impl<'a> Win32KeyedMutexAcquireReleaseInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceExternalSemaphoreInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -24325,7 +24774,7 @@ impl<'a> PhysicalDeviceExternalSemaphoreInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalSemaphoreProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -24417,7 +24866,7 @@ impl<'a> ExternalSemaphorePropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExportSemaphoreCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -24475,7 +24924,7 @@ impl<'a> ExportSemaphoreCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImportSemaphoreWin32HandleInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -24579,7 +25028,7 @@ impl<'a> ImportSemaphoreWin32HandleInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExportSemaphoreWin32HandleInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ 
-24649,7 +25098,7 @@ impl<'a> ExportSemaphoreWin32HandleInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct D3D12FenceSubmitInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -24722,7 +25171,7 @@ impl<'a> D3D12FenceSubmitInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SemaphoreGetWin32HandleInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -24802,7 +25251,7 @@ impl<'a> SemaphoreGetWin32HandleInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImportSemaphoreFdInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -24894,7 +25343,7 @@ impl<'a> ImportSemaphoreFdInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SemaphoreGetFdInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -24974,7 +25423,7 @@ impl<'a> SemaphoreGetFdInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceExternalFenceInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -25048,7 +25497,7 @@ impl<'a> PhysicalDeviceExternalFenceInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalFenceProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -25140,7 +25589,7 @@ impl<'a> ExternalFencePropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExportFenceCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -25198,7 +25647,7 @@ impl<'a> ExportFenceCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImportFenceWin32HandleInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -25296,7 +25745,7 @@ impl<'a> ImportFenceWin32HandleInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] 
-#[doc = ""] +#[doc = ""] pub struct ExportFenceWin32HandleInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -25366,7 +25815,7 @@ impl<'a> ExportFenceWin32HandleInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct FenceGetWin32HandleInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -25446,7 +25895,7 @@ impl<'a> FenceGetWin32HandleInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImportFenceFdInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -25538,7 +25987,7 @@ impl<'a> ImportFenceFdInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct FenceGetFdInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -25618,7 +26067,7 @@ impl<'a> FenceGetFdInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceMultiviewFeatures { pub s_type: StructureType, pub p_next: *mut c_void, @@ -25691,7 +26140,7 @@ impl<'a> PhysicalDeviceMultiviewFeaturesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceMultiviewProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -25758,7 +26207,7 @@ impl<'a> PhysicalDeviceMultiviewPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct RenderPassMultiviewCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -25840,7 +26289,7 @@ impl<'a> RenderPassMultiviewCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SurfaceCapabilities2EXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -25998,7 +26447,7 @@ impl<'a> SurfaceCapabilities2EXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayPowerInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ 
-26072,7 +26521,7 @@ impl<'a> DisplayPowerInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceEventInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -26146,7 +26595,7 @@ impl<'a> DeviceEventInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayEventInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -26220,7 +26669,7 @@ impl<'a> DisplayEventInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SwapchainCounterCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -26278,7 +26727,7 @@ impl<'a> SwapchainCounterCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceGroupProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -26370,7 +26819,7 @@ impl<'a> PhysicalDeviceGroupPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryAllocateFlagsInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -26431,7 +26880,7 @@ impl<'a> MemoryAllocateFlagsInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BindBufferMemoryInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -26514,7 +26963,7 @@ impl<'a> BindBufferMemoryInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BindBufferMemoryDeviceGroupInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -26575,7 +27024,7 @@ impl<'a> BindBufferMemoryDeviceGroupInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BindImageMemoryInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -26658,7 +27107,7 @@ impl<'a> BindImageMemoryInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct 
BindImageMemoryDeviceGroupInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -26731,7 +27180,7 @@ impl<'a> BindImageMemoryDeviceGroupInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceGroupRenderPassBeginInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -26798,7 +27247,7 @@ impl<'a> DeviceGroupRenderPassBeginInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceGroupCommandBufferBeginInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -26853,7 +27302,7 @@ impl<'a> DeviceGroupCommandBufferBeginInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceGroupSubmitInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -26938,7 +27387,7 @@ impl<'a> DeviceGroupSubmitInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceGroupBindSparseInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -27005,7 +27454,7 @@ impl<'a> DeviceGroupBindSparseInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceGroupPresentCapabilitiesKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -27088,7 +27537,7 @@ impl<'a> DeviceGroupPresentCapabilitiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageSwapchainCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -27143,7 +27592,7 @@ impl<'a> ImageSwapchainCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BindImageMemorySwapchainInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -27207,7 +27656,7 @@ impl<'a> BindImageMemorySwapchainInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct AcquireNextImageInfoKHR { pub s_type: StructureType, pub p_next: 
*const c_void, @@ -27302,7 +27751,7 @@ impl<'a> AcquireNextImageInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceGroupPresentInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -27369,7 +27818,7 @@ impl<'a> DeviceGroupPresentInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceGroupDeviceCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -27430,7 +27879,7 @@ impl<'a> DeviceGroupDeviceCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceGroupSwapchainCreateInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -27488,7 +27937,7 @@ impl<'a> DeviceGroupSwapchainCreateInfoKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DescriptorUpdateTemplateEntry { pub dst_binding: u32, pub dst_array_element: u32, @@ -27564,7 +28013,7 @@ impl<'a> DescriptorUpdateTemplateEntryBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DescriptorUpdateTemplateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -27692,7 +28141,7 @@ impl<'a> DescriptorUpdateTemplateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct XYColorEXT { pub x: f32, pub y: f32, @@ -27739,7 +28188,7 @@ impl<'a> XYColorEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct HdrMetadataEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -27866,8 +28315,124 @@ impl<'a> HdrMetadataEXTBuilder<'a> { } } #[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DisplayNativeHdrSurfaceCapabilitiesAMD { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub local_dimming_support: Bool32, +} +impl ::std::default::Default for DisplayNativeHdrSurfaceCapabilitiesAMD { + fn 
default() -> DisplayNativeHdrSurfaceCapabilitiesAMD { + DisplayNativeHdrSurfaceCapabilitiesAMD { + s_type: StructureType::DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD, + p_next: ::std::ptr::null_mut(), + local_dimming_support: Bool32::default(), + } + } +} +impl DisplayNativeHdrSurfaceCapabilitiesAMD { + pub fn builder<'a>() -> DisplayNativeHdrSurfaceCapabilitiesAMDBuilder<'a> { + DisplayNativeHdrSurfaceCapabilitiesAMDBuilder { + inner: DisplayNativeHdrSurfaceCapabilitiesAMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DisplayNativeHdrSurfaceCapabilitiesAMDBuilder<'a> { + inner: DisplayNativeHdrSurfaceCapabilitiesAMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSurfaceCapabilities2KHR for DisplayNativeHdrSurfaceCapabilitiesAMDBuilder<'_> {} +unsafe impl ExtendsSurfaceCapabilities2KHR for DisplayNativeHdrSurfaceCapabilitiesAMD {} +impl<'a> ::std::ops::Deref for DisplayNativeHdrSurfaceCapabilitiesAMDBuilder<'a> { + type Target = DisplayNativeHdrSurfaceCapabilitiesAMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DisplayNativeHdrSurfaceCapabilitiesAMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DisplayNativeHdrSurfaceCapabilitiesAMDBuilder<'a> { + pub fn local_dimming_support( + mut self, + local_dimming_support: bool, + ) -> DisplayNativeHdrSurfaceCapabilitiesAMDBuilder<'a> { + self.inner.local_dimming_support = local_dimming_support.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DisplayNativeHdrSurfaceCapabilitiesAMD { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SwapchainDisplayNativeHdrCreateInfoAMD { + pub s_type: StructureType, + pub p_next: *const c_void, + pub local_dimming_enable: Bool32, +} +impl ::std::default::Default for SwapchainDisplayNativeHdrCreateInfoAMD { + fn default() -> SwapchainDisplayNativeHdrCreateInfoAMD { + SwapchainDisplayNativeHdrCreateInfoAMD { + s_type: StructureType::SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD, + p_next: ::std::ptr::null(), + local_dimming_enable: Bool32::default(), + } + } +} +impl SwapchainDisplayNativeHdrCreateInfoAMD { + pub fn builder<'a>() -> SwapchainDisplayNativeHdrCreateInfoAMDBuilder<'a> { + SwapchainDisplayNativeHdrCreateInfoAMDBuilder { + inner: SwapchainDisplayNativeHdrCreateInfoAMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SwapchainDisplayNativeHdrCreateInfoAMDBuilder<'a> { + inner: SwapchainDisplayNativeHdrCreateInfoAMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSwapchainCreateInfoKHR for SwapchainDisplayNativeHdrCreateInfoAMDBuilder<'_> {} +unsafe impl ExtendsSwapchainCreateInfoKHR for SwapchainDisplayNativeHdrCreateInfoAMD {} +impl<'a> ::std::ops::Deref for SwapchainDisplayNativeHdrCreateInfoAMDBuilder<'a> { + type Target = SwapchainDisplayNativeHdrCreateInfoAMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SwapchainDisplayNativeHdrCreateInfoAMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SwapchainDisplayNativeHdrCreateInfoAMDBuilder<'a> { + pub fn local_dimming_enable( + mut self, + local_dimming_enable: bool, + ) -> SwapchainDisplayNativeHdrCreateInfoAMDBuilder<'a> { + 
self.inner.local_dimming_enable = local_dimming_enable.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SwapchainDisplayNativeHdrCreateInfoAMD { + self.inner + } +} +#[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct RefreshCycleDurationGOOGLE { pub refresh_duration: u64, } @@ -27912,7 +28477,7 @@ impl<'a> RefreshCycleDurationGOOGLEBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct PastPresentationTimingGOOGLE { pub present_id: u32, pub desired_present_time: u64, @@ -27986,7 +28551,7 @@ impl<'a> PastPresentationTimingGOOGLEBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PresentTimesInfoGOOGLE { pub s_type: StructureType, pub p_next: *const c_void, @@ -28044,7 +28609,7 @@ impl<'a> PresentTimesInfoGOOGLEBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct PresentTimeGOOGLE { pub present_id: u32, pub desired_present_time: u64, @@ -28094,7 +28659,7 @@ impl<'a> PresentTimeGOOGLEBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct IOSSurfaceCreateInfoMVK { pub s_type: StructureType, pub p_next: *const c_void, @@ -28171,7 +28736,7 @@ impl<'a> IOSSurfaceCreateInfoMVKBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MacOSSurfaceCreateInfoMVK { pub s_type: StructureType, pub p_next: *const c_void, @@ -28250,8 +28815,88 @@ impl<'a> MacOSSurfaceCreateInfoMVKBuilder<'a> { } } #[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MetalSurfaceCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: 
MetalSurfaceCreateFlagsEXT, + pub p_layer: *const CAMetalLayer, +} +impl ::std::default::Default for MetalSurfaceCreateInfoEXT { + fn default() -> MetalSurfaceCreateInfoEXT { + MetalSurfaceCreateInfoEXT { + s_type: StructureType::METAL_SURFACE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + flags: MetalSurfaceCreateFlagsEXT::default(), + p_layer: ::std::ptr::null(), + } + } +} +impl MetalSurfaceCreateInfoEXT { + pub fn builder<'a>() -> MetalSurfaceCreateInfoEXTBuilder<'a> { + MetalSurfaceCreateInfoEXTBuilder { + inner: MetalSurfaceCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MetalSurfaceCreateInfoEXTBuilder<'a> { + inner: MetalSurfaceCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsMetalSurfaceCreateInfoEXT {} +impl<'a> ::std::ops::Deref for MetalSurfaceCreateInfoEXTBuilder<'a> { + type Target = MetalSurfaceCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MetalSurfaceCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MetalSurfaceCreateInfoEXTBuilder<'a> { + pub fn flags( + mut self, + flags: MetalSurfaceCreateFlagsEXT, + ) -> MetalSurfaceCreateInfoEXTBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn layer(mut self, layer: &'a CAMetalLayer) -> MetalSurfaceCreateInfoEXTBuilder<'a> { + self.inner.p_layer = layer; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> MetalSurfaceCreateInfoEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MetalSurfaceCreateInfoEXT { + self.inner + } +} +#[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ViewportWScalingNV { pub xcoeff: f32, pub ycoeff: f32, @@ -28298,7 +28943,7 @@ impl<'a> ViewportWScalingNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineViewportWScalingStateCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -28371,7 +29016,7 @@ impl<'a> PipelineViewportWScalingStateCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ViewportSwizzleNV { pub x: ViewportCoordinateSwizzleNV, pub y: ViewportCoordinateSwizzleNV, @@ -28428,7 +29073,7 @@ impl<'a> ViewportSwizzleNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineViewportSwizzleStateCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -28501,7 +29146,7 @@ impl<'a> PipelineViewportSwizzleStateCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct 
PhysicalDeviceDiscardRectanglePropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -28562,7 +29207,7 @@ impl<'a> PhysicalDeviceDiscardRectanglePropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineDiscardRectangleStateCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -28644,7 +29289,7 @@ impl<'a> PipelineDiscardRectangleStateCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { pub s_type: StructureType, pub p_next: *mut c_void, @@ -28708,7 +29353,7 @@ impl<'a> PhysicalDeviceMultiviewPerViewAttributesPropertiesNVXBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct InputAttachmentAspectReference { pub subpass: u32, pub input_attachment_index: u32, @@ -28766,7 +29411,7 @@ impl<'a> InputAttachmentAspectReferenceBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct RenderPassInputAttachmentAspectCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -28827,7 +29472,7 @@ impl<'a> RenderPassInputAttachmentAspectCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceSurfaceInfo2KHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -28898,7 +29543,7 @@ impl<'a> PhysicalDeviceSurfaceInfo2KHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SurfaceCapabilities2KHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -28972,7 +29617,7 @@ impl<'a> SurfaceCapabilities2KHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SurfaceFormat2KHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -29046,7 +29691,7 @@ impl<'a> SurfaceFormat2KHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = 
""] +#[doc = ""] pub struct DisplayProperties2KHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -29120,7 +29765,7 @@ impl<'a> DisplayProperties2KHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayPlaneProperties2KHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -29194,7 +29839,7 @@ impl<'a> DisplayPlaneProperties2KHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayModeProperties2KHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -29268,7 +29913,7 @@ impl<'a> DisplayModeProperties2KHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayPlaneInfo2KHR { pub s_type: StructureType, pub p_next: *const c_void, @@ -29345,7 +29990,7 @@ impl<'a> DisplayPlaneInfo2KHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DisplayPlaneCapabilities2KHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -29419,7 +30064,7 @@ impl<'a> DisplayPlaneCapabilities2KHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SharedPresentSurfaceCapabilitiesKHR { pub s_type: StructureType, pub p_next: *mut c_void, @@ -29477,7 +30122,7 @@ impl<'a> SharedPresentSurfaceCapabilitiesKHRBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDevice16BitStorageFeatures { pub s_type: StructureType, pub p_next: *mut c_void, @@ -29563,7 +30208,7 @@ impl<'a> PhysicalDevice16BitStorageFeaturesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceSubgroupProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -29648,7 +30293,68 @@ impl<'a> PhysicalDeviceSubgroupPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct PhysicalDeviceShaderSubgroupExtendedTypesFeatures { + pub s_type: 
StructureType, + pub p_next: *mut c_void, + pub shader_subgroup_extended_types: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderSubgroupExtendedTypesFeatures { + fn default() -> PhysicalDeviceShaderSubgroupExtendedTypesFeatures { + PhysicalDeviceShaderSubgroupExtendedTypesFeatures { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES, + p_next: ::std::ptr::null_mut(), + shader_subgroup_extended_types: Bool32::default(), + } + } +} +impl PhysicalDeviceShaderSubgroupExtendedTypesFeatures { + pub fn builder<'a>() -> PhysicalDeviceShaderSubgroupExtendedTypesFeaturesBuilder<'a> { + PhysicalDeviceShaderSubgroupExtendedTypesFeaturesBuilder { + inner: PhysicalDeviceShaderSubgroupExtendedTypesFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderSubgroupExtendedTypesFeaturesBuilder<'a> { + inner: PhysicalDeviceShaderSubgroupExtendedTypesFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceShaderSubgroupExtendedTypesFeaturesBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderSubgroupExtendedTypesFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderSubgroupExtendedTypesFeaturesBuilder<'a> { + type Target = PhysicalDeviceShaderSubgroupExtendedTypesFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderSubgroupExtendedTypesFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderSubgroupExtendedTypesFeaturesBuilder<'a> { + pub fn shader_subgroup_extended_types( + mut self, + shader_subgroup_extended_types: bool, + ) -> PhysicalDeviceShaderSubgroupExtendedTypesFeaturesBuilder<'a> { + self.inner.shader_subgroup_extended_types = shader_subgroup_extended_types.into(); + self + } + #[doc = r" Calling build will **discard** all the 
lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderSubgroupExtendedTypesFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct BufferMemoryRequirementsInfo2 { pub s_type: StructureType, pub p_next: *const c_void, @@ -29719,7 +30425,7 @@ impl<'a> BufferMemoryRequirementsInfo2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageMemoryRequirementsInfo2 { pub s_type: StructureType, pub p_next: *const c_void, @@ -29790,7 +30496,7 @@ impl<'a> ImageMemoryRequirementsInfo2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageSparseMemoryRequirementsInfo2 { pub s_type: StructureType, pub p_next: *const c_void, @@ -29861,7 +30567,7 @@ impl<'a> ImageSparseMemoryRequirementsInfo2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryRequirements2 { pub s_type: StructureType, pub p_next: *mut c_void, @@ -29935,7 +30641,7 @@ impl<'a> MemoryRequirements2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SparseImageMemoryRequirements2 { pub s_type: StructureType, pub p_next: *mut c_void, @@ -30009,7 +30715,7 @@ impl<'a> SparseImageMemoryRequirements2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDevicePointClippingProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -30067,7 +30773,7 @@ impl<'a> PhysicalDevicePointClippingPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryDedicatedRequirements { pub s_type: StructureType, pub p_next: *mut c_void, @@ -30134,7 +30840,7 @@ impl<'a> MemoryDedicatedRequirementsBuilder<'a> { } 
#[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryDedicatedAllocateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -30195,7 +30901,7 @@ impl<'a> MemoryDedicatedAllocateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageViewUsageCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -30250,7 +30956,7 @@ impl<'a> ImageViewUsageCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineTessellationDomainOriginStateCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -30314,7 +31020,7 @@ impl<'a> PipelineTessellationDomainOriginStateCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SamplerYcbcrConversionInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -30374,7 +31080,7 @@ impl<'a> SamplerYcbcrConversionInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SamplerYcbcrConversionCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -30508,7 +31214,7 @@ impl<'a> SamplerYcbcrConversionCreateInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct BindImagePlaneMemoryInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -30566,7 +31272,7 @@ impl<'a> BindImagePlaneMemoryInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImagePlaneMemoryRequirementsInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -30624,7 +31330,7 @@ impl<'a> ImagePlaneMemoryRequirementsInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceSamplerYcbcrConversionFeatures { pub s_type: StructureType, pub p_next: *mut c_void, @@ -30682,7 +31388,7 @@ impl<'a> PhysicalDeviceSamplerYcbcrConversionFeaturesBuilder<'a> { } #[repr(C)] 
#[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SamplerYcbcrConversionImageFormatProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -30744,7 +31450,7 @@ impl<'a> SamplerYcbcrConversionImageFormatPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct TextureLODGatherFormatPropertiesAMD { pub s_type: StructureType, pub p_next: *mut c_void, @@ -30803,7 +31509,7 @@ impl<'a> TextureLODGatherFormatPropertiesAMDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ConditionalRenderingBeginInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -30889,7 +31595,7 @@ impl<'a> ConditionalRenderingBeginInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ProtectedSubmitInfo { pub s_type: StructureType, pub p_next: *const c_void, @@ -30944,7 +31650,7 @@ impl<'a> ProtectedSubmitInfoBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceProtectedMemoryFeatures { pub s_type: StructureType, pub p_next: *mut c_void, @@ -31002,7 +31708,7 @@ impl<'a> PhysicalDeviceProtectedMemoryFeaturesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceProtectedMemoryProperties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -31063,7 +31769,7 @@ impl<'a> PhysicalDeviceProtectedMemoryPropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceQueueInfo2 { pub s_type: StructureType, pub p_next: *const c_void, @@ -31146,7 +31852,7 @@ impl<'a> DeviceQueueInfo2Builder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineCoverageToColorStateCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -31225,57 +31931,57 @@ impl<'a> PipelineCoverageToColorStateCreateInfoNVBuilder<'a> { } #[repr(C)] 
#[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { +#[doc = ""] +pub struct PhysicalDeviceSamplerFilterMinmaxProperties { pub s_type: StructureType, pub p_next: *mut c_void, pub filter_minmax_single_component_formats: Bool32, pub filter_minmax_image_component_mapping: Bool32, } -impl ::std::default::Default for PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { - fn default() -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { - PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { - s_type: StructureType::PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT, +impl ::std::default::Default for PhysicalDeviceSamplerFilterMinmaxProperties { + fn default() -> PhysicalDeviceSamplerFilterMinmaxProperties { + PhysicalDeviceSamplerFilterMinmaxProperties { + s_type: StructureType::PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES, p_next: ::std::ptr::null_mut(), filter_minmax_single_component_formats: Bool32::default(), filter_minmax_image_component_mapping: Bool32::default(), } } } -impl PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { - pub fn builder<'a>() -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { - PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder { - inner: PhysicalDeviceSamplerFilterMinmaxPropertiesEXT::default(), +impl PhysicalDeviceSamplerFilterMinmaxProperties { + pub fn builder<'a>() -> PhysicalDeviceSamplerFilterMinmaxPropertiesBuilder<'a> { + PhysicalDeviceSamplerFilterMinmaxPropertiesBuilder { + inner: PhysicalDeviceSamplerFilterMinmaxProperties::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { - inner: PhysicalDeviceSamplerFilterMinmaxPropertiesEXT, +pub struct PhysicalDeviceSamplerFilterMinmaxPropertiesBuilder<'a> { + inner: PhysicalDeviceSamplerFilterMinmaxProperties, marker: ::std::marker::PhantomData<&'a ()>, } unsafe impl ExtendsPhysicalDeviceProperties2 - for 
PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'_> + for PhysicalDeviceSamplerFilterMinmaxPropertiesBuilder<'_> { } -unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceSamplerFilterMinmaxPropertiesEXT {} -impl<'a> ::std::ops::Deref for PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { - type Target = PhysicalDeviceSamplerFilterMinmaxPropertiesEXT; +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceSamplerFilterMinmaxProperties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceSamplerFilterMinmaxPropertiesBuilder<'a> { + type Target = PhysicalDeviceSamplerFilterMinmaxProperties; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSamplerFilterMinmaxPropertiesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { +impl<'a> PhysicalDeviceSamplerFilterMinmaxPropertiesBuilder<'a> { pub fn filter_minmax_single_component_formats( mut self, filter_minmax_single_component_formats: bool, - ) -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceSamplerFilterMinmaxPropertiesBuilder<'a> { self.inner.filter_minmax_single_component_formats = filter_minmax_single_component_formats.into(); self @@ -31283,7 +31989,7 @@ impl<'a> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { pub fn filter_minmax_image_component_mapping( mut self, filter_minmax_image_component_mapping: bool, - ) -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceSamplerFilterMinmaxPropertiesBuilder<'a> { self.inner.filter_minmax_image_component_mapping = filter_minmax_image_component_mapping.into(); self @@ -31291,13 +31997,13 @@ impl<'a> PhysicalDeviceSamplerFilterMinmaxPropertiesEXTBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceSamplerFilterMinmaxPropertiesEXT { + pub fn build(self) -> PhysicalDeviceSamplerFilterMinmaxProperties { self.inner } } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct SampleLocationEXT { pub x: f32, pub y: f32, @@ -31344,7 +32050,7 @@ impl<'a> SampleLocationEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct SampleLocationsInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -31423,7 +32129,7 @@ impl<'a> SampleLocationsInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct AttachmentSampleLocationsEXT { pub attachment_index: u32, pub sample_locations_info: SampleLocationsInfoEXT, @@ -31476,7 +32182,7 @@ impl<'a> AttachmentSampleLocationsEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct SubpassSampleLocationsEXT { pub subpass_index: u32, pub sample_locations_info: SampleLocationsInfoEXT, @@ -31526,7 +32232,7 @@ impl<'a> SubpassSampleLocationsEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct RenderPassSampleLocationsBeginInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -31601,7 +32307,7 @@ impl<'a> RenderPassSampleLocationsBeginInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineSampleLocationsStateCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -31674,7 +32380,7 @@ impl<'a> PipelineSampleLocationsStateCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceSampleLocationsPropertiesEXT { pub s_type: StructureType, pub 
p_next: *mut c_void, @@ -31771,7 +32477,7 @@ impl<'a> PhysicalDeviceSampleLocationsPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MultisamplePropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -31845,65 +32551,65 @@ impl<'a> MultisamplePropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct SamplerReductionModeCreateInfoEXT { +#[doc = ""] +pub struct SamplerReductionModeCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, - pub reduction_mode: SamplerReductionModeEXT, + pub reduction_mode: SamplerReductionMode, } -impl ::std::default::Default for SamplerReductionModeCreateInfoEXT { - fn default() -> SamplerReductionModeCreateInfoEXT { - SamplerReductionModeCreateInfoEXT { - s_type: StructureType::SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT, +impl ::std::default::Default for SamplerReductionModeCreateInfo { + fn default() -> SamplerReductionModeCreateInfo { + SamplerReductionModeCreateInfo { + s_type: StructureType::SAMPLER_REDUCTION_MODE_CREATE_INFO, p_next: ::std::ptr::null(), - reduction_mode: SamplerReductionModeEXT::default(), + reduction_mode: SamplerReductionMode::default(), } } } -impl SamplerReductionModeCreateInfoEXT { - pub fn builder<'a>() -> SamplerReductionModeCreateInfoEXTBuilder<'a> { - SamplerReductionModeCreateInfoEXTBuilder { - inner: SamplerReductionModeCreateInfoEXT::default(), +impl SamplerReductionModeCreateInfo { + pub fn builder<'a>() -> SamplerReductionModeCreateInfoBuilder<'a> { + SamplerReductionModeCreateInfoBuilder { + inner: SamplerReductionModeCreateInfo::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct SamplerReductionModeCreateInfoEXTBuilder<'a> { - inner: SamplerReductionModeCreateInfoEXT, +pub struct SamplerReductionModeCreateInfoBuilder<'a> { + inner: SamplerReductionModeCreateInfo, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsSamplerCreateInfo for 
SamplerReductionModeCreateInfoEXTBuilder<'_> {} -unsafe impl ExtendsSamplerCreateInfo for SamplerReductionModeCreateInfoEXT {} -impl<'a> ::std::ops::Deref for SamplerReductionModeCreateInfoEXTBuilder<'a> { - type Target = SamplerReductionModeCreateInfoEXT; +unsafe impl ExtendsSamplerCreateInfo for SamplerReductionModeCreateInfoBuilder<'_> {} +unsafe impl ExtendsSamplerCreateInfo for SamplerReductionModeCreateInfo {} +impl<'a> ::std::ops::Deref for SamplerReductionModeCreateInfoBuilder<'a> { + type Target = SamplerReductionModeCreateInfo; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for SamplerReductionModeCreateInfoEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for SamplerReductionModeCreateInfoBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> SamplerReductionModeCreateInfoEXTBuilder<'a> { +impl<'a> SamplerReductionModeCreateInfoBuilder<'a> { pub fn reduction_mode( mut self, - reduction_mode: SamplerReductionModeEXT, - ) -> SamplerReductionModeCreateInfoEXTBuilder<'a> { + reduction_mode: SamplerReductionMode, + ) -> SamplerReductionModeCreateInfoBuilder<'a> { self.inner.reduction_mode = reduction_mode; self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> SamplerReductionModeCreateInfoEXT { + pub fn build(self) -> SamplerReductionModeCreateInfo { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceBlendOperationAdvancedFeaturesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -31961,7 +32667,7 @@ impl<'a> PhysicalDeviceBlendOperationAdvancedFeaturesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceBlendOperationAdvancedPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -32069,7 +32775,7 @@ impl<'a> PhysicalDeviceBlendOperationAdvancedPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineColorBlendAdvancedStateCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -32151,7 +32857,7 @@ impl<'a> PipelineColorBlendAdvancedStateCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceInlineUniformBlockFeaturesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -32220,7 +32926,7 @@ impl<'a> PhysicalDeviceInlineUniformBlockFeaturesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceInlineUniformBlockPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -32323,7 +33029,7 @@ impl<'a> PhysicalDeviceInlineUniformBlockPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct WriteDescriptorSetInlineUniformBlockEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -32381,7 +33087,7 @@ impl<'a> WriteDescriptorSetInlineUniformBlockEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct 
DescriptorPoolInlineUniformBlockCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -32442,7 +33148,7 @@ impl<'a> DescriptorPoolInlineUniformBlockCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineCoverageModulationStateCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -32536,58 +33242,58 @@ impl<'a> PipelineCoverageModulationStateCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct ImageFormatListCreateInfoKHR { +#[doc = ""] +pub struct ImageFormatListCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, pub view_format_count: u32, pub p_view_formats: *const Format, } -impl ::std::default::Default for ImageFormatListCreateInfoKHR { - fn default() -> ImageFormatListCreateInfoKHR { - ImageFormatListCreateInfoKHR { - s_type: StructureType::IMAGE_FORMAT_LIST_CREATE_INFO_KHR, +impl ::std::default::Default for ImageFormatListCreateInfo { + fn default() -> ImageFormatListCreateInfo { + ImageFormatListCreateInfo { + s_type: StructureType::IMAGE_FORMAT_LIST_CREATE_INFO, p_next: ::std::ptr::null(), view_format_count: u32::default(), p_view_formats: ::std::ptr::null(), } } } -impl ImageFormatListCreateInfoKHR { - pub fn builder<'a>() -> ImageFormatListCreateInfoKHRBuilder<'a> { - ImageFormatListCreateInfoKHRBuilder { - inner: ImageFormatListCreateInfoKHR::default(), +impl ImageFormatListCreateInfo { + pub fn builder<'a>() -> ImageFormatListCreateInfoBuilder<'a> { + ImageFormatListCreateInfoBuilder { + inner: ImageFormatListCreateInfo::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct ImageFormatListCreateInfoKHRBuilder<'a> { - inner: ImageFormatListCreateInfoKHR, +pub struct ImageFormatListCreateInfoBuilder<'a> { + inner: ImageFormatListCreateInfo, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsImageCreateInfo for ImageFormatListCreateInfoKHRBuilder<'_> {} 
-unsafe impl ExtendsImageCreateInfo for ImageFormatListCreateInfoKHR {} -unsafe impl ExtendsSwapchainCreateInfoKHR for ImageFormatListCreateInfoKHRBuilder<'_> {} -unsafe impl ExtendsSwapchainCreateInfoKHR for ImageFormatListCreateInfoKHR {} -unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageFormatListCreateInfoKHRBuilder<'_> {} -unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageFormatListCreateInfoKHR {} -impl<'a> ::std::ops::Deref for ImageFormatListCreateInfoKHRBuilder<'a> { - type Target = ImageFormatListCreateInfoKHR; +unsafe impl ExtendsImageCreateInfo for ImageFormatListCreateInfoBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ImageFormatListCreateInfo {} +unsafe impl ExtendsSwapchainCreateInfoKHR for ImageFormatListCreateInfoBuilder<'_> {} +unsafe impl ExtendsSwapchainCreateInfoKHR for ImageFormatListCreateInfo {} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageFormatListCreateInfoBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageFormatListCreateInfo {} +impl<'a> ::std::ops::Deref for ImageFormatListCreateInfoBuilder<'a> { + type Target = ImageFormatListCreateInfo; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for ImageFormatListCreateInfoKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for ImageFormatListCreateInfoBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> ImageFormatListCreateInfoKHRBuilder<'a> { +impl<'a> ImageFormatListCreateInfoBuilder<'a> { pub fn view_formats( mut self, view_formats: &'a [Format], - ) -> ImageFormatListCreateInfoKHRBuilder<'a> { + ) -> ImageFormatListCreateInfoBuilder<'a> { self.inner.view_format_count = view_formats.len() as _; self.inner.p_view_formats = view_formats.as_ptr(); self @@ -32595,13 +33301,13 @@ impl<'a> ImageFormatListCreateInfoKHRBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ImageFormatListCreateInfoKHR { + pub fn build(self) -> ImageFormatListCreateInfo { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ValidationCacheCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -32687,7 +33393,7 @@ impl<'a> ValidationCacheCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ShaderModuleValidationCacheCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -32745,7 +33451,7 @@ impl<'a> ShaderModuleValidationCacheCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceMaintenance3Properties { pub s_type: StructureType, pub p_next: *mut c_void, @@ -32812,7 +33518,7 @@ impl<'a> PhysicalDeviceMaintenance3PropertiesBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DescriptorSetLayoutSupport { pub s_type: StructureType, pub p_next: *mut c_void, @@ -32883,137 +33589,137 @@ impl<'a> DescriptorSetLayoutSupportBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceShaderDrawParameterFeatures { +#[doc = ""] +pub struct PhysicalDeviceShaderDrawParametersFeatures { pub s_type: StructureType, pub p_next: *mut c_void, pub shader_draw_parameters: Bool32, } -impl ::std::default::Default for PhysicalDeviceShaderDrawParameterFeatures { - fn default() -> PhysicalDeviceShaderDrawParameterFeatures { - PhysicalDeviceShaderDrawParameterFeatures { - s_type: StructureType::PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES, +impl ::std::default::Default for PhysicalDeviceShaderDrawParametersFeatures { + fn default() -> PhysicalDeviceShaderDrawParametersFeatures { + PhysicalDeviceShaderDrawParametersFeatures { + 
s_type: StructureType::PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES, p_next: ::std::ptr::null_mut(), shader_draw_parameters: Bool32::default(), } } } -impl PhysicalDeviceShaderDrawParameterFeatures { - pub fn builder<'a>() -> PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { - PhysicalDeviceShaderDrawParameterFeaturesBuilder { - inner: PhysicalDeviceShaderDrawParameterFeatures::default(), +impl PhysicalDeviceShaderDrawParametersFeatures { + pub fn builder<'a>() -> PhysicalDeviceShaderDrawParametersFeaturesBuilder<'a> { + PhysicalDeviceShaderDrawParametersFeaturesBuilder { + inner: PhysicalDeviceShaderDrawParametersFeatures::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { - inner: PhysicalDeviceShaderDrawParameterFeatures, +pub struct PhysicalDeviceShaderDrawParametersFeaturesBuilder<'a> { + inner: PhysicalDeviceShaderDrawParametersFeatures, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderDrawParameterFeaturesBuilder<'_> {} -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderDrawParameterFeatures {} -impl<'a> ::std::ops::Deref for PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { - type Target = PhysicalDeviceShaderDrawParameterFeatures; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderDrawParametersFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderDrawParametersFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderDrawParametersFeaturesBuilder<'a> { + type Target = PhysicalDeviceShaderDrawParametersFeatures; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderDrawParametersFeaturesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> 
PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { +impl<'a> PhysicalDeviceShaderDrawParametersFeaturesBuilder<'a> { pub fn shader_draw_parameters( mut self, shader_draw_parameters: bool, - ) -> PhysicalDeviceShaderDrawParameterFeaturesBuilder<'a> { + ) -> PhysicalDeviceShaderDrawParametersFeaturesBuilder<'a> { self.inner.shader_draw_parameters = shader_draw_parameters.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceShaderDrawParameterFeatures { + pub fn build(self) -> PhysicalDeviceShaderDrawParametersFeatures { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceFloat16Int8FeaturesKHR { +#[doc = ""] +pub struct PhysicalDeviceShaderFloat16Int8Features { pub s_type: StructureType, pub p_next: *mut c_void, pub shader_float16: Bool32, pub shader_int8: Bool32, } -impl ::std::default::Default for PhysicalDeviceFloat16Int8FeaturesKHR { - fn default() -> PhysicalDeviceFloat16Int8FeaturesKHR { - PhysicalDeviceFloat16Int8FeaturesKHR { - s_type: StructureType::PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR, +impl ::std::default::Default for PhysicalDeviceShaderFloat16Int8Features { + fn default() -> PhysicalDeviceShaderFloat16Int8Features { + PhysicalDeviceShaderFloat16Int8Features { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, p_next: ::std::ptr::null_mut(), shader_float16: Bool32::default(), shader_int8: Bool32::default(), } } } -impl PhysicalDeviceFloat16Int8FeaturesKHR { - pub fn builder<'a>() -> PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { - PhysicalDeviceFloat16Int8FeaturesKHRBuilder { - inner: PhysicalDeviceFloat16Int8FeaturesKHR::default(), +impl PhysicalDeviceShaderFloat16Int8Features { + pub fn builder<'a>() -> 
PhysicalDeviceShaderFloat16Int8FeaturesBuilder<'a> { + PhysicalDeviceShaderFloat16Int8FeaturesBuilder { + inner: PhysicalDeviceShaderFloat16Int8Features::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { - inner: PhysicalDeviceFloat16Int8FeaturesKHR, +pub struct PhysicalDeviceShaderFloat16Int8FeaturesBuilder<'a> { + inner: PhysicalDeviceShaderFloat16Int8Features, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'_> {} -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceFloat16Int8FeaturesKHR {} -impl<'a> ::std::ops::Deref for PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { - type Target = PhysicalDeviceFloat16Int8FeaturesKHR; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderFloat16Int8FeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderFloat16Int8Features {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderFloat16Int8FeaturesBuilder<'a> { + type Target = PhysicalDeviceShaderFloat16Int8Features; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderFloat16Int8FeaturesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { +impl<'a> PhysicalDeviceShaderFloat16Int8FeaturesBuilder<'a> { pub fn shader_float16( mut self, shader_float16: bool, - ) -> PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { + ) -> PhysicalDeviceShaderFloat16Int8FeaturesBuilder<'a> { self.inner.shader_float16 = shader_float16.into(); self } pub fn shader_int8( mut self, shader_int8: bool, - ) -> PhysicalDeviceFloat16Int8FeaturesKHRBuilder<'a> { + ) -> PhysicalDeviceShaderFloat16Int8FeaturesBuilder<'a> { self.inner.shader_int8 = shader_int8.into(); self } #[doc = 
r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceFloat16Int8FeaturesKHR { + pub fn build(self) -> PhysicalDeviceShaderFloat16Int8Features { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceFloatControlsPropertiesKHR { +#[doc = ""] +pub struct PhysicalDeviceFloatControlsProperties { pub s_type: StructureType, pub p_next: *mut c_void, - pub separate_denorm_settings: Bool32, - pub separate_rounding_mode_settings: Bool32, + pub denorm_behavior_independence: ShaderFloatControlsIndependence, + pub rounding_mode_independence: ShaderFloatControlsIndependence, pub shader_signed_zero_inf_nan_preserve_float16: Bool32, pub shader_signed_zero_inf_nan_preserve_float32: Bool32, pub shader_signed_zero_inf_nan_preserve_float64: Bool32, @@ -33030,13 +33736,13 @@ pub struct PhysicalDeviceFloatControlsPropertiesKHR { pub shader_rounding_mode_rtz_float32: Bool32, pub shader_rounding_mode_rtz_float64: Bool32, } -impl ::std::default::Default for PhysicalDeviceFloatControlsPropertiesKHR { - fn default() -> PhysicalDeviceFloatControlsPropertiesKHR { - PhysicalDeviceFloatControlsPropertiesKHR { - s_type: StructureType::PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR, +impl ::std::default::Default for PhysicalDeviceFloatControlsProperties { + fn default() -> PhysicalDeviceFloatControlsProperties { + PhysicalDeviceFloatControlsProperties { + s_type: StructureType::PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES, p_next: ::std::ptr::null_mut(), - separate_denorm_settings: Bool32::default(), - separate_rounding_mode_settings: Bool32::default(), + denorm_behavior_independence: ShaderFloatControlsIndependence::default(), + rounding_mode_independence: ShaderFloatControlsIndependence::default(), 
shader_signed_zero_inf_nan_preserve_float16: Bool32::default(), shader_signed_zero_inf_nan_preserve_float32: Bool32::default(), shader_signed_zero_inf_nan_preserve_float64: Bool32::default(), @@ -33055,54 +33761,51 @@ impl ::std::default::Default for PhysicalDeviceFloatControlsPropertiesKHR { } } } -impl PhysicalDeviceFloatControlsPropertiesKHR { - pub fn builder<'a>() -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { - PhysicalDeviceFloatControlsPropertiesKHRBuilder { - inner: PhysicalDeviceFloatControlsPropertiesKHR::default(), +impl PhysicalDeviceFloatControlsProperties { + pub fn builder<'a>() -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { + PhysicalDeviceFloatControlsPropertiesBuilder { + inner: PhysicalDeviceFloatControlsProperties::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { - inner: PhysicalDeviceFloatControlsPropertiesKHR, +pub struct PhysicalDeviceFloatControlsPropertiesBuilder<'a> { + inner: PhysicalDeviceFloatControlsProperties, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsPhysicalDeviceProperties2 - for PhysicalDeviceFloatControlsPropertiesKHRBuilder<'_> -{ -} -unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceFloatControlsPropertiesKHR {} -impl<'a> ::std::ops::Deref for PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { - type Target = PhysicalDeviceFloatControlsPropertiesKHR; +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceFloatControlsPropertiesBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceFloatControlsProperties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceFloatControlsPropertiesBuilder<'a> { + type Target = PhysicalDeviceFloatControlsProperties; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for 
PhysicalDeviceFloatControlsPropertiesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { - pub fn separate_denorm_settings( +impl<'a> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { + pub fn denorm_behavior_independence( mut self, - separate_denorm_settings: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { - self.inner.separate_denorm_settings = separate_denorm_settings.into(); + denorm_behavior_independence: ShaderFloatControlsIndependence, + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { + self.inner.denorm_behavior_independence = denorm_behavior_independence; self } - pub fn separate_rounding_mode_settings( + pub fn rounding_mode_independence( mut self, - separate_rounding_mode_settings: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { - self.inner.separate_rounding_mode_settings = separate_rounding_mode_settings.into(); + rounding_mode_independence: ShaderFloatControlsIndependence, + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { + self.inner.rounding_mode_independence = rounding_mode_independence; self } pub fn shader_signed_zero_inf_nan_preserve_float16( mut self, shader_signed_zero_inf_nan_preserve_float16: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_signed_zero_inf_nan_preserve_float16 = shader_signed_zero_inf_nan_preserve_float16.into(); self @@ -33110,7 +33813,7 @@ impl<'a> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { pub fn shader_signed_zero_inf_nan_preserve_float32( mut self, shader_signed_zero_inf_nan_preserve_float32: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_signed_zero_inf_nan_preserve_float32 = shader_signed_zero_inf_nan_preserve_float32.into(); self @@ -33118,7 +33821,7 @@ impl<'a> 
PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { pub fn shader_signed_zero_inf_nan_preserve_float64( mut self, shader_signed_zero_inf_nan_preserve_float64: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_signed_zero_inf_nan_preserve_float64 = shader_signed_zero_inf_nan_preserve_float64.into(); self @@ -33126,97 +33829,202 @@ impl<'a> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { pub fn shader_denorm_preserve_float16( mut self, shader_denorm_preserve_float16: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_denorm_preserve_float16 = shader_denorm_preserve_float16.into(); self } pub fn shader_denorm_preserve_float32( mut self, shader_denorm_preserve_float32: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_denorm_preserve_float32 = shader_denorm_preserve_float32.into(); self } pub fn shader_denorm_preserve_float64( mut self, shader_denorm_preserve_float64: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_denorm_preserve_float64 = shader_denorm_preserve_float64.into(); self } pub fn shader_denorm_flush_to_zero_float16( mut self, shader_denorm_flush_to_zero_float16: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_denorm_flush_to_zero_float16 = shader_denorm_flush_to_zero_float16.into(); self } pub fn shader_denorm_flush_to_zero_float32( mut self, shader_denorm_flush_to_zero_float32: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_denorm_flush_to_zero_float32 = 
shader_denorm_flush_to_zero_float32.into(); self } pub fn shader_denorm_flush_to_zero_float64( mut self, shader_denorm_flush_to_zero_float64: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_denorm_flush_to_zero_float64 = shader_denorm_flush_to_zero_float64.into(); self } pub fn shader_rounding_mode_rte_float16( mut self, shader_rounding_mode_rte_float16: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_rounding_mode_rte_float16 = shader_rounding_mode_rte_float16.into(); self } pub fn shader_rounding_mode_rte_float32( mut self, shader_rounding_mode_rte_float32: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_rounding_mode_rte_float32 = shader_rounding_mode_rte_float32.into(); self } pub fn shader_rounding_mode_rte_float64( mut self, shader_rounding_mode_rte_float64: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_rounding_mode_rte_float64 = shader_rounding_mode_rte_float64.into(); self } pub fn shader_rounding_mode_rtz_float16( mut self, shader_rounding_mode_rtz_float16: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_rounding_mode_rtz_float16 = shader_rounding_mode_rtz_float16.into(); self } pub fn shader_rounding_mode_rtz_float32( mut self, shader_rounding_mode_rtz_float32: bool, - ) -> PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_rounding_mode_rtz_float32 = shader_rounding_mode_rtz_float32.into(); self } pub fn shader_rounding_mode_rtz_float64( mut self, shader_rounding_mode_rtz_float64: bool, - ) -> 
PhysicalDeviceFloatControlsPropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceFloatControlsPropertiesBuilder<'a> { self.inner.shader_rounding_mode_rtz_float64 = shader_rounding_mode_rtz_float64.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceFloatControlsPropertiesKHR { + pub fn build(self) -> PhysicalDeviceFloatControlsProperties { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct PhysicalDeviceHostQueryResetFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub host_query_reset: Bool32, +} +impl ::std::default::Default for PhysicalDeviceHostQueryResetFeatures { + fn default() -> PhysicalDeviceHostQueryResetFeatures { + PhysicalDeviceHostQueryResetFeatures { + s_type: StructureType::PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES, + p_next: ::std::ptr::null_mut(), + host_query_reset: Bool32::default(), + } + } +} +impl PhysicalDeviceHostQueryResetFeatures { + pub fn builder<'a>() -> PhysicalDeviceHostQueryResetFeaturesBuilder<'a> { + PhysicalDeviceHostQueryResetFeaturesBuilder { + inner: PhysicalDeviceHostQueryResetFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceHostQueryResetFeaturesBuilder<'a> { + inner: PhysicalDeviceHostQueryResetFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceHostQueryResetFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceHostQueryResetFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceHostQueryResetFeaturesBuilder<'a> { + type Target = PhysicalDeviceHostQueryResetFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> 
::std::ops::DerefMut for PhysicalDeviceHostQueryResetFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceHostQueryResetFeaturesBuilder<'a> { + pub fn host_query_reset( + mut self, + host_query_reset: bool, + ) -> PhysicalDeviceHostQueryResetFeaturesBuilder<'a> { + self.inner.host_query_reset = host_query_reset.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceHostQueryResetFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct NativeBufferUsage2ANDROID { + pub consumer: u64, + pub producer: u64, +} +impl NativeBufferUsage2ANDROID { + pub fn builder<'a>() -> NativeBufferUsage2ANDROIDBuilder<'a> { + NativeBufferUsage2ANDROIDBuilder { + inner: NativeBufferUsage2ANDROID::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct NativeBufferUsage2ANDROIDBuilder<'a> { + inner: NativeBufferUsage2ANDROID, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for NativeBufferUsage2ANDROIDBuilder<'a> { + type Target = NativeBufferUsage2ANDROID; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for NativeBufferUsage2ANDROIDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> NativeBufferUsage2ANDROIDBuilder<'a> { + pub fn consumer(mut self, consumer: u64) -> NativeBufferUsage2ANDROIDBuilder<'a> { + self.inner.consumer = consumer; + self + } + pub fn producer(mut self, producer: u64) -> NativeBufferUsage2ANDROIDBuilder<'a> { + self.inner.producer = producer; + self + } + #[doc = r" Calling build will **discard** all the lifetime 
information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> NativeBufferUsage2ANDROID { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct NativeBufferANDROID { pub s_type: StructureType, pub p_next: *const c_void, @@ -33224,6 +34032,7 @@ pub struct NativeBufferANDROID { pub stride: c_int, pub format: c_int, pub usage: c_int, + pub usage2: NativeBufferUsage2ANDROID, } impl ::std::default::Default for NativeBufferANDROID { fn default() -> NativeBufferANDROID { @@ -33234,6 +34043,7 @@ impl ::std::default::Default for NativeBufferANDROID { stride: c_int::default(), format: c_int::default(), usage: c_int::default(), + usage2: NativeBufferUsage2ANDROID::default(), } } } @@ -33279,6 +34089,10 @@ impl<'a> NativeBufferANDROIDBuilder<'a> { self.inner.usage = usage; self } + pub fn usage2(mut self, usage2: NativeBufferUsage2ANDROID) -> NativeBufferANDROIDBuilder<'a> { + self.inner.usage2 = usage2; + self + } #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] #[doc = r" valid extension structs can be pushed into the chain."] @@ -33304,8 +34118,156 @@ impl<'a> NativeBufferANDROIDBuilder<'a> { } } #[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SwapchainImageCreateInfoANDROID { + pub s_type: StructureType, + pub p_next: *const c_void, + pub usage: SwapchainImageUsageFlagsANDROID, +} +impl ::std::default::Default for SwapchainImageCreateInfoANDROID { + fn default() -> SwapchainImageCreateInfoANDROID { + SwapchainImageCreateInfoANDROID { + s_type: StructureType::SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID, + p_next: ::std::ptr::null(), + usage: SwapchainImageUsageFlagsANDROID::default(), + } + } +} +impl SwapchainImageCreateInfoANDROID { + pub fn builder<'a>() -> SwapchainImageCreateInfoANDROIDBuilder<'a> { + SwapchainImageCreateInfoANDROIDBuilder { + inner: SwapchainImageCreateInfoANDROID::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SwapchainImageCreateInfoANDROIDBuilder<'a> { + inner: SwapchainImageCreateInfoANDROID, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSwapchainImageCreateInfoANDROID {} +impl<'a> ::std::ops::Deref for SwapchainImageCreateInfoANDROIDBuilder<'a> { + type Target = SwapchainImageCreateInfoANDROID; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SwapchainImageCreateInfoANDROIDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SwapchainImageCreateInfoANDROIDBuilder<'a> { + pub fn usage( + mut self, + usage: SwapchainImageUsageFlagsANDROID, + ) -> SwapchainImageCreateInfoANDROIDBuilder<'a> { + self.inner.usage = usage; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SwapchainImageCreateInfoANDROIDBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SwapchainImageCreateInfoANDROID { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevicePresentationPropertiesANDROID { + pub s_type: StructureType, + pub p_next: *const c_void, + pub shared_image: Bool32, +} +impl ::std::default::Default for PhysicalDevicePresentationPropertiesANDROID { + fn default() -> PhysicalDevicePresentationPropertiesANDROID { + PhysicalDevicePresentationPropertiesANDROID { + s_type: StructureType::PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID, + p_next: ::std::ptr::null(), + shared_image: Bool32::default(), + } + } +} +impl PhysicalDevicePresentationPropertiesANDROID { + pub fn builder<'a>() -> PhysicalDevicePresentationPropertiesANDROIDBuilder<'a> { + PhysicalDevicePresentationPropertiesANDROIDBuilder { + inner: PhysicalDevicePresentationPropertiesANDROID::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePresentationPropertiesANDROIDBuilder<'a> { + inner: PhysicalDevicePresentationPropertiesANDROID, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait 
ExtendsPhysicalDevicePresentationPropertiesANDROID {} +impl<'a> ::std::ops::Deref for PhysicalDevicePresentationPropertiesANDROIDBuilder<'a> { + type Target = PhysicalDevicePresentationPropertiesANDROID; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePresentationPropertiesANDROIDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePresentationPropertiesANDROIDBuilder<'a> { + pub fn shared_image( + mut self, + shared_image: bool, + ) -> PhysicalDevicePresentationPropertiesANDROIDBuilder<'a> { + self.inner.shared_image = shared_image.into(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PhysicalDevicePresentationPropertiesANDROIDBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevicePresentationPropertiesANDROID { + self.inner + } +} +#[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct ShaderResourceUsageAMD { pub num_used_vgprs: u32, pub num_used_sgprs: u32, @@ -33376,7 +34338,7 @@ impl<'a> ShaderResourceUsageAMDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ShaderStatisticsInfoAMD { pub shader_stage_mask: ShaderStageFlags, pub resource_usage: ShaderResourceUsageAMD, @@ -33482,7 +34444,7 @@ impl<'a> ShaderStatisticsInfoAMDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceQueueGlobalPriorityCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -33540,7 +34502,7 @@ impl<'a> DeviceQueueGlobalPriorityCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DebugUtilsObjectNameInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -33629,7 +34591,7 @@ impl<'a> DebugUtilsObjectNameInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DebugUtilsObjectTagInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -33721,7 +34683,7 @@ impl<'a> DebugUtilsObjectTagInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DebugUtilsLabelEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -33798,7 +34760,7 @@ impl<'a> DebugUtilsLabelEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone)] -#[doc = ""] +#[doc = ""] pub struct DebugUtilsMessengerCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -33908,7 +34870,7 @@ impl<'a> DebugUtilsMessengerCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc 
= ""] pub struct DebugUtilsMessengerCallbackDataEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -34045,7 +35007,7 @@ impl<'a> DebugUtilsMessengerCallbackDataEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImportMemoryHostPointerInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -34112,7 +35074,7 @@ impl<'a> ImportMemoryHostPointerInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryHostPointerPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -34186,7 +35148,7 @@ impl<'a> MemoryHostPointerPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceExternalMemoryHostPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -34247,7 +35209,7 @@ impl<'a> PhysicalDeviceExternalMemoryHostPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceConservativeRasterizationPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -34388,7 +35350,7 @@ impl<'a> PhysicalDeviceConservativeRasterizationPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct CalibratedTimestampInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -34462,7 +35424,7 @@ impl<'a> CalibratedTimestampInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceShaderCorePropertiesAMD { pub s_type: StructureType, pub p_next: *mut c_void, @@ -34637,7 +35599,74 @@ impl<'a> PhysicalDeviceShaderCorePropertiesAMDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct PhysicalDeviceShaderCoreProperties2AMD { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_core_features: ShaderCorePropertiesFlagsAMD, + pub active_compute_unit_count: u32, 
+} +impl ::std::default::Default for PhysicalDeviceShaderCoreProperties2AMD { + fn default() -> PhysicalDeviceShaderCoreProperties2AMD { + PhysicalDeviceShaderCoreProperties2AMD { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD, + p_next: ::std::ptr::null_mut(), + shader_core_features: ShaderCorePropertiesFlagsAMD::default(), + active_compute_unit_count: u32::default(), + } + } +} +impl PhysicalDeviceShaderCoreProperties2AMD { + pub fn builder<'a>() -> PhysicalDeviceShaderCoreProperties2AMDBuilder<'a> { + PhysicalDeviceShaderCoreProperties2AMDBuilder { + inner: PhysicalDeviceShaderCoreProperties2AMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderCoreProperties2AMDBuilder<'a> { + inner: PhysicalDeviceShaderCoreProperties2AMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShaderCoreProperties2AMDBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShaderCoreProperties2AMD {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderCoreProperties2AMDBuilder<'a> { + type Target = PhysicalDeviceShaderCoreProperties2AMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderCoreProperties2AMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderCoreProperties2AMDBuilder<'a> { + pub fn shader_core_features( + mut self, + shader_core_features: ShaderCorePropertiesFlagsAMD, + ) -> PhysicalDeviceShaderCoreProperties2AMDBuilder<'a> { + self.inner.shader_core_features = shader_core_features; + self + } + pub fn active_compute_unit_count( + mut self, + active_compute_unit_count: u32, + ) -> PhysicalDeviceShaderCoreProperties2AMDBuilder<'a> { + self.inner.active_compute_unit_count = active_compute_unit_count; + self + } + #[doc = r" Calling build will **discard** all the lifetime 
information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderCoreProperties2AMD { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct PipelineRasterizationConservativeStateCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -34719,8 +35748,8 @@ impl<'a> PipelineRasterizationConservativeStateCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceDescriptorIndexingFeaturesEXT { +#[doc = ""] +pub struct PhysicalDeviceDescriptorIndexingFeatures { pub s_type: StructureType, pub p_next: *mut c_void, pub shader_input_attachment_array_dynamic_indexing: Bool32, @@ -34744,10 +35773,10 @@ pub struct PhysicalDeviceDescriptorIndexingFeaturesEXT { pub descriptor_binding_variable_descriptor_count: Bool32, pub runtime_descriptor_array: Bool32, } -impl ::std::default::Default for PhysicalDeviceDescriptorIndexingFeaturesEXT { - fn default() -> PhysicalDeviceDescriptorIndexingFeaturesEXT { - PhysicalDeviceDescriptorIndexingFeaturesEXT { - s_type: StructureType::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT, +impl ::std::default::Default for PhysicalDeviceDescriptorIndexingFeatures { + fn default() -> PhysicalDeviceDescriptorIndexingFeatures { + PhysicalDeviceDescriptorIndexingFeatures { + s_type: StructureType::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES, p_next: ::std::ptr::null_mut(), shader_input_attachment_array_dynamic_indexing: Bool32::default(), shader_uniform_texel_buffer_array_dynamic_indexing: Bool32::default(), @@ -34772,37 +35801,37 @@ impl ::std::default::Default for PhysicalDeviceDescriptorIndexingFeaturesEXT { } } } -impl PhysicalDeviceDescriptorIndexingFeaturesEXT { - pub fn builder<'a>() -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { - 
PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder { - inner: PhysicalDeviceDescriptorIndexingFeaturesEXT::default(), +impl PhysicalDeviceDescriptorIndexingFeatures { + pub fn builder<'a>() -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { + PhysicalDeviceDescriptorIndexingFeaturesBuilder { + inner: PhysicalDeviceDescriptorIndexingFeatures::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { - inner: PhysicalDeviceDescriptorIndexingFeaturesEXT, +pub struct PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { + inner: PhysicalDeviceDescriptorIndexingFeatures, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'_> {} -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDescriptorIndexingFeaturesEXT {} -impl<'a> ::std::ops::Deref for PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { - type Target = PhysicalDeviceDescriptorIndexingFeaturesEXT; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDescriptorIndexingFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDescriptorIndexingFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { + type Target = PhysicalDeviceDescriptorIndexingFeatures; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { +impl<'a> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { pub fn shader_input_attachment_array_dynamic_indexing( mut self, shader_input_attachment_array_dynamic_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> 
PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner.shader_input_attachment_array_dynamic_indexing = shader_input_attachment_array_dynamic_indexing.into(); self @@ -34810,7 +35839,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn shader_uniform_texel_buffer_array_dynamic_indexing( mut self, shader_uniform_texel_buffer_array_dynamic_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .shader_uniform_texel_buffer_array_dynamic_indexing = shader_uniform_texel_buffer_array_dynamic_indexing.into(); @@ -34819,7 +35848,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn shader_storage_texel_buffer_array_dynamic_indexing( mut self, shader_storage_texel_buffer_array_dynamic_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .shader_storage_texel_buffer_array_dynamic_indexing = shader_storage_texel_buffer_array_dynamic_indexing.into(); @@ -34828,7 +35857,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn shader_uniform_buffer_array_non_uniform_indexing( mut self, shader_uniform_buffer_array_non_uniform_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner.shader_uniform_buffer_array_non_uniform_indexing = shader_uniform_buffer_array_non_uniform_indexing.into(); self @@ -34836,7 +35865,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn shader_sampled_image_array_non_uniform_indexing( mut self, shader_sampled_image_array_non_uniform_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner.shader_sampled_image_array_non_uniform_indexing = 
shader_sampled_image_array_non_uniform_indexing.into(); self @@ -34844,7 +35873,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn shader_storage_buffer_array_non_uniform_indexing( mut self, shader_storage_buffer_array_non_uniform_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner.shader_storage_buffer_array_non_uniform_indexing = shader_storage_buffer_array_non_uniform_indexing.into(); self @@ -34852,7 +35881,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn shader_storage_image_array_non_uniform_indexing( mut self, shader_storage_image_array_non_uniform_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner.shader_storage_image_array_non_uniform_indexing = shader_storage_image_array_non_uniform_indexing.into(); self @@ -34860,7 +35889,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn shader_input_attachment_array_non_uniform_indexing( mut self, shader_input_attachment_array_non_uniform_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .shader_input_attachment_array_non_uniform_indexing = shader_input_attachment_array_non_uniform_indexing.into(); @@ -34869,7 +35898,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn shader_uniform_texel_buffer_array_non_uniform_indexing( mut self, shader_uniform_texel_buffer_array_non_uniform_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .shader_uniform_texel_buffer_array_non_uniform_indexing = shader_uniform_texel_buffer_array_non_uniform_indexing.into(); @@ -34878,7 +35907,7 @@ impl<'a> 
PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn shader_storage_texel_buffer_array_non_uniform_indexing( mut self, shader_storage_texel_buffer_array_non_uniform_indexing: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .shader_storage_texel_buffer_array_non_uniform_indexing = shader_storage_texel_buffer_array_non_uniform_indexing.into(); @@ -34887,7 +35916,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn descriptor_binding_uniform_buffer_update_after_bind( mut self, descriptor_binding_uniform_buffer_update_after_bind: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .descriptor_binding_uniform_buffer_update_after_bind = descriptor_binding_uniform_buffer_update_after_bind.into(); @@ -34896,7 +35925,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn descriptor_binding_sampled_image_update_after_bind( mut self, descriptor_binding_sampled_image_update_after_bind: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .descriptor_binding_sampled_image_update_after_bind = descriptor_binding_sampled_image_update_after_bind.into(); @@ -34905,7 +35934,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn descriptor_binding_storage_image_update_after_bind( mut self, descriptor_binding_storage_image_update_after_bind: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .descriptor_binding_storage_image_update_after_bind = descriptor_binding_storage_image_update_after_bind.into(); @@ -34914,7 +35943,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn 
descriptor_binding_storage_buffer_update_after_bind( mut self, descriptor_binding_storage_buffer_update_after_bind: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .descriptor_binding_storage_buffer_update_after_bind = descriptor_binding_storage_buffer_update_after_bind.into(); @@ -34923,7 +35952,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn descriptor_binding_uniform_texel_buffer_update_after_bind( mut self, descriptor_binding_uniform_texel_buffer_update_after_bind: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .descriptor_binding_uniform_texel_buffer_update_after_bind = descriptor_binding_uniform_texel_buffer_update_after_bind.into(); @@ -34932,7 +35961,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn descriptor_binding_storage_texel_buffer_update_after_bind( mut self, descriptor_binding_storage_texel_buffer_update_after_bind: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner .descriptor_binding_storage_texel_buffer_update_after_bind = descriptor_binding_storage_texel_buffer_update_after_bind.into(); @@ -34941,7 +35970,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn descriptor_binding_update_unused_while_pending( mut self, descriptor_binding_update_unused_while_pending: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner.descriptor_binding_update_unused_while_pending = descriptor_binding_update_unused_while_pending.into(); self @@ -34949,14 +35978,14 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn descriptor_binding_partially_bound( mut self, descriptor_binding_partially_bound: 
bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner.descriptor_binding_partially_bound = descriptor_binding_partially_bound.into(); self } pub fn descriptor_binding_variable_descriptor_count( mut self, descriptor_binding_variable_descriptor_count: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner.descriptor_binding_variable_descriptor_count = descriptor_binding_variable_descriptor_count.into(); self @@ -34964,21 +35993,21 @@ impl<'a> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { pub fn runtime_descriptor_array( mut self, runtime_descriptor_array: bool, - ) -> PhysicalDeviceDescriptorIndexingFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingFeaturesBuilder<'a> { self.inner.runtime_descriptor_array = runtime_descriptor_array.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceDescriptorIndexingFeaturesEXT { + pub fn build(self) -> PhysicalDeviceDescriptorIndexingFeatures { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceDescriptorIndexingPropertiesEXT { +#[doc = ""] +pub struct PhysicalDeviceDescriptorIndexingProperties { pub s_type: StructureType, pub p_next: *mut c_void, pub max_update_after_bind_descriptors_in_all_pools: u32, @@ -35005,10 +36034,10 @@ pub struct PhysicalDeviceDescriptorIndexingPropertiesEXT { pub max_descriptor_set_update_after_bind_storage_images: u32, pub max_descriptor_set_update_after_bind_input_attachments: u32, } -impl ::std::default::Default for PhysicalDeviceDescriptorIndexingPropertiesEXT { - fn default() -> PhysicalDeviceDescriptorIndexingPropertiesEXT { - PhysicalDeviceDescriptorIndexingPropertiesEXT { - s_type: StructureType::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT, +impl ::std::default::Default for PhysicalDeviceDescriptorIndexingProperties { + fn default() -> PhysicalDeviceDescriptorIndexingProperties { + PhysicalDeviceDescriptorIndexingProperties { + s_type: StructureType::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES, p_next: ::std::ptr::null_mut(), max_update_after_bind_descriptors_in_all_pools: u32::default(), shader_uniform_buffer_array_non_uniform_indexing_native: Bool32::default(), @@ -35036,40 +36065,40 @@ impl ::std::default::Default for PhysicalDeviceDescriptorIndexingPropertiesEXT { } } } -impl PhysicalDeviceDescriptorIndexingPropertiesEXT { - pub fn builder<'a>() -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { - PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder { - inner: PhysicalDeviceDescriptorIndexingPropertiesEXT::default(), +impl PhysicalDeviceDescriptorIndexingProperties { + pub fn builder<'a>() -> 
PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { + PhysicalDeviceDescriptorIndexingPropertiesBuilder { + inner: PhysicalDeviceDescriptorIndexingProperties::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { - inner: PhysicalDeviceDescriptorIndexingPropertiesEXT, +pub struct PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { + inner: PhysicalDeviceDescriptorIndexingProperties, marker: ::std::marker::PhantomData<&'a ()>, } unsafe impl ExtendsPhysicalDeviceProperties2 - for PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'_> + for PhysicalDeviceDescriptorIndexingPropertiesBuilder<'_> { } -unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDescriptorIndexingPropertiesEXT {} -impl<'a> ::std::ops::Deref for PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { - type Target = PhysicalDeviceDescriptorIndexingPropertiesEXT; +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDescriptorIndexingProperties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { + type Target = PhysicalDeviceDescriptorIndexingProperties; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { +impl<'a> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { pub fn max_update_after_bind_descriptors_in_all_pools( mut self, max_update_after_bind_descriptors_in_all_pools: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner.max_update_after_bind_descriptors_in_all_pools = max_update_after_bind_descriptors_in_all_pools; self 
@@ -35077,7 +36106,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn shader_uniform_buffer_array_non_uniform_indexing_native( mut self, shader_uniform_buffer_array_non_uniform_indexing_native: bool, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .shader_uniform_buffer_array_non_uniform_indexing_native = shader_uniform_buffer_array_non_uniform_indexing_native.into(); @@ -35086,7 +36115,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn shader_sampled_image_array_non_uniform_indexing_native( mut self, shader_sampled_image_array_non_uniform_indexing_native: bool, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .shader_sampled_image_array_non_uniform_indexing_native = shader_sampled_image_array_non_uniform_indexing_native.into(); @@ -35095,7 +36124,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn shader_storage_buffer_array_non_uniform_indexing_native( mut self, shader_storage_buffer_array_non_uniform_indexing_native: bool, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .shader_storage_buffer_array_non_uniform_indexing_native = shader_storage_buffer_array_non_uniform_indexing_native.into(); @@ -35104,7 +36133,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn shader_storage_image_array_non_uniform_indexing_native( mut self, shader_storage_image_array_non_uniform_indexing_native: bool, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .shader_storage_image_array_non_uniform_indexing_native = shader_storage_image_array_non_uniform_indexing_native.into(); @@ -35113,7 +36142,7 @@ impl<'a> 
PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn shader_input_attachment_array_non_uniform_indexing_native( mut self, shader_input_attachment_array_non_uniform_indexing_native: bool, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .shader_input_attachment_array_non_uniform_indexing_native = shader_input_attachment_array_non_uniform_indexing_native.into(); @@ -35122,7 +36151,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn robust_buffer_access_update_after_bind( mut self, robust_buffer_access_update_after_bind: bool, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner.robust_buffer_access_update_after_bind = robust_buffer_access_update_after_bind.into(); self @@ -35130,14 +36159,14 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn quad_divergent_implicit_lod( mut self, quad_divergent_implicit_lod: bool, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner.quad_divergent_implicit_lod = quad_divergent_implicit_lod.into(); self } pub fn max_per_stage_descriptor_update_after_bind_samplers( mut self, max_per_stage_descriptor_update_after_bind_samplers: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_per_stage_descriptor_update_after_bind_samplers = max_per_stage_descriptor_update_after_bind_samplers; @@ -35146,7 +36175,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_per_stage_descriptor_update_after_bind_uniform_buffers( mut self, max_per_stage_descriptor_update_after_bind_uniform_buffers: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> 
PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_per_stage_descriptor_update_after_bind_uniform_buffers = max_per_stage_descriptor_update_after_bind_uniform_buffers; @@ -35155,7 +36184,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_per_stage_descriptor_update_after_bind_storage_buffers( mut self, max_per_stage_descriptor_update_after_bind_storage_buffers: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_per_stage_descriptor_update_after_bind_storage_buffers = max_per_stage_descriptor_update_after_bind_storage_buffers; @@ -35164,7 +36193,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_per_stage_descriptor_update_after_bind_sampled_images( mut self, max_per_stage_descriptor_update_after_bind_sampled_images: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_per_stage_descriptor_update_after_bind_sampled_images = max_per_stage_descriptor_update_after_bind_sampled_images; @@ -35173,7 +36202,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_per_stage_descriptor_update_after_bind_storage_images( mut self, max_per_stage_descriptor_update_after_bind_storage_images: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_per_stage_descriptor_update_after_bind_storage_images = max_per_stage_descriptor_update_after_bind_storage_images; @@ -35182,7 +36211,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_per_stage_descriptor_update_after_bind_input_attachments( mut self, max_per_stage_descriptor_update_after_bind_input_attachments: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> 
PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_per_stage_descriptor_update_after_bind_input_attachments = max_per_stage_descriptor_update_after_bind_input_attachments; @@ -35191,7 +36220,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_per_stage_update_after_bind_resources( mut self, max_per_stage_update_after_bind_resources: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner.max_per_stage_update_after_bind_resources = max_per_stage_update_after_bind_resources; self @@ -35199,7 +36228,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_descriptor_set_update_after_bind_samplers( mut self, max_descriptor_set_update_after_bind_samplers: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner.max_descriptor_set_update_after_bind_samplers = max_descriptor_set_update_after_bind_samplers; self @@ -35207,7 +36236,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_descriptor_set_update_after_bind_uniform_buffers( mut self, max_descriptor_set_update_after_bind_uniform_buffers: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_descriptor_set_update_after_bind_uniform_buffers = max_descriptor_set_update_after_bind_uniform_buffers; @@ -35216,7 +36245,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_descriptor_set_update_after_bind_uniform_buffers_dynamic( mut self, max_descriptor_set_update_after_bind_uniform_buffers_dynamic: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_descriptor_set_update_after_bind_uniform_buffers_dynamic = 
max_descriptor_set_update_after_bind_uniform_buffers_dynamic; @@ -35225,7 +36254,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_descriptor_set_update_after_bind_storage_buffers( mut self, max_descriptor_set_update_after_bind_storage_buffers: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_descriptor_set_update_after_bind_storage_buffers = max_descriptor_set_update_after_bind_storage_buffers; @@ -35234,7 +36263,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_descriptor_set_update_after_bind_storage_buffers_dynamic( mut self, max_descriptor_set_update_after_bind_storage_buffers_dynamic: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_descriptor_set_update_after_bind_storage_buffers_dynamic = max_descriptor_set_update_after_bind_storage_buffers_dynamic; @@ -35243,7 +36272,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_descriptor_set_update_after_bind_sampled_images( mut self, max_descriptor_set_update_after_bind_sampled_images: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_descriptor_set_update_after_bind_sampled_images = max_descriptor_set_update_after_bind_sampled_images; @@ -35252,7 +36281,7 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_descriptor_set_update_after_bind_storage_images( mut self, max_descriptor_set_update_after_bind_storage_images: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_descriptor_set_update_after_bind_storage_images = max_descriptor_set_update_after_bind_storage_images; @@ -35261,7 +36290,7 @@ 
impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { pub fn max_descriptor_set_update_after_bind_input_attachments( mut self, max_descriptor_set_update_after_bind_input_attachments: u32, - ) -> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { + ) -> PhysicalDeviceDescriptorIndexingPropertiesBuilder<'a> { self.inner .max_descriptor_set_update_after_bind_input_attachments = max_descriptor_set_update_after_bind_input_attachments; @@ -35270,63 +36299,63 @@ impl<'a> PhysicalDeviceDescriptorIndexingPropertiesEXTBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceDescriptorIndexingPropertiesEXT { + pub fn build(self) -> PhysicalDeviceDescriptorIndexingProperties { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct DescriptorSetLayoutBindingFlagsCreateInfoEXT { +#[doc = ""] +pub struct DescriptorSetLayoutBindingFlagsCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, pub binding_count: u32, - pub p_binding_flags: *const DescriptorBindingFlagsEXT, + pub p_binding_flags: *const DescriptorBindingFlags, } -impl ::std::default::Default for DescriptorSetLayoutBindingFlagsCreateInfoEXT { - fn default() -> DescriptorSetLayoutBindingFlagsCreateInfoEXT { - DescriptorSetLayoutBindingFlagsCreateInfoEXT { - s_type: StructureType::DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT, +impl ::std::default::Default for DescriptorSetLayoutBindingFlagsCreateInfo { + fn default() -> DescriptorSetLayoutBindingFlagsCreateInfo { + DescriptorSetLayoutBindingFlagsCreateInfo { + s_type: StructureType::DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO, p_next: ::std::ptr::null(), binding_count: u32::default(), p_binding_flags: ::std::ptr::null(), } } } -impl 
DescriptorSetLayoutBindingFlagsCreateInfoEXT { - pub fn builder<'a>() -> DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> { - DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder { - inner: DescriptorSetLayoutBindingFlagsCreateInfoEXT::default(), +impl DescriptorSetLayoutBindingFlagsCreateInfo { + pub fn builder<'a>() -> DescriptorSetLayoutBindingFlagsCreateInfoBuilder<'a> { + DescriptorSetLayoutBindingFlagsCreateInfoBuilder { + inner: DescriptorSetLayoutBindingFlagsCreateInfo::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> { - inner: DescriptorSetLayoutBindingFlagsCreateInfoEXT, +pub struct DescriptorSetLayoutBindingFlagsCreateInfoBuilder<'a> { + inner: DescriptorSetLayoutBindingFlagsCreateInfo, marker: ::std::marker::PhantomData<&'a ()>, } unsafe impl ExtendsDescriptorSetLayoutCreateInfo - for DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'_> + for DescriptorSetLayoutBindingFlagsCreateInfoBuilder<'_> { } -unsafe impl ExtendsDescriptorSetLayoutCreateInfo for DescriptorSetLayoutBindingFlagsCreateInfoEXT {} -impl<'a> ::std::ops::Deref for DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> { - type Target = DescriptorSetLayoutBindingFlagsCreateInfoEXT; +unsafe impl ExtendsDescriptorSetLayoutCreateInfo for DescriptorSetLayoutBindingFlagsCreateInfo {} +impl<'a> ::std::ops::Deref for DescriptorSetLayoutBindingFlagsCreateInfoBuilder<'a> { + type Target = DescriptorSetLayoutBindingFlagsCreateInfo; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for DescriptorSetLayoutBindingFlagsCreateInfoBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> { +impl<'a> DescriptorSetLayoutBindingFlagsCreateInfoBuilder<'a> { pub fn binding_flags( mut self, - 
binding_flags: &'a [DescriptorBindingFlagsEXT], - ) -> DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> { + binding_flags: &'a [DescriptorBindingFlags], + ) -> DescriptorSetLayoutBindingFlagsCreateInfoBuilder<'a> { self.inner.binding_count = binding_flags.len() as _; self.inner.p_binding_flags = binding_flags.as_ptr(); self @@ -35334,66 +36363,63 @@ impl<'a> DescriptorSetLayoutBindingFlagsCreateInfoEXTBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> DescriptorSetLayoutBindingFlagsCreateInfoEXT { + pub fn build(self) -> DescriptorSetLayoutBindingFlagsCreateInfo { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct DescriptorSetVariableDescriptorCountAllocateInfoEXT { +#[doc = ""] +pub struct DescriptorSetVariableDescriptorCountAllocateInfo { pub s_type: StructureType, pub p_next: *const c_void, pub descriptor_set_count: u32, pub p_descriptor_counts: *const u32, } -impl ::std::default::Default for DescriptorSetVariableDescriptorCountAllocateInfoEXT { - fn default() -> DescriptorSetVariableDescriptorCountAllocateInfoEXT { - DescriptorSetVariableDescriptorCountAllocateInfoEXT { - s_type: StructureType::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT, +impl ::std::default::Default for DescriptorSetVariableDescriptorCountAllocateInfo { + fn default() -> DescriptorSetVariableDescriptorCountAllocateInfo { + DescriptorSetVariableDescriptorCountAllocateInfo { + s_type: StructureType::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO, p_next: ::std::ptr::null(), descriptor_set_count: u32::default(), p_descriptor_counts: ::std::ptr::null(), } } } -impl DescriptorSetVariableDescriptorCountAllocateInfoEXT { - pub fn builder<'a>() -> 
DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { - DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder { - inner: DescriptorSetVariableDescriptorCountAllocateInfoEXT::default(), +impl DescriptorSetVariableDescriptorCountAllocateInfo { + pub fn builder<'a>() -> DescriptorSetVariableDescriptorCountAllocateInfoBuilder<'a> { + DescriptorSetVariableDescriptorCountAllocateInfoBuilder { + inner: DescriptorSetVariableDescriptorCountAllocateInfo::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { - inner: DescriptorSetVariableDescriptorCountAllocateInfoEXT, +pub struct DescriptorSetVariableDescriptorCountAllocateInfoBuilder<'a> { + inner: DescriptorSetVariableDescriptorCountAllocateInfo, marker: ::std::marker::PhantomData<&'a ()>, } unsafe impl ExtendsDescriptorSetAllocateInfo - for DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'_> + for DescriptorSetVariableDescriptorCountAllocateInfoBuilder<'_> { } -unsafe impl ExtendsDescriptorSetAllocateInfo - for DescriptorSetVariableDescriptorCountAllocateInfoEXT -{ -} -impl<'a> ::std::ops::Deref for DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { - type Target = DescriptorSetVariableDescriptorCountAllocateInfoEXT; +unsafe impl ExtendsDescriptorSetAllocateInfo for DescriptorSetVariableDescriptorCountAllocateInfo {} +impl<'a> ::std::ops::Deref for DescriptorSetVariableDescriptorCountAllocateInfoBuilder<'a> { + type Target = DescriptorSetVariableDescriptorCountAllocateInfo; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for DescriptorSetVariableDescriptorCountAllocateInfoBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { +impl<'a> 
DescriptorSetVariableDescriptorCountAllocateInfoBuilder<'a> { pub fn descriptor_counts( mut self, descriptor_counts: &'a [u32], - ) -> DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { + ) -> DescriptorSetVariableDescriptorCountAllocateInfoBuilder<'a> { self.inner.descriptor_set_count = descriptor_counts.len() as _; self.inner.p_descriptor_counts = descriptor_counts.as_ptr(); self @@ -35401,78 +36427,78 @@ impl<'a> DescriptorSetVariableDescriptorCountAllocateInfoEXTBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> DescriptorSetVariableDescriptorCountAllocateInfoEXT { + pub fn build(self) -> DescriptorSetVariableDescriptorCountAllocateInfo { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct DescriptorSetVariableDescriptorCountLayoutSupportEXT { +#[doc = ""] +pub struct DescriptorSetVariableDescriptorCountLayoutSupport { pub s_type: StructureType, pub p_next: *mut c_void, pub max_variable_descriptor_count: u32, } -impl ::std::default::Default for DescriptorSetVariableDescriptorCountLayoutSupportEXT { - fn default() -> DescriptorSetVariableDescriptorCountLayoutSupportEXT { - DescriptorSetVariableDescriptorCountLayoutSupportEXT { - s_type: StructureType::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT, +impl ::std::default::Default for DescriptorSetVariableDescriptorCountLayoutSupport { + fn default() -> DescriptorSetVariableDescriptorCountLayoutSupport { + DescriptorSetVariableDescriptorCountLayoutSupport { + s_type: StructureType::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT, p_next: ::std::ptr::null_mut(), max_variable_descriptor_count: u32::default(), } } } -impl DescriptorSetVariableDescriptorCountLayoutSupportEXT { - pub fn builder<'a>() -> 
DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { - DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder { - inner: DescriptorSetVariableDescriptorCountLayoutSupportEXT::default(), +impl DescriptorSetVariableDescriptorCountLayoutSupport { + pub fn builder<'a>() -> DescriptorSetVariableDescriptorCountLayoutSupportBuilder<'a> { + DescriptorSetVariableDescriptorCountLayoutSupportBuilder { + inner: DescriptorSetVariableDescriptorCountLayoutSupport::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { - inner: DescriptorSetVariableDescriptorCountLayoutSupportEXT, +pub struct DescriptorSetVariableDescriptorCountLayoutSupportBuilder<'a> { + inner: DescriptorSetVariableDescriptorCountLayoutSupport, marker: ::std::marker::PhantomData<&'a ()>, } unsafe impl ExtendsDescriptorSetLayoutSupport - for DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'_> + for DescriptorSetVariableDescriptorCountLayoutSupportBuilder<'_> { } unsafe impl ExtendsDescriptorSetLayoutSupport - for DescriptorSetVariableDescriptorCountLayoutSupportEXT + for DescriptorSetVariableDescriptorCountLayoutSupport { } -impl<'a> ::std::ops::Deref for DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { - type Target = DescriptorSetVariableDescriptorCountLayoutSupportEXT; +impl<'a> ::std::ops::Deref for DescriptorSetVariableDescriptorCountLayoutSupportBuilder<'a> { + type Target = DescriptorSetVariableDescriptorCountLayoutSupport; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for DescriptorSetVariableDescriptorCountLayoutSupportBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { +impl<'a> 
DescriptorSetVariableDescriptorCountLayoutSupportBuilder<'a> { pub fn max_variable_descriptor_count( mut self, max_variable_descriptor_count: u32, - ) -> DescriptorSetVariableDescriptorCountLayoutSupportEXTBuilder<'a> { + ) -> DescriptorSetVariableDescriptorCountLayoutSupportBuilder<'a> { self.inner.max_variable_descriptor_count = max_variable_descriptor_count; self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> DescriptorSetVariableDescriptorCountLayoutSupportEXT { + pub fn build(self) -> DescriptorSetVariableDescriptorCountLayoutSupport { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct AttachmentDescription2KHR { +#[doc = ""] +pub struct AttachmentDescription2 { pub s_type: StructureType, pub p_next: *const c_void, pub flags: AttachmentDescriptionFlags, @@ -35485,10 +36511,10 @@ pub struct AttachmentDescription2KHR { pub initial_layout: ImageLayout, pub final_layout: ImageLayout, } -impl ::std::default::Default for AttachmentDescription2KHR { - fn default() -> AttachmentDescription2KHR { - AttachmentDescription2KHR { - s_type: StructureType::ATTACHMENT_DESCRIPTION_2_KHR, +impl ::std::default::Default for AttachmentDescription2 { + fn default() -> AttachmentDescription2 { + AttachmentDescription2 { + s_type: StructureType::ATTACHMENT_DESCRIPTION_2, p_next: ::std::ptr::null(), flags: AttachmentDescriptionFlags::default(), format: Format::default(), @@ -35502,80 +36528,74 @@ impl ::std::default::Default for AttachmentDescription2KHR { } } } -impl AttachmentDescription2KHR { - pub fn builder<'a>() -> AttachmentDescription2KHRBuilder<'a> { - AttachmentDescription2KHRBuilder { - inner: AttachmentDescription2KHR::default(), +impl AttachmentDescription2 { + pub fn builder<'a>() -> 
AttachmentDescription2Builder<'a> { + AttachmentDescription2Builder { + inner: AttachmentDescription2::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct AttachmentDescription2KHRBuilder<'a> { - inner: AttachmentDescription2KHR, +pub struct AttachmentDescription2Builder<'a> { + inner: AttachmentDescription2, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsAttachmentDescription2KHR {} -impl<'a> ::std::ops::Deref for AttachmentDescription2KHRBuilder<'a> { - type Target = AttachmentDescription2KHR; +pub unsafe trait ExtendsAttachmentDescription2 {} +impl<'a> ::std::ops::Deref for AttachmentDescription2Builder<'a> { + type Target = AttachmentDescription2; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for AttachmentDescription2KHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for AttachmentDescription2Builder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> AttachmentDescription2KHRBuilder<'a> { - pub fn flags( - mut self, - flags: AttachmentDescriptionFlags, - ) -> AttachmentDescription2KHRBuilder<'a> { +impl<'a> AttachmentDescription2Builder<'a> { + pub fn flags(mut self, flags: AttachmentDescriptionFlags) -> AttachmentDescription2Builder<'a> { self.inner.flags = flags; self } - pub fn format(mut self, format: Format) -> AttachmentDescription2KHRBuilder<'a> { + pub fn format(mut self, format: Format) -> AttachmentDescription2Builder<'a> { self.inner.format = format; self } - pub fn samples(mut self, samples: SampleCountFlags) -> AttachmentDescription2KHRBuilder<'a> { + pub fn samples(mut self, samples: SampleCountFlags) -> AttachmentDescription2Builder<'a> { self.inner.samples = samples; self } - pub fn load_op(mut self, load_op: AttachmentLoadOp) -> AttachmentDescription2KHRBuilder<'a> { + pub fn load_op(mut self, load_op: AttachmentLoadOp) -> AttachmentDescription2Builder<'a> { self.inner.load_op = load_op; self } - pub fn store_op(mut self, 
store_op: AttachmentStoreOp) -> AttachmentDescription2KHRBuilder<'a> { + pub fn store_op(mut self, store_op: AttachmentStoreOp) -> AttachmentDescription2Builder<'a> { self.inner.store_op = store_op; self } pub fn stencil_load_op( mut self, stencil_load_op: AttachmentLoadOp, - ) -> AttachmentDescription2KHRBuilder<'a> { + ) -> AttachmentDescription2Builder<'a> { self.inner.stencil_load_op = stencil_load_op; self } pub fn stencil_store_op( mut self, stencil_store_op: AttachmentStoreOp, - ) -> AttachmentDescription2KHRBuilder<'a> { + ) -> AttachmentDescription2Builder<'a> { self.inner.stencil_store_op = stencil_store_op; self } pub fn initial_layout( mut self, initial_layout: ImageLayout, - ) -> AttachmentDescription2KHRBuilder<'a> { + ) -> AttachmentDescription2Builder<'a> { self.inner.initial_layout = initial_layout; self } - pub fn final_layout( - mut self, - final_layout: ImageLayout, - ) -> AttachmentDescription2KHRBuilder<'a> { + pub fn final_layout(mut self, final_layout: ImageLayout) -> AttachmentDescription2Builder<'a> { self.inner.final_layout = final_layout; self } @@ -35584,10 +36604,10 @@ impl<'a> AttachmentDescription2KHRBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> AttachmentDescription2KHRBuilder<'a> { + ) -> AttachmentDescription2Builder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -35599,24 +36619,24 @@ impl<'a> AttachmentDescription2KHRBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> AttachmentDescription2KHR { + pub fn build(self) -> AttachmentDescription2 { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct AttachmentReference2KHR { +#[doc = ""] +pub struct AttachmentReference2 { pub s_type: StructureType, pub p_next: *const c_void, pub attachment: u32, pub layout: ImageLayout, pub aspect_mask: ImageAspectFlags, } -impl ::std::default::Default for AttachmentReference2KHR { - fn default() -> AttachmentReference2KHR { - AttachmentReference2KHR { - s_type: StructureType::ATTACHMENT_REFERENCE_2_KHR, +impl ::std::default::Default for AttachmentReference2 { + fn default() -> AttachmentReference2 { + AttachmentReference2 { + s_type: StructureType::ATTACHMENT_REFERENCE_2, p_next: ::std::ptr::null(), attachment: u32::default(), layout: ImageLayout::default(), @@ -35624,44 +36644,41 @@ impl ::std::default::Default for AttachmentReference2KHR { } } } -impl AttachmentReference2KHR { - pub fn builder<'a>() -> AttachmentReference2KHRBuilder<'a> { - AttachmentReference2KHRBuilder { - inner: AttachmentReference2KHR::default(), +impl AttachmentReference2 { + pub fn builder<'a>() -> AttachmentReference2Builder<'a> { + AttachmentReference2Builder { + inner: AttachmentReference2::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct AttachmentReference2KHRBuilder<'a> { - inner: AttachmentReference2KHR, +pub struct AttachmentReference2Builder<'a> { + inner: AttachmentReference2, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsAttachmentReference2KHR {} -impl<'a> ::std::ops::Deref for AttachmentReference2KHRBuilder<'a> { - type Target = AttachmentReference2KHR; +pub unsafe trait ExtendsAttachmentReference2 {} +impl<'a> ::std::ops::Deref for AttachmentReference2Builder<'a> { + type Target = 
AttachmentReference2; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for AttachmentReference2KHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for AttachmentReference2Builder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> AttachmentReference2KHRBuilder<'a> { - pub fn attachment(mut self, attachment: u32) -> AttachmentReference2KHRBuilder<'a> { +impl<'a> AttachmentReference2Builder<'a> { + pub fn attachment(mut self, attachment: u32) -> AttachmentReference2Builder<'a> { self.inner.attachment = attachment; self } - pub fn layout(mut self, layout: ImageLayout) -> AttachmentReference2KHRBuilder<'a> { + pub fn layout(mut self, layout: ImageLayout) -> AttachmentReference2Builder<'a> { self.inner.layout = layout; self } - pub fn aspect_mask( - mut self, - aspect_mask: ImageAspectFlags, - ) -> AttachmentReference2KHRBuilder<'a> { + pub fn aspect_mask(mut self, aspect_mask: ImageAspectFlags) -> AttachmentReference2Builder<'a> { self.inner.aspect_mask = aspect_mask; self } @@ -35670,10 +36687,10 @@ impl<'a> AttachmentReference2KHRBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> AttachmentReference2KHRBuilder<'a> { + ) -> AttachmentReference2Builder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -35685,32 +36702,32 @@ impl<'a> AttachmentReference2KHRBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> AttachmentReference2KHR { + pub fn build(self) -> AttachmentReference2 { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct SubpassDescription2KHR { +#[doc = ""] +pub struct SubpassDescription2 { pub s_type: StructureType, pub p_next: *const c_void, pub flags: SubpassDescriptionFlags, pub pipeline_bind_point: PipelineBindPoint, pub view_mask: u32, pub input_attachment_count: u32, - pub p_input_attachments: *const AttachmentReference2KHR, + pub p_input_attachments: *const AttachmentReference2, pub color_attachment_count: u32, - pub p_color_attachments: *const AttachmentReference2KHR, - pub p_resolve_attachments: *const AttachmentReference2KHR, - pub p_depth_stencil_attachment: *const AttachmentReference2KHR, + pub p_color_attachments: *const AttachmentReference2, + pub p_resolve_attachments: *const AttachmentReference2, + pub p_depth_stencil_attachment: *const AttachmentReference2, pub preserve_attachment_count: u32, pub p_preserve_attachments: *const u32, } -impl ::std::default::Default for SubpassDescription2KHR { - fn default() -> SubpassDescription2KHR { - SubpassDescription2KHR { - s_type: StructureType::SUBPASS_DESCRIPTION_2_KHR, +impl ::std::default::Default for SubpassDescription2 { + fn default() -> SubpassDescription2 { + SubpassDescription2 { + s_type: StructureType::SUBPASS_DESCRIPTION_2, p_next: ::std::ptr::null(), flags: SubpassDescriptionFlags::default(), pipeline_bind_point: PipelineBindPoint::default(), @@ -35726,82 +36743,82 @@ impl ::std::default::Default for SubpassDescription2KHR { } } } -impl SubpassDescription2KHR { - pub fn builder<'a>() -> SubpassDescription2KHRBuilder<'a> { - SubpassDescription2KHRBuilder { - inner: SubpassDescription2KHR::default(), +impl SubpassDescription2 { + pub fn builder<'a>() -> SubpassDescription2Builder<'a> 
{ + SubpassDescription2Builder { + inner: SubpassDescription2::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct SubpassDescription2KHRBuilder<'a> { - inner: SubpassDescription2KHR, +pub struct SubpassDescription2Builder<'a> { + inner: SubpassDescription2, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsSubpassDescription2KHR {} -impl<'a> ::std::ops::Deref for SubpassDescription2KHRBuilder<'a> { - type Target = SubpassDescription2KHR; +pub unsafe trait ExtendsSubpassDescription2 {} +impl<'a> ::std::ops::Deref for SubpassDescription2Builder<'a> { + type Target = SubpassDescription2; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for SubpassDescription2KHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for SubpassDescription2Builder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> SubpassDescription2KHRBuilder<'a> { - pub fn flags(mut self, flags: SubpassDescriptionFlags) -> SubpassDescription2KHRBuilder<'a> { +impl<'a> SubpassDescription2Builder<'a> { + pub fn flags(mut self, flags: SubpassDescriptionFlags) -> SubpassDescription2Builder<'a> { self.inner.flags = flags; self } pub fn pipeline_bind_point( mut self, pipeline_bind_point: PipelineBindPoint, - ) -> SubpassDescription2KHRBuilder<'a> { + ) -> SubpassDescription2Builder<'a> { self.inner.pipeline_bind_point = pipeline_bind_point; self } - pub fn view_mask(mut self, view_mask: u32) -> SubpassDescription2KHRBuilder<'a> { + pub fn view_mask(mut self, view_mask: u32) -> SubpassDescription2Builder<'a> { self.inner.view_mask = view_mask; self } pub fn input_attachments( mut self, - input_attachments: &'a [AttachmentReference2KHR], - ) -> SubpassDescription2KHRBuilder<'a> { + input_attachments: &'a [AttachmentReference2], + ) -> SubpassDescription2Builder<'a> { self.inner.input_attachment_count = input_attachments.len() as _; self.inner.p_input_attachments = input_attachments.as_ptr(); self } 
pub fn color_attachments( mut self, - color_attachments: &'a [AttachmentReference2KHR], - ) -> SubpassDescription2KHRBuilder<'a> { + color_attachments: &'a [AttachmentReference2], + ) -> SubpassDescription2Builder<'a> { self.inner.color_attachment_count = color_attachments.len() as _; self.inner.p_color_attachments = color_attachments.as_ptr(); self } pub fn resolve_attachments( mut self, - resolve_attachments: &'a [AttachmentReference2KHR], - ) -> SubpassDescription2KHRBuilder<'a> { + resolve_attachments: &'a [AttachmentReference2], + ) -> SubpassDescription2Builder<'a> { self.inner.color_attachment_count = resolve_attachments.len() as _; self.inner.p_resolve_attachments = resolve_attachments.as_ptr(); self } pub fn depth_stencil_attachment( mut self, - depth_stencil_attachment: &'a AttachmentReference2KHR, - ) -> SubpassDescription2KHRBuilder<'a> { + depth_stencil_attachment: &'a AttachmentReference2, + ) -> SubpassDescription2Builder<'a> { self.inner.p_depth_stencil_attachment = depth_stencil_attachment; self } pub fn preserve_attachments( mut self, preserve_attachments: &'a [u32], - ) -> SubpassDescription2KHRBuilder<'a> { + ) -> SubpassDescription2Builder<'a> { self.inner.preserve_attachment_count = preserve_attachments.len() as _; self.inner.p_preserve_attachments = preserve_attachments.as_ptr(); self @@ -35811,10 +36828,10 @@ impl<'a> SubpassDescription2KHRBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> SubpassDescription2KHRBuilder<'a> { + ) -> SubpassDescription2Builder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -35826,14 +36843,14 @@ impl<'a> SubpassDescription2KHRBuilder<'a> { #[doc = r" Calling build will 
**discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> SubpassDescription2KHR { + pub fn build(self) -> SubpassDescription2 { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct SubpassDependency2KHR { +#[doc = ""] +pub struct SubpassDependency2 { pub s_type: StructureType, pub p_next: *const c_void, pub src_subpass: u32, @@ -35845,10 +36862,10 @@ pub struct SubpassDependency2KHR { pub dependency_flags: DependencyFlags, pub view_offset: i32, } -impl ::std::default::Default for SubpassDependency2KHR { - fn default() -> SubpassDependency2KHR { - SubpassDependency2KHR { - s_type: StructureType::SUBPASS_DEPENDENCY_2_KHR, +impl ::std::default::Default for SubpassDependency2 { + fn default() -> SubpassDependency2 { + SubpassDependency2 { + s_type: StructureType::SUBPASS_DEPENDENCY_2, p_next: ::std::ptr::null(), src_subpass: u32::default(), dst_subpass: u32::default(), @@ -35861,76 +36878,76 @@ impl ::std::default::Default for SubpassDependency2KHR { } } } -impl SubpassDependency2KHR { - pub fn builder<'a>() -> SubpassDependency2KHRBuilder<'a> { - SubpassDependency2KHRBuilder { - inner: SubpassDependency2KHR::default(), +impl SubpassDependency2 { + pub fn builder<'a>() -> SubpassDependency2Builder<'a> { + SubpassDependency2Builder { + inner: SubpassDependency2::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct SubpassDependency2KHRBuilder<'a> { - inner: SubpassDependency2KHR, +pub struct SubpassDependency2Builder<'a> { + inner: SubpassDependency2, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsSubpassDependency2KHR {} -impl<'a> ::std::ops::Deref for SubpassDependency2KHRBuilder<'a> { - type Target = SubpassDependency2KHR; +pub unsafe trait ExtendsSubpassDependency2 {} +impl<'a> 
::std::ops::Deref for SubpassDependency2Builder<'a> { + type Target = SubpassDependency2; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for SubpassDependency2KHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for SubpassDependency2Builder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> SubpassDependency2KHRBuilder<'a> { - pub fn src_subpass(mut self, src_subpass: u32) -> SubpassDependency2KHRBuilder<'a> { +impl<'a> SubpassDependency2Builder<'a> { + pub fn src_subpass(mut self, src_subpass: u32) -> SubpassDependency2Builder<'a> { self.inner.src_subpass = src_subpass; self } - pub fn dst_subpass(mut self, dst_subpass: u32) -> SubpassDependency2KHRBuilder<'a> { + pub fn dst_subpass(mut self, dst_subpass: u32) -> SubpassDependency2Builder<'a> { self.inner.dst_subpass = dst_subpass; self } pub fn src_stage_mask( mut self, src_stage_mask: PipelineStageFlags, - ) -> SubpassDependency2KHRBuilder<'a> { + ) -> SubpassDependency2Builder<'a> { self.inner.src_stage_mask = src_stage_mask; self } pub fn dst_stage_mask( mut self, dst_stage_mask: PipelineStageFlags, - ) -> SubpassDependency2KHRBuilder<'a> { + ) -> SubpassDependency2Builder<'a> { self.inner.dst_stage_mask = dst_stage_mask; self } pub fn src_access_mask( mut self, src_access_mask: AccessFlags, - ) -> SubpassDependency2KHRBuilder<'a> { + ) -> SubpassDependency2Builder<'a> { self.inner.src_access_mask = src_access_mask; self } pub fn dst_access_mask( mut self, dst_access_mask: AccessFlags, - ) -> SubpassDependency2KHRBuilder<'a> { + ) -> SubpassDependency2Builder<'a> { self.inner.dst_access_mask = dst_access_mask; self } pub fn dependency_flags( mut self, dependency_flags: DependencyFlags, - ) -> SubpassDependency2KHRBuilder<'a> { + ) -> SubpassDependency2Builder<'a> { self.inner.dependency_flags = dependency_flags; self } - pub fn view_offset(mut self, view_offset: i32) -> SubpassDependency2KHRBuilder<'a> { + pub fn view_offset(mut self, view_offset: 
i32) -> SubpassDependency2Builder<'a> { self.inner.view_offset = view_offset; self } @@ -35939,10 +36956,10 @@ impl<'a> SubpassDependency2KHRBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> SubpassDependency2KHRBuilder<'a> { + ) -> SubpassDependency2Builder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -35954,30 +36971,30 @@ impl<'a> SubpassDependency2KHRBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> SubpassDependency2KHR { + pub fn build(self) -> SubpassDependency2 { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct RenderPassCreateInfo2KHR { +#[doc = ""] +pub struct RenderPassCreateInfo2 { pub s_type: StructureType, pub p_next: *const c_void, pub flags: RenderPassCreateFlags, pub attachment_count: u32, - pub p_attachments: *const AttachmentDescription2KHR, + pub p_attachments: *const AttachmentDescription2, pub subpass_count: u32, - pub p_subpasses: *const SubpassDescription2KHR, + pub p_subpasses: *const SubpassDescription2, pub dependency_count: u32, - pub p_dependencies: *const SubpassDependency2KHR, + pub p_dependencies: *const SubpassDependency2, pub correlated_view_mask_count: u32, pub p_correlated_view_masks: *const u32, } -impl ::std::default::Default for RenderPassCreateInfo2KHR { - fn default() -> RenderPassCreateInfo2KHR { - RenderPassCreateInfo2KHR { - s_type: StructureType::RENDER_PASS_CREATE_INFO_2_KHR, +impl 
::std::default::Default for RenderPassCreateInfo2 { + fn default() -> RenderPassCreateInfo2 { + RenderPassCreateInfo2 { + s_type: StructureType::RENDER_PASS_CREATE_INFO_2, p_next: ::std::ptr::null(), flags: RenderPassCreateFlags::default(), attachment_count: u32::default(), @@ -35991,56 +37008,56 @@ impl ::std::default::Default for RenderPassCreateInfo2KHR { } } } -impl RenderPassCreateInfo2KHR { - pub fn builder<'a>() -> RenderPassCreateInfo2KHRBuilder<'a> { - RenderPassCreateInfo2KHRBuilder { - inner: RenderPassCreateInfo2KHR::default(), +impl RenderPassCreateInfo2 { + pub fn builder<'a>() -> RenderPassCreateInfo2Builder<'a> { + RenderPassCreateInfo2Builder { + inner: RenderPassCreateInfo2::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct RenderPassCreateInfo2KHRBuilder<'a> { - inner: RenderPassCreateInfo2KHR, +pub struct RenderPassCreateInfo2Builder<'a> { + inner: RenderPassCreateInfo2, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsRenderPassCreateInfo2KHR {} -impl<'a> ::std::ops::Deref for RenderPassCreateInfo2KHRBuilder<'a> { - type Target = RenderPassCreateInfo2KHR; +pub unsafe trait ExtendsRenderPassCreateInfo2 {} +impl<'a> ::std::ops::Deref for RenderPassCreateInfo2Builder<'a> { + type Target = RenderPassCreateInfo2; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for RenderPassCreateInfo2KHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for RenderPassCreateInfo2Builder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> RenderPassCreateInfo2KHRBuilder<'a> { - pub fn flags(mut self, flags: RenderPassCreateFlags) -> RenderPassCreateInfo2KHRBuilder<'a> { +impl<'a> RenderPassCreateInfo2Builder<'a> { + pub fn flags(mut self, flags: RenderPassCreateFlags) -> RenderPassCreateInfo2Builder<'a> { self.inner.flags = flags; self } pub fn attachments( mut self, - attachments: &'a [AttachmentDescription2KHR], - ) -> 
RenderPassCreateInfo2KHRBuilder<'a> { + attachments: &'a [AttachmentDescription2], + ) -> RenderPassCreateInfo2Builder<'a> { self.inner.attachment_count = attachments.len() as _; self.inner.p_attachments = attachments.as_ptr(); self } pub fn subpasses( mut self, - subpasses: &'a [SubpassDescription2KHR], - ) -> RenderPassCreateInfo2KHRBuilder<'a> { + subpasses: &'a [SubpassDescription2], + ) -> RenderPassCreateInfo2Builder<'a> { self.inner.subpass_count = subpasses.len() as _; self.inner.p_subpasses = subpasses.as_ptr(); self } pub fn dependencies( mut self, - dependencies: &'a [SubpassDependency2KHR], - ) -> RenderPassCreateInfo2KHRBuilder<'a> { + dependencies: &'a [SubpassDependency2], + ) -> RenderPassCreateInfo2Builder<'a> { self.inner.dependency_count = dependencies.len() as _; self.inner.p_dependencies = dependencies.as_ptr(); self @@ -36048,7 +37065,7 @@ impl<'a> RenderPassCreateInfo2KHRBuilder<'a> { pub fn correlated_view_masks( mut self, correlated_view_masks: &'a [u32], - ) -> RenderPassCreateInfo2KHRBuilder<'a> { + ) -> RenderPassCreateInfo2Builder<'a> { self.inner.correlated_view_mask_count = correlated_view_masks.len() as _; self.inner.p_correlated_view_masks = correlated_view_masks.as_ptr(); self @@ -36058,10 +37075,10 @@ impl<'a> RenderPassCreateInfo2KHRBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> RenderPassCreateInfo2KHRBuilder<'a> { + ) -> RenderPassCreateInfo2Builder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -36073,54 +37090,54 @@ impl<'a> RenderPassCreateInfo2KHRBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> RenderPassCreateInfo2KHR { + pub fn build(self) -> RenderPassCreateInfo2 { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct SubpassBeginInfoKHR { +#[doc = ""] +pub struct SubpassBeginInfo { pub s_type: StructureType, pub p_next: *const c_void, pub contents: SubpassContents, } -impl ::std::default::Default for SubpassBeginInfoKHR { - fn default() -> SubpassBeginInfoKHR { - SubpassBeginInfoKHR { - s_type: StructureType::SUBPASS_BEGIN_INFO_KHR, +impl ::std::default::Default for SubpassBeginInfo { + fn default() -> SubpassBeginInfo { + SubpassBeginInfo { + s_type: StructureType::SUBPASS_BEGIN_INFO, p_next: ::std::ptr::null(), contents: SubpassContents::default(), } } } -impl SubpassBeginInfoKHR { - pub fn builder<'a>() -> SubpassBeginInfoKHRBuilder<'a> { - SubpassBeginInfoKHRBuilder { - inner: SubpassBeginInfoKHR::default(), +impl SubpassBeginInfo { + pub fn builder<'a>() -> SubpassBeginInfoBuilder<'a> { + SubpassBeginInfoBuilder { + inner: SubpassBeginInfo::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct SubpassBeginInfoKHRBuilder<'a> { - inner: SubpassBeginInfoKHR, +pub struct SubpassBeginInfoBuilder<'a> { + inner: SubpassBeginInfo, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsSubpassBeginInfoKHR {} -impl<'a> ::std::ops::Deref for SubpassBeginInfoKHRBuilder<'a> { - type Target = SubpassBeginInfoKHR; +pub unsafe trait ExtendsSubpassBeginInfo {} +impl<'a> ::std::ops::Deref for SubpassBeginInfoBuilder<'a> { + type Target = SubpassBeginInfo; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for SubpassBeginInfoKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for SubpassBeginInfoBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> 
SubpassBeginInfoKHRBuilder<'a> { - pub fn contents(mut self, contents: SubpassContents) -> SubpassBeginInfoKHRBuilder<'a> { +impl<'a> SubpassBeginInfoBuilder<'a> { + pub fn contents(mut self, contents: SubpassContents) -> SubpassBeginInfoBuilder<'a> { self.inner.contents = contents; self } @@ -36129,10 +37146,10 @@ impl<'a> SubpassBeginInfoKHRBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> SubpassBeginInfoKHRBuilder<'a> { + ) -> SubpassBeginInfoBuilder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -36144,60 +37161,60 @@ impl<'a> SubpassBeginInfoKHRBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> SubpassBeginInfoKHR { + pub fn build(self) -> SubpassBeginInfo { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct SubpassEndInfoKHR { +#[doc = ""] +pub struct SubpassEndInfo { pub s_type: StructureType, pub p_next: *const c_void, } -impl ::std::default::Default for SubpassEndInfoKHR { - fn default() -> SubpassEndInfoKHR { - SubpassEndInfoKHR { - s_type: StructureType::SUBPASS_END_INFO_KHR, +impl ::std::default::Default for SubpassEndInfo { + fn default() -> SubpassEndInfo { + SubpassEndInfo { + s_type: StructureType::SUBPASS_END_INFO, p_next: ::std::ptr::null(), } } } -impl SubpassEndInfoKHR { - pub fn builder<'a>() -> SubpassEndInfoKHRBuilder<'a> { - SubpassEndInfoKHRBuilder { - inner: SubpassEndInfoKHR::default(), +impl SubpassEndInfo { + pub fn builder<'a>() -> SubpassEndInfoBuilder<'a> { + SubpassEndInfoBuilder { + inner: SubpassEndInfo::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct SubpassEndInfoKHRBuilder<'a> { - inner: SubpassEndInfoKHR, +pub struct SubpassEndInfoBuilder<'a> { + inner: SubpassEndInfo, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsSubpassEndInfoKHR {} -impl<'a> ::std::ops::Deref for SubpassEndInfoKHRBuilder<'a> { - type Target = SubpassEndInfoKHR; +pub unsafe trait ExtendsSubpassEndInfo {} +impl<'a> ::std::ops::Deref for SubpassEndInfoBuilder<'a> { + type Target = SubpassEndInfo; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for SubpassEndInfoKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for SubpassEndInfoBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> SubpassEndInfoKHRBuilder<'a> { +impl<'a> SubpassEndInfoBuilder<'a> { #[doc = r" Prepends the given extension struct between the root and the 
first pointer. This"] #[doc = r" method only exists on structs that can be passed to a function directly. Only"] #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> SubpassEndInfoKHRBuilder<'a> { + ) -> SubpassEndInfoBuilder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -36209,13 +37226,438 @@ impl<'a> SubpassEndInfoKHRBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> SubpassEndInfoKHR { + pub fn build(self) -> SubpassEndInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceTimelineSemaphoreFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub timeline_semaphore: Bool32, +} +impl ::std::default::Default for PhysicalDeviceTimelineSemaphoreFeatures { + fn default() -> PhysicalDeviceTimelineSemaphoreFeatures { + PhysicalDeviceTimelineSemaphoreFeatures { + s_type: StructureType::PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, + p_next: ::std::ptr::null_mut(), + timeline_semaphore: Bool32::default(), + } + } +} +impl PhysicalDeviceTimelineSemaphoreFeatures { + pub fn builder<'a>() -> PhysicalDeviceTimelineSemaphoreFeaturesBuilder<'a> { + PhysicalDeviceTimelineSemaphoreFeaturesBuilder { + inner: PhysicalDeviceTimelineSemaphoreFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceTimelineSemaphoreFeaturesBuilder<'a> { + inner: 
PhysicalDeviceTimelineSemaphoreFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceTimelineSemaphoreFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceTimelineSemaphoreFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceTimelineSemaphoreFeaturesBuilder<'a> { + type Target = PhysicalDeviceTimelineSemaphoreFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceTimelineSemaphoreFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceTimelineSemaphoreFeaturesBuilder<'a> { + pub fn timeline_semaphore( + mut self, + timeline_semaphore: bool, + ) -> PhysicalDeviceTimelineSemaphoreFeaturesBuilder<'a> { + self.inner.timeline_semaphore = timeline_semaphore.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceTimelineSemaphoreFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceTimelineSemaphoreProperties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub max_timeline_semaphore_value_difference: u64, +} +impl ::std::default::Default for PhysicalDeviceTimelineSemaphoreProperties { + fn default() -> PhysicalDeviceTimelineSemaphoreProperties { + PhysicalDeviceTimelineSemaphoreProperties { + s_type: StructureType::PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES, + p_next: ::std::ptr::null_mut(), + max_timeline_semaphore_value_difference: u64::default(), + } + } +} +impl PhysicalDeviceTimelineSemaphoreProperties { + pub fn builder<'a>() -> PhysicalDeviceTimelineSemaphorePropertiesBuilder<'a> { + PhysicalDeviceTimelineSemaphorePropertiesBuilder { + inner: PhysicalDeviceTimelineSemaphoreProperties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceTimelineSemaphorePropertiesBuilder<'a> { + inner: PhysicalDeviceTimelineSemaphoreProperties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceTimelineSemaphorePropertiesBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceTimelineSemaphoreProperties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceTimelineSemaphorePropertiesBuilder<'a> { + type Target = PhysicalDeviceTimelineSemaphoreProperties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceTimelineSemaphorePropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceTimelineSemaphorePropertiesBuilder<'a> { + pub fn 
max_timeline_semaphore_value_difference( + mut self, + max_timeline_semaphore_value_difference: u64, + ) -> PhysicalDeviceTimelineSemaphorePropertiesBuilder<'a> { + self.inner.max_timeline_semaphore_value_difference = + max_timeline_semaphore_value_difference; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceTimelineSemaphoreProperties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SemaphoreTypeCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub semaphore_type: SemaphoreType, + pub initial_value: u64, +} +impl ::std::default::Default for SemaphoreTypeCreateInfo { + fn default() -> SemaphoreTypeCreateInfo { + SemaphoreTypeCreateInfo { + s_type: StructureType::SEMAPHORE_TYPE_CREATE_INFO, + p_next: ::std::ptr::null(), + semaphore_type: SemaphoreType::default(), + initial_value: u64::default(), + } + } +} +impl SemaphoreTypeCreateInfo { + pub fn builder<'a>() -> SemaphoreTypeCreateInfoBuilder<'a> { + SemaphoreTypeCreateInfoBuilder { + inner: SemaphoreTypeCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SemaphoreTypeCreateInfoBuilder<'a> { + inner: SemaphoreTypeCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSemaphoreCreateInfo for SemaphoreTypeCreateInfoBuilder<'_> {} +unsafe impl ExtendsSemaphoreCreateInfo for SemaphoreTypeCreateInfo {} +unsafe impl ExtendsPhysicalDeviceExternalSemaphoreInfo for SemaphoreTypeCreateInfoBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceExternalSemaphoreInfo for SemaphoreTypeCreateInfo {} +impl<'a> ::std::ops::Deref for SemaphoreTypeCreateInfoBuilder<'a> { + type Target = SemaphoreTypeCreateInfo; + fn 
deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SemaphoreTypeCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SemaphoreTypeCreateInfoBuilder<'a> { + pub fn semaphore_type( + mut self, + semaphore_type: SemaphoreType, + ) -> SemaphoreTypeCreateInfoBuilder<'a> { + self.inner.semaphore_type = semaphore_type; + self + } + pub fn initial_value(mut self, initial_value: u64) -> SemaphoreTypeCreateInfoBuilder<'a> { + self.inner.initial_value = initial_value; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SemaphoreTypeCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct TimelineSemaphoreSubmitInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub wait_semaphore_value_count: u32, + pub p_wait_semaphore_values: *const u64, + pub signal_semaphore_value_count: u32, + pub p_signal_semaphore_values: *const u64, +} +impl ::std::default::Default for TimelineSemaphoreSubmitInfo { + fn default() -> TimelineSemaphoreSubmitInfo { + TimelineSemaphoreSubmitInfo { + s_type: StructureType::TIMELINE_SEMAPHORE_SUBMIT_INFO, + p_next: ::std::ptr::null(), + wait_semaphore_value_count: u32::default(), + p_wait_semaphore_values: ::std::ptr::null(), + signal_semaphore_value_count: u32::default(), + p_signal_semaphore_values: ::std::ptr::null(), + } + } +} +impl TimelineSemaphoreSubmitInfo { + pub fn builder<'a>() -> TimelineSemaphoreSubmitInfoBuilder<'a> { + TimelineSemaphoreSubmitInfoBuilder { + inner: TimelineSemaphoreSubmitInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct TimelineSemaphoreSubmitInfoBuilder<'a> { + inner: 
TimelineSemaphoreSubmitInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSubmitInfo for TimelineSemaphoreSubmitInfoBuilder<'_> {} +unsafe impl ExtendsSubmitInfo for TimelineSemaphoreSubmitInfo {} +unsafe impl ExtendsBindSparseInfo for TimelineSemaphoreSubmitInfoBuilder<'_> {} +unsafe impl ExtendsBindSparseInfo for TimelineSemaphoreSubmitInfo {} +impl<'a> ::std::ops::Deref for TimelineSemaphoreSubmitInfoBuilder<'a> { + type Target = TimelineSemaphoreSubmitInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for TimelineSemaphoreSubmitInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> TimelineSemaphoreSubmitInfoBuilder<'a> { + pub fn wait_semaphore_values( + mut self, + wait_semaphore_values: &'a [u64], + ) -> TimelineSemaphoreSubmitInfoBuilder<'a> { + self.inner.wait_semaphore_value_count = wait_semaphore_values.len() as _; + self.inner.p_wait_semaphore_values = wait_semaphore_values.as_ptr(); + self + } + pub fn signal_semaphore_values( + mut self, + signal_semaphore_values: &'a [u64], + ) -> TimelineSemaphoreSubmitInfoBuilder<'a> { + self.inner.signal_semaphore_value_count = signal_semaphore_values.len() as _; + self.inner.p_signal_semaphore_values = signal_semaphore_values.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> TimelineSemaphoreSubmitInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SemaphoreWaitInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: SemaphoreWaitFlags, + pub semaphore_count: u32, + pub p_semaphores: *const Semaphore, + pub p_values: *const u64, +} +impl ::std::default::Default for SemaphoreWaitInfo { + fn default() -> SemaphoreWaitInfo { + SemaphoreWaitInfo { + s_type: StructureType::SEMAPHORE_WAIT_INFO, + p_next: ::std::ptr::null(), + flags: SemaphoreWaitFlags::default(), + semaphore_count: u32::default(), + p_semaphores: ::std::ptr::null(), + p_values: ::std::ptr::null(), + } + } +} +impl SemaphoreWaitInfo { + pub fn builder<'a>() -> SemaphoreWaitInfoBuilder<'a> { + SemaphoreWaitInfoBuilder { + inner: SemaphoreWaitInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SemaphoreWaitInfoBuilder<'a> { + inner: SemaphoreWaitInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsSemaphoreWaitInfo {} +impl<'a> ::std::ops::Deref for SemaphoreWaitInfoBuilder<'a> { + type Target = SemaphoreWaitInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SemaphoreWaitInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SemaphoreWaitInfoBuilder<'a> { + pub fn flags(mut self, flags: SemaphoreWaitFlags) -> SemaphoreWaitInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn semaphores(mut self, semaphores: &'a [Semaphore]) -> SemaphoreWaitInfoBuilder<'a> { + self.inner.semaphore_count = semaphores.len() as _; + self.inner.p_semaphores = semaphores.as_ptr(); + self + } + pub fn values(mut self, values: &'a [u64]) -> SemaphoreWaitInfoBuilder<'a> { + 
self.inner.semaphore_count = values.len() as _; + self.inner.p_values = values.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SemaphoreWaitInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SemaphoreWaitInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SemaphoreSignalInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub semaphore: Semaphore, + pub value: u64, +} +impl ::std::default::Default for SemaphoreSignalInfo { + fn default() -> SemaphoreSignalInfo { + SemaphoreSignalInfo { + s_type: StructureType::SEMAPHORE_SIGNAL_INFO, + p_next: ::std::ptr::null(), + semaphore: Semaphore::default(), + value: u64::default(), + } + } +} +impl SemaphoreSignalInfo { + pub fn builder<'a>() -> SemaphoreSignalInfoBuilder<'a> { + SemaphoreSignalInfoBuilder { + inner: SemaphoreSignalInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SemaphoreSignalInfoBuilder<'a> { + inner: SemaphoreSignalInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub 
unsafe trait ExtendsSemaphoreSignalInfo {} +impl<'a> ::std::ops::Deref for SemaphoreSignalInfoBuilder<'a> { + type Target = SemaphoreSignalInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SemaphoreSignalInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SemaphoreSignalInfoBuilder<'a> { + pub fn semaphore(mut self, semaphore: Semaphore) -> SemaphoreSignalInfoBuilder<'a> { + self.inner.semaphore = semaphore; + self + } + pub fn value(mut self, value: u64) -> SemaphoreSignalInfoBuilder<'a> { + self.inner.value = value; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> SemaphoreSignalInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SemaphoreSignalInfo { self.inner } } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct VertexInputBindingDivisorDescriptionEXT { pub binding: u32, pub divisor: u32, @@ -36262,7 +37704,7 @@ impl<'a> VertexInputBindingDivisorDescriptionEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineVertexInputDivisorStateCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -36329,7 +37771,7 @@ impl<'a> PipelineVertexInputDivisorStateCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceVertexAttributeDivisorPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -36390,7 +37832,7 @@ impl<'a> PhysicalDeviceVertexAttributeDivisorPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDevicePCIBusInfoPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -36472,7 +37914,7 @@ impl<'a> PhysicalDevicePCIBusInfoPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImportAndroidHardwareBufferInfoANDROID { pub s_type: StructureType, pub p_next: *const c_void, @@ -36530,7 +37972,7 @@ impl<'a> ImportAndroidHardwareBufferInfoANDROIDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct AndroidHardwareBufferUsageANDROID { pub s_type: StructureType, pub p_next: *mut c_void, @@ -36588,7 +38030,7 @@ impl<'a> AndroidHardwareBufferUsageANDROIDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct AndroidHardwareBufferPropertiesANDROID { pub s_type: StructureType, pub p_next: *mut c_void, @@ -36671,7 +38113,7 @@ impl<'a> 
AndroidHardwareBufferPropertiesANDROIDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryGetAndroidHardwareBufferInfoANDROID { pub s_type: StructureType, pub p_next: *const c_void, @@ -36745,7 +38187,7 @@ impl<'a> MemoryGetAndroidHardwareBufferInfoANDROIDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct AndroidHardwareBufferFormatPropertiesANDROID { pub s_type: StructureType, pub p_next: *mut c_void, @@ -36872,7 +38314,7 @@ impl<'a> AndroidHardwareBufferFormatPropertiesANDROIDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct CommandBufferInheritanceConditionalRenderingInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -36936,7 +38378,7 @@ impl<'a> CommandBufferInheritanceConditionalRenderingInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ExternalFormatANDROID { pub s_type: StructureType, pub p_next: *mut c_void, @@ -36993,18 +38435,18 @@ impl<'a> ExternalFormatANDROIDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDevice8BitStorageFeaturesKHR { +#[doc = ""] +pub struct PhysicalDevice8BitStorageFeatures { pub s_type: StructureType, pub p_next: *mut c_void, pub storage_buffer8_bit_access: Bool32, pub uniform_and_storage_buffer8_bit_access: Bool32, pub storage_push_constant8: Bool32, } -impl ::std::default::Default for PhysicalDevice8BitStorageFeaturesKHR { - fn default() -> PhysicalDevice8BitStorageFeaturesKHR { - PhysicalDevice8BitStorageFeaturesKHR { - s_type: StructureType::PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR, +impl ::std::default::Default for PhysicalDevice8BitStorageFeatures { + fn default() -> PhysicalDevice8BitStorageFeatures { + PhysicalDevice8BitStorageFeatures { + s_type: StructureType::PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES, p_next: ::std::ptr::null_mut(), storage_buffer8_bit_access: Bool32::default(), 
uniform_and_storage_buffer8_bit_access: Bool32::default(), @@ -37012,44 +38454,44 @@ impl ::std::default::Default for PhysicalDevice8BitStorageFeaturesKHR { } } } -impl PhysicalDevice8BitStorageFeaturesKHR { - pub fn builder<'a>() -> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { - PhysicalDevice8BitStorageFeaturesKHRBuilder { - inner: PhysicalDevice8BitStorageFeaturesKHR::default(), +impl PhysicalDevice8BitStorageFeatures { + pub fn builder<'a>() -> PhysicalDevice8BitStorageFeaturesBuilder<'a> { + PhysicalDevice8BitStorageFeaturesBuilder { + inner: PhysicalDevice8BitStorageFeatures::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { - inner: PhysicalDevice8BitStorageFeaturesKHR, +pub struct PhysicalDevice8BitStorageFeaturesBuilder<'a> { + inner: PhysicalDevice8BitStorageFeatures, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsDeviceCreateInfo for PhysicalDevice8BitStorageFeaturesKHRBuilder<'_> {} -unsafe impl ExtendsDeviceCreateInfo for PhysicalDevice8BitStorageFeaturesKHR {} -impl<'a> ::std::ops::Deref for PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { - type Target = PhysicalDevice8BitStorageFeaturesKHR; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevice8BitStorageFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevice8BitStorageFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDevice8BitStorageFeaturesBuilder<'a> { + type Target = PhysicalDevice8BitStorageFeatures; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDevice8BitStorageFeaturesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { +impl<'a> PhysicalDevice8BitStorageFeaturesBuilder<'a> { pub fn storage_buffer8_bit_access( mut self, 
storage_buffer8_bit_access: bool, - ) -> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + ) -> PhysicalDevice8BitStorageFeaturesBuilder<'a> { self.inner.storage_buffer8_bit_access = storage_buffer8_bit_access.into(); self } pub fn uniform_and_storage_buffer8_bit_access( mut self, uniform_and_storage_buffer8_bit_access: bool, - ) -> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + ) -> PhysicalDevice8BitStorageFeaturesBuilder<'a> { self.inner.uniform_and_storage_buffer8_bit_access = uniform_and_storage_buffer8_bit_access.into(); self @@ -37057,20 +38499,20 @@ impl<'a> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { pub fn storage_push_constant8( mut self, storage_push_constant8: bool, - ) -> PhysicalDevice8BitStorageFeaturesKHRBuilder<'a> { + ) -> PhysicalDevice8BitStorageFeaturesBuilder<'a> { self.inner.storage_push_constant8 = storage_push_constant8.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDevice8BitStorageFeaturesKHR { + pub fn build(self) -> PhysicalDevice8BitStorageFeatures { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceConditionalRenderingFeaturesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -37137,141 +38579,152 @@ impl<'a> PhysicalDeviceConditionalRenderingFeaturesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceVulkanMemoryModelFeaturesKHR { +#[doc = ""] +pub struct PhysicalDeviceVulkanMemoryModelFeatures { pub s_type: StructureType, pub p_next: *mut c_void, pub vulkan_memory_model: Bool32, pub vulkan_memory_model_device_scope: Bool32, + pub vulkan_memory_model_availability_visibility_chains: Bool32, } -impl ::std::default::Default for 
PhysicalDeviceVulkanMemoryModelFeaturesKHR { - fn default() -> PhysicalDeviceVulkanMemoryModelFeaturesKHR { - PhysicalDeviceVulkanMemoryModelFeaturesKHR { - s_type: StructureType::PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR, +impl ::std::default::Default for PhysicalDeviceVulkanMemoryModelFeatures { + fn default() -> PhysicalDeviceVulkanMemoryModelFeatures { + PhysicalDeviceVulkanMemoryModelFeatures { + s_type: StructureType::PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES, p_next: ::std::ptr::null_mut(), vulkan_memory_model: Bool32::default(), vulkan_memory_model_device_scope: Bool32::default(), + vulkan_memory_model_availability_visibility_chains: Bool32::default(), } } } -impl PhysicalDeviceVulkanMemoryModelFeaturesKHR { - pub fn builder<'a>() -> PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { - PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder { - inner: PhysicalDeviceVulkanMemoryModelFeaturesKHR::default(), +impl PhysicalDeviceVulkanMemoryModelFeatures { + pub fn builder<'a>() -> PhysicalDeviceVulkanMemoryModelFeaturesBuilder<'a> { + PhysicalDeviceVulkanMemoryModelFeaturesBuilder { + inner: PhysicalDeviceVulkanMemoryModelFeatures::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { - inner: PhysicalDeviceVulkanMemoryModelFeaturesKHR, +pub struct PhysicalDeviceVulkanMemoryModelFeaturesBuilder<'a> { + inner: PhysicalDeviceVulkanMemoryModelFeatures, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'_> {} -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVulkanMemoryModelFeaturesKHR {} -impl<'a> ::std::ops::Deref for PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { - type Target = PhysicalDeviceVulkanMemoryModelFeaturesKHR; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVulkanMemoryModelFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for 
PhysicalDeviceVulkanMemoryModelFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVulkanMemoryModelFeaturesBuilder<'a> { + type Target = PhysicalDeviceVulkanMemoryModelFeatures; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceVulkanMemoryModelFeaturesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { +impl<'a> PhysicalDeviceVulkanMemoryModelFeaturesBuilder<'a> { pub fn vulkan_memory_model( mut self, vulkan_memory_model: bool, - ) -> PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { + ) -> PhysicalDeviceVulkanMemoryModelFeaturesBuilder<'a> { self.inner.vulkan_memory_model = vulkan_memory_model.into(); self } pub fn vulkan_memory_model_device_scope( mut self, vulkan_memory_model_device_scope: bool, - ) -> PhysicalDeviceVulkanMemoryModelFeaturesKHRBuilder<'a> { + ) -> PhysicalDeviceVulkanMemoryModelFeaturesBuilder<'a> { self.inner.vulkan_memory_model_device_scope = vulkan_memory_model_device_scope.into(); self } + pub fn vulkan_memory_model_availability_visibility_chains( + mut self, + vulkan_memory_model_availability_visibility_chains: bool, + ) -> PhysicalDeviceVulkanMemoryModelFeaturesBuilder<'a> { + self.inner + .vulkan_memory_model_availability_visibility_chains = + vulkan_memory_model_availability_visibility_chains.into(); + self + } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceVulkanMemoryModelFeaturesKHR { + pub fn build(self) -> PhysicalDeviceVulkanMemoryModelFeatures { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceShaderAtomicInt64FeaturesKHR { +#[doc = ""] +pub struct PhysicalDeviceShaderAtomicInt64Features { pub s_type: StructureType, pub p_next: *mut c_void, pub shader_buffer_int64_atomics: Bool32, pub shader_shared_int64_atomics: Bool32, } -impl ::std::default::Default for PhysicalDeviceShaderAtomicInt64FeaturesKHR { - fn default() -> PhysicalDeviceShaderAtomicInt64FeaturesKHR { - PhysicalDeviceShaderAtomicInt64FeaturesKHR { - s_type: StructureType::PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR, +impl ::std::default::Default for PhysicalDeviceShaderAtomicInt64Features { + fn default() -> PhysicalDeviceShaderAtomicInt64Features { + PhysicalDeviceShaderAtomicInt64Features { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES, p_next: ::std::ptr::null_mut(), shader_buffer_int64_atomics: Bool32::default(), shader_shared_int64_atomics: Bool32::default(), } } } -impl PhysicalDeviceShaderAtomicInt64FeaturesKHR { - pub fn builder<'a>() -> PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { - PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder { - inner: PhysicalDeviceShaderAtomicInt64FeaturesKHR::default(), +impl PhysicalDeviceShaderAtomicInt64Features { + pub fn builder<'a>() -> PhysicalDeviceShaderAtomicInt64FeaturesBuilder<'a> { + PhysicalDeviceShaderAtomicInt64FeaturesBuilder { + inner: PhysicalDeviceShaderAtomicInt64Features::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { - inner: PhysicalDeviceShaderAtomicInt64FeaturesKHR, +pub struct 
PhysicalDeviceShaderAtomicInt64FeaturesBuilder<'a> { + inner: PhysicalDeviceShaderAtomicInt64Features, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'_> {} -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderAtomicInt64FeaturesKHR {} -impl<'a> ::std::ops::Deref for PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { - type Target = PhysicalDeviceShaderAtomicInt64FeaturesKHR; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderAtomicInt64FeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderAtomicInt64Features {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderAtomicInt64FeaturesBuilder<'a> { + type Target = PhysicalDeviceShaderAtomicInt64Features; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderAtomicInt64FeaturesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { +impl<'a> PhysicalDeviceShaderAtomicInt64FeaturesBuilder<'a> { pub fn shader_buffer_int64_atomics( mut self, shader_buffer_int64_atomics: bool, - ) -> PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { + ) -> PhysicalDeviceShaderAtomicInt64FeaturesBuilder<'a> { self.inner.shader_buffer_int64_atomics = shader_buffer_int64_atomics.into(); self } pub fn shader_shared_int64_atomics( mut self, shader_shared_int64_atomics: bool, - ) -> PhysicalDeviceShaderAtomicInt64FeaturesKHRBuilder<'a> { + ) -> PhysicalDeviceShaderAtomicInt64FeaturesBuilder<'a> { self.inner.shader_shared_int64_atomics = shader_shared_int64_atomics.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceShaderAtomicInt64FeaturesKHR { + pub fn build(self) -> PhysicalDeviceShaderAtomicInt64Features { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceVertexAttributeDivisorFeaturesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -37340,7 +38793,7 @@ impl<'a> PhysicalDeviceVertexAttributeDivisorFeaturesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct QueueFamilyCheckpointPropertiesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -37398,7 +38851,7 @@ impl<'a> QueueFamilyCheckpointPropertiesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct CheckpointDataNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -37478,171 +38931,171 @@ impl<'a> CheckpointDataNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceDepthStencilResolvePropertiesKHR { +#[doc = ""] +pub struct PhysicalDeviceDepthStencilResolveProperties { pub s_type: StructureType, pub p_next: *mut c_void, - pub supported_depth_resolve_modes: ResolveModeFlagsKHR, - pub supported_stencil_resolve_modes: ResolveModeFlagsKHR, + pub supported_depth_resolve_modes: ResolveModeFlags, + pub supported_stencil_resolve_modes: ResolveModeFlags, pub independent_resolve_none: Bool32, pub independent_resolve: Bool32, } -impl ::std::default::Default for PhysicalDeviceDepthStencilResolvePropertiesKHR { - fn default() -> PhysicalDeviceDepthStencilResolvePropertiesKHR { - PhysicalDeviceDepthStencilResolvePropertiesKHR { - s_type: StructureType::PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR, +impl ::std::default::Default for PhysicalDeviceDepthStencilResolveProperties { + fn default() -> 
PhysicalDeviceDepthStencilResolveProperties { + PhysicalDeviceDepthStencilResolveProperties { + s_type: StructureType::PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES, p_next: ::std::ptr::null_mut(), - supported_depth_resolve_modes: ResolveModeFlagsKHR::default(), - supported_stencil_resolve_modes: ResolveModeFlagsKHR::default(), + supported_depth_resolve_modes: ResolveModeFlags::default(), + supported_stencil_resolve_modes: ResolveModeFlags::default(), independent_resolve_none: Bool32::default(), independent_resolve: Bool32::default(), } } } -impl PhysicalDeviceDepthStencilResolvePropertiesKHR { - pub fn builder<'a>() -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { - PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder { - inner: PhysicalDeviceDepthStencilResolvePropertiesKHR::default(), +impl PhysicalDeviceDepthStencilResolveProperties { + pub fn builder<'a>() -> PhysicalDeviceDepthStencilResolvePropertiesBuilder<'a> { + PhysicalDeviceDepthStencilResolvePropertiesBuilder { + inner: PhysicalDeviceDepthStencilResolveProperties::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { - inner: PhysicalDeviceDepthStencilResolvePropertiesKHR, +pub struct PhysicalDeviceDepthStencilResolvePropertiesBuilder<'a> { + inner: PhysicalDeviceDepthStencilResolveProperties, marker: ::std::marker::PhantomData<&'a ()>, } unsafe impl ExtendsPhysicalDeviceProperties2 - for PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'_> + for PhysicalDeviceDepthStencilResolvePropertiesBuilder<'_> { } -unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDepthStencilResolvePropertiesKHR {} -impl<'a> ::std::ops::Deref for PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { - type Target = PhysicalDeviceDepthStencilResolvePropertiesKHR; +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceDepthStencilResolveProperties {} +impl<'a> ::std::ops::Deref for 
PhysicalDeviceDepthStencilResolvePropertiesBuilder<'a> { + type Target = PhysicalDeviceDepthStencilResolveProperties; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDepthStencilResolvePropertiesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { +impl<'a> PhysicalDeviceDepthStencilResolvePropertiesBuilder<'a> { pub fn supported_depth_resolve_modes( mut self, - supported_depth_resolve_modes: ResolveModeFlagsKHR, - ) -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + supported_depth_resolve_modes: ResolveModeFlags, + ) -> PhysicalDeviceDepthStencilResolvePropertiesBuilder<'a> { self.inner.supported_depth_resolve_modes = supported_depth_resolve_modes; self } pub fn supported_stencil_resolve_modes( mut self, - supported_stencil_resolve_modes: ResolveModeFlagsKHR, - ) -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + supported_stencil_resolve_modes: ResolveModeFlags, + ) -> PhysicalDeviceDepthStencilResolvePropertiesBuilder<'a> { self.inner.supported_stencil_resolve_modes = supported_stencil_resolve_modes; self } pub fn independent_resolve_none( mut self, independent_resolve_none: bool, - ) -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceDepthStencilResolvePropertiesBuilder<'a> { self.inner.independent_resolve_none = independent_resolve_none.into(); self } pub fn independent_resolve( mut self, independent_resolve: bool, - ) -> PhysicalDeviceDepthStencilResolvePropertiesKHRBuilder<'a> { + ) -> PhysicalDeviceDepthStencilResolvePropertiesBuilder<'a> { self.inner.independent_resolve = independent_resolve.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceDepthStencilResolvePropertiesKHR { + pub fn build(self) -> PhysicalDeviceDepthStencilResolveProperties { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct SubpassDescriptionDepthStencilResolveKHR { +#[doc = ""] +pub struct SubpassDescriptionDepthStencilResolve { pub s_type: StructureType, pub p_next: *const c_void, - pub depth_resolve_mode: ResolveModeFlagsKHR, - pub stencil_resolve_mode: ResolveModeFlagsKHR, - pub p_depth_stencil_resolve_attachment: *const AttachmentReference2KHR, + pub depth_resolve_mode: ResolveModeFlags, + pub stencil_resolve_mode: ResolveModeFlags, + pub p_depth_stencil_resolve_attachment: *const AttachmentReference2, } -impl ::std::default::Default for SubpassDescriptionDepthStencilResolveKHR { - fn default() -> SubpassDescriptionDepthStencilResolveKHR { - SubpassDescriptionDepthStencilResolveKHR { - s_type: StructureType::SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR, +impl ::std::default::Default for SubpassDescriptionDepthStencilResolve { + fn default() -> SubpassDescriptionDepthStencilResolve { + SubpassDescriptionDepthStencilResolve { + s_type: StructureType::SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE, p_next: ::std::ptr::null(), - depth_resolve_mode: ResolveModeFlagsKHR::default(), - stencil_resolve_mode: ResolveModeFlagsKHR::default(), + depth_resolve_mode: ResolveModeFlags::default(), + stencil_resolve_mode: ResolveModeFlags::default(), p_depth_stencil_resolve_attachment: ::std::ptr::null(), } } } -impl SubpassDescriptionDepthStencilResolveKHR { - pub fn builder<'a>() -> SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { - SubpassDescriptionDepthStencilResolveKHRBuilder { - inner: SubpassDescriptionDepthStencilResolveKHR::default(), +impl SubpassDescriptionDepthStencilResolve { + pub fn builder<'a>() -> 
SubpassDescriptionDepthStencilResolveBuilder<'a> { + SubpassDescriptionDepthStencilResolveBuilder { + inner: SubpassDescriptionDepthStencilResolve::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { - inner: SubpassDescriptionDepthStencilResolveKHR, +pub struct SubpassDescriptionDepthStencilResolveBuilder<'a> { + inner: SubpassDescriptionDepthStencilResolve, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsSubpassDescription2KHR for SubpassDescriptionDepthStencilResolveKHRBuilder<'_> {} -unsafe impl ExtendsSubpassDescription2KHR for SubpassDescriptionDepthStencilResolveKHR {} -impl<'a> ::std::ops::Deref for SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { - type Target = SubpassDescriptionDepthStencilResolveKHR; +unsafe impl ExtendsSubpassDescription2 for SubpassDescriptionDepthStencilResolveBuilder<'_> {} +unsafe impl ExtendsSubpassDescription2 for SubpassDescriptionDepthStencilResolve {} +impl<'a> ::std::ops::Deref for SubpassDescriptionDepthStencilResolveBuilder<'a> { + type Target = SubpassDescriptionDepthStencilResolve; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { +impl<'a> ::std::ops::DerefMut for SubpassDescriptionDepthStencilResolveBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { +impl<'a> SubpassDescriptionDepthStencilResolveBuilder<'a> { pub fn depth_resolve_mode( mut self, - depth_resolve_mode: ResolveModeFlagsKHR, - ) -> SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + depth_resolve_mode: ResolveModeFlags, + ) -> SubpassDescriptionDepthStencilResolveBuilder<'a> { self.inner.depth_resolve_mode = depth_resolve_mode; self } pub fn stencil_resolve_mode( mut self, - stencil_resolve_mode: ResolveModeFlagsKHR, - ) -> 
SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + stencil_resolve_mode: ResolveModeFlags, + ) -> SubpassDescriptionDepthStencilResolveBuilder<'a> { self.inner.stencil_resolve_mode = stencil_resolve_mode; self } pub fn depth_stencil_resolve_attachment( mut self, - depth_stencil_resolve_attachment: &'a AttachmentReference2KHR, - ) -> SubpassDescriptionDepthStencilResolveKHRBuilder<'a> { + depth_stencil_resolve_attachment: &'a AttachmentReference2, + ) -> SubpassDescriptionDepthStencilResolveBuilder<'a> { self.inner.p_depth_stencil_resolve_attachment = depth_stencil_resolve_attachment; self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> SubpassDescriptionDepthStencilResolveKHR { + pub fn build(self) -> SubpassDescriptionDepthStencilResolve { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageViewASTCDecodeModeEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -37697,7 +39150,7 @@ impl<'a> ImageViewASTCDecodeModeEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceASTCDecodeFeaturesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -37755,7 +39208,7 @@ impl<'a> PhysicalDeviceASTCDecodeFeaturesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceTransformFeedbackFeaturesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -37822,7 +39275,7 @@ impl<'a> PhysicalDeviceTransformFeedbackFeaturesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceTransformFeedbackPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -37969,7 +39422,7 @@ impl<'a> 
PhysicalDeviceTransformFeedbackPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineRasterizationStateStreamCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -38042,7 +39495,7 @@ impl<'a> PipelineRasterizationStateStreamCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceRepresentativeFragmentTestFeaturesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -38103,7 +39556,7 @@ impl<'a> PhysicalDeviceRepresentativeFragmentTestFeaturesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineRepresentativeFragmentTestStateCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -38167,7 +39620,7 @@ impl<'a> PipelineRepresentativeFragmentTestStateCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceExclusiveScissorFeaturesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -38225,7 +39678,7 @@ impl<'a> PhysicalDeviceExclusiveScissorFeaturesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineViewportExclusiveScissorStateCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -38292,7 +39745,7 @@ impl<'a> PipelineViewportExclusiveScissorStateCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceCornerSampledImageFeaturesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -38350,7 +39803,7 @@ impl<'a> PhysicalDeviceCornerSampledImageFeaturesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceComputeShaderDerivativesFeaturesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -38420,7 +39873,7 @@ impl<'a> PhysicalDeviceComputeShaderDerivativesFeaturesNVBuilder<'a> { } 
#[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceFragmentShaderBarycentricFeaturesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -38481,7 +39934,7 @@ impl<'a> PhysicalDeviceFragmentShaderBarycentricFeaturesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceShaderImageFootprintFeaturesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -38539,7 +39992,70 @@ impl<'a> PhysicalDeviceShaderImageFootprintFeaturesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub dedicated_allocation_image_aliasing: Bool32, +} +impl ::std::default::Default for PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + fn default() -> PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + dedicated_allocation_image_aliasing: Bool32::default(), + } + } +} +impl PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNVBuilder<'a> { + PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNVBuilder { + inner: PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNVBuilder<'a> { + inner: PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNVBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for 
PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut + for PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNVBuilder<'a> +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNVBuilder<'a> { + pub fn dedicated_allocation_image_aliasing( + mut self, + dedicated_allocation_image_aliasing: bool, + ) -> PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNVBuilder<'a> { + self.inner.dedicated_allocation_image_aliasing = dedicated_allocation_image_aliasing.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct ShadingRatePaletteNV { pub shading_rate_palette_entry_count: u32, pub p_shading_rate_palette_entries: *const ShadingRatePaletteEntryNV, @@ -38594,7 +40110,7 @@ impl<'a> ShadingRatePaletteNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineViewportShadingRateImageStateCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -38670,7 +40186,7 @@ impl<'a> PipelineViewportShadingRateImageStateCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceShadingRateImageFeaturesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -38737,7 +40253,7 @@ impl<'a> 
PhysicalDeviceShadingRateImageFeaturesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceShadingRateImagePropertiesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -38816,7 +40332,7 @@ impl<'a> PhysicalDeviceShadingRateImagePropertiesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct CoarseSampleLocationNV { pub pixel_x: u32, pub pixel_y: u32, @@ -38868,7 +40384,7 @@ impl<'a> CoarseSampleLocationNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct CoarseSampleOrderCustomNV { pub shading_rate: ShadingRatePaletteEntryNV, pub sample_count: u32, @@ -38938,7 +40454,7 @@ impl<'a> CoarseSampleOrderCustomNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PipelineViewportCoarseSampleOrderStateCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -39014,7 +40530,7 @@ impl<'a> PipelineViewportCoarseSampleOrderStateCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceMeshShaderFeaturesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -39081,7 +40597,7 @@ impl<'a> PhysicalDeviceMeshShaderFeaturesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceMeshShaderPropertiesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -39247,7 +40763,7 @@ impl<'a> PhysicalDeviceMeshShaderPropertiesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DrawMeshTasksIndirectCommandNV { pub task_count: u32, pub first_task: u32, @@ -39294,11 +40810,11 @@ impl<'a> DrawMeshTasksIndirectCommandNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct RayTracingShaderGroupCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, - pub ty: 
RayTracingShaderGroupTypeNV, + pub ty: RayTracingShaderGroupTypeKHR, pub general_shader: u32, pub closest_hit_shader: u32, pub any_hit_shader: u32, @@ -39309,7 +40825,7 @@ impl ::std::default::Default for RayTracingShaderGroupCreateInfoNV { RayTracingShaderGroupCreateInfoNV { s_type: StructureType::RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV, p_next: ::std::ptr::null(), - ty: RayTracingShaderGroupTypeNV::default(), + ty: RayTracingShaderGroupTypeKHR::default(), general_shader: u32::default(), closest_hit_shader: u32::default(), any_hit_shader: u32::default(), @@ -39345,7 +40861,7 @@ impl<'a> ::std::ops::DerefMut for RayTracingShaderGroupCreateInfoNVBuilder<'a> { impl<'a> RayTracingShaderGroupCreateInfoNVBuilder<'a> { pub fn ty( mut self, - ty: RayTracingShaderGroupTypeNV, + ty: RayTracingShaderGroupTypeKHR, ) -> RayTracingShaderGroupCreateInfoNVBuilder<'a> { self.inner.ty = ty; self @@ -39404,7 +40920,126 @@ impl<'a> RayTracingShaderGroupCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct RayTracingShaderGroupCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub ty: RayTracingShaderGroupTypeKHR, + pub general_shader: u32, + pub closest_hit_shader: u32, + pub any_hit_shader: u32, + pub intersection_shader: u32, + pub p_shader_group_capture_replay_handle: *const c_void, +} +impl ::std::default::Default for RayTracingShaderGroupCreateInfoKHR { + fn default() -> RayTracingShaderGroupCreateInfoKHR { + RayTracingShaderGroupCreateInfoKHR { + s_type: StructureType::RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + ty: RayTracingShaderGroupTypeKHR::default(), + general_shader: u32::default(), + closest_hit_shader: u32::default(), + any_hit_shader: u32::default(), + intersection_shader: u32::default(), + p_shader_group_capture_replay_handle: ::std::ptr::null(), + } + } +} +impl RayTracingShaderGroupCreateInfoKHR { + pub fn builder<'a>() -> 
RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + RayTracingShaderGroupCreateInfoKHRBuilder { + inner: RayTracingShaderGroupCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + inner: RayTracingShaderGroupCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsRayTracingShaderGroupCreateInfoKHR {} +impl<'a> ::std::ops::Deref for RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + type Target = RayTracingShaderGroupCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + pub fn ty( + mut self, + ty: RayTracingShaderGroupTypeKHR, + ) -> RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn general_shader( + mut self, + general_shader: u32, + ) -> RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + self.inner.general_shader = general_shader; + self + } + pub fn closest_hit_shader( + mut self, + closest_hit_shader: u32, + ) -> RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + self.inner.closest_hit_shader = closest_hit_shader; + self + } + pub fn any_hit_shader( + mut self, + any_hit_shader: u32, + ) -> RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + self.inner.any_hit_shader = any_hit_shader; + self + } + pub fn intersection_shader( + mut self, + intersection_shader: u32, + ) -> RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + self.inner.intersection_shader = intersection_shader; + self + } + pub fn shader_group_capture_replay_handle( + mut self, + shader_group_capture_replay_handle: &'a c_void, + ) -> RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + self.inner.p_shader_group_capture_replay_handle = shader_group_capture_replay_handle; + self + } + #[doc = r" Prepends 
the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> RayTracingShaderGroupCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RayTracingShaderGroupCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct RayTracingPipelineCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -39535,7 +41170,156 @@ impl<'a> RayTracingPipelineCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct RayTracingPipelineCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineCreateFlags, + pub stage_count: u32, + pub p_stages: *const PipelineShaderStageCreateInfo, + pub group_count: u32, + pub p_groups: *const RayTracingShaderGroupCreateInfoKHR, + pub max_recursion_depth: u32, + pub libraries: PipelineLibraryCreateInfoKHR, + pub p_library_interface: *const RayTracingPipelineInterfaceCreateInfoKHR, + pub layout: PipelineLayout, + pub base_pipeline_handle: Pipeline, + pub base_pipeline_index: i32, +} +impl ::std::default::Default for 
RayTracingPipelineCreateInfoKHR { + fn default() -> RayTracingPipelineCreateInfoKHR { + RayTracingPipelineCreateInfoKHR { + s_type: StructureType::RAY_TRACING_PIPELINE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + flags: PipelineCreateFlags::default(), + stage_count: u32::default(), + p_stages: ::std::ptr::null(), + group_count: u32::default(), + p_groups: ::std::ptr::null(), + max_recursion_depth: u32::default(), + libraries: PipelineLibraryCreateInfoKHR::default(), + p_library_interface: ::std::ptr::null(), + layout: PipelineLayout::default(), + base_pipeline_handle: Pipeline::default(), + base_pipeline_index: i32::default(), + } + } +} +impl RayTracingPipelineCreateInfoKHR { + pub fn builder<'a>() -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + RayTracingPipelineCreateInfoKHRBuilder { + inner: RayTracingPipelineCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RayTracingPipelineCreateInfoKHRBuilder<'a> { + inner: RayTracingPipelineCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsRayTracingPipelineCreateInfoKHR {} +impl<'a> ::std::ops::Deref for RayTracingPipelineCreateInfoKHRBuilder<'a> { + type Target = RayTracingPipelineCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RayTracingPipelineCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RayTracingPipelineCreateInfoKHRBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineCreateFlags, + ) -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn stages( + mut self, + stages: &'a [PipelineShaderStageCreateInfo], + ) -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + self.inner.stage_count = stages.len() as _; + self.inner.p_stages = stages.as_ptr(); + self + } + pub fn groups( + mut self, + groups: &'a [RayTracingShaderGroupCreateInfoKHR], + ) -> 
RayTracingPipelineCreateInfoKHRBuilder<'a> { + self.inner.group_count = groups.len() as _; + self.inner.p_groups = groups.as_ptr(); + self + } + pub fn max_recursion_depth( + mut self, + max_recursion_depth: u32, + ) -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + self.inner.max_recursion_depth = max_recursion_depth; + self + } + pub fn libraries( + mut self, + libraries: PipelineLibraryCreateInfoKHR, + ) -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + self.inner.libraries = libraries; + self + } + pub fn library_interface( + mut self, + library_interface: &'a RayTracingPipelineInterfaceCreateInfoKHR, + ) -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + self.inner.p_library_interface = library_interface; + self + } + pub fn layout(mut self, layout: PipelineLayout) -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + self.inner.layout = layout; + self + } + pub fn base_pipeline_handle( + mut self, + base_pipeline_handle: Pipeline, + ) -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + self.inner.base_pipeline_handle = base_pipeline_handle; + self + } + pub fn base_pipeline_index( + mut self, + base_pipeline_index: i32, + ) -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + self.inner.base_pipeline_index = base_pipeline_index; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> RayTracingPipelineCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RayTracingPipelineCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct GeometryTrianglesNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -39669,7 +41453,7 @@ impl<'a> GeometryTrianglesNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct GeometryAABBNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -39758,7 +41542,7 @@ impl<'a> GeometryAABBNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct GeometryDataNV { pub triangles: GeometryTrianglesNV, pub aabbs: GeometryAABBNV, @@ -39805,22 +41589,22 @@ impl<'a> GeometryDataNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct GeometryNV { pub s_type: StructureType, pub p_next: *const c_void, - pub geometry_type: GeometryTypeNV, + pub geometry_type: GeometryTypeKHR, pub geometry: GeometryDataNV, - pub flags: GeometryFlagsNV, + pub flags: GeometryFlagsKHR, } impl ::std::default::Default for GeometryNV { fn default() -> GeometryNV { GeometryNV { s_type: 
StructureType::GEOMETRY_NV, p_next: ::std::ptr::null(), - geometry_type: GeometryTypeNV::default(), + geometry_type: GeometryTypeKHR::default(), geometry: GeometryDataNV::default(), - flags: GeometryFlagsNV::default(), + flags: GeometryFlagsKHR::default(), } } } @@ -39850,7 +41634,7 @@ impl<'a> ::std::ops::DerefMut for GeometryNVBuilder<'a> { } } impl<'a> GeometryNVBuilder<'a> { - pub fn geometry_type(mut self, geometry_type: GeometryTypeNV) -> GeometryNVBuilder<'a> { + pub fn geometry_type(mut self, geometry_type: GeometryTypeKHR) -> GeometryNVBuilder<'a> { self.inner.geometry_type = geometry_type; self } @@ -39858,7 +41642,7 @@ impl<'a> GeometryNVBuilder<'a> { self.inner.geometry = geometry; self } - pub fn flags(mut self, flags: GeometryFlagsNV) -> GeometryNVBuilder<'a> { + pub fn flags(mut self, flags: GeometryFlagsKHR) -> GeometryNVBuilder<'a> { self.inner.flags = flags; self } @@ -39885,7 +41669,7 @@ impl<'a> GeometryNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct AccelerationStructureInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -39983,7 +41767,7 @@ impl<'a> AccelerationStructureInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct AccelerationStructureCreateInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -40066,22 +41850,22 @@ impl<'a> AccelerationStructureCreateInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct BindAccelerationStructureMemoryInfoNV { +#[doc = ""] +pub struct BindAccelerationStructureMemoryInfoKHR { pub s_type: StructureType, pub p_next: *const c_void, - pub acceleration_structure: AccelerationStructureNV, + pub acceleration_structure: AccelerationStructureKHR, pub memory: DeviceMemory, pub memory_offset: DeviceSize, pub device_index_count: u32, pub p_device_indices: *const u32, } -impl ::std::default::Default for BindAccelerationStructureMemoryInfoNV { - fn default() -> 
BindAccelerationStructureMemoryInfoNV { - BindAccelerationStructureMemoryInfoNV { - s_type: StructureType::BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV, +impl ::std::default::Default for BindAccelerationStructureMemoryInfoKHR { + fn default() -> BindAccelerationStructureMemoryInfoKHR { + BindAccelerationStructureMemoryInfoKHR { + s_type: StructureType::BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR, p_next: ::std::ptr::null(), - acceleration_structure: AccelerationStructureNV::default(), + acceleration_structure: AccelerationStructureKHR::default(), memory: DeviceMemory::default(), memory_offset: DeviceSize::default(), device_index_count: u32::default(), @@ -40089,57 +41873,57 @@ impl ::std::default::Default for BindAccelerationStructureMemoryInfoNV { } } } -impl BindAccelerationStructureMemoryInfoNV { - pub fn builder<'a>() -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { - BindAccelerationStructureMemoryInfoNVBuilder { - inner: BindAccelerationStructureMemoryInfoNV::default(), +impl BindAccelerationStructureMemoryInfoKHR { + pub fn builder<'a>() -> BindAccelerationStructureMemoryInfoKHRBuilder<'a> { + BindAccelerationStructureMemoryInfoKHRBuilder { + inner: BindAccelerationStructureMemoryInfoKHR::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct BindAccelerationStructureMemoryInfoNVBuilder<'a> { - inner: BindAccelerationStructureMemoryInfoNV, +pub struct BindAccelerationStructureMemoryInfoKHRBuilder<'a> { + inner: BindAccelerationStructureMemoryInfoKHR, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsBindAccelerationStructureMemoryInfoNV {} -impl<'a> ::std::ops::Deref for BindAccelerationStructureMemoryInfoNVBuilder<'a> { - type Target = BindAccelerationStructureMemoryInfoNV; +pub unsafe trait ExtendsBindAccelerationStructureMemoryInfoKHR {} +impl<'a> ::std::ops::Deref for BindAccelerationStructureMemoryInfoKHRBuilder<'a> { + type Target = BindAccelerationStructureMemoryInfoKHR; fn deref(&self) -> 
&Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for BindAccelerationStructureMemoryInfoNVBuilder<'a> { +impl<'a> ::std::ops::DerefMut for BindAccelerationStructureMemoryInfoKHRBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> BindAccelerationStructureMemoryInfoNVBuilder<'a> { +impl<'a> BindAccelerationStructureMemoryInfoKHRBuilder<'a> { pub fn acceleration_structure( mut self, - acceleration_structure: AccelerationStructureNV, - ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + acceleration_structure: AccelerationStructureKHR, + ) -> BindAccelerationStructureMemoryInfoKHRBuilder<'a> { self.inner.acceleration_structure = acceleration_structure; self } pub fn memory( mut self, memory: DeviceMemory, - ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + ) -> BindAccelerationStructureMemoryInfoKHRBuilder<'a> { self.inner.memory = memory; self } pub fn memory_offset( mut self, memory_offset: DeviceSize, - ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + ) -> BindAccelerationStructureMemoryInfoKHRBuilder<'a> { self.inner.memory_offset = memory_offset; self } pub fn device_indices( mut self, device_indices: &'a [u32], - ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + ) -> BindAccelerationStructureMemoryInfoKHRBuilder<'a> { self.inner.device_index_count = device_indices.len() as _; self.inner.p_device_indices = device_indices.as_ptr(); self @@ -40149,10 +41933,10 @@ impl<'a> BindAccelerationStructureMemoryInfoNVBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> BindAccelerationStructureMemoryInfoNVBuilder<'a> { + ) -> BindAccelerationStructureMemoryInfoKHRBuilder<'a> { unsafe { let next_ptr = next as *mut T as *mut 
BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -40164,60 +41948,60 @@ impl<'a> BindAccelerationStructureMemoryInfoNVBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> BindAccelerationStructureMemoryInfoNV { + pub fn build(self) -> BindAccelerationStructureMemoryInfoKHR { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct WriteDescriptorSetAccelerationStructureNV { +#[doc = ""] +pub struct WriteDescriptorSetAccelerationStructureKHR { pub s_type: StructureType, pub p_next: *const c_void, pub acceleration_structure_count: u32, - pub p_acceleration_structures: *const AccelerationStructureNV, + pub p_acceleration_structures: *const AccelerationStructureKHR, } -impl ::std::default::Default for WriteDescriptorSetAccelerationStructureNV { - fn default() -> WriteDescriptorSetAccelerationStructureNV { - WriteDescriptorSetAccelerationStructureNV { - s_type: StructureType::WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV, +impl ::std::default::Default for WriteDescriptorSetAccelerationStructureKHR { + fn default() -> WriteDescriptorSetAccelerationStructureKHR { + WriteDescriptorSetAccelerationStructureKHR { + s_type: StructureType::WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, p_next: ::std::ptr::null(), acceleration_structure_count: u32::default(), p_acceleration_structures: ::std::ptr::null(), } } } -impl WriteDescriptorSetAccelerationStructureNV { - pub fn builder<'a>() -> WriteDescriptorSetAccelerationStructureNVBuilder<'a> { - WriteDescriptorSetAccelerationStructureNVBuilder { - inner: WriteDescriptorSetAccelerationStructureNV::default(), +impl WriteDescriptorSetAccelerationStructureKHR { + pub fn builder<'a>() -> 
WriteDescriptorSetAccelerationStructureKHRBuilder<'a> { + WriteDescriptorSetAccelerationStructureKHRBuilder { + inner: WriteDescriptorSetAccelerationStructureKHR::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct WriteDescriptorSetAccelerationStructureNVBuilder<'a> { - inner: WriteDescriptorSetAccelerationStructureNV, +pub struct WriteDescriptorSetAccelerationStructureKHRBuilder<'a> { + inner: WriteDescriptorSetAccelerationStructureKHR, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsWriteDescriptorSet for WriteDescriptorSetAccelerationStructureNVBuilder<'_> {} -unsafe impl ExtendsWriteDescriptorSet for WriteDescriptorSetAccelerationStructureNV {} -impl<'a> ::std::ops::Deref for WriteDescriptorSetAccelerationStructureNVBuilder<'a> { - type Target = WriteDescriptorSetAccelerationStructureNV; +unsafe impl ExtendsWriteDescriptorSet for WriteDescriptorSetAccelerationStructureKHRBuilder<'_> {} +unsafe impl ExtendsWriteDescriptorSet for WriteDescriptorSetAccelerationStructureKHR {} +impl<'a> ::std::ops::Deref for WriteDescriptorSetAccelerationStructureKHRBuilder<'a> { + type Target = WriteDescriptorSetAccelerationStructureKHR; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for WriteDescriptorSetAccelerationStructureNVBuilder<'a> { +impl<'a> ::std::ops::DerefMut for WriteDescriptorSetAccelerationStructureKHRBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> WriteDescriptorSetAccelerationStructureNVBuilder<'a> { +impl<'a> WriteDescriptorSetAccelerationStructureKHRBuilder<'a> { pub fn acceleration_structures( mut self, - acceleration_structures: &'a [AccelerationStructureNV], - ) -> WriteDescriptorSetAccelerationStructureNVBuilder<'a> { + acceleration_structures: &'a [AccelerationStructureKHR], + ) -> WriteDescriptorSetAccelerationStructureKHRBuilder<'a> { self.inner.acceleration_structure_count = acceleration_structures.len() as _; 
self.inner.p_acceleration_structures = acceleration_structures.as_ptr(); self @@ -40225,13 +42009,105 @@ impl<'a> WriteDescriptorSetAccelerationStructureNVBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> WriteDescriptorSetAccelerationStructureNV { + pub fn build(self) -> WriteDescriptorSetAccelerationStructureKHR { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct AccelerationStructureMemoryRequirementsInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub ty: AccelerationStructureMemoryRequirementsTypeKHR, + pub build_type: AccelerationStructureBuildTypeKHR, + pub acceleration_structure: AccelerationStructureKHR, +} +impl ::std::default::Default for AccelerationStructureMemoryRequirementsInfoKHR { + fn default() -> AccelerationStructureMemoryRequirementsInfoKHR { + AccelerationStructureMemoryRequirementsInfoKHR { + s_type: StructureType::ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR, + p_next: ::std::ptr::null(), + ty: AccelerationStructureMemoryRequirementsTypeKHR::default(), + build_type: AccelerationStructureBuildTypeKHR::default(), + acceleration_structure: AccelerationStructureKHR::default(), + } + } +} +impl AccelerationStructureMemoryRequirementsInfoKHR { + pub fn builder<'a>() -> AccelerationStructureMemoryRequirementsInfoKHRBuilder<'a> { + AccelerationStructureMemoryRequirementsInfoKHRBuilder { + inner: AccelerationStructureMemoryRequirementsInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureMemoryRequirementsInfoKHRBuilder<'a> { + inner: AccelerationStructureMemoryRequirementsInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait 
ExtendsAccelerationStructureMemoryRequirementsInfoKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureMemoryRequirementsInfoKHRBuilder<'a> { + type Target = AccelerationStructureMemoryRequirementsInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureMemoryRequirementsInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureMemoryRequirementsInfoKHRBuilder<'a> { + pub fn ty( + mut self, + ty: AccelerationStructureMemoryRequirementsTypeKHR, + ) -> AccelerationStructureMemoryRequirementsInfoKHRBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn build_type( + mut self, + build_type: AccelerationStructureBuildTypeKHR, + ) -> AccelerationStructureMemoryRequirementsInfoKHRBuilder<'a> { + self.inner.build_type = build_type; + self + } + pub fn acceleration_structure( + mut self, + acceleration_structure: AccelerationStructureKHR, + ) -> AccelerationStructureMemoryRequirementsInfoKHRBuilder<'a> { + self.inner.acceleration_structure = acceleration_structure; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureMemoryRequirementsInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureMemoryRequirementsInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct AccelerationStructureMemoryRequirementsInfoNV { pub s_type: StructureType, pub p_next: *const c_void, @@ -40314,7 +42190,272 @@ impl<'a> AccelerationStructureMemoryRequirementsInfoNVBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct PhysicalDeviceRayTracingFeaturesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub ray_tracing: Bool32, + pub ray_tracing_shader_group_handle_capture_replay: Bool32, + pub ray_tracing_shader_group_handle_capture_replay_mixed: Bool32, + pub ray_tracing_acceleration_structure_capture_replay: Bool32, + pub ray_tracing_indirect_trace_rays: Bool32, + pub ray_tracing_indirect_acceleration_structure_build: Bool32, + pub ray_tracing_host_acceleration_structure_commands: Bool32, + pub ray_query: Bool32, + pub ray_tracing_primitive_culling: Bool32, +} +impl ::std::default::Default for PhysicalDeviceRayTracingFeaturesKHR { + fn default() -> PhysicalDeviceRayTracingFeaturesKHR { + PhysicalDeviceRayTracingFeaturesKHR { + s_type: StructureType::PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR, + p_next: ::std::ptr::null_mut(), + ray_tracing: Bool32::default(), + ray_tracing_shader_group_handle_capture_replay: Bool32::default(), + ray_tracing_shader_group_handle_capture_replay_mixed: Bool32::default(), + ray_tracing_acceleration_structure_capture_replay: Bool32::default(), + ray_tracing_indirect_trace_rays: Bool32::default(), + ray_tracing_indirect_acceleration_structure_build: Bool32::default(), + ray_tracing_host_acceleration_structure_commands: Bool32::default(), + ray_query: Bool32::default(), + ray_tracing_primitive_culling: 
Bool32::default(), + } + } +} +impl PhysicalDeviceRayTracingFeaturesKHR { + pub fn builder<'a>() -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + PhysicalDeviceRayTracingFeaturesKHRBuilder { + inner: PhysicalDeviceRayTracingFeaturesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + inner: PhysicalDeviceRayTracingFeaturesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceRayTracingFeaturesKHRBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceRayTracingFeaturesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + type Target = PhysicalDeviceRayTracingFeaturesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + pub fn ray_tracing( + mut self, + ray_tracing: bool, + ) -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + self.inner.ray_tracing = ray_tracing.into(); + self + } + pub fn ray_tracing_shader_group_handle_capture_replay( + mut self, + ray_tracing_shader_group_handle_capture_replay: bool, + ) -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + self.inner.ray_tracing_shader_group_handle_capture_replay = + ray_tracing_shader_group_handle_capture_replay.into(); + self + } + pub fn ray_tracing_shader_group_handle_capture_replay_mixed( + mut self, + ray_tracing_shader_group_handle_capture_replay_mixed: bool, + ) -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + self.inner + .ray_tracing_shader_group_handle_capture_replay_mixed = + ray_tracing_shader_group_handle_capture_replay_mixed.into(); + self + } + pub fn ray_tracing_acceleration_structure_capture_replay( + mut self, + 
ray_tracing_acceleration_structure_capture_replay: bool, + ) -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + self.inner.ray_tracing_acceleration_structure_capture_replay = + ray_tracing_acceleration_structure_capture_replay.into(); + self + } + pub fn ray_tracing_indirect_trace_rays( + mut self, + ray_tracing_indirect_trace_rays: bool, + ) -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + self.inner.ray_tracing_indirect_trace_rays = ray_tracing_indirect_trace_rays.into(); + self + } + pub fn ray_tracing_indirect_acceleration_structure_build( + mut self, + ray_tracing_indirect_acceleration_structure_build: bool, + ) -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + self.inner.ray_tracing_indirect_acceleration_structure_build = + ray_tracing_indirect_acceleration_structure_build.into(); + self + } + pub fn ray_tracing_host_acceleration_structure_commands( + mut self, + ray_tracing_host_acceleration_structure_commands: bool, + ) -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + self.inner.ray_tracing_host_acceleration_structure_commands = + ray_tracing_host_acceleration_structure_commands.into(); + self + } + pub fn ray_query(mut self, ray_query: bool) -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + self.inner.ray_query = ray_query.into(); + self + } + pub fn ray_tracing_primitive_culling( + mut self, + ray_tracing_primitive_culling: bool, + ) -> PhysicalDeviceRayTracingFeaturesKHRBuilder<'a> { + self.inner.ray_tracing_primitive_culling = ray_tracing_primitive_culling.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceRayTracingFeaturesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceRayTracingPropertiesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_group_handle_size: u32, + pub max_recursion_depth: u32, + pub max_shader_group_stride: u32, + pub shader_group_base_alignment: u32, + pub max_geometry_count: u64, + pub max_instance_count: u64, + pub max_primitive_count: u64, + pub max_descriptor_set_acceleration_structures: u32, + pub shader_group_handle_capture_replay_size: u32, +} +impl ::std::default::Default for PhysicalDeviceRayTracingPropertiesKHR { + fn default() -> PhysicalDeviceRayTracingPropertiesKHR { + PhysicalDeviceRayTracingPropertiesKHR { + s_type: StructureType::PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR, + p_next: ::std::ptr::null_mut(), + shader_group_handle_size: u32::default(), + max_recursion_depth: u32::default(), + max_shader_group_stride: u32::default(), + shader_group_base_alignment: u32::default(), + max_geometry_count: u64::default(), + max_instance_count: u64::default(), + max_primitive_count: u64::default(), + max_descriptor_set_acceleration_structures: u32::default(), + shader_group_handle_capture_replay_size: u32::default(), + } + } +} +impl PhysicalDeviceRayTracingPropertiesKHR { + pub fn builder<'a>() -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + PhysicalDeviceRayTracingPropertiesKHRBuilder { + inner: PhysicalDeviceRayTracingPropertiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + inner: PhysicalDeviceRayTracingPropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for 
PhysicalDeviceRayTracingPropertiesKHRBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceRayTracingPropertiesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + type Target = PhysicalDeviceRayTracingPropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + pub fn shader_group_handle_size( + mut self, + shader_group_handle_size: u32, + ) -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + self.inner.shader_group_handle_size = shader_group_handle_size; + self + } + pub fn max_recursion_depth( + mut self, + max_recursion_depth: u32, + ) -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + self.inner.max_recursion_depth = max_recursion_depth; + self + } + pub fn max_shader_group_stride( + mut self, + max_shader_group_stride: u32, + ) -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + self.inner.max_shader_group_stride = max_shader_group_stride; + self + } + pub fn shader_group_base_alignment( + mut self, + shader_group_base_alignment: u32, + ) -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + self.inner.shader_group_base_alignment = shader_group_base_alignment; + self + } + pub fn max_geometry_count( + mut self, + max_geometry_count: u64, + ) -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + self.inner.max_geometry_count = max_geometry_count; + self + } + pub fn max_instance_count( + mut self, + max_instance_count: u64, + ) -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + self.inner.max_instance_count = max_instance_count; + self + } + pub fn max_primitive_count( + mut self, + max_primitive_count: u64, + ) -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + self.inner.max_primitive_count = max_primitive_count; + self + } + pub fn 
max_descriptor_set_acceleration_structures( + mut self, + max_descriptor_set_acceleration_structures: u32, + ) -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + self.inner.max_descriptor_set_acceleration_structures = + max_descriptor_set_acceleration_structures; + self + } + pub fn shader_group_handle_capture_replay_size( + mut self, + shader_group_handle_capture_replay_size: u32, + ) -> PhysicalDeviceRayTracingPropertiesKHRBuilder<'a> { + self.inner.shader_group_handle_capture_replay_size = + shader_group_handle_capture_replay_size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceRayTracingPropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct PhysicalDeviceRayTracingPropertiesNV { pub s_type: StructureType, pub p_next: *mut c_void, @@ -40435,8 +42576,117 @@ impl<'a> PhysicalDeviceRayTracingPropertiesNVBuilder<'a> { } } #[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct StridedBufferRegionKHR { + pub buffer: Buffer, + pub offset: DeviceSize, + pub stride: DeviceSize, + pub size: DeviceSize, +} +impl StridedBufferRegionKHR { + pub fn builder<'a>() -> StridedBufferRegionKHRBuilder<'a> { + StridedBufferRegionKHRBuilder { + inner: StridedBufferRegionKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct StridedBufferRegionKHRBuilder<'a> { + inner: StridedBufferRegionKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for StridedBufferRegionKHRBuilder<'a> { + type Target = StridedBufferRegionKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for StridedBufferRegionKHRBuilder<'a> { + fn deref_mut(&mut 
self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> StridedBufferRegionKHRBuilder<'a> { + pub fn buffer(mut self, buffer: Buffer) -> StridedBufferRegionKHRBuilder<'a> { + self.inner.buffer = buffer; + self + } + pub fn offset(mut self, offset: DeviceSize) -> StridedBufferRegionKHRBuilder<'a> { + self.inner.offset = offset; + self + } + pub fn stride(mut self, stride: DeviceSize) -> StridedBufferRegionKHRBuilder<'a> { + self.inner.stride = stride; + self + } + pub fn size(mut self, size: DeviceSize) -> StridedBufferRegionKHRBuilder<'a> { + self.inner.size = size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> StridedBufferRegionKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct TraceRaysIndirectCommandKHR { + pub width: u32, + pub height: u32, + pub depth: u32, +} +impl TraceRaysIndirectCommandKHR { + pub fn builder<'a>() -> TraceRaysIndirectCommandKHRBuilder<'a> { + TraceRaysIndirectCommandKHRBuilder { + inner: TraceRaysIndirectCommandKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct TraceRaysIndirectCommandKHRBuilder<'a> { + inner: TraceRaysIndirectCommandKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for TraceRaysIndirectCommandKHRBuilder<'a> { + type Target = TraceRaysIndirectCommandKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for TraceRaysIndirectCommandKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> TraceRaysIndirectCommandKHRBuilder<'a> { + pub fn width(mut self, width: u32) -> TraceRaysIndirectCommandKHRBuilder<'a> { + self.inner.width = width; + self + 
} + pub fn height(mut self, height: u32) -> TraceRaysIndirectCommandKHRBuilder<'a> { + self.inner.height = height; + self + } + pub fn depth(mut self, depth: u32) -> TraceRaysIndirectCommandKHRBuilder<'a> { + self.inner.depth = depth; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> TraceRaysIndirectCommandKHR { + self.inner + } +} +#[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DrmFormatModifierPropertiesListEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -40497,7 +42747,7 @@ impl<'a> DrmFormatModifierPropertiesListEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Default, Debug)] -#[doc = ""] +#[doc = ""] pub struct DrmFormatModifierPropertiesEXT { pub drm_format_modifier: u64, pub drm_format_modifier_plane_count: u32, @@ -40558,7 +42808,7 @@ impl<'a> DrmFormatModifierPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceImageDrmFormatModifierInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -40640,7 +42890,7 @@ impl<'a> PhysicalDeviceImageDrmFormatModifierInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageDrmFormatModifierListCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -40701,7 +42951,7 @@ impl<'a> ImageDrmFormatModifierListCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct ImageDrmFormatModifierExplicitCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -40771,7 +43021,7 @@ impl<'a> ImageDrmFormatModifierExplicitCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct 
ImageDrmFormatModifierPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -40845,67 +43095,67 @@ impl<'a> ImageDrmFormatModifierPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct ImageStencilUsageCreateInfoEXT { +#[doc = ""] +pub struct ImageStencilUsageCreateInfo { pub s_type: StructureType, pub p_next: *const c_void, pub stencil_usage: ImageUsageFlags, } -impl ::std::default::Default for ImageStencilUsageCreateInfoEXT { - fn default() -> ImageStencilUsageCreateInfoEXT { - ImageStencilUsageCreateInfoEXT { - s_type: StructureType::IMAGE_STENCIL_USAGE_CREATE_INFO_EXT, +impl ::std::default::Default for ImageStencilUsageCreateInfo { + fn default() -> ImageStencilUsageCreateInfo { + ImageStencilUsageCreateInfo { + s_type: StructureType::IMAGE_STENCIL_USAGE_CREATE_INFO, p_next: ::std::ptr::null(), stencil_usage: ImageUsageFlags::default(), } } } -impl ImageStencilUsageCreateInfoEXT { - pub fn builder<'a>() -> ImageStencilUsageCreateInfoEXTBuilder<'a> { - ImageStencilUsageCreateInfoEXTBuilder { - inner: ImageStencilUsageCreateInfoEXT::default(), +impl ImageStencilUsageCreateInfo { + pub fn builder<'a>() -> ImageStencilUsageCreateInfoBuilder<'a> { + ImageStencilUsageCreateInfoBuilder { + inner: ImageStencilUsageCreateInfo::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct ImageStencilUsageCreateInfoEXTBuilder<'a> { - inner: ImageStencilUsageCreateInfoEXT, +pub struct ImageStencilUsageCreateInfoBuilder<'a> { + inner: ImageStencilUsageCreateInfo, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsImageCreateInfo for ImageStencilUsageCreateInfoEXTBuilder<'_> {} -unsafe impl ExtendsImageCreateInfo for ImageStencilUsageCreateInfoEXT {} -unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageStencilUsageCreateInfoEXTBuilder<'_> {} -unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageStencilUsageCreateInfoEXT {} -impl<'a> ::std::ops::Deref for 
ImageStencilUsageCreateInfoEXTBuilder<'a> { - type Target = ImageStencilUsageCreateInfoEXT; +unsafe impl ExtendsImageCreateInfo for ImageStencilUsageCreateInfoBuilder<'_> {} +unsafe impl ExtendsImageCreateInfo for ImageStencilUsageCreateInfo {} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageStencilUsageCreateInfoBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for ImageStencilUsageCreateInfo {} +impl<'a> ::std::ops::Deref for ImageStencilUsageCreateInfoBuilder<'a> { + type Target = ImageStencilUsageCreateInfo; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for ImageStencilUsageCreateInfoEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for ImageStencilUsageCreateInfoBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> ImageStencilUsageCreateInfoEXTBuilder<'a> { +impl<'a> ImageStencilUsageCreateInfoBuilder<'a> { pub fn stencil_usage( mut self, stencil_usage: ImageUsageFlags, - ) -> ImageStencilUsageCreateInfoEXTBuilder<'a> { + ) -> ImageStencilUsageCreateInfoBuilder<'a> { self.inner.stencil_usage = stencil_usage; self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> ImageStencilUsageCreateInfoEXT { + pub fn build(self) -> ImageStencilUsageCreateInfo { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct DeviceMemoryOverallocationCreateInfoAMD { pub s_type: StructureType, pub p_next: *const c_void, @@ -40963,7 +43213,7 @@ impl<'a> DeviceMemoryOverallocationCreateInfoAMDBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceFragmentDensityMapFeaturesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -41040,7 +43290,7 @@ impl<'a> PhysicalDeviceFragmentDensityMapFeaturesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceFragmentDensityMapPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -41119,7 +43369,7 @@ impl<'a> PhysicalDeviceFragmentDensityMapPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct RenderPassFragmentDensityMapCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -41149,6 +43399,8 @@ pub struct RenderPassFragmentDensityMapCreateInfoEXTBuilder<'a> { } unsafe impl ExtendsRenderPassCreateInfo for RenderPassFragmentDensityMapCreateInfoEXTBuilder<'_> {} unsafe impl ExtendsRenderPassCreateInfo for RenderPassFragmentDensityMapCreateInfoEXT {} +unsafe impl ExtendsRenderPassCreateInfo2 for RenderPassFragmentDensityMapCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsRenderPassCreateInfo2 for RenderPassFragmentDensityMapCreateInfoEXT {} impl<'a> ::std::ops::Deref for RenderPassFragmentDensityMapCreateInfoEXTBuilder<'a> { type Target = RenderPassFragmentDensityMapCreateInfoEXT; fn deref(&self) -> &Self::Target { @@ -41177,65 +43429,315 @@ impl<'a> 
RenderPassFragmentDensityMapCreateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceScalarBlockLayoutFeaturesEXT { +#[doc = ""] +pub struct PhysicalDeviceScalarBlockLayoutFeatures { pub s_type: StructureType, pub p_next: *mut c_void, pub scalar_block_layout: Bool32, } -impl ::std::default::Default for PhysicalDeviceScalarBlockLayoutFeaturesEXT { - fn default() -> PhysicalDeviceScalarBlockLayoutFeaturesEXT { - PhysicalDeviceScalarBlockLayoutFeaturesEXT { - s_type: StructureType::PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT, +impl ::std::default::Default for PhysicalDeviceScalarBlockLayoutFeatures { + fn default() -> PhysicalDeviceScalarBlockLayoutFeatures { + PhysicalDeviceScalarBlockLayoutFeatures { + s_type: StructureType::PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES, p_next: ::std::ptr::null_mut(), scalar_block_layout: Bool32::default(), } } } -impl PhysicalDeviceScalarBlockLayoutFeaturesEXT { - pub fn builder<'a>() -> PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { - PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder { - inner: PhysicalDeviceScalarBlockLayoutFeaturesEXT::default(), +impl PhysicalDeviceScalarBlockLayoutFeatures { + pub fn builder<'a>() -> PhysicalDeviceScalarBlockLayoutFeaturesBuilder<'a> { + PhysicalDeviceScalarBlockLayoutFeaturesBuilder { + inner: PhysicalDeviceScalarBlockLayoutFeatures::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { - inner: PhysicalDeviceScalarBlockLayoutFeaturesEXT, +pub struct PhysicalDeviceScalarBlockLayoutFeaturesBuilder<'a> { + inner: PhysicalDeviceScalarBlockLayoutFeatures, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'_> {} -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceScalarBlockLayoutFeaturesEXT {} -impl<'a> ::std::ops::Deref for 
PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { - type Target = PhysicalDeviceScalarBlockLayoutFeaturesEXT; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceScalarBlockLayoutFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceScalarBlockLayoutFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceScalarBlockLayoutFeaturesBuilder<'a> { + type Target = PhysicalDeviceScalarBlockLayoutFeatures; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceScalarBlockLayoutFeaturesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { +impl<'a> PhysicalDeviceScalarBlockLayoutFeaturesBuilder<'a> { pub fn scalar_block_layout( mut self, scalar_block_layout: bool, - ) -> PhysicalDeviceScalarBlockLayoutFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceScalarBlockLayoutFeaturesBuilder<'a> { self.inner.scalar_block_layout = scalar_block_layout.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceScalarBlockLayoutFeaturesEXT { + pub fn build(self) -> PhysicalDeviceScalarBlockLayoutFeatures { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct SurfaceProtectedCapabilitiesKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub supports_protected: Bool32, +} +impl ::std::default::Default for SurfaceProtectedCapabilitiesKHR { + fn default() -> SurfaceProtectedCapabilitiesKHR { + SurfaceProtectedCapabilitiesKHR { + s_type: StructureType::SURFACE_PROTECTED_CAPABILITIES_KHR, + p_next: ::std::ptr::null(), + supports_protected: Bool32::default(), + } + } +} +impl SurfaceProtectedCapabilitiesKHR { + pub fn builder<'a>() -> SurfaceProtectedCapabilitiesKHRBuilder<'a> { + SurfaceProtectedCapabilitiesKHRBuilder { + inner: SurfaceProtectedCapabilitiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SurfaceProtectedCapabilitiesKHRBuilder<'a> { + inner: SurfaceProtectedCapabilitiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSurfaceCapabilities2KHR for SurfaceProtectedCapabilitiesKHRBuilder<'_> {} +unsafe impl ExtendsSurfaceCapabilities2KHR for SurfaceProtectedCapabilitiesKHR {} +impl<'a> ::std::ops::Deref for SurfaceProtectedCapabilitiesKHRBuilder<'a> { + type Target = SurfaceProtectedCapabilitiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SurfaceProtectedCapabilitiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SurfaceProtectedCapabilitiesKHRBuilder<'a> { + pub fn supports_protected( + mut self, + supports_protected: bool, + ) -> SurfaceProtectedCapabilitiesKHRBuilder<'a> { + self.inner.supports_protected = supports_protected.into(); + self 
+ } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SurfaceProtectedCapabilitiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceUniformBufferStandardLayoutFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub uniform_buffer_standard_layout: Bool32, +} +impl ::std::default::Default for PhysicalDeviceUniformBufferStandardLayoutFeatures { + fn default() -> PhysicalDeviceUniformBufferStandardLayoutFeatures { + PhysicalDeviceUniformBufferStandardLayoutFeatures { + s_type: StructureType::PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES, + p_next: ::std::ptr::null_mut(), + uniform_buffer_standard_layout: Bool32::default(), + } + } +} +impl PhysicalDeviceUniformBufferStandardLayoutFeatures { + pub fn builder<'a>() -> PhysicalDeviceUniformBufferStandardLayoutFeaturesBuilder<'a> { + PhysicalDeviceUniformBufferStandardLayoutFeaturesBuilder { + inner: PhysicalDeviceUniformBufferStandardLayoutFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceUniformBufferStandardLayoutFeaturesBuilder<'a> { + inner: PhysicalDeviceUniformBufferStandardLayoutFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceUniformBufferStandardLayoutFeaturesBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceUniformBufferStandardLayoutFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceUniformBufferStandardLayoutFeaturesBuilder<'a> { + type Target = PhysicalDeviceUniformBufferStandardLayoutFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for 
PhysicalDeviceUniformBufferStandardLayoutFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceUniformBufferStandardLayoutFeaturesBuilder<'a> { + pub fn uniform_buffer_standard_layout( + mut self, + uniform_buffer_standard_layout: bool, + ) -> PhysicalDeviceUniformBufferStandardLayoutFeaturesBuilder<'a> { + self.inner.uniform_buffer_standard_layout = uniform_buffer_standard_layout.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceUniformBufferStandardLayoutFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceDepthClipEnableFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub depth_clip_enable: Bool32, +} +impl ::std::default::Default for PhysicalDeviceDepthClipEnableFeaturesEXT { + fn default() -> PhysicalDeviceDepthClipEnableFeaturesEXT { + PhysicalDeviceDepthClipEnableFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + depth_clip_enable: Bool32::default(), + } + } +} +impl PhysicalDeviceDepthClipEnableFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceDepthClipEnableFeaturesEXTBuilder<'a> { + PhysicalDeviceDepthClipEnableFeaturesEXTBuilder { + inner: PhysicalDeviceDepthClipEnableFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceDepthClipEnableFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceDepthClipEnableFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDepthClipEnableFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo 
for PhysicalDeviceDepthClipEnableFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDepthClipEnableFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceDepthClipEnableFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDepthClipEnableFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceDepthClipEnableFeaturesEXTBuilder<'a> { + pub fn depth_clip_enable( + mut self, + depth_clip_enable: bool, + ) -> PhysicalDeviceDepthClipEnableFeaturesEXTBuilder<'a> { + self.inner.depth_clip_enable = depth_clip_enable.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDepthClipEnableFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineRasterizationDepthClipStateCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineRasterizationDepthClipStateCreateFlagsEXT, + pub depth_clip_enable: Bool32, +} +impl ::std::default::Default for PipelineRasterizationDepthClipStateCreateInfoEXT { + fn default() -> PipelineRasterizationDepthClipStateCreateInfoEXT { + PipelineRasterizationDepthClipStateCreateInfoEXT { + s_type: StructureType::PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + flags: PipelineRasterizationDepthClipStateCreateFlagsEXT::default(), + depth_clip_enable: Bool32::default(), + } + } +} +impl PipelineRasterizationDepthClipStateCreateInfoEXT { + pub fn builder<'a>() -> PipelineRasterizationDepthClipStateCreateInfoEXTBuilder<'a> { + PipelineRasterizationDepthClipStateCreateInfoEXTBuilder { + inner: 
PipelineRasterizationDepthClipStateCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineRasterizationDepthClipStateCreateInfoEXTBuilder<'a> { + inner: PipelineRasterizationDepthClipStateCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineRasterizationStateCreateInfo + for PipelineRasterizationDepthClipStateCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPipelineRasterizationStateCreateInfo + for PipelineRasterizationDepthClipStateCreateInfoEXT +{ +} +impl<'a> ::std::ops::Deref for PipelineRasterizationDepthClipStateCreateInfoEXTBuilder<'a> { + type Target = PipelineRasterizationDepthClipStateCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineRasterizationDepthClipStateCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineRasterizationDepthClipStateCreateInfoEXTBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineRasterizationDepthClipStateCreateFlagsEXT, + ) -> PipelineRasterizationDepthClipStateCreateInfoEXTBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn depth_clip_enable( + mut self, + depth_clip_enable: bool, + ) -> PipelineRasterizationDepthClipStateCreateInfoEXTBuilder<'a> { + self.inner.depth_clip_enable = depth_clip_enable.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineRasterizationDepthClipStateCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct PhysicalDeviceMemoryBudgetPropertiesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -41305,7 +43807,7 @@ impl<'a> PhysicalDeviceMemoryBudgetPropertiesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceMemoryPriorityFeaturesEXT { pub s_type: StructureType, pub p_next: *mut c_void, @@ -41363,7 +43865,7 @@ impl<'a> PhysicalDeviceMemoryPriorityFeaturesEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] pub struct MemoryPriorityAllocateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, @@ -41418,18 +43920,18 @@ impl<'a> MemoryPriorityAllocateInfoEXTBuilder<'a> { } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct PhysicalDeviceBufferAddressFeaturesEXT { +#[doc = ""] +pub struct PhysicalDeviceBufferDeviceAddressFeatures { pub s_type: StructureType, pub p_next: *mut c_void, pub buffer_device_address: Bool32, pub buffer_device_address_capture_replay: Bool32, pub buffer_device_address_multi_device: Bool32, } -impl ::std::default::Default for PhysicalDeviceBufferAddressFeaturesEXT { - fn default() -> PhysicalDeviceBufferAddressFeaturesEXT { - PhysicalDeviceBufferAddressFeaturesEXT { - s_type: StructureType::PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT, +impl ::std::default::Default for PhysicalDeviceBufferDeviceAddressFeatures { + fn default() -> PhysicalDeviceBufferDeviceAddressFeatures { + PhysicalDeviceBufferDeviceAddressFeatures { + s_type: StructureType::PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES, p_next: ::std::ptr::null_mut(), buffer_device_address: Bool32::default(), buffer_device_address_capture_replay: 
Bool32::default(), @@ -41437,44 +43939,44 @@ impl ::std::default::Default for PhysicalDeviceBufferAddressFeaturesEXT { } } } -impl PhysicalDeviceBufferAddressFeaturesEXT { - pub fn builder<'a>() -> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { - PhysicalDeviceBufferAddressFeaturesEXTBuilder { - inner: PhysicalDeviceBufferAddressFeaturesEXT::default(), +impl PhysicalDeviceBufferDeviceAddressFeatures { + pub fn builder<'a>() -> PhysicalDeviceBufferDeviceAddressFeaturesBuilder<'a> { + PhysicalDeviceBufferDeviceAddressFeaturesBuilder { + inner: PhysicalDeviceBufferDeviceAddressFeatures::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { - inner: PhysicalDeviceBufferAddressFeaturesEXT, +pub struct PhysicalDeviceBufferDeviceAddressFeaturesBuilder<'a> { + inner: PhysicalDeviceBufferDeviceAddressFeatures, marker: ::std::marker::PhantomData<&'a ()>, } -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBufferAddressFeaturesEXTBuilder<'_> {} -unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBufferAddressFeaturesEXT {} -impl<'a> ::std::ops::Deref for PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { - type Target = PhysicalDeviceBufferAddressFeaturesEXT; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBufferDeviceAddressFeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBufferDeviceAddressFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceBufferDeviceAddressFeaturesBuilder<'a> { + type Target = PhysicalDeviceBufferDeviceAddressFeatures; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceBufferDeviceAddressFeaturesBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { +impl<'a> 
PhysicalDeviceBufferDeviceAddressFeaturesBuilder<'a> { pub fn buffer_device_address( mut self, buffer_device_address: bool, - ) -> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceBufferDeviceAddressFeaturesBuilder<'a> { self.inner.buffer_device_address = buffer_device_address.into(); self } pub fn buffer_device_address_capture_replay( mut self, buffer_device_address_capture_replay: bool, - ) -> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceBufferDeviceAddressFeaturesBuilder<'a> { self.inner.buffer_device_address_capture_replay = buffer_device_address_capture_replay.into(); self @@ -41482,61 +43984,138 @@ impl<'a> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { pub fn buffer_device_address_multi_device( mut self, buffer_device_address_multi_device: bool, - ) -> PhysicalDeviceBufferAddressFeaturesEXTBuilder<'a> { + ) -> PhysicalDeviceBufferDeviceAddressFeaturesBuilder<'a> { self.inner.buffer_device_address_multi_device = buffer_device_address_multi_device.into(); self } #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> PhysicalDeviceBufferAddressFeaturesEXT { + pub fn build(self) -> PhysicalDeviceBufferDeviceAddressFeatures { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] -pub struct BufferDeviceAddressInfoEXT { +#[doc = ""] +pub struct PhysicalDeviceBufferDeviceAddressFeaturesEXT { pub s_type: StructureType, - pub p_next: *const c_void, - pub buffer: Buffer, + pub p_next: *mut c_void, + pub buffer_device_address: Bool32, + pub buffer_device_address_capture_replay: Bool32, + pub buffer_device_address_multi_device: Bool32, } -impl ::std::default::Default for BufferDeviceAddressInfoEXT { - fn default() -> BufferDeviceAddressInfoEXT { - BufferDeviceAddressInfoEXT { - s_type: StructureType::BUFFER_DEVICE_ADDRESS_INFO_EXT, - p_next: ::std::ptr::null(), - buffer: Buffer::default(), +impl ::std::default::Default for PhysicalDeviceBufferDeviceAddressFeaturesEXT { + fn default() -> PhysicalDeviceBufferDeviceAddressFeaturesEXT { + PhysicalDeviceBufferDeviceAddressFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + buffer_device_address: Bool32::default(), + buffer_device_address_capture_replay: Bool32::default(), + buffer_device_address_multi_device: Bool32::default(), } } } -impl BufferDeviceAddressInfoEXT { - pub fn builder<'a>() -> BufferDeviceAddressInfoEXTBuilder<'a> { - BufferDeviceAddressInfoEXTBuilder { - inner: BufferDeviceAddressInfoEXT::default(), +impl PhysicalDeviceBufferDeviceAddressFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder<'a> { + PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder { + inner: PhysicalDeviceBufferDeviceAddressFeaturesEXT::default(), marker: ::std::marker::PhantomData, } } } #[repr(transparent)] -pub struct 
BufferDeviceAddressInfoEXTBuilder<'a> { - inner: BufferDeviceAddressInfoEXT, +pub struct PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceBufferDeviceAddressFeaturesEXT, marker: ::std::marker::PhantomData<&'a ()>, } -pub unsafe trait ExtendsBufferDeviceAddressInfoEXT {} -impl<'a> ::std::ops::Deref for BufferDeviceAddressInfoEXTBuilder<'a> { - type Target = BufferDeviceAddressInfoEXT; +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceBufferDeviceAddressFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceBufferDeviceAddressFeaturesEXT; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> ::std::ops::DerefMut for BufferDeviceAddressInfoEXTBuilder<'a> { +impl<'a> ::std::ops::DerefMut for PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl<'a> BufferDeviceAddressInfoEXTBuilder<'a> { - pub fn buffer(mut self, buffer: Buffer) -> BufferDeviceAddressInfoEXTBuilder<'a> { +impl<'a> PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder<'a> { + pub fn buffer_device_address( + mut self, + buffer_device_address: bool, + ) -> PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder<'a> { + self.inner.buffer_device_address = buffer_device_address.into(); + self + } + pub fn buffer_device_address_capture_replay( + mut self, + buffer_device_address_capture_replay: bool, + ) -> PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder<'a> { + self.inner.buffer_device_address_capture_replay = + buffer_device_address_capture_replay.into(); + self + } + pub fn buffer_device_address_multi_device( + mut self, + buffer_device_address_multi_device: bool, + ) -> PhysicalDeviceBufferDeviceAddressFeaturesEXTBuilder<'a> { + self.inner.buffer_device_address_multi_device = 
buffer_device_address_multi_device.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceBufferDeviceAddressFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct BufferDeviceAddressInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub buffer: Buffer, +} +impl ::std::default::Default for BufferDeviceAddressInfo { + fn default() -> BufferDeviceAddressInfo { + BufferDeviceAddressInfo { + s_type: StructureType::BUFFER_DEVICE_ADDRESS_INFO, + p_next: ::std::ptr::null(), + buffer: Buffer::default(), + } + } +} +impl BufferDeviceAddressInfo { + pub fn builder<'a>() -> BufferDeviceAddressInfoBuilder<'a> { + BufferDeviceAddressInfoBuilder { + inner: BufferDeviceAddressInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferDeviceAddressInfoBuilder<'a> { + inner: BufferDeviceAddressInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsBufferDeviceAddressInfo {} +impl<'a> ::std::ops::Deref for BufferDeviceAddressInfoBuilder<'a> { + type Target = BufferDeviceAddressInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferDeviceAddressInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferDeviceAddressInfoBuilder<'a> { + pub fn buffer(mut self, buffer: Buffer) -> BufferDeviceAddressInfoBuilder<'a> { self.inner.buffer = buffer; self } @@ -41545,10 +44124,10 @@ impl<'a> BufferDeviceAddressInfoEXTBuilder<'a> { #[doc = r" valid extension structs can be pushed into the chain."] #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, 
then the"] #[doc = r" chain will look like `A -> D -> B -> C`."] - pub fn push_next( + pub fn push_next( mut self, next: &'a mut T, - ) -> BufferDeviceAddressInfoEXTBuilder<'a> { + ) -> BufferDeviceAddressInfoBuilder<'a> { unsafe { let next_ptr = next as *mut T as *mut BaseOutStructure; let last_next = ptr_chain_iter(next).last().unwrap(); @@ -41560,24 +44139,82 @@ impl<'a> BufferDeviceAddressInfoEXTBuilder<'a> { #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] #[doc = r" so references to builders can be passed directly to Vulkan functions."] - pub fn build(self) -> BufferDeviceAddressInfoEXT { + pub fn build(self) -> BufferDeviceAddressInfo { self.inner } } #[repr(C)] #[derive(Copy, Clone, Debug)] -#[doc = ""] +#[doc = ""] +pub struct BufferOpaqueCaptureAddressCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub opaque_capture_address: u64, +} +impl ::std::default::Default for BufferOpaqueCaptureAddressCreateInfo { + fn default() -> BufferOpaqueCaptureAddressCreateInfo { + BufferOpaqueCaptureAddressCreateInfo { + s_type: StructureType::BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO, + p_next: ::std::ptr::null(), + opaque_capture_address: u64::default(), + } + } +} +impl BufferOpaqueCaptureAddressCreateInfo { + pub fn builder<'a>() -> BufferOpaqueCaptureAddressCreateInfoBuilder<'a> { + BufferOpaqueCaptureAddressCreateInfoBuilder { + inner: BufferOpaqueCaptureAddressCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct BufferOpaqueCaptureAddressCreateInfoBuilder<'a> { + inner: BufferOpaqueCaptureAddressCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsBufferCreateInfo for BufferOpaqueCaptureAddressCreateInfoBuilder<'_> {} +unsafe impl ExtendsBufferCreateInfo for BufferOpaqueCaptureAddressCreateInfo {} +impl<'a> ::std::ops::Deref 
for BufferOpaqueCaptureAddressCreateInfoBuilder<'a> { + type Target = BufferOpaqueCaptureAddressCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for BufferOpaqueCaptureAddressCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> BufferOpaqueCaptureAddressCreateInfoBuilder<'a> { + pub fn opaque_capture_address( + mut self, + opaque_capture_address: u64, + ) -> BufferOpaqueCaptureAddressCreateInfoBuilder<'a> { + self.inner.opaque_capture_address = opaque_capture_address; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> BufferOpaqueCaptureAddressCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] pub struct BufferDeviceAddressCreateInfoEXT { pub s_type: StructureType, pub p_next: *const c_void, - pub device_address: DeviceSize, + pub device_address: DeviceAddress, } impl ::std::default::Default for BufferDeviceAddressCreateInfoEXT { fn default() -> BufferDeviceAddressCreateInfoEXT { BufferDeviceAddressCreateInfoEXT { s_type: StructureType::BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT, p_next: ::std::ptr::null(), - device_address: DeviceSize::default(), + device_address: DeviceAddress::default(), } } } @@ -41610,7 +44247,7 @@ impl<'a> ::std::ops::DerefMut for BufferDeviceAddressCreateInfoEXTBuilder<'a> { impl<'a> BufferDeviceAddressCreateInfoEXTBuilder<'a> { pub fn device_address( mut self, - device_address: DeviceSize, + device_address: DeviceAddress, ) -> BufferDeviceAddressCreateInfoEXTBuilder<'a> { self.inner.device_address = device_address; self @@ -41622,9 +44259,8461 @@ impl<'a> BufferDeviceAddressCreateInfoEXTBuilder<'a> { self.inner } } +#[repr(C)] +#[derive(Copy, 
Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceImageViewImageFormatInfoEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub image_view_type: ImageViewType, +} +impl ::std::default::Default for PhysicalDeviceImageViewImageFormatInfoEXT { + fn default() -> PhysicalDeviceImageViewImageFormatInfoEXT { + PhysicalDeviceImageViewImageFormatInfoEXT { + s_type: StructureType::PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT, + p_next: ::std::ptr::null_mut(), + image_view_type: ImageViewType::default(), + } + } +} +impl PhysicalDeviceImageViewImageFormatInfoEXT { + pub fn builder<'a>() -> PhysicalDeviceImageViewImageFormatInfoEXTBuilder<'a> { + PhysicalDeviceImageViewImageFormatInfoEXTBuilder { + inner: PhysicalDeviceImageViewImageFormatInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceImageViewImageFormatInfoEXTBuilder<'a> { + inner: PhysicalDeviceImageViewImageFormatInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 + for PhysicalDeviceImageViewImageFormatInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceImageFormatInfo2 for PhysicalDeviceImageViewImageFormatInfoEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceImageViewImageFormatInfoEXTBuilder<'a> { + type Target = PhysicalDeviceImageViewImageFormatInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceImageViewImageFormatInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceImageViewImageFormatInfoEXTBuilder<'a> { + pub fn image_view_type( + mut self, + image_view_type: ImageViewType, + ) -> PhysicalDeviceImageViewImageFormatInfoEXTBuilder<'a> { + self.inner.image_view_type = image_view_type; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceImageViewImageFormatInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct FilterCubicImageViewImageFormatPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub filter_cubic: Bool32, + pub filter_cubic_minmax: Bool32, +} +impl ::std::default::Default for FilterCubicImageViewImageFormatPropertiesEXT { + fn default() -> FilterCubicImageViewImageFormatPropertiesEXT { + FilterCubicImageViewImageFormatPropertiesEXT { + s_type: StructureType::FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + filter_cubic: Bool32::default(), + filter_cubic_minmax: Bool32::default(), + } + } +} +impl FilterCubicImageViewImageFormatPropertiesEXT { + pub fn builder<'a>() -> FilterCubicImageViewImageFormatPropertiesEXTBuilder<'a> { + FilterCubicImageViewImageFormatPropertiesEXTBuilder { + inner: FilterCubicImageViewImageFormatPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct FilterCubicImageViewImageFormatPropertiesEXTBuilder<'a> { + inner: FilterCubicImageViewImageFormatPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsImageFormatProperties2 + for FilterCubicImageViewImageFormatPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsImageFormatProperties2 for FilterCubicImageViewImageFormatPropertiesEXT {} +impl<'a> ::std::ops::Deref for FilterCubicImageViewImageFormatPropertiesEXTBuilder<'a> { + type Target = FilterCubicImageViewImageFormatPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for FilterCubicImageViewImageFormatPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> 
FilterCubicImageViewImageFormatPropertiesEXTBuilder<'a> { + pub fn filter_cubic( + mut self, + filter_cubic: bool, + ) -> FilterCubicImageViewImageFormatPropertiesEXTBuilder<'a> { + self.inner.filter_cubic = filter_cubic.into(); + self + } + pub fn filter_cubic_minmax( + mut self, + filter_cubic_minmax: bool, + ) -> FilterCubicImageViewImageFormatPropertiesEXTBuilder<'a> { + self.inner.filter_cubic_minmax = filter_cubic_minmax.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> FilterCubicImageViewImageFormatPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceImagelessFramebufferFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub imageless_framebuffer: Bool32, +} +impl ::std::default::Default for PhysicalDeviceImagelessFramebufferFeatures { + fn default() -> PhysicalDeviceImagelessFramebufferFeatures { + PhysicalDeviceImagelessFramebufferFeatures { + s_type: StructureType::PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES, + p_next: ::std::ptr::null_mut(), + imageless_framebuffer: Bool32::default(), + } + } +} +impl PhysicalDeviceImagelessFramebufferFeatures { + pub fn builder<'a>() -> PhysicalDeviceImagelessFramebufferFeaturesBuilder<'a> { + PhysicalDeviceImagelessFramebufferFeaturesBuilder { + inner: PhysicalDeviceImagelessFramebufferFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceImagelessFramebufferFeaturesBuilder<'a> { + inner: PhysicalDeviceImagelessFramebufferFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceImagelessFramebufferFeaturesBuilder<'_> {} +unsafe impl 
ExtendsDeviceCreateInfo for PhysicalDeviceImagelessFramebufferFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceImagelessFramebufferFeaturesBuilder<'a> { + type Target = PhysicalDeviceImagelessFramebufferFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceImagelessFramebufferFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceImagelessFramebufferFeaturesBuilder<'a> { + pub fn imageless_framebuffer( + mut self, + imageless_framebuffer: bool, + ) -> PhysicalDeviceImagelessFramebufferFeaturesBuilder<'a> { + self.inner.imageless_framebuffer = imageless_framebuffer.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceImagelessFramebufferFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct FramebufferAttachmentsCreateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub attachment_image_info_count: u32, + pub p_attachment_image_infos: *const FramebufferAttachmentImageInfo, +} +impl ::std::default::Default for FramebufferAttachmentsCreateInfo { + fn default() -> FramebufferAttachmentsCreateInfo { + FramebufferAttachmentsCreateInfo { + s_type: StructureType::FRAMEBUFFER_ATTACHMENTS_CREATE_INFO, + p_next: ::std::ptr::null(), + attachment_image_info_count: u32::default(), + p_attachment_image_infos: ::std::ptr::null(), + } + } +} +impl FramebufferAttachmentsCreateInfo { + pub fn builder<'a>() -> FramebufferAttachmentsCreateInfoBuilder<'a> { + FramebufferAttachmentsCreateInfoBuilder { + inner: FramebufferAttachmentsCreateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} 
+#[repr(transparent)] +pub struct FramebufferAttachmentsCreateInfoBuilder<'a> { + inner: FramebufferAttachmentsCreateInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsFramebufferCreateInfo for FramebufferAttachmentsCreateInfoBuilder<'_> {} +unsafe impl ExtendsFramebufferCreateInfo for FramebufferAttachmentsCreateInfo {} +impl<'a> ::std::ops::Deref for FramebufferAttachmentsCreateInfoBuilder<'a> { + type Target = FramebufferAttachmentsCreateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for FramebufferAttachmentsCreateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> FramebufferAttachmentsCreateInfoBuilder<'a> { + pub fn attachment_image_infos( + mut self, + attachment_image_infos: &'a [FramebufferAttachmentImageInfo], + ) -> FramebufferAttachmentsCreateInfoBuilder<'a> { + self.inner.attachment_image_info_count = attachment_image_infos.len() as _; + self.inner.p_attachment_image_infos = attachment_image_infos.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> FramebufferAttachmentsCreateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct FramebufferAttachmentImageInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: ImageCreateFlags, + pub usage: ImageUsageFlags, + pub width: u32, + pub height: u32, + pub layer_count: u32, + pub view_format_count: u32, + pub p_view_formats: *const Format, +} +impl ::std::default::Default for FramebufferAttachmentImageInfo { + fn default() -> FramebufferAttachmentImageInfo { + FramebufferAttachmentImageInfo { + s_type: StructureType::FRAMEBUFFER_ATTACHMENT_IMAGE_INFO, + p_next: ::std::ptr::null(), + flags: ImageCreateFlags::default(), + usage: ImageUsageFlags::default(), + width: u32::default(), + height: u32::default(), + layer_count: u32::default(), + view_format_count: u32::default(), + p_view_formats: ::std::ptr::null(), + } + } +} +impl FramebufferAttachmentImageInfo { + pub fn builder<'a>() -> FramebufferAttachmentImageInfoBuilder<'a> { + FramebufferAttachmentImageInfoBuilder { + inner: FramebufferAttachmentImageInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct FramebufferAttachmentImageInfoBuilder<'a> { + inner: FramebufferAttachmentImageInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsFramebufferAttachmentImageInfo {} +impl<'a> ::std::ops::Deref for FramebufferAttachmentImageInfoBuilder<'a> { + type Target = FramebufferAttachmentImageInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for FramebufferAttachmentImageInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> FramebufferAttachmentImageInfoBuilder<'a> { + pub fn flags(mut self, flags: ImageCreateFlags) -> 
FramebufferAttachmentImageInfoBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn usage(mut self, usage: ImageUsageFlags) -> FramebufferAttachmentImageInfoBuilder<'a> { + self.inner.usage = usage; + self + } + pub fn width(mut self, width: u32) -> FramebufferAttachmentImageInfoBuilder<'a> { + self.inner.width = width; + self + } + pub fn height(mut self, height: u32) -> FramebufferAttachmentImageInfoBuilder<'a> { + self.inner.height = height; + self + } + pub fn layer_count(mut self, layer_count: u32) -> FramebufferAttachmentImageInfoBuilder<'a> { + self.inner.layer_count = layer_count; + self + } + pub fn view_formats( + mut self, + view_formats: &'a [Format], + ) -> FramebufferAttachmentImageInfoBuilder<'a> { + self.inner.view_format_count = view_formats.len() as _; + self.inner.p_view_formats = view_formats.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> FramebufferAttachmentImageInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> FramebufferAttachmentImageInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct RenderPassAttachmentBeginInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub attachment_count: u32, + pub p_attachments: *const ImageView, +} +impl ::std::default::Default for RenderPassAttachmentBeginInfo { + fn default() -> RenderPassAttachmentBeginInfo { + RenderPassAttachmentBeginInfo { + s_type: StructureType::RENDER_PASS_ATTACHMENT_BEGIN_INFO, + p_next: ::std::ptr::null(), + attachment_count: u32::default(), + p_attachments: ::std::ptr::null(), + } + } +} +impl RenderPassAttachmentBeginInfo { + pub fn builder<'a>() -> RenderPassAttachmentBeginInfoBuilder<'a> { + RenderPassAttachmentBeginInfoBuilder { + inner: RenderPassAttachmentBeginInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RenderPassAttachmentBeginInfoBuilder<'a> { + inner: RenderPassAttachmentBeginInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsRenderPassBeginInfo for RenderPassAttachmentBeginInfoBuilder<'_> {} +unsafe impl ExtendsRenderPassBeginInfo for RenderPassAttachmentBeginInfo {} +impl<'a> ::std::ops::Deref for RenderPassAttachmentBeginInfoBuilder<'a> { + type Target = RenderPassAttachmentBeginInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RenderPassAttachmentBeginInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RenderPassAttachmentBeginInfoBuilder<'a> { + pub fn attachments( + mut self, + attachments: &'a [ImageView], + ) -> RenderPassAttachmentBeginInfoBuilder<'a> { + self.inner.attachment_count = attachments.len() as _; + self.inner.p_attachments = attachments.as_ptr(); + self + } + #[doc = r" 
Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RenderPassAttachmentBeginInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub texture_compression_astc_hdr: Bool32, +} +impl ::std::default::Default for PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + fn default() -> PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + texture_compression_astc_hdr: Bool32::default(), + } + } +} +impl PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceTextureCompressionASTCHDRFeaturesEXTBuilder<'a> { + PhysicalDeviceTextureCompressionASTCHDRFeaturesEXTBuilder { + inner: PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceTextureCompressionASTCHDRFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceTextureCompressionASTCHDRFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceTextureCompressionASTCHDRFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for 
PhysicalDeviceTextureCompressionASTCHDRFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceTextureCompressionASTCHDRFeaturesEXTBuilder<'a> { + pub fn texture_compression_astc_hdr( + mut self, + texture_compression_astc_hdr: bool, + ) -> PhysicalDeviceTextureCompressionASTCHDRFeaturesEXTBuilder<'a> { + self.inner.texture_compression_astc_hdr = texture_compression_astc_hdr.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceCooperativeMatrixFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub cooperative_matrix: Bool32, + pub cooperative_matrix_robust_buffer_access: Bool32, +} +impl ::std::default::Default for PhysicalDeviceCooperativeMatrixFeaturesNV { + fn default() -> PhysicalDeviceCooperativeMatrixFeaturesNV { + PhysicalDeviceCooperativeMatrixFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + cooperative_matrix: Bool32::default(), + cooperative_matrix_robust_buffer_access: Bool32::default(), + } + } +} +impl PhysicalDeviceCooperativeMatrixFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceCooperativeMatrixFeaturesNVBuilder<'a> { + PhysicalDeviceCooperativeMatrixFeaturesNVBuilder { + inner: PhysicalDeviceCooperativeMatrixFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceCooperativeMatrixFeaturesNVBuilder<'a> { + inner: PhysicalDeviceCooperativeMatrixFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe 
impl ExtendsDeviceCreateInfo for PhysicalDeviceCooperativeMatrixFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceCooperativeMatrixFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceCooperativeMatrixFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceCooperativeMatrixFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceCooperativeMatrixFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceCooperativeMatrixFeaturesNVBuilder<'a> { + pub fn cooperative_matrix( + mut self, + cooperative_matrix: bool, + ) -> PhysicalDeviceCooperativeMatrixFeaturesNVBuilder<'a> { + self.inner.cooperative_matrix = cooperative_matrix.into(); + self + } + pub fn cooperative_matrix_robust_buffer_access( + mut self, + cooperative_matrix_robust_buffer_access: bool, + ) -> PhysicalDeviceCooperativeMatrixFeaturesNVBuilder<'a> { + self.inner.cooperative_matrix_robust_buffer_access = + cooperative_matrix_robust_buffer_access.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceCooperativeMatrixFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceCooperativeMatrixPropertiesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub cooperative_matrix_supported_stages: ShaderStageFlags, +} +impl ::std::default::Default for PhysicalDeviceCooperativeMatrixPropertiesNV { + fn default() -> PhysicalDeviceCooperativeMatrixPropertiesNV { + PhysicalDeviceCooperativeMatrixPropertiesNV { + s_type: StructureType::PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV, + p_next: ::std::ptr::null_mut(), + cooperative_matrix_supported_stages: ShaderStageFlags::default(), + } + } +} +impl PhysicalDeviceCooperativeMatrixPropertiesNV { + pub fn builder<'a>() -> PhysicalDeviceCooperativeMatrixPropertiesNVBuilder<'a> { + PhysicalDeviceCooperativeMatrixPropertiesNVBuilder { + inner: PhysicalDeviceCooperativeMatrixPropertiesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceCooperativeMatrixPropertiesNVBuilder<'a> { + inner: PhysicalDeviceCooperativeMatrixPropertiesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceCooperativeMatrixPropertiesNVBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceCooperativeMatrixPropertiesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceCooperativeMatrixPropertiesNVBuilder<'a> { + type Target = PhysicalDeviceCooperativeMatrixPropertiesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceCooperativeMatrixPropertiesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> 
PhysicalDeviceCooperativeMatrixPropertiesNVBuilder<'a> { + pub fn cooperative_matrix_supported_stages( + mut self, + cooperative_matrix_supported_stages: ShaderStageFlags, + ) -> PhysicalDeviceCooperativeMatrixPropertiesNVBuilder<'a> { + self.inner.cooperative_matrix_supported_stages = cooperative_matrix_supported_stages; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceCooperativeMatrixPropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CooperativeMatrixPropertiesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub m_size: u32, + pub n_size: u32, + pub k_size: u32, + pub a_type: ComponentTypeNV, + pub b_type: ComponentTypeNV, + pub c_type: ComponentTypeNV, + pub d_type: ComponentTypeNV, + pub scope: ScopeNV, +} +impl ::std::default::Default for CooperativeMatrixPropertiesNV { + fn default() -> CooperativeMatrixPropertiesNV { + CooperativeMatrixPropertiesNV { + s_type: StructureType::COOPERATIVE_MATRIX_PROPERTIES_NV, + p_next: ::std::ptr::null_mut(), + m_size: u32::default(), + n_size: u32::default(), + k_size: u32::default(), + a_type: ComponentTypeNV::default(), + b_type: ComponentTypeNV::default(), + c_type: ComponentTypeNV::default(), + d_type: ComponentTypeNV::default(), + scope: ScopeNV::default(), + } + } +} +impl CooperativeMatrixPropertiesNV { + pub fn builder<'a>() -> CooperativeMatrixPropertiesNVBuilder<'a> { + CooperativeMatrixPropertiesNVBuilder { + inner: CooperativeMatrixPropertiesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CooperativeMatrixPropertiesNVBuilder<'a> { + inner: CooperativeMatrixPropertiesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub 
unsafe trait ExtendsCooperativeMatrixPropertiesNV {} +impl<'a> ::std::ops::Deref for CooperativeMatrixPropertiesNVBuilder<'a> { + type Target = CooperativeMatrixPropertiesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CooperativeMatrixPropertiesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CooperativeMatrixPropertiesNVBuilder<'a> { + pub fn m_size(mut self, m_size: u32) -> CooperativeMatrixPropertiesNVBuilder<'a> { + self.inner.m_size = m_size; + self + } + pub fn n_size(mut self, n_size: u32) -> CooperativeMatrixPropertiesNVBuilder<'a> { + self.inner.n_size = n_size; + self + } + pub fn k_size(mut self, k_size: u32) -> CooperativeMatrixPropertiesNVBuilder<'a> { + self.inner.k_size = k_size; + self + } + pub fn a_type(mut self, a_type: ComponentTypeNV) -> CooperativeMatrixPropertiesNVBuilder<'a> { + self.inner.a_type = a_type; + self + } + pub fn b_type(mut self, b_type: ComponentTypeNV) -> CooperativeMatrixPropertiesNVBuilder<'a> { + self.inner.b_type = b_type; + self + } + pub fn c_type(mut self, c_type: ComponentTypeNV) -> CooperativeMatrixPropertiesNVBuilder<'a> { + self.inner.c_type = c_type; + self + } + pub fn d_type(mut self, d_type: ComponentTypeNV) -> CooperativeMatrixPropertiesNVBuilder<'a> { + self.inner.d_type = d_type; + self + } + pub fn scope(mut self, scope: ScopeNV) -> CooperativeMatrixPropertiesNVBuilder<'a> { + self.inner.scope = scope; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> CooperativeMatrixPropertiesNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CooperativeMatrixPropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceYcbcrImageArraysFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub ycbcr_image_arrays: Bool32, +} +impl ::std::default::Default for PhysicalDeviceYcbcrImageArraysFeaturesEXT { + fn default() -> PhysicalDeviceYcbcrImageArraysFeaturesEXT { + PhysicalDeviceYcbcrImageArraysFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + ycbcr_image_arrays: Bool32::default(), + } + } +} +impl PhysicalDeviceYcbcrImageArraysFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceYcbcrImageArraysFeaturesEXTBuilder<'a> { + PhysicalDeviceYcbcrImageArraysFeaturesEXTBuilder { + inner: PhysicalDeviceYcbcrImageArraysFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceYcbcrImageArraysFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceYcbcrImageArraysFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for 
PhysicalDeviceYcbcrImageArraysFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceYcbcrImageArraysFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceYcbcrImageArraysFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceYcbcrImageArraysFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceYcbcrImageArraysFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceYcbcrImageArraysFeaturesEXTBuilder<'a> { + pub fn ycbcr_image_arrays( + mut self, + ycbcr_image_arrays: bool, + ) -> PhysicalDeviceYcbcrImageArraysFeaturesEXTBuilder<'a> { + self.inner.ycbcr_image_arrays = ycbcr_image_arrays.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceYcbcrImageArraysFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct ImageViewHandleInfoNVX { + pub s_type: StructureType, + pub p_next: *const c_void, + pub image_view: ImageView, + pub descriptor_type: DescriptorType, + pub sampler: Sampler, +} +impl ::std::default::Default for ImageViewHandleInfoNVX { + fn default() -> ImageViewHandleInfoNVX { + ImageViewHandleInfoNVX { + s_type: StructureType::IMAGE_VIEW_HANDLE_INFO_NVX, + p_next: ::std::ptr::null(), + image_view: ImageView::default(), + descriptor_type: DescriptorType::default(), + sampler: Sampler::default(), + } + } +} +impl ImageViewHandleInfoNVX { + pub fn builder<'a>() -> ImageViewHandleInfoNVXBuilder<'a> { + ImageViewHandleInfoNVXBuilder { + inner: ImageViewHandleInfoNVX::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct 
ImageViewHandleInfoNVXBuilder<'a> { + inner: ImageViewHandleInfoNVX, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsImageViewHandleInfoNVX {} +impl<'a> ::std::ops::Deref for ImageViewHandleInfoNVXBuilder<'a> { + type Target = ImageViewHandleInfoNVX; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for ImageViewHandleInfoNVXBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> ImageViewHandleInfoNVXBuilder<'a> { + pub fn image_view(mut self, image_view: ImageView) -> ImageViewHandleInfoNVXBuilder<'a> { + self.inner.image_view = image_view; + self + } + pub fn descriptor_type( + mut self, + descriptor_type: DescriptorType, + ) -> ImageViewHandleInfoNVXBuilder<'a> { + self.inner.descriptor_type = descriptor_type; + self + } + pub fn sampler(mut self, sampler: Sampler) -> ImageViewHandleInfoNVXBuilder<'a> { + self.inner.sampler = sampler; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> ImageViewHandleInfoNVXBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> ImageViewHandleInfoNVX { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PresentFrameTokenGGP { + pub s_type: StructureType, + pub p_next: *const c_void, + pub frame_token: GgpFrameToken, +} +impl ::std::default::Default for PresentFrameTokenGGP { + fn default() -> PresentFrameTokenGGP { + PresentFrameTokenGGP { + s_type: StructureType::PRESENT_FRAME_TOKEN_GGP, + p_next: ::std::ptr::null(), + frame_token: GgpFrameToken::default(), + } + } +} +impl PresentFrameTokenGGP { + pub fn builder<'a>() -> PresentFrameTokenGGPBuilder<'a> { + PresentFrameTokenGGPBuilder { + inner: PresentFrameTokenGGP::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PresentFrameTokenGGPBuilder<'a> { + inner: PresentFrameTokenGGP, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPresentInfoKHR for PresentFrameTokenGGPBuilder<'_> {} +unsafe impl ExtendsPresentInfoKHR for PresentFrameTokenGGP {} +impl<'a> ::std::ops::Deref for PresentFrameTokenGGPBuilder<'a> { + type Target = PresentFrameTokenGGP; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PresentFrameTokenGGPBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PresentFrameTokenGGPBuilder<'a> { + pub fn frame_token(mut self, frame_token: GgpFrameToken) -> PresentFrameTokenGGPBuilder<'a> { + self.inner.frame_token = frame_token; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PresentFrameTokenGGP { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct PipelineCreationFeedbackEXT { + pub flags: PipelineCreationFeedbackFlagsEXT, + pub duration: u64, +} +impl PipelineCreationFeedbackEXT { + pub fn builder<'a>() -> PipelineCreationFeedbackEXTBuilder<'a> { + PipelineCreationFeedbackEXTBuilder { + inner: PipelineCreationFeedbackEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineCreationFeedbackEXTBuilder<'a> { + inner: PipelineCreationFeedbackEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PipelineCreationFeedbackEXTBuilder<'a> { + type Target = PipelineCreationFeedbackEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineCreationFeedbackEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineCreationFeedbackEXTBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineCreationFeedbackFlagsEXT, + ) -> PipelineCreationFeedbackEXTBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn duration(mut self, duration: u64) -> PipelineCreationFeedbackEXTBuilder<'a> { + self.inner.duration = duration; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineCreationFeedbackEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineCreationFeedbackCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub p_pipeline_creation_feedback: *mut PipelineCreationFeedbackEXT, + pub pipeline_stage_creation_feedback_count: u32, + pub p_pipeline_stage_creation_feedbacks: *mut PipelineCreationFeedbackEXT, +} +impl ::std::default::Default for PipelineCreationFeedbackCreateInfoEXT { + fn default() -> PipelineCreationFeedbackCreateInfoEXT { + PipelineCreationFeedbackCreateInfoEXT { + s_type: StructureType::PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + p_pipeline_creation_feedback: ::std::ptr::null_mut(), + pipeline_stage_creation_feedback_count: u32::default(), + p_pipeline_stage_creation_feedbacks: ::std::ptr::null_mut(), + } + } +} +impl PipelineCreationFeedbackCreateInfoEXT { + pub fn builder<'a>() -> PipelineCreationFeedbackCreateInfoEXTBuilder<'a> { + PipelineCreationFeedbackCreateInfoEXTBuilder { + inner: PipelineCreationFeedbackCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineCreationFeedbackCreateInfoEXTBuilder<'a> { + inner: PipelineCreationFeedbackCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsGraphicsPipelineCreateInfo for PipelineCreationFeedbackCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsGraphicsPipelineCreateInfo for PipelineCreationFeedbackCreateInfoEXT {} +unsafe impl ExtendsComputePipelineCreateInfo for PipelineCreationFeedbackCreateInfoEXTBuilder<'_> {} +unsafe impl ExtendsComputePipelineCreateInfo for PipelineCreationFeedbackCreateInfoEXT {} +unsafe impl ExtendsRayTracingPipelineCreateInfoNV + for 
PipelineCreationFeedbackCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsRayTracingPipelineCreateInfoNV for PipelineCreationFeedbackCreateInfoEXT {} +unsafe impl ExtendsRayTracingPipelineCreateInfoKHR + for PipelineCreationFeedbackCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsRayTracingPipelineCreateInfoKHR for PipelineCreationFeedbackCreateInfoEXT {} +impl<'a> ::std::ops::Deref for PipelineCreationFeedbackCreateInfoEXTBuilder<'a> { + type Target = PipelineCreationFeedbackCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineCreationFeedbackCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineCreationFeedbackCreateInfoEXTBuilder<'a> { + pub fn pipeline_creation_feedback( + mut self, + pipeline_creation_feedback: *mut PipelineCreationFeedbackEXT, + ) -> PipelineCreationFeedbackCreateInfoEXTBuilder<'a> { + self.inner.p_pipeline_creation_feedback = pipeline_creation_feedback; + self + } + pub fn pipeline_stage_creation_feedbacks( + mut self, + pipeline_stage_creation_feedbacks: &'a mut [PipelineCreationFeedbackEXT], + ) -> PipelineCreationFeedbackCreateInfoEXTBuilder<'a> { + self.inner.pipeline_stage_creation_feedback_count = + pipeline_stage_creation_feedbacks.len() as _; + self.inner.p_pipeline_stage_creation_feedbacks = + pipeline_stage_creation_feedbacks.as_mut_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineCreationFeedbackCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SurfaceFullScreenExclusiveInfoEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub full_screen_exclusive: FullScreenExclusiveEXT, +} +impl ::std::default::Default for SurfaceFullScreenExclusiveInfoEXT { + fn default() -> SurfaceFullScreenExclusiveInfoEXT { + SurfaceFullScreenExclusiveInfoEXT { + s_type: StructureType::SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT, + p_next: ::std::ptr::null_mut(), + full_screen_exclusive: FullScreenExclusiveEXT::default(), + } + } +} +impl SurfaceFullScreenExclusiveInfoEXT { + pub fn builder<'a>() -> SurfaceFullScreenExclusiveInfoEXTBuilder<'a> { + SurfaceFullScreenExclusiveInfoEXTBuilder { + inner: SurfaceFullScreenExclusiveInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SurfaceFullScreenExclusiveInfoEXTBuilder<'a> { + inner: SurfaceFullScreenExclusiveInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceSurfaceInfo2KHR for SurfaceFullScreenExclusiveInfoEXTBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceSurfaceInfo2KHR for SurfaceFullScreenExclusiveInfoEXT {} +unsafe impl ExtendsSwapchainCreateInfoKHR for SurfaceFullScreenExclusiveInfoEXTBuilder<'_> {} +unsafe impl ExtendsSwapchainCreateInfoKHR for SurfaceFullScreenExclusiveInfoEXT {} +impl<'a> ::std::ops::Deref for SurfaceFullScreenExclusiveInfoEXTBuilder<'a> { + type Target = SurfaceFullScreenExclusiveInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SurfaceFullScreenExclusiveInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SurfaceFullScreenExclusiveInfoEXTBuilder<'a> 
{ + pub fn full_screen_exclusive( + mut self, + full_screen_exclusive: FullScreenExclusiveEXT, + ) -> SurfaceFullScreenExclusiveInfoEXTBuilder<'a> { + self.inner.full_screen_exclusive = full_screen_exclusive; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SurfaceFullScreenExclusiveInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SurfaceFullScreenExclusiveWin32InfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub hmonitor: HMONITOR, +} +impl ::std::default::Default for SurfaceFullScreenExclusiveWin32InfoEXT { + fn default() -> SurfaceFullScreenExclusiveWin32InfoEXT { + SurfaceFullScreenExclusiveWin32InfoEXT { + s_type: StructureType::SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT, + p_next: ::std::ptr::null(), + hmonitor: unsafe { ::std::mem::zeroed() }, + } + } +} +impl SurfaceFullScreenExclusiveWin32InfoEXT { + pub fn builder<'a>() -> SurfaceFullScreenExclusiveWin32InfoEXTBuilder<'a> { + SurfaceFullScreenExclusiveWin32InfoEXTBuilder { + inner: SurfaceFullScreenExclusiveWin32InfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SurfaceFullScreenExclusiveWin32InfoEXTBuilder<'a> { + inner: SurfaceFullScreenExclusiveWin32InfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceSurfaceInfo2KHR + for SurfaceFullScreenExclusiveWin32InfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceSurfaceInfo2KHR for SurfaceFullScreenExclusiveWin32InfoEXT {} +unsafe impl ExtendsSwapchainCreateInfoKHR for SurfaceFullScreenExclusiveWin32InfoEXTBuilder<'_> {} +unsafe impl ExtendsSwapchainCreateInfoKHR for SurfaceFullScreenExclusiveWin32InfoEXT {} +impl<'a> ::std::ops::Deref 
for SurfaceFullScreenExclusiveWin32InfoEXTBuilder<'a> { + type Target = SurfaceFullScreenExclusiveWin32InfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SurfaceFullScreenExclusiveWin32InfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SurfaceFullScreenExclusiveWin32InfoEXTBuilder<'a> { + pub fn hmonitor( + mut self, + hmonitor: HMONITOR, + ) -> SurfaceFullScreenExclusiveWin32InfoEXTBuilder<'a> { + self.inner.hmonitor = hmonitor; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SurfaceFullScreenExclusiveWin32InfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct SurfaceCapabilitiesFullScreenExclusiveEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub full_screen_exclusive_supported: Bool32, +} +impl ::std::default::Default for SurfaceCapabilitiesFullScreenExclusiveEXT { + fn default() -> SurfaceCapabilitiesFullScreenExclusiveEXT { + SurfaceCapabilitiesFullScreenExclusiveEXT { + s_type: StructureType::SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT, + p_next: ::std::ptr::null_mut(), + full_screen_exclusive_supported: Bool32::default(), + } + } +} +impl SurfaceCapabilitiesFullScreenExclusiveEXT { + pub fn builder<'a>() -> SurfaceCapabilitiesFullScreenExclusiveEXTBuilder<'a> { + SurfaceCapabilitiesFullScreenExclusiveEXTBuilder { + inner: SurfaceCapabilitiesFullScreenExclusiveEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct SurfaceCapabilitiesFullScreenExclusiveEXTBuilder<'a> { + inner: SurfaceCapabilitiesFullScreenExclusiveEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl 
ExtendsSurfaceCapabilities2KHR + for SurfaceCapabilitiesFullScreenExclusiveEXTBuilder<'_> +{ +} +unsafe impl ExtendsSurfaceCapabilities2KHR for SurfaceCapabilitiesFullScreenExclusiveEXT {} +impl<'a> ::std::ops::Deref for SurfaceCapabilitiesFullScreenExclusiveEXTBuilder<'a> { + type Target = SurfaceCapabilitiesFullScreenExclusiveEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for SurfaceCapabilitiesFullScreenExclusiveEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> SurfaceCapabilitiesFullScreenExclusiveEXTBuilder<'a> { + pub fn full_screen_exclusive_supported( + mut self, + full_screen_exclusive_supported: bool, + ) -> SurfaceCapabilitiesFullScreenExclusiveEXTBuilder<'a> { + self.inner.full_screen_exclusive_supported = full_screen_exclusive_supported.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> SurfaceCapabilitiesFullScreenExclusiveEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevicePerformanceQueryFeaturesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub performance_counter_query_pools: Bool32, + pub performance_counter_multiple_query_pools: Bool32, +} +impl ::std::default::Default for PhysicalDevicePerformanceQueryFeaturesKHR { + fn default() -> PhysicalDevicePerformanceQueryFeaturesKHR { + PhysicalDevicePerformanceQueryFeaturesKHR { + s_type: StructureType::PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR, + p_next: ::std::ptr::null_mut(), + performance_counter_query_pools: Bool32::default(), + performance_counter_multiple_query_pools: Bool32::default(), + } + } +} +impl PhysicalDevicePerformanceQueryFeaturesKHR { + pub fn 
builder<'a>() -> PhysicalDevicePerformanceQueryFeaturesKHRBuilder<'a> { + PhysicalDevicePerformanceQueryFeaturesKHRBuilder { + inner: PhysicalDevicePerformanceQueryFeaturesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePerformanceQueryFeaturesKHRBuilder<'a> { + inner: PhysicalDevicePerformanceQueryFeaturesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevicePerformanceQueryFeaturesKHRBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevicePerformanceQueryFeaturesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDevicePerformanceQueryFeaturesKHRBuilder<'a> { + type Target = PhysicalDevicePerformanceQueryFeaturesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePerformanceQueryFeaturesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePerformanceQueryFeaturesKHRBuilder<'a> { + pub fn performance_counter_query_pools( + mut self, + performance_counter_query_pools: bool, + ) -> PhysicalDevicePerformanceQueryFeaturesKHRBuilder<'a> { + self.inner.performance_counter_query_pools = performance_counter_query_pools.into(); + self + } + pub fn performance_counter_multiple_query_pools( + mut self, + performance_counter_multiple_query_pools: bool, + ) -> PhysicalDevicePerformanceQueryFeaturesKHRBuilder<'a> { + self.inner.performance_counter_multiple_query_pools = + performance_counter_multiple_query_pools.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevicePerformanceQueryFeaturesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevicePerformanceQueryPropertiesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub allow_command_buffer_query_copies: Bool32, +} +impl ::std::default::Default for PhysicalDevicePerformanceQueryPropertiesKHR { + fn default() -> PhysicalDevicePerformanceQueryPropertiesKHR { + PhysicalDevicePerformanceQueryPropertiesKHR { + s_type: StructureType::PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR, + p_next: ::std::ptr::null_mut(), + allow_command_buffer_query_copies: Bool32::default(), + } + } +} +impl PhysicalDevicePerformanceQueryPropertiesKHR { + pub fn builder<'a>() -> PhysicalDevicePerformanceQueryPropertiesKHRBuilder<'a> { + PhysicalDevicePerformanceQueryPropertiesKHRBuilder { + inner: PhysicalDevicePerformanceQueryPropertiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePerformanceQueryPropertiesKHRBuilder<'a> { + inner: PhysicalDevicePerformanceQueryPropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDevicePerformanceQueryPropertiesKHRBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDevicePerformanceQueryPropertiesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDevicePerformanceQueryPropertiesKHRBuilder<'a> { + type Target = PhysicalDevicePerformanceQueryPropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePerformanceQueryPropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePerformanceQueryPropertiesKHRBuilder<'a> { + pub fn 
allow_command_buffer_query_copies( + mut self, + allow_command_buffer_query_copies: bool, + ) -> PhysicalDevicePerformanceQueryPropertiesKHRBuilder<'a> { + self.inner.allow_command_buffer_query_copies = allow_command_buffer_query_copies.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevicePerformanceQueryPropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PerformanceCounterKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub unit: PerformanceCounterUnitKHR, + pub scope: PerformanceCounterScopeKHR, + pub storage: PerformanceCounterStorageKHR, + pub uuid: [u8; UUID_SIZE], +} +impl ::std::default::Default for PerformanceCounterKHR { + fn default() -> PerformanceCounterKHR { + PerformanceCounterKHR { + s_type: StructureType::PERFORMANCE_COUNTER_KHR, + p_next: ::std::ptr::null(), + unit: PerformanceCounterUnitKHR::default(), + scope: PerformanceCounterScopeKHR::default(), + storage: PerformanceCounterStorageKHR::default(), + uuid: unsafe { ::std::mem::zeroed() }, + } + } +} +impl PerformanceCounterKHR { + pub fn builder<'a>() -> PerformanceCounterKHRBuilder<'a> { + PerformanceCounterKHRBuilder { + inner: PerformanceCounterKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PerformanceCounterKHRBuilder<'a> { + inner: PerformanceCounterKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPerformanceCounterKHR {} +impl<'a> ::std::ops::Deref for PerformanceCounterKHRBuilder<'a> { + type Target = PerformanceCounterKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PerformanceCounterKHRBuilder<'a> { + fn 
deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PerformanceCounterKHRBuilder<'a> { + pub fn unit(mut self, unit: PerformanceCounterUnitKHR) -> PerformanceCounterKHRBuilder<'a> { + self.inner.unit = unit; + self + } + pub fn scope(mut self, scope: PerformanceCounterScopeKHR) -> PerformanceCounterKHRBuilder<'a> { + self.inner.scope = scope; + self + } + pub fn storage( + mut self, + storage: PerformanceCounterStorageKHR, + ) -> PerformanceCounterKHRBuilder<'a> { + self.inner.storage = storage; + self + } + pub fn uuid(mut self, uuid: [u8; UUID_SIZE]) -> PerformanceCounterKHRBuilder<'a> { + self.inner.uuid = uuid; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PerformanceCounterKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PerformanceCounterKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PerformanceCounterDescriptionKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PerformanceCounterDescriptionFlagsKHR, + pub name: [c_char; MAX_DESCRIPTION_SIZE], + pub category: [c_char; MAX_DESCRIPTION_SIZE], + pub description: [c_char; MAX_DESCRIPTION_SIZE], +} +impl fmt::Debug for PerformanceCounterDescriptionKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PerformanceCounterDescriptionKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("flags", &self.flags) + .field("name", &unsafe { + ::std::ffi::CStr::from_ptr(self.name.as_ptr() as *const c_char) + }) + .field("category", &unsafe { + ::std::ffi::CStr::from_ptr(self.category.as_ptr() as *const c_char) + }) + .field("description", &unsafe { + ::std::ffi::CStr::from_ptr(self.description.as_ptr() as *const c_char) + }) + .finish() + } +} +impl ::std::default::Default for PerformanceCounterDescriptionKHR { + fn default() -> PerformanceCounterDescriptionKHR { + PerformanceCounterDescriptionKHR { + s_type: StructureType::PERFORMANCE_COUNTER_DESCRIPTION_KHR, + p_next: ::std::ptr::null(), + flags: PerformanceCounterDescriptionFlagsKHR::default(), + name: unsafe { ::std::mem::zeroed() }, + category: unsafe { ::std::mem::zeroed() }, + description: unsafe { ::std::mem::zeroed() }, + } + } +} +impl PerformanceCounterDescriptionKHR { + pub fn builder<'a>() -> PerformanceCounterDescriptionKHRBuilder<'a> { + PerformanceCounterDescriptionKHRBuilder { + inner: PerformanceCounterDescriptionKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PerformanceCounterDescriptionKHRBuilder<'a> { + inner: 
PerformanceCounterDescriptionKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPerformanceCounterDescriptionKHR {} +impl<'a> ::std::ops::Deref for PerformanceCounterDescriptionKHRBuilder<'a> { + type Target = PerformanceCounterDescriptionKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PerformanceCounterDescriptionKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PerformanceCounterDescriptionKHRBuilder<'a> { + pub fn flags( + mut self, + flags: PerformanceCounterDescriptionFlagsKHR, + ) -> PerformanceCounterDescriptionKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn name( + mut self, + name: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PerformanceCounterDescriptionKHRBuilder<'a> { + self.inner.name = name; + self + } + pub fn category( + mut self, + category: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PerformanceCounterDescriptionKHRBuilder<'a> { + self.inner.category = category; + self + } + pub fn description( + mut self, + description: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PerformanceCounterDescriptionKHRBuilder<'a> { + self.inner.description = description; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PerformanceCounterDescriptionKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PerformanceCounterDescriptionKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct QueryPoolPerformanceCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub queue_family_index: u32, + pub counter_index_count: u32, + pub p_counter_indices: *const u32, +} +impl ::std::default::Default for QueryPoolPerformanceCreateInfoKHR { + fn default() -> QueryPoolPerformanceCreateInfoKHR { + QueryPoolPerformanceCreateInfoKHR { + s_type: StructureType::QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + queue_family_index: u32::default(), + counter_index_count: u32::default(), + p_counter_indices: ::std::ptr::null(), + } + } +} +impl QueryPoolPerformanceCreateInfoKHR { + pub fn builder<'a>() -> QueryPoolPerformanceCreateInfoKHRBuilder<'a> { + QueryPoolPerformanceCreateInfoKHRBuilder { + inner: QueryPoolPerformanceCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct QueryPoolPerformanceCreateInfoKHRBuilder<'a> { + inner: QueryPoolPerformanceCreateInfoKHR, + marker: 
::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsQueryPoolCreateInfo for QueryPoolPerformanceCreateInfoKHRBuilder<'_> {} +unsafe impl ExtendsQueryPoolCreateInfo for QueryPoolPerformanceCreateInfoKHR {} +impl<'a> ::std::ops::Deref for QueryPoolPerformanceCreateInfoKHRBuilder<'a> { + type Target = QueryPoolPerformanceCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for QueryPoolPerformanceCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> QueryPoolPerformanceCreateInfoKHRBuilder<'a> { + pub fn queue_family_index( + mut self, + queue_family_index: u32, + ) -> QueryPoolPerformanceCreateInfoKHRBuilder<'a> { + self.inner.queue_family_index = queue_family_index; + self + } + pub fn counter_indices( + mut self, + counter_indices: &'a [u32], + ) -> QueryPoolPerformanceCreateInfoKHRBuilder<'a> { + self.inner.counter_index_count = counter_indices.len() as _; + self.inner.p_counter_indices = counter_indices.as_ptr(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> QueryPoolPerformanceCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub union PerformanceCounterResultKHR { + pub int32: i32, + pub int64: i64, + pub uint32: u32, + pub uint64: u64, + pub float32: f32, + pub float64: f64, +} +impl ::std::default::Default for PerformanceCounterResultKHR { + fn default() -> PerformanceCounterResultKHR { + unsafe { ::std::mem::zeroed() } + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AcquireProfilingLockInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: AcquireProfilingLockFlagsKHR, + pub timeout: u64, +} +impl ::std::default::Default for AcquireProfilingLockInfoKHR { + fn default() -> AcquireProfilingLockInfoKHR { + AcquireProfilingLockInfoKHR { + s_type: StructureType::ACQUIRE_PROFILING_LOCK_INFO_KHR, + p_next: ::std::ptr::null(), + flags: AcquireProfilingLockFlagsKHR::default(), + timeout: u64::default(), + } + } +} +impl AcquireProfilingLockInfoKHR { + pub fn builder<'a>() -> AcquireProfilingLockInfoKHRBuilder<'a> { + AcquireProfilingLockInfoKHRBuilder { + inner: AcquireProfilingLockInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AcquireProfilingLockInfoKHRBuilder<'a> { + inner: AcquireProfilingLockInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAcquireProfilingLockInfoKHR {} +impl<'a> ::std::ops::Deref for AcquireProfilingLockInfoKHRBuilder<'a> { + type Target = AcquireProfilingLockInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AcquireProfilingLockInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AcquireProfilingLockInfoKHRBuilder<'a> { + pub fn flags( + mut 
self, + flags: AcquireProfilingLockFlagsKHR, + ) -> AcquireProfilingLockInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn timeout(mut self, timeout: u64) -> AcquireProfilingLockInfoKHRBuilder<'a> { + self.inner.timeout = timeout; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AcquireProfilingLockInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AcquireProfilingLockInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PerformanceQuerySubmitInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub counter_pass_index: u32, +} +impl ::std::default::Default for PerformanceQuerySubmitInfoKHR { + fn default() -> PerformanceQuerySubmitInfoKHR { + PerformanceQuerySubmitInfoKHR { + s_type: StructureType::PERFORMANCE_QUERY_SUBMIT_INFO_KHR, + p_next: ::std::ptr::null(), + counter_pass_index: u32::default(), + } + } +} +impl PerformanceQuerySubmitInfoKHR { + pub fn builder<'a>() -> PerformanceQuerySubmitInfoKHRBuilder<'a> { + PerformanceQuerySubmitInfoKHRBuilder { + inner: PerformanceQuerySubmitInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PerformanceQuerySubmitInfoKHRBuilder<'a> { + inner: PerformanceQuerySubmitInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsSubmitInfo for PerformanceQuerySubmitInfoKHRBuilder<'_> {} +unsafe impl ExtendsSubmitInfo for PerformanceQuerySubmitInfoKHR {} +impl<'a> ::std::ops::Deref for PerformanceQuerySubmitInfoKHRBuilder<'a> { + type Target = PerformanceQuerySubmitInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PerformanceQuerySubmitInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PerformanceQuerySubmitInfoKHRBuilder<'a> { + pub fn counter_pass_index( + mut self, + counter_pass_index: u32, + ) -> PerformanceQuerySubmitInfoKHRBuilder<'a> { + self.inner.counter_pass_index = counter_pass_index; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PerformanceQuerySubmitInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct HeadlessSurfaceCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: HeadlessSurfaceCreateFlagsEXT, +} +impl ::std::default::Default for HeadlessSurfaceCreateInfoEXT { + fn default() -> HeadlessSurfaceCreateInfoEXT { + HeadlessSurfaceCreateInfoEXT { + s_type: StructureType::HEADLESS_SURFACE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + flags: HeadlessSurfaceCreateFlagsEXT::default(), + } + } +} +impl HeadlessSurfaceCreateInfoEXT { + pub fn builder<'a>() -> HeadlessSurfaceCreateInfoEXTBuilder<'a> { + HeadlessSurfaceCreateInfoEXTBuilder { + inner: HeadlessSurfaceCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct HeadlessSurfaceCreateInfoEXTBuilder<'a> { + inner: HeadlessSurfaceCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsHeadlessSurfaceCreateInfoEXT {} +impl<'a> ::std::ops::Deref for HeadlessSurfaceCreateInfoEXTBuilder<'a> { + type Target = HeadlessSurfaceCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for HeadlessSurfaceCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> HeadlessSurfaceCreateInfoEXTBuilder<'a> { + pub fn flags( + mut self, + flags: HeadlessSurfaceCreateFlagsEXT, + ) -> HeadlessSurfaceCreateInfoEXTBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> HeadlessSurfaceCreateInfoEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> HeadlessSurfaceCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceCoverageReductionModeFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub coverage_reduction_mode: Bool32, +} +impl ::std::default::Default for PhysicalDeviceCoverageReductionModeFeaturesNV { + fn default() -> PhysicalDeviceCoverageReductionModeFeaturesNV { + PhysicalDeviceCoverageReductionModeFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + coverage_reduction_mode: Bool32::default(), + } + } +} +impl PhysicalDeviceCoverageReductionModeFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceCoverageReductionModeFeaturesNVBuilder<'a> { + PhysicalDeviceCoverageReductionModeFeaturesNVBuilder { + inner: PhysicalDeviceCoverageReductionModeFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceCoverageReductionModeFeaturesNVBuilder<'a> { + inner: PhysicalDeviceCoverageReductionModeFeaturesNV, + marker: ::std::marker::PhantomData<&'a 
()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceCoverageReductionModeFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceCoverageReductionModeFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceCoverageReductionModeFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceCoverageReductionModeFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceCoverageReductionModeFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceCoverageReductionModeFeaturesNVBuilder<'a> { + pub fn coverage_reduction_mode( + mut self, + coverage_reduction_mode: bool, + ) -> PhysicalDeviceCoverageReductionModeFeaturesNVBuilder<'a> { + self.inner.coverage_reduction_mode = coverage_reduction_mode.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceCoverageReductionModeFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineCoverageReductionStateCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: PipelineCoverageReductionStateCreateFlagsNV, + pub coverage_reduction_mode: CoverageReductionModeNV, +} +impl ::std::default::Default for PipelineCoverageReductionStateCreateInfoNV { + fn default() -> PipelineCoverageReductionStateCreateInfoNV { + PipelineCoverageReductionStateCreateInfoNV { + s_type: StructureType::PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + flags: PipelineCoverageReductionStateCreateFlagsNV::default(), + coverage_reduction_mode: CoverageReductionModeNV::default(), + } + } +} +impl 
PipelineCoverageReductionStateCreateInfoNV { + pub fn builder<'a>() -> PipelineCoverageReductionStateCreateInfoNVBuilder<'a> { + PipelineCoverageReductionStateCreateInfoNVBuilder { + inner: PipelineCoverageReductionStateCreateInfoNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineCoverageReductionStateCreateInfoNVBuilder<'a> { + inner: PipelineCoverageReductionStateCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineMultisampleStateCreateInfo + for PipelineCoverageReductionStateCreateInfoNVBuilder<'_> +{ +} +unsafe impl ExtendsPipelineMultisampleStateCreateInfo + for PipelineCoverageReductionStateCreateInfoNV +{ +} +impl<'a> ::std::ops::Deref for PipelineCoverageReductionStateCreateInfoNVBuilder<'a> { + type Target = PipelineCoverageReductionStateCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineCoverageReductionStateCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineCoverageReductionStateCreateInfoNVBuilder<'a> { + pub fn flags( + mut self, + flags: PipelineCoverageReductionStateCreateFlagsNV, + ) -> PipelineCoverageReductionStateCreateInfoNVBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn coverage_reduction_mode( + mut self, + coverage_reduction_mode: CoverageReductionModeNV, + ) -> PipelineCoverageReductionStateCreateInfoNVBuilder<'a> { + self.inner.coverage_reduction_mode = coverage_reduction_mode; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineCoverageReductionStateCreateInfoNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct FramebufferMixedSamplesCombinationNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub coverage_reduction_mode: CoverageReductionModeNV, + pub rasterization_samples: SampleCountFlags, + pub depth_stencil_samples: SampleCountFlags, + pub color_samples: SampleCountFlags, +} +impl ::std::default::Default for FramebufferMixedSamplesCombinationNV { + fn default() -> FramebufferMixedSamplesCombinationNV { + FramebufferMixedSamplesCombinationNV { + s_type: StructureType::FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV, + p_next: ::std::ptr::null_mut(), + coverage_reduction_mode: CoverageReductionModeNV::default(), + rasterization_samples: SampleCountFlags::default(), + depth_stencil_samples: SampleCountFlags::default(), + color_samples: SampleCountFlags::default(), + } + } +} +impl FramebufferMixedSamplesCombinationNV { + pub fn builder<'a>() -> FramebufferMixedSamplesCombinationNVBuilder<'a> { + FramebufferMixedSamplesCombinationNVBuilder { + inner: FramebufferMixedSamplesCombinationNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct FramebufferMixedSamplesCombinationNVBuilder<'a> { + inner: FramebufferMixedSamplesCombinationNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsFramebufferMixedSamplesCombinationNV {} +impl<'a> ::std::ops::Deref for FramebufferMixedSamplesCombinationNVBuilder<'a> { + type Target = FramebufferMixedSamplesCombinationNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for FramebufferMixedSamplesCombinationNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> 
FramebufferMixedSamplesCombinationNVBuilder<'a> { + pub fn coverage_reduction_mode( + mut self, + coverage_reduction_mode: CoverageReductionModeNV, + ) -> FramebufferMixedSamplesCombinationNVBuilder<'a> { + self.inner.coverage_reduction_mode = coverage_reduction_mode; + self + } + pub fn rasterization_samples( + mut self, + rasterization_samples: SampleCountFlags, + ) -> FramebufferMixedSamplesCombinationNVBuilder<'a> { + self.inner.rasterization_samples = rasterization_samples; + self + } + pub fn depth_stencil_samples( + mut self, + depth_stencil_samples: SampleCountFlags, + ) -> FramebufferMixedSamplesCombinationNVBuilder<'a> { + self.inner.depth_stencil_samples = depth_stencil_samples; + self + } + pub fn color_samples( + mut self, + color_samples: SampleCountFlags, + ) -> FramebufferMixedSamplesCombinationNVBuilder<'a> { + self.inner.color_samples = color_samples; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> FramebufferMixedSamplesCombinationNVBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> FramebufferMixedSamplesCombinationNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_integer_functions2: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + fn default() -> PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL, + p_next: ::std::ptr::null_mut(), + shader_integer_functions2: Bool32::default(), + } + } +} +impl PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + pub fn builder<'a>() -> PhysicalDeviceShaderIntegerFunctions2FeaturesINTELBuilder<'a> { + PhysicalDeviceShaderIntegerFunctions2FeaturesINTELBuilder { + inner: PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderIntegerFunctions2FeaturesINTELBuilder<'a> { + inner: PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceShaderIntegerFunctions2FeaturesINTELBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderIntegerFunctions2FeaturesINTELBuilder<'a> { + type Target = PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderIntegerFunctions2FeaturesINTELBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} 
+impl<'a> PhysicalDeviceShaderIntegerFunctions2FeaturesINTELBuilder<'a> { + pub fn shader_integer_functions2( + mut self, + shader_integer_functions2: bool, + ) -> PhysicalDeviceShaderIntegerFunctions2FeaturesINTELBuilder<'a> { + self.inner.shader_integer_functions2 = shader_integer_functions2.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub union PerformanceValueDataINTEL { + pub value32: u32, + pub value64: u64, + pub value_float: f32, + pub value_bool: Bool32, + pub value_string: *const c_char, +} +impl ::std::default::Default for PerformanceValueDataINTEL { + fn default() -> PerformanceValueDataINTEL { + unsafe { ::std::mem::zeroed() } + } +} +#[repr(C)] +#[derive(Copy, Clone, Default)] +#[doc = ""] +pub struct PerformanceValueINTEL { + pub ty: PerformanceValueTypeINTEL, + pub data: PerformanceValueDataINTEL, +} +impl fmt::Debug for PerformanceValueINTEL { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PerformanceValueINTEL") + .field("ty", &self.ty) + .field("data", &"union") + .finish() + } +} +impl PerformanceValueINTEL { + pub fn builder<'a>() -> PerformanceValueINTELBuilder<'a> { + PerformanceValueINTELBuilder { + inner: PerformanceValueINTEL::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PerformanceValueINTELBuilder<'a> { + inner: PerformanceValueINTEL, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for PerformanceValueINTELBuilder<'a> { + type Target = PerformanceValueINTEL; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> 
::std::ops::DerefMut for PerformanceValueINTELBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PerformanceValueINTELBuilder<'a> { + pub fn ty(mut self, ty: PerformanceValueTypeINTEL) -> PerformanceValueINTELBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn data(mut self, data: PerformanceValueDataINTEL) -> PerformanceValueINTELBuilder<'a> { + self.inner.data = data; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PerformanceValueINTEL { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct InitializePerformanceApiInfoINTEL { + pub s_type: StructureType, + pub p_next: *const c_void, + pub p_user_data: *mut c_void, +} +impl ::std::default::Default for InitializePerformanceApiInfoINTEL { + fn default() -> InitializePerformanceApiInfoINTEL { + InitializePerformanceApiInfoINTEL { + s_type: StructureType::INITIALIZE_PERFORMANCE_API_INFO_INTEL, + p_next: ::std::ptr::null(), + p_user_data: ::std::ptr::null_mut(), + } + } +} +impl InitializePerformanceApiInfoINTEL { + pub fn builder<'a>() -> InitializePerformanceApiInfoINTELBuilder<'a> { + InitializePerformanceApiInfoINTELBuilder { + inner: InitializePerformanceApiInfoINTEL::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct InitializePerformanceApiInfoINTELBuilder<'a> { + inner: InitializePerformanceApiInfoINTEL, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsInitializePerformanceApiInfoINTEL {} +impl<'a> ::std::ops::Deref for InitializePerformanceApiInfoINTELBuilder<'a> { + type Target = InitializePerformanceApiInfoINTEL; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> 
::std::ops::DerefMut for InitializePerformanceApiInfoINTELBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> InitializePerformanceApiInfoINTELBuilder<'a> { + pub fn user_data( + mut self, + user_data: *mut c_void, + ) -> InitializePerformanceApiInfoINTELBuilder<'a> { + self.inner.p_user_data = user_data; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> InitializePerformanceApiInfoINTELBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> InitializePerformanceApiInfoINTEL { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct QueryPoolPerformanceQueryCreateInfoINTEL { + pub s_type: StructureType, + pub p_next: *const c_void, + pub performance_counters_sampling: QueryPoolSamplingModeINTEL, +} +impl ::std::default::Default for QueryPoolPerformanceQueryCreateInfoINTEL { + fn default() -> QueryPoolPerformanceQueryCreateInfoINTEL { + QueryPoolPerformanceQueryCreateInfoINTEL { + s_type: StructureType::QUERY_POOL_CREATE_INFO_INTEL, + p_next: ::std::ptr::null(), + performance_counters_sampling: QueryPoolSamplingModeINTEL::default(), + } + } +} +impl QueryPoolPerformanceQueryCreateInfoINTEL { + pub fn builder<'a>() -> QueryPoolPerformanceQueryCreateInfoINTELBuilder<'a> { + QueryPoolPerformanceQueryCreateInfoINTELBuilder { + inner: QueryPoolPerformanceQueryCreateInfoINTEL::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct QueryPoolPerformanceQueryCreateInfoINTELBuilder<'a> { + inner: QueryPoolPerformanceQueryCreateInfoINTEL, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsQueryPoolCreateInfo for QueryPoolPerformanceQueryCreateInfoINTELBuilder<'_> {} +unsafe impl ExtendsQueryPoolCreateInfo for QueryPoolPerformanceQueryCreateInfoINTEL {} +impl<'a> ::std::ops::Deref for QueryPoolPerformanceQueryCreateInfoINTELBuilder<'a> { + type Target = QueryPoolPerformanceQueryCreateInfoINTEL; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for QueryPoolPerformanceQueryCreateInfoINTELBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> QueryPoolPerformanceQueryCreateInfoINTELBuilder<'a> { + pub fn performance_counters_sampling( + mut self, + 
performance_counters_sampling: QueryPoolSamplingModeINTEL, + ) -> QueryPoolPerformanceQueryCreateInfoINTELBuilder<'a> { + self.inner.performance_counters_sampling = performance_counters_sampling; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> QueryPoolPerformanceQueryCreateInfoINTEL { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PerformanceMarkerInfoINTEL { + pub s_type: StructureType, + pub p_next: *const c_void, + pub marker: u64, +} +impl ::std::default::Default for PerformanceMarkerInfoINTEL { + fn default() -> PerformanceMarkerInfoINTEL { + PerformanceMarkerInfoINTEL { + s_type: StructureType::PERFORMANCE_MARKER_INFO_INTEL, + p_next: ::std::ptr::null(), + marker: u64::default(), + } + } +} +impl PerformanceMarkerInfoINTEL { + pub fn builder<'a>() -> PerformanceMarkerInfoINTELBuilder<'a> { + PerformanceMarkerInfoINTELBuilder { + inner: PerformanceMarkerInfoINTEL::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PerformanceMarkerInfoINTELBuilder<'a> { + inner: PerformanceMarkerInfoINTEL, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPerformanceMarkerInfoINTEL {} +impl<'a> ::std::ops::Deref for PerformanceMarkerInfoINTELBuilder<'a> { + type Target = PerformanceMarkerInfoINTEL; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PerformanceMarkerInfoINTELBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PerformanceMarkerInfoINTELBuilder<'a> { + pub fn marker(mut self, marker: u64) -> PerformanceMarkerInfoINTELBuilder<'a> { + self.inner.marker = marker; + self + } + #[doc = r" Prepends the given 
extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PerformanceMarkerInfoINTELBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PerformanceMarkerInfoINTEL { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PerformanceStreamMarkerInfoINTEL { + pub s_type: StructureType, + pub p_next: *const c_void, + pub marker: u32, +} +impl ::std::default::Default for PerformanceStreamMarkerInfoINTEL { + fn default() -> PerformanceStreamMarkerInfoINTEL { + PerformanceStreamMarkerInfoINTEL { + s_type: StructureType::PERFORMANCE_STREAM_MARKER_INFO_INTEL, + p_next: ::std::ptr::null(), + marker: u32::default(), + } + } +} +impl PerformanceStreamMarkerInfoINTEL { + pub fn builder<'a>() -> PerformanceStreamMarkerInfoINTELBuilder<'a> { + PerformanceStreamMarkerInfoINTELBuilder { + inner: PerformanceStreamMarkerInfoINTEL::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PerformanceStreamMarkerInfoINTELBuilder<'a> { + inner: PerformanceStreamMarkerInfoINTEL, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait 
ExtendsPerformanceStreamMarkerInfoINTEL {} +impl<'a> ::std::ops::Deref for PerformanceStreamMarkerInfoINTELBuilder<'a> { + type Target = PerformanceStreamMarkerInfoINTEL; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PerformanceStreamMarkerInfoINTELBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PerformanceStreamMarkerInfoINTELBuilder<'a> { + pub fn marker(mut self, marker: u32) -> PerformanceStreamMarkerInfoINTELBuilder<'a> { + self.inner.marker = marker; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PerformanceStreamMarkerInfoINTELBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PerformanceStreamMarkerInfoINTEL { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PerformanceOverrideInfoINTEL { + pub s_type: StructureType, + pub p_next: *const c_void, + pub ty: PerformanceOverrideTypeINTEL, + pub enable: Bool32, + pub parameter: u64, +} +impl ::std::default::Default for PerformanceOverrideInfoINTEL { + fn default() -> PerformanceOverrideInfoINTEL { + PerformanceOverrideInfoINTEL { + s_type: StructureType::PERFORMANCE_OVERRIDE_INFO_INTEL, + p_next: ::std::ptr::null(), + ty: PerformanceOverrideTypeINTEL::default(), + enable: Bool32::default(), + parameter: u64::default(), + } + } +} +impl PerformanceOverrideInfoINTEL { + pub fn builder<'a>() -> PerformanceOverrideInfoINTELBuilder<'a> { + PerformanceOverrideInfoINTELBuilder { + inner: PerformanceOverrideInfoINTEL::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PerformanceOverrideInfoINTELBuilder<'a> { + inner: PerformanceOverrideInfoINTEL, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPerformanceOverrideInfoINTEL {} +impl<'a> ::std::ops::Deref for PerformanceOverrideInfoINTELBuilder<'a> { + type Target = PerformanceOverrideInfoINTEL; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PerformanceOverrideInfoINTELBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PerformanceOverrideInfoINTELBuilder<'a> { + pub fn ty( + mut self, + ty: PerformanceOverrideTypeINTEL, + ) -> PerformanceOverrideInfoINTELBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn enable(mut self, enable: bool) -> PerformanceOverrideInfoINTELBuilder<'a> { + self.inner.enable = enable.into(); + self + } + pub fn parameter(mut self, parameter: u64) 
-> PerformanceOverrideInfoINTELBuilder<'a> { + self.inner.parameter = parameter; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PerformanceOverrideInfoINTELBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PerformanceOverrideInfoINTEL { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PerformanceConfigurationAcquireInfoINTEL { + pub s_type: StructureType, + pub p_next: *const c_void, + pub ty: PerformanceConfigurationTypeINTEL, +} +impl ::std::default::Default for PerformanceConfigurationAcquireInfoINTEL { + fn default() -> PerformanceConfigurationAcquireInfoINTEL { + PerformanceConfigurationAcquireInfoINTEL { + s_type: StructureType::PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL, + p_next: ::std::ptr::null(), + ty: PerformanceConfigurationTypeINTEL::default(), + } + } +} +impl PerformanceConfigurationAcquireInfoINTEL { + pub fn builder<'a>() -> PerformanceConfigurationAcquireInfoINTELBuilder<'a> { + PerformanceConfigurationAcquireInfoINTELBuilder { + inner: PerformanceConfigurationAcquireInfoINTEL::default(), + 
marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PerformanceConfigurationAcquireInfoINTELBuilder<'a> { + inner: PerformanceConfigurationAcquireInfoINTEL, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPerformanceConfigurationAcquireInfoINTEL {} +impl<'a> ::std::ops::Deref for PerformanceConfigurationAcquireInfoINTELBuilder<'a> { + type Target = PerformanceConfigurationAcquireInfoINTEL; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PerformanceConfigurationAcquireInfoINTELBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PerformanceConfigurationAcquireInfoINTELBuilder<'a> { + pub fn ty( + mut self, + ty: PerformanceConfigurationTypeINTEL, + ) -> PerformanceConfigurationAcquireInfoINTELBuilder<'a> { + self.inner.ty = ty; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PerformanceConfigurationAcquireInfoINTELBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PerformanceConfigurationAcquireInfoINTEL { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShaderClockFeaturesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_subgroup_clock: Bool32, + pub shader_device_clock: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderClockFeaturesKHR { + fn default() -> PhysicalDeviceShaderClockFeaturesKHR { + PhysicalDeviceShaderClockFeaturesKHR { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR, + p_next: ::std::ptr::null_mut(), + shader_subgroup_clock: Bool32::default(), + shader_device_clock: Bool32::default(), + } + } +} +impl PhysicalDeviceShaderClockFeaturesKHR { + pub fn builder<'a>() -> PhysicalDeviceShaderClockFeaturesKHRBuilder<'a> { + PhysicalDeviceShaderClockFeaturesKHRBuilder { + inner: PhysicalDeviceShaderClockFeaturesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderClockFeaturesKHRBuilder<'a> { + inner: PhysicalDeviceShaderClockFeaturesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderClockFeaturesKHRBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderClockFeaturesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderClockFeaturesKHRBuilder<'a> { + type Target = PhysicalDeviceShaderClockFeaturesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderClockFeaturesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderClockFeaturesKHRBuilder<'a> { + pub fn shader_subgroup_clock( + mut self, + shader_subgroup_clock: bool, + ) -> 
PhysicalDeviceShaderClockFeaturesKHRBuilder<'a> { + self.inner.shader_subgroup_clock = shader_subgroup_clock.into(); + self + } + pub fn shader_device_clock( + mut self, + shader_device_clock: bool, + ) -> PhysicalDeviceShaderClockFeaturesKHRBuilder<'a> { + self.inner.shader_device_clock = shader_device_clock.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderClockFeaturesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceIndexTypeUint8FeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub index_type_uint8: Bool32, +} +impl ::std::default::Default for PhysicalDeviceIndexTypeUint8FeaturesEXT { + fn default() -> PhysicalDeviceIndexTypeUint8FeaturesEXT { + PhysicalDeviceIndexTypeUint8FeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + index_type_uint8: Bool32::default(), + } + } +} +impl PhysicalDeviceIndexTypeUint8FeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceIndexTypeUint8FeaturesEXTBuilder<'a> { + PhysicalDeviceIndexTypeUint8FeaturesEXTBuilder { + inner: PhysicalDeviceIndexTypeUint8FeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceIndexTypeUint8FeaturesEXTBuilder<'a> { + inner: PhysicalDeviceIndexTypeUint8FeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceIndexTypeUint8FeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceIndexTypeUint8FeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceIndexTypeUint8FeaturesEXTBuilder<'a> { + type Target = 
PhysicalDeviceIndexTypeUint8FeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceIndexTypeUint8FeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceIndexTypeUint8FeaturesEXTBuilder<'a> { + pub fn index_type_uint8( + mut self, + index_type_uint8: bool, + ) -> PhysicalDeviceIndexTypeUint8FeaturesEXTBuilder<'a> { + self.inner.index_type_uint8 = index_type_uint8.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceIndexTypeUint8FeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShaderSMBuiltinsPropertiesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_sm_count: u32, + pub shader_warps_per_sm: u32, +} +impl ::std::default::Default for PhysicalDeviceShaderSMBuiltinsPropertiesNV { + fn default() -> PhysicalDeviceShaderSMBuiltinsPropertiesNV { + PhysicalDeviceShaderSMBuiltinsPropertiesNV { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV, + p_next: ::std::ptr::null_mut(), + shader_sm_count: u32::default(), + shader_warps_per_sm: u32::default(), + } + } +} +impl PhysicalDeviceShaderSMBuiltinsPropertiesNV { + pub fn builder<'a>() -> PhysicalDeviceShaderSMBuiltinsPropertiesNVBuilder<'a> { + PhysicalDeviceShaderSMBuiltinsPropertiesNVBuilder { + inner: PhysicalDeviceShaderSMBuiltinsPropertiesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderSMBuiltinsPropertiesNVBuilder<'a> { + inner: PhysicalDeviceShaderSMBuiltinsPropertiesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl 
ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceShaderSMBuiltinsPropertiesNVBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceShaderSMBuiltinsPropertiesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderSMBuiltinsPropertiesNVBuilder<'a> { + type Target = PhysicalDeviceShaderSMBuiltinsPropertiesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderSMBuiltinsPropertiesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderSMBuiltinsPropertiesNVBuilder<'a> { + pub fn shader_sm_count( + mut self, + shader_sm_count: u32, + ) -> PhysicalDeviceShaderSMBuiltinsPropertiesNVBuilder<'a> { + self.inner.shader_sm_count = shader_sm_count; + self + } + pub fn shader_warps_per_sm( + mut self, + shader_warps_per_sm: u32, + ) -> PhysicalDeviceShaderSMBuiltinsPropertiesNVBuilder<'a> { + self.inner.shader_warps_per_sm = shader_warps_per_sm; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderSMBuiltinsPropertiesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShaderSMBuiltinsFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_sm_builtins: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderSMBuiltinsFeaturesNV { + fn default() -> PhysicalDeviceShaderSMBuiltinsFeaturesNV { + PhysicalDeviceShaderSMBuiltinsFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + shader_sm_builtins: Bool32::default(), + } + } +} +impl PhysicalDeviceShaderSMBuiltinsFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceShaderSMBuiltinsFeaturesNVBuilder<'a> { + PhysicalDeviceShaderSMBuiltinsFeaturesNVBuilder { + inner: PhysicalDeviceShaderSMBuiltinsFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderSMBuiltinsFeaturesNVBuilder<'a> { + inner: PhysicalDeviceShaderSMBuiltinsFeaturesNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderSMBuiltinsFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderSMBuiltinsFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderSMBuiltinsFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceShaderSMBuiltinsFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceShaderSMBuiltinsFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderSMBuiltinsFeaturesNVBuilder<'a> { + pub fn shader_sm_builtins( + mut self, + shader_sm_builtins: bool, + ) -> 
PhysicalDeviceShaderSMBuiltinsFeaturesNVBuilder<'a> { + self.inner.shader_sm_builtins = shader_sm_builtins.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderSMBuiltinsFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceFragmentShaderInterlockFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub fragment_shader_sample_interlock: Bool32, + pub fragment_shader_pixel_interlock: Bool32, + pub fragment_shader_shading_rate_interlock: Bool32, +} +impl ::std::default::Default for PhysicalDeviceFragmentShaderInterlockFeaturesEXT { + fn default() -> PhysicalDeviceFragmentShaderInterlockFeaturesEXT { + PhysicalDeviceFragmentShaderInterlockFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + fragment_shader_sample_interlock: Bool32::default(), + fragment_shader_pixel_interlock: Bool32::default(), + fragment_shader_shading_rate_interlock: Bool32::default(), + } + } +} +impl PhysicalDeviceFragmentShaderInterlockFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder<'a> { + PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder { + inner: PhysicalDeviceFragmentShaderInterlockFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceFragmentShaderInterlockFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for 
PhysicalDeviceFragmentShaderInterlockFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceFragmentShaderInterlockFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder<'a> { + pub fn fragment_shader_sample_interlock( + mut self, + fragment_shader_sample_interlock: bool, + ) -> PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder<'a> { + self.inner.fragment_shader_sample_interlock = fragment_shader_sample_interlock.into(); + self + } + pub fn fragment_shader_pixel_interlock( + mut self, + fragment_shader_pixel_interlock: bool, + ) -> PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder<'a> { + self.inner.fragment_shader_pixel_interlock = fragment_shader_pixel_interlock.into(); + self + } + pub fn fragment_shader_shading_rate_interlock( + mut self, + fragment_shader_shading_rate_interlock: bool, + ) -> PhysicalDeviceFragmentShaderInterlockFeaturesEXTBuilder<'a> { + self.inner.fragment_shader_shading_rate_interlock = + fragment_shader_shading_rate_interlock.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceFragmentShaderInterlockFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSeparateDepthStencilLayoutsFeatures { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub separate_depth_stencil_layouts: Bool32, +} +impl ::std::default::Default for PhysicalDeviceSeparateDepthStencilLayoutsFeatures { + fn default() -> PhysicalDeviceSeparateDepthStencilLayoutsFeatures { + PhysicalDeviceSeparateDepthStencilLayoutsFeatures { + s_type: StructureType::PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES, + p_next: ::std::ptr::null_mut(), + separate_depth_stencil_layouts: Bool32::default(), + } + } +} +impl PhysicalDeviceSeparateDepthStencilLayoutsFeatures { + pub fn builder<'a>() -> PhysicalDeviceSeparateDepthStencilLayoutsFeaturesBuilder<'a> { + PhysicalDeviceSeparateDepthStencilLayoutsFeaturesBuilder { + inner: PhysicalDeviceSeparateDepthStencilLayoutsFeatures::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSeparateDepthStencilLayoutsFeaturesBuilder<'a> { + inner: PhysicalDeviceSeparateDepthStencilLayoutsFeatures, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceSeparateDepthStencilLayoutsFeaturesBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceSeparateDepthStencilLayoutsFeatures {} +impl<'a> ::std::ops::Deref for PhysicalDeviceSeparateDepthStencilLayoutsFeaturesBuilder<'a> { + type Target = PhysicalDeviceSeparateDepthStencilLayoutsFeatures; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSeparateDepthStencilLayoutsFeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} 
+impl<'a> PhysicalDeviceSeparateDepthStencilLayoutsFeaturesBuilder<'a> { + pub fn separate_depth_stencil_layouts( + mut self, + separate_depth_stencil_layouts: bool, + ) -> PhysicalDeviceSeparateDepthStencilLayoutsFeaturesBuilder<'a> { + self.inner.separate_depth_stencil_layouts = separate_depth_stencil_layouts.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceSeparateDepthStencilLayoutsFeatures { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AttachmentReferenceStencilLayout { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub stencil_layout: ImageLayout, +} +impl ::std::default::Default for AttachmentReferenceStencilLayout { + fn default() -> AttachmentReferenceStencilLayout { + AttachmentReferenceStencilLayout { + s_type: StructureType::ATTACHMENT_REFERENCE_STENCIL_LAYOUT, + p_next: ::std::ptr::null_mut(), + stencil_layout: ImageLayout::default(), + } + } +} +impl AttachmentReferenceStencilLayout { + pub fn builder<'a>() -> AttachmentReferenceStencilLayoutBuilder<'a> { + AttachmentReferenceStencilLayoutBuilder { + inner: AttachmentReferenceStencilLayout::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AttachmentReferenceStencilLayoutBuilder<'a> { + inner: AttachmentReferenceStencilLayout, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsAttachmentReference2 for AttachmentReferenceStencilLayoutBuilder<'_> {} +unsafe impl ExtendsAttachmentReference2 for AttachmentReferenceStencilLayout {} +impl<'a> ::std::ops::Deref for AttachmentReferenceStencilLayoutBuilder<'a> { + type Target = AttachmentReferenceStencilLayout; + fn deref(&self) -> &Self::Target { + &self.inner + } 
+} +impl<'a> ::std::ops::DerefMut for AttachmentReferenceStencilLayoutBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AttachmentReferenceStencilLayoutBuilder<'a> { + pub fn stencil_layout( + mut self, + stencil_layout: ImageLayout, + ) -> AttachmentReferenceStencilLayoutBuilder<'a> { + self.inner.stencil_layout = stencil_layout; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AttachmentReferenceStencilLayout { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AttachmentDescriptionStencilLayout { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub stencil_initial_layout: ImageLayout, + pub stencil_final_layout: ImageLayout, +} +impl ::std::default::Default for AttachmentDescriptionStencilLayout { + fn default() -> AttachmentDescriptionStencilLayout { + AttachmentDescriptionStencilLayout { + s_type: StructureType::ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT, + p_next: ::std::ptr::null_mut(), + stencil_initial_layout: ImageLayout::default(), + stencil_final_layout: ImageLayout::default(), + } + } +} +impl AttachmentDescriptionStencilLayout { + pub fn builder<'a>() -> AttachmentDescriptionStencilLayoutBuilder<'a> { + AttachmentDescriptionStencilLayoutBuilder { + inner: AttachmentDescriptionStencilLayout::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AttachmentDescriptionStencilLayoutBuilder<'a> { + inner: AttachmentDescriptionStencilLayout, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsAttachmentDescription2 for AttachmentDescriptionStencilLayoutBuilder<'_> {} +unsafe impl ExtendsAttachmentDescription2 for AttachmentDescriptionStencilLayout {} 
+impl<'a> ::std::ops::Deref for AttachmentDescriptionStencilLayoutBuilder<'a> { + type Target = AttachmentDescriptionStencilLayout; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AttachmentDescriptionStencilLayoutBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AttachmentDescriptionStencilLayoutBuilder<'a> { + pub fn stencil_initial_layout( + mut self, + stencil_initial_layout: ImageLayout, + ) -> AttachmentDescriptionStencilLayoutBuilder<'a> { + self.inner.stencil_initial_layout = stencil_initial_layout; + self + } + pub fn stencil_final_layout( + mut self, + stencil_final_layout: ImageLayout, + ) -> AttachmentDescriptionStencilLayoutBuilder<'a> { + self.inner.stencil_final_layout = stencil_final_layout; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AttachmentDescriptionStencilLayout { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub pipeline_executable_info: Bool32, +} +impl ::std::default::Default for PhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + fn default() -> PhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + PhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + s_type: StructureType::PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR, + p_next: ::std::ptr::null_mut(), + pipeline_executable_info: Bool32::default(), + } + } +} +impl PhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + pub fn builder<'a>() -> PhysicalDevicePipelineExecutablePropertiesFeaturesKHRBuilder<'a> { + 
PhysicalDevicePipelineExecutablePropertiesFeaturesKHRBuilder { + inner: PhysicalDevicePipelineExecutablePropertiesFeaturesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePipelineExecutablePropertiesFeaturesKHRBuilder<'a> { + inner: PhysicalDevicePipelineExecutablePropertiesFeaturesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDevicePipelineExecutablePropertiesFeaturesKHRBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevicePipelineExecutablePropertiesFeaturesKHR {} +impl<'a> ::std::ops::Deref for PhysicalDevicePipelineExecutablePropertiesFeaturesKHRBuilder<'a> { + type Target = PhysicalDevicePipelineExecutablePropertiesFeaturesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePipelineExecutablePropertiesFeaturesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePipelineExecutablePropertiesFeaturesKHRBuilder<'a> { + pub fn pipeline_executable_info( + mut self, + pipeline_executable_info: bool, + ) -> PhysicalDevicePipelineExecutablePropertiesFeaturesKHRBuilder<'a> { + self.inner.pipeline_executable_info = pipeline_executable_info.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub pipeline: Pipeline, +} +impl ::std::default::Default for PipelineInfoKHR { + fn default() -> PipelineInfoKHR { + PipelineInfoKHR { + s_type: StructureType::PIPELINE_INFO_KHR, + p_next: ::std::ptr::null(), + pipeline: Pipeline::default(), + } + } +} +impl PipelineInfoKHR { + pub fn builder<'a>() -> PipelineInfoKHRBuilder<'a> { + PipelineInfoKHRBuilder { + inner: PipelineInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineInfoKHRBuilder<'a> { + inner: PipelineInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineInfoKHR {} +impl<'a> ::std::ops::Deref for PipelineInfoKHRBuilder<'a> { + type Target = PipelineInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineInfoKHRBuilder<'a> { + pub fn pipeline(mut self, pipeline: Pipeline) -> PipelineInfoKHRBuilder<'a> { + self.inner.pipeline = pipeline; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PipelineExecutablePropertiesKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub stages: ShaderStageFlags, + pub name: [c_char; MAX_DESCRIPTION_SIZE], + pub description: [c_char; MAX_DESCRIPTION_SIZE], + pub subgroup_size: u32, +} +impl fmt::Debug for PipelineExecutablePropertiesKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PipelineExecutablePropertiesKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("stages", &self.stages) + .field("name", &unsafe { + ::std::ffi::CStr::from_ptr(self.name.as_ptr() as *const c_char) + }) + .field("description", &unsafe { + ::std::ffi::CStr::from_ptr(self.description.as_ptr() as *const c_char) + }) + .field("subgroup_size", &self.subgroup_size) + .finish() + } +} +impl ::std::default::Default for PipelineExecutablePropertiesKHR { + fn default() -> PipelineExecutablePropertiesKHR { + PipelineExecutablePropertiesKHR { + s_type: StructureType::PIPELINE_EXECUTABLE_PROPERTIES_KHR, + p_next: ::std::ptr::null_mut(), + stages: 
ShaderStageFlags::default(), + name: unsafe { ::std::mem::zeroed() }, + description: unsafe { ::std::mem::zeroed() }, + subgroup_size: u32::default(), + } + } +} +impl PipelineExecutablePropertiesKHR { + pub fn builder<'a>() -> PipelineExecutablePropertiesKHRBuilder<'a> { + PipelineExecutablePropertiesKHRBuilder { + inner: PipelineExecutablePropertiesKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineExecutablePropertiesKHRBuilder<'a> { + inner: PipelineExecutablePropertiesKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineExecutablePropertiesKHR {} +impl<'a> ::std::ops::Deref for PipelineExecutablePropertiesKHRBuilder<'a> { + type Target = PipelineExecutablePropertiesKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineExecutablePropertiesKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineExecutablePropertiesKHRBuilder<'a> { + pub fn stages( + mut self, + stages: ShaderStageFlags, + ) -> PipelineExecutablePropertiesKHRBuilder<'a> { + self.inner.stages = stages; + self + } + pub fn name( + mut self, + name: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PipelineExecutablePropertiesKHRBuilder<'a> { + self.inner.name = name; + self + } + pub fn description( + mut self, + description: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PipelineExecutablePropertiesKHRBuilder<'a> { + self.inner.description = description; + self + } + pub fn subgroup_size( + mut self, + subgroup_size: u32, + ) -> PipelineExecutablePropertiesKHRBuilder<'a> { + self.inner.subgroup_size = subgroup_size; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineExecutablePropertiesKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineExecutablePropertiesKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineExecutableInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub pipeline: Pipeline, + pub executable_index: u32, +} +impl ::std::default::Default for PipelineExecutableInfoKHR { + fn default() -> PipelineExecutableInfoKHR { + PipelineExecutableInfoKHR { + s_type: StructureType::PIPELINE_EXECUTABLE_INFO_KHR, + p_next: ::std::ptr::null(), + pipeline: Pipeline::default(), + executable_index: u32::default(), + } + } +} +impl PipelineExecutableInfoKHR { + pub fn builder<'a>() -> PipelineExecutableInfoKHRBuilder<'a> { + PipelineExecutableInfoKHRBuilder { + inner: PipelineExecutableInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineExecutableInfoKHRBuilder<'a> { + inner: PipelineExecutableInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineExecutableInfoKHR {} +impl<'a> ::std::ops::Deref for PipelineExecutableInfoKHRBuilder<'a> { + type Target = PipelineExecutableInfoKHR; + fn 
deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineExecutableInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineExecutableInfoKHRBuilder<'a> { + pub fn pipeline(mut self, pipeline: Pipeline) -> PipelineExecutableInfoKHRBuilder<'a> { + self.inner.pipeline = pipeline; + self + } + pub fn executable_index( + mut self, + executable_index: u32, + ) -> PipelineExecutableInfoKHRBuilder<'a> { + self.inner.executable_index = executable_index; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineExecutableInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineExecutableInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub union PipelineExecutableStatisticValueKHR { + pub b32: Bool32, + pub i64: i64, + pub u64: u64, + pub f64: f64, +} +impl ::std::default::Default for PipelineExecutableStatisticValueKHR { + fn default() -> PipelineExecutableStatisticValueKHR { + unsafe { ::std::mem::zeroed() } + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PipelineExecutableStatisticKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub name: [c_char; MAX_DESCRIPTION_SIZE], + pub description: [c_char; MAX_DESCRIPTION_SIZE], + pub format: PipelineExecutableStatisticFormatKHR, + pub value: PipelineExecutableStatisticValueKHR, +} +impl fmt::Debug for PipelineExecutableStatisticKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PipelineExecutableStatisticKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("name", &unsafe { + ::std::ffi::CStr::from_ptr(self.name.as_ptr() as *const c_char) + }) + .field("description", &unsafe { + ::std::ffi::CStr::from_ptr(self.description.as_ptr() as *const c_char) + }) + .field("format", &self.format) + .field("value", &"union") + .finish() + } +} +impl ::std::default::Default for PipelineExecutableStatisticKHR { + fn default() -> PipelineExecutableStatisticKHR { + PipelineExecutableStatisticKHR { + s_type: StructureType::PIPELINE_EXECUTABLE_STATISTIC_KHR, + p_next: ::std::ptr::null_mut(), + name: unsafe { ::std::mem::zeroed() }, + description: unsafe { ::std::mem::zeroed() }, + format: PipelineExecutableStatisticFormatKHR::default(), + value: PipelineExecutableStatisticValueKHR::default(), + } + } +} +impl PipelineExecutableStatisticKHR { + pub fn builder<'a>() -> 
PipelineExecutableStatisticKHRBuilder<'a> { + PipelineExecutableStatisticKHRBuilder { + inner: PipelineExecutableStatisticKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineExecutableStatisticKHRBuilder<'a> { + inner: PipelineExecutableStatisticKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineExecutableStatisticKHR {} +impl<'a> ::std::ops::Deref for PipelineExecutableStatisticKHRBuilder<'a> { + type Target = PipelineExecutableStatisticKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineExecutableStatisticKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineExecutableStatisticKHRBuilder<'a> { + pub fn name( + mut self, + name: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PipelineExecutableStatisticKHRBuilder<'a> { + self.inner.name = name; + self + } + pub fn description( + mut self, + description: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PipelineExecutableStatisticKHRBuilder<'a> { + self.inner.description = description; + self + } + pub fn format( + mut self, + format: PipelineExecutableStatisticFormatKHR, + ) -> PipelineExecutableStatisticKHRBuilder<'a> { + self.inner.format = format; + self + } + pub fn value( + mut self, + value: PipelineExecutableStatisticValueKHR, + ) -> PipelineExecutableStatisticKHRBuilder<'a> { + self.inner.value = value; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineExecutableStatisticKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineExecutableStatisticKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PipelineExecutableInternalRepresentationKHR { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub name: [c_char; MAX_DESCRIPTION_SIZE], + pub description: [c_char; MAX_DESCRIPTION_SIZE], + pub is_text: Bool32, + pub data_size: usize, + pub p_data: *mut c_void, +} +impl fmt::Debug for PipelineExecutableInternalRepresentationKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PipelineExecutableInternalRepresentationKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("name", &unsafe { + ::std::ffi::CStr::from_ptr(self.name.as_ptr() as *const c_char) + }) + .field("description", &unsafe { + ::std::ffi::CStr::from_ptr(self.description.as_ptr() as *const c_char) + }) + .field("is_text", &self.is_text) + .field("data_size", &self.data_size) + .field("p_data", &self.p_data) + .finish() + } +} +impl ::std::default::Default for PipelineExecutableInternalRepresentationKHR { + fn default() -> PipelineExecutableInternalRepresentationKHR { + 
PipelineExecutableInternalRepresentationKHR { + s_type: StructureType::PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR, + p_next: ::std::ptr::null_mut(), + name: unsafe { ::std::mem::zeroed() }, + description: unsafe { ::std::mem::zeroed() }, + is_text: Bool32::default(), + data_size: usize::default(), + p_data: ::std::ptr::null_mut(), + } + } +} +impl PipelineExecutableInternalRepresentationKHR { + pub fn builder<'a>() -> PipelineExecutableInternalRepresentationKHRBuilder<'a> { + PipelineExecutableInternalRepresentationKHRBuilder { + inner: PipelineExecutableInternalRepresentationKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineExecutableInternalRepresentationKHRBuilder<'a> { + inner: PipelineExecutableInternalRepresentationKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineExecutableInternalRepresentationKHR {} +impl<'a> ::std::ops::Deref for PipelineExecutableInternalRepresentationKHRBuilder<'a> { + type Target = PipelineExecutableInternalRepresentationKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineExecutableInternalRepresentationKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineExecutableInternalRepresentationKHRBuilder<'a> { + pub fn name( + mut self, + name: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PipelineExecutableInternalRepresentationKHRBuilder<'a> { + self.inner.name = name; + self + } + pub fn description( + mut self, + description: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PipelineExecutableInternalRepresentationKHRBuilder<'a> { + self.inner.description = description; + self + } + pub fn is_text( + mut self, + is_text: bool, + ) -> PipelineExecutableInternalRepresentationKHRBuilder<'a> { + self.inner.is_text = is_text.into(); + self + } + pub fn data( + mut self, + data: &'a mut [u8], + ) -> PipelineExecutableInternalRepresentationKHRBuilder<'a> { + 
self.inner.data_size = data.len() as _; + self.inner.p_data = data.as_mut_ptr() as *mut c_void; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineExecutableInternalRepresentationKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineExecutableInternalRepresentationKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub shader_demote_to_helper_invocation: Bool32, +} +impl ::std::default::Default for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT { + fn default() -> PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT { + PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + shader_demote_to_helper_invocation: Bool32::default(), + } + } +} +impl PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT { + pub fn builder<'a>() -> 
PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> { + PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder { + inner: PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut + for PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> { + pub fn shader_demote_to_helper_invocation( + mut self, + shader_demote_to_helper_invocation: bool, + ) -> PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXTBuilder<'a> { + self.inner.shader_demote_to_helper_invocation = shader_demote_to_helper_invocation.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceTexelBufferAlignmentFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub texel_buffer_alignment: Bool32, +} +impl ::std::default::Default for PhysicalDeviceTexelBufferAlignmentFeaturesEXT { + fn default() -> PhysicalDeviceTexelBufferAlignmentFeaturesEXT { + PhysicalDeviceTexelBufferAlignmentFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + texel_buffer_alignment: Bool32::default(), + } + } +} +impl PhysicalDeviceTexelBufferAlignmentFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceTexelBufferAlignmentFeaturesEXTBuilder<'a> { + PhysicalDeviceTexelBufferAlignmentFeaturesEXTBuilder { + inner: PhysicalDeviceTexelBufferAlignmentFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceTexelBufferAlignmentFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceTexelBufferAlignmentFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceTexelBufferAlignmentFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceTexelBufferAlignmentFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceTexelBufferAlignmentFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceTexelBufferAlignmentFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceTexelBufferAlignmentFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceTexelBufferAlignmentFeaturesEXTBuilder<'a> { + pub fn 
texel_buffer_alignment( + mut self, + texel_buffer_alignment: bool, + ) -> PhysicalDeviceTexelBufferAlignmentFeaturesEXTBuilder<'a> { + self.inner.texel_buffer_alignment = texel_buffer_alignment.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceTexelBufferAlignmentFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceTexelBufferAlignmentPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub storage_texel_buffer_offset_alignment_bytes: DeviceSize, + pub storage_texel_buffer_offset_single_texel_alignment: Bool32, + pub uniform_texel_buffer_offset_alignment_bytes: DeviceSize, + pub uniform_texel_buffer_offset_single_texel_alignment: Bool32, +} +impl ::std::default::Default for PhysicalDeviceTexelBufferAlignmentPropertiesEXT { + fn default() -> PhysicalDeviceTexelBufferAlignmentPropertiesEXT { + PhysicalDeviceTexelBufferAlignmentPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + storage_texel_buffer_offset_alignment_bytes: DeviceSize::default(), + storage_texel_buffer_offset_single_texel_alignment: Bool32::default(), + uniform_texel_buffer_offset_alignment_bytes: DeviceSize::default(), + uniform_texel_buffer_offset_single_texel_alignment: Bool32::default(), + } + } +} +impl PhysicalDeviceTexelBufferAlignmentPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'a> { + PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder { + inner: PhysicalDeviceTexelBufferAlignmentPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct 
PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceTexelBufferAlignmentPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceTexelBufferAlignmentPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceTexelBufferAlignmentPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'a> { + pub fn storage_texel_buffer_offset_alignment_bytes( + mut self, + storage_texel_buffer_offset_alignment_bytes: DeviceSize, + ) -> PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'a> { + self.inner.storage_texel_buffer_offset_alignment_bytes = + storage_texel_buffer_offset_alignment_bytes; + self + } + pub fn storage_texel_buffer_offset_single_texel_alignment( + mut self, + storage_texel_buffer_offset_single_texel_alignment: bool, + ) -> PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'a> { + self.inner + .storage_texel_buffer_offset_single_texel_alignment = + storage_texel_buffer_offset_single_texel_alignment.into(); + self + } + pub fn uniform_texel_buffer_offset_alignment_bytes( + mut self, + uniform_texel_buffer_offset_alignment_bytes: DeviceSize, + ) -> PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'a> { + self.inner.uniform_texel_buffer_offset_alignment_bytes = + uniform_texel_buffer_offset_alignment_bytes; + self + } + pub fn uniform_texel_buffer_offset_single_texel_alignment( + mut self, + uniform_texel_buffer_offset_single_texel_alignment: bool, + ) -> 
PhysicalDeviceTexelBufferAlignmentPropertiesEXTBuilder<'a> { + self.inner + .uniform_texel_buffer_offset_single_texel_alignment = + uniform_texel_buffer_offset_single_texel_alignment.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceTexelBufferAlignmentPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSubgroupSizeControlFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub subgroup_size_control: Bool32, + pub compute_full_subgroups: Bool32, +} +impl ::std::default::Default for PhysicalDeviceSubgroupSizeControlFeaturesEXT { + fn default() -> PhysicalDeviceSubgroupSizeControlFeaturesEXT { + PhysicalDeviceSubgroupSizeControlFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + subgroup_size_control: Bool32::default(), + compute_full_subgroups: Bool32::default(), + } + } +} +impl PhysicalDeviceSubgroupSizeControlFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceSubgroupSizeControlFeaturesEXTBuilder<'a> { + PhysicalDeviceSubgroupSizeControlFeaturesEXTBuilder { + inner: PhysicalDeviceSubgroupSizeControlFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSubgroupSizeControlFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceSubgroupSizeControlFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceSubgroupSizeControlFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceSubgroupSizeControlFeaturesEXT {} +impl<'a> ::std::ops::Deref for 
PhysicalDeviceSubgroupSizeControlFeaturesEXTBuilder<'a> { + type Target = PhysicalDeviceSubgroupSizeControlFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSubgroupSizeControlFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceSubgroupSizeControlFeaturesEXTBuilder<'a> { + pub fn subgroup_size_control( + mut self, + subgroup_size_control: bool, + ) -> PhysicalDeviceSubgroupSizeControlFeaturesEXTBuilder<'a> { + self.inner.subgroup_size_control = subgroup_size_control.into(); + self + } + pub fn compute_full_subgroups( + mut self, + compute_full_subgroups: bool, + ) -> PhysicalDeviceSubgroupSizeControlFeaturesEXTBuilder<'a> { + self.inner.compute_full_subgroups = compute_full_subgroups.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceSubgroupSizeControlFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceSubgroupSizeControlPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub min_subgroup_size: u32, + pub max_subgroup_size: u32, + pub max_compute_workgroup_subgroups: u32, + pub required_subgroup_size_stages: ShaderStageFlags, +} +impl ::std::default::Default for PhysicalDeviceSubgroupSizeControlPropertiesEXT { + fn default() -> PhysicalDeviceSubgroupSizeControlPropertiesEXT { + PhysicalDeviceSubgroupSizeControlPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + min_subgroup_size: u32::default(), + max_subgroup_size: u32::default(), + max_compute_workgroup_subgroups: u32::default(), + 
required_subgroup_size_stages: ShaderStageFlags::default(), + } + } +} +impl PhysicalDeviceSubgroupSizeControlPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'a> { + PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder { + inner: PhysicalDeviceSubgroupSizeControlPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceSubgroupSizeControlPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceSubgroupSizeControlPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceSubgroupSizeControlPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'a> { + pub fn min_subgroup_size( + mut self, + min_subgroup_size: u32, + ) -> PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'a> { + self.inner.min_subgroup_size = min_subgroup_size; + self + } + pub fn max_subgroup_size( + mut self, + max_subgroup_size: u32, + ) -> PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'a> { + self.inner.max_subgroup_size = max_subgroup_size; + self + } + pub fn max_compute_workgroup_subgroups( + mut self, + max_compute_workgroup_subgroups: u32, + ) -> PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'a> { + self.inner.max_compute_workgroup_subgroups = max_compute_workgroup_subgroups; + self + } + pub fn required_subgroup_size_stages( + mut self, + required_subgroup_size_stages: 
ShaderStageFlags, + ) -> PhysicalDeviceSubgroupSizeControlPropertiesEXTBuilder<'a> { + self.inner.required_subgroup_size_stages = required_subgroup_size_stages; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceSubgroupSizeControlPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub required_subgroup_size: u32, +} +impl ::std::default::Default for PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + fn default() -> PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + s_type: StructureType::PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT, + p_next: ::std::ptr::null_mut(), + required_subgroup_size: u32::default(), + } + } +} +impl PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + pub fn builder<'a>() -> PipelineShaderStageRequiredSubgroupSizeCreateInfoEXTBuilder<'a> { + PipelineShaderStageRequiredSubgroupSizeCreateInfoEXTBuilder { + inner: PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineShaderStageRequiredSubgroupSizeCreateInfoEXTBuilder<'a> { + inner: PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineShaderStageCreateInfo + for PipelineShaderStageRequiredSubgroupSizeCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPipelineShaderStageCreateInfo + for PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT +{ +} +impl<'a> ::std::ops::Deref for 
PipelineShaderStageRequiredSubgroupSizeCreateInfoEXTBuilder<'a> { + type Target = PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineShaderStageRequiredSubgroupSizeCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineShaderStageRequiredSubgroupSizeCreateInfoEXTBuilder<'a> { + pub fn required_subgroup_size( + mut self, + required_subgroup_size: u32, + ) -> PipelineShaderStageRequiredSubgroupSizeCreateInfoEXTBuilder<'a> { + self.inner.required_subgroup_size = required_subgroup_size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct MemoryOpaqueCaptureAddressAllocateInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub opaque_capture_address: u64, +} +impl ::std::default::Default for MemoryOpaqueCaptureAddressAllocateInfo { + fn default() -> MemoryOpaqueCaptureAddressAllocateInfo { + MemoryOpaqueCaptureAddressAllocateInfo { + s_type: StructureType::MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO, + p_next: ::std::ptr::null(), + opaque_capture_address: u64::default(), + } + } +} +impl MemoryOpaqueCaptureAddressAllocateInfo { + pub fn builder<'a>() -> MemoryOpaqueCaptureAddressAllocateInfoBuilder<'a> { + MemoryOpaqueCaptureAddressAllocateInfoBuilder { + inner: MemoryOpaqueCaptureAddressAllocateInfo::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct MemoryOpaqueCaptureAddressAllocateInfoBuilder<'a> { + inner: MemoryOpaqueCaptureAddressAllocateInfo, + 
marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsMemoryAllocateInfo for MemoryOpaqueCaptureAddressAllocateInfoBuilder<'_> {} +unsafe impl ExtendsMemoryAllocateInfo for MemoryOpaqueCaptureAddressAllocateInfo {} +impl<'a> ::std::ops::Deref for MemoryOpaqueCaptureAddressAllocateInfoBuilder<'a> { + type Target = MemoryOpaqueCaptureAddressAllocateInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for MemoryOpaqueCaptureAddressAllocateInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> MemoryOpaqueCaptureAddressAllocateInfoBuilder<'a> { + pub fn opaque_capture_address( + mut self, + opaque_capture_address: u64, + ) -> MemoryOpaqueCaptureAddressAllocateInfoBuilder<'a> { + self.inner.opaque_capture_address = opaque_capture_address; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> MemoryOpaqueCaptureAddressAllocateInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceMemoryOpaqueCaptureAddressInfo { + pub s_type: StructureType, + pub p_next: *const c_void, + pub memory: DeviceMemory, +} +impl ::std::default::Default for DeviceMemoryOpaqueCaptureAddressInfo { + fn default() -> DeviceMemoryOpaqueCaptureAddressInfo { + DeviceMemoryOpaqueCaptureAddressInfo { + s_type: StructureType::DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO, + p_next: ::std::ptr::null(), + memory: DeviceMemory::default(), + } + } +} +impl DeviceMemoryOpaqueCaptureAddressInfo { + pub fn builder<'a>() -> DeviceMemoryOpaqueCaptureAddressInfoBuilder<'a> { + DeviceMemoryOpaqueCaptureAddressInfoBuilder { + inner: DeviceMemoryOpaqueCaptureAddressInfo::default(), + marker: ::std::marker::PhantomData, + } 
+ } +} +#[repr(transparent)] +pub struct DeviceMemoryOpaqueCaptureAddressInfoBuilder<'a> { + inner: DeviceMemoryOpaqueCaptureAddressInfo, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsDeviceMemoryOpaqueCaptureAddressInfo {} +impl<'a> ::std::ops::Deref for DeviceMemoryOpaqueCaptureAddressInfoBuilder<'a> { + type Target = DeviceMemoryOpaqueCaptureAddressInfo; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceMemoryOpaqueCaptureAddressInfoBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceMemoryOpaqueCaptureAddressInfoBuilder<'a> { + pub fn memory( + mut self, + memory: DeviceMemory, + ) -> DeviceMemoryOpaqueCaptureAddressInfoBuilder<'a> { + self.inner.memory = memory; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> DeviceMemoryOpaqueCaptureAddressInfoBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceMemoryOpaqueCaptureAddressInfo { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceLineRasterizationFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub rectangular_lines: Bool32, + pub bresenham_lines: Bool32, + pub smooth_lines: Bool32, + pub stippled_rectangular_lines: Bool32, + pub stippled_bresenham_lines: Bool32, + pub stippled_smooth_lines: Bool32, +} +impl ::std::default::Default for PhysicalDeviceLineRasterizationFeaturesEXT { + fn default() -> PhysicalDeviceLineRasterizationFeaturesEXT { + PhysicalDeviceLineRasterizationFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + rectangular_lines: Bool32::default(), + bresenham_lines: Bool32::default(), + smooth_lines: Bool32::default(), + stippled_rectangular_lines: Bool32::default(), + stippled_bresenham_lines: Bool32::default(), + stippled_smooth_lines: Bool32::default(), + } + } +} +impl PhysicalDeviceLineRasterizationFeaturesEXT { + pub fn builder<'a>() -> PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + PhysicalDeviceLineRasterizationFeaturesEXTBuilder { + inner: PhysicalDeviceLineRasterizationFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + inner: PhysicalDeviceLineRasterizationFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceLineRasterizationFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + type Target = 
PhysicalDeviceLineRasterizationFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + pub fn rectangular_lines( + mut self, + rectangular_lines: bool, + ) -> PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + self.inner.rectangular_lines = rectangular_lines.into(); + self + } + pub fn bresenham_lines( + mut self, + bresenham_lines: bool, + ) -> PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + self.inner.bresenham_lines = bresenham_lines.into(); + self + } + pub fn smooth_lines( + mut self, + smooth_lines: bool, + ) -> PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + self.inner.smooth_lines = smooth_lines.into(); + self + } + pub fn stippled_rectangular_lines( + mut self, + stippled_rectangular_lines: bool, + ) -> PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + self.inner.stippled_rectangular_lines = stippled_rectangular_lines.into(); + self + } + pub fn stippled_bresenham_lines( + mut self, + stippled_bresenham_lines: bool, + ) -> PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + self.inner.stippled_bresenham_lines = stippled_bresenham_lines.into(); + self + } + pub fn stippled_smooth_lines( + mut self, + stippled_smooth_lines: bool, + ) -> PhysicalDeviceLineRasterizationFeaturesEXTBuilder<'a> { + self.inner.stippled_smooth_lines = stippled_smooth_lines.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceLineRasterizationFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceLineRasterizationPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub line_sub_pixel_precision_bits: u32, +} +impl ::std::default::Default for PhysicalDeviceLineRasterizationPropertiesEXT { + fn default() -> PhysicalDeviceLineRasterizationPropertiesEXT { + PhysicalDeviceLineRasterizationPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + line_sub_pixel_precision_bits: u32::default(), + } + } +} +impl PhysicalDeviceLineRasterizationPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceLineRasterizationPropertiesEXTBuilder<'a> { + PhysicalDeviceLineRasterizationPropertiesEXTBuilder { + inner: PhysicalDeviceLineRasterizationPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceLineRasterizationPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceLineRasterizationPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 + for PhysicalDeviceLineRasterizationPropertiesEXTBuilder<'_> +{ +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceLineRasterizationPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceLineRasterizationPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceLineRasterizationPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceLineRasterizationPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceLineRasterizationPropertiesEXTBuilder<'a> { + pub fn 
line_sub_pixel_precision_bits( + mut self, + line_sub_pixel_precision_bits: u32, + ) -> PhysicalDeviceLineRasterizationPropertiesEXTBuilder<'a> { + self.inner.line_sub_pixel_precision_bits = line_sub_pixel_precision_bits; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceLineRasterizationPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineRasterizationLineStateCreateInfoEXT { + pub s_type: StructureType, + pub p_next: *const c_void, + pub line_rasterization_mode: LineRasterizationModeEXT, + pub stippled_line_enable: Bool32, + pub line_stipple_factor: u32, + pub line_stipple_pattern: u16, +} +impl ::std::default::Default for PipelineRasterizationLineStateCreateInfoEXT { + fn default() -> PipelineRasterizationLineStateCreateInfoEXT { + PipelineRasterizationLineStateCreateInfoEXT { + s_type: StructureType::PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT, + p_next: ::std::ptr::null(), + line_rasterization_mode: LineRasterizationModeEXT::default(), + stippled_line_enable: Bool32::default(), + line_stipple_factor: u32::default(), + line_stipple_pattern: u16::default(), + } + } +} +impl PipelineRasterizationLineStateCreateInfoEXT { + pub fn builder<'a>() -> PipelineRasterizationLineStateCreateInfoEXTBuilder<'a> { + PipelineRasterizationLineStateCreateInfoEXTBuilder { + inner: PipelineRasterizationLineStateCreateInfoEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineRasterizationLineStateCreateInfoEXTBuilder<'a> { + inner: PipelineRasterizationLineStateCreateInfoEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPipelineRasterizationStateCreateInfo + for 
PipelineRasterizationLineStateCreateInfoEXTBuilder<'_> +{ +} +unsafe impl ExtendsPipelineRasterizationStateCreateInfo + for PipelineRasterizationLineStateCreateInfoEXT +{ +} +impl<'a> ::std::ops::Deref for PipelineRasterizationLineStateCreateInfoEXTBuilder<'a> { + type Target = PipelineRasterizationLineStateCreateInfoEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineRasterizationLineStateCreateInfoEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineRasterizationLineStateCreateInfoEXTBuilder<'a> { + pub fn line_rasterization_mode( + mut self, + line_rasterization_mode: LineRasterizationModeEXT, + ) -> PipelineRasterizationLineStateCreateInfoEXTBuilder<'a> { + self.inner.line_rasterization_mode = line_rasterization_mode; + self + } + pub fn stippled_line_enable( + mut self, + stippled_line_enable: bool, + ) -> PipelineRasterizationLineStateCreateInfoEXTBuilder<'a> { + self.inner.stippled_line_enable = stippled_line_enable.into(); + self + } + pub fn line_stipple_factor( + mut self, + line_stipple_factor: u32, + ) -> PipelineRasterizationLineStateCreateInfoEXTBuilder<'a> { + self.inner.line_stipple_factor = line_stipple_factor; + self + } + pub fn line_stipple_pattern( + mut self, + line_stipple_pattern: u16, + ) -> PipelineRasterizationLineStateCreateInfoEXTBuilder<'a> { + self.inner.line_stipple_pattern = line_stipple_pattern; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineRasterizationLineStateCreateInfoEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDevicePipelineCreationCacheControlFeaturesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub pipeline_creation_cache_control: Bool32, +} +impl ::std::default::Default for PhysicalDevicePipelineCreationCacheControlFeaturesEXT { + fn default() -> PhysicalDevicePipelineCreationCacheControlFeaturesEXT { + PhysicalDevicePipelineCreationCacheControlFeaturesEXT { + s_type: StructureType::PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT, + p_next: ::std::ptr::null_mut(), + pipeline_creation_cache_control: Bool32::default(), + } + } +} +impl PhysicalDevicePipelineCreationCacheControlFeaturesEXT { + pub fn builder<'a>() -> PhysicalDevicePipelineCreationCacheControlFeaturesEXTBuilder<'a> { + PhysicalDevicePipelineCreationCacheControlFeaturesEXTBuilder { + inner: PhysicalDevicePipelineCreationCacheControlFeaturesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDevicePipelineCreationCacheControlFeaturesEXTBuilder<'a> { + inner: PhysicalDevicePipelineCreationCacheControlFeaturesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo + for PhysicalDevicePipelineCreationCacheControlFeaturesEXTBuilder<'_> +{ +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDevicePipelineCreationCacheControlFeaturesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDevicePipelineCreationCacheControlFeaturesEXTBuilder<'a> { + type Target = PhysicalDevicePipelineCreationCacheControlFeaturesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDevicePipelineCreationCacheControlFeaturesEXTBuilder<'a> { + fn 
deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDevicePipelineCreationCacheControlFeaturesEXTBuilder<'a> { + pub fn pipeline_creation_cache_control( + mut self, + pipeline_creation_cache_control: bool, + ) -> PhysicalDevicePipelineCreationCacheControlFeaturesEXTBuilder<'a> { + self.inner.pipeline_creation_cache_control = pipeline_creation_cache_control.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDevicePipelineCreationCacheControlFeaturesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceVulkan11Features { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub storage_buffer16_bit_access: Bool32, + pub uniform_and_storage_buffer16_bit_access: Bool32, + pub storage_push_constant16: Bool32, + pub storage_input_output16: Bool32, + pub multiview: Bool32, + pub multiview_geometry_shader: Bool32, + pub multiview_tessellation_shader: Bool32, + pub variable_pointers_storage_buffer: Bool32, + pub variable_pointers: Bool32, + pub protected_memory: Bool32, + pub sampler_ycbcr_conversion: Bool32, + pub shader_draw_parameters: Bool32, +} +impl ::std::default::Default for PhysicalDeviceVulkan11Features { + fn default() -> PhysicalDeviceVulkan11Features { + PhysicalDeviceVulkan11Features { + s_type: StructureType::PHYSICAL_DEVICE_VULKAN_1_1_FEATURES, + p_next: ::std::ptr::null_mut(), + storage_buffer16_bit_access: Bool32::default(), + uniform_and_storage_buffer16_bit_access: Bool32::default(), + storage_push_constant16: Bool32::default(), + storage_input_output16: Bool32::default(), + multiview: Bool32::default(), + multiview_geometry_shader: Bool32::default(), + multiview_tessellation_shader: 
Bool32::default(), + variable_pointers_storage_buffer: Bool32::default(), + variable_pointers: Bool32::default(), + protected_memory: Bool32::default(), + sampler_ycbcr_conversion: Bool32::default(), + shader_draw_parameters: Bool32::default(), + } + } +} +impl PhysicalDeviceVulkan11Features { + pub fn builder<'a>() -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + PhysicalDeviceVulkan11FeaturesBuilder { + inner: PhysicalDeviceVulkan11Features::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceVulkan11FeaturesBuilder<'a> { + inner: PhysicalDeviceVulkan11Features, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVulkan11FeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVulkan11Features {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVulkan11FeaturesBuilder<'a> { + type Target = PhysicalDeviceVulkan11Features; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceVulkan11FeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + pub fn storage_buffer16_bit_access( + mut self, + storage_buffer16_bit_access: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.storage_buffer16_bit_access = storage_buffer16_bit_access.into(); + self + } + pub fn uniform_and_storage_buffer16_bit_access( + mut self, + uniform_and_storage_buffer16_bit_access: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.uniform_and_storage_buffer16_bit_access = + uniform_and_storage_buffer16_bit_access.into(); + self + } + pub fn storage_push_constant16( + mut self, + storage_push_constant16: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.storage_push_constant16 = storage_push_constant16.into(); + self + } + pub fn storage_input_output16( + mut self, + 
storage_input_output16: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.storage_input_output16 = storage_input_output16.into(); + self + } + pub fn multiview(mut self, multiview: bool) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.multiview = multiview.into(); + self + } + pub fn multiview_geometry_shader( + mut self, + multiview_geometry_shader: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.multiview_geometry_shader = multiview_geometry_shader.into(); + self + } + pub fn multiview_tessellation_shader( + mut self, + multiview_tessellation_shader: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.multiview_tessellation_shader = multiview_tessellation_shader.into(); + self + } + pub fn variable_pointers_storage_buffer( + mut self, + variable_pointers_storage_buffer: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.variable_pointers_storage_buffer = variable_pointers_storage_buffer.into(); + self + } + pub fn variable_pointers( + mut self, + variable_pointers: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.variable_pointers = variable_pointers.into(); + self + } + pub fn protected_memory( + mut self, + protected_memory: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.protected_memory = protected_memory.into(); + self + } + pub fn sampler_ycbcr_conversion( + mut self, + sampler_ycbcr_conversion: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.sampler_ycbcr_conversion = sampler_ycbcr_conversion.into(); + self + } + pub fn shader_draw_parameters( + mut self, + shader_draw_parameters: bool, + ) -> PhysicalDeviceVulkan11FeaturesBuilder<'a> { + self.inner.shader_draw_parameters = shader_draw_parameters.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceVulkan11Features { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceVulkan11Properties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub device_uuid: [u8; UUID_SIZE], + pub driver_uuid: [u8; UUID_SIZE], + pub device_luid: [u8; LUID_SIZE], + pub device_node_mask: u32, + pub device_luid_valid: Bool32, + pub subgroup_size: u32, + pub subgroup_supported_stages: ShaderStageFlags, + pub subgroup_supported_operations: SubgroupFeatureFlags, + pub subgroup_quad_operations_in_all_stages: Bool32, + pub point_clipping_behavior: PointClippingBehavior, + pub max_multiview_view_count: u32, + pub max_multiview_instance_index: u32, + pub protected_no_fault: Bool32, + pub max_per_set_descriptors: u32, + pub max_memory_allocation_size: DeviceSize, +} +impl ::std::default::Default for PhysicalDeviceVulkan11Properties { + fn default() -> PhysicalDeviceVulkan11Properties { + PhysicalDeviceVulkan11Properties { + s_type: StructureType::PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES, + p_next: ::std::ptr::null_mut(), + device_uuid: unsafe { ::std::mem::zeroed() }, + driver_uuid: unsafe { ::std::mem::zeroed() }, + device_luid: unsafe { ::std::mem::zeroed() }, + device_node_mask: u32::default(), + device_luid_valid: Bool32::default(), + subgroup_size: u32::default(), + subgroup_supported_stages: ShaderStageFlags::default(), + subgroup_supported_operations: SubgroupFeatureFlags::default(), + subgroup_quad_operations_in_all_stages: Bool32::default(), + point_clipping_behavior: PointClippingBehavior::default(), + max_multiview_view_count: u32::default(), + max_multiview_instance_index: u32::default(), + protected_no_fault: Bool32::default(), + max_per_set_descriptors: u32::default(), + max_memory_allocation_size: DeviceSize::default(), + } + } 
+} +impl PhysicalDeviceVulkan11Properties { + pub fn builder<'a>() -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + PhysicalDeviceVulkan11PropertiesBuilder { + inner: PhysicalDeviceVulkan11Properties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceVulkan11PropertiesBuilder<'a> { + inner: PhysicalDeviceVulkan11Properties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceVulkan11PropertiesBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceVulkan11Properties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVulkan11PropertiesBuilder<'a> { + type Target = PhysicalDeviceVulkan11Properties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceVulkan11PropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + pub fn device_uuid( + mut self, + device_uuid: [u8; UUID_SIZE], + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.device_uuid = device_uuid; + self + } + pub fn driver_uuid( + mut self, + driver_uuid: [u8; UUID_SIZE], + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.driver_uuid = driver_uuid; + self + } + pub fn device_luid( + mut self, + device_luid: [u8; LUID_SIZE], + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.device_luid = device_luid; + self + } + pub fn device_node_mask( + mut self, + device_node_mask: u32, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.device_node_mask = device_node_mask; + self + } + pub fn device_luid_valid( + mut self, + device_luid_valid: bool, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.device_luid_valid = device_luid_valid.into(); + self + } + pub fn subgroup_size( + mut self, + subgroup_size: u32, + ) -> 
PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.subgroup_size = subgroup_size; + self + } + pub fn subgroup_supported_stages( + mut self, + subgroup_supported_stages: ShaderStageFlags, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.subgroup_supported_stages = subgroup_supported_stages; + self + } + pub fn subgroup_supported_operations( + mut self, + subgroup_supported_operations: SubgroupFeatureFlags, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.subgroup_supported_operations = subgroup_supported_operations; + self + } + pub fn subgroup_quad_operations_in_all_stages( + mut self, + subgroup_quad_operations_in_all_stages: bool, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.subgroup_quad_operations_in_all_stages = + subgroup_quad_operations_in_all_stages.into(); + self + } + pub fn point_clipping_behavior( + mut self, + point_clipping_behavior: PointClippingBehavior, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.point_clipping_behavior = point_clipping_behavior; + self + } + pub fn max_multiview_view_count( + mut self, + max_multiview_view_count: u32, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.max_multiview_view_count = max_multiview_view_count; + self + } + pub fn max_multiview_instance_index( + mut self, + max_multiview_instance_index: u32, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.max_multiview_instance_index = max_multiview_instance_index; + self + } + pub fn protected_no_fault( + mut self, + protected_no_fault: bool, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.protected_no_fault = protected_no_fault.into(); + self + } + pub fn max_per_set_descriptors( + mut self, + max_per_set_descriptors: u32, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.max_per_set_descriptors = max_per_set_descriptors; + self + } + pub fn max_memory_allocation_size( + mut self, + max_memory_allocation_size: 
DeviceSize, + ) -> PhysicalDeviceVulkan11PropertiesBuilder<'a> { + self.inner.max_memory_allocation_size = max_memory_allocation_size; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceVulkan11Properties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceVulkan12Features { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub sampler_mirror_clamp_to_edge: Bool32, + pub draw_indirect_count: Bool32, + pub storage_buffer8_bit_access: Bool32, + pub uniform_and_storage_buffer8_bit_access: Bool32, + pub storage_push_constant8: Bool32, + pub shader_buffer_int64_atomics: Bool32, + pub shader_shared_int64_atomics: Bool32, + pub shader_float16: Bool32, + pub shader_int8: Bool32, + pub descriptor_indexing: Bool32, + pub shader_input_attachment_array_dynamic_indexing: Bool32, + pub shader_uniform_texel_buffer_array_dynamic_indexing: Bool32, + pub shader_storage_texel_buffer_array_dynamic_indexing: Bool32, + pub shader_uniform_buffer_array_non_uniform_indexing: Bool32, + pub shader_sampled_image_array_non_uniform_indexing: Bool32, + pub shader_storage_buffer_array_non_uniform_indexing: Bool32, + pub shader_storage_image_array_non_uniform_indexing: Bool32, + pub shader_input_attachment_array_non_uniform_indexing: Bool32, + pub shader_uniform_texel_buffer_array_non_uniform_indexing: Bool32, + pub shader_storage_texel_buffer_array_non_uniform_indexing: Bool32, + pub descriptor_binding_uniform_buffer_update_after_bind: Bool32, + pub descriptor_binding_sampled_image_update_after_bind: Bool32, + pub descriptor_binding_storage_image_update_after_bind: Bool32, + pub descriptor_binding_storage_buffer_update_after_bind: Bool32, + pub 
descriptor_binding_uniform_texel_buffer_update_after_bind: Bool32, + pub descriptor_binding_storage_texel_buffer_update_after_bind: Bool32, + pub descriptor_binding_update_unused_while_pending: Bool32, + pub descriptor_binding_partially_bound: Bool32, + pub descriptor_binding_variable_descriptor_count: Bool32, + pub runtime_descriptor_array: Bool32, + pub sampler_filter_minmax: Bool32, + pub scalar_block_layout: Bool32, + pub imageless_framebuffer: Bool32, + pub uniform_buffer_standard_layout: Bool32, + pub shader_subgroup_extended_types: Bool32, + pub separate_depth_stencil_layouts: Bool32, + pub host_query_reset: Bool32, + pub timeline_semaphore: Bool32, + pub buffer_device_address: Bool32, + pub buffer_device_address_capture_replay: Bool32, + pub buffer_device_address_multi_device: Bool32, + pub vulkan_memory_model: Bool32, + pub vulkan_memory_model_device_scope: Bool32, + pub vulkan_memory_model_availability_visibility_chains: Bool32, + pub shader_output_viewport_index: Bool32, + pub shader_output_layer: Bool32, + pub subgroup_broadcast_dynamic_id: Bool32, +} +impl ::std::default::Default for PhysicalDeviceVulkan12Features { + fn default() -> PhysicalDeviceVulkan12Features { + PhysicalDeviceVulkan12Features { + s_type: StructureType::PHYSICAL_DEVICE_VULKAN_1_2_FEATURES, + p_next: ::std::ptr::null_mut(), + sampler_mirror_clamp_to_edge: Bool32::default(), + draw_indirect_count: Bool32::default(), + storage_buffer8_bit_access: Bool32::default(), + uniform_and_storage_buffer8_bit_access: Bool32::default(), + storage_push_constant8: Bool32::default(), + shader_buffer_int64_atomics: Bool32::default(), + shader_shared_int64_atomics: Bool32::default(), + shader_float16: Bool32::default(), + shader_int8: Bool32::default(), + descriptor_indexing: Bool32::default(), + shader_input_attachment_array_dynamic_indexing: Bool32::default(), + shader_uniform_texel_buffer_array_dynamic_indexing: Bool32::default(), + shader_storage_texel_buffer_array_dynamic_indexing: 
Bool32::default(), + shader_uniform_buffer_array_non_uniform_indexing: Bool32::default(), + shader_sampled_image_array_non_uniform_indexing: Bool32::default(), + shader_storage_buffer_array_non_uniform_indexing: Bool32::default(), + shader_storage_image_array_non_uniform_indexing: Bool32::default(), + shader_input_attachment_array_non_uniform_indexing: Bool32::default(), + shader_uniform_texel_buffer_array_non_uniform_indexing: Bool32::default(), + shader_storage_texel_buffer_array_non_uniform_indexing: Bool32::default(), + descriptor_binding_uniform_buffer_update_after_bind: Bool32::default(), + descriptor_binding_sampled_image_update_after_bind: Bool32::default(), + descriptor_binding_storage_image_update_after_bind: Bool32::default(), + descriptor_binding_storage_buffer_update_after_bind: Bool32::default(), + descriptor_binding_uniform_texel_buffer_update_after_bind: Bool32::default(), + descriptor_binding_storage_texel_buffer_update_after_bind: Bool32::default(), + descriptor_binding_update_unused_while_pending: Bool32::default(), + descriptor_binding_partially_bound: Bool32::default(), + descriptor_binding_variable_descriptor_count: Bool32::default(), + runtime_descriptor_array: Bool32::default(), + sampler_filter_minmax: Bool32::default(), + scalar_block_layout: Bool32::default(), + imageless_framebuffer: Bool32::default(), + uniform_buffer_standard_layout: Bool32::default(), + shader_subgroup_extended_types: Bool32::default(), + separate_depth_stencil_layouts: Bool32::default(), + host_query_reset: Bool32::default(), + timeline_semaphore: Bool32::default(), + buffer_device_address: Bool32::default(), + buffer_device_address_capture_replay: Bool32::default(), + buffer_device_address_multi_device: Bool32::default(), + vulkan_memory_model: Bool32::default(), + vulkan_memory_model_device_scope: Bool32::default(), + vulkan_memory_model_availability_visibility_chains: Bool32::default(), + shader_output_viewport_index: Bool32::default(), + shader_output_layer: 
Bool32::default(), + subgroup_broadcast_dynamic_id: Bool32::default(), + } + } +} +impl PhysicalDeviceVulkan12Features { + pub fn builder<'a>() -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + PhysicalDeviceVulkan12FeaturesBuilder { + inner: PhysicalDeviceVulkan12Features::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceVulkan12FeaturesBuilder<'a> { + inner: PhysicalDeviceVulkan12Features, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVulkan12FeaturesBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceVulkan12Features {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVulkan12FeaturesBuilder<'a> { + type Target = PhysicalDeviceVulkan12Features; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceVulkan12FeaturesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + pub fn sampler_mirror_clamp_to_edge( + mut self, + sampler_mirror_clamp_to_edge: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.sampler_mirror_clamp_to_edge = sampler_mirror_clamp_to_edge.into(); + self + } + pub fn draw_indirect_count( + mut self, + draw_indirect_count: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.draw_indirect_count = draw_indirect_count.into(); + self + } + pub fn storage_buffer8_bit_access( + mut self, + storage_buffer8_bit_access: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.storage_buffer8_bit_access = storage_buffer8_bit_access.into(); + self + } + pub fn uniform_and_storage_buffer8_bit_access( + mut self, + uniform_and_storage_buffer8_bit_access: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.uniform_and_storage_buffer8_bit_access = + uniform_and_storage_buffer8_bit_access.into(); + self + } + pub fn 
storage_push_constant8( + mut self, + storage_push_constant8: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.storage_push_constant8 = storage_push_constant8.into(); + self + } + pub fn shader_buffer_int64_atomics( + mut self, + shader_buffer_int64_atomics: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_buffer_int64_atomics = shader_buffer_int64_atomics.into(); + self + } + pub fn shader_shared_int64_atomics( + mut self, + shader_shared_int64_atomics: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_shared_int64_atomics = shader_shared_int64_atomics.into(); + self + } + pub fn shader_float16( + mut self, + shader_float16: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_float16 = shader_float16.into(); + self + } + pub fn shader_int8(mut self, shader_int8: bool) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_int8 = shader_int8.into(); + self + } + pub fn descriptor_indexing( + mut self, + descriptor_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.descriptor_indexing = descriptor_indexing.into(); + self + } + pub fn shader_input_attachment_array_dynamic_indexing( + mut self, + shader_input_attachment_array_dynamic_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_input_attachment_array_dynamic_indexing = + shader_input_attachment_array_dynamic_indexing.into(); + self + } + pub fn shader_uniform_texel_buffer_array_dynamic_indexing( + mut self, + shader_uniform_texel_buffer_array_dynamic_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .shader_uniform_texel_buffer_array_dynamic_indexing = + shader_uniform_texel_buffer_array_dynamic_indexing.into(); + self + } + pub fn shader_storage_texel_buffer_array_dynamic_indexing( + mut self, + shader_storage_texel_buffer_array_dynamic_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + 
self.inner + .shader_storage_texel_buffer_array_dynamic_indexing = + shader_storage_texel_buffer_array_dynamic_indexing.into(); + self + } + pub fn shader_uniform_buffer_array_non_uniform_indexing( + mut self, + shader_uniform_buffer_array_non_uniform_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_uniform_buffer_array_non_uniform_indexing = + shader_uniform_buffer_array_non_uniform_indexing.into(); + self + } + pub fn shader_sampled_image_array_non_uniform_indexing( + mut self, + shader_sampled_image_array_non_uniform_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_sampled_image_array_non_uniform_indexing = + shader_sampled_image_array_non_uniform_indexing.into(); + self + } + pub fn shader_storage_buffer_array_non_uniform_indexing( + mut self, + shader_storage_buffer_array_non_uniform_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_storage_buffer_array_non_uniform_indexing = + shader_storage_buffer_array_non_uniform_indexing.into(); + self + } + pub fn shader_storage_image_array_non_uniform_indexing( + mut self, + shader_storage_image_array_non_uniform_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_storage_image_array_non_uniform_indexing = + shader_storage_image_array_non_uniform_indexing.into(); + self + } + pub fn shader_input_attachment_array_non_uniform_indexing( + mut self, + shader_input_attachment_array_non_uniform_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .shader_input_attachment_array_non_uniform_indexing = + shader_input_attachment_array_non_uniform_indexing.into(); + self + } + pub fn shader_uniform_texel_buffer_array_non_uniform_indexing( + mut self, + shader_uniform_texel_buffer_array_non_uniform_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .shader_uniform_texel_buffer_array_non_uniform_indexing = + 
shader_uniform_texel_buffer_array_non_uniform_indexing.into(); + self + } + pub fn shader_storage_texel_buffer_array_non_uniform_indexing( + mut self, + shader_storage_texel_buffer_array_non_uniform_indexing: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .shader_storage_texel_buffer_array_non_uniform_indexing = + shader_storage_texel_buffer_array_non_uniform_indexing.into(); + self + } + pub fn descriptor_binding_uniform_buffer_update_after_bind( + mut self, + descriptor_binding_uniform_buffer_update_after_bind: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .descriptor_binding_uniform_buffer_update_after_bind = + descriptor_binding_uniform_buffer_update_after_bind.into(); + self + } + pub fn descriptor_binding_sampled_image_update_after_bind( + mut self, + descriptor_binding_sampled_image_update_after_bind: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .descriptor_binding_sampled_image_update_after_bind = + descriptor_binding_sampled_image_update_after_bind.into(); + self + } + pub fn descriptor_binding_storage_image_update_after_bind( + mut self, + descriptor_binding_storage_image_update_after_bind: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .descriptor_binding_storage_image_update_after_bind = + descriptor_binding_storage_image_update_after_bind.into(); + self + } + pub fn descriptor_binding_storage_buffer_update_after_bind( + mut self, + descriptor_binding_storage_buffer_update_after_bind: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .descriptor_binding_storage_buffer_update_after_bind = + descriptor_binding_storage_buffer_update_after_bind.into(); + self + } + pub fn descriptor_binding_uniform_texel_buffer_update_after_bind( + mut self, + descriptor_binding_uniform_texel_buffer_update_after_bind: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .descriptor_binding_uniform_texel_buffer_update_after_bind = + 
descriptor_binding_uniform_texel_buffer_update_after_bind.into(); + self + } + pub fn descriptor_binding_storage_texel_buffer_update_after_bind( + mut self, + descriptor_binding_storage_texel_buffer_update_after_bind: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .descriptor_binding_storage_texel_buffer_update_after_bind = + descriptor_binding_storage_texel_buffer_update_after_bind.into(); + self + } + pub fn descriptor_binding_update_unused_while_pending( + mut self, + descriptor_binding_update_unused_while_pending: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.descriptor_binding_update_unused_while_pending = + descriptor_binding_update_unused_while_pending.into(); + self + } + pub fn descriptor_binding_partially_bound( + mut self, + descriptor_binding_partially_bound: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.descriptor_binding_partially_bound = descriptor_binding_partially_bound.into(); + self + } + pub fn descriptor_binding_variable_descriptor_count( + mut self, + descriptor_binding_variable_descriptor_count: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.descriptor_binding_variable_descriptor_count = + descriptor_binding_variable_descriptor_count.into(); + self + } + pub fn runtime_descriptor_array( + mut self, + runtime_descriptor_array: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.runtime_descriptor_array = runtime_descriptor_array.into(); + self + } + pub fn sampler_filter_minmax( + mut self, + sampler_filter_minmax: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.sampler_filter_minmax = sampler_filter_minmax.into(); + self + } + pub fn scalar_block_layout( + mut self, + scalar_block_layout: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.scalar_block_layout = scalar_block_layout.into(); + self + } + pub fn imageless_framebuffer( + mut self, + imageless_framebuffer: bool, + ) -> 
PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.imageless_framebuffer = imageless_framebuffer.into(); + self + } + pub fn uniform_buffer_standard_layout( + mut self, + uniform_buffer_standard_layout: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.uniform_buffer_standard_layout = uniform_buffer_standard_layout.into(); + self + } + pub fn shader_subgroup_extended_types( + mut self, + shader_subgroup_extended_types: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_subgroup_extended_types = shader_subgroup_extended_types.into(); + self + } + pub fn separate_depth_stencil_layouts( + mut self, + separate_depth_stencil_layouts: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.separate_depth_stencil_layouts = separate_depth_stencil_layouts.into(); + self + } + pub fn host_query_reset( + mut self, + host_query_reset: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.host_query_reset = host_query_reset.into(); + self + } + pub fn timeline_semaphore( + mut self, + timeline_semaphore: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.timeline_semaphore = timeline_semaphore.into(); + self + } + pub fn buffer_device_address( + mut self, + buffer_device_address: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.buffer_device_address = buffer_device_address.into(); + self + } + pub fn buffer_device_address_capture_replay( + mut self, + buffer_device_address_capture_replay: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.buffer_device_address_capture_replay = + buffer_device_address_capture_replay.into(); + self + } + pub fn buffer_device_address_multi_device( + mut self, + buffer_device_address_multi_device: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.buffer_device_address_multi_device = buffer_device_address_multi_device.into(); + self + } + pub fn vulkan_memory_model( + mut self, + 
vulkan_memory_model: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.vulkan_memory_model = vulkan_memory_model.into(); + self + } + pub fn vulkan_memory_model_device_scope( + mut self, + vulkan_memory_model_device_scope: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.vulkan_memory_model_device_scope = vulkan_memory_model_device_scope.into(); + self + } + pub fn vulkan_memory_model_availability_visibility_chains( + mut self, + vulkan_memory_model_availability_visibility_chains: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner + .vulkan_memory_model_availability_visibility_chains = + vulkan_memory_model_availability_visibility_chains.into(); + self + } + pub fn shader_output_viewport_index( + mut self, + shader_output_viewport_index: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_output_viewport_index = shader_output_viewport_index.into(); + self + } + pub fn shader_output_layer( + mut self, + shader_output_layer: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.shader_output_layer = shader_output_layer.into(); + self + } + pub fn subgroup_broadcast_dynamic_id( + mut self, + subgroup_broadcast_dynamic_id: bool, + ) -> PhysicalDeviceVulkan12FeaturesBuilder<'a> { + self.inner.subgroup_broadcast_dynamic_id = subgroup_broadcast_dynamic_id.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceVulkan12Features { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceVulkan12Properties { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub driver_id: DriverId, + pub driver_name: [c_char; MAX_DRIVER_NAME_SIZE], + pub driver_info: [c_char; MAX_DRIVER_INFO_SIZE], + pub conformance_version: ConformanceVersion, + pub denorm_behavior_independence: ShaderFloatControlsIndependence, + pub rounding_mode_independence: ShaderFloatControlsIndependence, + pub shader_signed_zero_inf_nan_preserve_float16: Bool32, + pub shader_signed_zero_inf_nan_preserve_float32: Bool32, + pub shader_signed_zero_inf_nan_preserve_float64: Bool32, + pub shader_denorm_preserve_float16: Bool32, + pub shader_denorm_preserve_float32: Bool32, + pub shader_denorm_preserve_float64: Bool32, + pub shader_denorm_flush_to_zero_float16: Bool32, + pub shader_denorm_flush_to_zero_float32: Bool32, + pub shader_denorm_flush_to_zero_float64: Bool32, + pub shader_rounding_mode_rte_float16: Bool32, + pub shader_rounding_mode_rte_float32: Bool32, + pub shader_rounding_mode_rte_float64: Bool32, + pub shader_rounding_mode_rtz_float16: Bool32, + pub shader_rounding_mode_rtz_float32: Bool32, + pub shader_rounding_mode_rtz_float64: Bool32, + pub max_update_after_bind_descriptors_in_all_pools: u32, + pub shader_uniform_buffer_array_non_uniform_indexing_native: Bool32, + pub shader_sampled_image_array_non_uniform_indexing_native: Bool32, + pub shader_storage_buffer_array_non_uniform_indexing_native: Bool32, + pub shader_storage_image_array_non_uniform_indexing_native: Bool32, + pub shader_input_attachment_array_non_uniform_indexing_native: Bool32, + pub robust_buffer_access_update_after_bind: Bool32, + pub quad_divergent_implicit_lod: Bool32, + pub 
max_per_stage_descriptor_update_after_bind_samplers: u32, + pub max_per_stage_descriptor_update_after_bind_uniform_buffers: u32, + pub max_per_stage_descriptor_update_after_bind_storage_buffers: u32, + pub max_per_stage_descriptor_update_after_bind_sampled_images: u32, + pub max_per_stage_descriptor_update_after_bind_storage_images: u32, + pub max_per_stage_descriptor_update_after_bind_input_attachments: u32, + pub max_per_stage_update_after_bind_resources: u32, + pub max_descriptor_set_update_after_bind_samplers: u32, + pub max_descriptor_set_update_after_bind_uniform_buffers: u32, + pub max_descriptor_set_update_after_bind_uniform_buffers_dynamic: u32, + pub max_descriptor_set_update_after_bind_storage_buffers: u32, + pub max_descriptor_set_update_after_bind_storage_buffers_dynamic: u32, + pub max_descriptor_set_update_after_bind_sampled_images: u32, + pub max_descriptor_set_update_after_bind_storage_images: u32, + pub max_descriptor_set_update_after_bind_input_attachments: u32, + pub supported_depth_resolve_modes: ResolveModeFlags, + pub supported_stencil_resolve_modes: ResolveModeFlags, + pub independent_resolve_none: Bool32, + pub independent_resolve: Bool32, + pub filter_minmax_single_component_formats: Bool32, + pub filter_minmax_image_component_mapping: Bool32, + pub max_timeline_semaphore_value_difference: u64, + pub framebuffer_integer_color_sample_counts: SampleCountFlags, +} +impl fmt::Debug for PhysicalDeviceVulkan12Properties { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PhysicalDeviceVulkan12Properties") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("driver_id", &self.driver_id) + .field("driver_name", &unsafe { + ::std::ffi::CStr::from_ptr(self.driver_name.as_ptr() as *const c_char) + }) + .field("driver_info", &unsafe { + ::std::ffi::CStr::from_ptr(self.driver_info.as_ptr() as *const c_char) + }) + .field("conformance_version", &self.conformance_version) + .field( + 
"denorm_behavior_independence", + &self.denorm_behavior_independence, + ) + .field( + "rounding_mode_independence", + &self.rounding_mode_independence, + ) + .field( + "shader_signed_zero_inf_nan_preserve_float16", + &self.shader_signed_zero_inf_nan_preserve_float16, + ) + .field( + "shader_signed_zero_inf_nan_preserve_float32", + &self.shader_signed_zero_inf_nan_preserve_float32, + ) + .field( + "shader_signed_zero_inf_nan_preserve_float64", + &self.shader_signed_zero_inf_nan_preserve_float64, + ) + .field( + "shader_denorm_preserve_float16", + &self.shader_denorm_preserve_float16, + ) + .field( + "shader_denorm_preserve_float32", + &self.shader_denorm_preserve_float32, + ) + .field( + "shader_denorm_preserve_float64", + &self.shader_denorm_preserve_float64, + ) + .field( + "shader_denorm_flush_to_zero_float16", + &self.shader_denorm_flush_to_zero_float16, + ) + .field( + "shader_denorm_flush_to_zero_float32", + &self.shader_denorm_flush_to_zero_float32, + ) + .field( + "shader_denorm_flush_to_zero_float64", + &self.shader_denorm_flush_to_zero_float64, + ) + .field( + "shader_rounding_mode_rte_float16", + &self.shader_rounding_mode_rte_float16, + ) + .field( + "shader_rounding_mode_rte_float32", + &self.shader_rounding_mode_rte_float32, + ) + .field( + "shader_rounding_mode_rte_float64", + &self.shader_rounding_mode_rte_float64, + ) + .field( + "shader_rounding_mode_rtz_float16", + &self.shader_rounding_mode_rtz_float16, + ) + .field( + "shader_rounding_mode_rtz_float32", + &self.shader_rounding_mode_rtz_float32, + ) + .field( + "shader_rounding_mode_rtz_float64", + &self.shader_rounding_mode_rtz_float64, + ) + .field( + "max_update_after_bind_descriptors_in_all_pools", + &self.max_update_after_bind_descriptors_in_all_pools, + ) + .field( + "shader_uniform_buffer_array_non_uniform_indexing_native", + &self.shader_uniform_buffer_array_non_uniform_indexing_native, + ) + .field( + "shader_sampled_image_array_non_uniform_indexing_native", + 
&self.shader_sampled_image_array_non_uniform_indexing_native, + ) + .field( + "shader_storage_buffer_array_non_uniform_indexing_native", + &self.shader_storage_buffer_array_non_uniform_indexing_native, + ) + .field( + "shader_storage_image_array_non_uniform_indexing_native", + &self.shader_storage_image_array_non_uniform_indexing_native, + ) + .field( + "shader_input_attachment_array_non_uniform_indexing_native", + &self.shader_input_attachment_array_non_uniform_indexing_native, + ) + .field( + "robust_buffer_access_update_after_bind", + &self.robust_buffer_access_update_after_bind, + ) + .field( + "quad_divergent_implicit_lod", + &self.quad_divergent_implicit_lod, + ) + .field( + "max_per_stage_descriptor_update_after_bind_samplers", + &self.max_per_stage_descriptor_update_after_bind_samplers, + ) + .field( + "max_per_stage_descriptor_update_after_bind_uniform_buffers", + &self.max_per_stage_descriptor_update_after_bind_uniform_buffers, + ) + .field( + "max_per_stage_descriptor_update_after_bind_storage_buffers", + &self.max_per_stage_descriptor_update_after_bind_storage_buffers, + ) + .field( + "max_per_stage_descriptor_update_after_bind_sampled_images", + &self.max_per_stage_descriptor_update_after_bind_sampled_images, + ) + .field( + "max_per_stage_descriptor_update_after_bind_storage_images", + &self.max_per_stage_descriptor_update_after_bind_storage_images, + ) + .field( + "max_per_stage_descriptor_update_after_bind_input_attachments", + &self.max_per_stage_descriptor_update_after_bind_input_attachments, + ) + .field( + "max_per_stage_update_after_bind_resources", + &self.max_per_stage_update_after_bind_resources, + ) + .field( + "max_descriptor_set_update_after_bind_samplers", + &self.max_descriptor_set_update_after_bind_samplers, + ) + .field( + "max_descriptor_set_update_after_bind_uniform_buffers", + &self.max_descriptor_set_update_after_bind_uniform_buffers, + ) + .field( + "max_descriptor_set_update_after_bind_uniform_buffers_dynamic", + 
&self.max_descriptor_set_update_after_bind_uniform_buffers_dynamic, + ) + .field( + "max_descriptor_set_update_after_bind_storage_buffers", + &self.max_descriptor_set_update_after_bind_storage_buffers, + ) + .field( + "max_descriptor_set_update_after_bind_storage_buffers_dynamic", + &self.max_descriptor_set_update_after_bind_storage_buffers_dynamic, + ) + .field( + "max_descriptor_set_update_after_bind_sampled_images", + &self.max_descriptor_set_update_after_bind_sampled_images, + ) + .field( + "max_descriptor_set_update_after_bind_storage_images", + &self.max_descriptor_set_update_after_bind_storage_images, + ) + .field( + "max_descriptor_set_update_after_bind_input_attachments", + &self.max_descriptor_set_update_after_bind_input_attachments, + ) + .field( + "supported_depth_resolve_modes", + &self.supported_depth_resolve_modes, + ) + .field( + "supported_stencil_resolve_modes", + &self.supported_stencil_resolve_modes, + ) + .field("independent_resolve_none", &self.independent_resolve_none) + .field("independent_resolve", &self.independent_resolve) + .field( + "filter_minmax_single_component_formats", + &self.filter_minmax_single_component_formats, + ) + .field( + "filter_minmax_image_component_mapping", + &self.filter_minmax_image_component_mapping, + ) + .field( + "max_timeline_semaphore_value_difference", + &self.max_timeline_semaphore_value_difference, + ) + .field( + "framebuffer_integer_color_sample_counts", + &self.framebuffer_integer_color_sample_counts, + ) + .finish() + } +} +impl ::std::default::Default for PhysicalDeviceVulkan12Properties { + fn default() -> PhysicalDeviceVulkan12Properties { + PhysicalDeviceVulkan12Properties { + s_type: StructureType::PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES, + p_next: ::std::ptr::null_mut(), + driver_id: DriverId::default(), + driver_name: unsafe { ::std::mem::zeroed() }, + driver_info: unsafe { ::std::mem::zeroed() }, + conformance_version: ConformanceVersion::default(), + denorm_behavior_independence: 
ShaderFloatControlsIndependence::default(), + rounding_mode_independence: ShaderFloatControlsIndependence::default(), + shader_signed_zero_inf_nan_preserve_float16: Bool32::default(), + shader_signed_zero_inf_nan_preserve_float32: Bool32::default(), + shader_signed_zero_inf_nan_preserve_float64: Bool32::default(), + shader_denorm_preserve_float16: Bool32::default(), + shader_denorm_preserve_float32: Bool32::default(), + shader_denorm_preserve_float64: Bool32::default(), + shader_denorm_flush_to_zero_float16: Bool32::default(), + shader_denorm_flush_to_zero_float32: Bool32::default(), + shader_denorm_flush_to_zero_float64: Bool32::default(), + shader_rounding_mode_rte_float16: Bool32::default(), + shader_rounding_mode_rte_float32: Bool32::default(), + shader_rounding_mode_rte_float64: Bool32::default(), + shader_rounding_mode_rtz_float16: Bool32::default(), + shader_rounding_mode_rtz_float32: Bool32::default(), + shader_rounding_mode_rtz_float64: Bool32::default(), + max_update_after_bind_descriptors_in_all_pools: u32::default(), + shader_uniform_buffer_array_non_uniform_indexing_native: Bool32::default(), + shader_sampled_image_array_non_uniform_indexing_native: Bool32::default(), + shader_storage_buffer_array_non_uniform_indexing_native: Bool32::default(), + shader_storage_image_array_non_uniform_indexing_native: Bool32::default(), + shader_input_attachment_array_non_uniform_indexing_native: Bool32::default(), + robust_buffer_access_update_after_bind: Bool32::default(), + quad_divergent_implicit_lod: Bool32::default(), + max_per_stage_descriptor_update_after_bind_samplers: u32::default(), + max_per_stage_descriptor_update_after_bind_uniform_buffers: u32::default(), + max_per_stage_descriptor_update_after_bind_storage_buffers: u32::default(), + max_per_stage_descriptor_update_after_bind_sampled_images: u32::default(), + max_per_stage_descriptor_update_after_bind_storage_images: u32::default(), + max_per_stage_descriptor_update_after_bind_input_attachments: 
u32::default(), + max_per_stage_update_after_bind_resources: u32::default(), + max_descriptor_set_update_after_bind_samplers: u32::default(), + max_descriptor_set_update_after_bind_uniform_buffers: u32::default(), + max_descriptor_set_update_after_bind_uniform_buffers_dynamic: u32::default(), + max_descriptor_set_update_after_bind_storage_buffers: u32::default(), + max_descriptor_set_update_after_bind_storage_buffers_dynamic: u32::default(), + max_descriptor_set_update_after_bind_sampled_images: u32::default(), + max_descriptor_set_update_after_bind_storage_images: u32::default(), + max_descriptor_set_update_after_bind_input_attachments: u32::default(), + supported_depth_resolve_modes: ResolveModeFlags::default(), + supported_stencil_resolve_modes: ResolveModeFlags::default(), + independent_resolve_none: Bool32::default(), + independent_resolve: Bool32::default(), + filter_minmax_single_component_formats: Bool32::default(), + filter_minmax_image_component_mapping: Bool32::default(), + max_timeline_semaphore_value_difference: u64::default(), + framebuffer_integer_color_sample_counts: SampleCountFlags::default(), + } + } +} +impl PhysicalDeviceVulkan12Properties { + pub fn builder<'a>() -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + PhysicalDeviceVulkan12PropertiesBuilder { + inner: PhysicalDeviceVulkan12Properties::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceVulkan12PropertiesBuilder<'a> { + inner: PhysicalDeviceVulkan12Properties, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceVulkan12PropertiesBuilder<'_> {} +unsafe impl ExtendsPhysicalDeviceProperties2 for PhysicalDeviceVulkan12Properties {} +impl<'a> ::std::ops::Deref for PhysicalDeviceVulkan12PropertiesBuilder<'a> { + type Target = PhysicalDeviceVulkan12Properties; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for 
PhysicalDeviceVulkan12PropertiesBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + pub fn driver_id(mut self, driver_id: DriverId) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.driver_id = driver_id; + self + } + pub fn driver_name( + mut self, + driver_name: [c_char; MAX_DRIVER_NAME_SIZE], + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.driver_name = driver_name; + self + } + pub fn driver_info( + mut self, + driver_info: [c_char; MAX_DRIVER_INFO_SIZE], + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.driver_info = driver_info; + self + } + pub fn conformance_version( + mut self, + conformance_version: ConformanceVersion, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.conformance_version = conformance_version; + self + } + pub fn denorm_behavior_independence( + mut self, + denorm_behavior_independence: ShaderFloatControlsIndependence, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.denorm_behavior_independence = denorm_behavior_independence; + self + } + pub fn rounding_mode_independence( + mut self, + rounding_mode_independence: ShaderFloatControlsIndependence, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.rounding_mode_independence = rounding_mode_independence; + self + } + pub fn shader_signed_zero_inf_nan_preserve_float16( + mut self, + shader_signed_zero_inf_nan_preserve_float16: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_signed_zero_inf_nan_preserve_float16 = + shader_signed_zero_inf_nan_preserve_float16.into(); + self + } + pub fn shader_signed_zero_inf_nan_preserve_float32( + mut self, + shader_signed_zero_inf_nan_preserve_float32: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_signed_zero_inf_nan_preserve_float32 = + shader_signed_zero_inf_nan_preserve_float32.into(); + self + } + pub fn 
shader_signed_zero_inf_nan_preserve_float64( + mut self, + shader_signed_zero_inf_nan_preserve_float64: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_signed_zero_inf_nan_preserve_float64 = + shader_signed_zero_inf_nan_preserve_float64.into(); + self + } + pub fn shader_denorm_preserve_float16( + mut self, + shader_denorm_preserve_float16: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_denorm_preserve_float16 = shader_denorm_preserve_float16.into(); + self + } + pub fn shader_denorm_preserve_float32( + mut self, + shader_denorm_preserve_float32: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_denorm_preserve_float32 = shader_denorm_preserve_float32.into(); + self + } + pub fn shader_denorm_preserve_float64( + mut self, + shader_denorm_preserve_float64: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_denorm_preserve_float64 = shader_denorm_preserve_float64.into(); + self + } + pub fn shader_denorm_flush_to_zero_float16( + mut self, + shader_denorm_flush_to_zero_float16: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_denorm_flush_to_zero_float16 = shader_denorm_flush_to_zero_float16.into(); + self + } + pub fn shader_denorm_flush_to_zero_float32( + mut self, + shader_denorm_flush_to_zero_float32: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_denorm_flush_to_zero_float32 = shader_denorm_flush_to_zero_float32.into(); + self + } + pub fn shader_denorm_flush_to_zero_float64( + mut self, + shader_denorm_flush_to_zero_float64: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_denorm_flush_to_zero_float64 = shader_denorm_flush_to_zero_float64.into(); + self + } + pub fn shader_rounding_mode_rte_float16( + mut self, + shader_rounding_mode_rte_float16: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_rounding_mode_rte_float16 = 
shader_rounding_mode_rte_float16.into(); + self + } + pub fn shader_rounding_mode_rte_float32( + mut self, + shader_rounding_mode_rte_float32: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_rounding_mode_rte_float32 = shader_rounding_mode_rte_float32.into(); + self + } + pub fn shader_rounding_mode_rte_float64( + mut self, + shader_rounding_mode_rte_float64: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_rounding_mode_rte_float64 = shader_rounding_mode_rte_float64.into(); + self + } + pub fn shader_rounding_mode_rtz_float16( + mut self, + shader_rounding_mode_rtz_float16: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_rounding_mode_rtz_float16 = shader_rounding_mode_rtz_float16.into(); + self + } + pub fn shader_rounding_mode_rtz_float32( + mut self, + shader_rounding_mode_rtz_float32: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_rounding_mode_rtz_float32 = shader_rounding_mode_rtz_float32.into(); + self + } + pub fn shader_rounding_mode_rtz_float64( + mut self, + shader_rounding_mode_rtz_float64: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.shader_rounding_mode_rtz_float64 = shader_rounding_mode_rtz_float64.into(); + self + } + pub fn max_update_after_bind_descriptors_in_all_pools( + mut self, + max_update_after_bind_descriptors_in_all_pools: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.max_update_after_bind_descriptors_in_all_pools = + max_update_after_bind_descriptors_in_all_pools; + self + } + pub fn shader_uniform_buffer_array_non_uniform_indexing_native( + mut self, + shader_uniform_buffer_array_non_uniform_indexing_native: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .shader_uniform_buffer_array_non_uniform_indexing_native = + shader_uniform_buffer_array_non_uniform_indexing_native.into(); + self + } + pub fn 
shader_sampled_image_array_non_uniform_indexing_native( + mut self, + shader_sampled_image_array_non_uniform_indexing_native: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .shader_sampled_image_array_non_uniform_indexing_native = + shader_sampled_image_array_non_uniform_indexing_native.into(); + self + } + pub fn shader_storage_buffer_array_non_uniform_indexing_native( + mut self, + shader_storage_buffer_array_non_uniform_indexing_native: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .shader_storage_buffer_array_non_uniform_indexing_native = + shader_storage_buffer_array_non_uniform_indexing_native.into(); + self + } + pub fn shader_storage_image_array_non_uniform_indexing_native( + mut self, + shader_storage_image_array_non_uniform_indexing_native: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .shader_storage_image_array_non_uniform_indexing_native = + shader_storage_image_array_non_uniform_indexing_native.into(); + self + } + pub fn shader_input_attachment_array_non_uniform_indexing_native( + mut self, + shader_input_attachment_array_non_uniform_indexing_native: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .shader_input_attachment_array_non_uniform_indexing_native = + shader_input_attachment_array_non_uniform_indexing_native.into(); + self + } + pub fn robust_buffer_access_update_after_bind( + mut self, + robust_buffer_access_update_after_bind: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.robust_buffer_access_update_after_bind = + robust_buffer_access_update_after_bind.into(); + self + } + pub fn quad_divergent_implicit_lod( + mut self, + quad_divergent_implicit_lod: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.quad_divergent_implicit_lod = quad_divergent_implicit_lod.into(); + self + } + pub fn max_per_stage_descriptor_update_after_bind_samplers( + mut self, + 
max_per_stage_descriptor_update_after_bind_samplers: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_per_stage_descriptor_update_after_bind_samplers = + max_per_stage_descriptor_update_after_bind_samplers; + self + } + pub fn max_per_stage_descriptor_update_after_bind_uniform_buffers( + mut self, + max_per_stage_descriptor_update_after_bind_uniform_buffers: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_per_stage_descriptor_update_after_bind_uniform_buffers = + max_per_stage_descriptor_update_after_bind_uniform_buffers; + self + } + pub fn max_per_stage_descriptor_update_after_bind_storage_buffers( + mut self, + max_per_stage_descriptor_update_after_bind_storage_buffers: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_per_stage_descriptor_update_after_bind_storage_buffers = + max_per_stage_descriptor_update_after_bind_storage_buffers; + self + } + pub fn max_per_stage_descriptor_update_after_bind_sampled_images( + mut self, + max_per_stage_descriptor_update_after_bind_sampled_images: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_per_stage_descriptor_update_after_bind_sampled_images = + max_per_stage_descriptor_update_after_bind_sampled_images; + self + } + pub fn max_per_stage_descriptor_update_after_bind_storage_images( + mut self, + max_per_stage_descriptor_update_after_bind_storage_images: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_per_stage_descriptor_update_after_bind_storage_images = + max_per_stage_descriptor_update_after_bind_storage_images; + self + } + pub fn max_per_stage_descriptor_update_after_bind_input_attachments( + mut self, + max_per_stage_descriptor_update_after_bind_input_attachments: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_per_stage_descriptor_update_after_bind_input_attachments = + max_per_stage_descriptor_update_after_bind_input_attachments; + self + 
} + pub fn max_per_stage_update_after_bind_resources( + mut self, + max_per_stage_update_after_bind_resources: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.max_per_stage_update_after_bind_resources = + max_per_stage_update_after_bind_resources; + self + } + pub fn max_descriptor_set_update_after_bind_samplers( + mut self, + max_descriptor_set_update_after_bind_samplers: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.max_descriptor_set_update_after_bind_samplers = + max_descriptor_set_update_after_bind_samplers; + self + } + pub fn max_descriptor_set_update_after_bind_uniform_buffers( + mut self, + max_descriptor_set_update_after_bind_uniform_buffers: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_descriptor_set_update_after_bind_uniform_buffers = + max_descriptor_set_update_after_bind_uniform_buffers; + self + } + pub fn max_descriptor_set_update_after_bind_uniform_buffers_dynamic( + mut self, + max_descriptor_set_update_after_bind_uniform_buffers_dynamic: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_descriptor_set_update_after_bind_uniform_buffers_dynamic = + max_descriptor_set_update_after_bind_uniform_buffers_dynamic; + self + } + pub fn max_descriptor_set_update_after_bind_storage_buffers( + mut self, + max_descriptor_set_update_after_bind_storage_buffers: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_descriptor_set_update_after_bind_storage_buffers = + max_descriptor_set_update_after_bind_storage_buffers; + self + } + pub fn max_descriptor_set_update_after_bind_storage_buffers_dynamic( + mut self, + max_descriptor_set_update_after_bind_storage_buffers_dynamic: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_descriptor_set_update_after_bind_storage_buffers_dynamic = + max_descriptor_set_update_after_bind_storage_buffers_dynamic; + self + } + pub fn 
max_descriptor_set_update_after_bind_sampled_images( + mut self, + max_descriptor_set_update_after_bind_sampled_images: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_descriptor_set_update_after_bind_sampled_images = + max_descriptor_set_update_after_bind_sampled_images; + self + } + pub fn max_descriptor_set_update_after_bind_storage_images( + mut self, + max_descriptor_set_update_after_bind_storage_images: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_descriptor_set_update_after_bind_storage_images = + max_descriptor_set_update_after_bind_storage_images; + self + } + pub fn max_descriptor_set_update_after_bind_input_attachments( + mut self, + max_descriptor_set_update_after_bind_input_attachments: u32, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner + .max_descriptor_set_update_after_bind_input_attachments = + max_descriptor_set_update_after_bind_input_attachments; + self + } + pub fn supported_depth_resolve_modes( + mut self, + supported_depth_resolve_modes: ResolveModeFlags, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.supported_depth_resolve_modes = supported_depth_resolve_modes; + self + } + pub fn supported_stencil_resolve_modes( + mut self, + supported_stencil_resolve_modes: ResolveModeFlags, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.supported_stencil_resolve_modes = supported_stencil_resolve_modes; + self + } + pub fn independent_resolve_none( + mut self, + independent_resolve_none: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.independent_resolve_none = independent_resolve_none.into(); + self + } + pub fn independent_resolve( + mut self, + independent_resolve: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.independent_resolve = independent_resolve.into(); + self + } + pub fn filter_minmax_single_component_formats( + mut self, + filter_minmax_single_component_formats: bool, + ) -> 
PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.filter_minmax_single_component_formats = + filter_minmax_single_component_formats.into(); + self + } + pub fn filter_minmax_image_component_mapping( + mut self, + filter_minmax_image_component_mapping: bool, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.filter_minmax_image_component_mapping = + filter_minmax_image_component_mapping.into(); + self + } + pub fn max_timeline_semaphore_value_difference( + mut self, + max_timeline_semaphore_value_difference: u64, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.max_timeline_semaphore_value_difference = + max_timeline_semaphore_value_difference; + self + } + pub fn framebuffer_integer_color_sample_counts( + mut self, + framebuffer_integer_color_sample_counts: SampleCountFlags, + ) -> PhysicalDeviceVulkan12PropertiesBuilder<'a> { + self.inner.framebuffer_integer_color_sample_counts = + framebuffer_integer_color_sample_counts; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceVulkan12Properties { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineCompilerControlCreateInfoAMD { + pub s_type: StructureType, + pub p_next: *const c_void, + pub compiler_control_flags: PipelineCompilerControlFlagsAMD, +} +impl ::std::default::Default for PipelineCompilerControlCreateInfoAMD { + fn default() -> PipelineCompilerControlCreateInfoAMD { + PipelineCompilerControlCreateInfoAMD { + s_type: StructureType::PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD, + p_next: ::std::ptr::null(), + compiler_control_flags: PipelineCompilerControlFlagsAMD::default(), + } + } +} +impl PipelineCompilerControlCreateInfoAMD { + pub fn builder<'a>() -> PipelineCompilerControlCreateInfoAMDBuilder<'a> { + PipelineCompilerControlCreateInfoAMDBuilder { + inner: PipelineCompilerControlCreateInfoAMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineCompilerControlCreateInfoAMDBuilder<'a> { + inner: PipelineCompilerControlCreateInfoAMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsGraphicsPipelineCreateInfo for PipelineCompilerControlCreateInfoAMDBuilder<'_> {} +unsafe impl ExtendsGraphicsPipelineCreateInfo for PipelineCompilerControlCreateInfoAMD {} +unsafe impl ExtendsComputePipelineCreateInfo for PipelineCompilerControlCreateInfoAMDBuilder<'_> {} +unsafe impl ExtendsComputePipelineCreateInfo for PipelineCompilerControlCreateInfoAMD {} +impl<'a> ::std::ops::Deref for PipelineCompilerControlCreateInfoAMDBuilder<'a> { + type Target = PipelineCompilerControlCreateInfoAMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineCompilerControlCreateInfoAMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut 
self.inner + } +} +impl<'a> PipelineCompilerControlCreateInfoAMDBuilder<'a> { + pub fn compiler_control_flags( + mut self, + compiler_control_flags: PipelineCompilerControlFlagsAMD, + ) -> PipelineCompilerControlCreateInfoAMDBuilder<'a> { + self.inner.compiler_control_flags = compiler_control_flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineCompilerControlCreateInfoAMD { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceCoherentMemoryFeaturesAMD { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub device_coherent_memory: Bool32, +} +impl ::std::default::Default for PhysicalDeviceCoherentMemoryFeaturesAMD { + fn default() -> PhysicalDeviceCoherentMemoryFeaturesAMD { + PhysicalDeviceCoherentMemoryFeaturesAMD { + s_type: StructureType::PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD, + p_next: ::std::ptr::null_mut(), + device_coherent_memory: Bool32::default(), + } + } +} +impl PhysicalDeviceCoherentMemoryFeaturesAMD { + pub fn builder<'a>() -> PhysicalDeviceCoherentMemoryFeaturesAMDBuilder<'a> { + PhysicalDeviceCoherentMemoryFeaturesAMDBuilder { + inner: PhysicalDeviceCoherentMemoryFeaturesAMD::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceCoherentMemoryFeaturesAMDBuilder<'a> { + inner: PhysicalDeviceCoherentMemoryFeaturesAMD, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceCoherentMemoryFeaturesAMDBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceCoherentMemoryFeaturesAMD {} +impl<'a> ::std::ops::Deref for PhysicalDeviceCoherentMemoryFeaturesAMDBuilder<'a> { + type Target = 
PhysicalDeviceCoherentMemoryFeaturesAMD; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceCoherentMemoryFeaturesAMDBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceCoherentMemoryFeaturesAMDBuilder<'a> { + pub fn device_coherent_memory( + mut self, + device_coherent_memory: bool, + ) -> PhysicalDeviceCoherentMemoryFeaturesAMDBuilder<'a> { + self.inner.device_coherent_memory = device_coherent_memory.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceCoherentMemoryFeaturesAMD { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct PhysicalDeviceToolPropertiesEXT { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub name: [c_char; MAX_EXTENSION_NAME_SIZE], + pub version: [c_char; MAX_EXTENSION_NAME_SIZE], + pub purposes: ToolPurposeFlagsEXT, + pub description: [c_char; MAX_DESCRIPTION_SIZE], + pub layer: [c_char; MAX_EXTENSION_NAME_SIZE], +} +impl fmt::Debug for PhysicalDeviceToolPropertiesEXT { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PhysicalDeviceToolPropertiesEXT") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("name", &unsafe { + ::std::ffi::CStr::from_ptr(self.name.as_ptr() as *const c_char) + }) + .field("version", &unsafe { + ::std::ffi::CStr::from_ptr(self.version.as_ptr() as *const c_char) + }) + .field("purposes", &self.purposes) + .field("description", &unsafe { + ::std::ffi::CStr::from_ptr(self.description.as_ptr() as *const c_char) + }) + .field("layer", &unsafe { + ::std::ffi::CStr::from_ptr(self.layer.as_ptr() as *const c_char) + }) + .finish() + } +} 
+impl ::std::default::Default for PhysicalDeviceToolPropertiesEXT { + fn default() -> PhysicalDeviceToolPropertiesEXT { + PhysicalDeviceToolPropertiesEXT { + s_type: StructureType::PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT, + p_next: ::std::ptr::null_mut(), + name: unsafe { ::std::mem::zeroed() }, + version: unsafe { ::std::mem::zeroed() }, + purposes: ToolPurposeFlagsEXT::default(), + description: unsafe { ::std::mem::zeroed() }, + layer: unsafe { ::std::mem::zeroed() }, + } + } +} +impl PhysicalDeviceToolPropertiesEXT { + pub fn builder<'a>() -> PhysicalDeviceToolPropertiesEXTBuilder<'a> { + PhysicalDeviceToolPropertiesEXTBuilder { + inner: PhysicalDeviceToolPropertiesEXT::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceToolPropertiesEXTBuilder<'a> { + inner: PhysicalDeviceToolPropertiesEXT, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPhysicalDeviceToolPropertiesEXT {} +impl<'a> ::std::ops::Deref for PhysicalDeviceToolPropertiesEXTBuilder<'a> { + type Target = PhysicalDeviceToolPropertiesEXT; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceToolPropertiesEXTBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceToolPropertiesEXTBuilder<'a> { + pub fn name( + mut self, + name: [c_char; MAX_EXTENSION_NAME_SIZE], + ) -> PhysicalDeviceToolPropertiesEXTBuilder<'a> { + self.inner.name = name; + self + } + pub fn version( + mut self, + version: [c_char; MAX_EXTENSION_NAME_SIZE], + ) -> PhysicalDeviceToolPropertiesEXTBuilder<'a> { + self.inner.version = version; + self + } + pub fn purposes( + mut self, + purposes: ToolPurposeFlagsEXT, + ) -> PhysicalDeviceToolPropertiesEXTBuilder<'a> { + self.inner.purposes = purposes; + self + } + pub fn description( + mut self, + description: [c_char; MAX_DESCRIPTION_SIZE], + ) -> PhysicalDeviceToolPropertiesEXTBuilder<'a> { + 
self.inner.description = description; + self + } + pub fn layer( + mut self, + layer: [c_char; MAX_EXTENSION_NAME_SIZE], + ) -> PhysicalDeviceToolPropertiesEXTBuilder<'a> { + self.inner.layer = layer; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PhysicalDeviceToolPropertiesEXTBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceToolPropertiesEXT { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub union DeviceOrHostAddressKHR { + pub device_address: DeviceAddress, + pub host_address: *mut c_void, +} +impl ::std::default::Default for DeviceOrHostAddressKHR { + fn default() -> DeviceOrHostAddressKHR { + unsafe { ::std::mem::zeroed() } + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub union DeviceOrHostAddressConstKHR { + pub device_address: DeviceAddress, + pub host_address: *const c_void, +} +impl ::std::default::Default for DeviceOrHostAddressConstKHR { + fn default() -> DeviceOrHostAddressConstKHR { + unsafe { ::std::mem::zeroed() } + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct AccelerationStructureGeometryTrianglesDataKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub vertex_format: Format, + pub vertex_data: DeviceOrHostAddressConstKHR, + pub vertex_stride: DeviceSize, + pub index_type: IndexType, + pub index_data: DeviceOrHostAddressConstKHR, + pub transform_data: DeviceOrHostAddressConstKHR, +} +impl fmt::Debug for AccelerationStructureGeometryTrianglesDataKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("AccelerationStructureGeometryTrianglesDataKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("vertex_format", &self.vertex_format) + .field("vertex_data", &"union") + .field("vertex_stride", &self.vertex_stride) + .field("index_type", &self.index_type) + .field("index_data", &"union") + .field("transform_data", &"union") + .finish() + } +} +impl ::std::default::Default for AccelerationStructureGeometryTrianglesDataKHR { + fn default() -> AccelerationStructureGeometryTrianglesDataKHR { + AccelerationStructureGeometryTrianglesDataKHR { + s_type: 
StructureType::ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR, + p_next: ::std::ptr::null(), + vertex_format: Format::default(), + vertex_data: DeviceOrHostAddressConstKHR::default(), + vertex_stride: DeviceSize::default(), + index_type: IndexType::default(), + index_data: DeviceOrHostAddressConstKHR::default(), + transform_data: DeviceOrHostAddressConstKHR::default(), + } + } +} +impl AccelerationStructureGeometryTrianglesDataKHR { + pub fn builder<'a>() -> AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + AccelerationStructureGeometryTrianglesDataKHRBuilder { + inner: AccelerationStructureGeometryTrianglesDataKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + inner: AccelerationStructureGeometryTrianglesDataKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureGeometryTrianglesDataKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + type Target = AccelerationStructureGeometryTrianglesDataKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + pub fn vertex_format( + mut self, + vertex_format: Format, + ) -> AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + self.inner.vertex_format = vertex_format; + self + } + pub fn vertex_data( + mut self, + vertex_data: DeviceOrHostAddressConstKHR, + ) -> AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + self.inner.vertex_data = vertex_data; + self + } + pub fn vertex_stride( + mut self, + vertex_stride: DeviceSize, + ) -> AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + self.inner.vertex_stride = vertex_stride; + self + } + pub fn 
index_type( + mut self, + index_type: IndexType, + ) -> AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + self.inner.index_type = index_type; + self + } + pub fn index_data( + mut self, + index_data: DeviceOrHostAddressConstKHR, + ) -> AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + self.inner.index_data = index_data; + self + } + pub fn transform_data( + mut self, + transform_data: DeviceOrHostAddressConstKHR, + ) -> AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + self.inner.transform_data = transform_data; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureGeometryTrianglesDataKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureGeometryTrianglesDataKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct AccelerationStructureGeometryAabbsDataKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub data: DeviceOrHostAddressConstKHR, + pub stride: DeviceSize, +} +impl fmt::Debug for AccelerationStructureGeometryAabbsDataKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("AccelerationStructureGeometryAabbsDataKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("data", &"union") + .field("stride", &self.stride) + .finish() + } +} +impl ::std::default::Default for AccelerationStructureGeometryAabbsDataKHR { + fn default() -> AccelerationStructureGeometryAabbsDataKHR { + AccelerationStructureGeometryAabbsDataKHR { + s_type: StructureType::ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR, + p_next: ::std::ptr::null(), + data: DeviceOrHostAddressConstKHR::default(), + stride: DeviceSize::default(), + } + } +} +impl AccelerationStructureGeometryAabbsDataKHR { + pub fn builder<'a>() -> AccelerationStructureGeometryAabbsDataKHRBuilder<'a> { + AccelerationStructureGeometryAabbsDataKHRBuilder { + inner: AccelerationStructureGeometryAabbsDataKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureGeometryAabbsDataKHRBuilder<'a> { + inner: AccelerationStructureGeometryAabbsDataKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureGeometryAabbsDataKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureGeometryAabbsDataKHRBuilder<'a> { + type Target = AccelerationStructureGeometryAabbsDataKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for 
AccelerationStructureGeometryAabbsDataKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureGeometryAabbsDataKHRBuilder<'a> { + pub fn data( + mut self, + data: DeviceOrHostAddressConstKHR, + ) -> AccelerationStructureGeometryAabbsDataKHRBuilder<'a> { + self.inner.data = data; + self + } + pub fn stride( + mut self, + stride: DeviceSize, + ) -> AccelerationStructureGeometryAabbsDataKHRBuilder<'a> { + self.inner.stride = stride; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureGeometryAabbsDataKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureGeometryAabbsDataKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct AccelerationStructureGeometryInstancesDataKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub array_of_pointers: Bool32, + pub data: DeviceOrHostAddressConstKHR, +} +impl fmt::Debug for AccelerationStructureGeometryInstancesDataKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("AccelerationStructureGeometryInstancesDataKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("array_of_pointers", &self.array_of_pointers) + .field("data", &"union") + .finish() + } +} +impl ::std::default::Default for AccelerationStructureGeometryInstancesDataKHR { + fn default() -> AccelerationStructureGeometryInstancesDataKHR { + AccelerationStructureGeometryInstancesDataKHR { + s_type: StructureType::ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, + p_next: ::std::ptr::null(), + array_of_pointers: Bool32::default(), + data: DeviceOrHostAddressConstKHR::default(), + } + } +} +impl AccelerationStructureGeometryInstancesDataKHR { + pub fn builder<'a>() -> AccelerationStructureGeometryInstancesDataKHRBuilder<'a> { + AccelerationStructureGeometryInstancesDataKHRBuilder { + inner: AccelerationStructureGeometryInstancesDataKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureGeometryInstancesDataKHRBuilder<'a> { + inner: AccelerationStructureGeometryInstancesDataKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureGeometryInstancesDataKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureGeometryInstancesDataKHRBuilder<'a> { + type Target = AccelerationStructureGeometryInstancesDataKHR; + 
fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureGeometryInstancesDataKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureGeometryInstancesDataKHRBuilder<'a> { + pub fn array_of_pointers( + mut self, + array_of_pointers: bool, + ) -> AccelerationStructureGeometryInstancesDataKHRBuilder<'a> { + self.inner.array_of_pointers = array_of_pointers.into(); + self + } + pub fn data( + mut self, + data: DeviceOrHostAddressConstKHR, + ) -> AccelerationStructureGeometryInstancesDataKHRBuilder<'a> { + self.inner.data = data; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureGeometryInstancesDataKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureGeometryInstancesDataKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub union AccelerationStructureGeometryDataKHR { + pub triangles: AccelerationStructureGeometryTrianglesDataKHR, + pub aabbs: AccelerationStructureGeometryAabbsDataKHR, + pub instances: AccelerationStructureGeometryInstancesDataKHR, +} +impl ::std::default::Default for AccelerationStructureGeometryDataKHR { + fn default() -> AccelerationStructureGeometryDataKHR { + unsafe { ::std::mem::zeroed() } + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct AccelerationStructureGeometryKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub geometry_type: GeometryTypeKHR, + pub geometry: AccelerationStructureGeometryDataKHR, + pub flags: GeometryFlagsKHR, +} +impl fmt::Debug for AccelerationStructureGeometryKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("AccelerationStructureGeometryKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("geometry_type", &self.geometry_type) + .field("geometry", &"union") + .field("flags", &self.flags) + .finish() + } +} +impl ::std::default::Default for AccelerationStructureGeometryKHR { + fn default() -> AccelerationStructureGeometryKHR { + AccelerationStructureGeometryKHR { + s_type: StructureType::ACCELERATION_STRUCTURE_GEOMETRY_KHR, + p_next: ::std::ptr::null(), + geometry_type: GeometryTypeKHR::default(), + geometry: AccelerationStructureGeometryDataKHR::default(), + flags: GeometryFlagsKHR::default(), + } + } +} +impl AccelerationStructureGeometryKHR { + pub fn builder<'a>() -> AccelerationStructureGeometryKHRBuilder<'a> { + AccelerationStructureGeometryKHRBuilder { + inner: AccelerationStructureGeometryKHR::default(), + marker: 
::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureGeometryKHRBuilder<'a> { + inner: AccelerationStructureGeometryKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureGeometryKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureGeometryKHRBuilder<'a> { + type Target = AccelerationStructureGeometryKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureGeometryKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureGeometryKHRBuilder<'a> { + pub fn geometry_type( + mut self, + geometry_type: GeometryTypeKHR, + ) -> AccelerationStructureGeometryKHRBuilder<'a> { + self.inner.geometry_type = geometry_type; + self + } + pub fn geometry( + mut self, + geometry: AccelerationStructureGeometryDataKHR, + ) -> AccelerationStructureGeometryKHRBuilder<'a> { + self.inner.geometry = geometry; + self + } + pub fn flags(mut self, flags: GeometryFlagsKHR) -> AccelerationStructureGeometryKHRBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureGeometryKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureGeometryKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct AccelerationStructureBuildGeometryInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub ty: AccelerationStructureTypeKHR, + pub flags: BuildAccelerationStructureFlagsKHR, + pub update: Bool32, + pub src_acceleration_structure: AccelerationStructureKHR, + pub dst_acceleration_structure: AccelerationStructureKHR, + pub geometry_array_of_pointers: Bool32, + pub geometry_count: u32, + pub pp_geometries: *const *const AccelerationStructureGeometryKHR, + pub scratch_data: DeviceOrHostAddressKHR, +} +impl fmt::Debug for AccelerationStructureBuildGeometryInfoKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("AccelerationStructureBuildGeometryInfoKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("ty", &self.ty) + .field("flags", &self.flags) + .field("update", &self.update) + .field( + "src_acceleration_structure", + &self.src_acceleration_structure, + ) + .field( + "dst_acceleration_structure", + &self.dst_acceleration_structure, + ) + .field( + "geometry_array_of_pointers", + &self.geometry_array_of_pointers, + ) + .field("geometry_count", &self.geometry_count) + .field("pp_geometries", &self.pp_geometries) + .field("scratch_data", &"union") + .finish() + } +} +impl ::std::default::Default for AccelerationStructureBuildGeometryInfoKHR { + fn default() -> AccelerationStructureBuildGeometryInfoKHR { + AccelerationStructureBuildGeometryInfoKHR { + s_type: StructureType::ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR, + p_next: ::std::ptr::null(), + ty: AccelerationStructureTypeKHR::default(), + flags: BuildAccelerationStructureFlagsKHR::default(), + 
update: Bool32::default(), + src_acceleration_structure: AccelerationStructureKHR::default(), + dst_acceleration_structure: AccelerationStructureKHR::default(), + geometry_array_of_pointers: Bool32::default(), + geometry_count: u32::default(), + pp_geometries: ::std::ptr::null(), + scratch_data: DeviceOrHostAddressKHR::default(), + } + } +} +impl AccelerationStructureBuildGeometryInfoKHR { + pub fn builder<'a>() -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + AccelerationStructureBuildGeometryInfoKHRBuilder { + inner: AccelerationStructureBuildGeometryInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + inner: AccelerationStructureBuildGeometryInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureBuildGeometryInfoKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + type Target = AccelerationStructureBuildGeometryInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + pub fn ty( + mut self, + ty: AccelerationStructureTypeKHR, + ) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn flags( + mut self, + flags: BuildAccelerationStructureFlagsKHR, + ) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn update(mut self, update: bool) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + self.inner.update = update.into(); + self + } + pub fn src_acceleration_structure( + mut self, + src_acceleration_structure: AccelerationStructureKHR, + ) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + 
self.inner.src_acceleration_structure = src_acceleration_structure; + self + } + pub fn dst_acceleration_structure( + mut self, + dst_acceleration_structure: AccelerationStructureKHR, + ) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + self.inner.dst_acceleration_structure = dst_acceleration_structure; + self + } + pub fn geometry_array_of_pointers( + mut self, + geometry_array_of_pointers: bool, + ) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + self.inner.geometry_array_of_pointers = geometry_array_of_pointers.into(); + self + } + pub fn geometry_count( + mut self, + geometry_count: u32, + ) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + self.inner.geometry_count = geometry_count; + self + } + pub fn geometries( + mut self, + geometries: &'a *const AccelerationStructureGeometryKHR, + ) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + self.inner.pp_geometries = geometries; + self + } + pub fn scratch_data( + mut self, + scratch_data: DeviceOrHostAddressKHR, + ) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + self.inner.scratch_data = scratch_data; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureBuildGeometryInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. 
Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureBuildGeometryInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct AccelerationStructureBuildOffsetInfoKHR { + pub primitive_count: u32, + pub primitive_offset: u32, + pub first_vertex: u32, + pub transform_offset: u32, +} +impl AccelerationStructureBuildOffsetInfoKHR { + pub fn builder<'a>() -> AccelerationStructureBuildOffsetInfoKHRBuilder<'a> { + AccelerationStructureBuildOffsetInfoKHRBuilder { + inner: AccelerationStructureBuildOffsetInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureBuildOffsetInfoKHRBuilder<'a> { + inner: AccelerationStructureBuildOffsetInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for AccelerationStructureBuildOffsetInfoKHRBuilder<'a> { + type Target = AccelerationStructureBuildOffsetInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureBuildOffsetInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureBuildOffsetInfoKHRBuilder<'a> { + pub fn primitive_count( + mut self, + primitive_count: u32, + ) -> AccelerationStructureBuildOffsetInfoKHRBuilder<'a> { + self.inner.primitive_count = primitive_count; + self + } + pub fn primitive_offset( + mut self, + primitive_offset: u32, + ) -> AccelerationStructureBuildOffsetInfoKHRBuilder<'a> { + self.inner.primitive_offset = primitive_offset; + self + } + pub fn first_vertex( + mut self, + first_vertex: u32, + ) -> AccelerationStructureBuildOffsetInfoKHRBuilder<'a> { + self.inner.first_vertex = first_vertex; + self + } + pub fn transform_offset( + mut self, + 
transform_offset: u32, + ) -> AccelerationStructureBuildOffsetInfoKHRBuilder<'a> { + self.inner.transform_offset = transform_offset; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureBuildOffsetInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AccelerationStructureCreateGeometryTypeInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub geometry_type: GeometryTypeKHR, + pub max_primitive_count: u32, + pub index_type: IndexType, + pub max_vertex_count: u32, + pub vertex_format: Format, + pub allows_transforms: Bool32, +} +impl ::std::default::Default for AccelerationStructureCreateGeometryTypeInfoKHR { + fn default() -> AccelerationStructureCreateGeometryTypeInfoKHR { + AccelerationStructureCreateGeometryTypeInfoKHR { + s_type: StructureType::ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR, + p_next: ::std::ptr::null(), + geometry_type: GeometryTypeKHR::default(), + max_primitive_count: u32::default(), + index_type: IndexType::default(), + max_vertex_count: u32::default(), + vertex_format: Format::default(), + allows_transforms: Bool32::default(), + } + } +} +impl AccelerationStructureCreateGeometryTypeInfoKHR { + pub fn builder<'a>() -> AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + AccelerationStructureCreateGeometryTypeInfoKHRBuilder { + inner: AccelerationStructureCreateGeometryTypeInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + inner: AccelerationStructureCreateGeometryTypeInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait 
ExtendsAccelerationStructureCreateGeometryTypeInfoKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + type Target = AccelerationStructureCreateGeometryTypeInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + pub fn geometry_type( + mut self, + geometry_type: GeometryTypeKHR, + ) -> AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + self.inner.geometry_type = geometry_type; + self + } + pub fn max_primitive_count( + mut self, + max_primitive_count: u32, + ) -> AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + self.inner.max_primitive_count = max_primitive_count; + self + } + pub fn index_type( + mut self, + index_type: IndexType, + ) -> AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + self.inner.index_type = index_type; + self + } + pub fn max_vertex_count( + mut self, + max_vertex_count: u32, + ) -> AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + self.inner.max_vertex_count = max_vertex_count; + self + } + pub fn vertex_format( + mut self, + vertex_format: Format, + ) -> AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + self.inner.vertex_format = vertex_format; + self + } + pub fn allows_transforms( + mut self, + allows_transforms: bool, + ) -> AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + self.inner.allows_transforms = allows_transforms.into(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureCreateGeometryTypeInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureCreateGeometryTypeInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AccelerationStructureCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub compacted_size: DeviceSize, + pub ty: AccelerationStructureTypeKHR, + pub flags: BuildAccelerationStructureFlagsKHR, + pub max_geometry_count: u32, + pub p_geometry_infos: *const AccelerationStructureCreateGeometryTypeInfoKHR, + pub device_address: DeviceAddress, +} +impl ::std::default::Default for AccelerationStructureCreateInfoKHR { + fn default() -> AccelerationStructureCreateInfoKHR { + AccelerationStructureCreateInfoKHR { + s_type: StructureType::ACCELERATION_STRUCTURE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + compacted_size: DeviceSize::default(), + ty: AccelerationStructureTypeKHR::default(), + flags: BuildAccelerationStructureFlagsKHR::default(), + max_geometry_count: u32::default(), + p_geometry_infos: ::std::ptr::null(), + device_address: DeviceAddress::default(), + } + } +} +impl AccelerationStructureCreateInfoKHR { + pub fn builder<'a>() -> 
AccelerationStructureCreateInfoKHRBuilder<'a> { + AccelerationStructureCreateInfoKHRBuilder { + inner: AccelerationStructureCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureCreateInfoKHRBuilder<'a> { + inner: AccelerationStructureCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureCreateInfoKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureCreateInfoKHRBuilder<'a> { + type Target = AccelerationStructureCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureCreateInfoKHRBuilder<'a> { + pub fn compacted_size( + mut self, + compacted_size: DeviceSize, + ) -> AccelerationStructureCreateInfoKHRBuilder<'a> { + self.inner.compacted_size = compacted_size; + self + } + pub fn ty( + mut self, + ty: AccelerationStructureTypeKHR, + ) -> AccelerationStructureCreateInfoKHRBuilder<'a> { + self.inner.ty = ty; + self + } + pub fn flags( + mut self, + flags: BuildAccelerationStructureFlagsKHR, + ) -> AccelerationStructureCreateInfoKHRBuilder<'a> { + self.inner.flags = flags; + self + } + pub fn geometry_infos( + mut self, + geometry_infos: &'a [AccelerationStructureCreateGeometryTypeInfoKHR], + ) -> AccelerationStructureCreateInfoKHRBuilder<'a> { + self.inner.max_geometry_count = geometry_infos.len() as _; + self.inner.p_geometry_infos = geometry_infos.as_ptr(); + self + } + pub fn device_address( + mut self, + device_address: DeviceAddress, + ) -> AccelerationStructureCreateInfoKHRBuilder<'a> { + self.inner.device_address = device_address; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Default, Debug)] +#[doc = ""] +pub struct AabbPositionsKHR { + pub min_x: f32, + pub min_y: f32, + pub min_z: f32, + pub max_x: f32, + pub max_y: f32, + pub max_z: f32, +} +impl AabbPositionsKHR { + pub fn builder<'a>() -> AabbPositionsKHRBuilder<'a> { + AabbPositionsKHRBuilder { + inner: AabbPositionsKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AabbPositionsKHRBuilder<'a> { + inner: AabbPositionsKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +impl<'a> ::std::ops::Deref for AabbPositionsKHRBuilder<'a> { + type Target = AabbPositionsKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AabbPositionsKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AabbPositionsKHRBuilder<'a> { + pub fn min_x(mut self, min_x: f32) -> AabbPositionsKHRBuilder<'a> { + self.inner.min_x = min_x; + self + } + pub fn min_y(mut self, min_y: f32) -> AabbPositionsKHRBuilder<'a> { + self.inner.min_y 
= min_y; + self + } + pub fn min_z(mut self, min_z: f32) -> AabbPositionsKHRBuilder<'a> { + self.inner.min_z = min_z; + self + } + pub fn max_x(mut self, max_x: f32) -> AabbPositionsKHRBuilder<'a> { + self.inner.max_x = max_x; + self + } + pub fn max_y(mut self, max_y: f32) -> AabbPositionsKHRBuilder<'a> { + self.inner.max_y = max_y; + self + } + pub fn max_z(mut self, max_z: f32) -> AabbPositionsKHRBuilder<'a> { + self.inner.max_z = max_z; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AabbPositionsKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct TransformMatrixKHR { + pub matrix: [f32; 12], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct AccelerationStructureInstanceKHR { + pub transform: TransformMatrixKHR, + pub instance_custom_index_and_mask: u32, + pub instance_shader_binding_table_record_offset_and_flags: u32, + pub acceleration_structure_reference: u64, +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AccelerationStructureDeviceAddressInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub acceleration_structure: AccelerationStructureKHR, +} +impl ::std::default::Default for AccelerationStructureDeviceAddressInfoKHR { + fn default() -> AccelerationStructureDeviceAddressInfoKHR { + AccelerationStructureDeviceAddressInfoKHR { + s_type: StructureType::ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR, + p_next: ::std::ptr::null(), + acceleration_structure: AccelerationStructureKHR::default(), + } + } +} +impl AccelerationStructureDeviceAddressInfoKHR { + pub fn builder<'a>() -> AccelerationStructureDeviceAddressInfoKHRBuilder<'a> { + AccelerationStructureDeviceAddressInfoKHRBuilder { + inner: 
AccelerationStructureDeviceAddressInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureDeviceAddressInfoKHRBuilder<'a> { + inner: AccelerationStructureDeviceAddressInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureDeviceAddressInfoKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureDeviceAddressInfoKHRBuilder<'a> { + type Target = AccelerationStructureDeviceAddressInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureDeviceAddressInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureDeviceAddressInfoKHRBuilder<'a> { + pub fn acceleration_structure( + mut self, + acceleration_structure: AccelerationStructureKHR, + ) -> AccelerationStructureDeviceAddressInfoKHRBuilder<'a> { + self.inner.acceleration_structure = acceleration_structure; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureDeviceAddressInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureDeviceAddressInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct AccelerationStructureVersionKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub version_data: *const u8, +} +impl ::std::default::Default for AccelerationStructureVersionKHR { + fn default() -> AccelerationStructureVersionKHR { + AccelerationStructureVersionKHR { + s_type: StructureType::ACCELERATION_STRUCTURE_VERSION_KHR, + p_next: ::std::ptr::null(), + version_data: ::std::ptr::null(), + } + } +} +impl AccelerationStructureVersionKHR { + pub fn builder<'a>() -> AccelerationStructureVersionKHRBuilder<'a> { + AccelerationStructureVersionKHRBuilder { + inner: AccelerationStructureVersionKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct AccelerationStructureVersionKHRBuilder<'a> { + inner: AccelerationStructureVersionKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsAccelerationStructureVersionKHR {} +impl<'a> ::std::ops::Deref for AccelerationStructureVersionKHRBuilder<'a> { + type Target = AccelerationStructureVersionKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for AccelerationStructureVersionKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> AccelerationStructureVersionKHRBuilder<'a> { + pub fn version_data( + mut self, + version_data: *const u8, + ) -> AccelerationStructureVersionKHRBuilder<'a> { + self.inner.version_data = version_data; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. 
Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> AccelerationStructureVersionKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> AccelerationStructureVersionKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CopyAccelerationStructureInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src: AccelerationStructureKHR, + pub dst: AccelerationStructureKHR, + pub mode: CopyAccelerationStructureModeKHR, +} +impl ::std::default::Default for CopyAccelerationStructureInfoKHR { + fn default() -> CopyAccelerationStructureInfoKHR { + CopyAccelerationStructureInfoKHR { + s_type: StructureType::COPY_ACCELERATION_STRUCTURE_INFO_KHR, + p_next: ::std::ptr::null(), + src: AccelerationStructureKHR::default(), + dst: AccelerationStructureKHR::default(), + mode: CopyAccelerationStructureModeKHR::default(), + } + } +} +impl CopyAccelerationStructureInfoKHR { + pub fn builder<'a>() -> CopyAccelerationStructureInfoKHRBuilder<'a> { + CopyAccelerationStructureInfoKHRBuilder { + inner: CopyAccelerationStructureInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CopyAccelerationStructureInfoKHRBuilder<'a> { + inner: CopyAccelerationStructureInfoKHR, + 
marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCopyAccelerationStructureInfoKHR {} +impl<'a> ::std::ops::Deref for CopyAccelerationStructureInfoKHRBuilder<'a> { + type Target = CopyAccelerationStructureInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CopyAccelerationStructureInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CopyAccelerationStructureInfoKHRBuilder<'a> { + pub fn src( + mut self, + src: AccelerationStructureKHR, + ) -> CopyAccelerationStructureInfoKHRBuilder<'a> { + self.inner.src = src; + self + } + pub fn dst( + mut self, + dst: AccelerationStructureKHR, + ) -> CopyAccelerationStructureInfoKHRBuilder<'a> { + self.inner.dst = dst; + self + } + pub fn mode( + mut self, + mode: CopyAccelerationStructureModeKHR, + ) -> CopyAccelerationStructureInfoKHRBuilder<'a> { + self.inner.mode = mode; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> CopyAccelerationStructureInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CopyAccelerationStructureInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct CopyAccelerationStructureToMemoryInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src: AccelerationStructureKHR, + pub dst: DeviceOrHostAddressKHR, + pub mode: CopyAccelerationStructureModeKHR, +} +impl fmt::Debug for CopyAccelerationStructureToMemoryInfoKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("CopyAccelerationStructureToMemoryInfoKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("src", &self.src) + .field("dst", &"union") + .field("mode", &self.mode) + .finish() + } +} +impl ::std::default::Default for CopyAccelerationStructureToMemoryInfoKHR { + fn default() -> CopyAccelerationStructureToMemoryInfoKHR { + CopyAccelerationStructureToMemoryInfoKHR { + s_type: StructureType::COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR, + p_next: ::std::ptr::null(), + src: AccelerationStructureKHR::default(), + dst: DeviceOrHostAddressKHR::default(), + mode: CopyAccelerationStructureModeKHR::default(), + } + } +} +impl CopyAccelerationStructureToMemoryInfoKHR { + pub fn builder<'a>() -> CopyAccelerationStructureToMemoryInfoKHRBuilder<'a> { + CopyAccelerationStructureToMemoryInfoKHRBuilder { + inner: CopyAccelerationStructureToMemoryInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CopyAccelerationStructureToMemoryInfoKHRBuilder<'a> { + inner: CopyAccelerationStructureToMemoryInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCopyAccelerationStructureToMemoryInfoKHR {} +impl<'a> ::std::ops::Deref for CopyAccelerationStructureToMemoryInfoKHRBuilder<'a> { + type Target = 
CopyAccelerationStructureToMemoryInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CopyAccelerationStructureToMemoryInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CopyAccelerationStructureToMemoryInfoKHRBuilder<'a> { + pub fn src( + mut self, + src: AccelerationStructureKHR, + ) -> CopyAccelerationStructureToMemoryInfoKHRBuilder<'a> { + self.inner.src = src; + self + } + pub fn dst( + mut self, + dst: DeviceOrHostAddressKHR, + ) -> CopyAccelerationStructureToMemoryInfoKHRBuilder<'a> { + self.inner.dst = dst; + self + } + pub fn mode( + mut self, + mode: CopyAccelerationStructureModeKHR, + ) -> CopyAccelerationStructureToMemoryInfoKHRBuilder<'a> { + self.inner.mode = mode; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> CopyAccelerationStructureToMemoryInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CopyAccelerationStructureToMemoryInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone)] +#[doc = ""] +pub struct CopyMemoryToAccelerationStructureInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub src: DeviceOrHostAddressConstKHR, + pub dst: AccelerationStructureKHR, + pub mode: CopyAccelerationStructureModeKHR, +} +impl fmt::Debug for CopyMemoryToAccelerationStructureInfoKHR { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("CopyMemoryToAccelerationStructureInfoKHR") + .field("s_type", &self.s_type) + .field("p_next", &self.p_next) + .field("src", &"union") + .field("dst", &self.dst) + .field("mode", &self.mode) + .finish() + } +} +impl ::std::default::Default for CopyMemoryToAccelerationStructureInfoKHR { + fn default() -> CopyMemoryToAccelerationStructureInfoKHR { + CopyMemoryToAccelerationStructureInfoKHR { + s_type: StructureType::COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR, + p_next: ::std::ptr::null(), + src: DeviceOrHostAddressConstKHR::default(), + dst: AccelerationStructureKHR::default(), + mode: CopyAccelerationStructureModeKHR::default(), + } + } +} +impl CopyMemoryToAccelerationStructureInfoKHR { + pub fn builder<'a>() -> CopyMemoryToAccelerationStructureInfoKHRBuilder<'a> { + CopyMemoryToAccelerationStructureInfoKHRBuilder { + inner: CopyMemoryToAccelerationStructureInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CopyMemoryToAccelerationStructureInfoKHRBuilder<'a> { + inner: CopyMemoryToAccelerationStructureInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsCopyMemoryToAccelerationStructureInfoKHR {} +impl<'a> ::std::ops::Deref for CopyMemoryToAccelerationStructureInfoKHRBuilder<'a> { + type Target = 
CopyMemoryToAccelerationStructureInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for CopyMemoryToAccelerationStructureInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CopyMemoryToAccelerationStructureInfoKHRBuilder<'a> { + pub fn src( + mut self, + src: DeviceOrHostAddressConstKHR, + ) -> CopyMemoryToAccelerationStructureInfoKHRBuilder<'a> { + self.inner.src = src; + self + } + pub fn dst( + mut self, + dst: AccelerationStructureKHR, + ) -> CopyMemoryToAccelerationStructureInfoKHRBuilder<'a> { + self.inner.dst = dst; + self + } + pub fn mode( + mut self, + mode: CopyAccelerationStructureModeKHR, + ) -> CopyMemoryToAccelerationStructureInfoKHRBuilder<'a> { + self.inner.mode = mode; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> CopyMemoryToAccelerationStructureInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CopyMemoryToAccelerationStructureInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct RayTracingPipelineInterfaceCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub max_payload_size: u32, + pub max_attribute_size: u32, + pub max_callable_size: u32, +} +impl ::std::default::Default for RayTracingPipelineInterfaceCreateInfoKHR { + fn default() -> RayTracingPipelineInterfaceCreateInfoKHR { + RayTracingPipelineInterfaceCreateInfoKHR { + s_type: StructureType::RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + max_payload_size: u32::default(), + max_attribute_size: u32::default(), + max_callable_size: u32::default(), + } + } +} +impl RayTracingPipelineInterfaceCreateInfoKHR { + pub fn builder<'a>() -> RayTracingPipelineInterfaceCreateInfoKHRBuilder<'a> { + RayTracingPipelineInterfaceCreateInfoKHRBuilder { + inner: RayTracingPipelineInterfaceCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RayTracingPipelineInterfaceCreateInfoKHRBuilder<'a> { + inner: RayTracingPipelineInterfaceCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsRayTracingPipelineInterfaceCreateInfoKHR {} +impl<'a> ::std::ops::Deref for RayTracingPipelineInterfaceCreateInfoKHRBuilder<'a> { + type Target = RayTracingPipelineInterfaceCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RayTracingPipelineInterfaceCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RayTracingPipelineInterfaceCreateInfoKHRBuilder<'a> { + pub fn max_payload_size( + mut self, + max_payload_size: u32, + ) -> 
RayTracingPipelineInterfaceCreateInfoKHRBuilder<'a> { + self.inner.max_payload_size = max_payload_size; + self + } + pub fn max_attribute_size( + mut self, + max_attribute_size: u32, + ) -> RayTracingPipelineInterfaceCreateInfoKHRBuilder<'a> { + self.inner.max_attribute_size = max_attribute_size; + self + } + pub fn max_callable_size( + mut self, + max_callable_size: u32, + ) -> RayTracingPipelineInterfaceCreateInfoKHRBuilder<'a> { + self.inner.max_callable_size = max_callable_size; + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> RayTracingPipelineInterfaceCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RayTracingPipelineInterfaceCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeferredOperationInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub operation_handle: DeferredOperationKHR, +} +impl ::std::default::Default for DeferredOperationInfoKHR { + fn default() -> DeferredOperationInfoKHR { + DeferredOperationInfoKHR { + s_type: StructureType::DEFERRED_OPERATION_INFO_KHR, + p_next: ::std::ptr::null(), + operation_handle: DeferredOperationKHR::default(), + } + } +} +impl DeferredOperationInfoKHR { + pub fn builder<'a>() -> DeferredOperationInfoKHRBuilder<'a> { + DeferredOperationInfoKHRBuilder { + inner: DeferredOperationInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeferredOperationInfoKHRBuilder<'a> { + inner: DeferredOperationInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsRayTracingPipelineCreateInfoKHR for DeferredOperationInfoKHRBuilder<'_> {} +unsafe impl ExtendsRayTracingPipelineCreateInfoKHR for DeferredOperationInfoKHR {} +unsafe impl ExtendsAccelerationStructureBuildGeometryInfoKHR + for DeferredOperationInfoKHRBuilder<'_> +{ +} +unsafe impl ExtendsAccelerationStructureBuildGeometryInfoKHR for DeferredOperationInfoKHR {} +unsafe impl ExtendsCopyAccelerationStructureInfoKHR for DeferredOperationInfoKHRBuilder<'_> {} +unsafe impl ExtendsCopyAccelerationStructureInfoKHR for DeferredOperationInfoKHR {} +unsafe impl ExtendsCopyMemoryToAccelerationStructureInfoKHR + for DeferredOperationInfoKHRBuilder<'_> +{ +} +unsafe impl ExtendsCopyMemoryToAccelerationStructureInfoKHR for DeferredOperationInfoKHR {} +unsafe impl ExtendsCopyAccelerationStructureToMemoryInfoKHR + for DeferredOperationInfoKHRBuilder<'_> +{ +} +unsafe 
impl ExtendsCopyAccelerationStructureToMemoryInfoKHR for DeferredOperationInfoKHR {} +impl<'a> ::std::ops::Deref for DeferredOperationInfoKHRBuilder<'a> { + type Target = DeferredOperationInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeferredOperationInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeferredOperationInfoKHRBuilder<'a> { + pub fn operation_handle( + mut self, + operation_handle: DeferredOperationKHR, + ) -> DeferredOperationInfoKHRBuilder<'a> { + self.inner.operation_handle = operation_handle; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeferredOperationInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PipelineLibraryCreateInfoKHR { + pub s_type: StructureType, + pub p_next: *const c_void, + pub library_count: u32, + pub p_libraries: *const Pipeline, +} +impl ::std::default::Default for PipelineLibraryCreateInfoKHR { + fn default() -> PipelineLibraryCreateInfoKHR { + PipelineLibraryCreateInfoKHR { + s_type: StructureType::PIPELINE_LIBRARY_CREATE_INFO_KHR, + p_next: ::std::ptr::null(), + library_count: u32::default(), + p_libraries: ::std::ptr::null(), + } + } +} +impl PipelineLibraryCreateInfoKHR { + pub fn builder<'a>() -> PipelineLibraryCreateInfoKHRBuilder<'a> { + PipelineLibraryCreateInfoKHRBuilder { + inner: PipelineLibraryCreateInfoKHR::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PipelineLibraryCreateInfoKHRBuilder<'a> { + inner: PipelineLibraryCreateInfoKHR, + marker: ::std::marker::PhantomData<&'a ()>, +} +pub unsafe trait ExtendsPipelineLibraryCreateInfoKHR {} +impl<'a> 
::std::ops::Deref for PipelineLibraryCreateInfoKHRBuilder<'a> { + type Target = PipelineLibraryCreateInfoKHR; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PipelineLibraryCreateInfoKHRBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PipelineLibraryCreateInfoKHRBuilder<'a> { + pub fn libraries( + mut self, + libraries: &'a [Pipeline], + ) -> PipelineLibraryCreateInfoKHRBuilder<'a> { + self.inner.library_count = libraries.len() as _; + self.inner.p_libraries = libraries.as_ptr(); + self + } + #[doc = r" Prepends the given extension struct between the root and the first pointer. This"] + #[doc = r" method only exists on structs that can be passed to a function directly. Only"] + #[doc = r" valid extension structs can be pushed into the chain."] + #[doc = r" If the chain looks like `A -> B -> C`, and you call `builder.push_next(&mut D)`, then the"] + #[doc = r" chain will look like `A -> D -> B -> C`."] + pub fn push_next( + mut self, + next: &'a mut T, + ) -> PipelineLibraryCreateInfoKHRBuilder<'a> { + unsafe { + let next_ptr = next as *mut T as *mut BaseOutStructure; + let last_next = ptr_chain_iter(next).last().unwrap(); + (*last_next).p_next = self.inner.p_next as _; + self.inner.p_next = next_ptr as _; + } + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PipelineLibraryCreateInfoKHR { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct RenderPassTransformBeginInfoQCOM { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub transform: SurfaceTransformFlagsKHR, +} +impl ::std::default::Default for RenderPassTransformBeginInfoQCOM { + fn default() -> RenderPassTransformBeginInfoQCOM { + RenderPassTransformBeginInfoQCOM { + s_type: StructureType::RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM, + p_next: ::std::ptr::null_mut(), + transform: SurfaceTransformFlagsKHR::default(), + } + } +} +impl RenderPassTransformBeginInfoQCOM { + pub fn builder<'a>() -> RenderPassTransformBeginInfoQCOMBuilder<'a> { + RenderPassTransformBeginInfoQCOMBuilder { + inner: RenderPassTransformBeginInfoQCOM::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct RenderPassTransformBeginInfoQCOMBuilder<'a> { + inner: RenderPassTransformBeginInfoQCOM, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsRenderPassBeginInfo for RenderPassTransformBeginInfoQCOMBuilder<'_> {} +unsafe impl ExtendsRenderPassBeginInfo for RenderPassTransformBeginInfoQCOM {} +impl<'a> ::std::ops::Deref for RenderPassTransformBeginInfoQCOMBuilder<'a> { + type Target = RenderPassTransformBeginInfoQCOM; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for RenderPassTransformBeginInfoQCOMBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> RenderPassTransformBeginInfoQCOMBuilder<'a> { + pub fn transform( + mut self, + transform: SurfaceTransformFlagsKHR, + ) -> RenderPassTransformBeginInfoQCOMBuilder<'a> { + self.inner.transform = transform; + self + } + #[doc = r" Calling build will **discard** all the lifetime 
information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> RenderPassTransformBeginInfoQCOM { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct CommandBufferInheritanceRenderPassTransformInfoQCOM { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub transform: SurfaceTransformFlagsKHR, + pub render_area: Rect2D, +} +impl ::std::default::Default for CommandBufferInheritanceRenderPassTransformInfoQCOM { + fn default() -> CommandBufferInheritanceRenderPassTransformInfoQCOM { + CommandBufferInheritanceRenderPassTransformInfoQCOM { + s_type: StructureType::COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM, + p_next: ::std::ptr::null_mut(), + transform: SurfaceTransformFlagsKHR::default(), + render_area: Rect2D::default(), + } + } +} +impl CommandBufferInheritanceRenderPassTransformInfoQCOM { + pub fn builder<'a>() -> CommandBufferInheritanceRenderPassTransformInfoQCOMBuilder<'a> { + CommandBufferInheritanceRenderPassTransformInfoQCOMBuilder { + inner: CommandBufferInheritanceRenderPassTransformInfoQCOM::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct CommandBufferInheritanceRenderPassTransformInfoQCOMBuilder<'a> { + inner: CommandBufferInheritanceRenderPassTransformInfoQCOM, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsCommandBufferInheritanceInfo + for CommandBufferInheritanceRenderPassTransformInfoQCOMBuilder<'_> +{ +} +unsafe impl ExtendsCommandBufferInheritanceInfo + for CommandBufferInheritanceRenderPassTransformInfoQCOM +{ +} +impl<'a> ::std::ops::Deref for CommandBufferInheritanceRenderPassTransformInfoQCOMBuilder<'a> { + type Target = CommandBufferInheritanceRenderPassTransformInfoQCOM; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> 
::std::ops::DerefMut for CommandBufferInheritanceRenderPassTransformInfoQCOMBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> CommandBufferInheritanceRenderPassTransformInfoQCOMBuilder<'a> { + pub fn transform( + mut self, + transform: SurfaceTransformFlagsKHR, + ) -> CommandBufferInheritanceRenderPassTransformInfoQCOMBuilder<'a> { + self.inner.transform = transform; + self + } + pub fn render_area( + mut self, + render_area: Rect2D, + ) -> CommandBufferInheritanceRenderPassTransformInfoQCOMBuilder<'a> { + self.inner.render_area = render_area; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> CommandBufferInheritanceRenderPassTransformInfoQCOM { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct PhysicalDeviceDiagnosticsConfigFeaturesNV { + pub s_type: StructureType, + pub p_next: *mut c_void, + pub diagnostics_config: Bool32, +} +impl ::std::default::Default for PhysicalDeviceDiagnosticsConfigFeaturesNV { + fn default() -> PhysicalDeviceDiagnosticsConfigFeaturesNV { + PhysicalDeviceDiagnosticsConfigFeaturesNV { + s_type: StructureType::PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV, + p_next: ::std::ptr::null_mut(), + diagnostics_config: Bool32::default(), + } + } +} +impl PhysicalDeviceDiagnosticsConfigFeaturesNV { + pub fn builder<'a>() -> PhysicalDeviceDiagnosticsConfigFeaturesNVBuilder<'a> { + PhysicalDeviceDiagnosticsConfigFeaturesNVBuilder { + inner: PhysicalDeviceDiagnosticsConfigFeaturesNV::default(), + marker: ::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct PhysicalDeviceDiagnosticsConfigFeaturesNVBuilder<'a> { + inner: PhysicalDeviceDiagnosticsConfigFeaturesNV, + marker: 
::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDiagnosticsConfigFeaturesNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for PhysicalDeviceDiagnosticsConfigFeaturesNV {} +impl<'a> ::std::ops::Deref for PhysicalDeviceDiagnosticsConfigFeaturesNVBuilder<'a> { + type Target = PhysicalDeviceDiagnosticsConfigFeaturesNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for PhysicalDeviceDiagnosticsConfigFeaturesNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> PhysicalDeviceDiagnosticsConfigFeaturesNVBuilder<'a> { + pub fn diagnostics_config( + mut self, + diagnostics_config: bool, + ) -> PhysicalDeviceDiagnosticsConfigFeaturesNVBuilder<'a> { + self.inner.diagnostics_config = diagnostics_config.into(); + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> PhysicalDeviceDiagnosticsConfigFeaturesNV { + self.inner + } +} +#[repr(C)] +#[derive(Copy, Clone, Debug)] +#[doc = ""] +pub struct DeviceDiagnosticsConfigCreateInfoNV { + pub s_type: StructureType, + pub p_next: *const c_void, + pub flags: DeviceDiagnosticsConfigFlagsNV, +} +impl ::std::default::Default for DeviceDiagnosticsConfigCreateInfoNV { + fn default() -> DeviceDiagnosticsConfigCreateInfoNV { + DeviceDiagnosticsConfigCreateInfoNV { + s_type: StructureType::DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV, + p_next: ::std::ptr::null(), + flags: DeviceDiagnosticsConfigFlagsNV::default(), + } + } +} +impl DeviceDiagnosticsConfigCreateInfoNV { + pub fn builder<'a>() -> DeviceDiagnosticsConfigCreateInfoNVBuilder<'a> { + DeviceDiagnosticsConfigCreateInfoNVBuilder { + inner: DeviceDiagnosticsConfigCreateInfoNV::default(), + marker: 
::std::marker::PhantomData, + } + } +} +#[repr(transparent)] +pub struct DeviceDiagnosticsConfigCreateInfoNVBuilder<'a> { + inner: DeviceDiagnosticsConfigCreateInfoNV, + marker: ::std::marker::PhantomData<&'a ()>, +} +unsafe impl ExtendsDeviceCreateInfo for DeviceDiagnosticsConfigCreateInfoNVBuilder<'_> {} +unsafe impl ExtendsDeviceCreateInfo for DeviceDiagnosticsConfigCreateInfoNV {} +impl<'a> ::std::ops::Deref for DeviceDiagnosticsConfigCreateInfoNVBuilder<'a> { + type Target = DeviceDiagnosticsConfigCreateInfoNV; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a> ::std::ops::DerefMut for DeviceDiagnosticsConfigCreateInfoNVBuilder<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} +impl<'a> DeviceDiagnosticsConfigCreateInfoNVBuilder<'a> { + pub fn flags( + mut self, + flags: DeviceDiagnosticsConfigFlagsNV, + ) -> DeviceDiagnosticsConfigCreateInfoNVBuilder<'a> { + self.inner.flags = flags; + self + } + #[doc = r" Calling build will **discard** all the lifetime information. Only call this if"] + #[doc = r" necessary! 
Builders implement `Deref` targeting their corresponding Vulkan struct,"] + #[doc = r" so references to builders can be passed directly to Vulkan functions."] + pub fn build(self) -> DeviceDiagnosticsConfigCreateInfoNV { + self.inner + } +} #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ImageLayout(pub(crate) i32); impl ImageLayout { pub fn from_raw(x: i32) -> Self { @@ -41656,7 +52745,7 @@ impl ImageLayout { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct AttachmentLoadOp(pub(crate) i32); impl AttachmentLoadOp { pub fn from_raw(x: i32) -> Self { @@ -41673,7 +52762,7 @@ impl AttachmentLoadOp { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct AttachmentStoreOp(pub(crate) i32); impl AttachmentStoreOp { pub fn from_raw(x: i32) -> Self { @@ -41689,7 +52778,7 @@ impl AttachmentStoreOp { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ImageType(pub(crate) i32); impl ImageType { pub fn from_raw(x: i32) -> Self { @@ -41706,7 +52795,7 @@ impl ImageType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ImageTiling(pub(crate) i32); impl ImageTiling { pub fn from_raw(x: i32) -> Self { @@ -41722,7 +52811,7 @@ impl ImageTiling { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ImageViewType(pub(crate) i32); impl ImageViewType { pub fn from_raw(x: i32) -> Self { @@ -41743,7 +52832,7 @@ impl ImageViewType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct CommandBufferLevel(pub(crate) i32); impl CommandBufferLevel { pub 
fn from_raw(x: i32) -> Self { @@ -41759,7 +52848,7 @@ impl CommandBufferLevel { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ComponentSwizzle(pub(crate) i32); impl ComponentSwizzle { pub fn from_raw(x: i32) -> Self { @@ -41780,7 +52869,7 @@ impl ComponentSwizzle { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct DescriptorType(pub(crate) i32); impl DescriptorType { pub fn from_raw(x: i32) -> Self { @@ -41805,7 +52894,7 @@ impl DescriptorType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct QueryType(pub(crate) i32); impl QueryType { pub fn from_raw(x: i32) -> Self { @@ -41823,7 +52912,7 @@ impl QueryType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct BorderColor(pub(crate) i32); impl BorderColor { pub fn from_raw(x: i32) -> Self { @@ -41843,7 +52932,7 @@ impl BorderColor { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct PipelineBindPoint(pub(crate) i32); impl PipelineBindPoint { pub fn from_raw(x: i32) -> Self { @@ -41859,7 +52948,7 @@ impl PipelineBindPoint { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct PipelineCacheHeaderVersion(pub(crate) i32); impl PipelineCacheHeaderVersion { pub fn from_raw(x: i32) -> Self { @@ -41874,7 +52963,7 @@ impl PipelineCacheHeaderVersion { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct PrimitiveTopology(pub(crate) i32); impl PrimitiveTopology { pub fn from_raw(x: i32) -> Self { @@ -41899,7 +52988,7 @@ impl PrimitiveTopology { } #[derive(Clone, Copy, PartialEq, 
Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct SharingMode(pub(crate) i32); impl SharingMode { pub fn from_raw(x: i32) -> Self { @@ -41915,7 +53004,7 @@ impl SharingMode { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct IndexType(pub(crate) i32); impl IndexType { pub fn from_raw(x: i32) -> Self { @@ -41931,7 +53020,7 @@ impl IndexType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct Filter(pub(crate) i32); impl Filter { pub fn from_raw(x: i32) -> Self { @@ -41947,7 +53036,7 @@ impl Filter { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct SamplerMipmapMode(pub(crate) i32); impl SamplerMipmapMode { pub fn from_raw(x: i32) -> Self { @@ -41965,7 +53054,7 @@ impl SamplerMipmapMode { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct SamplerAddressMode(pub(crate) i32); impl SamplerAddressMode { pub fn from_raw(x: i32) -> Self { @@ -41983,7 +53072,7 @@ impl SamplerAddressMode { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct CompareOp(pub(crate) i32); impl CompareOp { pub fn from_raw(x: i32) -> Self { @@ -42005,7 +53094,7 @@ impl CompareOp { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct PolygonMode(pub(crate) i32); impl PolygonMode { pub fn from_raw(x: i32) -> Self { @@ -42022,7 +53111,7 @@ impl PolygonMode { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct FrontFace(pub(crate) i32); impl FrontFace { pub fn from_raw(x: i32) -> Self { @@ -42038,7 +53127,7 @@ impl 
FrontFace { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct BlendFactor(pub(crate) i32); impl BlendFactor { pub fn from_raw(x: i32) -> Self { @@ -42071,7 +53160,7 @@ impl BlendFactor { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct BlendOp(pub(crate) i32); impl BlendOp { pub fn from_raw(x: i32) -> Self { @@ -42090,7 +53179,7 @@ impl BlendOp { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct StencilOp(pub(crate) i32); impl StencilOp { pub fn from_raw(x: i32) -> Self { @@ -42112,7 +53201,7 @@ impl StencilOp { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct LogicOp(pub(crate) i32); impl LogicOp { pub fn from_raw(x: i32) -> Self { @@ -42142,7 +53231,7 @@ impl LogicOp { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct InternalAllocationType(pub(crate) i32); impl InternalAllocationType { pub fn from_raw(x: i32) -> Self { @@ -42157,7 +53246,7 @@ impl InternalAllocationType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct SystemAllocationScope(pub(crate) i32); impl SystemAllocationScope { pub fn from_raw(x: i32) -> Self { @@ -42176,7 +53265,7 @@ impl SystemAllocationScope { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct PhysicalDeviceType(pub(crate) i32); impl PhysicalDeviceType { pub fn from_raw(x: i32) -> Self { @@ -42195,7 +53284,7 @@ impl PhysicalDeviceType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct 
VertexInputRate(pub(crate) i32); impl VertexInputRate { pub fn from_raw(x: i32) -> Self { @@ -42211,7 +53300,7 @@ impl VertexInputRate { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct Format(pub(crate) i32); impl Format { pub fn from_raw(x: i32) -> Self { @@ -42410,7 +53499,7 @@ impl Format { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct StructureType(pub(crate) i32); impl StructureType { pub fn from_raw(x: i32) -> Self { @@ -42475,7 +53564,7 @@ impl StructureType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct SubpassContents(pub(crate) i32); impl SubpassContents { pub fn from_raw(x: i32) -> Self { @@ -42491,7 +53580,7 @@ impl SubpassContents { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct Result(pub(crate) i32); impl Result { pub fn from_raw(x: i32) -> Self { @@ -42538,6 +53627,8 @@ impl Result { pub const ERROR_FORMAT_NOT_SUPPORTED: Self = Result(-11); #[doc = "A requested pool allocation has failed due to fragmentation of the pool\'s memory"] pub const ERROR_FRAGMENTED_POOL: Self = Result(-12); + #[doc = "An unknown error has occurred, due to an implementation or application bug"] + pub const ERROR_UNKNOWN: Self = Result(-13); } impl ::std::error::Error for Result { fn description(&self) -> &str { @@ -42570,6 +53661,9 @@ impl ::std::error::Error for Result { Result::ERROR_FRAGMENTED_POOL => Some( "A requested pool allocation has failed due to fragmentation of the pool\'s memory", ), + Result::ERROR_UNKNOWN => { + Some("An unknown error has occurred, due to an implementation or application bug") + } _ => None, }; name.unwrap_or("unknown error") @@ -42606,6 +53700,9 @@ impl fmt::Display for Result { Result::ERROR_FRAGMENTED_POOL => 
Some( "A requested pool allocation has failed due to fragmentation of the pool\'s memory", ), + Result::ERROR_UNKNOWN => { + Some("An unknown error has occurred, due to an implementation or application bug") + } _ => None, }; if let Some(x) = name { @@ -42617,7 +53714,7 @@ impl fmt::Display for Result { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct DynamicState(pub(crate) i32); impl DynamicState { pub fn from_raw(x: i32) -> Self { @@ -42640,7 +53737,7 @@ impl DynamicState { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct DescriptorUpdateTemplateType(pub(crate) i32); impl DescriptorUpdateTemplateType { pub fn from_raw(x: i32) -> Self { @@ -42656,7 +53753,7 @@ impl DescriptorUpdateTemplateType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ObjectType(pub(crate) i32); impl ObjectType { pub fn from_raw(x: i32) -> Self { @@ -42721,7 +53818,23 @@ impl ObjectType { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] +pub struct SemaphoreType(pub(crate) i32); +impl SemaphoreType { + pub fn from_raw(x: i32) -> Self { + SemaphoreType(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl SemaphoreType { + pub const BINARY: Self = SemaphoreType(0); + pub const TIMELINE: Self = SemaphoreType(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] pub struct PresentModeKHR(pub(crate) i32); impl PresentModeKHR { pub fn from_raw(x: i32) -> Self { @@ -42739,7 +53852,7 @@ impl PresentModeKHR { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ColorSpaceKHR(pub(crate) i32); impl ColorSpaceKHR { pub fn from_raw(x: i32) -> Self { @@ 
-42754,7 +53867,7 @@ impl ColorSpaceKHR { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct TimeDomainEXT(pub(crate) i32); impl TimeDomainEXT { pub fn from_raw(x: i32) -> Self { @@ -42772,7 +53885,7 @@ impl TimeDomainEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct DebugReportObjectTypeEXT(pub(crate) i32); impl DebugReportObjectTypeEXT { pub fn from_raw(x: i32) -> Self { @@ -42814,13 +53927,11 @@ impl DebugReportObjectTypeEXT { pub const DEBUG_REPORT_CALLBACK: Self = DebugReportObjectTypeEXT(28); pub const DISPLAY_KHR: Self = DebugReportObjectTypeEXT(29); pub const DISPLAY_MODE_KHR: Self = DebugReportObjectTypeEXT(30); - pub const OBJECT_TABLE_NVX: Self = DebugReportObjectTypeEXT(31); - pub const INDIRECT_COMMANDS_LAYOUT_NVX: Self = DebugReportObjectTypeEXT(32); pub const VALIDATION_CACHE: Self = DebugReportObjectTypeEXT(33); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct RasterizationOrderAMD(pub(crate) i32); impl RasterizationOrderAMD { pub fn from_raw(x: i32) -> Self { @@ -42836,7 +53947,7 @@ impl RasterizationOrderAMD { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ValidationCheckEXT(pub(crate) i32); impl ValidationCheckEXT { pub fn from_raw(x: i32) -> Self { @@ -42852,7 +53963,7 @@ impl ValidationCheckEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ValidationFeatureEnableEXT(pub(crate) i32); impl ValidationFeatureEnableEXT { pub fn from_raw(x: i32) -> Self { @@ -42865,10 +53976,12 @@ impl ValidationFeatureEnableEXT { impl ValidationFeatureEnableEXT { pub const GPU_ASSISTED: Self = ValidationFeatureEnableEXT(0); pub const 
GPU_ASSISTED_RESERVE_BINDING_SLOT: Self = ValidationFeatureEnableEXT(1); + pub const BEST_PRACTICES: Self = ValidationFeatureEnableEXT(2); + pub const DEBUG_PRINTF: Self = ValidationFeatureEnableEXT(3); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ValidationFeatureDisableEXT(pub(crate) i32); impl ValidationFeatureDisableEXT { pub fn from_raw(x: i32) -> Self { @@ -42889,48 +54002,29 @@ impl ValidationFeatureDisableEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] -pub struct IndirectCommandsTokenTypeNVX(pub(crate) i32); -impl IndirectCommandsTokenTypeNVX { +#[doc = ""] +pub struct IndirectCommandsTokenTypeNV(pub(crate) i32); +impl IndirectCommandsTokenTypeNV { pub fn from_raw(x: i32) -> Self { - IndirectCommandsTokenTypeNVX(x) + IndirectCommandsTokenTypeNV(x) } pub fn as_raw(self) -> i32 { self.0 } } -impl IndirectCommandsTokenTypeNVX { - pub const PIPELINE: Self = IndirectCommandsTokenTypeNVX(0); - pub const DESCRIPTOR_SET: Self = IndirectCommandsTokenTypeNVX(1); - pub const INDEX_BUFFER: Self = IndirectCommandsTokenTypeNVX(2); - pub const VERTEX_BUFFER: Self = IndirectCommandsTokenTypeNVX(3); - pub const PUSH_CONSTANT: Self = IndirectCommandsTokenTypeNVX(4); - pub const DRAW_INDEXED: Self = IndirectCommandsTokenTypeNVX(5); - pub const DRAW: Self = IndirectCommandsTokenTypeNVX(6); - pub const DISPATCH: Self = IndirectCommandsTokenTypeNVX(7); +impl IndirectCommandsTokenTypeNV { + pub const SHADER_GROUP: Self = IndirectCommandsTokenTypeNV(0); + pub const STATE_FLAGS: Self = IndirectCommandsTokenTypeNV(1); + pub const INDEX_BUFFER: Self = IndirectCommandsTokenTypeNV(2); + pub const VERTEX_BUFFER: Self = IndirectCommandsTokenTypeNV(3); + pub const PUSH_CONSTANT: Self = IndirectCommandsTokenTypeNV(4); + pub const DRAW_INDEXED: Self = IndirectCommandsTokenTypeNV(5); + pub const DRAW: Self = IndirectCommandsTokenTypeNV(6); + 
pub const DRAW_TASKS: Self = IndirectCommandsTokenTypeNV(7); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] -pub struct ObjectEntryTypeNVX(pub(crate) i32); -impl ObjectEntryTypeNVX { - pub fn from_raw(x: i32) -> Self { - ObjectEntryTypeNVX(x) - } - pub fn as_raw(self) -> i32 { - self.0 - } -} -impl ObjectEntryTypeNVX { - pub const DESCRIPTOR_SET: Self = ObjectEntryTypeNVX(0); - pub const PIPELINE: Self = ObjectEntryTypeNVX(1); - pub const INDEX_BUFFER: Self = ObjectEntryTypeNVX(2); - pub const VERTEX_BUFFER: Self = ObjectEntryTypeNVX(3); - pub const PUSH_CONSTANT: Self = ObjectEntryTypeNVX(4); -} -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] -#[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct DisplayPowerStateEXT(pub(crate) i32); impl DisplayPowerStateEXT { pub fn from_raw(x: i32) -> Self { @@ -42947,7 +54041,7 @@ impl DisplayPowerStateEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct DeviceEventTypeEXT(pub(crate) i32); impl DeviceEventTypeEXT { pub fn from_raw(x: i32) -> Self { @@ -42962,7 +54056,7 @@ impl DeviceEventTypeEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct DisplayEventTypeEXT(pub(crate) i32); impl DisplayEventTypeEXT { pub fn from_raw(x: i32) -> Self { @@ -42977,7 +54071,7 @@ impl DisplayEventTypeEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ViewportCoordinateSwizzleNV(pub(crate) i32); impl ViewportCoordinateSwizzleNV { pub fn from_raw(x: i32) -> Self { @@ -42999,7 +54093,7 @@ impl ViewportCoordinateSwizzleNV { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct DiscardRectangleModeEXT(pub(crate) i32); impl 
DiscardRectangleModeEXT { pub fn from_raw(x: i32) -> Self { @@ -43015,7 +54109,7 @@ impl DiscardRectangleModeEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct PointClippingBehavior(pub(crate) i32); impl PointClippingBehavior { pub fn from_raw(x: i32) -> Self { @@ -43031,24 +54125,24 @@ impl PointClippingBehavior { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] -pub struct SamplerReductionModeEXT(pub(crate) i32); -impl SamplerReductionModeEXT { +#[doc = ""] +pub struct SamplerReductionMode(pub(crate) i32); +impl SamplerReductionMode { pub fn from_raw(x: i32) -> Self { - SamplerReductionModeEXT(x) + SamplerReductionMode(x) } pub fn as_raw(self) -> i32 { self.0 } } -impl SamplerReductionModeEXT { - pub const WEIGHTED_AVERAGE: Self = SamplerReductionModeEXT(0); - pub const MIN: Self = SamplerReductionModeEXT(1); - pub const MAX: Self = SamplerReductionModeEXT(2); +impl SamplerReductionMode { + pub const WEIGHTED_AVERAGE: Self = SamplerReductionMode(0); + pub const MIN: Self = SamplerReductionMode(1); + pub const MAX: Self = SamplerReductionMode(2); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct TessellationDomainOrigin(pub(crate) i32); impl TessellationDomainOrigin { pub fn from_raw(x: i32) -> Self { @@ -43064,7 +54158,7 @@ impl TessellationDomainOrigin { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct SamplerYcbcrModelConversion(pub(crate) i32); impl SamplerYcbcrModelConversion { pub fn from_raw(x: i32) -> Self { @@ -43087,7 +54181,7 @@ impl SamplerYcbcrModelConversion { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct SamplerYcbcrRange(pub(crate) i32); impl SamplerYcbcrRange { pub fn 
from_raw(x: i32) -> Self { @@ -43105,7 +54199,7 @@ impl SamplerYcbcrRange { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ChromaLocation(pub(crate) i32); impl ChromaLocation { pub fn from_raw(x: i32) -> Self { @@ -43121,7 +54215,7 @@ impl ChromaLocation { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct BlendOverlapEXT(pub(crate) i32); impl BlendOverlapEXT { pub fn from_raw(x: i32) -> Self { @@ -43138,7 +54232,7 @@ impl BlendOverlapEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct CoverageModulationModeNV(pub(crate) i32); impl CoverageModulationModeNV { pub fn from_raw(x: i32) -> Self { @@ -43156,7 +54250,23 @@ impl CoverageModulationModeNV { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] +pub struct CoverageReductionModeNV(pub(crate) i32); +impl CoverageReductionModeNV { + pub fn from_raw(x: i32) -> Self { + CoverageReductionModeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl CoverageReductionModeNV { + pub const MERGE: Self = CoverageReductionModeNV(0); + pub const TRUNCATE: Self = CoverageReductionModeNV(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] pub struct ValidationCacheHeaderVersionEXT(pub(crate) i32); impl ValidationCacheHeaderVersionEXT { pub fn from_raw(x: i32) -> Self { @@ -43171,7 +54281,7 @@ impl ValidationCacheHeaderVersionEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ShaderInfoTypeAMD(pub(crate) i32); impl ShaderInfoTypeAMD { pub fn from_raw(x: i32) -> Self { @@ -43188,7 +54298,7 @@ impl ShaderInfoTypeAMD { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, 
Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct QueueGlobalPriorityEXT(pub(crate) i32); impl QueueGlobalPriorityEXT { pub fn from_raw(x: i32) -> Self { @@ -43202,11 +54312,11 @@ impl QueueGlobalPriorityEXT { pub const LOW: Self = QueueGlobalPriorityEXT(128); pub const MEDIUM: Self = QueueGlobalPriorityEXT(256); pub const HIGH: Self = QueueGlobalPriorityEXT(512); - pub const REALTIME: Self = QueueGlobalPriorityEXT(1024); + pub const REALTIME: Self = QueueGlobalPriorityEXT(1_024); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ConservativeRasterizationModeEXT(pub(crate) i32); impl ConservativeRasterizationModeEXT { pub fn from_raw(x: i32) -> Self { @@ -43223,7 +54333,7 @@ impl ConservativeRasterizationModeEXT { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct VendorId(pub(crate) i32); impl VendorId { pub fn from_raw(x: i32) -> Self { @@ -43235,49 +54345,55 @@ impl VendorId { } impl VendorId { #[doc = "Vivante vendor ID"] - pub const VIV: Self = VendorId(0x10001); + pub const VIV: Self = VendorId(0x1_0001); #[doc = "VeriSilicon vendor ID"] - pub const VSI: Self = VendorId(0x10002); + pub const VSI: Self = VendorId(0x1_0002); #[doc = "Kazan Software Renderer"] - pub const KAZAN: Self = VendorId(0x10003); + pub const KAZAN: Self = VendorId(0x1_0003); + #[doc = "Codeplay Software Ltd. 
vendor ID"] + pub const CODEPLAY: Self = VendorId(0x1_0004); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] -pub struct DriverIdKHR(pub(crate) i32); -impl DriverIdKHR { +#[doc = ""] +pub struct DriverId(pub(crate) i32); +impl DriverId { pub fn from_raw(x: i32) -> Self { - DriverIdKHR(x) + DriverId(x) } pub fn as_raw(self) -> i32 { self.0 } } -impl DriverIdKHR { +impl DriverId { #[doc = "Advanced Micro Devices, Inc."] - pub const AMD_PROPRIETARY: Self = DriverIdKHR(1); + pub const AMD_PROPRIETARY: Self = DriverId(1); #[doc = "Advanced Micro Devices, Inc."] - pub const AMD_OPEN_SOURCE: Self = DriverIdKHR(2); + pub const AMD_OPEN_SOURCE: Self = DriverId(2); #[doc = "Mesa open source project"] - pub const MESA_RADV: Self = DriverIdKHR(3); + pub const MESA_RADV: Self = DriverId(3); #[doc = "NVIDIA Corporation"] - pub const NVIDIA_PROPRIETARY: Self = DriverIdKHR(4); + pub const NVIDIA_PROPRIETARY: Self = DriverId(4); #[doc = "Intel Corporation"] - pub const INTEL_PROPRIETARY_WINDOWS: Self = DriverIdKHR(5); + pub const INTEL_PROPRIETARY_WINDOWS: Self = DriverId(5); #[doc = "Intel Corporation"] - pub const INTEL_OPEN_SOURCE_MESA: Self = DriverIdKHR(6); + pub const INTEL_OPEN_SOURCE_MESA: Self = DriverId(6); #[doc = "Imagination Technologies"] - pub const IMAGINATION_PROPRIETARY: Self = DriverIdKHR(7); + pub const IMAGINATION_PROPRIETARY: Self = DriverId(7); #[doc = "Qualcomm Technologies, Inc."] - pub const QUALCOMM_PROPRIETARY: Self = DriverIdKHR(8); + pub const QUALCOMM_PROPRIETARY: Self = DriverId(8); #[doc = "Arm Limited"] - pub const ARM_PROPRIETARY: Self = DriverIdKHR(9); + pub const ARM_PROPRIETARY: Self = DriverId(9); #[doc = "Google LLC"] - pub const GOOGLE_PASTEL: Self = DriverIdKHR(10); + pub const GOOGLE_SWIFTSHADER: Self = DriverId(10); + #[doc = "Google LLC"] + pub const GGP_PROPRIETARY: Self = DriverId(11); + #[doc = "Broadcom Inc."] + pub const BROADCOM_PROPRIETARY: Self = DriverId(12); } 
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct ShadingRatePaletteEntryNV(pub(crate) i32); impl ShadingRatePaletteEntryNV { pub fn from_raw(x: i32) -> Self { @@ -43303,7 +54419,7 @@ impl ShadingRatePaletteEntryNV { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] pub struct CoarseSampleOrderTypeNV(pub(crate) i32); impl CoarseSampleOrderTypeNV { pub fn from_raw(x: i32) -> Self { @@ -43321,89 +54437,108 @@ impl CoarseSampleOrderTypeNV { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] -pub struct CopyAccelerationStructureModeNV(pub(crate) i32); -impl CopyAccelerationStructureModeNV { +#[doc = ""] +pub struct CopyAccelerationStructureModeKHR(pub(crate) i32); +impl CopyAccelerationStructureModeKHR { pub fn from_raw(x: i32) -> Self { - CopyAccelerationStructureModeNV(x) + CopyAccelerationStructureModeKHR(x) } pub fn as_raw(self) -> i32 { self.0 } } -impl CopyAccelerationStructureModeNV { - pub const CLONE: Self = CopyAccelerationStructureModeNV(0); - pub const COMPACT: Self = CopyAccelerationStructureModeNV(1); +impl CopyAccelerationStructureModeKHR { + pub const CLONE: Self = CopyAccelerationStructureModeKHR(0); + pub const COMPACT: Self = CopyAccelerationStructureModeKHR(1); + pub const SERIALIZE: Self = CopyAccelerationStructureModeKHR(2); + pub const DESERIALIZE: Self = CopyAccelerationStructureModeKHR(3); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] -pub struct AccelerationStructureTypeNV(pub(crate) i32); -impl AccelerationStructureTypeNV { +#[doc = ""] +pub struct AccelerationStructureTypeKHR(pub(crate) i32); +impl AccelerationStructureTypeKHR { pub fn from_raw(x: i32) -> Self { - AccelerationStructureTypeNV(x) + AccelerationStructureTypeKHR(x) } pub fn as_raw(self) -> i32 { self.0 } } -impl 
AccelerationStructureTypeNV { - pub const TOP_LEVEL: Self = AccelerationStructureTypeNV(0); - pub const BOTTOM_LEVEL: Self = AccelerationStructureTypeNV(1); +impl AccelerationStructureTypeKHR { + pub const TOP_LEVEL: Self = AccelerationStructureTypeKHR(0); + pub const BOTTOM_LEVEL: Self = AccelerationStructureTypeKHR(1); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] -pub struct GeometryTypeNV(pub(crate) i32); -impl GeometryTypeNV { +#[doc = ""] +pub struct GeometryTypeKHR(pub(crate) i32); +impl GeometryTypeKHR { pub fn from_raw(x: i32) -> Self { - GeometryTypeNV(x) + GeometryTypeKHR(x) } pub fn as_raw(self) -> i32 { self.0 } } -impl GeometryTypeNV { - pub const TRIANGLES: Self = GeometryTypeNV(0); - pub const AABBS: Self = GeometryTypeNV(1); +impl GeometryTypeKHR { + pub const TRIANGLES: Self = GeometryTypeKHR(0); + pub const AABBS: Self = GeometryTypeKHR(1); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] -pub struct AccelerationStructureMemoryRequirementsTypeNV(pub(crate) i32); -impl AccelerationStructureMemoryRequirementsTypeNV { +#[doc = ""] +pub struct AccelerationStructureMemoryRequirementsTypeKHR(pub(crate) i32); +impl AccelerationStructureMemoryRequirementsTypeKHR { pub fn from_raw(x: i32) -> Self { - AccelerationStructureMemoryRequirementsTypeNV(x) + AccelerationStructureMemoryRequirementsTypeKHR(x) } pub fn as_raw(self) -> i32 { self.0 } } -impl AccelerationStructureMemoryRequirementsTypeNV { - pub const OBJECT: Self = AccelerationStructureMemoryRequirementsTypeNV(0); - pub const BUILD_SCRATCH: Self = AccelerationStructureMemoryRequirementsTypeNV(1); - pub const UPDATE_SCRATCH: Self = AccelerationStructureMemoryRequirementsTypeNV(2); +impl AccelerationStructureMemoryRequirementsTypeKHR { + pub const OBJECT: Self = AccelerationStructureMemoryRequirementsTypeKHR(0); + pub const BUILD_SCRATCH: Self = 
AccelerationStructureMemoryRequirementsTypeKHR(1); + pub const UPDATE_SCRATCH: Self = AccelerationStructureMemoryRequirementsTypeKHR(2); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] -pub struct RayTracingShaderGroupTypeNV(pub(crate) i32); -impl RayTracingShaderGroupTypeNV { +#[doc = ""] +pub struct AccelerationStructureBuildTypeKHR(pub(crate) i32); +impl AccelerationStructureBuildTypeKHR { pub fn from_raw(x: i32) -> Self { - RayTracingShaderGroupTypeNV(x) + AccelerationStructureBuildTypeKHR(x) } pub fn as_raw(self) -> i32 { self.0 } } -impl RayTracingShaderGroupTypeNV { - pub const GENERAL: Self = RayTracingShaderGroupTypeNV(0); - pub const TRIANGLES_HIT_GROUP: Self = RayTracingShaderGroupTypeNV(1); - pub const PROCEDURAL_HIT_GROUP: Self = RayTracingShaderGroupTypeNV(2); +impl AccelerationStructureBuildTypeKHR { + pub const HOST: Self = AccelerationStructureBuildTypeKHR(0); + pub const DEVICE: Self = AccelerationStructureBuildTypeKHR(1); + pub const HOST_OR_DEVICE: Self = AccelerationStructureBuildTypeKHR(2); } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] -#[doc = ""] +#[doc = ""] +pub struct RayTracingShaderGroupTypeKHR(pub(crate) i32); +impl RayTracingShaderGroupTypeKHR { + pub fn from_raw(x: i32) -> Self { + RayTracingShaderGroupTypeKHR(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl RayTracingShaderGroupTypeKHR { + pub const GENERAL: Self = RayTracingShaderGroupTypeKHR(0); + pub const TRIANGLES_HIT_GROUP: Self = RayTracingShaderGroupTypeKHR(1); + pub const PROCEDURAL_HIT_GROUP: Self = RayTracingShaderGroupTypeKHR(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] pub struct MemoryOverallocationBehaviorAMD(pub(crate) i32); impl MemoryOverallocationBehaviorAMD { pub fn from_raw(x: i32) -> Self { @@ -43418,20 +54553,287 @@ impl MemoryOverallocationBehaviorAMD { pub const ALLOWED: Self = 
MemoryOverallocationBehaviorAMD(1); pub const DISALLOWED: Self = MemoryOverallocationBehaviorAMD(2); } +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ScopeNV(pub(crate) i32); +impl ScopeNV { + pub fn from_raw(x: i32) -> Self { + ScopeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ScopeNV { + pub const DEVICE: Self = ScopeNV(1); + pub const WORKGROUP: Self = ScopeNV(2); + pub const SUBGROUP: Self = ScopeNV(3); + pub const QUEUE_FAMILY: Self = ScopeNV(5); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ComponentTypeNV(pub(crate) i32); +impl ComponentTypeNV { + pub fn from_raw(x: i32) -> Self { + ComponentTypeNV(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ComponentTypeNV { + pub const FLOAT16: Self = ComponentTypeNV(0); + pub const FLOAT32: Self = ComponentTypeNV(1); + pub const FLOAT64: Self = ComponentTypeNV(2); + pub const SINT8: Self = ComponentTypeNV(3); + pub const SINT16: Self = ComponentTypeNV(4); + pub const SINT32: Self = ComponentTypeNV(5); + pub const SINT64: Self = ComponentTypeNV(6); + pub const UINT8: Self = ComponentTypeNV(7); + pub const UINT16: Self = ComponentTypeNV(8); + pub const UINT32: Self = ComponentTypeNV(9); + pub const UINT64: Self = ComponentTypeNV(10); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct FullScreenExclusiveEXT(pub(crate) i32); +impl FullScreenExclusiveEXT { + pub fn from_raw(x: i32) -> Self { + FullScreenExclusiveEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl FullScreenExclusiveEXT { + pub const DEFAULT: Self = FullScreenExclusiveEXT(0); + pub const ALLOWED: Self = FullScreenExclusiveEXT(1); + pub const DISALLOWED: Self = FullScreenExclusiveEXT(2); + pub const APPLICATION_CONTROLLED: Self = FullScreenExclusiveEXT(3); +} +#[derive(Clone, Copy, PartialEq, Eq, 
PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PerformanceCounterScopeKHR(pub(crate) i32); +impl PerformanceCounterScopeKHR { + pub fn from_raw(x: i32) -> Self { + PerformanceCounterScopeKHR(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PerformanceCounterScopeKHR { + pub const COMMAND_BUFFER: Self = PerformanceCounterScopeKHR(0); + pub const RENDER_PASS: Self = PerformanceCounterScopeKHR(1); + pub const COMMAND: Self = PerformanceCounterScopeKHR(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PerformanceCounterUnitKHR(pub(crate) i32); +impl PerformanceCounterUnitKHR { + pub fn from_raw(x: i32) -> Self { + PerformanceCounterUnitKHR(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PerformanceCounterUnitKHR { + pub const GENERIC: Self = PerformanceCounterUnitKHR(0); + pub const PERCENTAGE: Self = PerformanceCounterUnitKHR(1); + pub const NANOSECONDS: Self = PerformanceCounterUnitKHR(2); + pub const BYTES: Self = PerformanceCounterUnitKHR(3); + pub const BYTES_PER_SECOND: Self = PerformanceCounterUnitKHR(4); + pub const KELVIN: Self = PerformanceCounterUnitKHR(5); + pub const WATTS: Self = PerformanceCounterUnitKHR(6); + pub const VOLTS: Self = PerformanceCounterUnitKHR(7); + pub const AMPS: Self = PerformanceCounterUnitKHR(8); + pub const HERTZ: Self = PerformanceCounterUnitKHR(9); + pub const CYCLES: Self = PerformanceCounterUnitKHR(10); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PerformanceCounterStorageKHR(pub(crate) i32); +impl PerformanceCounterStorageKHR { + pub fn from_raw(x: i32) -> Self { + PerformanceCounterStorageKHR(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PerformanceCounterStorageKHR { + pub const INT32: Self = PerformanceCounterStorageKHR(0); + pub const INT64: Self = PerformanceCounterStorageKHR(1); + pub const UINT32: Self = 
PerformanceCounterStorageKHR(2); + pub const UINT64: Self = PerformanceCounterStorageKHR(3); + pub const FLOAT32: Self = PerformanceCounterStorageKHR(4); + pub const FLOAT64: Self = PerformanceCounterStorageKHR(5); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PerformanceConfigurationTypeINTEL(pub(crate) i32); +impl PerformanceConfigurationTypeINTEL { + pub fn from_raw(x: i32) -> Self { + PerformanceConfigurationTypeINTEL(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PerformanceConfigurationTypeINTEL { + pub const PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL: Self = + PerformanceConfigurationTypeINTEL(0); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct QueryPoolSamplingModeINTEL(pub(crate) i32); +impl QueryPoolSamplingModeINTEL { + pub fn from_raw(x: i32) -> Self { + QueryPoolSamplingModeINTEL(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl QueryPoolSamplingModeINTEL { + pub const QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL: Self = QueryPoolSamplingModeINTEL(0); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PerformanceOverrideTypeINTEL(pub(crate) i32); +impl PerformanceOverrideTypeINTEL { + pub fn from_raw(x: i32) -> Self { + PerformanceOverrideTypeINTEL(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PerformanceOverrideTypeINTEL { + pub const PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL: Self = PerformanceOverrideTypeINTEL(0); + pub const PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL: Self = + PerformanceOverrideTypeINTEL(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PerformanceParameterTypeINTEL(pub(crate) i32); +impl PerformanceParameterTypeINTEL { + pub fn from_raw(x: i32) -> Self { + 
PerformanceParameterTypeINTEL(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PerformanceParameterTypeINTEL { + pub const PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL: Self = + PerformanceParameterTypeINTEL(0); + pub const PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALIDS_INTEL: Self = + PerformanceParameterTypeINTEL(1); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PerformanceValueTypeINTEL(pub(crate) i32); +impl PerformanceValueTypeINTEL { + pub fn from_raw(x: i32) -> Self { + PerformanceValueTypeINTEL(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl PerformanceValueTypeINTEL { + pub const PERFORMANCE_VALUE_TYPE_UINT32_INTEL: Self = PerformanceValueTypeINTEL(0); + pub const PERFORMANCE_VALUE_TYPE_UINT64_INTEL: Self = PerformanceValueTypeINTEL(1); + pub const PERFORMANCE_VALUE_TYPE_FLOAT_INTEL: Self = PerformanceValueTypeINTEL(2); + pub const PERFORMANCE_VALUE_TYPE_BOOL_INTEL: Self = PerformanceValueTypeINTEL(3); + pub const PERFORMANCE_VALUE_TYPE_STRING_INTEL: Self = PerformanceValueTypeINTEL(4); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct ShaderFloatControlsIndependence(pub(crate) i32); +impl ShaderFloatControlsIndependence { + pub fn from_raw(x: i32) -> Self { + ShaderFloatControlsIndependence(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl ShaderFloatControlsIndependence { + pub const TYPE_32_ONLY: Self = ShaderFloatControlsIndependence(0); + pub const ALL: Self = ShaderFloatControlsIndependence(1); + pub const NONE: Self = ShaderFloatControlsIndependence(2); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct PipelineExecutableStatisticFormatKHR(pub(crate) i32); +impl PipelineExecutableStatisticFormatKHR { + pub fn from_raw(x: i32) -> Self { + PipelineExecutableStatisticFormatKHR(x) + } + pub fn 
as_raw(self) -> i32 { + self.0 + } +} +impl PipelineExecutableStatisticFormatKHR { + pub const BOOL32: Self = PipelineExecutableStatisticFormatKHR(0); + pub const INT64: Self = PipelineExecutableStatisticFormatKHR(1); + pub const UINT64: Self = PipelineExecutableStatisticFormatKHR(2); + pub const FLOAT64: Self = PipelineExecutableStatisticFormatKHR(3); +} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +#[doc = ""] +pub struct LineRasterizationModeEXT(pub(crate) i32); +impl LineRasterizationModeEXT { + pub fn from_raw(x: i32) -> Self { + LineRasterizationModeEXT(x) + } + pub fn as_raw(self) -> i32 { + self.0 + } +} +impl LineRasterizationModeEXT { + pub const DEFAULT: Self = LineRasterizationModeEXT(0); + pub const RECTANGULAR: Self = LineRasterizationModeEXT(1); + pub const BRESENHAM: Self = LineRasterizationModeEXT(2); + pub const RECTANGULAR_SMOOTH: Self = LineRasterizationModeEXT(3); +} #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] +pub struct PipelineCacheCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(PipelineCacheCreateFlags, 0b0, Flags); +impl PipelineCacheCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] pub struct CullModeFlags(pub(crate) Flags); vk_bitflags_wrapped!(CullModeFlags, 0b11, Flags); impl CullModeFlags { pub const NONE: Self = CullModeFlags(0); pub const FRONT: Self = CullModeFlags(0b1); pub const BACK: Self = CullModeFlags(0b10); - pub const FRONT_AND_BACK: Self = CullModeFlags(0x00000003); + pub const FRONT_AND_BACK: Self = CullModeFlags(0x0000_0003); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct QueueFlags(pub(crate) Flags); vk_bitflags_wrapped!(QueueFlags, 0b1111, Flags); impl QueueFlags { @@ -43446,21 +54848,21 @@ impl QueueFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, 
PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct RenderPassCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(RenderPassCreateFlags, 0b0, Flags); impl RenderPassCreateFlags {} #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DeviceQueueCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(DeviceQueueCreateFlags, 0b0, Flags); impl DeviceQueueCreateFlags {} #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct MemoryPropertyFlags(pub(crate) Flags); -vk_bitflags_wrapped!(MemoryPropertyFlags, 0b11111, Flags); +vk_bitflags_wrapped!(MemoryPropertyFlags, 0b1_1111, Flags); impl MemoryPropertyFlags { #[doc = "If otherwise stated, then allocate memory on device"] pub const DEVICE_LOCAL: Self = MemoryPropertyFlags(0b1); @@ -43471,11 +54873,11 @@ impl MemoryPropertyFlags { #[doc = "Memory will be cached by the host"] pub const HOST_CACHED: Self = MemoryPropertyFlags(0b1000); #[doc = "Memory may be allocated by the driver when it is required"] - pub const LAZILY_ALLOCATED: Self = MemoryPropertyFlags(0b10000); + pub const LAZILY_ALLOCATED: Self = MemoryPropertyFlags(0b1_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct MemoryHeapFlags(pub(crate) Flags); vk_bitflags_wrapped!(MemoryHeapFlags, 0b1, Flags); impl MemoryHeapFlags { @@ -43484,9 +54886,9 @@ impl MemoryHeapFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct AccessFlags(pub(crate) Flags); -vk_bitflags_wrapped!(AccessFlags, 0b11111111111111111, Flags); +vk_bitflags_wrapped!(AccessFlags, 0b1_1111_1111_1111_1111, Flags); impl AccessFlags { #[doc = "Controls coherency of indirect command reads"] pub const INDIRECT_COMMAND_READ: Self = AccessFlags(0b1); @@ -43497,37 +54899,37 @@ impl AccessFlags { #[doc = "Controls coherency of 
uniform buffer reads"] pub const UNIFORM_READ: Self = AccessFlags(0b1000); #[doc = "Controls coherency of input attachment reads"] - pub const INPUT_ATTACHMENT_READ: Self = AccessFlags(0b10000); + pub const INPUT_ATTACHMENT_READ: Self = AccessFlags(0b1_0000); #[doc = "Controls coherency of shader reads"] - pub const SHADER_READ: Self = AccessFlags(0b100000); + pub const SHADER_READ: Self = AccessFlags(0b10_0000); #[doc = "Controls coherency of shader writes"] - pub const SHADER_WRITE: Self = AccessFlags(0b1000000); + pub const SHADER_WRITE: Self = AccessFlags(0b100_0000); #[doc = "Controls coherency of color attachment reads"] - pub const COLOR_ATTACHMENT_READ: Self = AccessFlags(0b10000000); + pub const COLOR_ATTACHMENT_READ: Self = AccessFlags(0b1000_0000); #[doc = "Controls coherency of color attachment writes"] - pub const COLOR_ATTACHMENT_WRITE: Self = AccessFlags(0b100000000); + pub const COLOR_ATTACHMENT_WRITE: Self = AccessFlags(0b1_0000_0000); #[doc = "Controls coherency of depth/stencil attachment reads"] - pub const DEPTH_STENCIL_ATTACHMENT_READ: Self = AccessFlags(0b1000000000); + pub const DEPTH_STENCIL_ATTACHMENT_READ: Self = AccessFlags(0b10_0000_0000); #[doc = "Controls coherency of depth/stencil attachment writes"] - pub const DEPTH_STENCIL_ATTACHMENT_WRITE: Self = AccessFlags(0b10000000000); + pub const DEPTH_STENCIL_ATTACHMENT_WRITE: Self = AccessFlags(0b100_0000_0000); #[doc = "Controls coherency of transfer reads"] - pub const TRANSFER_READ: Self = AccessFlags(0b100000000000); + pub const TRANSFER_READ: Self = AccessFlags(0b1000_0000_0000); #[doc = "Controls coherency of transfer writes"] - pub const TRANSFER_WRITE: Self = AccessFlags(0b1000000000000); + pub const TRANSFER_WRITE: Self = AccessFlags(0b1_0000_0000_0000); #[doc = "Controls coherency of host reads"] - pub const HOST_READ: Self = AccessFlags(0b10000000000000); + pub const HOST_READ: Self = AccessFlags(0b10_0000_0000_0000); #[doc = "Controls coherency of host writes"] - pub const 
HOST_WRITE: Self = AccessFlags(0b100000000000000); + pub const HOST_WRITE: Self = AccessFlags(0b100_0000_0000_0000); #[doc = "Controls coherency of memory reads"] - pub const MEMORY_READ: Self = AccessFlags(0b1000000000000000); + pub const MEMORY_READ: Self = AccessFlags(0b1000_0000_0000_0000); #[doc = "Controls coherency of memory writes"] - pub const MEMORY_WRITE: Self = AccessFlags(0b10000000000000000); + pub const MEMORY_WRITE: Self = AccessFlags(0b1_0000_0000_0000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct BufferUsageFlags(pub(crate) Flags); -vk_bitflags_wrapped!(BufferUsageFlags, 0b111111111, Flags); +vk_bitflags_wrapped!(BufferUsageFlags, 0b1_1111_1111, Flags); impl BufferUsageFlags { #[doc = "Can be used as a source of transfer operations"] pub const TRANSFER_SRC: Self = BufferUsageFlags(0b1); @@ -43538,19 +54940,19 @@ impl BufferUsageFlags { #[doc = "Can be used as IBO"] pub const STORAGE_TEXEL_BUFFER: Self = BufferUsageFlags(0b1000); #[doc = "Can be used as UBO"] - pub const UNIFORM_BUFFER: Self = BufferUsageFlags(0b10000); + pub const UNIFORM_BUFFER: Self = BufferUsageFlags(0b1_0000); #[doc = "Can be used as SSBO"] - pub const STORAGE_BUFFER: Self = BufferUsageFlags(0b100000); + pub const STORAGE_BUFFER: Self = BufferUsageFlags(0b10_0000); #[doc = "Can be used as source of fixed-function index fetch (index buffer)"] - pub const INDEX_BUFFER: Self = BufferUsageFlags(0b1000000); + pub const INDEX_BUFFER: Self = BufferUsageFlags(0b100_0000); #[doc = "Can be used as source of fixed-function vertex fetch (VBO)"] - pub const VERTEX_BUFFER: Self = BufferUsageFlags(0b10000000); + pub const VERTEX_BUFFER: Self = BufferUsageFlags(0b1000_0000); #[doc = "Can be the source of indirect parameters (e.g. 
indirect buffer, parameter buffer)"] - pub const INDIRECT_BUFFER: Self = BufferUsageFlags(0b100000000); + pub const INDIRECT_BUFFER: Self = BufferUsageFlags(0b1_0000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct BufferCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(BufferCreateFlags, 0b111, Flags); impl BufferCreateFlags { @@ -43563,24 +54965,28 @@ impl BufferCreateFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ShaderStageFlags(pub(crate) Flags); -vk_bitflags_wrapped!(ShaderStageFlags, 0b1111111111111111111111111111111, Flags); +vk_bitflags_wrapped!( + ShaderStageFlags, + 0b111_1111_1111_1111_1111_1111_1111_1111, + Flags +); impl ShaderStageFlags { pub const VERTEX: Self = ShaderStageFlags(0b1); pub const TESSELLATION_CONTROL: Self = ShaderStageFlags(0b10); pub const TESSELLATION_EVALUATION: Self = ShaderStageFlags(0b100); pub const GEOMETRY: Self = ShaderStageFlags(0b1000); - pub const FRAGMENT: Self = ShaderStageFlags(0b10000); - pub const COMPUTE: Self = ShaderStageFlags(0b100000); - pub const ALL_GRAPHICS: Self = ShaderStageFlags(0x0000001F); - pub const ALL: Self = ShaderStageFlags(0x7FFFFFFF); + pub const FRAGMENT: Self = ShaderStageFlags(0b1_0000); + pub const COMPUTE: Self = ShaderStageFlags(0b10_0000); + pub const ALL_GRAPHICS: Self = ShaderStageFlags(0x0000_001F); + pub const ALL: Self = ShaderStageFlags(0x7FFF_FFFF); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ImageUsageFlags(pub(crate) Flags); -vk_bitflags_wrapped!(ImageUsageFlags, 0b11111111, Flags); +vk_bitflags_wrapped!(ImageUsageFlags, 0b1111_1111, Flags); impl ImageUsageFlags { #[doc = "Can be used as a source of transfer operations"] pub const TRANSFER_SRC: Self = ImageUsageFlags(0b1); @@ -43591,19 +54997,19 @@ impl ImageUsageFlags { #[doc = "Can be used as 
storage image (STORAGE_IMAGE descriptor type)"] pub const STORAGE: Self = ImageUsageFlags(0b1000); #[doc = "Can be used as framebuffer color attachment"] - pub const COLOR_ATTACHMENT: Self = ImageUsageFlags(0b10000); + pub const COLOR_ATTACHMENT: Self = ImageUsageFlags(0b1_0000); #[doc = "Can be used as framebuffer depth/stencil attachment"] - pub const DEPTH_STENCIL_ATTACHMENT: Self = ImageUsageFlags(0b100000); + pub const DEPTH_STENCIL_ATTACHMENT: Self = ImageUsageFlags(0b10_0000); #[doc = "Image data not needed outside of rendering"] - pub const TRANSIENT_ATTACHMENT: Self = ImageUsageFlags(0b1000000); + pub const TRANSIENT_ATTACHMENT: Self = ImageUsageFlags(0b100_0000); #[doc = "Can be used as framebuffer input attachment"] - pub const INPUT_ATTACHMENT: Self = ImageUsageFlags(0b10000000); + pub const INPUT_ATTACHMENT: Self = ImageUsageFlags(0b1000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ImageCreateFlags(pub(crate) Flags); -vk_bitflags_wrapped!(ImageCreateFlags, 0b11111, Flags); +vk_bitflags_wrapped!(ImageCreateFlags, 0b1_1111, Flags); impl ImageCreateFlags { #[doc = "Image should support sparse backing"] pub const SPARSE_BINDING: Self = ImageCreateFlags(0b1); @@ -43614,23 +55020,23 @@ impl ImageCreateFlags { #[doc = "Allows image views to have different format than the base image"] pub const MUTABLE_FORMAT: Self = ImageCreateFlags(0b1000); #[doc = "Allows creating image views with cube type from the created image"] - pub const CUBE_COMPATIBLE: Self = ImageCreateFlags(0b10000); + pub const CUBE_COMPATIBLE: Self = ImageCreateFlags(0b1_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ImageViewCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(ImageViewCreateFlags, 0b0, Flags); impl ImageViewCreateFlags {} #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] 
+#[doc = ""] pub struct SamplerCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(SamplerCreateFlags, 0b0, Flags); impl SamplerCreateFlags {} #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(PipelineCreateFlags, 0b111, Flags); impl PipelineCreateFlags { @@ -43640,7 +55046,13 @@ impl PipelineCreateFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] +pub struct PipelineShaderStageCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(PipelineShaderStageCreateFlags, 0b0, Flags); +impl PipelineShaderStageCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] pub struct ColorComponentFlags(pub(crate) Flags); vk_bitflags_wrapped!(ColorComponentFlags, 0b1111, Flags); impl ColorComponentFlags { @@ -43651,7 +55063,7 @@ impl ColorComponentFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct FenceCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(FenceCreateFlags, 0b1, Flags); impl FenceCreateFlags { @@ -43659,9 +55071,15 @@ impl FenceCreateFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] +pub struct SemaphoreCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(SemaphoreCreateFlags, 0b0, Flags); +impl SemaphoreCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] pub struct FormatFeatureFlags(pub(crate) Flags); -vk_bitflags_wrapped!(FormatFeatureFlags, 0b1111111111111, Flags); +vk_bitflags_wrapped!(FormatFeatureFlags, 0b1_1111_1111_1111, Flags); impl FormatFeatureFlags { #[doc = "Format can be used for sampled images (SAMPLED_IMAGE and COMBINED_IMAGE_SAMPLER descriptor types)"] pub const SAMPLED_IMAGE: Self = 
FormatFeatureFlags(0b1); @@ -43672,27 +55090,27 @@ impl FormatFeatureFlags { #[doc = "Format can be used for uniform texel buffers (TBOs)"] pub const UNIFORM_TEXEL_BUFFER: Self = FormatFeatureFlags(0b1000); #[doc = "Format can be used for storage texel buffers (IBOs)"] - pub const STORAGE_TEXEL_BUFFER: Self = FormatFeatureFlags(0b10000); + pub const STORAGE_TEXEL_BUFFER: Self = FormatFeatureFlags(0b1_0000); #[doc = "Format supports atomic operations in case it is used for storage texel buffers"] - pub const STORAGE_TEXEL_BUFFER_ATOMIC: Self = FormatFeatureFlags(0b100000); + pub const STORAGE_TEXEL_BUFFER_ATOMIC: Self = FormatFeatureFlags(0b10_0000); #[doc = "Format can be used for vertex buffers (VBOs)"] - pub const VERTEX_BUFFER: Self = FormatFeatureFlags(0b1000000); + pub const VERTEX_BUFFER: Self = FormatFeatureFlags(0b100_0000); #[doc = "Format can be used for color attachment images"] - pub const COLOR_ATTACHMENT: Self = FormatFeatureFlags(0b10000000); + pub const COLOR_ATTACHMENT: Self = FormatFeatureFlags(0b1000_0000); #[doc = "Format supports blending in case it is used for color attachment images"] - pub const COLOR_ATTACHMENT_BLEND: Self = FormatFeatureFlags(0b100000000); + pub const COLOR_ATTACHMENT_BLEND: Self = FormatFeatureFlags(0b1_0000_0000); #[doc = "Format can be used for depth/stencil attachment images"] - pub const DEPTH_STENCIL_ATTACHMENT: Self = FormatFeatureFlags(0b1000000000); + pub const DEPTH_STENCIL_ATTACHMENT: Self = FormatFeatureFlags(0b10_0000_0000); #[doc = "Format can be used as the source image of blits with vkCmdBlitImage"] - pub const BLIT_SRC: Self = FormatFeatureFlags(0b10000000000); + pub const BLIT_SRC: Self = FormatFeatureFlags(0b100_0000_0000); #[doc = "Format can be used as the destination image of blits with vkCmdBlitImage"] - pub const BLIT_DST: Self = FormatFeatureFlags(0b100000000000); + pub const BLIT_DST: Self = FormatFeatureFlags(0b1000_0000_0000); #[doc = "Format can be filtered with VK_FILTER_LINEAR when being 
sampled"] - pub const SAMPLED_IMAGE_FILTER_LINEAR: Self = FormatFeatureFlags(0b1000000000000); + pub const SAMPLED_IMAGE_FILTER_LINEAR: Self = FormatFeatureFlags(0b1_0000_0000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct QueryControlFlags(pub(crate) Flags); vk_bitflags_wrapped!(QueryControlFlags, 0b1, Flags); impl QueryControlFlags { @@ -43701,7 +55119,7 @@ impl QueryControlFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct QueryResultFlags(pub(crate) Flags); vk_bitflags_wrapped!(QueryResultFlags, 0b1111, Flags); impl QueryResultFlags { @@ -43716,7 +55134,7 @@ impl QueryResultFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct CommandBufferUsageFlags(pub(crate) Flags); vk_bitflags_wrapped!(CommandBufferUsageFlags, 0b111, Flags); impl CommandBufferUsageFlags { @@ -43727,9 +55145,9 @@ impl CommandBufferUsageFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct QueryPipelineStatisticFlags(pub(crate) Flags); -vk_bitflags_wrapped!(QueryPipelineStatisticFlags, 0b11111111111, Flags); +vk_bitflags_wrapped!(QueryPipelineStatisticFlags, 0b111_1111_1111, Flags); impl QueryPipelineStatisticFlags { #[doc = "Optional"] pub const INPUT_ASSEMBLY_VERTICES: Self = QueryPipelineStatisticFlags(0b1); @@ -43740,24 +55158,25 @@ impl QueryPipelineStatisticFlags { #[doc = "Optional"] pub const GEOMETRY_SHADER_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b1000); #[doc = "Optional"] - pub const GEOMETRY_SHADER_PRIMITIVES: Self = QueryPipelineStatisticFlags(0b10000); + pub const GEOMETRY_SHADER_PRIMITIVES: Self = QueryPipelineStatisticFlags(0b1_0000); #[doc = "Optional"] - pub const CLIPPING_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b100000); + pub const 
CLIPPING_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b10_0000); #[doc = "Optional"] - pub const CLIPPING_PRIMITIVES: Self = QueryPipelineStatisticFlags(0b1000000); + pub const CLIPPING_PRIMITIVES: Self = QueryPipelineStatisticFlags(0b100_0000); #[doc = "Optional"] - pub const FRAGMENT_SHADER_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b10000000); + pub const FRAGMENT_SHADER_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b1000_0000); #[doc = "Optional"] - pub const TESSELLATION_CONTROL_SHADER_PATCHES: Self = QueryPipelineStatisticFlags(0b100000000); + pub const TESSELLATION_CONTROL_SHADER_PATCHES: Self = + QueryPipelineStatisticFlags(0b1_0000_0000); #[doc = "Optional"] pub const TESSELLATION_EVALUATION_SHADER_INVOCATIONS: Self = - QueryPipelineStatisticFlags(0b1000000000); + QueryPipelineStatisticFlags(0b10_0000_0000); #[doc = "Optional"] - pub const COMPUTE_SHADER_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b10000000000); + pub const COMPUTE_SHADER_INVOCATIONS: Self = QueryPipelineStatisticFlags(0b100_0000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ImageAspectFlags(pub(crate) Flags); vk_bitflags_wrapped!(ImageAspectFlags, 0b1111, Flags); impl ImageAspectFlags { @@ -43768,7 +55187,7 @@ impl ImageAspectFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct SparseImageFormatFlags(pub(crate) Flags); vk_bitflags_wrapped!(SparseImageFormatFlags, 0b111, Flags); impl SparseImageFormatFlags { @@ -43781,7 +55200,7 @@ impl SparseImageFormatFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct SparseMemoryBindFlags(pub(crate) Flags); vk_bitflags_wrapped!(SparseMemoryBindFlags, 0b1, Flags); impl SparseMemoryBindFlags { @@ -43790,9 +55209,9 @@ impl SparseMemoryBindFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, 
PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PipelineStageFlags(pub(crate) Flags); -vk_bitflags_wrapped!(PipelineStageFlags, 0b11111111111111111, Flags); +vk_bitflags_wrapped!(PipelineStageFlags, 0b1_1111_1111_1111_1111, Flags); impl PipelineStageFlags { #[doc = "Before subsequent commands are processed"] pub const TOP_OF_PIPE: Self = PipelineStageFlags(0b1); @@ -43803,35 +55222,35 @@ impl PipelineStageFlags { #[doc = "Vertex shading"] pub const VERTEX_SHADER: Self = PipelineStageFlags(0b1000); #[doc = "Tessellation control shading"] - pub const TESSELLATION_CONTROL_SHADER: Self = PipelineStageFlags(0b10000); + pub const TESSELLATION_CONTROL_SHADER: Self = PipelineStageFlags(0b1_0000); #[doc = "Tessellation evaluation shading"] - pub const TESSELLATION_EVALUATION_SHADER: Self = PipelineStageFlags(0b100000); + pub const TESSELLATION_EVALUATION_SHADER: Self = PipelineStageFlags(0b10_0000); #[doc = "Geometry shading"] - pub const GEOMETRY_SHADER: Self = PipelineStageFlags(0b1000000); + pub const GEOMETRY_SHADER: Self = PipelineStageFlags(0b100_0000); #[doc = "Fragment shading"] - pub const FRAGMENT_SHADER: Self = PipelineStageFlags(0b10000000); + pub const FRAGMENT_SHADER: Self = PipelineStageFlags(0b1000_0000); #[doc = "Early fragment (depth and stencil) tests"] - pub const EARLY_FRAGMENT_TESTS: Self = PipelineStageFlags(0b100000000); + pub const EARLY_FRAGMENT_TESTS: Self = PipelineStageFlags(0b1_0000_0000); #[doc = "Late fragment (depth and stencil) tests"] - pub const LATE_FRAGMENT_TESTS: Self = PipelineStageFlags(0b1000000000); + pub const LATE_FRAGMENT_TESTS: Self = PipelineStageFlags(0b10_0000_0000); #[doc = "Color attachment writes"] - pub const COLOR_ATTACHMENT_OUTPUT: Self = PipelineStageFlags(0b10000000000); + pub const COLOR_ATTACHMENT_OUTPUT: Self = PipelineStageFlags(0b100_0000_0000); #[doc = "Compute shading"] - pub const COMPUTE_SHADER: Self = PipelineStageFlags(0b100000000000); + pub const COMPUTE_SHADER: Self = 
PipelineStageFlags(0b1000_0000_0000); #[doc = "Transfer/copy operations"] - pub const TRANSFER: Self = PipelineStageFlags(0b1000000000000); + pub const TRANSFER: Self = PipelineStageFlags(0b1_0000_0000_0000); #[doc = "After previous commands have completed"] - pub const BOTTOM_OF_PIPE: Self = PipelineStageFlags(0b10000000000000); + pub const BOTTOM_OF_PIPE: Self = PipelineStageFlags(0b10_0000_0000_0000); #[doc = "Indicates host (CPU) is a source/sink of the dependency"] - pub const HOST: Self = PipelineStageFlags(0b100000000000000); + pub const HOST: Self = PipelineStageFlags(0b100_0000_0000_0000); #[doc = "All stages of the graphics pipeline"] - pub const ALL_GRAPHICS: Self = PipelineStageFlags(0b1000000000000000); + pub const ALL_GRAPHICS: Self = PipelineStageFlags(0b1000_0000_0000_0000); #[doc = "All stages supported on the queue"] - pub const ALL_COMMANDS: Self = PipelineStageFlags(0b10000000000000000); + pub const ALL_COMMANDS: Self = PipelineStageFlags(0b1_0000_0000_0000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct CommandPoolCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(CommandPoolCreateFlags, 0b11, Flags); impl CommandPoolCreateFlags { @@ -43842,7 +55261,7 @@ impl CommandPoolCreateFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct CommandPoolResetFlags(pub(crate) Flags); vk_bitflags_wrapped!(CommandPoolResetFlags, 0b1, Flags); impl CommandPoolResetFlags { @@ -43851,7 +55270,7 @@ impl CommandPoolResetFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct CommandBufferResetFlags(pub(crate) Flags); vk_bitflags_wrapped!(CommandBufferResetFlags, 0b1, Flags); impl CommandBufferResetFlags { @@ -43860,9 +55279,9 @@ impl CommandBufferResetFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] 
-#[doc = ""] +#[doc = ""] pub struct SampleCountFlags(pub(crate) Flags); -vk_bitflags_wrapped!(SampleCountFlags, 0b1111111, Flags); +vk_bitflags_wrapped!(SampleCountFlags, 0b111_1111, Flags); impl SampleCountFlags { #[doc = "Sample count 1 supported"] pub const TYPE_1: Self = SampleCountFlags(0b1); @@ -43873,15 +55292,15 @@ impl SampleCountFlags { #[doc = "Sample count 8 supported"] pub const TYPE_8: Self = SampleCountFlags(0b1000); #[doc = "Sample count 16 supported"] - pub const TYPE_16: Self = SampleCountFlags(0b10000); + pub const TYPE_16: Self = SampleCountFlags(0b1_0000); #[doc = "Sample count 32 supported"] - pub const TYPE_32: Self = SampleCountFlags(0b100000); + pub const TYPE_32: Self = SampleCountFlags(0b10_0000); #[doc = "Sample count 64 supported"] - pub const TYPE_64: Self = SampleCountFlags(0b1000000); + pub const TYPE_64: Self = SampleCountFlags(0b100_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct AttachmentDescriptionFlags(pub(crate) Flags); vk_bitflags_wrapped!(AttachmentDescriptionFlags, 0b1, Flags); impl AttachmentDescriptionFlags { @@ -43890,7 +55309,7 @@ impl AttachmentDescriptionFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct StencilFaceFlags(pub(crate) Flags); vk_bitflags_wrapped!(StencilFaceFlags, 0b11, Flags); impl StencilFaceFlags { @@ -43899,11 +55318,11 @@ impl StencilFaceFlags { #[doc = "Back face"] pub const BACK: Self = StencilFaceFlags(0b10); #[doc = "Front and back faces"] - pub const STENCIL_FRONT_AND_BACK: Self = StencilFaceFlags(0x00000003); + pub const FRONT_AND_BACK: Self = StencilFaceFlags(0x0000_0003); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DescriptorPoolCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(DescriptorPoolCreateFlags, 0b1, Flags); impl DescriptorPoolCreateFlags { @@ 
-43912,7 +55331,7 @@ impl DescriptorPoolCreateFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DependencyFlags(pub(crate) Flags); vk_bitflags_wrapped!(DependencyFlags, 0b1, Flags); impl DependencyFlags { @@ -43921,7 +55340,15 @@ impl DependencyFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] +pub struct SemaphoreWaitFlags(pub(crate) Flags); +vk_bitflags_wrapped!(SemaphoreWaitFlags, 0b1, Flags); +impl SemaphoreWaitFlags { + pub const ANY: Self = SemaphoreWaitFlags(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] pub struct DisplayPlaneAlphaFlagsKHR(pub(crate) Flags); vk_bitflags_wrapped!(DisplayPlaneAlphaFlagsKHR, 0b1111, Flags); impl DisplayPlaneAlphaFlagsKHR { @@ -43932,7 +55359,7 @@ impl DisplayPlaneAlphaFlagsKHR { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct CompositeAlphaFlagsKHR(pub(crate) Flags); vk_bitflags_wrapped!(CompositeAlphaFlagsKHR, 0b1111, Flags); impl CompositeAlphaFlagsKHR { @@ -43943,35 +55370,43 @@ impl CompositeAlphaFlagsKHR { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct SurfaceTransformFlagsKHR(pub(crate) Flags); -vk_bitflags_wrapped!(SurfaceTransformFlagsKHR, 0b111111111, Flags); +vk_bitflags_wrapped!(SurfaceTransformFlagsKHR, 0b1_1111_1111, Flags); impl SurfaceTransformFlagsKHR { pub const IDENTITY: Self = SurfaceTransformFlagsKHR(0b1); pub const ROTATE_90: Self = SurfaceTransformFlagsKHR(0b10); pub const ROTATE_180: Self = SurfaceTransformFlagsKHR(0b100); pub const ROTATE_270: Self = SurfaceTransformFlagsKHR(0b1000); - pub const HORIZONTAL_MIRROR: Self = SurfaceTransformFlagsKHR(0b10000); - pub const HORIZONTAL_MIRROR_ROTATE_90: Self = SurfaceTransformFlagsKHR(0b100000); - pub const 
HORIZONTAL_MIRROR_ROTATE_180: Self = SurfaceTransformFlagsKHR(0b1000000); - pub const HORIZONTAL_MIRROR_ROTATE_270: Self = SurfaceTransformFlagsKHR(0b10000000); - pub const INHERIT: Self = SurfaceTransformFlagsKHR(0b100000000); + pub const HORIZONTAL_MIRROR: Self = SurfaceTransformFlagsKHR(0b1_0000); + pub const HORIZONTAL_MIRROR_ROTATE_90: Self = SurfaceTransformFlagsKHR(0b10_0000); + pub const HORIZONTAL_MIRROR_ROTATE_180: Self = SurfaceTransformFlagsKHR(0b100_0000); + pub const HORIZONTAL_MIRROR_ROTATE_270: Self = SurfaceTransformFlagsKHR(0b1000_0000); + pub const INHERIT: Self = SurfaceTransformFlagsKHR(0b1_0000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] +pub struct SwapchainImageUsageFlagsANDROID(pub(crate) Flags); +vk_bitflags_wrapped!(SwapchainImageUsageFlagsANDROID, 0b1, Flags); +impl SwapchainImageUsageFlagsANDROID { + pub const SHARED: Self = SwapchainImageUsageFlagsANDROID(0b1); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] pub struct DebugReportFlagsEXT(pub(crate) Flags); -vk_bitflags_wrapped!(DebugReportFlagsEXT, 0b11111, Flags); +vk_bitflags_wrapped!(DebugReportFlagsEXT, 0b1_1111, Flags); impl DebugReportFlagsEXT { pub const INFORMATION: Self = DebugReportFlagsEXT(0b1); pub const WARNING: Self = DebugReportFlagsEXT(0b10); pub const PERFORMANCE_WARNING: Self = DebugReportFlagsEXT(0b100); pub const ERROR: Self = DebugReportFlagsEXT(0b1000); - pub const DEBUG: Self = DebugReportFlagsEXT(0b10000); + pub const DEBUG: Self = DebugReportFlagsEXT(0b1_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ExternalMemoryHandleTypeFlagsNV(pub(crate) Flags); vk_bitflags_wrapped!(ExternalMemoryHandleTypeFlagsNV, 0b1111, Flags); impl ExternalMemoryHandleTypeFlagsNV { @@ -43986,7 +55421,7 @@ impl ExternalMemoryHandleTypeFlagsNV { } #[repr(transparent)] #[derive(Clone, 
Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ExternalMemoryFeatureFlagsNV(pub(crate) Flags); vk_bitflags_wrapped!(ExternalMemoryFeatureFlagsNV, 0b111, Flags); impl ExternalMemoryFeatureFlagsNV { @@ -43996,9 +55431,9 @@ impl ExternalMemoryFeatureFlagsNV { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct SubgroupFeatureFlags(pub(crate) Flags); -vk_bitflags_wrapped!(SubgroupFeatureFlags, 0b11111111, Flags); +vk_bitflags_wrapped!(SubgroupFeatureFlags, 0b1111_1111, Flags); impl SubgroupFeatureFlags { #[doc = "Basic subgroup operations"] pub const BASIC: Self = SubgroupFeatureFlags(0b1); @@ -44009,45 +55444,43 @@ impl SubgroupFeatureFlags { #[doc = "Ballot subgroup operations"] pub const BALLOT: Self = SubgroupFeatureFlags(0b1000); #[doc = "Shuffle subgroup operations"] - pub const SHUFFLE: Self = SubgroupFeatureFlags(0b10000); + pub const SHUFFLE: Self = SubgroupFeatureFlags(0b1_0000); #[doc = "Shuffle relative subgroup operations"] - pub const SHUFFLE_RELATIVE: Self = SubgroupFeatureFlags(0b100000); + pub const SHUFFLE_RELATIVE: Self = SubgroupFeatureFlags(0b10_0000); #[doc = "Clustered subgroup operations"] - pub const CLUSTERED: Self = SubgroupFeatureFlags(0b1000000); + pub const CLUSTERED: Self = SubgroupFeatureFlags(0b100_0000); #[doc = "Quad subgroup operations"] - pub const QUAD: Self = SubgroupFeatureFlags(0b10000000); + pub const QUAD: Self = SubgroupFeatureFlags(0b1000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct IndirectCommandsLayoutUsageFlagsNVX(pub(crate) Flags); -vk_bitflags_wrapped!(IndirectCommandsLayoutUsageFlagsNVX, 0b1111, Flags); -impl IndirectCommandsLayoutUsageFlagsNVX { - pub const UNORDERED_SEQUENCES: Self = IndirectCommandsLayoutUsageFlagsNVX(0b1); - pub const SPARSE_SEQUENCES: Self = IndirectCommandsLayoutUsageFlagsNVX(0b10); - pub const EMPTY_EXECUTIONS: Self = 
IndirectCommandsLayoutUsageFlagsNVX(0b100); - pub const INDEXED_SEQUENCES: Self = IndirectCommandsLayoutUsageFlagsNVX(0b1000); +#[doc = ""] +pub struct IndirectCommandsLayoutUsageFlagsNV(pub(crate) Flags); +vk_bitflags_wrapped!(IndirectCommandsLayoutUsageFlagsNV, 0b111, Flags); +impl IndirectCommandsLayoutUsageFlagsNV { + pub const EXPLICIT_PREPROCESS: Self = IndirectCommandsLayoutUsageFlagsNV(0b1); + pub const INDEXED_SEQUENCES: Self = IndirectCommandsLayoutUsageFlagsNV(0b10); + pub const UNORDERED_SEQUENCES: Self = IndirectCommandsLayoutUsageFlagsNV(0b100); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct ObjectEntryUsageFlagsNVX(pub(crate) Flags); -vk_bitflags_wrapped!(ObjectEntryUsageFlagsNVX, 0b11, Flags); -impl ObjectEntryUsageFlagsNVX { - pub const GRAPHICS: Self = ObjectEntryUsageFlagsNVX(0b1); - pub const COMPUTE: Self = ObjectEntryUsageFlagsNVX(0b10); +#[doc = ""] +pub struct IndirectStateFlagsNV(pub(crate) Flags); +vk_bitflags_wrapped!(IndirectStateFlagsNV, 0b1, Flags); +impl IndirectStateFlagsNV { + pub const FLAG_FRONTFACE: Self = IndirectStateFlagsNV(0b1); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DescriptorSetLayoutCreateFlags(pub(crate) Flags); vk_bitflags_wrapped!(DescriptorSetLayoutCreateFlags, 0b0, Flags); impl DescriptorSetLayoutCreateFlags {} #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ExternalMemoryHandleTypeFlags(pub(crate) Flags); -vk_bitflags_wrapped!(ExternalMemoryHandleTypeFlags, 0b1111111, Flags); +vk_bitflags_wrapped!(ExternalMemoryHandleTypeFlags, 0b111_1111, Flags); impl ExternalMemoryHandleTypeFlags { pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD: Self = ExternalMemoryHandleTypeFlags(0b1); pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32: Self = ExternalMemoryHandleTypeFlags(0b10); @@ -44056,15 +55489,15 @@ impl 
ExternalMemoryHandleTypeFlags { pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE: Self = ExternalMemoryHandleTypeFlags(0b1000); pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT: Self = - ExternalMemoryHandleTypeFlags(0b10000); + ExternalMemoryHandleTypeFlags(0b1_0000); pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP: Self = - ExternalMemoryHandleTypeFlags(0b100000); + ExternalMemoryHandleTypeFlags(0b10_0000); pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE: Self = - ExternalMemoryHandleTypeFlags(0b1000000); + ExternalMemoryHandleTypeFlags(0b100_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ExternalMemoryFeatureFlags(pub(crate) Flags); vk_bitflags_wrapped!(ExternalMemoryFeatureFlags, 0b111, Flags); impl ExternalMemoryFeatureFlags { @@ -44074,9 +55507,9 @@ impl ExternalMemoryFeatureFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ExternalSemaphoreHandleTypeFlags(pub(crate) Flags); -vk_bitflags_wrapped!(ExternalSemaphoreHandleTypeFlags, 0b11111, Flags); +vk_bitflags_wrapped!(ExternalSemaphoreHandleTypeFlags, 0b1_1111, Flags); impl ExternalSemaphoreHandleTypeFlags { pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD: Self = ExternalSemaphoreHandleTypeFlags(0b1); @@ -44087,11 +55520,11 @@ impl ExternalSemaphoreHandleTypeFlags { pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE: Self = ExternalSemaphoreHandleTypeFlags(0b1000); pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD: Self = - ExternalSemaphoreHandleTypeFlags(0b10000); + ExternalSemaphoreHandleTypeFlags(0b1_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ExternalSemaphoreFeatureFlags(pub(crate) Flags); vk_bitflags_wrapped!(ExternalSemaphoreFeatureFlags, 0b11, Flags); impl ExternalSemaphoreFeatureFlags { @@ -44100,7 +55533,7 @@ impl 
ExternalSemaphoreFeatureFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct SemaphoreImportFlags(pub(crate) Flags); vk_bitflags_wrapped!(SemaphoreImportFlags, 0b1, Flags); impl SemaphoreImportFlags { @@ -44108,7 +55541,7 @@ impl SemaphoreImportFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ExternalFenceHandleTypeFlags(pub(crate) Flags); vk_bitflags_wrapped!(ExternalFenceHandleTypeFlags, 0b1111, Flags); impl ExternalFenceHandleTypeFlags { @@ -44120,7 +55553,7 @@ impl ExternalFenceHandleTypeFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ExternalFenceFeatureFlags(pub(crate) Flags); vk_bitflags_wrapped!(ExternalFenceFeatureFlags, 0b11, Flags); impl ExternalFenceFeatureFlags { @@ -44129,7 +55562,7 @@ impl ExternalFenceFeatureFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct FenceImportFlags(pub(crate) Flags); vk_bitflags_wrapped!(FenceImportFlags, 0b1, Flags); impl FenceImportFlags { @@ -44137,7 +55570,7 @@ impl FenceImportFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct SurfaceCounterFlagsEXT(pub(crate) Flags); vk_bitflags_wrapped!(SurfaceCounterFlagsEXT, 0b1, Flags); impl SurfaceCounterFlagsEXT { @@ -44145,7 +55578,7 @@ impl SurfaceCounterFlagsEXT { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct PeerMemoryFeatureFlags(pub(crate) Flags); vk_bitflags_wrapped!(PeerMemoryFeatureFlags, 0b1111, Flags); impl PeerMemoryFeatureFlags { @@ -44160,7 +55593,7 @@ impl PeerMemoryFeatureFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct 
MemoryAllocateFlags(pub(crate) Flags); vk_bitflags_wrapped!(MemoryAllocateFlags, 0b1, Flags); impl MemoryAllocateFlags { @@ -44169,7 +55602,7 @@ impl MemoryAllocateFlags { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DeviceGroupPresentModeFlagsKHR(pub(crate) Flags); vk_bitflags_wrapped!(DeviceGroupPresentModeFlagsKHR, 0b1111, Flags); impl DeviceGroupPresentModeFlagsKHR { @@ -44184,30 +55617,30 @@ impl DeviceGroupPresentModeFlagsKHR { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct SwapchainCreateFlagsKHR(pub(crate) Flags); vk_bitflags_wrapped!(SwapchainCreateFlagsKHR, 0b0, Flags); impl SwapchainCreateFlagsKHR {} #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct SubpassDescriptionFlags(pub(crate) Flags); vk_bitflags_wrapped!(SubpassDescriptionFlags, 0b0, Flags); impl SubpassDescriptionFlags {} #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct DebugUtilsMessageSeverityFlagsEXT(pub(crate) Flags); -vk_bitflags_wrapped!(DebugUtilsMessageSeverityFlagsEXT, 0b1000100010001, Flags); +vk_bitflags_wrapped!(DebugUtilsMessageSeverityFlagsEXT, 0b1_0001_0001_0001, Flags); impl DebugUtilsMessageSeverityFlagsEXT { pub const VERBOSE: Self = DebugUtilsMessageSeverityFlagsEXT(0b1); - pub const INFO: Self = DebugUtilsMessageSeverityFlagsEXT(0b10000); - pub const WARNING: Self = DebugUtilsMessageSeverityFlagsEXT(0b100000000); - pub const ERROR: Self = DebugUtilsMessageSeverityFlagsEXT(0b1000000000000); + pub const INFO: Self = DebugUtilsMessageSeverityFlagsEXT(0b1_0000); + pub const WARNING: Self = DebugUtilsMessageSeverityFlagsEXT(0b1_0000_0000); + pub const ERROR: Self = DebugUtilsMessageSeverityFlagsEXT(0b1_0000_0000_0000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, 
Hash)] -#[doc = ""] +#[doc = ""] pub struct DebugUtilsMessageTypeFlagsEXT(pub(crate) Flags); vk_bitflags_wrapped!(DebugUtilsMessageTypeFlagsEXT, 0b111, Flags); impl DebugUtilsMessageTypeFlagsEXT { @@ -44217,18 +55650,18 @@ impl DebugUtilsMessageTypeFlagsEXT { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct DescriptorBindingFlagsEXT(pub(crate) Flags); -vk_bitflags_wrapped!(DescriptorBindingFlagsEXT, 0b1111, Flags); -impl DescriptorBindingFlagsEXT { - pub const UPDATE_AFTER_BIND: Self = DescriptorBindingFlagsEXT(0b1); - pub const UPDATE_UNUSED_WHILE_PENDING: Self = DescriptorBindingFlagsEXT(0b10); - pub const PARTIALLY_BOUND: Self = DescriptorBindingFlagsEXT(0b100); - pub const VARIABLE_DESCRIPTOR_COUNT: Self = DescriptorBindingFlagsEXT(0b1000); +#[doc = ""] +pub struct DescriptorBindingFlags(pub(crate) Flags); +vk_bitflags_wrapped!(DescriptorBindingFlags, 0b1111, Flags); +impl DescriptorBindingFlags { + pub const UPDATE_AFTER_BIND: Self = DescriptorBindingFlags(0b1); + pub const UPDATE_UNUSED_WHILE_PENDING: Self = DescriptorBindingFlags(0b10); + pub const PARTIALLY_BOUND: Self = DescriptorBindingFlags(0b100); + pub const VARIABLE_DESCRIPTOR_COUNT: Self = DescriptorBindingFlags(0b1000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] +#[doc = ""] pub struct ConditionalRenderingFlagsEXT(pub(crate) Flags); vk_bitflags_wrapped!(ConditionalRenderingFlagsEXT, 0b1, Flags); impl ConditionalRenderingFlagsEXT { @@ -44236,70 +55669,142 @@ impl ConditionalRenderingFlagsEXT { } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct ResolveModeFlagsKHR(pub(crate) Flags); -vk_bitflags_wrapped!(ResolveModeFlagsKHR, 0b1111, Flags); -impl ResolveModeFlagsKHR { - pub const NONE: Self = ResolveModeFlagsKHR(0); - pub const SAMPLE_ZERO: Self = ResolveModeFlagsKHR(0b1); - pub const AVERAGE: Self = ResolveModeFlagsKHR(0b10); - pub 
const MIN: Self = ResolveModeFlagsKHR(0b100); - pub const MAX: Self = ResolveModeFlagsKHR(0b1000); +#[doc = ""] +pub struct ResolveModeFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ResolveModeFlags, 0b1111, Flags); +impl ResolveModeFlags { + pub const NONE: Self = ResolveModeFlags(0); + pub const SAMPLE_ZERO: Self = ResolveModeFlags(0b1); + pub const AVERAGE: Self = ResolveModeFlags(0b10); + pub const MIN: Self = ResolveModeFlags(0b100); + pub const MAX: Self = ResolveModeFlags(0b1000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct GeometryInstanceFlagsNV(pub(crate) Flags); -vk_bitflags_wrapped!(GeometryInstanceFlagsNV, 0b1111, Flags); -impl GeometryInstanceFlagsNV { - pub const TRIANGLE_CULL_DISABLE: Self = GeometryInstanceFlagsNV(0b1); - pub const TRIANGLE_FRONT_COUNTERCLOCKWISE: Self = GeometryInstanceFlagsNV(0b10); - pub const FORCE_OPAQUE: Self = GeometryInstanceFlagsNV(0b100); - pub const FORCE_NO_OPAQUE: Self = GeometryInstanceFlagsNV(0b1000); +#[doc = ""] +pub struct GeometryInstanceFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(GeometryInstanceFlagsKHR, 0b1111, Flags); +impl GeometryInstanceFlagsKHR { + pub const TRIANGLE_FACING_CULL_DISABLE: Self = GeometryInstanceFlagsKHR(0b1); + pub const TRIANGLE_FRONT_COUNTERCLOCKWISE: Self = GeometryInstanceFlagsKHR(0b10); + pub const FORCE_OPAQUE: Self = GeometryInstanceFlagsKHR(0b100); + pub const FORCE_NO_OPAQUE: Self = GeometryInstanceFlagsKHR(0b1000); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct GeometryFlagsNV(pub(crate) Flags); -vk_bitflags_wrapped!(GeometryFlagsNV, 0b11, Flags); -impl GeometryFlagsNV { - pub const OPAQUE: Self = GeometryFlagsNV(0b1); - pub const NO_DUPLICATE_ANY_HIT_INVOCATION: Self = GeometryFlagsNV(0b10); +#[doc = ""] +pub struct GeometryFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(GeometryFlagsKHR, 0b11, Flags); +impl GeometryFlagsKHR { + pub const OPAQUE: 
Self = GeometryFlagsKHR(0b1); + pub const NO_DUPLICATE_ANY_HIT_INVOCATION: Self = GeometryFlagsKHR(0b10); } #[repr(transparent)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[doc = ""] -pub struct BuildAccelerationStructureFlagsNV(pub(crate) Flags); -vk_bitflags_wrapped!(BuildAccelerationStructureFlagsNV, 0b11111, Flags); -impl BuildAccelerationStructureFlagsNV { - pub const ALLOW_UPDATE: Self = BuildAccelerationStructureFlagsNV(0b1); - pub const ALLOW_COMPACTION: Self = BuildAccelerationStructureFlagsNV(0b10); - pub const PREFER_FAST_TRACE: Self = BuildAccelerationStructureFlagsNV(0b100); - pub const PREFER_FAST_BUILD: Self = BuildAccelerationStructureFlagsNV(0b1000); - pub const LOW_MEMORY: Self = BuildAccelerationStructureFlagsNV(0b10000); +#[doc = ""] +pub struct BuildAccelerationStructureFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(BuildAccelerationStructureFlagsKHR, 0b1_1111, Flags); +impl BuildAccelerationStructureFlagsKHR { + pub const ALLOW_UPDATE: Self = BuildAccelerationStructureFlagsKHR(0b1); + pub const ALLOW_COMPACTION: Self = BuildAccelerationStructureFlagsKHR(0b10); + pub const PREFER_FAST_TRACE: Self = BuildAccelerationStructureFlagsKHR(0b100); + pub const PREFER_FAST_BUILD: Self = BuildAccelerationStructureFlagsKHR(0b1000); + pub const LOW_MEMORY: Self = BuildAccelerationStructureFlagsKHR(0b1_0000); } -pub const MAX_PHYSICAL_DEVICE_NAME_SIZE: usize = 256; -pub const UUID_SIZE: usize = 16; -pub const LUID_SIZE: usize = 8; -pub const MAX_EXTENSION_NAME_SIZE: usize = 256; -pub const MAX_DESCRIPTION_SIZE: usize = 256; -pub const MAX_MEMORY_TYPES: usize = 32; -pub const MAX_MEMORY_HEAPS: usize = 16; -pub const LOD_CLAMP_NONE: f32 = 1000.00; -pub const REMAINING_MIP_LEVELS: u32 = !0; -pub const REMAINING_ARRAY_LAYERS: u32 = !0; -pub const WHOLE_SIZE: u64 = !0; -pub const ATTACHMENT_UNUSED: u32 = !0; -pub const TRUE: Bool32 = 1; -pub const FALSE: Bool32 = 0; -pub const QUEUE_FAMILY_IGNORED: u32 = !0; -pub const 
QUEUE_FAMILY_EXTERNAL: u32 = !0 - 1; -pub const QUEUE_FAMILY_FOREIGN_EXT: u32 = !0 - 2; -pub const SUBPASS_EXTERNAL: u32 = !0; -pub const MAX_DEVICE_GROUP_SIZE: usize = 32; -pub const MAX_DRIVER_NAME_SIZE_KHR: usize = 256; -pub const MAX_DRIVER_INFO_SIZE_KHR: usize = 256; -pub const SHADER_UNUSED_NV: u32 = !0; +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct FramebufferCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(FramebufferCreateFlags, 0b0, Flags); +impl FramebufferCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct DeviceDiagnosticsConfigFlagsNV(pub(crate) Flags); +vk_bitflags_wrapped!(DeviceDiagnosticsConfigFlagsNV, 0b111, Flags); +impl DeviceDiagnosticsConfigFlagsNV { + pub const ENABLE_SHADER_DEBUG_INFO: Self = DeviceDiagnosticsConfigFlagsNV(0b1); + pub const ENABLE_RESOURCE_TRACKING: Self = DeviceDiagnosticsConfigFlagsNV(0b10); + pub const ENABLE_AUTOMATIC_CHECKPOINTS: Self = DeviceDiagnosticsConfigFlagsNV(0b100); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineCreationFeedbackFlagsEXT(pub(crate) Flags); +vk_bitflags_wrapped!(PipelineCreationFeedbackFlagsEXT, 0b111, Flags); +impl PipelineCreationFeedbackFlagsEXT { + pub const VALID: Self = PipelineCreationFeedbackFlagsEXT(0b1); + pub const APPLICATION_PIPELINE_CACHE_HIT: Self = PipelineCreationFeedbackFlagsEXT(0b10); + pub const BASE_PIPELINE_ACCELERATION: Self = PipelineCreationFeedbackFlagsEXT(0b100); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PerformanceCounterDescriptionFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(PerformanceCounterDescriptionFlagsKHR, 0b11, Flags); +impl PerformanceCounterDescriptionFlagsKHR { + pub const PERFORMANCE_IMPACTING: Self = PerformanceCounterDescriptionFlagsKHR(0b1); + pub const 
CONCURRENTLY_IMPACTED: Self = PerformanceCounterDescriptionFlagsKHR(0b10); +} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct AcquireProfilingLockFlagsKHR(pub(crate) Flags); +vk_bitflags_wrapped!(AcquireProfilingLockFlagsKHR, 0b0, Flags); +impl AcquireProfilingLockFlagsKHR {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ShaderCorePropertiesFlagsAMD(pub(crate) Flags); +vk_bitflags_wrapped!(ShaderCorePropertiesFlagsAMD, 0b0, Flags); +impl ShaderCorePropertiesFlagsAMD {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ShaderModuleCreateFlags(pub(crate) Flags); +vk_bitflags_wrapped!(ShaderModuleCreateFlags, 0b0, Flags); +impl ShaderModuleCreateFlags {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct PipelineCompilerControlFlagsAMD(pub(crate) Flags); +vk_bitflags_wrapped!(PipelineCompilerControlFlagsAMD, 0b0, Flags); +impl PipelineCompilerControlFlagsAMD {} +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[doc = ""] +pub struct ToolPurposeFlagsEXT(pub(crate) Flags); +vk_bitflags_wrapped!(ToolPurposeFlagsEXT, 0b1_1111, Flags); +impl ToolPurposeFlagsEXT { + pub const VALIDATION: Self = ToolPurposeFlagsEXT(0b1); + pub const PROFILING: Self = ToolPurposeFlagsEXT(0b10); + pub const TRACING: Self = ToolPurposeFlagsEXT(0b100); + pub const ADDITIONAL_FEATURES: Self = ToolPurposeFlagsEXT(0b1000); + pub const MODIFYING_FEATURES: Self = ToolPurposeFlagsEXT(0b1_0000); +} +pub const MAX_PHYSICAL_DEVICE_NAME_SIZE: usize = (256); +pub const UUID_SIZE: usize = (16); +pub const LUID_SIZE: usize = (8); +pub const MAX_EXTENSION_NAME_SIZE: usize = (256); +pub const MAX_DESCRIPTION_SIZE: usize = (256); +pub const MAX_MEMORY_TYPES: usize = (32); +pub const MAX_MEMORY_HEAPS: usize = (16); +pub const 
LOD_CLAMP_NONE: f32 = (1000.00); +pub const REMAINING_MIP_LEVELS: u32 = (!0); +pub const REMAINING_ARRAY_LAYERS: u32 = (!0); +pub const WHOLE_SIZE: u64 = (!0); +pub const ATTACHMENT_UNUSED: u32 = (!0); +pub const TRUE: Bool32 = (1); +pub const FALSE: Bool32 = (0); +pub const QUEUE_FAMILY_IGNORED: u32 = (!0); +pub const QUEUE_FAMILY_EXTERNAL: u32 = (!0 - 1); +pub const QUEUE_FAMILY_FOREIGN_EXT: u32 = (!0 - 2); +pub const SUBPASS_EXTERNAL: u32 = (!0); +pub const MAX_DEVICE_GROUP_SIZE: usize = (32); +pub const MAX_DRIVER_NAME_SIZE: usize = (256); +pub const MAX_DRIVER_INFO_SIZE: usize = (256); +pub const SHADER_UNUSED_KHR: u32 = (!0); +pub const SHADER_UNUSED_NV: u32 = SHADER_UNUSED_KHR; impl KhrSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_surface\0").expect("Wrong extension string") @@ -44491,7 +55996,7 @@ impl KhrSurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_surface_khr( &self, instance: Instance, @@ -44500,7 +56005,7 @@ impl KhrSurfaceFn { ) -> c_void { (self.destroy_surface_khr)(instance, surface, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_support_khr( &self, physical_device: PhysicalDevice, @@ -44515,7 +56020,7 @@ impl KhrSurfaceFn { p_supported, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_capabilities_khr( &self, physical_device: PhysicalDevice, @@ -44528,7 +56033,7 @@ impl KhrSurfaceFn { p_surface_capabilities, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_formats_khr( &self, physical_device: PhysicalDevice, @@ -44543,7 +56048,7 @@ impl KhrSurfaceFn { p_surface_formats, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_present_modes_khr( &self, physical_device: PhysicalDevice, @@ -44561,15 +56066,15 @@ impl KhrSurfaceFn { } #[doc = "Generated from \'VK_KHR_surface\'"] impl Result { - pub const ERROR_SURFACE_LOST_KHR: Self = Result(-1000000000); + pub const 
ERROR_SURFACE_LOST_KHR: Self = Result(-1_000_000_000); } #[doc = "Generated from \'VK_KHR_surface\'"] impl Result { - pub const ERROR_NATIVE_WINDOW_IN_USE_KHR: Self = Result(-1000000001); + pub const ERROR_NATIVE_WINDOW_IN_USE_KHR: Self = Result(-1_000_000_001); } #[doc = "Generated from \'VK_KHR_surface\'"] impl ObjectType { - pub const SURFACE_KHR: Self = ObjectType(1000000000); + pub const SURFACE_KHR: Self = ObjectType(1_000_000_000); } impl KhrSwapchainFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -44888,7 +56393,7 @@ impl KhrSwapchainFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_swapchain_khr( &self, device: Device, @@ -44898,7 +56403,7 @@ impl KhrSwapchainFn { ) -> Result { (self.create_swapchain_khr)(device, p_create_info, p_allocator, p_swapchain) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_swapchain_khr( &self, device: Device, @@ -44907,7 +56412,7 @@ impl KhrSwapchainFn { ) -> c_void { (self.destroy_swapchain_khr)(device, swapchain, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_swapchain_images_khr( &self, device: Device, @@ -44922,7 +56427,7 @@ impl KhrSwapchainFn { p_swapchain_images, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn acquire_next_image_khr( &self, device: Device, @@ -44934,7 +56439,7 @@ impl KhrSwapchainFn { ) -> Result { (self.acquire_next_image_khr)(device, swapchain, timeout, semaphore, fence, p_image_index) } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_present_khr( &self, queue: Queue, @@ -44942,7 +56447,7 @@ impl KhrSwapchainFn { ) -> Result { (self.queue_present_khr)(queue, p_present_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_device_group_present_capabilities_khr( &self, device: Device, @@ -44953,7 +56458,7 @@ impl KhrSwapchainFn { p_device_group_present_capabilities, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_device_group_surface_present_modes_khr( &self, device: Device, @@ -44962,7 +56467,7 @@ impl KhrSwapchainFn { ) -> Result { 
(self.get_device_group_surface_present_modes_khr)(device, surface, p_modes) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_present_rectangles_khr( &self, physical_device: PhysicalDevice, @@ -44977,7 +56482,7 @@ impl KhrSwapchainFn { p_rects, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn acquire_next_image2_khr( &self, device: Device, @@ -44989,51 +56494,51 @@ impl KhrSwapchainFn { } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl StructureType { - pub const SWAPCHAIN_CREATE_INFO_KHR: Self = StructureType(1000001000); + pub const SWAPCHAIN_CREATE_INFO_KHR: Self = StructureType(1_000_001_000); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl StructureType { - pub const PRESENT_INFO_KHR: Self = StructureType(1000001001); + pub const PRESENT_INFO_KHR: Self = StructureType(1_000_001_001); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl ImageLayout { - pub const PRESENT_SRC_KHR: Self = ImageLayout(1000001002); + pub const PRESENT_SRC_KHR: Self = ImageLayout(1_000_001_002); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl Result { - pub const SUBOPTIMAL_KHR: Self = Result(1000001003); + pub const SUBOPTIMAL_KHR: Self = Result(1_000_001_003); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl Result { - pub const ERROR_OUT_OF_DATE_KHR: Self = Result(-1000001004); + pub const ERROR_OUT_OF_DATE_KHR: Self = Result(-1_000_001_004); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl ObjectType { - pub const SWAPCHAIN_KHR: Self = ObjectType(1000001000); + pub const SWAPCHAIN_KHR: Self = ObjectType(1_000_001_000); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl StructureType { - pub const DEVICE_GROUP_PRESENT_CAPABILITIES_KHR: Self = StructureType(1000060007); + pub const DEVICE_GROUP_PRESENT_CAPABILITIES_KHR: Self = StructureType(1_000_060_007); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl StructureType { - pub const IMAGE_SWAPCHAIN_CREATE_INFO_KHR: Self = StructureType(1000060008); + pub const 
IMAGE_SWAPCHAIN_CREATE_INFO_KHR: Self = StructureType(1_000_060_008); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl StructureType { - pub const BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR: Self = StructureType(1000060009); + pub const BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR: Self = StructureType(1_000_060_009); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl StructureType { - pub const ACQUIRE_NEXT_IMAGE_INFO_KHR: Self = StructureType(1000060010); + pub const ACQUIRE_NEXT_IMAGE_INFO_KHR: Self = StructureType(1_000_060_010); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl StructureType { - pub const DEVICE_GROUP_PRESENT_INFO_KHR: Self = StructureType(1000060011); + pub const DEVICE_GROUP_PRESENT_INFO_KHR: Self = StructureType(1_000_060_011); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl StructureType { - pub const DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR: Self = StructureType(1000060012); + pub const DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR: Self = StructureType(1_000_060_012); } #[doc = "Generated from \'VK_KHR_swapchain\'"] impl SwapchainCreateFlagsKHR { @@ -45310,7 +56815,7 @@ impl KhrDisplayFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_display_properties_khr( &self, physical_device: PhysicalDevice, @@ -45323,7 +56828,7 @@ impl KhrDisplayFn { p_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_display_plane_properties_khr( &self, physical_device: PhysicalDevice, @@ -45336,7 +56841,7 @@ impl KhrDisplayFn { p_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_display_plane_supported_displays_khr( &self, physical_device: PhysicalDevice, @@ -45351,7 +56856,7 @@ impl KhrDisplayFn { p_displays, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_display_mode_properties_khr( &self, physical_device: PhysicalDevice, @@ -45366,7 +56871,7 @@ impl KhrDisplayFn { p_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_display_mode_khr( &self, physical_device: PhysicalDevice, @@ -45377,7 
+56882,7 @@ impl KhrDisplayFn { ) -> Result { (self.create_display_mode_khr)(physical_device, display, p_create_info, p_allocator, p_mode) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_display_plane_capabilities_khr( &self, physical_device: PhysicalDevice, @@ -45392,7 +56897,7 @@ impl KhrDisplayFn { p_capabilities, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_display_plane_surface_khr( &self, instance: Instance, @@ -45405,19 +56910,19 @@ impl KhrDisplayFn { } #[doc = "Generated from \'VK_KHR_display\'"] impl StructureType { - pub const DISPLAY_MODE_CREATE_INFO_KHR: Self = StructureType(1000002000); + pub const DISPLAY_MODE_CREATE_INFO_KHR: Self = StructureType(1_000_002_000); } #[doc = "Generated from \'VK_KHR_display\'"] impl StructureType { - pub const DISPLAY_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000002001); + pub const DISPLAY_SURFACE_CREATE_INFO_KHR: Self = StructureType(1_000_002_001); } #[doc = "Generated from \'VK_KHR_display\'"] impl ObjectType { - pub const DISPLAY_KHR: Self = ObjectType(1000002000); + pub const DISPLAY_KHR: Self = ObjectType(1_000_002_000); } #[doc = "Generated from \'VK_KHR_display\'"] impl ObjectType { - pub const DISPLAY_MODE_KHR: Self = ObjectType(1000002001); + pub const DISPLAY_MODE_KHR: Self = ObjectType(1_000_002_001); } impl KhrDisplaySwapchainFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -45481,7 +56986,7 @@ impl KhrDisplaySwapchainFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_shared_swapchains_khr( &self, device: Device, @@ -45501,11 +57006,11 @@ impl KhrDisplaySwapchainFn { } #[doc = "Generated from \'VK_KHR_display_swapchain\'"] impl StructureType { - pub const DISPLAY_PRESENT_INFO_KHR: Self = StructureType(1000003000); + pub const DISPLAY_PRESENT_INFO_KHR: Self = StructureType(1_000_003_000); } #[doc = "Generated from \'VK_KHR_display_swapchain\'"] impl Result { - pub const ERROR_INCOMPATIBLE_DISPLAY_KHR: Self = Result(-1000003001); + pub const ERROR_INCOMPATIBLE_DISPLAY_KHR: Self = 
Result(-1_000_003_001); } impl KhrXlibSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -45602,7 +57107,7 @@ impl KhrXlibSurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_xlib_surface_khr( &self, instance: Instance, @@ -45612,7 +57117,7 @@ impl KhrXlibSurfaceFn { ) -> Result { (self.create_xlib_surface_khr)(instance, p_create_info, p_allocator, p_surface) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_xlib_presentation_support_khr( &self, physical_device: PhysicalDevice, @@ -45630,7 +57135,7 @@ impl KhrXlibSurfaceFn { } #[doc = "Generated from \'VK_KHR_xlib_surface\'"] impl StructureType { - pub const XLIB_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000004000); + pub const XLIB_SURFACE_CREATE_INFO_KHR: Self = StructureType(1_000_004_000); } impl KhrXcbSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -45727,7 +57232,7 @@ impl KhrXcbSurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_xcb_surface_khr( &self, instance: Instance, @@ -45737,7 +57242,7 @@ impl KhrXcbSurfaceFn { ) -> Result { (self.create_xcb_surface_khr)(instance, p_create_info, p_allocator, p_surface) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_xcb_presentation_support_khr( &self, physical_device: PhysicalDevice, @@ -45755,7 +57260,7 @@ impl KhrXcbSurfaceFn { } #[doc = "Generated from \'VK_KHR_xcb_surface\'"] impl StructureType { - pub const XCB_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000005000); + pub const XCB_SURFACE_CREATE_INFO_KHR: Self = StructureType(1_000_005_000); } impl KhrWaylandSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -45849,7 +57354,7 @@ impl KhrWaylandSurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_wayland_surface_khr( &self, instance: Instance, @@ -45859,7 +57364,7 @@ impl KhrWaylandSurfaceFn { ) -> Result { (self.create_wayland_surface_khr)(instance, p_create_info, p_allocator, p_surface) } - #[doc = ""] + #[doc = ""] pub unsafe fn 
get_physical_device_wayland_presentation_support_khr( &self, physical_device: PhysicalDevice, @@ -45875,7 +57380,7 @@ impl KhrWaylandSurfaceFn { } #[doc = "Generated from \'VK_KHR_wayland_surface\'"] impl StructureType { - pub const WAYLAND_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000006000); + pub const WAYLAND_SURFACE_CREATE_INFO_KHR: Self = StructureType(1_000_006_000); } impl KhrMirSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -45958,7 +57463,7 @@ impl KhrAndroidSurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_android_surface_khr( &self, instance: Instance, @@ -45971,7 +57476,7 @@ impl KhrAndroidSurfaceFn { } #[doc = "Generated from \'VK_KHR_android_surface\'"] impl StructureType { - pub const ANDROID_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000008000); + pub const ANDROID_SURFACE_CREATE_INFO_KHR: Self = StructureType(1_000_008_000); } impl KhrWin32SurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -46058,7 +57563,7 @@ impl KhrWin32SurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_win32_surface_khr( &self, instance: Instance, @@ -46068,7 +57573,7 @@ impl KhrWin32SurfaceFn { ) -> Result { (self.create_win32_surface_khr)(instance, p_create_info, p_allocator, p_surface) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_win32_presentation_support_khr( &self, physical_device: PhysicalDevice, @@ -46082,7 +57587,7 @@ impl KhrWin32SurfaceFn { } #[doc = "Generated from \'VK_KHR_win32_surface\'"] impl StructureType { - pub const WIN32_SURFACE_CREATE_INFO_KHR: Self = StructureType(1000009000); + pub const WIN32_SURFACE_CREATE_INFO_KHR: Self = StructureType(1_000_009_000); } impl AndroidNativeBufferFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -46113,6 +57618,15 @@ pub type PFN_vkQueueSignalReleaseImageANDROID = extern "system" fn( image: Image, p_native_fence_fd: *mut c_int, ) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetSwapchainGrallocUsage2ANDROID = extern 
"system" fn( + device: Device, + format: Format, + image_usage: ImageUsageFlags, + swapchain_image_usage: SwapchainImageUsageFlagsANDROID, + gralloc_consumer_usage: *mut u64, + gralloc_producer_usage: *mut u64, +) -> Result; pub struct AndroidNativeBufferFn { pub get_swapchain_gralloc_usage_android: extern "system" fn( device: Device, @@ -46134,6 +57648,14 @@ pub struct AndroidNativeBufferFn { image: Image, p_native_fence_fd: *mut c_int, ) -> Result, + pub get_swapchain_gralloc_usage2_android: extern "system" fn( + device: Device, + format: Format, + image_usage: ImageUsageFlags, + swapchain_image_usage: SwapchainImageUsageFlagsANDROID, + gralloc_consumer_usage: *mut u64, + gralloc_producer_usage: *mut u64, + ) -> Result, } unsafe impl Send for AndroidNativeBufferFn {} unsafe impl Sync for AndroidNativeBufferFn {} @@ -46143,6 +57665,7 @@ impl ::std::clone::Clone for AndroidNativeBufferFn { get_swapchain_gralloc_usage_android: self.get_swapchain_gralloc_usage_android, acquire_image_android: self.acquire_image_android, queue_signal_release_image_android: self.queue_signal_release_image_android, + get_swapchain_gralloc_usage2_android: self.get_swapchain_gralloc_usage2_android, } } } @@ -46217,9 +57740,32 @@ impl AndroidNativeBufferFn { ::std::mem::transmute(val) } }, + get_swapchain_gralloc_usage2_android: unsafe { + extern "system" fn get_swapchain_gralloc_usage2_android( + _device: Device, + _format: Format, + _image_usage: ImageUsageFlags, + _swapchain_image_usage: SwapchainImageUsageFlagsANDROID, + _gralloc_consumer_usage: *mut u64, + _gralloc_producer_usage: *mut u64, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_swapchain_gralloc_usage2_android) + )) + } + let raw_name = stringify!(vkGetSwapchainGrallocUsage2ANDROID); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_swapchain_gralloc_usage2_android + } else { + ::std::mem::transmute(val) + } + }, } } - #[doc = ""] + #[doc = 
""] pub unsafe fn get_swapchain_gralloc_usage_android( &self, device: Device, @@ -46229,7 +57775,7 @@ impl AndroidNativeBufferFn { ) -> Result { (self.get_swapchain_gralloc_usage_android)(device, format, image_usage, gralloc_usage) } - #[doc = ""] + #[doc = ""] pub unsafe fn acquire_image_android( &self, device: Device, @@ -46240,7 +57786,7 @@ impl AndroidNativeBufferFn { ) -> Result { (self.acquire_image_android)(device, image, native_fence_fd, semaphore, fence) } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_signal_release_image_android( &self, queue: Queue, @@ -46257,10 +57803,37 @@ impl AndroidNativeBufferFn { p_native_fence_fd, ) } + #[doc = ""] + pub unsafe fn get_swapchain_gralloc_usage2_android( + &self, + device: Device, + format: Format, + image_usage: ImageUsageFlags, + swapchain_image_usage: SwapchainImageUsageFlagsANDROID, + gralloc_consumer_usage: *mut u64, + gralloc_producer_usage: *mut u64, + ) -> Result { + (self.get_swapchain_gralloc_usage2_android)( + device, + format, + image_usage, + swapchain_image_usage, + gralloc_consumer_usage, + gralloc_producer_usage, + ) + } } #[doc = "Generated from \'VK_ANDROID_native_buffer\'"] impl StructureType { - pub const NATIVE_BUFFER_ANDROID: Self = StructureType(1000010000); + pub const NATIVE_BUFFER_ANDROID: Self = StructureType(1_000_010_000); +} +#[doc = "Generated from \'VK_ANDROID_native_buffer\'"] +impl StructureType { + pub const SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID: Self = StructureType(1_000_010_001); +} +#[doc = "Generated from \'VK_ANDROID_native_buffer\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID: Self = StructureType(1_000_010_002); } impl ExtDebugReportFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -46400,7 +57973,7 @@ impl ExtDebugReportFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_debug_report_callback_ext( &self, instance: Instance, @@ -46410,7 +57983,7 @@ impl ExtDebugReportFn { ) -> Result { 
(self.create_debug_report_callback_ext)(instance, p_create_info, p_allocator, p_callback) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_debug_report_callback_ext( &self, instance: Instance, @@ -46419,7 +57992,7 @@ impl ExtDebugReportFn { ) -> c_void { (self.destroy_debug_report_callback_ext)(instance, callback, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn debug_report_message_ext( &self, instance: Instance, @@ -46445,23 +58018,28 @@ impl ExtDebugReportFn { } #[doc = "Generated from \'VK_EXT_debug_report\'"] impl StructureType { - pub const DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT: Self = StructureType(1000011000); + pub const DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT: Self = StructureType(1_000_011_000); +} +#[doc = "Generated from \'VK_EXT_debug_report\'"] +impl StructureType { + pub const DEBUG_REPORT_CREATE_INFO_EXT: Self = + StructureType::DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT; } #[doc = "Generated from \'VK_EXT_debug_report\'"] impl Result { - pub const ERROR_VALIDATION_FAILED_EXT: Self = Result(-1000011001); + pub const ERROR_VALIDATION_FAILED_EXT: Self = Result(-1_000_011_001); } #[doc = "Generated from \'VK_EXT_debug_report\'"] impl ObjectType { - pub const DEBUG_REPORT_CALLBACK_EXT: Self = ObjectType(1000011000); + pub const DEBUG_REPORT_CALLBACK_EXT: Self = ObjectType(1_000_011_000); } #[doc = "Generated from \'VK_EXT_debug_report\'"] impl DebugReportObjectTypeEXT { - pub const SAMPLER_YCBCR_CONVERSION: Self = DebugReportObjectTypeEXT(1000156000); + pub const SAMPLER_YCBCR_CONVERSION: Self = DebugReportObjectTypeEXT(1_000_156_000); } #[doc = "Generated from \'VK_EXT_debug_report\'"] impl DebugReportObjectTypeEXT { - pub const DESCRIPTOR_UPDATE_TEMPLATE: Self = DebugReportObjectTypeEXT(1000085000); + pub const DESCRIPTOR_UPDATE_TEMPLATE: Self = DebugReportObjectTypeEXT(1_000_085_000); } impl NvGlslShaderFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -46487,7 +58065,7 @@ impl NvGlslShaderFn { } #[doc = "Generated from 
\'VK_NV_glsl_shader\'"] impl Result { - pub const ERROR_INVALID_SHADER_NV: Self = Result(-1000012000); + pub const ERROR_INVALID_SHADER_NV: Self = Result(-1_000_012_000); } impl ExtDepthRangeUnrestrictedFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -46533,6 +58111,14 @@ impl KhrSamplerMirrorClampToEdgeFn { KhrSamplerMirrorClampToEdgeFn {} } } +#[doc = "Generated from \'VK_KHR_sampler_mirror_clamp_to_edge\'"] +impl SamplerAddressMode { + pub const MIRROR_CLAMP_TO_EDGE: Self = SamplerAddressMode(4); +} +#[doc = "Generated from \'VK_KHR_sampler_mirror_clamp_to_edge\'"] +impl SamplerAddressMode { + pub const MIRROR_CLAMP_TO_EDGE_KHR: Self = SamplerAddressMode::MIRROR_CLAMP_TO_EDGE; +} impl ImgFilterCubicFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_filter_cubic\0") @@ -46557,11 +58143,11 @@ impl ImgFilterCubicFn { } #[doc = "Generated from \'VK_IMG_filter_cubic\'"] impl Filter { - pub const CUBIC_IMG: Self = Filter(1000015000); + pub const CUBIC_IMG: Self = Filter(1_000_015_000); } #[doc = "Generated from \'VK_IMG_filter_cubic\'"] impl FormatFeatureFlags { - pub const SAMPLED_IMAGE_FILTER_CUBIC_IMG: Self = FormatFeatureFlags(0b10000000000000); + pub const SAMPLED_IMAGE_FILTER_CUBIC_IMG: Self = FormatFeatureFlags(0b10_0000_0000_0000); } impl AmdExtension17Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -46632,7 +58218,7 @@ impl AmdRasterizationOrderFn { #[doc = "Generated from \'VK_AMD_rasterization_order\'"] impl StructureType { pub const PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD: Self = - StructureType(1000018000); + StructureType(1_000_018_000); } impl AmdExtension20Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -46858,7 +58444,7 @@ impl ExtDebugMarkerFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn debug_marker_set_object_tag_ext( &self, device: Device, @@ -46866,7 +58452,7 @@ impl ExtDebugMarkerFn { ) -> Result { (self.debug_marker_set_object_tag_ext)(device, p_tag_info) } - #[doc 
= ""] + #[doc = ""] pub unsafe fn debug_marker_set_object_name_ext( &self, device: Device, @@ -46874,7 +58460,7 @@ impl ExtDebugMarkerFn { ) -> Result { (self.debug_marker_set_object_name_ext)(device, p_name_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_debug_marker_begin_ext( &self, command_buffer: CommandBuffer, @@ -46882,11 +58468,11 @@ impl ExtDebugMarkerFn { ) -> c_void { (self.cmd_debug_marker_begin_ext)(command_buffer, p_marker_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_debug_marker_end_ext(&self, command_buffer: CommandBuffer) -> c_void { (self.cmd_debug_marker_end_ext)(command_buffer) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_debug_marker_insert_ext( &self, command_buffer: CommandBuffer, @@ -46897,15 +58483,15 @@ impl ExtDebugMarkerFn { } #[doc = "Generated from \'VK_EXT_debug_marker\'"] impl StructureType { - pub const DEBUG_MARKER_OBJECT_NAME_INFO_EXT: Self = StructureType(1000022000); + pub const DEBUG_MARKER_OBJECT_NAME_INFO_EXT: Self = StructureType(1_000_022_000); } #[doc = "Generated from \'VK_EXT_debug_marker\'"] impl StructureType { - pub const DEBUG_MARKER_OBJECT_TAG_INFO_EXT: Self = StructureType(1000022001); + pub const DEBUG_MARKER_OBJECT_TAG_INFO_EXT: Self = StructureType(1_000_022_001); } #[doc = "Generated from \'VK_EXT_debug_marker\'"] impl StructureType { - pub const DEBUG_MARKER_MARKER_INFO_EXT: Self = StructureType(1000022002); + pub const DEBUG_MARKER_MARKER_INFO_EXT: Self = StructureType(1_000_022_002); } impl AmdExtension24Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -46931,51 +58517,51 @@ impl AmdExtension24Fn { } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl QueueFlags { - pub const RESERVED_6_KHR: Self = QueueFlags(0b1000000); + pub const RESERVED_6_KHR: Self = QueueFlags(0b100_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl PipelineStageFlags { - pub const RESERVED_27_KHR: Self = PipelineStageFlags(0b1000000000000000000000000000); + pub const RESERVED_27_KHR: Self = 
PipelineStageFlags(0b1000_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl AccessFlags { - pub const RESERVED_30_KHR: Self = AccessFlags(0b1000000000000000000000000000000); + pub const RESERVED_30_KHR: Self = AccessFlags(0b100_0000_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl AccessFlags { - pub const RESERVED_31_KHR: Self = AccessFlags(0b10000000000000000000000000000000); + pub const RESERVED_31_KHR: Self = AccessFlags(0b1000_0000_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl BufferUsageFlags { - pub const RESERVED_15_KHR: Self = BufferUsageFlags(0b1000000000000000); + pub const RESERVED_15_KHR: Self = BufferUsageFlags(0b1000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl BufferUsageFlags { - pub const RESERVED_16_KHR: Self = BufferUsageFlags(0b10000000000000000); + pub const RESERVED_16_KHR: Self = BufferUsageFlags(0b1_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl ImageUsageFlags { - pub const RESERVED_13_KHR: Self = ImageUsageFlags(0b10000000000000); + pub const RESERVED_13_KHR: Self = ImageUsageFlags(0b10_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl ImageUsageFlags { - pub const RESERVED_14_KHR: Self = ImageUsageFlags(0b100000000000000); + pub const RESERVED_14_KHR: Self = ImageUsageFlags(0b100_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl ImageUsageFlags { - pub const RESERVED_15_KHR: Self = ImageUsageFlags(0b1000000000000000); + pub const RESERVED_15_KHR: Self = ImageUsageFlags(0b1000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl FormatFeatureFlags { - pub const RESERVED_27_KHR: Self = FormatFeatureFlags(0b1000000000000000000000000000); + pub const RESERVED_27_KHR: Self = FormatFeatureFlags(0b1000_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl 
FormatFeatureFlags { - pub const RESERVED_28_KHR: Self = FormatFeatureFlags(0b10000000000000000000000000000); + pub const RESERVED_28_KHR: Self = FormatFeatureFlags(0b1_0000_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_24\'"] impl QueryType { - pub const RESERVED_8: Self = QueryType(1000023008); + pub const RESERVED_8: Self = QueryType(1_000_023_008); } impl AmdExtension25Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -47001,51 +58587,51 @@ impl AmdExtension25Fn { } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl QueueFlags { - pub const RESERVED_5_KHR: Self = QueueFlags(0b100000); + pub const RESERVED_5_KHR: Self = QueueFlags(0b10_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl PipelineStageFlags { - pub const RESERVED_26_KHR: Self = PipelineStageFlags(0b100000000000000000000000000); + pub const RESERVED_26_KHR: Self = PipelineStageFlags(0b100_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl AccessFlags { - pub const RESERVED_28_KHR: Self = AccessFlags(0b10000000000000000000000000000); + pub const RESERVED_28_KHR: Self = AccessFlags(0b1_0000_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl AccessFlags { - pub const RESERVED_29_KHR: Self = AccessFlags(0b100000000000000000000000000000); + pub const RESERVED_29_KHR: Self = AccessFlags(0b10_0000_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl BufferUsageFlags { - pub const RESERVED_13_KHR: Self = BufferUsageFlags(0b10000000000000); + pub const RESERVED_13_KHR: Self = BufferUsageFlags(0b10_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl BufferUsageFlags { - pub const RESERVED_14_KHR: Self = BufferUsageFlags(0b100000000000000); + pub const RESERVED_14_KHR: Self = BufferUsageFlags(0b100_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl ImageUsageFlags { - pub const RESERVED_10_KHR: Self = 
ImageUsageFlags(0b10000000000); + pub const RESERVED_10_KHR: Self = ImageUsageFlags(0b100_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl ImageUsageFlags { - pub const RESERVED_11_KHR: Self = ImageUsageFlags(0b100000000000); + pub const RESERVED_11_KHR: Self = ImageUsageFlags(0b1000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl ImageUsageFlags { - pub const RESERVED_12_KHR: Self = ImageUsageFlags(0b1000000000000); + pub const RESERVED_12_KHR: Self = ImageUsageFlags(0b1_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl FormatFeatureFlags { - pub const RESERVED_25_KHR: Self = FormatFeatureFlags(0b10000000000000000000000000); + pub const RESERVED_25_KHR: Self = FormatFeatureFlags(0b10_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl FormatFeatureFlags { - pub const RESERVED_26_KHR: Self = FormatFeatureFlags(0b100000000000000000000000000); + pub const RESERVED_26_KHR: Self = FormatFeatureFlags(0b100_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_AMD_extension_25\'"] impl QueryType { - pub const RESERVED_4: Self = QueryType(1000024004); + pub const RESERVED_4: Self = QueryType(1_000_024_004); } impl AmdGcnShaderFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -47093,15 +58679,15 @@ impl NvDedicatedAllocationFn { } #[doc = "Generated from \'VK_NV_dedicated_allocation\'"] impl StructureType { - pub const DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV: Self = StructureType(1000026000); + pub const DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV: Self = StructureType(1_000_026_000); } #[doc = "Generated from \'VK_NV_dedicated_allocation\'"] impl StructureType { - pub const DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV: Self = StructureType(1000026001); + pub const DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV: Self = StructureType(1_000_026_001); } #[doc = "Generated from \'VK_NV_dedicated_allocation\'"] impl StructureType { - pub const 
DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV: Self = StructureType(1000026002); + pub const DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV: Self = StructureType(1_000_026_002); } impl ExtExtension28Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -47383,7 +58969,7 @@ impl ExtTransformFeedbackFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_bind_transform_feedback_buffers_ext( &self, command_buffer: CommandBuffer, @@ -47402,7 +58988,7 @@ impl ExtTransformFeedbackFn { p_sizes, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_begin_transform_feedback_ext( &self, command_buffer: CommandBuffer, @@ -47419,7 +59005,7 @@ impl ExtTransformFeedbackFn { p_counter_buffer_offsets, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_end_transform_feedback_ext( &self, command_buffer: CommandBuffer, @@ -47436,7 +59022,7 @@ impl ExtTransformFeedbackFn { p_counter_buffer_offsets, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_begin_query_indexed_ext( &self, command_buffer: CommandBuffer, @@ -47447,7 +59033,7 @@ impl ExtTransformFeedbackFn { ) -> c_void { (self.cmd_begin_query_indexed_ext)(command_buffer, query_pool, query, flags, index) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_end_query_indexed_ext( &self, command_buffer: CommandBuffer, @@ -47457,7 +59043,7 @@ impl ExtTransformFeedbackFn { ) -> c_void { (self.cmd_end_query_indexed_ext)(command_buffer, query_pool, query, index) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_indirect_byte_count_ext( &self, command_buffer: CommandBuffer, @@ -47481,45 +59067,47 @@ impl ExtTransformFeedbackFn { } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl StructureType { - pub const PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: Self = StructureType(1000028000); + pub const PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: Self = StructureType(1_000_028_000); } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl StructureType { - pub const PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: Self = 
StructureType(1000028001); + pub const PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: Self = + StructureType(1_000_028_001); } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl StructureType { - pub const PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT: Self = StructureType(1000028002); + pub const PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT: Self = + StructureType(1_000_028_002); } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl QueryType { - pub const TRANSFORM_FEEDBACK_STREAM_EXT: Self = QueryType(1000028004); + pub const TRANSFORM_FEEDBACK_STREAM_EXT: Self = QueryType(1_000_028_004); } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl BufferUsageFlags { - pub const TRANSFORM_FEEDBACK_BUFFER_EXT: Self = BufferUsageFlags(0b100000000000); + pub const TRANSFORM_FEEDBACK_BUFFER_EXT: Self = BufferUsageFlags(0b1000_0000_0000); } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl BufferUsageFlags { - pub const TRANSFORM_FEEDBACK_COUNTER_BUFFER_EXT: Self = BufferUsageFlags(0b1000000000000); + pub const TRANSFORM_FEEDBACK_COUNTER_BUFFER_EXT: Self = BufferUsageFlags(0b1_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl AccessFlags { - pub const TRANSFORM_FEEDBACK_WRITE_EXT: Self = AccessFlags(0b10000000000000000000000000); + pub const TRANSFORM_FEEDBACK_WRITE_EXT: Self = AccessFlags(0b10_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl AccessFlags { pub const TRANSFORM_FEEDBACK_COUNTER_READ_EXT: Self = - AccessFlags(0b100000000000000000000000000); + AccessFlags(0b100_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl AccessFlags { pub const TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT: Self = - AccessFlags(0b1000000000000000000000000000); + AccessFlags(0b1000_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_transform_feedback\'"] impl PipelineStageFlags { - pub const 
TRANSFORM_FEEDBACK_EXT: Self = PipelineStageFlags(0b1000000000000000000000000); + pub const TRANSFORM_FEEDBACK_EXT: Self = PipelineStageFlags(0b1_0000_0000_0000_0000_0000_0000); } impl NvxExtension30Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -47543,27 +59131,67 @@ impl NvxExtension30Fn { NvxExtension30Fn {} } } -impl NvxExtension31Fn { +impl NvxImageViewHandleFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_extension_31\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_image_view_handle\0") .expect("Wrong extension string") } } -pub struct NvxExtension31Fn {} -unsafe impl Send for NvxExtension31Fn {} -unsafe impl Sync for NvxExtension31Fn {} -impl ::std::clone::Clone for NvxExtension31Fn { +#[allow(non_camel_case_types)] +pub type PFN_vkGetImageViewHandleNVX = + extern "system" fn(device: Device, p_info: *const ImageViewHandleInfoNVX) -> u32; +pub struct NvxImageViewHandleFn { + pub get_image_view_handle_nvx: + extern "system" fn(device: Device, p_info: *const ImageViewHandleInfoNVX) -> u32, +} +unsafe impl Send for NvxImageViewHandleFn {} +unsafe impl Sync for NvxImageViewHandleFn {} +impl ::std::clone::Clone for NvxImageViewHandleFn { fn clone(&self) -> Self { - NvxExtension31Fn {} + NvxImageViewHandleFn { + get_image_view_handle_nvx: self.get_image_view_handle_nvx, + } } } -impl NvxExtension31Fn { +impl NvxImageViewHandleFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - NvxExtension31Fn {} + NvxImageViewHandleFn { + get_image_view_handle_nvx: unsafe { + extern "system" fn get_image_view_handle_nvx( + _device: Device, + _p_info: *const ImageViewHandleInfoNVX, + ) -> u32 { + panic!(concat!( + "Unable to load ", + stringify!(get_image_view_handle_nvx) + )) + } + let raw_name = stringify!(vkGetImageViewHandleNVX); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_image_view_handle_nvx + } else { + 
::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn get_image_view_handle_nvx( + &self, + device: Device, + p_info: *const ImageViewHandleInfoNVX, + ) -> u32 { + (self.get_image_view_handle_nvx)(device, p_info) + } +} +#[doc = "Generated from \'VK_NVX_image_view_handle\'"] +impl StructureType { + pub const IMAGE_VIEW_HANDLE_INFO_NVX: Self = StructureType(1_000_030_000); } impl AmdExtension32Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -47616,7 +59244,7 @@ impl AmdDrawIndirectCountFn { } } #[allow(non_camel_case_types)] -pub type PFN_vkCmdDrawIndirectCountAMD = extern "system" fn( +pub type PFN_vkCmdDrawIndirectCount = extern "system" fn( command_buffer: CommandBuffer, buffer: Buffer, offset: DeviceSize, @@ -47626,7 +59254,7 @@ pub type PFN_vkCmdDrawIndirectCountAMD = extern "system" fn( stride: u32, ) -> c_void; #[allow(non_camel_case_types)] -pub type PFN_vkCmdDrawIndexedIndirectCountAMD = extern "system" fn( +pub type PFN_vkCmdDrawIndexedIndirectCount = extern "system" fn( command_buffer: CommandBuffer, buffer: Buffer, offset: DeviceSize, @@ -47721,7 +59349,7 @@ impl AmdDrawIndirectCountFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_indirect_count_amd( &self, command_buffer: CommandBuffer, @@ -47742,7 +59370,7 @@ impl AmdDrawIndirectCountFn { stride, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_indexed_indirect_count_amd( &self, command_buffer: CommandBuffer, @@ -47942,7 +59570,7 @@ impl AmdTextureGatherBiasLodFn { } #[doc = "Generated from \'VK_AMD_texture_gather_bias_lod\'"] impl StructureType { - pub const TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD: Self = StructureType(1000041000); + pub const TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD: Self = StructureType(1_000_041_000); } impl AmdShaderInfoFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -48006,7 +59634,7 @@ impl AmdShaderInfoFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_shader_info_amd( &self, device: Device, @@ -48158,27 +59786,79 @@ impl 
GoogleExtension49Fn { GoogleExtension49Fn {} } } -impl GoogleExtension50Fn { +impl GgpStreamDescriptorSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_50\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GGP_stream_descriptor_surface\0") .expect("Wrong extension string") } } -pub struct GoogleExtension50Fn {} -unsafe impl Send for GoogleExtension50Fn {} -unsafe impl Sync for GoogleExtension50Fn {} -impl ::std::clone::Clone for GoogleExtension50Fn { +#[allow(non_camel_case_types)] +pub type PFN_vkCreateStreamDescriptorSurfaceGGP = extern "system" fn( + instance: Instance, + p_create_info: *const StreamDescriptorSurfaceCreateInfoGGP, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +pub struct GgpStreamDescriptorSurfaceFn { + pub create_stream_descriptor_surface_ggp: extern "system" fn( + instance: Instance, + p_create_info: *const StreamDescriptorSurfaceCreateInfoGGP, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, +} +unsafe impl Send for GgpStreamDescriptorSurfaceFn {} +unsafe impl Sync for GgpStreamDescriptorSurfaceFn {} +impl ::std::clone::Clone for GgpStreamDescriptorSurfaceFn { fn clone(&self) -> Self { - GoogleExtension50Fn {} + GgpStreamDescriptorSurfaceFn { + create_stream_descriptor_surface_ggp: self.create_stream_descriptor_surface_ggp, + } } } -impl GoogleExtension50Fn { +impl GgpStreamDescriptorSurfaceFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - GoogleExtension50Fn {} + GgpStreamDescriptorSurfaceFn { + create_stream_descriptor_surface_ggp: unsafe { + extern "system" fn create_stream_descriptor_surface_ggp( + _instance: Instance, + _p_create_info: *const StreamDescriptorSurfaceCreateInfoGGP, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + 
stringify!(create_stream_descriptor_surface_ggp) + )) + } + let raw_name = stringify!(vkCreateStreamDescriptorSurfaceGGP); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_stream_descriptor_surface_ggp + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn create_stream_descriptor_surface_ggp( + &self, + instance: Instance, + p_create_info: *const StreamDescriptorSurfaceCreateInfoGGP, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_stream_descriptor_surface_ggp)(instance, p_create_info, p_allocator, p_surface) + } +} +#[doc = "Generated from \'VK_GGP_stream_descriptor_surface\'"] +impl StructureType { + pub const STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP: Self = StructureType(1_000_049_000); } impl NvCornerSampledImageFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -48204,34 +59884,42 @@ impl NvCornerSampledImageFn { } #[doc = "Generated from \'VK_NV_corner_sampled_image\'"] impl ImageCreateFlags { - pub const CORNER_SAMPLED_NV: Self = ImageCreateFlags(0b10000000000000); + pub const CORNER_SAMPLED_NV: Self = ImageCreateFlags(0b10_0000_0000_0000); } #[doc = "Generated from \'VK_NV_corner_sampled_image\'"] impl StructureType { - pub const PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV: Self = StructureType(1000050000); + pub const PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV: Self = StructureType(1_000_050_000); } -impl NvxExtension52Fn { +impl NvExtension52Fn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_extension_52\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_52\0") .expect("Wrong extension string") } } -pub struct NvxExtension52Fn {} -unsafe impl Send for NvxExtension52Fn {} -unsafe impl Sync for NvxExtension52Fn {} -impl ::std::clone::Clone for NvxExtension52Fn { +pub struct NvExtension52Fn {} +unsafe impl Send for NvExtension52Fn {} 
+unsafe impl Sync for NvExtension52Fn {} +impl ::std::clone::Clone for NvExtension52Fn { fn clone(&self) -> Self { - NvxExtension52Fn {} + NvExtension52Fn {} } } -impl NvxExtension52Fn { +impl NvExtension52Fn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - NvxExtension52Fn {} + NvExtension52Fn {} } } +#[doc = "Generated from \'VK_NV_extension_52\'"] +impl ShaderModuleCreateFlags { + pub const RESERVED_0_NV: Self = ShaderModuleCreateFlags(0b1); +} +#[doc = "Generated from \'VK_NV_extension_52\'"] +impl PipelineShaderStageCreateFlags { + pub const RESERVED_2_NV: Self = PipelineShaderStageCreateFlags(0b100); +} impl NvExtension53Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_53\0") @@ -48276,6 +59964,25 @@ impl KhrMultiviewFn { KhrMultiviewFn {} } } +#[doc = "Generated from \'VK_KHR_multiview\'"] +impl StructureType { + pub const RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR: Self = + StructureType::RENDER_PASS_MULTIVIEW_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_multiview\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_MULTIVIEW_FEATURES; +} +#[doc = "Generated from \'VK_KHR_multiview\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: Self = + StructureType::PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_multiview\'"] +impl DependencyFlags { + pub const VIEW_LOCAL_KHR: Self = DependencyFlags::VIEW_LOCAL; +} impl ImgFormatPvrtcFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_format_pvrtc\0") @@ -48300,35 +60007,35 @@ impl ImgFormatPvrtcFn { } #[doc = "Generated from \'VK_IMG_format_pvrtc\'"] impl Format { - pub const PVRTC1_2BPP_UNORM_BLOCK_IMG: Self = Format(1000054000); + pub const PVRTC1_2BPP_UNORM_BLOCK_IMG: Self = Format(1_000_054_000); } #[doc = "Generated from \'VK_IMG_format_pvrtc\'"] impl 
Format { - pub const PVRTC1_4BPP_UNORM_BLOCK_IMG: Self = Format(1000054001); + pub const PVRTC1_4BPP_UNORM_BLOCK_IMG: Self = Format(1_000_054_001); } #[doc = "Generated from \'VK_IMG_format_pvrtc\'"] impl Format { - pub const PVRTC2_2BPP_UNORM_BLOCK_IMG: Self = Format(1000054002); + pub const PVRTC2_2BPP_UNORM_BLOCK_IMG: Self = Format(1_000_054_002); } #[doc = "Generated from \'VK_IMG_format_pvrtc\'"] impl Format { - pub const PVRTC2_4BPP_UNORM_BLOCK_IMG: Self = Format(1000054003); + pub const PVRTC2_4BPP_UNORM_BLOCK_IMG: Self = Format(1_000_054_003); } #[doc = "Generated from \'VK_IMG_format_pvrtc\'"] impl Format { - pub const PVRTC1_2BPP_SRGB_BLOCK_IMG: Self = Format(1000054004); + pub const PVRTC1_2BPP_SRGB_BLOCK_IMG: Self = Format(1_000_054_004); } #[doc = "Generated from \'VK_IMG_format_pvrtc\'"] impl Format { - pub const PVRTC1_4BPP_SRGB_BLOCK_IMG: Self = Format(1000054005); + pub const PVRTC1_4BPP_SRGB_BLOCK_IMG: Self = Format(1_000_054_005); } #[doc = "Generated from \'VK_IMG_format_pvrtc\'"] impl Format { - pub const PVRTC2_2BPP_SRGB_BLOCK_IMG: Self = Format(1000054006); + pub const PVRTC2_2BPP_SRGB_BLOCK_IMG: Self = Format(1_000_054_006); } #[doc = "Generated from \'VK_IMG_format_pvrtc\'"] impl Format { - pub const PVRTC2_4BPP_SRGB_BLOCK_IMG: Self = Format(1000054007); + pub const PVRTC2_4BPP_SRGB_BLOCK_IMG: Self = Format(1_000_054_007); } impl NvExternalMemoryCapabilitiesFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -48402,7 +60109,7 @@ impl NvExternalMemoryCapabilitiesFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_external_image_format_properties_nv( &self, physical_device: PhysicalDevice, @@ -48450,11 +60157,11 @@ impl NvExternalMemoryFn { } #[doc = "Generated from \'VK_NV_external_memory\'"] impl StructureType { - pub const EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV: Self = StructureType(1000056000); + pub const EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV: Self = StructureType(1_000_056_000); } #[doc = "Generated from 
\'VK_NV_external_memory\'"] impl StructureType { - pub const EXPORT_MEMORY_ALLOCATE_INFO_NV: Self = StructureType(1000056001); + pub const EXPORT_MEMORY_ALLOCATE_INFO_NV: Self = StructureType(1_000_056_001); } impl NvExternalMemoryWin32Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -48515,7 +60222,7 @@ impl NvExternalMemoryWin32Fn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_memory_win32_handle_nv( &self, device: Device, @@ -48528,11 +60235,11 @@ impl NvExternalMemoryWin32Fn { } #[doc = "Generated from \'VK_NV_external_memory_win32\'"] impl StructureType { - pub const IMPORT_MEMORY_WIN32_HANDLE_INFO_NV: Self = StructureType(1000057000); + pub const IMPORT_MEMORY_WIN32_HANDLE_INFO_NV: Self = StructureType(1_000_057_000); } #[doc = "Generated from \'VK_NV_external_memory_win32\'"] impl StructureType { - pub const EXPORT_MEMORY_WIN32_HANDLE_INFO_NV: Self = StructureType(1000057001); + pub const EXPORT_MEMORY_WIN32_HANDLE_INFO_NV: Self = StructureType(1_000_057_001); } impl NvWin32KeyedMutexFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -48558,7 +60265,7 @@ impl NvWin32KeyedMutexFn { } #[doc = "Generated from \'VK_NV_win32_keyed_mutex\'"] impl StructureType { - pub const WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV: Self = StructureType(1000058000); + pub const WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV: Self = StructureType(1_000_058_000); } impl KhrGetPhysicalDeviceProperties2Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -48566,12 +60273,99 @@ impl KhrGetPhysicalDeviceProperties2Fn { .expect("Wrong extension string") } } -pub struct KhrGetPhysicalDeviceProperties2Fn {} +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceFeatures2 = extern "system" fn( + physical_device: PhysicalDevice, + p_features: *mut PhysicalDeviceFeatures2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_properties: *mut PhysicalDeviceProperties2, +) -> 
c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceFormatProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + p_format_properties: *mut FormatProperties2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceImageFormatProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_image_format_info: *const PhysicalDeviceImageFormatInfo2, + p_image_format_properties: *mut ImageFormatProperties2, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceQueueFamilyProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_queue_family_property_count: *mut u32, + p_queue_family_properties: *mut QueueFamilyProperties2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceMemoryProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_memory_properties: *mut PhysicalDeviceMemoryProperties2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 = extern "system" fn( + physical_device: PhysicalDevice, + p_format_info: *const PhysicalDeviceSparseImageFormatInfo2, + p_property_count: *mut u32, + p_properties: *mut SparseImageFormatProperties2, +) -> c_void; +pub struct KhrGetPhysicalDeviceProperties2Fn { + pub get_physical_device_features2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_features: *mut PhysicalDeviceFeatures2, + ) -> c_void, + pub get_physical_device_properties2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_properties: *mut PhysicalDeviceProperties2, + ) -> c_void, + pub get_physical_device_format_properties2_khr: extern "system" fn( + physical_device: PhysicalDevice, + format: Format, + p_format_properties: *mut FormatProperties2, + ) -> c_void, + pub get_physical_device_image_format_properties2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_image_format_info: *const 
PhysicalDeviceImageFormatInfo2, + p_image_format_properties: *mut ImageFormatProperties2, + ) -> Result, + pub get_physical_device_queue_family_properties2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_queue_family_property_count: *mut u32, + p_queue_family_properties: *mut QueueFamilyProperties2, + ) -> c_void, + pub get_physical_device_memory_properties2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_memory_properties: *mut PhysicalDeviceMemoryProperties2, + ) -> c_void, + pub get_physical_device_sparse_image_format_properties2_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_format_info: *const PhysicalDeviceSparseImageFormatInfo2, + p_property_count: *mut u32, + p_properties: *mut SparseImageFormatProperties2, + ) -> c_void, +} unsafe impl Send for KhrGetPhysicalDeviceProperties2Fn {} unsafe impl Sync for KhrGetPhysicalDeviceProperties2Fn {} impl ::std::clone::Clone for KhrGetPhysicalDeviceProperties2Fn { fn clone(&self) -> Self { - KhrGetPhysicalDeviceProperties2Fn {} + KhrGetPhysicalDeviceProperties2Fn { + get_physical_device_features2_khr: self.get_physical_device_features2_khr, + get_physical_device_properties2_khr: self.get_physical_device_properties2_khr, + get_physical_device_format_properties2_khr: self + .get_physical_device_format_properties2_khr, + get_physical_device_image_format_properties2_khr: self + .get_physical_device_image_format_properties2_khr, + get_physical_device_queue_family_properties2_khr: self + .get_physical_device_queue_family_properties2_khr, + get_physical_device_memory_properties2_khr: self + .get_physical_device_memory_properties2_khr, + get_physical_device_sparse_image_format_properties2_khr: self + .get_physical_device_sparse_image_format_properties2_khr, + } } } impl KhrGetPhysicalDeviceProperties2Fn { @@ -48579,8 +60373,265 @@ impl KhrGetPhysicalDeviceProperties2Fn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrGetPhysicalDeviceProperties2Fn {} + 
KhrGetPhysicalDeviceProperties2Fn { + get_physical_device_features2_khr: unsafe { + extern "system" fn get_physical_device_features2_khr( + _physical_device: PhysicalDevice, + _p_features: *mut PhysicalDeviceFeatures2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_features2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceFeatures2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_features2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_properties2_khr: unsafe { + extern "system" fn get_physical_device_properties2_khr( + _physical_device: PhysicalDevice, + _p_properties: *mut PhysicalDeviceProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_properties2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceProperties2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_properties2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_format_properties2_khr: unsafe { + extern "system" fn get_physical_device_format_properties2_khr( + _physical_device: PhysicalDevice, + _format: Format, + _p_format_properties: *mut FormatProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_format_properties2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceFormatProperties2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_format_properties2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_image_format_properties2_khr: unsafe { + extern "system" fn get_physical_device_image_format_properties2_khr( + _physical_device: PhysicalDevice, + _p_image_format_info: *const PhysicalDeviceImageFormatInfo2, + 
_p_image_format_properties: *mut ImageFormatProperties2, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_image_format_properties2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceImageFormatProperties2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_image_format_properties2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_queue_family_properties2_khr: unsafe { + extern "system" fn get_physical_device_queue_family_properties2_khr( + _physical_device: PhysicalDevice, + _p_queue_family_property_count: *mut u32, + _p_queue_family_properties: *mut QueueFamilyProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_queue_family_properties2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceQueueFamilyProperties2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_queue_family_properties2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_memory_properties2_khr: unsafe { + extern "system" fn get_physical_device_memory_properties2_khr( + _physical_device: PhysicalDevice, + _p_memory_properties: *mut PhysicalDeviceMemoryProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_memory_properties2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceMemoryProperties2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_memory_properties2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_sparse_image_format_properties2_khr: unsafe { + extern "system" fn get_physical_device_sparse_image_format_properties2_khr( + _physical_device: PhysicalDevice, + _p_format_info: *const PhysicalDeviceSparseImageFormatInfo2, + 
_p_property_count: *mut u32, + _p_properties: *mut SparseImageFormatProperties2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_sparse_image_format_properties2_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSparseImageFormatProperties2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_sparse_image_format_properties2_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn get_physical_device_features2_khr( + &self, + physical_device: PhysicalDevice, + p_features: *mut PhysicalDeviceFeatures2, + ) -> c_void { + (self.get_physical_device_features2_khr)(physical_device, p_features) + } + #[doc = ""] + pub unsafe fn get_physical_device_properties2_khr( + &self, + physical_device: PhysicalDevice, + p_properties: *mut PhysicalDeviceProperties2, + ) -> c_void { + (self.get_physical_device_properties2_khr)(physical_device, p_properties) + } + #[doc = ""] + pub unsafe fn get_physical_device_format_properties2_khr( + &self, + physical_device: PhysicalDevice, + format: Format, + p_format_properties: *mut FormatProperties2, + ) -> c_void { + (self.get_physical_device_format_properties2_khr)( + physical_device, + format, + p_format_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_image_format_properties2_khr( + &self, + physical_device: PhysicalDevice, + p_image_format_info: *const PhysicalDeviceImageFormatInfo2, + p_image_format_properties: *mut ImageFormatProperties2, + ) -> Result { + (self.get_physical_device_image_format_properties2_khr)( + physical_device, + p_image_format_info, + p_image_format_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_queue_family_properties2_khr( + &self, + physical_device: PhysicalDevice, + p_queue_family_property_count: *mut u32, + p_queue_family_properties: *mut QueueFamilyProperties2, + ) -> c_void { + 
(self.get_physical_device_queue_family_properties2_khr)( + physical_device, + p_queue_family_property_count, + p_queue_family_properties, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_memory_properties2_khr( + &self, + physical_device: PhysicalDevice, + p_memory_properties: *mut PhysicalDeviceMemoryProperties2, + ) -> c_void { + (self.get_physical_device_memory_properties2_khr)(physical_device, p_memory_properties) + } + #[doc = ""] + pub unsafe fn get_physical_device_sparse_image_format_properties2_khr( + &self, + physical_device: PhysicalDevice, + p_format_info: *const PhysicalDeviceSparseImageFormatInfo2, + p_property_count: *mut u32, + p_properties: *mut SparseImageFormatProperties2, + ) -> c_void { + (self.get_physical_device_sparse_image_format_properties2_khr)( + physical_device, + p_format_info, + p_property_count, + p_properties, + ) + } +} +#[doc = "Generated from \'VK_KHR_get_physical_device_properties2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_FEATURES_2_KHR: Self = StructureType::PHYSICAL_DEVICE_FEATURES_2; +} +#[doc = "Generated from \'VK_KHR_get_physical_device_properties2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_PROPERTIES_2_KHR: Self = StructureType::PHYSICAL_DEVICE_PROPERTIES_2; +} +#[doc = "Generated from \'VK_KHR_get_physical_device_properties2\'"] +impl StructureType { + pub const FORMAT_PROPERTIES_2_KHR: Self = StructureType::FORMAT_PROPERTIES_2; +} +#[doc = "Generated from \'VK_KHR_get_physical_device_properties2\'"] +impl StructureType { + pub const IMAGE_FORMAT_PROPERTIES_2_KHR: Self = StructureType::IMAGE_FORMAT_PROPERTIES_2; +} +#[doc = "Generated from \'VK_KHR_get_physical_device_properties2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR: Self = + StructureType::PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2; +} +#[doc = "Generated from \'VK_KHR_get_physical_device_properties2\'"] +impl StructureType { + pub const QUEUE_FAMILY_PROPERTIES_2_KHR: Self = 
StructureType::QUEUE_FAMILY_PROPERTIES_2; +} +#[doc = "Generated from \'VK_KHR_get_physical_device_properties2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR: Self = + StructureType::PHYSICAL_DEVICE_MEMORY_PROPERTIES_2; +} +#[doc = "Generated from \'VK_KHR_get_physical_device_properties2\'"] +impl StructureType { + pub const SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR: Self = + StructureType::SPARSE_IMAGE_FORMAT_PROPERTIES_2; +} +#[doc = "Generated from \'VK_KHR_get_physical_device_properties2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR: Self = + StructureType::PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2; } impl KhrDeviceGroupFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -48588,7 +60639,46 @@ impl KhrDeviceGroupFn { .expect("Wrong extension string") } } +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceGroupPeerMemoryFeatures = extern "system" fn( + device: Device, + heap_index: u32, + local_device_index: u32, + remote_device_index: u32, + p_peer_memory_features: *mut PeerMemoryFeatureFlags, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetDeviceMask = + extern "system" fn(command_buffer: CommandBuffer, device_mask: u32) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdDispatchBase = extern "system" fn( + command_buffer: CommandBuffer, + base_group_x: u32, + base_group_y: u32, + base_group_z: u32, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, +) -> c_void; pub struct KhrDeviceGroupFn { + pub get_device_group_peer_memory_features_khr: extern "system" fn( + device: Device, + heap_index: u32, + local_device_index: u32, + remote_device_index: u32, + p_peer_memory_features: *mut PeerMemoryFeatureFlags, + ) -> c_void, + pub cmd_set_device_mask_khr: + extern "system" fn(command_buffer: CommandBuffer, device_mask: u32) -> c_void, + pub cmd_dispatch_base_khr: extern "system" fn( + command_buffer: CommandBuffer, + base_group_x: u32, + 
base_group_y: u32, + base_group_z: u32, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, + ) -> c_void, pub get_device_group_present_capabilities_khr: extern "system" fn( device: Device, p_device_group_present_capabilities: *mut DeviceGroupPresentCapabilitiesKHR, @@ -48615,6 +60705,10 @@ unsafe impl Sync for KhrDeviceGroupFn {} impl ::std::clone::Clone for KhrDeviceGroupFn { fn clone(&self) -> Self { KhrDeviceGroupFn { + get_device_group_peer_memory_features_khr: self + .get_device_group_peer_memory_features_khr, + cmd_set_device_mask_khr: self.cmd_set_device_mask_khr, + cmd_dispatch_base_khr: self.cmd_dispatch_base_khr, get_device_group_present_capabilities_khr: self .get_device_group_present_capabilities_khr, get_device_group_surface_present_modes_khr: self @@ -48631,6 +60725,71 @@ impl KhrDeviceGroupFn { F: FnMut(&::std::ffi::CStr) -> *const c_void, { KhrDeviceGroupFn { + get_device_group_peer_memory_features_khr: unsafe { + extern "system" fn get_device_group_peer_memory_features_khr( + _device: Device, + _heap_index: u32, + _local_device_index: u32, + _remote_device_index: u32, + _p_peer_memory_features: *mut PeerMemoryFeatureFlags, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_device_group_peer_memory_features_khr) + )) + } + let raw_name = stringify!(vkGetDeviceGroupPeerMemoryFeaturesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_group_peer_memory_features_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_device_mask_khr: unsafe { + extern "system" fn cmd_set_device_mask_khr( + _command_buffer: CommandBuffer, + _device_mask: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_device_mask_khr) + )) + } + let raw_name = stringify!(vkCmdSetDeviceMaskKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_device_mask_khr + } else { + 
::std::mem::transmute(val) + } + }, + cmd_dispatch_base_khr: unsafe { + extern "system" fn cmd_dispatch_base_khr( + _command_buffer: CommandBuffer, + _base_group_x: u32, + _base_group_y: u32, + _base_group_z: u32, + _group_count_x: u32, + _group_count_y: u32, + _group_count_z: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_dispatch_base_khr) + )) + } + let raw_name = stringify!(vkCmdDispatchBaseKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_dispatch_base_khr + } else { + ::std::mem::transmute(val) + } + }, get_device_group_present_capabilities_khr: unsafe { extern "system" fn get_device_group_present_capabilities_khr( _device: Device, @@ -48713,7 +60872,53 @@ impl KhrDeviceGroupFn { }, } } - #[doc = ""] + #[doc = ""] + pub unsafe fn get_device_group_peer_memory_features_khr( + &self, + device: Device, + heap_index: u32, + local_device_index: u32, + remote_device_index: u32, + p_peer_memory_features: *mut PeerMemoryFeatureFlags, + ) -> c_void { + (self.get_device_group_peer_memory_features_khr)( + device, + heap_index, + local_device_index, + remote_device_index, + p_peer_memory_features, + ) + } + #[doc = ""] + pub unsafe fn cmd_set_device_mask_khr( + &self, + command_buffer: CommandBuffer, + device_mask: u32, + ) -> c_void { + (self.cmd_set_device_mask_khr)(command_buffer, device_mask) + } + #[doc = ""] + pub unsafe fn cmd_dispatch_base_khr( + &self, + command_buffer: CommandBuffer, + base_group_x: u32, + base_group_y: u32, + base_group_z: u32, + group_count_x: u32, + group_count_y: u32, + group_count_z: u32, + ) -> c_void { + (self.cmd_dispatch_base_khr)( + command_buffer, + base_group_x, + base_group_y, + base_group_z, + group_count_x, + group_count_y, + group_count_z, + ) + } + #[doc = ""] pub unsafe fn get_device_group_present_capabilities_khr( &self, device: Device, @@ -48724,7 +60929,7 @@ impl KhrDeviceGroupFn { p_device_group_present_capabilities, ) } - #[doc 
= ""] + #[doc = ""] pub unsafe fn get_device_group_surface_present_modes_khr( &self, device: Device, @@ -48733,7 +60938,7 @@ impl KhrDeviceGroupFn { ) -> Result { (self.get_device_group_surface_present_modes_khr)(device, surface, p_modes) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_present_rectangles_khr( &self, physical_device: PhysicalDevice, @@ -48748,7 +60953,7 @@ impl KhrDeviceGroupFn { p_rects, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn acquire_next_image2_khr( &self, device: Device, @@ -48758,6 +60963,72 @@ impl KhrDeviceGroupFn { (self.acquire_next_image2_khr)(device, p_acquire_info, p_image_index) } } +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl StructureType { + pub const MEMORY_ALLOCATE_FLAGS_INFO_KHR: Self = StructureType::MEMORY_ALLOCATE_FLAGS_INFO; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl StructureType { + pub const DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR: Self = + StructureType::DEVICE_GROUP_RENDER_PASS_BEGIN_INFO; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl StructureType { + pub const DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR: Self = + StructureType::DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl StructureType { + pub const DEVICE_GROUP_SUBMIT_INFO_KHR: Self = StructureType::DEVICE_GROUP_SUBMIT_INFO; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl StructureType { + pub const DEVICE_GROUP_BIND_SPARSE_INFO_KHR: Self = + StructureType::DEVICE_GROUP_BIND_SPARSE_INFO; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl PeerMemoryFeatureFlags { + pub const COPY_SRC_KHR: Self = PeerMemoryFeatureFlags::COPY_SRC; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl PeerMemoryFeatureFlags { + pub const COPY_DST_KHR: Self = PeerMemoryFeatureFlags::COPY_DST; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl PeerMemoryFeatureFlags { + pub const GENERIC_SRC_KHR: Self = PeerMemoryFeatureFlags::GENERIC_SRC; 
+} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl PeerMemoryFeatureFlags { + pub const GENERIC_DST_KHR: Self = PeerMemoryFeatureFlags::GENERIC_DST; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl MemoryAllocateFlags { + pub const DEVICE_MASK_KHR: Self = MemoryAllocateFlags::DEVICE_MASK; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl PipelineCreateFlags { + pub const VIEW_INDEX_FROM_DEVICE_INDEX_KHR: Self = + PipelineCreateFlags::VIEW_INDEX_FROM_DEVICE_INDEX; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl DependencyFlags { + pub const DEVICE_GROUP_KHR: Self = DependencyFlags::DEVICE_GROUP; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl StructureType { + pub const BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR: Self = + StructureType::BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl StructureType { + pub const BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR: Self = + StructureType::BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO; +} +#[doc = "Generated from \'VK_KHR_device_group\'"] +impl ImageCreateFlags { + pub const SPLIT_INSTANCE_BIND_REGIONS_KHR: Self = ImageCreateFlags::SPLIT_INSTANCE_BIND_REGIONS; +} impl ExtValidationFlagsFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_validation_flags\0") @@ -48782,7 +61053,7 @@ impl ExtValidationFlagsFn { } #[doc = "Generated from \'VK_EXT_validation_flags\'"] impl StructureType { - pub const VALIDATION_FLAGS_EXT: Self = StructureType(1000061000); + pub const VALIDATION_FLAGS_EXT: Self = StructureType(1_000_061_000); } impl NnViSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -48840,7 +61111,7 @@ impl NnViSurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_vi_surface_nn( &self, instance: Instance, @@ -48853,7 +61124,7 @@ impl NnViSurfaceFn { } #[doc = "Generated from \'VK_NN_vi_surface\'"] impl StructureType { - pub const VI_SURFACE_CREATE_INFO_NN: Self = 
StructureType(1000062000); + pub const VI_SURFACE_CREATE_INFO_NN: Self = StructureType(1_000_062_000); } impl KhrShaderDrawParametersFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -48921,28 +61192,89 @@ impl ExtShaderSubgroupVoteFn { ExtShaderSubgroupVoteFn {} } } -impl ArmExtension01Fn { +impl ExtTextureCompressionAstcHdrFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_ARM_extension_01\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_texture_compression_astc_hdr\0") .expect("Wrong extension string") } } -pub struct ArmExtension01Fn {} -unsafe impl Send for ArmExtension01Fn {} -unsafe impl Sync for ArmExtension01Fn {} -impl ::std::clone::Clone for ArmExtension01Fn { +pub struct ExtTextureCompressionAstcHdrFn {} +unsafe impl Send for ExtTextureCompressionAstcHdrFn {} +unsafe impl Sync for ExtTextureCompressionAstcHdrFn {} +impl ::std::clone::Clone for ExtTextureCompressionAstcHdrFn { fn clone(&self) -> Self { - ArmExtension01Fn {} + ExtTextureCompressionAstcHdrFn {} } } -impl ArmExtension01Fn { +impl ExtTextureCompressionAstcHdrFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - ArmExtension01Fn {} + ExtTextureCompressionAstcHdrFn {} } } +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT: Self = + StructureType(1_000_066_000); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_4X4_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_000); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_5X4_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_001); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_5X5_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_002); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + 
pub const ASTC_6X5_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_003); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_6X6_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_004); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_8X5_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_005); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_8X6_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_006); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_8X8_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_007); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_10X5_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_008); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_10X6_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_009); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_10X8_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_010); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_10X10_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_011); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_12X10_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_012); +} +#[doc = "Generated from \'VK_EXT_texture_compression_astc_hdr\'"] +impl Format { + pub const ASTC_12X12_SFLOAT_BLOCK_EXT: Self = Format(1_000_066_013); +} impl ExtAstcDecodeModeFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_astc_decode_mode\0") @@ -48967,11 +61299,11 @@ impl ExtAstcDecodeModeFn { } #[doc = "Generated from \'VK_EXT_astc_decode_mode\'"] impl StructureType { - pub const IMAGE_VIEW_ASTC_DECODE_MODE_EXT: Self = StructureType(1000067000); + pub const 
IMAGE_VIEW_ASTC_DECODE_MODE_EXT: Self = StructureType(1_000_067_000); } #[doc = "Generated from \'VK_EXT_astc_decode_mode\'"] impl StructureType { - pub const PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT: Self = StructureType(1000067001); + pub const PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT: Self = StructureType(1_000_067_001); } impl ImgExtension69Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49001,12 +61333,26 @@ impl KhrMaintenance1Fn { .expect("Wrong extension string") } } -pub struct KhrMaintenance1Fn {} +#[allow(non_camel_case_types)] +pub type PFN_vkTrimCommandPool = extern "system" fn( + device: Device, + command_pool: CommandPool, + flags: CommandPoolTrimFlags, +) -> c_void; +pub struct KhrMaintenance1Fn { + pub trim_command_pool_khr: extern "system" fn( + device: Device, + command_pool: CommandPool, + flags: CommandPoolTrimFlags, + ) -> c_void, +} unsafe impl Send for KhrMaintenance1Fn {} unsafe impl Sync for KhrMaintenance1Fn {} impl ::std::clone::Clone for KhrMaintenance1Fn { fn clone(&self) -> Self { - KhrMaintenance1Fn {} + KhrMaintenance1Fn { + trim_command_pool_khr: self.trim_command_pool_khr, + } } } impl KhrMaintenance1Fn { @@ -49014,8 +61360,54 @@ impl KhrMaintenance1Fn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrMaintenance1Fn {} + KhrMaintenance1Fn { + trim_command_pool_khr: unsafe { + extern "system" fn trim_command_pool_khr( + _device: Device, + _command_pool: CommandPool, + _flags: CommandPoolTrimFlags, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(trim_command_pool_khr) + )) + } + let raw_name = stringify!(vkTrimCommandPoolKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + trim_command_pool_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn trim_command_pool_khr( + &self, + device: Device, + command_pool: CommandPool, + flags: CommandPoolTrimFlags, + ) -> c_void { + 
(self.trim_command_pool_khr)(device, command_pool, flags) + } +} +#[doc = "Generated from \'VK_KHR_maintenance1\'"] +impl Result { + pub const ERROR_OUT_OF_POOL_MEMORY_KHR: Self = Result::ERROR_OUT_OF_POOL_MEMORY; +} +#[doc = "Generated from \'VK_KHR_maintenance1\'"] +impl FormatFeatureFlags { + pub const TRANSFER_SRC_KHR: Self = FormatFeatureFlags::TRANSFER_SRC; +} +#[doc = "Generated from \'VK_KHR_maintenance1\'"] +impl FormatFeatureFlags { + pub const TRANSFER_DST_KHR: Self = FormatFeatureFlags::TRANSFER_DST; +} +#[doc = "Generated from \'VK_KHR_maintenance1\'"] +impl ImageCreateFlags { + pub const TYPE_2D_ARRAY_COMPATIBLE_KHR: Self = ImageCreateFlags::TYPE_2D_ARRAY_COMPATIBLE; } impl KhrDeviceGroupCreationFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49023,12 +61415,26 @@ impl KhrDeviceGroupCreationFn { .expect("Wrong extension string") } } -pub struct KhrDeviceGroupCreationFn {} +#[allow(non_camel_case_types)] +pub type PFN_vkEnumeratePhysicalDeviceGroups = extern "system" fn( + instance: Instance, + p_physical_device_group_count: *mut u32, + p_physical_device_group_properties: *mut PhysicalDeviceGroupProperties, +) -> Result; +pub struct KhrDeviceGroupCreationFn { + pub enumerate_physical_device_groups_khr: extern "system" fn( + instance: Instance, + p_physical_device_group_count: *mut u32, + p_physical_device_group_properties: *mut PhysicalDeviceGroupProperties, + ) -> Result, +} unsafe impl Send for KhrDeviceGroupCreationFn {} unsafe impl Sync for KhrDeviceGroupCreationFn {} impl ::std::clone::Clone for KhrDeviceGroupCreationFn { fn clone(&self) -> Self { - KhrDeviceGroupCreationFn {} + KhrDeviceGroupCreationFn { + enumerate_physical_device_groups_khr: self.enumerate_physical_device_groups_khr, + } } } impl KhrDeviceGroupCreationFn { @@ -49036,8 +61442,56 @@ impl KhrDeviceGroupCreationFn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrDeviceGroupCreationFn {} + KhrDeviceGroupCreationFn { + enumerate_physical_device_groups_khr: unsafe { 
+ extern "system" fn enumerate_physical_device_groups_khr( + _instance: Instance, + _p_physical_device_group_count: *mut u32, + _p_physical_device_group_properties: *mut PhysicalDeviceGroupProperties, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(enumerate_physical_device_groups_khr) + )) + } + let raw_name = stringify!(vkEnumeratePhysicalDeviceGroupsKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + enumerate_physical_device_groups_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn enumerate_physical_device_groups_khr( + &self, + instance: Instance, + p_physical_device_group_count: *mut u32, + p_physical_device_group_properties: *mut PhysicalDeviceGroupProperties, + ) -> Result { + (self.enumerate_physical_device_groups_khr)( + instance, + p_physical_device_group_count, + p_physical_device_group_properties, + ) + } +} +#[doc = "Generated from \'VK_KHR_device_group_creation\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR: Self = + StructureType::PHYSICAL_DEVICE_GROUP_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_device_group_creation\'"] +impl StructureType { + pub const DEVICE_GROUP_DEVICE_CREATE_INFO_KHR: Self = + StructureType::DEVICE_GROUP_DEVICE_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_device_group_creation\'"] +impl MemoryHeapFlags { + pub const MULTI_INSTANCE_KHR: Self = MemoryHeapFlags::MULTI_INSTANCE; } impl KhrExternalMemoryCapabilitiesFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49045,12 +61499,27 @@ impl KhrExternalMemoryCapabilitiesFn { .expect("Wrong extension string") } } -pub struct KhrExternalMemoryCapabilitiesFn {} +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceExternalBufferProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_external_buffer_info: *const PhysicalDeviceExternalBufferInfo, + p_external_buffer_properties: *mut 
ExternalBufferProperties, +) -> c_void; +pub struct KhrExternalMemoryCapabilitiesFn { + pub get_physical_device_external_buffer_properties_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_external_buffer_info: *const PhysicalDeviceExternalBufferInfo, + p_external_buffer_properties: *mut ExternalBufferProperties, + ) -> c_void, +} unsafe impl Send for KhrExternalMemoryCapabilitiesFn {} unsafe impl Sync for KhrExternalMemoryCapabilitiesFn {} impl ::std::clone::Clone for KhrExternalMemoryCapabilitiesFn { fn clone(&self) -> Self { - KhrExternalMemoryCapabilitiesFn {} + KhrExternalMemoryCapabilitiesFn { + get_physical_device_external_buffer_properties_khr: self + .get_physical_device_external_buffer_properties_khr, + } } } impl KhrExternalMemoryCapabilitiesFn { @@ -49058,8 +61527,116 @@ impl KhrExternalMemoryCapabilitiesFn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrExternalMemoryCapabilitiesFn {} + KhrExternalMemoryCapabilitiesFn { + get_physical_device_external_buffer_properties_khr: unsafe { + extern "system" fn get_physical_device_external_buffer_properties_khr( + _physical_device: PhysicalDevice, + _p_external_buffer_info: *const PhysicalDeviceExternalBufferInfo, + _p_external_buffer_properties: *mut ExternalBufferProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_external_buffer_properties_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceExternalBufferPropertiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_external_buffer_properties_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn get_physical_device_external_buffer_properties_khr( + &self, + physical_device: PhysicalDevice, + p_external_buffer_info: *const PhysicalDeviceExternalBufferInfo, + p_external_buffer_properties: *mut ExternalBufferProperties, + ) -> c_void { + 
(self.get_physical_device_external_buffer_properties_khr)( + physical_device, + p_external_buffer_info, + p_external_buffer_properties, + ) + } +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR: Self = + StructureType::PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl StructureType { + pub const EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR: Self = + StructureType::EXTERNAL_IMAGE_FORMAT_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR: Self = + StructureType::PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl StructureType { + pub const EXTERNAL_BUFFER_PROPERTIES_KHR: Self = StructureType::EXTERNAL_BUFFER_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_ID_PROPERTIES_KHR: Self = + StructureType::PHYSICAL_DEVICE_ID_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_KHR: Self = + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KHR: Self = + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_KHR: Self = + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl 
ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KHR: Self = + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_KHR: Self = + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_KHR: Self = + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl ExternalMemoryHandleTypeFlags { + pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_KHR: Self = + ExternalMemoryHandleTypeFlags::EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl ExternalMemoryFeatureFlags { + pub const EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_KHR: Self = + ExternalMemoryFeatureFlags::EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl ExternalMemoryFeatureFlags { + pub const EXTERNAL_MEMORY_FEATURE_EXPORTABLE_KHR: Self = + ExternalMemoryFeatureFlags::EXTERNAL_MEMORY_FEATURE_EXPORTABLE; +} +#[doc = "Generated from \'VK_KHR_external_memory_capabilities\'"] +impl ExternalMemoryFeatureFlags { + pub const EXTERNAL_MEMORY_FEATURE_IMPORTABLE_KHR: Self = + ExternalMemoryFeatureFlags::EXTERNAL_MEMORY_FEATURE_IMPORTABLE; } impl KhrExternalMemoryFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49083,6 +61660,24 @@ impl KhrExternalMemoryFn { KhrExternalMemoryFn {} } } +#[doc = "Generated from \'VK_KHR_external_memory\'"] +impl StructureType { + pub const EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR: Self = + StructureType::EXTERNAL_MEMORY_BUFFER_CREATE_INFO; +} +#[doc = "Generated from 
\'VK_KHR_external_memory\'"] +impl StructureType { + pub const EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR: Self = + StructureType::EXTERNAL_MEMORY_IMAGE_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_external_memory\'"] +impl StructureType { + pub const EXPORT_MEMORY_ALLOCATE_INFO_KHR: Self = StructureType::EXPORT_MEMORY_ALLOCATE_INFO; +} +#[doc = "Generated from \'VK_KHR_external_memory\'"] +impl Result { + pub const ERROR_INVALID_EXTERNAL_HANDLE_KHR: Self = Result::ERROR_INVALID_EXTERNAL_HANDLE; +} impl KhrExternalMemoryWin32Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_memory_win32\0") @@ -49174,7 +61769,7 @@ impl KhrExternalMemoryWin32Fn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_memory_win32_handle_khr( &self, device: Device, @@ -49183,7 +61778,7 @@ impl KhrExternalMemoryWin32Fn { ) -> Result { (self.get_memory_win32_handle_khr)(device, p_get_win32_handle_info, p_handle) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_memory_win32_handle_properties_khr( &self, device: Device, @@ -49201,19 +61796,19 @@ impl KhrExternalMemoryWin32Fn { } #[doc = "Generated from \'VK_KHR_external_memory_win32\'"] impl StructureType { - pub const IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000073000); + pub const IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR: Self = StructureType(1_000_073_000); } #[doc = "Generated from \'VK_KHR_external_memory_win32\'"] impl StructureType { - pub const EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000073001); + pub const EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR: Self = StructureType(1_000_073_001); } #[doc = "Generated from \'VK_KHR_external_memory_win32\'"] impl StructureType { - pub const MEMORY_WIN32_HANDLE_PROPERTIES_KHR: Self = StructureType(1000073002); + pub const MEMORY_WIN32_HANDLE_PROPERTIES_KHR: Self = StructureType(1_000_073_002); } #[doc = "Generated from \'VK_KHR_external_memory_win32\'"] impl StructureType { - pub const 
MEMORY_GET_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000073003); + pub const MEMORY_GET_WIN32_HANDLE_INFO_KHR: Self = StructureType(1_000_073_003); } impl KhrExternalMemoryFdFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49303,7 +61898,7 @@ impl KhrExternalMemoryFdFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_memory_fd_khr( &self, device: Device, @@ -49312,7 +61907,7 @@ impl KhrExternalMemoryFdFn { ) -> Result { (self.get_memory_fd_khr)(device, p_get_fd_info, p_fd) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_memory_fd_properties_khr( &self, device: Device, @@ -49325,15 +61920,15 @@ impl KhrExternalMemoryFdFn { } #[doc = "Generated from \'VK_KHR_external_memory_fd\'"] impl StructureType { - pub const IMPORT_MEMORY_FD_INFO_KHR: Self = StructureType(1000074000); + pub const IMPORT_MEMORY_FD_INFO_KHR: Self = StructureType(1_000_074_000); } #[doc = "Generated from \'VK_KHR_external_memory_fd\'"] impl StructureType { - pub const MEMORY_FD_PROPERTIES_KHR: Self = StructureType(1000074001); + pub const MEMORY_FD_PROPERTIES_KHR: Self = StructureType(1_000_074_001); } #[doc = "Generated from \'VK_KHR_external_memory_fd\'"] impl StructureType { - pub const MEMORY_GET_FD_INFO_KHR: Self = StructureType(1000074002); + pub const MEMORY_GET_FD_INFO_KHR: Self = StructureType(1_000_074_002); } impl KhrWin32KeyedMutexFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49359,7 +61954,7 @@ impl KhrWin32KeyedMutexFn { } #[doc = "Generated from \'VK_KHR_win32_keyed_mutex\'"] impl StructureType { - pub const WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR: Self = StructureType(1000075000); + pub const WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR: Self = StructureType(1_000_075_000); } impl KhrExternalSemaphoreCapabilitiesFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49367,12 +61962,27 @@ impl KhrExternalSemaphoreCapabilitiesFn { .expect("Wrong extension string") } } -pub struct KhrExternalSemaphoreCapabilitiesFn {} +#[allow(non_camel_case_types)] +pub type 
PFN_vkGetPhysicalDeviceExternalSemaphoreProperties = extern "system" fn( + physical_device: PhysicalDevice, + p_external_semaphore_info: *const PhysicalDeviceExternalSemaphoreInfo, + p_external_semaphore_properties: *mut ExternalSemaphoreProperties, +) -> c_void; +pub struct KhrExternalSemaphoreCapabilitiesFn { + pub get_physical_device_external_semaphore_properties_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_external_semaphore_info: *const PhysicalDeviceExternalSemaphoreInfo, + p_external_semaphore_properties: *mut ExternalSemaphoreProperties, + ) -> c_void, +} unsafe impl Send for KhrExternalSemaphoreCapabilitiesFn {} unsafe impl Sync for KhrExternalSemaphoreCapabilitiesFn {} impl ::std::clone::Clone for KhrExternalSemaphoreCapabilitiesFn { fn clone(&self) -> Self { - KhrExternalSemaphoreCapabilitiesFn {} + KhrExternalSemaphoreCapabilitiesFn { + get_physical_device_external_semaphore_properties_khr: self + .get_physical_device_external_semaphore_properties_khr, + } } } impl KhrExternalSemaphoreCapabilitiesFn { @@ -49380,8 +61990,87 @@ impl KhrExternalSemaphoreCapabilitiesFn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrExternalSemaphoreCapabilitiesFn {} + KhrExternalSemaphoreCapabilitiesFn { + get_physical_device_external_semaphore_properties_khr: unsafe { + extern "system" fn get_physical_device_external_semaphore_properties_khr( + _physical_device: PhysicalDevice, + _p_external_semaphore_info: *const PhysicalDeviceExternalSemaphoreInfo, + _p_external_semaphore_properties: *mut ExternalSemaphoreProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_external_semaphore_properties_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceExternalSemaphorePropertiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_external_semaphore_properties_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + 
#[doc = ""] + pub unsafe fn get_physical_device_external_semaphore_properties_khr( + &self, + physical_device: PhysicalDevice, + p_external_semaphore_info: *const PhysicalDeviceExternalSemaphoreInfo, + p_external_semaphore_properties: *mut ExternalSemaphoreProperties, + ) -> c_void { + (self.get_physical_device_external_semaphore_properties_khr)( + physical_device, + p_external_semaphore_info, + p_external_semaphore_properties, + ) + } +} +#[doc = "Generated from \'VK_KHR_external_semaphore_capabilities\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR: Self = + StructureType::PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO; +} +#[doc = "Generated from \'VK_KHR_external_semaphore_capabilities\'"] +impl StructureType { + pub const EXTERNAL_SEMAPHORE_PROPERTIES_KHR: Self = + StructureType::EXTERNAL_SEMAPHORE_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_external_semaphore_capabilities\'"] +impl ExternalSemaphoreHandleTypeFlags { + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_KHR: Self = + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD; +} +#[doc = "Generated from \'VK_KHR_external_semaphore_capabilities\'"] +impl ExternalSemaphoreHandleTypeFlags { + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KHR: Self = + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32; +} +#[doc = "Generated from \'VK_KHR_external_semaphore_capabilities\'"] +impl ExternalSemaphoreHandleTypeFlags { + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_KHR: Self = + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT; +} +#[doc = "Generated from \'VK_KHR_external_semaphore_capabilities\'"] +impl ExternalSemaphoreHandleTypeFlags { + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_KHR: Self = + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE; +} +#[doc = "Generated from \'VK_KHR_external_semaphore_capabilities\'"] +impl 
ExternalSemaphoreHandleTypeFlags { + pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_KHR: Self = + ExternalSemaphoreHandleTypeFlags::EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD; +} +#[doc = "Generated from \'VK_KHR_external_semaphore_capabilities\'"] +impl ExternalSemaphoreFeatureFlags { + pub const EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_KHR: Self = + ExternalSemaphoreFeatureFlags::EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE; +} +#[doc = "Generated from \'VK_KHR_external_semaphore_capabilities\'"] +impl ExternalSemaphoreFeatureFlags { + pub const EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_KHR: Self = + ExternalSemaphoreFeatureFlags::EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE; } impl KhrExternalSemaphoreFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49405,6 +62094,14 @@ impl KhrExternalSemaphoreFn { KhrExternalSemaphoreFn {} } } +#[doc = "Generated from \'VK_KHR_external_semaphore\'"] +impl StructureType { + pub const EXPORT_SEMAPHORE_CREATE_INFO_KHR: Self = StructureType::EXPORT_SEMAPHORE_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_external_semaphore\'"] +impl SemaphoreImportFlags { + pub const TEMPORARY_KHR: Self = SemaphoreImportFlags::TEMPORARY; +} impl KhrExternalSemaphoreWin32Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_semaphore_win32\0") @@ -49490,7 +62187,7 @@ impl KhrExternalSemaphoreWin32Fn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn import_semaphore_win32_handle_khr( &self, device: Device, @@ -49498,7 +62195,7 @@ impl KhrExternalSemaphoreWin32Fn { ) -> Result { (self.import_semaphore_win32_handle_khr)(device, p_import_semaphore_win32_handle_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_semaphore_win32_handle_khr( &self, device: Device, @@ -49510,19 +62207,19 @@ impl KhrExternalSemaphoreWin32Fn { } #[doc = "Generated from \'VK_KHR_external_semaphore_win32\'"] impl StructureType { - pub const IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000078000); + pub const 
IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1_000_078_000); } #[doc = "Generated from \'VK_KHR_external_semaphore_win32\'"] impl StructureType { - pub const EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000078001); + pub const EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1_000_078_001); } #[doc = "Generated from \'VK_KHR_external_semaphore_win32\'"] impl StructureType { - pub const D3D12_FENCE_SUBMIT_INFO_KHR: Self = StructureType(1000078002); + pub const D3D12_FENCE_SUBMIT_INFO_KHR: Self = StructureType(1_000_078_002); } #[doc = "Generated from \'VK_KHR_external_semaphore_win32\'"] impl StructureType { - pub const SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000078003); + pub const SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR: Self = StructureType(1_000_078_003); } impl KhrExternalSemaphoreFdFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49606,7 +62303,7 @@ impl KhrExternalSemaphoreFdFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn import_semaphore_fd_khr( &self, device: Device, @@ -49614,7 +62311,7 @@ impl KhrExternalSemaphoreFdFn { ) -> Result { (self.import_semaphore_fd_khr)(device, p_import_semaphore_fd_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_semaphore_fd_khr( &self, device: Device, @@ -49626,11 +62323,11 @@ impl KhrExternalSemaphoreFdFn { } #[doc = "Generated from \'VK_KHR_external_semaphore_fd\'"] impl StructureType { - pub const IMPORT_SEMAPHORE_FD_INFO_KHR: Self = StructureType(1000079000); + pub const IMPORT_SEMAPHORE_FD_INFO_KHR: Self = StructureType(1_000_079_000); } #[doc = "Generated from \'VK_KHR_external_semaphore_fd\'"] impl StructureType { - pub const SEMAPHORE_GET_FD_INFO_KHR: Self = StructureType(1000079001); + pub const SEMAPHORE_GET_FD_INFO_KHR: Self = StructureType(1_000_079_001); } impl KhrPushDescriptorFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49736,7 +62433,7 @@ impl KhrPushDescriptorFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn 
cmd_push_descriptor_set_khr( &self, command_buffer: CommandBuffer, @@ -49755,7 +62452,7 @@ impl KhrPushDescriptorFn { p_descriptor_writes, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_push_descriptor_set_with_template_khr( &self, command_buffer: CommandBuffer, @@ -49775,12 +62472,16 @@ impl KhrPushDescriptorFn { } #[doc = "Generated from \'VK_KHR_push_descriptor\'"] impl StructureType { - pub const PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: Self = StructureType(1000080000); + pub const PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: Self = StructureType(1_000_080_000); } #[doc = "Generated from \'VK_KHR_push_descriptor\'"] impl DescriptorSetLayoutCreateFlags { pub const PUSH_DESCRIPTOR_KHR: Self = DescriptorSetLayoutCreateFlags(0b1); } +#[doc = "Generated from \'VK_KHR_push_descriptor\'"] +impl DescriptorUpdateTemplateType { + pub const PUSH_DESCRIPTORS_KHR: Self = DescriptorUpdateTemplateType(1); +} impl ExtConditionalRenderingFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_conditional_rendering\0") @@ -49858,7 +62559,7 @@ impl ExtConditionalRenderingFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_begin_conditional_rendering_ext( &self, command_buffer: CommandBuffer, @@ -49866,7 +62567,7 @@ impl ExtConditionalRenderingFn { ) -> c_void { (self.cmd_begin_conditional_rendering_ext)(command_buffer, p_conditional_rendering_begin) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_end_conditional_rendering_ext( &self, command_buffer: CommandBuffer, @@ -49877,27 +62578,28 @@ impl ExtConditionalRenderingFn { #[doc = "Generated from \'VK_EXT_conditional_rendering\'"] impl StructureType { pub const COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT: Self = - StructureType(1000081000); + StructureType(1_000_081_000); } #[doc = "Generated from \'VK_EXT_conditional_rendering\'"] impl StructureType { - pub const PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: Self = StructureType(1000081001); + pub 
const PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: Self = + StructureType(1_000_081_001); } #[doc = "Generated from \'VK_EXT_conditional_rendering\'"] impl StructureType { - pub const CONDITIONAL_RENDERING_BEGIN_INFO_EXT: Self = StructureType(1000081002); + pub const CONDITIONAL_RENDERING_BEGIN_INFO_EXT: Self = StructureType(1_000_081_002); } #[doc = "Generated from \'VK_EXT_conditional_rendering\'"] impl AccessFlags { - pub const CONDITIONAL_RENDERING_READ_EXT: Self = AccessFlags(0b100000000000000000000); + pub const CONDITIONAL_RENDERING_READ_EXT: Self = AccessFlags(0b1_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_conditional_rendering\'"] impl BufferUsageFlags { - pub const CONDITIONAL_RENDERING_EXT: Self = BufferUsageFlags(0b1000000000); + pub const CONDITIONAL_RENDERING_EXT: Self = BufferUsageFlags(0b10_0000_0000); } #[doc = "Generated from \'VK_EXT_conditional_rendering\'"] impl PipelineStageFlags { - pub const CONDITIONAL_RENDERING_EXT: Self = PipelineStageFlags(0b1000000000000000000); + pub const CONDITIONAL_RENDERING_EXT: Self = PipelineStageFlags(0b100_0000_0000_0000_0000); } impl KhrShaderFloat16Int8Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49923,7 +62625,13 @@ impl KhrShaderFloat16Int8Fn { } #[doc = "Generated from \'VK_KHR_shader_float16_int8\'"] impl StructureType { - pub const PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: Self = StructureType(1000082000); + pub const PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES; +} +#[doc = "Generated from \'VK_KHR_shader_float16_int8\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES; } impl Khr16bitStorageFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49947,6 +62655,11 @@ impl Khr16bitStorageFn { Khr16bitStorageFn {} } } +#[doc = "Generated from \'VK_KHR_16bit_storage\'"] +impl StructureType { + pub const 
PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES; +} impl KhrIncrementalPresentFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_incremental_present\0") @@ -49971,7 +62684,7 @@ impl KhrIncrementalPresentFn { } #[doc = "Generated from \'VK_KHR_incremental_present\'"] impl StructureType { - pub const PRESENT_REGIONS_KHR: Self = StructureType(1000084000); + pub const PRESENT_REGIONS_KHR: Self = StructureType(1_000_084_000); } impl KhrDescriptorUpdateTemplateFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -49979,7 +62692,44 @@ impl KhrDescriptorUpdateTemplateFn { .expect("Wrong extension string") } } +#[allow(non_camel_case_types)] +pub type PFN_vkCreateDescriptorUpdateTemplate = extern "system" fn( + device: Device, + p_create_info: *const DescriptorUpdateTemplateCreateInfo, + p_allocator: *const AllocationCallbacks, + p_descriptor_update_template: *mut DescriptorUpdateTemplate, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyDescriptorUpdateTemplate = extern "system" fn( + device: Device, + descriptor_update_template: DescriptorUpdateTemplate, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkUpdateDescriptorSetWithTemplate = extern "system" fn( + device: Device, + descriptor_set: DescriptorSet, + descriptor_update_template: DescriptorUpdateTemplate, + p_data: *const c_void, +) -> c_void; pub struct KhrDescriptorUpdateTemplateFn { + pub create_descriptor_update_template_khr: extern "system" fn( + device: Device, + p_create_info: *const DescriptorUpdateTemplateCreateInfo, + p_allocator: *const AllocationCallbacks, + p_descriptor_update_template: *mut DescriptorUpdateTemplate, + ) -> Result, + pub destroy_descriptor_update_template_khr: extern "system" fn( + device: Device, + descriptor_update_template: DescriptorUpdateTemplate, + p_allocator: *const AllocationCallbacks, + ) -> c_void, 
+ pub update_descriptor_set_with_template_khr: extern "system" fn( + device: Device, + descriptor_set: DescriptorSet, + descriptor_update_template: DescriptorUpdateTemplate, + p_data: *const c_void, + ) -> c_void, pub cmd_push_descriptor_set_with_template_khr: extern "system" fn( command_buffer: CommandBuffer, descriptor_update_template: DescriptorUpdateTemplate, @@ -49993,6 +62743,9 @@ unsafe impl Sync for KhrDescriptorUpdateTemplateFn {} impl ::std::clone::Clone for KhrDescriptorUpdateTemplateFn { fn clone(&self) -> Self { KhrDescriptorUpdateTemplateFn { + create_descriptor_update_template_khr: self.create_descriptor_update_template_khr, + destroy_descriptor_update_template_khr: self.destroy_descriptor_update_template_khr, + update_descriptor_set_with_template_khr: self.update_descriptor_set_with_template_khr, cmd_push_descriptor_set_with_template_khr: self .cmd_push_descriptor_set_with_template_khr, } @@ -50004,6 +62757,68 @@ impl KhrDescriptorUpdateTemplateFn { F: FnMut(&::std::ffi::CStr) -> *const c_void, { KhrDescriptorUpdateTemplateFn { + create_descriptor_update_template_khr: unsafe { + extern "system" fn create_descriptor_update_template_khr( + _device: Device, + _p_create_info: *const DescriptorUpdateTemplateCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_descriptor_update_template: *mut DescriptorUpdateTemplate, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_descriptor_update_template_khr) + )) + } + let raw_name = stringify!(vkCreateDescriptorUpdateTemplateKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_descriptor_update_template_khr + } else { + ::std::mem::transmute(val) + } + }, + destroy_descriptor_update_template_khr: unsafe { + extern "system" fn destroy_descriptor_update_template_khr( + _device: Device, + _descriptor_update_template: DescriptorUpdateTemplate, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( 
+ "Unable to load ", + stringify!(destroy_descriptor_update_template_khr) + )) + } + let raw_name = stringify!(vkDestroyDescriptorUpdateTemplateKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_descriptor_update_template_khr + } else { + ::std::mem::transmute(val) + } + }, + update_descriptor_set_with_template_khr: unsafe { + extern "system" fn update_descriptor_set_with_template_khr( + _device: Device, + _descriptor_set: DescriptorSet, + _descriptor_update_template: DescriptorUpdateTemplate, + _p_data: *const c_void, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(update_descriptor_set_with_template_khr) + )) + } + let raw_name = stringify!(vkUpdateDescriptorSetWithTemplateKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + update_descriptor_set_with_template_khr + } else { + ::std::mem::transmute(val) + } + }, cmd_push_descriptor_set_with_template_khr: unsafe { extern "system" fn cmd_push_descriptor_set_with_template_khr( _command_buffer: CommandBuffer, @@ -50028,7 +62843,50 @@ impl KhrDescriptorUpdateTemplateFn { }, } } - #[doc = ""] + #[doc = ""] + pub unsafe fn create_descriptor_update_template_khr( + &self, + device: Device, + p_create_info: *const DescriptorUpdateTemplateCreateInfo, + p_allocator: *const AllocationCallbacks, + p_descriptor_update_template: *mut DescriptorUpdateTemplate, + ) -> Result { + (self.create_descriptor_update_template_khr)( + device, + p_create_info, + p_allocator, + p_descriptor_update_template, + ) + } + #[doc = ""] + pub unsafe fn destroy_descriptor_update_template_khr( + &self, + device: Device, + descriptor_update_template: DescriptorUpdateTemplate, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_descriptor_update_template_khr)( + device, + descriptor_update_template, + p_allocator, + ) + } + #[doc = ""] + pub unsafe fn 
update_descriptor_set_with_template_khr( + &self, + device: Device, + descriptor_set: DescriptorSet, + descriptor_update_template: DescriptorUpdateTemplate, + p_data: *const c_void, + ) -> c_void { + (self.update_descriptor_set_with_template_khr)( + device, + descriptor_set, + descriptor_update_template, + p_data, + ) + } + #[doc = ""] pub unsafe fn cmd_push_descriptor_set_with_template_khr( &self, command_buffer: CommandBuffer, @@ -50046,137 +62904,36 @@ impl KhrDescriptorUpdateTemplateFn { ) } } +#[doc = "Generated from \'VK_KHR_descriptor_update_template\'"] +impl StructureType { + pub const DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR: Self = + StructureType::DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_descriptor_update_template\'"] +impl ObjectType { + pub const DESCRIPTOR_UPDATE_TEMPLATE_KHR: Self = ObjectType::DESCRIPTOR_UPDATE_TEMPLATE; +} +#[doc = "Generated from \'VK_KHR_descriptor_update_template\'"] +impl DescriptorUpdateTemplateType { + pub const DESCRIPTOR_SET_KHR: Self = DescriptorUpdateTemplateType::DESCRIPTOR_SET; +} +#[doc = "Generated from \'VK_KHR_descriptor_update_template\'"] +impl DebugReportObjectTypeEXT { + pub const DESCRIPTOR_UPDATE_TEMPLATE_KHR: Self = + DebugReportObjectTypeEXT::DESCRIPTOR_UPDATE_TEMPLATE; +} impl NvxDeviceGeneratedCommandsFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_device_generated_commands\0") .expect("Wrong extension string") } } -#[allow(non_camel_case_types)] -pub type PFN_vkCmdProcessCommandsNVX = extern "system" fn( - command_buffer: CommandBuffer, - p_process_commands_info: *const CmdProcessCommandsInfoNVX, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkCmdReserveSpaceForCommandsNVX = extern "system" fn( - command_buffer: CommandBuffer, - p_reserve_space_info: *const CmdReserveSpaceForCommandsInfoNVX, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkCreateIndirectCommandsLayoutNVX = extern "system" fn( 
- device: Device, - p_create_info: *const IndirectCommandsLayoutCreateInfoNVX, - p_allocator: *const AllocationCallbacks, - p_indirect_commands_layout: *mut IndirectCommandsLayoutNVX, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkDestroyIndirectCommandsLayoutNVX = extern "system" fn( - device: Device, - indirect_commands_layout: IndirectCommandsLayoutNVX, - p_allocator: *const AllocationCallbacks, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkCreateObjectTableNVX = extern "system" fn( - device: Device, - p_create_info: *const ObjectTableCreateInfoNVX, - p_allocator: *const AllocationCallbacks, - p_object_table: *mut ObjectTableNVX, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkDestroyObjectTableNVX = extern "system" fn( - device: Device, - object_table: ObjectTableNVX, - p_allocator: *const AllocationCallbacks, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkRegisterObjectsNVX = extern "system" fn( - device: Device, - object_table: ObjectTableNVX, - object_count: u32, - pp_object_table_entries: *const *const ObjectTableEntryNVX, - p_object_indices: *const u32, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkUnregisterObjectsNVX = extern "system" fn( - device: Device, - object_table: ObjectTableNVX, - object_count: u32, - p_object_entry_types: *const ObjectEntryTypeNVX, - p_object_indices: *const u32, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX = extern "system" fn( - physical_device: PhysicalDevice, - p_features: *mut DeviceGeneratedCommandsFeaturesNVX, - p_limits: *mut DeviceGeneratedCommandsLimitsNVX, -) -> c_void; -pub struct NvxDeviceGeneratedCommandsFn { - pub cmd_process_commands_nvx: extern "system" fn( - command_buffer: CommandBuffer, - p_process_commands_info: *const CmdProcessCommandsInfoNVX, - ) -> c_void, - pub cmd_reserve_space_for_commands_nvx: extern "system" fn( - command_buffer: CommandBuffer, - 
p_reserve_space_info: *const CmdReserveSpaceForCommandsInfoNVX, - ) -> c_void, - pub create_indirect_commands_layout_nvx: extern "system" fn( - device: Device, - p_create_info: *const IndirectCommandsLayoutCreateInfoNVX, - p_allocator: *const AllocationCallbacks, - p_indirect_commands_layout: *mut IndirectCommandsLayoutNVX, - ) -> Result, - pub destroy_indirect_commands_layout_nvx: extern "system" fn( - device: Device, - indirect_commands_layout: IndirectCommandsLayoutNVX, - p_allocator: *const AllocationCallbacks, - ) -> c_void, - pub create_object_table_nvx: extern "system" fn( - device: Device, - p_create_info: *const ObjectTableCreateInfoNVX, - p_allocator: *const AllocationCallbacks, - p_object_table: *mut ObjectTableNVX, - ) -> Result, - pub destroy_object_table_nvx: extern "system" fn( - device: Device, - object_table: ObjectTableNVX, - p_allocator: *const AllocationCallbacks, - ) -> c_void, - pub register_objects_nvx: extern "system" fn( - device: Device, - object_table: ObjectTableNVX, - object_count: u32, - pp_object_table_entries: *const *const ObjectTableEntryNVX, - p_object_indices: *const u32, - ) -> Result, - pub unregister_objects_nvx: extern "system" fn( - device: Device, - object_table: ObjectTableNVX, - object_count: u32, - p_object_entry_types: *const ObjectEntryTypeNVX, - p_object_indices: *const u32, - ) -> Result, - pub get_physical_device_generated_commands_properties_nvx: extern "system" fn( - physical_device: PhysicalDevice, - p_features: *mut DeviceGeneratedCommandsFeaturesNVX, - p_limits: *mut DeviceGeneratedCommandsLimitsNVX, - ) -> c_void, -} +pub struct NvxDeviceGeneratedCommandsFn {} unsafe impl Send for NvxDeviceGeneratedCommandsFn {} unsafe impl Sync for NvxDeviceGeneratedCommandsFn {} impl ::std::clone::Clone for NvxDeviceGeneratedCommandsFn { fn clone(&self) -> Self { - NvxDeviceGeneratedCommandsFn { - cmd_process_commands_nvx: self.cmd_process_commands_nvx, - cmd_reserve_space_for_commands_nvx: 
self.cmd_reserve_space_for_commands_nvx, - create_indirect_commands_layout_nvx: self.create_indirect_commands_layout_nvx, - destroy_indirect_commands_layout_nvx: self.destroy_indirect_commands_layout_nvx, - create_object_table_nvx: self.create_object_table_nvx, - destroy_object_table_nvx: self.destroy_object_table_nvx, - register_objects_nvx: self.register_objects_nvx, - unregister_objects_nvx: self.unregister_objects_nvx, - get_physical_device_generated_commands_properties_nvx: self - .get_physical_device_generated_commands_properties_nvx, - } + NvxDeviceGeneratedCommandsFn {} } } impl NvxDeviceGeneratedCommandsFn { @@ -50184,340 +62941,8 @@ impl NvxDeviceGeneratedCommandsFn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - NvxDeviceGeneratedCommandsFn { - cmd_process_commands_nvx: unsafe { - extern "system" fn cmd_process_commands_nvx( - _command_buffer: CommandBuffer, - _p_process_commands_info: *const CmdProcessCommandsInfoNVX, - ) -> c_void { - panic!(concat!( - "Unable to load ", - stringify!(cmd_process_commands_nvx) - )) - } - let raw_name = stringify!(vkCmdProcessCommandsNVX); - let cname = ::std::ffi::CString::new(raw_name).unwrap(); - let val = _f(&cname); - if val.is_null() { - cmd_process_commands_nvx - } else { - ::std::mem::transmute(val) - } - }, - cmd_reserve_space_for_commands_nvx: unsafe { - extern "system" fn cmd_reserve_space_for_commands_nvx( - _command_buffer: CommandBuffer, - _p_reserve_space_info: *const CmdReserveSpaceForCommandsInfoNVX, - ) -> c_void { - panic!(concat!( - "Unable to load ", - stringify!(cmd_reserve_space_for_commands_nvx) - )) - } - let raw_name = stringify!(vkCmdReserveSpaceForCommandsNVX); - let cname = ::std::ffi::CString::new(raw_name).unwrap(); - let val = _f(&cname); - if val.is_null() { - cmd_reserve_space_for_commands_nvx - } else { - ::std::mem::transmute(val) - } - }, - create_indirect_commands_layout_nvx: unsafe { - extern "system" fn create_indirect_commands_layout_nvx( - _device: Device, - 
_p_create_info: *const IndirectCommandsLayoutCreateInfoNVX, - _p_allocator: *const AllocationCallbacks, - _p_indirect_commands_layout: *mut IndirectCommandsLayoutNVX, - ) -> Result { - panic!(concat!( - "Unable to load ", - stringify!(create_indirect_commands_layout_nvx) - )) - } - let raw_name = stringify!(vkCreateIndirectCommandsLayoutNVX); - let cname = ::std::ffi::CString::new(raw_name).unwrap(); - let val = _f(&cname); - if val.is_null() { - create_indirect_commands_layout_nvx - } else { - ::std::mem::transmute(val) - } - }, - destroy_indirect_commands_layout_nvx: unsafe { - extern "system" fn destroy_indirect_commands_layout_nvx( - _device: Device, - _indirect_commands_layout: IndirectCommandsLayoutNVX, - _p_allocator: *const AllocationCallbacks, - ) -> c_void { - panic!(concat!( - "Unable to load ", - stringify!(destroy_indirect_commands_layout_nvx) - )) - } - let raw_name = stringify!(vkDestroyIndirectCommandsLayoutNVX); - let cname = ::std::ffi::CString::new(raw_name).unwrap(); - let val = _f(&cname); - if val.is_null() { - destroy_indirect_commands_layout_nvx - } else { - ::std::mem::transmute(val) - } - }, - create_object_table_nvx: unsafe { - extern "system" fn create_object_table_nvx( - _device: Device, - _p_create_info: *const ObjectTableCreateInfoNVX, - _p_allocator: *const AllocationCallbacks, - _p_object_table: *mut ObjectTableNVX, - ) -> Result { - panic!(concat!( - "Unable to load ", - stringify!(create_object_table_nvx) - )) - } - let raw_name = stringify!(vkCreateObjectTableNVX); - let cname = ::std::ffi::CString::new(raw_name).unwrap(); - let val = _f(&cname); - if val.is_null() { - create_object_table_nvx - } else { - ::std::mem::transmute(val) - } - }, - destroy_object_table_nvx: unsafe { - extern "system" fn destroy_object_table_nvx( - _device: Device, - _object_table: ObjectTableNVX, - _p_allocator: *const AllocationCallbacks, - ) -> c_void { - panic!(concat!( - "Unable to load ", - stringify!(destroy_object_table_nvx) - )) - } - let 
raw_name = stringify!(vkDestroyObjectTableNVX); - let cname = ::std::ffi::CString::new(raw_name).unwrap(); - let val = _f(&cname); - if val.is_null() { - destroy_object_table_nvx - } else { - ::std::mem::transmute(val) - } - }, - register_objects_nvx: unsafe { - extern "system" fn register_objects_nvx( - _device: Device, - _object_table: ObjectTableNVX, - _object_count: u32, - _pp_object_table_entries: *const *const ObjectTableEntryNVX, - _p_object_indices: *const u32, - ) -> Result { - panic!(concat!("Unable to load ", stringify!(register_objects_nvx))) - } - let raw_name = stringify!(vkRegisterObjectsNVX); - let cname = ::std::ffi::CString::new(raw_name).unwrap(); - let val = _f(&cname); - if val.is_null() { - register_objects_nvx - } else { - ::std::mem::transmute(val) - } - }, - unregister_objects_nvx: unsafe { - extern "system" fn unregister_objects_nvx( - _device: Device, - _object_table: ObjectTableNVX, - _object_count: u32, - _p_object_entry_types: *const ObjectEntryTypeNVX, - _p_object_indices: *const u32, - ) -> Result { - panic!(concat!( - "Unable to load ", - stringify!(unregister_objects_nvx) - )) - } - let raw_name = stringify!(vkUnregisterObjectsNVX); - let cname = ::std::ffi::CString::new(raw_name).unwrap(); - let val = _f(&cname); - if val.is_null() { - unregister_objects_nvx - } else { - ::std::mem::transmute(val) - } - }, - get_physical_device_generated_commands_properties_nvx: unsafe { - extern "system" fn get_physical_device_generated_commands_properties_nvx( - _physical_device: PhysicalDevice, - _p_features: *mut DeviceGeneratedCommandsFeaturesNVX, - _p_limits: *mut DeviceGeneratedCommandsLimitsNVX, - ) -> c_void { - panic!(concat!( - "Unable to load ", - stringify!(get_physical_device_generated_commands_properties_nvx) - )) - } - let raw_name = stringify!(vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX); - let cname = ::std::ffi::CString::new(raw_name).unwrap(); - let val = _f(&cname); - if val.is_null() { - 
get_physical_device_generated_commands_properties_nvx - } else { - ::std::mem::transmute(val) - } - }, - } + NvxDeviceGeneratedCommandsFn {} } - #[doc = ""] - pub unsafe fn cmd_process_commands_nvx( - &self, - command_buffer: CommandBuffer, - p_process_commands_info: *const CmdProcessCommandsInfoNVX, - ) -> c_void { - (self.cmd_process_commands_nvx)(command_buffer, p_process_commands_info) - } - #[doc = ""] - pub unsafe fn cmd_reserve_space_for_commands_nvx( - &self, - command_buffer: CommandBuffer, - p_reserve_space_info: *const CmdReserveSpaceForCommandsInfoNVX, - ) -> c_void { - (self.cmd_reserve_space_for_commands_nvx)(command_buffer, p_reserve_space_info) - } - #[doc = ""] - pub unsafe fn create_indirect_commands_layout_nvx( - &self, - device: Device, - p_create_info: *const IndirectCommandsLayoutCreateInfoNVX, - p_allocator: *const AllocationCallbacks, - p_indirect_commands_layout: *mut IndirectCommandsLayoutNVX, - ) -> Result { - (self.create_indirect_commands_layout_nvx)( - device, - p_create_info, - p_allocator, - p_indirect_commands_layout, - ) - } - #[doc = ""] - pub unsafe fn destroy_indirect_commands_layout_nvx( - &self, - device: Device, - indirect_commands_layout: IndirectCommandsLayoutNVX, - p_allocator: *const AllocationCallbacks, - ) -> c_void { - (self.destroy_indirect_commands_layout_nvx)(device, indirect_commands_layout, p_allocator) - } - #[doc = ""] - pub unsafe fn create_object_table_nvx( - &self, - device: Device, - p_create_info: *const ObjectTableCreateInfoNVX, - p_allocator: *const AllocationCallbacks, - p_object_table: *mut ObjectTableNVX, - ) -> Result { - (self.create_object_table_nvx)(device, p_create_info, p_allocator, p_object_table) - } - #[doc = ""] - pub unsafe fn destroy_object_table_nvx( - &self, - device: Device, - object_table: ObjectTableNVX, - p_allocator: *const AllocationCallbacks, - ) -> c_void { - (self.destroy_object_table_nvx)(device, object_table, p_allocator) - } - #[doc = ""] - pub unsafe fn register_objects_nvx( 
- &self, - device: Device, - object_table: ObjectTableNVX, - object_count: u32, - pp_object_table_entries: *const *const ObjectTableEntryNVX, - p_object_indices: *const u32, - ) -> Result { - (self.register_objects_nvx)( - device, - object_table, - object_count, - pp_object_table_entries, - p_object_indices, - ) - } - #[doc = ""] - pub unsafe fn unregister_objects_nvx( - &self, - device: Device, - object_table: ObjectTableNVX, - object_count: u32, - p_object_entry_types: *const ObjectEntryTypeNVX, - p_object_indices: *const u32, - ) -> Result { - (self.unregister_objects_nvx)( - device, - object_table, - object_count, - p_object_entry_types, - p_object_indices, - ) - } - #[doc = ""] - pub unsafe fn get_physical_device_generated_commands_properties_nvx( - &self, - physical_device: PhysicalDevice, - p_features: *mut DeviceGeneratedCommandsFeaturesNVX, - p_limits: *mut DeviceGeneratedCommandsLimitsNVX, - ) -> c_void { - (self.get_physical_device_generated_commands_properties_nvx)( - physical_device, - p_features, - p_limits, - ) - } -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl StructureType { - pub const OBJECT_TABLE_CREATE_INFO_NVX: Self = StructureType(1000086000); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl StructureType { - pub const INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX: Self = StructureType(1000086001); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl StructureType { - pub const CMD_PROCESS_COMMANDS_INFO_NVX: Self = StructureType(1000086002); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl StructureType { - pub const CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX: Self = StructureType(1000086003); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl StructureType { - pub const DEVICE_GENERATED_COMMANDS_LIMITS_NVX: Self = StructureType(1000086004); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl StructureType { - pub 
const DEVICE_GENERATED_COMMANDS_FEATURES_NVX: Self = StructureType(1000086005); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl PipelineStageFlags { - pub const COMMAND_PROCESS_NVX: Self = PipelineStageFlags(0b100000000000000000); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl AccessFlags { - pub const COMMAND_PROCESS_READ_NVX: Self = AccessFlags(0b100000000000000000); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl AccessFlags { - pub const COMMAND_PROCESS_WRITE_NVX: Self = AccessFlags(0b1000000000000000000); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl ObjectType { - pub const OBJECT_TABLE_NVX: Self = ObjectType(1000086000); -} -#[doc = "Generated from \'VK_NVX_device_generated_commands\'"] -impl ObjectType { - pub const INDIRECT_COMMANDS_LAYOUT_NVX: Self = ObjectType(1000086001); } impl NvClipSpaceWScalingFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -50578,7 +63003,7 @@ impl NvClipSpaceWScalingFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_viewport_w_scaling_nv( &self, command_buffer: CommandBuffer, @@ -50596,11 +63021,11 @@ impl NvClipSpaceWScalingFn { } #[doc = "Generated from \'VK_NV_clip_space_w_scaling\'"] impl StructureType { - pub const PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV: Self = StructureType(1000087000); + pub const PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV: Self = StructureType(1_000_087_000); } #[doc = "Generated from \'VK_NV_clip_space_w_scaling\'"] impl DynamicState { - pub const VIEWPORT_W_SCALING_NV: Self = DynamicState(1000087000); + pub const VIEWPORT_W_SCALING_NV: Self = DynamicState(1_000_087_000); } impl ExtDirectModeDisplayFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -50648,7 +63073,7 @@ impl ExtDirectModeDisplayFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn release_display_ext( &self, physical_device: PhysicalDevice, @@ -50748,7 +63173,7 @@ impl ExtAcquireXlibDisplayFn { }, } } - 
#[doc = ""] + #[doc = ""] pub unsafe fn acquire_xlib_display_ext( &self, physical_device: PhysicalDevice, @@ -50757,7 +63182,7 @@ impl ExtAcquireXlibDisplayFn { ) -> Result { (self.acquire_xlib_display_ext)(physical_device, dpy, display) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_rand_r_output_display_ext( &self, physical_device: PhysicalDevice, @@ -50825,7 +63250,7 @@ impl ExtDisplaySurfaceCounterFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_capabilities2_ext( &self, physical_device: PhysicalDevice, @@ -50841,7 +63266,11 @@ impl ExtDisplaySurfaceCounterFn { } #[doc = "Generated from \'VK_EXT_display_surface_counter\'"] impl StructureType { - pub const SURFACE_CAPABILITIES_2_EXT: Self = StructureType(1000090000); + pub const SURFACE_CAPABILITIES_2_EXT: Self = StructureType(1_000_090_000); +} +#[doc = "Generated from \'VK_EXT_display_surface_counter\'"] +impl StructureType { + pub const SURFACE_CAPABILITIES2_EXT: Self = StructureType::SURFACE_CAPABILITIES_2_EXT; } impl ExtDisplayControlFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -51007,7 +63436,7 @@ impl ExtDisplayControlFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn display_power_control_ext( &self, device: Device, @@ -51016,7 +63445,7 @@ impl ExtDisplayControlFn { ) -> Result { (self.display_power_control_ext)(device, display, p_display_power_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn register_device_event_ext( &self, device: Device, @@ -51026,7 +63455,7 @@ impl ExtDisplayControlFn { ) -> Result { (self.register_device_event_ext)(device, p_device_event_info, p_allocator, p_fence) } - #[doc = ""] + #[doc = ""] pub unsafe fn register_display_event_ext( &self, device: Device, @@ -51043,7 +63472,7 @@ impl ExtDisplayControlFn { p_fence, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_swapchain_counter_ext( &self, device: Device, @@ -51056,19 +63485,19 @@ impl ExtDisplayControlFn { } #[doc = "Generated from \'VK_EXT_display_control\'"] impl StructureType 
{ - pub const DISPLAY_POWER_INFO_EXT: Self = StructureType(1000091000); + pub const DISPLAY_POWER_INFO_EXT: Self = StructureType(1_000_091_000); } #[doc = "Generated from \'VK_EXT_display_control\'"] impl StructureType { - pub const DEVICE_EVENT_INFO_EXT: Self = StructureType(1000091001); + pub const DEVICE_EVENT_INFO_EXT: Self = StructureType(1_000_091_001); } #[doc = "Generated from \'VK_EXT_display_control\'"] impl StructureType { - pub const DISPLAY_EVENT_INFO_EXT: Self = StructureType(1000091002); + pub const DISPLAY_EVENT_INFO_EXT: Self = StructureType(1_000_091_002); } #[doc = "Generated from \'VK_EXT_display_control\'"] impl StructureType { - pub const SWAPCHAIN_COUNTER_CREATE_INFO_EXT: Self = StructureType(1000091003); + pub const SWAPCHAIN_COUNTER_CREATE_INFO_EXT: Self = StructureType(1_000_091_003); } impl GoogleDisplayTimingFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -51161,7 +63590,7 @@ impl GoogleDisplayTimingFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_refresh_cycle_duration_google( &self, device: Device, @@ -51170,7 +63599,7 @@ impl GoogleDisplayTimingFn { ) -> Result { (self.get_refresh_cycle_duration_google)(device, swapchain, p_display_timing_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_past_presentation_timing_google( &self, device: Device, @@ -51188,7 +63617,7 @@ impl GoogleDisplayTimingFn { } #[doc = "Generated from \'VK_GOOGLE_display_timing\'"] impl StructureType { - pub const PRESENT_TIMES_INFO_GOOGLE: Self = StructureType(1000092000); + pub const PRESENT_TIMES_INFO_GOOGLE: Self = StructureType(1_000_092_000); } impl NvSampleMaskOverrideCoverageFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -51281,7 +63710,7 @@ impl NvxMultiviewPerViewAttributesFn { #[doc = "Generated from \'VK_NVX_multiview_per_view_attributes\'"] impl StructureType { pub const PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX: Self = - StructureType(1000097000); + StructureType(1_000_097_000); } #[doc = "Generated from 
\'VK_NVX_multiview_per_view_attributes\'"] impl SubpassDescriptionFlags { @@ -51315,7 +63744,7 @@ impl NvViewportSwizzleFn { } #[doc = "Generated from \'VK_NV_viewport_swizzle\'"] impl StructureType { - pub const PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV: Self = StructureType(1000098000); + pub const PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV: Self = StructureType(1_000_098_000); } impl ExtDiscardRectanglesFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -51376,7 +63805,7 @@ impl ExtDiscardRectanglesFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_discard_rectangle_ext( &self, command_buffer: CommandBuffer, @@ -51394,15 +63823,15 @@ impl ExtDiscardRectanglesFn { } #[doc = "Generated from \'VK_EXT_discard_rectangles\'"] impl StructureType { - pub const PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: Self = StructureType(1000099000); + pub const PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: Self = StructureType(1_000_099_000); } #[doc = "Generated from \'VK_EXT_discard_rectangles\'"] impl StructureType { - pub const PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT: Self = StructureType(1000099001); + pub const PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT: Self = StructureType(1_000_099_001); } #[doc = "Generated from \'VK_EXT_discard_rectangles\'"] impl DynamicState { - pub const DISCARD_RECTANGLE_EXT: Self = DynamicState(1000099000); + pub const DISCARD_RECTANGLE_EXT: Self = DynamicState(1_000_099_000); } impl NvExtension101Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -51451,35 +63880,44 @@ impl ExtConservativeRasterizationFn { #[doc = "Generated from \'VK_EXT_conservative_rasterization\'"] impl StructureType { pub const PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT: Self = - StructureType(1000101000); + StructureType(1_000_101_000); } #[doc = "Generated from \'VK_EXT_conservative_rasterization\'"] impl StructureType { pub const PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT: Self = - 
StructureType(1000101001); + StructureType(1_000_101_001); } -impl NvExtension103Fn { +impl ExtDepthClipEnableFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_103\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_depth_clip_enable\0") .expect("Wrong extension string") } } -pub struct NvExtension103Fn {} -unsafe impl Send for NvExtension103Fn {} -unsafe impl Sync for NvExtension103Fn {} -impl ::std::clone::Clone for NvExtension103Fn { +pub struct ExtDepthClipEnableFn {} +unsafe impl Send for ExtDepthClipEnableFn {} +unsafe impl Sync for ExtDepthClipEnableFn {} +impl ::std::clone::Clone for ExtDepthClipEnableFn { fn clone(&self) -> Self { - NvExtension103Fn {} + ExtDepthClipEnableFn {} } } -impl NvExtension103Fn { +impl ExtDepthClipEnableFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - NvExtension103Fn {} + ExtDepthClipEnableFn {} } } +#[doc = "Generated from \'VK_EXT_depth_clip_enable\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: Self = StructureType(1_000_102_000); +} +#[doc = "Generated from \'VK_EXT_depth_clip_enable\'"] +impl StructureType { + pub const PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT: Self = + StructureType(1_000_102_001); +} impl NvExtension104Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_104\0") @@ -51526,59 +63964,63 @@ impl ExtSwapchainColorspaceFn { } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const DISPLAY_P3_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104001); + pub const DISPLAY_P3_NONLINEAR_EXT: Self = ColorSpaceKHR(1_000_104_001); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const EXTENDED_SRGB_LINEAR_EXT: Self = ColorSpaceKHR(1000104002); + pub const EXTENDED_SRGB_LINEAR_EXT: Self = ColorSpaceKHR(1_000_104_002); } #[doc = "Generated from 
\'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const DCI_P3_LINEAR_EXT: Self = ColorSpaceKHR(1000104003); + pub const DISPLAY_P3_LINEAR_EXT: Self = ColorSpaceKHR(1_000_104_003); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const DCI_P3_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104004); + pub const DCI_P3_NONLINEAR_EXT: Self = ColorSpaceKHR(1_000_104_004); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const BT709_LINEAR_EXT: Self = ColorSpaceKHR(1000104005); + pub const BT709_LINEAR_EXT: Self = ColorSpaceKHR(1_000_104_005); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const BT709_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104006); + pub const BT709_NONLINEAR_EXT: Self = ColorSpaceKHR(1_000_104_006); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const BT2020_LINEAR_EXT: Self = ColorSpaceKHR(1000104007); + pub const BT2020_LINEAR_EXT: Self = ColorSpaceKHR(1_000_104_007); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const HDR10_ST2084_EXT: Self = ColorSpaceKHR(1000104008); + pub const HDR10_ST2084_EXT: Self = ColorSpaceKHR(1_000_104_008); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const DOLBYVISION_EXT: Self = ColorSpaceKHR(1000104009); + pub const DOLBYVISION_EXT: Self = ColorSpaceKHR(1_000_104_009); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const HDR10_HLG_EXT: Self = ColorSpaceKHR(1000104010); + pub const HDR10_HLG_EXT: Self = ColorSpaceKHR(1_000_104_010); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const ADOBERGB_LINEAR_EXT: Self = ColorSpaceKHR(1000104011); + pub const ADOBERGB_LINEAR_EXT: Self = ColorSpaceKHR(1_000_104_011); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const 
ADOBERGB_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104012); + pub const ADOBERGB_NONLINEAR_EXT: Self = ColorSpaceKHR(1_000_104_012); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const PASS_THROUGH_EXT: Self = ColorSpaceKHR(1000104013); + pub const PASS_THROUGH_EXT: Self = ColorSpaceKHR(1_000_104_013); } #[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] impl ColorSpaceKHR { - pub const EXTENDED_SRGB_NONLINEAR_EXT: Self = ColorSpaceKHR(1000104014); + pub const EXTENDED_SRGB_NONLINEAR_EXT: Self = ColorSpaceKHR(1_000_104_014); +} +#[doc = "Generated from \'VK_EXT_swapchain_colorspace\'"] +impl ColorSpaceKHR { + pub const DCI_P3_LINEAR_EXT: Self = ColorSpaceKHR::DISPLAY_P3_LINEAR_EXT; } impl ExtHdrMetadataFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -51636,7 +64078,7 @@ impl ExtHdrMetadataFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn set_hdr_metadata_ext( &self, device: Device, @@ -51649,7 +64091,7 @@ impl ExtHdrMetadataFn { } #[doc = "Generated from \'VK_EXT_hdr_metadata\'"] impl StructureType { - pub const HDR_METADATA_EXT: Self = StructureType(1000105000); + pub const HDR_METADATA_EXT: Self = StructureType(1_000_105_000); } impl ImgExtension107Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -51695,28 +64137,52 @@ impl ImgExtension108Fn { ImgExtension108Fn {} } } -impl ImgExtension109Fn { +impl KhrImagelessFramebufferFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_IMG_extension_109\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_imageless_framebuffer\0") .expect("Wrong extension string") } } -pub struct ImgExtension109Fn {} -unsafe impl Send for ImgExtension109Fn {} -unsafe impl Sync for ImgExtension109Fn {} -impl ::std::clone::Clone for ImgExtension109Fn { +pub struct KhrImagelessFramebufferFn {} +unsafe impl Send for KhrImagelessFramebufferFn {} +unsafe impl Sync for KhrImagelessFramebufferFn {} +impl ::std::clone::Clone for 
KhrImagelessFramebufferFn { fn clone(&self) -> Self { - ImgExtension109Fn {} + KhrImagelessFramebufferFn {} } } -impl ImgExtension109Fn { +impl KhrImagelessFramebufferFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - ImgExtension109Fn {} + KhrImagelessFramebufferFn {} } } +#[doc = "Generated from \'VK_KHR_imageless_framebuffer\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES; +} +#[doc = "Generated from \'VK_KHR_imageless_framebuffer\'"] +impl StructureType { + pub const FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR: Self = + StructureType::FRAMEBUFFER_ATTACHMENTS_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_imageless_framebuffer\'"] +impl StructureType { + pub const FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR: Self = + StructureType::FRAMEBUFFER_ATTACHMENT_IMAGE_INFO; +} +#[doc = "Generated from \'VK_KHR_imageless_framebuffer\'"] +impl StructureType { + pub const RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR: Self = + StructureType::RENDER_PASS_ATTACHMENT_BEGIN_INFO; +} +#[doc = "Generated from \'VK_KHR_imageless_framebuffer\'"] +impl FramebufferCreateFlags { + pub const IMAGELESS_KHR: Self = FramebufferCreateFlags::IMAGELESS; +} impl KhrCreateRenderpass2Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_create_renderpass2\0") @@ -51724,49 +64190,49 @@ impl KhrCreateRenderpass2Fn { } } #[allow(non_camel_case_types)] -pub type PFN_vkCreateRenderPass2KHR = extern "system" fn( +pub type PFN_vkCreateRenderPass2 = extern "system" fn( device: Device, - p_create_info: *const RenderPassCreateInfo2KHR, + p_create_info: *const RenderPassCreateInfo2, p_allocator: *const AllocationCallbacks, p_render_pass: *mut RenderPass, ) -> Result; #[allow(non_camel_case_types)] -pub type PFN_vkCmdBeginRenderPass2KHR = extern "system" fn( +pub type PFN_vkCmdBeginRenderPass2 = extern "system" fn( 
command_buffer: CommandBuffer, p_render_pass_begin: *const RenderPassBeginInfo, - p_subpass_begin_info: *const SubpassBeginInfoKHR, + p_subpass_begin_info: *const SubpassBeginInfo, ) -> c_void; #[allow(non_camel_case_types)] -pub type PFN_vkCmdNextSubpass2KHR = extern "system" fn( +pub type PFN_vkCmdNextSubpass2 = extern "system" fn( command_buffer: CommandBuffer, - p_subpass_begin_info: *const SubpassBeginInfoKHR, - p_subpass_end_info: *const SubpassEndInfoKHR, + p_subpass_begin_info: *const SubpassBeginInfo, + p_subpass_end_info: *const SubpassEndInfo, ) -> c_void; #[allow(non_camel_case_types)] -pub type PFN_vkCmdEndRenderPass2KHR = extern "system" fn( +pub type PFN_vkCmdEndRenderPass2 = extern "system" fn( command_buffer: CommandBuffer, - p_subpass_end_info: *const SubpassEndInfoKHR, + p_subpass_end_info: *const SubpassEndInfo, ) -> c_void; pub struct KhrCreateRenderpass2Fn { pub create_render_pass2_khr: extern "system" fn( device: Device, - p_create_info: *const RenderPassCreateInfo2KHR, + p_create_info: *const RenderPassCreateInfo2, p_allocator: *const AllocationCallbacks, p_render_pass: *mut RenderPass, ) -> Result, pub cmd_begin_render_pass2_khr: extern "system" fn( command_buffer: CommandBuffer, p_render_pass_begin: *const RenderPassBeginInfo, - p_subpass_begin_info: *const SubpassBeginInfoKHR, + p_subpass_begin_info: *const SubpassBeginInfo, ) -> c_void, pub cmd_next_subpass2_khr: extern "system" fn( command_buffer: CommandBuffer, - p_subpass_begin_info: *const SubpassBeginInfoKHR, - p_subpass_end_info: *const SubpassEndInfoKHR, + p_subpass_begin_info: *const SubpassBeginInfo, + p_subpass_end_info: *const SubpassEndInfo, ) -> c_void, pub cmd_end_render_pass2_khr: extern "system" fn( command_buffer: CommandBuffer, - p_subpass_end_info: *const SubpassEndInfoKHR, + p_subpass_end_info: *const SubpassEndInfo, ) -> c_void, } unsafe impl Send for KhrCreateRenderpass2Fn {} @@ -51790,7 +64256,7 @@ impl KhrCreateRenderpass2Fn { create_render_pass2_khr: unsafe { 
extern "system" fn create_render_pass2_khr( _device: Device, - _p_create_info: *const RenderPassCreateInfo2KHR, + _p_create_info: *const RenderPassCreateInfo2, _p_allocator: *const AllocationCallbacks, _p_render_pass: *mut RenderPass, ) -> Result { @@ -51812,7 +64278,7 @@ impl KhrCreateRenderpass2Fn { extern "system" fn cmd_begin_render_pass2_khr( _command_buffer: CommandBuffer, _p_render_pass_begin: *const RenderPassBeginInfo, - _p_subpass_begin_info: *const SubpassBeginInfoKHR, + _p_subpass_begin_info: *const SubpassBeginInfo, ) -> c_void { panic!(concat!( "Unable to load ", @@ -51831,8 +64297,8 @@ impl KhrCreateRenderpass2Fn { cmd_next_subpass2_khr: unsafe { extern "system" fn cmd_next_subpass2_khr( _command_buffer: CommandBuffer, - _p_subpass_begin_info: *const SubpassBeginInfoKHR, - _p_subpass_end_info: *const SubpassEndInfoKHR, + _p_subpass_begin_info: *const SubpassBeginInfo, + _p_subpass_end_info: *const SubpassEndInfo, ) -> c_void { panic!(concat!( "Unable to load ", @@ -51851,7 +64317,7 @@ impl KhrCreateRenderpass2Fn { cmd_end_render_pass2_khr: unsafe { extern "system" fn cmd_end_render_pass2_khr( _command_buffer: CommandBuffer, - _p_subpass_end_info: *const SubpassEndInfoKHR, + _p_subpass_end_info: *const SubpassEndInfo, ) -> c_void { panic!(concat!( "Unable to load ", @@ -51869,70 +64335,70 @@ impl KhrCreateRenderpass2Fn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_render_pass2_khr( &self, device: Device, - p_create_info: *const RenderPassCreateInfo2KHR, + p_create_info: *const RenderPassCreateInfo2, p_allocator: *const AllocationCallbacks, p_render_pass: *mut RenderPass, ) -> Result { (self.create_render_pass2_khr)(device, p_create_info, p_allocator, p_render_pass) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_begin_render_pass2_khr( &self, command_buffer: CommandBuffer, p_render_pass_begin: *const RenderPassBeginInfo, - p_subpass_begin_info: *const SubpassBeginInfoKHR, + p_subpass_begin_info: *const SubpassBeginInfo, ) -> c_void { 
(self.cmd_begin_render_pass2_khr)(command_buffer, p_render_pass_begin, p_subpass_begin_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_next_subpass2_khr( &self, command_buffer: CommandBuffer, - p_subpass_begin_info: *const SubpassBeginInfoKHR, - p_subpass_end_info: *const SubpassEndInfoKHR, + p_subpass_begin_info: *const SubpassBeginInfo, + p_subpass_end_info: *const SubpassEndInfo, ) -> c_void { (self.cmd_next_subpass2_khr)(command_buffer, p_subpass_begin_info, p_subpass_end_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_end_render_pass2_khr( &self, command_buffer: CommandBuffer, - p_subpass_end_info: *const SubpassEndInfoKHR, + p_subpass_end_info: *const SubpassEndInfo, ) -> c_void { (self.cmd_end_render_pass2_khr)(command_buffer, p_subpass_end_info) } } #[doc = "Generated from \'VK_KHR_create_renderpass2\'"] impl StructureType { - pub const ATTACHMENT_DESCRIPTION_2_KHR: Self = StructureType(1000109000); + pub const ATTACHMENT_DESCRIPTION_2_KHR: Self = StructureType::ATTACHMENT_DESCRIPTION_2; } #[doc = "Generated from \'VK_KHR_create_renderpass2\'"] impl StructureType { - pub const ATTACHMENT_REFERENCE_2_KHR: Self = StructureType(1000109001); + pub const ATTACHMENT_REFERENCE_2_KHR: Self = StructureType::ATTACHMENT_REFERENCE_2; } #[doc = "Generated from \'VK_KHR_create_renderpass2\'"] impl StructureType { - pub const SUBPASS_DESCRIPTION_2_KHR: Self = StructureType(1000109002); + pub const SUBPASS_DESCRIPTION_2_KHR: Self = StructureType::SUBPASS_DESCRIPTION_2; } #[doc = "Generated from \'VK_KHR_create_renderpass2\'"] impl StructureType { - pub const SUBPASS_DEPENDENCY_2_KHR: Self = StructureType(1000109003); + pub const SUBPASS_DEPENDENCY_2_KHR: Self = StructureType::SUBPASS_DEPENDENCY_2; } #[doc = "Generated from \'VK_KHR_create_renderpass2\'"] impl StructureType { - pub const RENDER_PASS_CREATE_INFO_2_KHR: Self = StructureType(1000109004); + pub const RENDER_PASS_CREATE_INFO_2_KHR: Self = StructureType::RENDER_PASS_CREATE_INFO_2; } #[doc = "Generated 
from \'VK_KHR_create_renderpass2\'"] impl StructureType { - pub const SUBPASS_BEGIN_INFO_KHR: Self = StructureType(1000109005); + pub const SUBPASS_BEGIN_INFO_KHR: Self = StructureType::SUBPASS_BEGIN_INFO; } #[doc = "Generated from \'VK_KHR_create_renderpass2\'"] impl StructureType { - pub const SUBPASS_END_INFO_KHR: Self = StructureType(1000109006); + pub const SUBPASS_END_INFO_KHR: Self = StructureType::SUBPASS_END_INFO; } impl ImgExtension111Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -52005,7 +64471,7 @@ impl KhrSharedPresentableImageFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_swapchain_status_khr( &self, device: Device, @@ -52016,19 +64482,19 @@ impl KhrSharedPresentableImageFn { } #[doc = "Generated from \'VK_KHR_shared_presentable_image\'"] impl StructureType { - pub const SHARED_PRESENT_SURFACE_CAPABILITIES_KHR: Self = StructureType(1000111000); + pub const SHARED_PRESENT_SURFACE_CAPABILITIES_KHR: Self = StructureType(1_000_111_000); } #[doc = "Generated from \'VK_KHR_shared_presentable_image\'"] impl PresentModeKHR { - pub const SHARED_DEMAND_REFRESH: Self = PresentModeKHR(1000111000); + pub const SHARED_DEMAND_REFRESH: Self = PresentModeKHR(1_000_111_000); } #[doc = "Generated from \'VK_KHR_shared_presentable_image\'"] impl PresentModeKHR { - pub const SHARED_CONTINUOUS_REFRESH: Self = PresentModeKHR(1000111001); + pub const SHARED_CONTINUOUS_REFRESH: Self = PresentModeKHR(1_000_111_001); } #[doc = "Generated from \'VK_KHR_shared_presentable_image\'"] impl ImageLayout { - pub const SHARED_PRESENT_KHR: Self = ImageLayout(1000111000); + pub const SHARED_PRESENT_KHR: Self = ImageLayout(1_000_111_000); } impl KhrExternalFenceCapabilitiesFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -52036,12 +64502,27 @@ impl KhrExternalFenceCapabilitiesFn { .expect("Wrong extension string") } } -pub struct KhrExternalFenceCapabilitiesFn {} +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceExternalFenceProperties = extern "system" 
fn( + physical_device: PhysicalDevice, + p_external_fence_info: *const PhysicalDeviceExternalFenceInfo, + p_external_fence_properties: *mut ExternalFenceProperties, +) -> c_void; +pub struct KhrExternalFenceCapabilitiesFn { + pub get_physical_device_external_fence_properties_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_external_fence_info: *const PhysicalDeviceExternalFenceInfo, + p_external_fence_properties: *mut ExternalFenceProperties, + ) -> c_void, +} unsafe impl Send for KhrExternalFenceCapabilitiesFn {} unsafe impl Sync for KhrExternalFenceCapabilitiesFn {} impl ::std::clone::Clone for KhrExternalFenceCapabilitiesFn { fn clone(&self) -> Self { - KhrExternalFenceCapabilitiesFn {} + KhrExternalFenceCapabilitiesFn { + get_physical_device_external_fence_properties_khr: self + .get_physical_device_external_fence_properties_khr, + } } } impl KhrExternalFenceCapabilitiesFn { @@ -52049,8 +64530,81 @@ impl KhrExternalFenceCapabilitiesFn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrExternalFenceCapabilitiesFn {} + KhrExternalFenceCapabilitiesFn { + get_physical_device_external_fence_properties_khr: unsafe { + extern "system" fn get_physical_device_external_fence_properties_khr( + _physical_device: PhysicalDevice, + _p_external_fence_info: *const PhysicalDeviceExternalFenceInfo, + _p_external_fence_properties: *mut ExternalFenceProperties, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_external_fence_properties_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceExternalFencePropertiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_external_fence_properties_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn get_physical_device_external_fence_properties_khr( + &self, + physical_device: PhysicalDevice, + p_external_fence_info: *const PhysicalDeviceExternalFenceInfo, + 
p_external_fence_properties: *mut ExternalFenceProperties, + ) -> c_void { + (self.get_physical_device_external_fence_properties_khr)( + physical_device, + p_external_fence_info, + p_external_fence_properties, + ) + } +} +#[doc = "Generated from \'VK_KHR_external_fence_capabilities\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR: Self = + StructureType::PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO; +} +#[doc = "Generated from \'VK_KHR_external_fence_capabilities\'"] +impl StructureType { + pub const EXTERNAL_FENCE_PROPERTIES_KHR: Self = StructureType::EXTERNAL_FENCE_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_external_fence_capabilities\'"] +impl ExternalFenceHandleTypeFlags { + pub const EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_KHR: Self = + ExternalFenceHandleTypeFlags::EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD; +} +#[doc = "Generated from \'VK_KHR_external_fence_capabilities\'"] +impl ExternalFenceHandleTypeFlags { + pub const EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KHR: Self = + ExternalFenceHandleTypeFlags::EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32; +} +#[doc = "Generated from \'VK_KHR_external_fence_capabilities\'"] +impl ExternalFenceHandleTypeFlags { + pub const EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_KHR: Self = + ExternalFenceHandleTypeFlags::EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT; +} +#[doc = "Generated from \'VK_KHR_external_fence_capabilities\'"] +impl ExternalFenceHandleTypeFlags { + pub const EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_KHR: Self = + ExternalFenceHandleTypeFlags::EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD; +} +#[doc = "Generated from \'VK_KHR_external_fence_capabilities\'"] +impl ExternalFenceFeatureFlags { + pub const EXTERNAL_FENCE_FEATURE_EXPORTABLE_KHR: Self = + ExternalFenceFeatureFlags::EXTERNAL_FENCE_FEATURE_EXPORTABLE; +} +#[doc = "Generated from \'VK_KHR_external_fence_capabilities\'"] +impl ExternalFenceFeatureFlags { + pub const EXTERNAL_FENCE_FEATURE_IMPORTABLE_KHR: Self = + 
ExternalFenceFeatureFlags::EXTERNAL_FENCE_FEATURE_IMPORTABLE; } impl KhrExternalFenceFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -52074,6 +64628,14 @@ impl KhrExternalFenceFn { KhrExternalFenceFn {} } } +#[doc = "Generated from \'VK_KHR_external_fence\'"] +impl StructureType { + pub const EXPORT_FENCE_CREATE_INFO_KHR: Self = StructureType::EXPORT_FENCE_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_external_fence\'"] +impl FenceImportFlags { + pub const TEMPORARY_KHR: Self = FenceImportFlags::TEMPORARY; +} impl KhrExternalFenceWin32Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_external_fence_win32\0") @@ -52159,7 +64721,7 @@ impl KhrExternalFenceWin32Fn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn import_fence_win32_handle_khr( &self, device: Device, @@ -52167,7 +64729,7 @@ impl KhrExternalFenceWin32Fn { ) -> Result { (self.import_fence_win32_handle_khr)(device, p_import_fence_win32_handle_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_fence_win32_handle_khr( &self, device: Device, @@ -52179,15 +64741,15 @@ impl KhrExternalFenceWin32Fn { } #[doc = "Generated from \'VK_KHR_external_fence_win32\'"] impl StructureType { - pub const IMPORT_FENCE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000114000); + pub const IMPORT_FENCE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1_000_114_000); } #[doc = "Generated from \'VK_KHR_external_fence_win32\'"] impl StructureType { - pub const EXPORT_FENCE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000114001); + pub const EXPORT_FENCE_WIN32_HANDLE_INFO_KHR: Self = StructureType(1_000_114_001); } #[doc = "Generated from \'VK_KHR_external_fence_win32\'"] impl StructureType { - pub const FENCE_GET_WIN32_HANDLE_INFO_KHR: Self = StructureType(1000114002); + pub const FENCE_GET_WIN32_HANDLE_INFO_KHR: Self = StructureType(1_000_114_002); } impl KhrExternalFenceFdFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -52268,7 +64830,7 @@ impl KhrExternalFenceFdFn { }, } 
} - #[doc = ""] + #[doc = ""] pub unsafe fn import_fence_fd_khr( &self, device: Device, @@ -52276,7 +64838,7 @@ impl KhrExternalFenceFdFn { ) -> Result { (self.import_fence_fd_khr)(device, p_import_fence_fd_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_fence_fd_khr( &self, device: Device, @@ -52288,33 +64850,234 @@ impl KhrExternalFenceFdFn { } #[doc = "Generated from \'VK_KHR_external_fence_fd\'"] impl StructureType { - pub const IMPORT_FENCE_FD_INFO_KHR: Self = StructureType(1000115000); + pub const IMPORT_FENCE_FD_INFO_KHR: Self = StructureType(1_000_115_000); } #[doc = "Generated from \'VK_KHR_external_fence_fd\'"] impl StructureType { - pub const FENCE_GET_FD_INFO_KHR: Self = StructureType(1000115001); + pub const FENCE_GET_FD_INFO_KHR: Self = StructureType(1_000_115_001); } -impl KhrExtension117Fn { +impl KhrPerformanceQueryFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_117\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_performance_query\0") .expect("Wrong extension string") } } -pub struct KhrExtension117Fn {} -unsafe impl Send for KhrExtension117Fn {} -unsafe impl Sync for KhrExtension117Fn {} -impl ::std::clone::Clone for KhrExtension117Fn { +#[allow(non_camel_case_types)] +pub type PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = + extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + p_counter_count: *mut u32, + p_counters: *mut PerformanceCounterKHR, + p_counter_descriptions: *mut PerformanceCounterDescriptionKHR, + ) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = extern "system" fn( + physical_device: PhysicalDevice, + p_performance_query_create_info: *const QueryPoolPerformanceCreateInfoKHR, + p_num_passes: *mut u32, +) + -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkAcquireProfilingLockKHR = + extern "system" fn(device: Device, p_info: *const 
AcquireProfilingLockInfoKHR) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkReleaseProfilingLockKHR = extern "system" fn(device: Device) -> c_void; +pub struct KhrPerformanceQueryFn { + pub enumerate_physical_device_queue_family_performance_query_counters_khr: + extern "system" fn( + physical_device: PhysicalDevice, + queue_family_index: u32, + p_counter_count: *mut u32, + p_counters: *mut PerformanceCounterKHR, + p_counter_descriptions: *mut PerformanceCounterDescriptionKHR, + ) -> Result, + pub get_physical_device_queue_family_performance_query_passes_khr: extern "system" fn( + physical_device: PhysicalDevice, + p_performance_query_create_info: *const QueryPoolPerformanceCreateInfoKHR, + p_num_passes: *mut u32, + ) + -> c_void, + pub acquire_profiling_lock_khr: + extern "system" fn(device: Device, p_info: *const AcquireProfilingLockInfoKHR) -> Result, + pub release_profiling_lock_khr: extern "system" fn(device: Device) -> c_void, +} +unsafe impl Send for KhrPerformanceQueryFn {} +unsafe impl Sync for KhrPerformanceQueryFn {} +impl ::std::clone::Clone for KhrPerformanceQueryFn { fn clone(&self) -> Self { - KhrExtension117Fn {} + KhrPerformanceQueryFn { + enumerate_physical_device_queue_family_performance_query_counters_khr: self + .enumerate_physical_device_queue_family_performance_query_counters_khr, + get_physical_device_queue_family_performance_query_passes_khr: self + .get_physical_device_queue_family_performance_query_passes_khr, + acquire_profiling_lock_khr: self.acquire_profiling_lock_khr, + release_profiling_lock_khr: self.release_profiling_lock_khr, + } } } -impl KhrExtension117Fn { +impl KhrPerformanceQueryFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrExtension117Fn {} + KhrPerformanceQueryFn { + enumerate_physical_device_queue_family_performance_query_counters_khr: unsafe { + extern "system" fn enumerate_physical_device_queue_family_performance_query_counters_khr( + _physical_device: 
PhysicalDevice, + _queue_family_index: u32, + _p_counter_count: *mut u32, + _p_counters: *mut PerformanceCounterKHR, + _p_counter_descriptions: *mut PerformanceCounterDescriptionKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!( + enumerate_physical_device_queue_family_performance_query_counters_khr + ) + )) + } + let raw_name = + stringify!(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + enumerate_physical_device_queue_family_performance_query_counters_khr + } else { + ::std::mem::transmute(val) + } + }, + get_physical_device_queue_family_performance_query_passes_khr: unsafe { + extern "system" fn get_physical_device_queue_family_performance_query_passes_khr( + _physical_device: PhysicalDevice, + _p_performance_query_create_info: *const QueryPoolPerformanceCreateInfoKHR, + _p_num_passes: *mut u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_queue_family_performance_query_passes_khr) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_queue_family_performance_query_passes_khr + } else { + ::std::mem::transmute(val) + } + }, + acquire_profiling_lock_khr: unsafe { + extern "system" fn acquire_profiling_lock_khr( + _device: Device, + _p_info: *const AcquireProfilingLockInfoKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(acquire_profiling_lock_khr) + )) + } + let raw_name = stringify!(vkAcquireProfilingLockKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + acquire_profiling_lock_khr + } else { + ::std::mem::transmute(val) + } + }, + release_profiling_lock_khr: unsafe { + extern "system" fn release_profiling_lock_khr(_device: 
Device) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(release_profiling_lock_khr) + )) + } + let raw_name = stringify!(vkReleaseProfilingLockKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + release_profiling_lock_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn enumerate_physical_device_queue_family_performance_query_counters_khr( + &self, + physical_device: PhysicalDevice, + queue_family_index: u32, + p_counter_count: *mut u32, + p_counters: *mut PerformanceCounterKHR, + p_counter_descriptions: *mut PerformanceCounterDescriptionKHR, + ) -> Result { + (self.enumerate_physical_device_queue_family_performance_query_counters_khr)( + physical_device, + queue_family_index, + p_counter_count, + p_counters, + p_counter_descriptions, + ) + } + #[doc = ""] + pub unsafe fn get_physical_device_queue_family_performance_query_passes_khr( + &self, + physical_device: PhysicalDevice, + p_performance_query_create_info: *const QueryPoolPerformanceCreateInfoKHR, + p_num_passes: *mut u32, + ) -> c_void { + (self.get_physical_device_queue_family_performance_query_passes_khr)( + physical_device, + p_performance_query_create_info, + p_num_passes, + ) + } + #[doc = ""] + pub unsafe fn acquire_profiling_lock_khr( + &self, + device: Device, + p_info: *const AcquireProfilingLockInfoKHR, + ) -> Result { + (self.acquire_profiling_lock_khr)(device, p_info) + } + #[doc = ""] + pub unsafe fn release_profiling_lock_khr(&self, device: Device) -> c_void { + (self.release_profiling_lock_khr)(device) + } +} +#[doc = "Generated from \'VK_KHR_performance_query\'"] +impl QueryType { + pub const PERFORMANCE_QUERY_KHR: Self = QueryType(1_000_116_000); +} +#[doc = "Generated from \'VK_KHR_performance_query\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR: Self = StructureType(1_000_116_000); +} +#[doc = "Generated from \'VK_KHR_performance_query\'"] 
+impl StructureType { + pub const PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR: Self = StructureType(1_000_116_001); +} +#[doc = "Generated from \'VK_KHR_performance_query\'"] +impl StructureType { + pub const QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR: Self = StructureType(1_000_116_002); +} +#[doc = "Generated from \'VK_KHR_performance_query\'"] +impl StructureType { + pub const PERFORMANCE_QUERY_SUBMIT_INFO_KHR: Self = StructureType(1_000_116_003); +} +#[doc = "Generated from \'VK_KHR_performance_query\'"] +impl StructureType { + pub const ACQUIRE_PROFILING_LOCK_INFO_KHR: Self = StructureType(1_000_116_004); +} +#[doc = "Generated from \'VK_KHR_performance_query\'"] +impl StructureType { + pub const PERFORMANCE_COUNTER_KHR: Self = StructureType(1_000_116_005); +} +#[doc = "Generated from \'VK_KHR_performance_query\'"] +impl StructureType { + pub const PERFORMANCE_COUNTER_DESCRIPTION_KHR: Self = StructureType(1_000_116_006); } impl KhrMaintenance2Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -52338,6 +65101,59 @@ impl KhrMaintenance2Fn { KhrMaintenance2Fn {} } } +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl ImageCreateFlags { + pub const BLOCK_TEXEL_VIEW_COMPATIBLE_KHR: Self = ImageCreateFlags::BLOCK_TEXEL_VIEW_COMPATIBLE; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl ImageCreateFlags { + pub const EXTENDED_USAGE_KHR: Self = ImageCreateFlags::EXTENDED_USAGE; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: Self = + StructureType::PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl StructureType { + pub const RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR: Self = + StructureType::RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl StructureType { + pub const IMAGE_VIEW_USAGE_CREATE_INFO_KHR: Self = 
StructureType::IMAGE_VIEW_USAGE_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl StructureType { + pub const PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR: Self = + StructureType::PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl ImageLayout { + pub const DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR: Self = + ImageLayout::DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl ImageLayout { + pub const DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR: Self = + ImageLayout::DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl PointClippingBehavior { + pub const ALL_CLIP_PLANES_KHR: Self = PointClippingBehavior::ALL_CLIP_PLANES; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl PointClippingBehavior { + pub const USER_CLIP_PLANES_ONLY_KHR: Self = PointClippingBehavior::USER_CLIP_PLANES_ONLY; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl TessellationDomainOrigin { + pub const UPPER_LEFT_KHR: Self = TessellationDomainOrigin::UPPER_LEFT; +} +#[doc = "Generated from \'VK_KHR_maintenance2\'"] +impl TessellationDomainOrigin { + pub const LOWER_LEFT_KHR: Self = TessellationDomainOrigin::LOWER_LEFT; +} impl KhrExtension119Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_119\0") @@ -52452,7 +65268,7 @@ impl KhrGetSurfaceCapabilities2Fn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_capabilities2_khr( &self, physical_device: PhysicalDevice, @@ -52465,7 +65281,7 @@ impl KhrGetSurfaceCapabilities2Fn { p_surface_capabilities, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_surface_formats2_khr( &self, physical_device: PhysicalDevice, @@ -52483,15 +65299,15 @@ impl KhrGetSurfaceCapabilities2Fn { } #[doc = "Generated from 
\'VK_KHR_get_surface_capabilities2\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SURFACE_INFO_2_KHR: Self = StructureType(1000119000); + pub const PHYSICAL_DEVICE_SURFACE_INFO_2_KHR: Self = StructureType(1_000_119_000); } #[doc = "Generated from \'VK_KHR_get_surface_capabilities2\'"] impl StructureType { - pub const SURFACE_CAPABILITIES_2_KHR: Self = StructureType(1000119001); + pub const SURFACE_CAPABILITIES_2_KHR: Self = StructureType(1_000_119_001); } #[doc = "Generated from \'VK_KHR_get_surface_capabilities2\'"] impl StructureType { - pub const SURFACE_FORMAT_2_KHR: Self = StructureType(1000119002); + pub const SURFACE_FORMAT_2_KHR: Self = StructureType(1_000_119_002); } impl KhrVariablePointersFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -52515,6 +65331,16 @@ impl KhrVariablePointersFn { KhrVariablePointersFn {} } } +#[doc = "Generated from \'VK_KHR_variable_pointers\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES; +} +#[doc = "Generated from \'VK_KHR_variable_pointers\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES; +} impl KhrGetDisplayProperties2Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_get_display_properties2\0") @@ -52672,7 +65498,7 @@ impl KhrGetDisplayProperties2Fn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_display_properties2_khr( &self, physical_device: PhysicalDevice, @@ -52685,7 +65511,7 @@ impl KhrGetDisplayProperties2Fn { p_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_display_plane_properties2_khr( &self, physical_device: PhysicalDevice, @@ -52698,7 +65524,7 @@ impl KhrGetDisplayProperties2Fn { p_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_display_mode_properties2_khr( &self, physical_device: 
PhysicalDevice, @@ -52713,7 +65539,7 @@ impl KhrGetDisplayProperties2Fn { p_properties, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_display_plane_capabilities2_khr( &self, physical_device: PhysicalDevice, @@ -52729,23 +65555,23 @@ impl KhrGetDisplayProperties2Fn { } #[doc = "Generated from \'VK_KHR_get_display_properties2\'"] impl StructureType { - pub const DISPLAY_PROPERTIES_2_KHR: Self = StructureType(1000121000); + pub const DISPLAY_PROPERTIES_2_KHR: Self = StructureType(1_000_121_000); } #[doc = "Generated from \'VK_KHR_get_display_properties2\'"] impl StructureType { - pub const DISPLAY_PLANE_PROPERTIES_2_KHR: Self = StructureType(1000121001); + pub const DISPLAY_PLANE_PROPERTIES_2_KHR: Self = StructureType(1_000_121_001); } #[doc = "Generated from \'VK_KHR_get_display_properties2\'"] impl StructureType { - pub const DISPLAY_MODE_PROPERTIES_2_KHR: Self = StructureType(1000121002); + pub const DISPLAY_MODE_PROPERTIES_2_KHR: Self = StructureType(1_000_121_002); } #[doc = "Generated from \'VK_KHR_get_display_properties2\'"] impl StructureType { - pub const DISPLAY_PLANE_INFO_2_KHR: Self = StructureType(1000121003); + pub const DISPLAY_PLANE_INFO_2_KHR: Self = StructureType(1_000_121_003); } #[doc = "Generated from \'VK_KHR_get_display_properties2\'"] impl StructureType { - pub const DISPLAY_PLANE_CAPABILITIES_2_KHR: Self = StructureType(1000121004); + pub const DISPLAY_PLANE_CAPABILITIES_2_KHR: Self = StructureType(1_000_121_004); } impl MvkIosSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -52806,7 +65632,7 @@ impl MvkIosSurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_ios_surface_mvk( &self, instance: Instance, @@ -52819,7 +65645,7 @@ impl MvkIosSurfaceFn { } #[doc = "Generated from \'VK_MVK_ios_surface\'"] impl StructureType { - pub const IOS_SURFACE_CREATE_INFO_M: Self = StructureType(1000122000); + pub const IOS_SURFACE_CREATE_INFO_M: Self = StructureType(1_000_122_000); } impl MvkMacosSurfaceFn { pub fn name() -> 
&'static ::std::ffi::CStr { @@ -52880,7 +65706,7 @@ impl MvkMacosSurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_mac_os_surface_mvk( &self, instance: Instance, @@ -52893,7 +65719,7 @@ impl MvkMacosSurfaceFn { } #[doc = "Generated from \'VK_MVK_macos_surface\'"] impl StructureType { - pub const MACOS_SURFACE_CREATE_INFO_M: Self = StructureType(1000123000); + pub const MACOS_SURFACE_CREATE_INFO_M: Self = StructureType(1_000_123_000); } impl MvkMoltenvkFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -52941,7 +65767,7 @@ impl ExtExternalMemoryDmaBufFn { #[doc = "Generated from \'VK_EXT_external_memory_dma_buf\'"] impl ExternalMemoryHandleTypeFlags { pub const EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF: Self = - ExternalMemoryHandleTypeFlags(0b1000000000); + ExternalMemoryHandleTypeFlags(0b10_0000_0000); } impl ExtQueueFamilyForeignFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -52987,6 +65813,16 @@ impl KhrDedicatedAllocationFn { KhrDedicatedAllocationFn {} } } +#[doc = "Generated from \'VK_KHR_dedicated_allocation\'"] +impl StructureType { + pub const MEMORY_DEDICATED_REQUIREMENTS_KHR: Self = + StructureType::MEMORY_DEDICATED_REQUIREMENTS; +} +#[doc = "Generated from \'VK_KHR_dedicated_allocation\'"] +impl StructureType { + pub const MEMORY_DEDICATED_ALLOCATE_INFO_KHR: Self = + StructureType::MEMORY_DEDICATED_ALLOCATE_INFO; +} impl ExtDebugUtilsFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_debug_utils\0") @@ -53316,7 +66152,7 @@ impl ExtDebugUtilsFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn set_debug_utils_object_name_ext( &self, device: Device, @@ -53324,7 +66160,7 @@ impl ExtDebugUtilsFn { ) -> Result { (self.set_debug_utils_object_name_ext)(device, p_name_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn set_debug_utils_object_tag_ext( &self, device: Device, @@ -53332,7 +66168,7 @@ impl ExtDebugUtilsFn { ) -> Result { (self.set_debug_utils_object_tag_ext)(device, p_tag_info) } - 
#[doc = ""] + #[doc = ""] pub unsafe fn queue_begin_debug_utils_label_ext( &self, queue: Queue, @@ -53340,11 +66176,11 @@ impl ExtDebugUtilsFn { ) -> c_void { (self.queue_begin_debug_utils_label_ext)(queue, p_label_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_end_debug_utils_label_ext(&self, queue: Queue) -> c_void { (self.queue_end_debug_utils_label_ext)(queue) } - #[doc = ""] + #[doc = ""] pub unsafe fn queue_insert_debug_utils_label_ext( &self, queue: Queue, @@ -53352,7 +66188,7 @@ impl ExtDebugUtilsFn { ) -> c_void { (self.queue_insert_debug_utils_label_ext)(queue, p_label_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_begin_debug_utils_label_ext( &self, command_buffer: CommandBuffer, @@ -53360,11 +66196,11 @@ impl ExtDebugUtilsFn { ) -> c_void { (self.cmd_begin_debug_utils_label_ext)(command_buffer, p_label_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_end_debug_utils_label_ext(&self, command_buffer: CommandBuffer) -> c_void { (self.cmd_end_debug_utils_label_ext)(command_buffer) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_insert_debug_utils_label_ext( &self, command_buffer: CommandBuffer, @@ -53372,7 +66208,7 @@ impl ExtDebugUtilsFn { ) -> c_void { (self.cmd_insert_debug_utils_label_ext)(command_buffer, p_label_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_debug_utils_messenger_ext( &self, instance: Instance, @@ -53382,7 +66218,7 @@ impl ExtDebugUtilsFn { ) -> Result { (self.create_debug_utils_messenger_ext)(instance, p_create_info, p_allocator, p_messenger) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_debug_utils_messenger_ext( &self, instance: Instance, @@ -53391,7 +66227,7 @@ impl ExtDebugUtilsFn { ) -> c_void { (self.destroy_debug_utils_messenger_ext)(instance, messenger, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn submit_debug_utils_message_ext( &self, instance: Instance, @@ -53409,27 +66245,27 @@ impl ExtDebugUtilsFn { } #[doc = "Generated from \'VK_EXT_debug_utils\'"] impl StructureType { - 
pub const DEBUG_UTILS_OBJECT_NAME_INFO_EXT: Self = StructureType(1000128000); + pub const DEBUG_UTILS_OBJECT_NAME_INFO_EXT: Self = StructureType(1_000_128_000); } #[doc = "Generated from \'VK_EXT_debug_utils\'"] impl StructureType { - pub const DEBUG_UTILS_OBJECT_TAG_INFO_EXT: Self = StructureType(1000128001); + pub const DEBUG_UTILS_OBJECT_TAG_INFO_EXT: Self = StructureType(1_000_128_001); } #[doc = "Generated from \'VK_EXT_debug_utils\'"] impl StructureType { - pub const DEBUG_UTILS_LABEL_EXT: Self = StructureType(1000128002); + pub const DEBUG_UTILS_LABEL_EXT: Self = StructureType(1_000_128_002); } #[doc = "Generated from \'VK_EXT_debug_utils\'"] impl StructureType { - pub const DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT: Self = StructureType(1000128003); + pub const DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT: Self = StructureType(1_000_128_003); } #[doc = "Generated from \'VK_EXT_debug_utils\'"] impl StructureType { - pub const DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT: Self = StructureType(1000128004); + pub const DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT: Self = StructureType(1_000_128_004); } #[doc = "Generated from \'VK_EXT_debug_utils\'"] impl ObjectType { - pub const DEBUG_UTILS_MESSENGER_EXT: Self = ObjectType(1000128000); + pub const DEBUG_UTILS_MESSENGER_EXT: Self = ObjectType(1_000_128_000); } impl AndroidExternalMemoryAndroidHardwareBufferFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -53523,7 +66359,7 @@ impl AndroidExternalMemoryAndroidHardwareBufferFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_android_hardware_buffer_properties_android( &self, device: Device, @@ -53532,7 +66368,7 @@ impl AndroidExternalMemoryAndroidHardwareBufferFn { ) -> Result { (self.get_android_hardware_buffer_properties_android)(device, buffer, p_properties) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_memory_android_hardware_buffer_android( &self, device: Device, @@ -53545,31 +66381,32 @@ impl AndroidExternalMemoryAndroidHardwareBufferFn { #[doc = "Generated from 
\'VK_ANDROID_external_memory_android_hardware_buffer\'"] impl ExternalMemoryHandleTypeFlags { pub const EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_ANDROID: Self = - ExternalMemoryHandleTypeFlags(0b10000000000); + ExternalMemoryHandleTypeFlags(0b100_0000_0000); } #[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] impl StructureType { - pub const ANDROID_HARDWARE_BUFFER_USAGE_ANDROID: Self = StructureType(1000129000); + pub const ANDROID_HARDWARE_BUFFER_USAGE_ANDROID: Self = StructureType(1_000_129_000); } #[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] impl StructureType { - pub const ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID: Self = StructureType(1000129001); + pub const ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID: Self = StructureType(1_000_129_001); } #[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] impl StructureType { - pub const ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID: Self = StructureType(1000129002); + pub const ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID: Self = + StructureType(1_000_129_002); } #[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] impl StructureType { - pub const IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID: Self = StructureType(1000129003); + pub const IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID: Self = StructureType(1_000_129_003); } #[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] impl StructureType { - pub const MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID: Self = StructureType(1000129004); + pub const MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID: Self = StructureType(1_000_129_004); } #[doc = "Generated from \'VK_ANDROID_external_memory_android_hardware_buffer\'"] impl StructureType { - pub const EXTERNAL_FORMAT_ANDROID: Self = StructureType(1000129005); + pub const EXTERNAL_FORMAT_ANDROID: Self = StructureType(1_000_129_005); } impl 
ExtSamplerFilterMinmaxFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -53596,15 +66433,29 @@ impl ExtSamplerFilterMinmaxFn { #[doc = "Generated from \'VK_EXT_sampler_filter_minmax\'"] impl StructureType { pub const PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: Self = - StructureType(1000130000); + StructureType::PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES; } #[doc = "Generated from \'VK_EXT_sampler_filter_minmax\'"] impl StructureType { - pub const SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT: Self = StructureType(1000130001); + pub const SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT: Self = + StructureType::SAMPLER_REDUCTION_MODE_CREATE_INFO; } #[doc = "Generated from \'VK_EXT_sampler_filter_minmax\'"] impl FormatFeatureFlags { - pub const SAMPLED_IMAGE_FILTER_MINMAX_EXT: Self = FormatFeatureFlags(0b10000000000000000); + pub const SAMPLED_IMAGE_FILTER_MINMAX_EXT: Self = + FormatFeatureFlags::SAMPLED_IMAGE_FILTER_MINMAX; +} +#[doc = "Generated from \'VK_EXT_sampler_filter_minmax\'"] +impl SamplerReductionMode { + pub const WEIGHTED_AVERAGE_EXT: Self = SamplerReductionMode::WEIGHTED_AVERAGE; +} +#[doc = "Generated from \'VK_EXT_sampler_filter_minmax\'"] +impl SamplerReductionMode { + pub const MIN_EXT: Self = SamplerReductionMode::MIN; +} +#[doc = "Generated from \'VK_EXT_sampler_filter_minmax\'"] +impl SamplerReductionMode { + pub const MAX_EXT: Self = SamplerReductionMode::MAX; } impl KhrStorageBufferStorageClassFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -53784,24 +66635,26 @@ impl ExtInlineUniformBlockFn { } #[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] impl DescriptorType { - pub const INLINE_UNIFORM_BLOCK_EXT: Self = DescriptorType(1000138000); + pub const INLINE_UNIFORM_BLOCK_EXT: Self = DescriptorType(1_000_138_000); } #[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] impl StructureType { - pub const PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: Self = StructureType(1000138000); + pub const 
PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: Self = + StructureType(1_000_138_000); } #[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] impl StructureType { - pub const PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: Self = StructureType(1000138001); + pub const PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: Self = + StructureType(1_000_138_001); } #[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] impl StructureType { - pub const WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT: Self = StructureType(1000138002); + pub const WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT: Self = StructureType(1_000_138_002); } #[doc = "Generated from \'VK_EXT_inline_uniform_block\'"] impl StructureType { pub const DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: Self = - StructureType(1000138003); + StructureType(1_000_138_003); } impl AmdExtension140Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -53977,7 +66830,7 @@ impl ExtSampleLocationsFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_sample_locations_ext( &self, command_buffer: CommandBuffer, @@ -53985,7 +66838,7 @@ impl ExtSampleLocationsFn { ) -> c_void { (self.cmd_set_sample_locations_ext)(command_buffer, p_sample_locations_info) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_multisample_properties_ext( &self, physical_device: PhysicalDevice, @@ -54001,31 +66854,31 @@ impl ExtSampleLocationsFn { } #[doc = "Generated from \'VK_EXT_sample_locations\'"] impl ImageCreateFlags { - pub const SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_EXT: Self = ImageCreateFlags(0b1000000000000); + pub const SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_EXT: Self = ImageCreateFlags(0b1_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_sample_locations\'"] impl StructureType { - pub const SAMPLE_LOCATIONS_INFO_EXT: Self = StructureType(1000143000); + pub const SAMPLE_LOCATIONS_INFO_EXT: Self = StructureType(1_000_143_000); } #[doc = "Generated from \'VK_EXT_sample_locations\'"] impl StructureType { 
- pub const RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT: Self = StructureType(1000143001); + pub const RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT: Self = StructureType(1_000_143_001); } #[doc = "Generated from \'VK_EXT_sample_locations\'"] impl StructureType { - pub const PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT: Self = StructureType(1000143002); + pub const PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT: Self = StructureType(1_000_143_002); } #[doc = "Generated from \'VK_EXT_sample_locations\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: Self = StructureType(1000143003); + pub const PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: Self = StructureType(1_000_143_003); } #[doc = "Generated from \'VK_EXT_sample_locations\'"] impl StructureType { - pub const MULTISAMPLE_PROPERTIES_EXT: Self = StructureType(1000143004); + pub const MULTISAMPLE_PROPERTIES_EXT: Self = StructureType(1_000_143_004); } #[doc = "Generated from \'VK_EXT_sample_locations\'"] impl DynamicState { - pub const SAMPLE_LOCATIONS_EXT: Self = DynamicState(1000143000); + pub const SAMPLE_LOCATIONS_EXT: Self = DynamicState(1_000_143_000); } impl KhrRelaxedBlockLayoutFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -54055,12 +66908,53 @@ impl KhrGetMemoryRequirements2Fn { .expect("Wrong extension string") } } -pub struct KhrGetMemoryRequirements2Fn {} +#[allow(non_camel_case_types)] +pub type PFN_vkGetImageMemoryRequirements2 = extern "system" fn( + device: Device, + p_info: *const ImageMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetBufferMemoryRequirements2 = extern "system" fn( + device: Device, + p_info: *const BufferMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetImageSparseMemoryRequirements2 = extern "system" fn( + device: Device, + p_info: *const 
ImageSparseMemoryRequirementsInfo2, + p_sparse_memory_requirement_count: *mut u32, + p_sparse_memory_requirements: *mut SparseImageMemoryRequirements2, +) -> c_void; +pub struct KhrGetMemoryRequirements2Fn { + pub get_image_memory_requirements2_khr: extern "system" fn( + device: Device, + p_info: *const ImageMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void, + pub get_buffer_memory_requirements2_khr: extern "system" fn( + device: Device, + p_info: *const BufferMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void, + pub get_image_sparse_memory_requirements2_khr: extern "system" fn( + device: Device, + p_info: *const ImageSparseMemoryRequirementsInfo2, + p_sparse_memory_requirement_count: *mut u32, + p_sparse_memory_requirements: *mut SparseImageMemoryRequirements2, + ) -> c_void, +} unsafe impl Send for KhrGetMemoryRequirements2Fn {} unsafe impl Sync for KhrGetMemoryRequirements2Fn {} impl ::std::clone::Clone for KhrGetMemoryRequirements2Fn { fn clone(&self) -> Self { - KhrGetMemoryRequirements2Fn {} + KhrGetMemoryRequirements2Fn { + get_image_memory_requirements2_khr: self.get_image_memory_requirements2_khr, + get_buffer_memory_requirements2_khr: self.get_buffer_memory_requirements2_khr, + get_image_sparse_memory_requirements2_khr: self + .get_image_sparse_memory_requirements2_khr, + } } } impl KhrGetMemoryRequirements2Fn { @@ -54068,8 +66962,127 @@ impl KhrGetMemoryRequirements2Fn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrGetMemoryRequirements2Fn {} + KhrGetMemoryRequirements2Fn { + get_image_memory_requirements2_khr: unsafe { + extern "system" fn get_image_memory_requirements2_khr( + _device: Device, + _p_info: *const ImageMemoryRequirementsInfo2, + _p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_image_memory_requirements2_khr) + )) + } + let raw_name = 
stringify!(vkGetImageMemoryRequirements2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_image_memory_requirements2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_buffer_memory_requirements2_khr: unsafe { + extern "system" fn get_buffer_memory_requirements2_khr( + _device: Device, + _p_info: *const BufferMemoryRequirementsInfo2, + _p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_buffer_memory_requirements2_khr) + )) + } + let raw_name = stringify!(vkGetBufferMemoryRequirements2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_buffer_memory_requirements2_khr + } else { + ::std::mem::transmute(val) + } + }, + get_image_sparse_memory_requirements2_khr: unsafe { + extern "system" fn get_image_sparse_memory_requirements2_khr( + _device: Device, + _p_info: *const ImageSparseMemoryRequirementsInfo2, + _p_sparse_memory_requirement_count: *mut u32, + _p_sparse_memory_requirements: *mut SparseImageMemoryRequirements2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_image_sparse_memory_requirements2_khr) + )) + } + let raw_name = stringify!(vkGetImageSparseMemoryRequirements2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_image_sparse_memory_requirements2_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn get_image_memory_requirements2_khr( + &self, + device: Device, + p_info: *const ImageMemoryRequirementsInfo2, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + (self.get_image_memory_requirements2_khr)(device, p_info, p_memory_requirements) + } + #[doc = ""] + pub unsafe fn get_buffer_memory_requirements2_khr( + &self, + device: Device, + p_info: *const BufferMemoryRequirementsInfo2, + p_memory_requirements: *mut 
MemoryRequirements2, + ) -> c_void { + (self.get_buffer_memory_requirements2_khr)(device, p_info, p_memory_requirements) + } + #[doc = ""] + pub unsafe fn get_image_sparse_memory_requirements2_khr( + &self, + device: Device, + p_info: *const ImageSparseMemoryRequirementsInfo2, + p_sparse_memory_requirement_count: *mut u32, + p_sparse_memory_requirements: *mut SparseImageMemoryRequirements2, + ) -> c_void { + (self.get_image_sparse_memory_requirements2_khr)( + device, + p_info, + p_sparse_memory_requirement_count, + p_sparse_memory_requirements, + ) + } +} +#[doc = "Generated from \'VK_KHR_get_memory_requirements2\'"] +impl StructureType { + pub const BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR: Self = + StructureType::BUFFER_MEMORY_REQUIREMENTS_INFO_2; +} +#[doc = "Generated from \'VK_KHR_get_memory_requirements2\'"] +impl StructureType { + pub const IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR: Self = + StructureType::IMAGE_MEMORY_REQUIREMENTS_INFO_2; +} +#[doc = "Generated from \'VK_KHR_get_memory_requirements2\'"] +impl StructureType { + pub const IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR: Self = + StructureType::IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2; +} +#[doc = "Generated from \'VK_KHR_get_memory_requirements2\'"] +impl StructureType { + pub const MEMORY_REQUIREMENTS_2_KHR: Self = StructureType::MEMORY_REQUIREMENTS_2; +} +#[doc = "Generated from \'VK_KHR_get_memory_requirements2\'"] +impl StructureType { + pub const SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR: Self = + StructureType::SPARSE_IMAGE_MEMORY_REQUIREMENTS_2; } impl KhrImageFormatListFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -54095,7 +67108,8 @@ impl KhrImageFormatListFn { } #[doc = "Generated from \'VK_KHR_image_format_list\'"] impl StructureType { - pub const IMAGE_FORMAT_LIST_CREATE_INFO_KHR: Self = StructureType(1000147000); + pub const IMAGE_FORMAT_LIST_CREATE_INFO_KHR: Self = + StructureType::IMAGE_FORMAT_LIST_CREATE_INFO; } impl ExtBlendOperationAdvancedFn { pub fn name() -> &'static 
::std::ffi::CStr { @@ -54122,204 +67136,205 @@ impl ExtBlendOperationAdvancedFn { #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl StructureType { pub const PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT: Self = - StructureType(1000148000); + StructureType(1_000_148_000); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl StructureType { pub const PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT: Self = - StructureType(1000148001); + StructureType(1_000_148_001); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl StructureType { - pub const PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT: Self = StructureType(1000148002); + pub const PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT: Self = + StructureType(1_000_148_002); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const ZERO_EXT: Self = BlendOp(1000148000); + pub const ZERO_EXT: Self = BlendOp(1_000_148_000); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const SRC_EXT: Self = BlendOp(1000148001); + pub const SRC_EXT: Self = BlendOp(1_000_148_001); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const DST_EXT: Self = BlendOp(1000148002); + pub const DST_EXT: Self = BlendOp(1_000_148_002); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const SRC_OVER_EXT: Self = BlendOp(1000148003); + pub const SRC_OVER_EXT: Self = BlendOp(1_000_148_003); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const DST_OVER_EXT: Self = BlendOp(1000148004); + pub const DST_OVER_EXT: Self = BlendOp(1_000_148_004); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const SRC_IN_EXT: Self = BlendOp(1000148005); + pub const SRC_IN_EXT: Self = BlendOp(1_000_148_005); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub 
const DST_IN_EXT: Self = BlendOp(1000148006); + pub const DST_IN_EXT: Self = BlendOp(1_000_148_006); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const SRC_OUT_EXT: Self = BlendOp(1000148007); + pub const SRC_OUT_EXT: Self = BlendOp(1_000_148_007); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const DST_OUT_EXT: Self = BlendOp(1000148008); + pub const DST_OUT_EXT: Self = BlendOp(1_000_148_008); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const SRC_ATOP_EXT: Self = BlendOp(1000148009); + pub const SRC_ATOP_EXT: Self = BlendOp(1_000_148_009); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const DST_ATOP_EXT: Self = BlendOp(1000148010); + pub const DST_ATOP_EXT: Self = BlendOp(1_000_148_010); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const XOR_EXT: Self = BlendOp(1000148011); + pub const XOR_EXT: Self = BlendOp(1_000_148_011); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const MULTIPLY_EXT: Self = BlendOp(1000148012); + pub const MULTIPLY_EXT: Self = BlendOp(1_000_148_012); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const SCREEN_EXT: Self = BlendOp(1000148013); + pub const SCREEN_EXT: Self = BlendOp(1_000_148_013); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const OVERLAY_EXT: Self = BlendOp(1000148014); + pub const OVERLAY_EXT: Self = BlendOp(1_000_148_014); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const DARKEN_EXT: Self = BlendOp(1000148015); + pub const DARKEN_EXT: Self = BlendOp(1_000_148_015); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const LIGHTEN_EXT: Self = BlendOp(1000148016); + pub const LIGHTEN_EXT: Self = BlendOp(1_000_148_016); } #[doc = "Generated 
from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const COLORDODGE_EXT: Self = BlendOp(1000148017); + pub const COLORDODGE_EXT: Self = BlendOp(1_000_148_017); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const COLORBURN_EXT: Self = BlendOp(1000148018); + pub const COLORBURN_EXT: Self = BlendOp(1_000_148_018); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const HARDLIGHT_EXT: Self = BlendOp(1000148019); + pub const HARDLIGHT_EXT: Self = BlendOp(1_000_148_019); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const SOFTLIGHT_EXT: Self = BlendOp(1000148020); + pub const SOFTLIGHT_EXT: Self = BlendOp(1_000_148_020); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const DIFFERENCE_EXT: Self = BlendOp(1000148021); + pub const DIFFERENCE_EXT: Self = BlendOp(1_000_148_021); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const EXCLUSION_EXT: Self = BlendOp(1000148022); + pub const EXCLUSION_EXT: Self = BlendOp(1_000_148_022); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const INVERT_EXT: Self = BlendOp(1000148023); + pub const INVERT_EXT: Self = BlendOp(1_000_148_023); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const INVERT_RGB_EXT: Self = BlendOp(1000148024); + pub const INVERT_RGB_EXT: Self = BlendOp(1_000_148_024); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const LINEARDODGE_EXT: Self = BlendOp(1000148025); + pub const LINEARDODGE_EXT: Self = BlendOp(1_000_148_025); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const LINEARBURN_EXT: Self = BlendOp(1000148026); + pub const LINEARBURN_EXT: Self = BlendOp(1_000_148_026); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const 
VIVIDLIGHT_EXT: Self = BlendOp(1000148027); + pub const VIVIDLIGHT_EXT: Self = BlendOp(1_000_148_027); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const LINEARLIGHT_EXT: Self = BlendOp(1000148028); + pub const LINEARLIGHT_EXT: Self = BlendOp(1_000_148_028); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const PINLIGHT_EXT: Self = BlendOp(1000148029); + pub const PINLIGHT_EXT: Self = BlendOp(1_000_148_029); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const HARDMIX_EXT: Self = BlendOp(1000148030); + pub const HARDMIX_EXT: Self = BlendOp(1_000_148_030); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const HSL_HUE_EXT: Self = BlendOp(1000148031); + pub const HSL_HUE_EXT: Self = BlendOp(1_000_148_031); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const HSL_SATURATION_EXT: Self = BlendOp(1000148032); + pub const HSL_SATURATION_EXT: Self = BlendOp(1_000_148_032); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const HSL_COLOR_EXT: Self = BlendOp(1000148033); + pub const HSL_COLOR_EXT: Self = BlendOp(1_000_148_033); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const HSL_LUMINOSITY_EXT: Self = BlendOp(1000148034); + pub const HSL_LUMINOSITY_EXT: Self = BlendOp(1_000_148_034); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const PLUS_EXT: Self = BlendOp(1000148035); + pub const PLUS_EXT: Self = BlendOp(1_000_148_035); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const PLUS_CLAMPED_EXT: Self = BlendOp(1000148036); + pub const PLUS_CLAMPED_EXT: Self = BlendOp(1_000_148_036); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const PLUS_CLAMPED_ALPHA_EXT: Self = BlendOp(1000148037); + pub const 
PLUS_CLAMPED_ALPHA_EXT: Self = BlendOp(1_000_148_037); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const PLUS_DARKER_EXT: Self = BlendOp(1000148038); + pub const PLUS_DARKER_EXT: Self = BlendOp(1_000_148_038); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const MINUS_EXT: Self = BlendOp(1000148039); + pub const MINUS_EXT: Self = BlendOp(1_000_148_039); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const MINUS_CLAMPED_EXT: Self = BlendOp(1000148040); + pub const MINUS_CLAMPED_EXT: Self = BlendOp(1_000_148_040); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const CONTRAST_EXT: Self = BlendOp(1000148041); + pub const CONTRAST_EXT: Self = BlendOp(1_000_148_041); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const INVERT_OVG_EXT: Self = BlendOp(1000148042); + pub const INVERT_OVG_EXT: Self = BlendOp(1_000_148_042); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const RED_EXT: Self = BlendOp(1000148043); + pub const RED_EXT: Self = BlendOp(1_000_148_043); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const GREEN_EXT: Self = BlendOp(1000148044); + pub const GREEN_EXT: Self = BlendOp(1_000_148_044); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl BlendOp { - pub const BLUE_EXT: Self = BlendOp(1000148045); + pub const BLUE_EXT: Self = BlendOp(1_000_148_045); } #[doc = "Generated from \'VK_EXT_blend_operation_advanced\'"] impl AccessFlags { - pub const COLOR_ATTACHMENT_READ_NONCOHERENT_EXT: Self = AccessFlags(0b10000000000000000000); + pub const COLOR_ATTACHMENT_READ_NONCOHERENT_EXT: Self = AccessFlags(0b1000_0000_0000_0000_0000); } impl NvFragmentCoverageToColorFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -54345,29 +67360,1301 @@ impl NvFragmentCoverageToColorFn { } #[doc = 
"Generated from \'VK_NV_fragment_coverage_to_color\'"] impl StructureType { - pub const PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV: Self = StructureType(1000149000); + pub const PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV: Self = StructureType(1_000_149_000); } -impl NvExtension151Fn { +impl KhrRayTracingFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_151\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_ray_tracing\0") .expect("Wrong extension string") } } -pub struct NvExtension151Fn {} -unsafe impl Send for NvExtension151Fn {} -unsafe impl Sync for NvExtension151Fn {} -impl ::std::clone::Clone for NvExtension151Fn { +#[allow(non_camel_case_types)] +pub type PFN_vkCreateAccelerationStructureKHR = extern "system" fn( + device: Device, + p_create_info: *const AccelerationStructureCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_acceleration_structure: *mut AccelerationStructureKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyAccelerationStructureKHR = extern "system" fn( + device: Device, + acceleration_structure: AccelerationStructureKHR, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetAccelerationStructureMemoryRequirementsKHR = extern "system" fn( + device: Device, + p_info: *const AccelerationStructureMemoryRequirementsInfoKHR, + p_memory_requirements: *mut MemoryRequirements2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkBindAccelerationStructureMemoryKHR = extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindAccelerationStructureMemoryInfoKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBuildAccelerationStructureKHR = extern "system" fn( + command_buffer: CommandBuffer, + info_count: u32, + p_infos: *const AccelerationStructureBuildGeometryInfoKHR, + pp_offset_infos: *const *const 
AccelerationStructureBuildOffsetInfoKHR, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBuildAccelerationStructureIndirectKHR = extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const AccelerationStructureBuildGeometryInfoKHR, + indirect_buffer: Buffer, + indirect_offset: DeviceSize, + indirect_stride: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkBuildAccelerationStructureKHR = extern "system" fn( + device: Device, + info_count: u32, + p_infos: *const AccelerationStructureBuildGeometryInfoKHR, + pp_offset_infos: *const *const AccelerationStructureBuildOffsetInfoKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCopyAccelerationStructureKHR = + extern "system" fn(device: Device, p_info: *const CopyAccelerationStructureInfoKHR) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCopyAccelerationStructureToMemoryKHR = extern "system" fn( + device: Device, + p_info: *const CopyAccelerationStructureToMemoryInfoKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCopyMemoryToAccelerationStructureKHR = extern "system" fn( + device: Device, + p_info: *const CopyMemoryToAccelerationStructureInfoKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkWriteAccelerationStructuresPropertiesKHR = extern "system" fn( + device: Device, + acceleration_structure_count: u32, + p_acceleration_structures: *const AccelerationStructureKHR, + query_type: QueryType, + data_size: usize, + p_data: *mut c_void, + stride: usize, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdCopyAccelerationStructureKHR = extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const CopyAccelerationStructureInfoKHR, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdCopyAccelerationStructureToMemoryKHR = extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const CopyAccelerationStructureToMemoryInfoKHR, +) -> c_void; +#[allow(non_camel_case_types)] +pub type 
PFN_vkCmdCopyMemoryToAccelerationStructureKHR = extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const CopyMemoryToAccelerationStructureInfoKHR, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdTraceRaysKHR = extern "system" fn( + command_buffer: CommandBuffer, + p_raygen_shader_binding_table: *const StridedBufferRegionKHR, + p_miss_shader_binding_table: *const StridedBufferRegionKHR, + p_hit_shader_binding_table: *const StridedBufferRegionKHR, + p_callable_shader_binding_table: *const StridedBufferRegionKHR, + width: u32, + height: u32, + depth: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateRayTracingPipelinesKHR = extern "system" fn( + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const RayTracingPipelineCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetRayTracingShaderGroupHandlesKHR = extern "system" fn( + device: Device, + pipeline: Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + p_data: *mut c_void, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetAccelerationStructureDeviceAddressKHR = extern "system" fn( + device: Device, + p_info: *const AccelerationStructureDeviceAddressInfoKHR, +) -> DeviceAddress; +#[allow(non_camel_case_types)] +pub type PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = extern "system" fn( + device: Device, + pipeline: Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + p_data: *mut c_void, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdWriteAccelerationStructuresPropertiesKHR = extern "system" fn( + command_buffer: CommandBuffer, + acceleration_structure_count: u32, + p_acceleration_structures: *const AccelerationStructureKHR, + query_type: QueryType, + query_pool: QueryPool, + first_query: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub 
type PFN_vkCmdTraceRaysIndirectKHR = extern "system" fn( + command_buffer: CommandBuffer, + p_raygen_shader_binding_table: *const StridedBufferRegionKHR, + p_miss_shader_binding_table: *const StridedBufferRegionKHR, + p_hit_shader_binding_table: *const StridedBufferRegionKHR, + p_callable_shader_binding_table: *const StridedBufferRegionKHR, + buffer: Buffer, + offset: DeviceSize, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceAccelerationStructureCompatibilityKHR = + extern "system" fn(device: Device, version: *const AccelerationStructureVersionKHR) -> Result; +pub struct KhrRayTracingFn { + pub create_acceleration_structure_khr: extern "system" fn( + device: Device, + p_create_info: *const AccelerationStructureCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_acceleration_structure: *mut AccelerationStructureKHR, + ) -> Result, + pub destroy_acceleration_structure_khr: extern "system" fn( + device: Device, + acceleration_structure: AccelerationStructureKHR, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_acceleration_structure_memory_requirements_khr: extern "system" fn( + device: Device, + p_info: *const AccelerationStructureMemoryRequirementsInfoKHR, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void, + pub bind_acceleration_structure_memory_khr: extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindAccelerationStructureMemoryInfoKHR, + ) -> Result, + pub cmd_build_acceleration_structure_khr: extern "system" fn( + command_buffer: CommandBuffer, + info_count: u32, + p_infos: *const AccelerationStructureBuildGeometryInfoKHR, + pp_offset_infos: *const *const AccelerationStructureBuildOffsetInfoKHR, + ) -> c_void, + pub cmd_build_acceleration_structure_indirect_khr: extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const AccelerationStructureBuildGeometryInfoKHR, + indirect_buffer: Buffer, + indirect_offset: DeviceSize, + indirect_stride: u32, + ) 
-> c_void, + pub build_acceleration_structure_khr: extern "system" fn( + device: Device, + info_count: u32, + p_infos: *const AccelerationStructureBuildGeometryInfoKHR, + pp_offset_infos: *const *const AccelerationStructureBuildOffsetInfoKHR, + ) -> Result, + pub copy_acceleration_structure_khr: extern "system" fn( + device: Device, + p_info: *const CopyAccelerationStructureInfoKHR, + ) -> Result, + pub copy_acceleration_structure_to_memory_khr: extern "system" fn( + device: Device, + p_info: *const CopyAccelerationStructureToMemoryInfoKHR, + ) -> Result, + pub copy_memory_to_acceleration_structure_khr: extern "system" fn( + device: Device, + p_info: *const CopyMemoryToAccelerationStructureInfoKHR, + ) -> Result, + pub write_acceleration_structures_properties_khr: extern "system" fn( + device: Device, + acceleration_structure_count: u32, + p_acceleration_structures: *const AccelerationStructureKHR, + query_type: QueryType, + data_size: usize, + p_data: *mut c_void, + stride: usize, + ) -> Result, + pub cmd_copy_acceleration_structure_khr: extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const CopyAccelerationStructureInfoKHR, + ) -> c_void, + pub cmd_copy_acceleration_structure_to_memory_khr: extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const CopyAccelerationStructureToMemoryInfoKHR, + ) -> c_void, + pub cmd_copy_memory_to_acceleration_structure_khr: extern "system" fn( + command_buffer: CommandBuffer, + p_info: *const CopyMemoryToAccelerationStructureInfoKHR, + ) -> c_void, + pub cmd_trace_rays_khr: extern "system" fn( + command_buffer: CommandBuffer, + p_raygen_shader_binding_table: *const StridedBufferRegionKHR, + p_miss_shader_binding_table: *const StridedBufferRegionKHR, + p_hit_shader_binding_table: *const StridedBufferRegionKHR, + p_callable_shader_binding_table: *const StridedBufferRegionKHR, + width: u32, + height: u32, + depth: u32, + ) -> c_void, + pub create_ray_tracing_pipelines_khr: extern "system" fn( + device: 
Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const RayTracingPipelineCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, + ) -> Result, + pub get_ray_tracing_shader_group_handles_khr: extern "system" fn( + device: Device, + pipeline: Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + p_data: *mut c_void, + ) -> Result, + pub get_acceleration_structure_device_address_khr: extern "system" fn( + device: Device, + p_info: *const AccelerationStructureDeviceAddressInfoKHR, + ) -> DeviceAddress, + pub get_ray_tracing_capture_replay_shader_group_handles_khr: extern "system" fn( + device: Device, + pipeline: Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + p_data: *mut c_void, + ) -> Result, + pub cmd_write_acceleration_structures_properties_khr: extern "system" fn( + command_buffer: CommandBuffer, + acceleration_structure_count: u32, + p_acceleration_structures: *const AccelerationStructureKHR, + query_type: QueryType, + query_pool: QueryPool, + first_query: u32, + ) -> c_void, + pub cmd_trace_rays_indirect_khr: extern "system" fn( + command_buffer: CommandBuffer, + p_raygen_shader_binding_table: *const StridedBufferRegionKHR, + p_miss_shader_binding_table: *const StridedBufferRegionKHR, + p_hit_shader_binding_table: *const StridedBufferRegionKHR, + p_callable_shader_binding_table: *const StridedBufferRegionKHR, + buffer: Buffer, + offset: DeviceSize, + ) -> c_void, + pub get_device_acceleration_structure_compatibility_khr: extern "system" fn( + device: Device, + version: *const AccelerationStructureVersionKHR, + ) -> Result, +} +unsafe impl Send for KhrRayTracingFn {} +unsafe impl Sync for KhrRayTracingFn {} +impl ::std::clone::Clone for KhrRayTracingFn { fn clone(&self) -> Self { - NvExtension151Fn {} + KhrRayTracingFn { + create_acceleration_structure_khr: self.create_acceleration_structure_khr, + destroy_acceleration_structure_khr: 
self.destroy_acceleration_structure_khr, + get_acceleration_structure_memory_requirements_khr: self + .get_acceleration_structure_memory_requirements_khr, + bind_acceleration_structure_memory_khr: self.bind_acceleration_structure_memory_khr, + cmd_build_acceleration_structure_khr: self.cmd_build_acceleration_structure_khr, + cmd_build_acceleration_structure_indirect_khr: self + .cmd_build_acceleration_structure_indirect_khr, + build_acceleration_structure_khr: self.build_acceleration_structure_khr, + copy_acceleration_structure_khr: self.copy_acceleration_structure_khr, + copy_acceleration_structure_to_memory_khr: self + .copy_acceleration_structure_to_memory_khr, + copy_memory_to_acceleration_structure_khr: self + .copy_memory_to_acceleration_structure_khr, + write_acceleration_structures_properties_khr: self + .write_acceleration_structures_properties_khr, + cmd_copy_acceleration_structure_khr: self.cmd_copy_acceleration_structure_khr, + cmd_copy_acceleration_structure_to_memory_khr: self + .cmd_copy_acceleration_structure_to_memory_khr, + cmd_copy_memory_to_acceleration_structure_khr: self + .cmd_copy_memory_to_acceleration_structure_khr, + cmd_trace_rays_khr: self.cmd_trace_rays_khr, + create_ray_tracing_pipelines_khr: self.create_ray_tracing_pipelines_khr, + get_ray_tracing_shader_group_handles_khr: self.get_ray_tracing_shader_group_handles_khr, + get_acceleration_structure_device_address_khr: self + .get_acceleration_structure_device_address_khr, + get_ray_tracing_capture_replay_shader_group_handles_khr: self + .get_ray_tracing_capture_replay_shader_group_handles_khr, + cmd_write_acceleration_structures_properties_khr: self + .cmd_write_acceleration_structures_properties_khr, + cmd_trace_rays_indirect_khr: self.cmd_trace_rays_indirect_khr, + get_device_acceleration_structure_compatibility_khr: self + .get_device_acceleration_structure_compatibility_khr, + } } } -impl NvExtension151Fn { +impl KhrRayTracingFn { pub fn load(mut _f: F) -> Self where F: 
FnMut(&::std::ffi::CStr) -> *const c_void, { - NvExtension151Fn {} + KhrRayTracingFn { + create_acceleration_structure_khr: unsafe { + extern "system" fn create_acceleration_structure_khr( + _device: Device, + _p_create_info: *const AccelerationStructureCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_acceleration_structure: *mut AccelerationStructureKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_acceleration_structure_khr) + )) + } + let raw_name = stringify!(vkCreateAccelerationStructureKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_acceleration_structure_khr + } else { + ::std::mem::transmute(val) + } + }, + destroy_acceleration_structure_khr: unsafe { + extern "system" fn destroy_acceleration_structure_khr( + _device: Device, + _acceleration_structure: AccelerationStructureKHR, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_acceleration_structure_khr) + )) + } + let raw_name = stringify!(vkDestroyAccelerationStructureKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_acceleration_structure_khr + } else { + ::std::mem::transmute(val) + } + }, + get_acceleration_structure_memory_requirements_khr: unsafe { + extern "system" fn get_acceleration_structure_memory_requirements_khr( + _device: Device, + _p_info: *const AccelerationStructureMemoryRequirementsInfoKHR, + _p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_acceleration_structure_memory_requirements_khr) + )) + } + let raw_name = stringify!(vkGetAccelerationStructureMemoryRequirementsKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_acceleration_structure_memory_requirements_khr + } else { + 
::std::mem::transmute(val) + } + }, + bind_acceleration_structure_memory_khr: unsafe { + extern "system" fn bind_acceleration_structure_memory_khr( + _device: Device, + _bind_info_count: u32, + _p_bind_infos: *const BindAccelerationStructureMemoryInfoKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(bind_acceleration_structure_memory_khr) + )) + } + let raw_name = stringify!(vkBindAccelerationStructureMemoryKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + bind_acceleration_structure_memory_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_build_acceleration_structure_khr: unsafe { + extern "system" fn cmd_build_acceleration_structure_khr( + _command_buffer: CommandBuffer, + _info_count: u32, + _p_infos: *const AccelerationStructureBuildGeometryInfoKHR, + _pp_offset_infos: *const *const AccelerationStructureBuildOffsetInfoKHR, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_build_acceleration_structure_khr) + )) + } + let raw_name = stringify!(vkCmdBuildAccelerationStructureKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_build_acceleration_structure_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_build_acceleration_structure_indirect_khr: unsafe { + extern "system" fn cmd_build_acceleration_structure_indirect_khr( + _command_buffer: CommandBuffer, + _p_info: *const AccelerationStructureBuildGeometryInfoKHR, + _indirect_buffer: Buffer, + _indirect_offset: DeviceSize, + _indirect_stride: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_build_acceleration_structure_indirect_khr) + )) + } + let raw_name = stringify!(vkCmdBuildAccelerationStructureIndirectKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_build_acceleration_structure_indirect_khr + } else { + ::std::mem::transmute(val) 
+ } + }, + build_acceleration_structure_khr: unsafe { + extern "system" fn build_acceleration_structure_khr( + _device: Device, + _info_count: u32, + _p_infos: *const AccelerationStructureBuildGeometryInfoKHR, + _pp_offset_infos: *const *const AccelerationStructureBuildOffsetInfoKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(build_acceleration_structure_khr) + )) + } + let raw_name = stringify!(vkBuildAccelerationStructureKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + build_acceleration_structure_khr + } else { + ::std::mem::transmute(val) + } + }, + copy_acceleration_structure_khr: unsafe { + extern "system" fn copy_acceleration_structure_khr( + _device: Device, + _p_info: *const CopyAccelerationStructureInfoKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(copy_acceleration_structure_khr) + )) + } + let raw_name = stringify!(vkCopyAccelerationStructureKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + copy_acceleration_structure_khr + } else { + ::std::mem::transmute(val) + } + }, + copy_acceleration_structure_to_memory_khr: unsafe { + extern "system" fn copy_acceleration_structure_to_memory_khr( + _device: Device, + _p_info: *const CopyAccelerationStructureToMemoryInfoKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(copy_acceleration_structure_to_memory_khr) + )) + } + let raw_name = stringify!(vkCopyAccelerationStructureToMemoryKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + copy_acceleration_structure_to_memory_khr + } else { + ::std::mem::transmute(val) + } + }, + copy_memory_to_acceleration_structure_khr: unsafe { + extern "system" fn copy_memory_to_acceleration_structure_khr( + _device: Device, + _p_info: *const CopyMemoryToAccelerationStructureInfoKHR, + ) -> Result { + panic!(concat!( + "Unable to 
load ", + stringify!(copy_memory_to_acceleration_structure_khr) + )) + } + let raw_name = stringify!(vkCopyMemoryToAccelerationStructureKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + copy_memory_to_acceleration_structure_khr + } else { + ::std::mem::transmute(val) + } + }, + write_acceleration_structures_properties_khr: unsafe { + extern "system" fn write_acceleration_structures_properties_khr( + _device: Device, + _acceleration_structure_count: u32, + _p_acceleration_structures: *const AccelerationStructureKHR, + _query_type: QueryType, + _data_size: usize, + _p_data: *mut c_void, + _stride: usize, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(write_acceleration_structures_properties_khr) + )) + } + let raw_name = stringify!(vkWriteAccelerationStructuresPropertiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + write_acceleration_structures_properties_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_copy_acceleration_structure_khr: unsafe { + extern "system" fn cmd_copy_acceleration_structure_khr( + _command_buffer: CommandBuffer, + _p_info: *const CopyAccelerationStructureInfoKHR, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_copy_acceleration_structure_khr) + )) + } + let raw_name = stringify!(vkCmdCopyAccelerationStructureKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_copy_acceleration_structure_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_copy_acceleration_structure_to_memory_khr: unsafe { + extern "system" fn cmd_copy_acceleration_structure_to_memory_khr( + _command_buffer: CommandBuffer, + _p_info: *const CopyAccelerationStructureToMemoryInfoKHR, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_copy_acceleration_structure_to_memory_khr) + )) + } + let raw_name = 
stringify!(vkCmdCopyAccelerationStructureToMemoryKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_copy_acceleration_structure_to_memory_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_copy_memory_to_acceleration_structure_khr: unsafe { + extern "system" fn cmd_copy_memory_to_acceleration_structure_khr( + _command_buffer: CommandBuffer, + _p_info: *const CopyMemoryToAccelerationStructureInfoKHR, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_copy_memory_to_acceleration_structure_khr) + )) + } + let raw_name = stringify!(vkCmdCopyMemoryToAccelerationStructureKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_copy_memory_to_acceleration_structure_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_trace_rays_khr: unsafe { + extern "system" fn cmd_trace_rays_khr( + _command_buffer: CommandBuffer, + _p_raygen_shader_binding_table: *const StridedBufferRegionKHR, + _p_miss_shader_binding_table: *const StridedBufferRegionKHR, + _p_hit_shader_binding_table: *const StridedBufferRegionKHR, + _p_callable_shader_binding_table: *const StridedBufferRegionKHR, + _width: u32, + _height: u32, + _depth: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(cmd_trace_rays_khr))) + } + let raw_name = stringify!(vkCmdTraceRaysKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_trace_rays_khr + } else { + ::std::mem::transmute(val) + } + }, + create_ray_tracing_pipelines_khr: unsafe { + extern "system" fn create_ray_tracing_pipelines_khr( + _device: Device, + _pipeline_cache: PipelineCache, + _create_info_count: u32, + _p_create_infos: *const RayTracingPipelineCreateInfoKHR, + _p_allocator: *const AllocationCallbacks, + _p_pipelines: *mut Pipeline, + ) -> Result { + panic!(concat!( + "Unable to load ", + 
stringify!(create_ray_tracing_pipelines_khr) + )) + } + let raw_name = stringify!(vkCreateRayTracingPipelinesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_ray_tracing_pipelines_khr + } else { + ::std::mem::transmute(val) + } + }, + get_ray_tracing_shader_group_handles_khr: unsafe { + extern "system" fn get_ray_tracing_shader_group_handles_khr( + _device: Device, + _pipeline: Pipeline, + _first_group: u32, + _group_count: u32, + _data_size: usize, + _p_data: *mut c_void, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_ray_tracing_shader_group_handles_khr) + )) + } + let raw_name = stringify!(vkGetRayTracingShaderGroupHandlesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_ray_tracing_shader_group_handles_khr + } else { + ::std::mem::transmute(val) + } + }, + get_acceleration_structure_device_address_khr: unsafe { + extern "system" fn get_acceleration_structure_device_address_khr( + _device: Device, + _p_info: *const AccelerationStructureDeviceAddressInfoKHR, + ) -> DeviceAddress { + panic!(concat!( + "Unable to load ", + stringify!(get_acceleration_structure_device_address_khr) + )) + } + let raw_name = stringify!(vkGetAccelerationStructureDeviceAddressKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_acceleration_structure_device_address_khr + } else { + ::std::mem::transmute(val) + } + }, + get_ray_tracing_capture_replay_shader_group_handles_khr: unsafe { + extern "system" fn get_ray_tracing_capture_replay_shader_group_handles_khr( + _device: Device, + _pipeline: Pipeline, + _first_group: u32, + _group_count: u32, + _data_size: usize, + _p_data: *mut c_void, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_ray_tracing_capture_replay_shader_group_handles_khr) + )) + } + let raw_name = 
stringify!(vkGetRayTracingCaptureReplayShaderGroupHandlesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_ray_tracing_capture_replay_shader_group_handles_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_write_acceleration_structures_properties_khr: unsafe { + extern "system" fn cmd_write_acceleration_structures_properties_khr( + _command_buffer: CommandBuffer, + _acceleration_structure_count: u32, + _p_acceleration_structures: *const AccelerationStructureKHR, + _query_type: QueryType, + _query_pool: QueryPool, + _first_query: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_write_acceleration_structures_properties_khr) + )) + } + let raw_name = stringify!(vkCmdWriteAccelerationStructuresPropertiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_write_acceleration_structures_properties_khr + } else { + ::std::mem::transmute(val) + } + }, + cmd_trace_rays_indirect_khr: unsafe { + extern "system" fn cmd_trace_rays_indirect_khr( + _command_buffer: CommandBuffer, + _p_raygen_shader_binding_table: *const StridedBufferRegionKHR, + _p_miss_shader_binding_table: *const StridedBufferRegionKHR, + _p_hit_shader_binding_table: *const StridedBufferRegionKHR, + _p_callable_shader_binding_table: *const StridedBufferRegionKHR, + _buffer: Buffer, + _offset: DeviceSize, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_trace_rays_indirect_khr) + )) + } + let raw_name = stringify!(vkCmdTraceRaysIndirectKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_trace_rays_indirect_khr + } else { + ::std::mem::transmute(val) + } + }, + get_device_acceleration_structure_compatibility_khr: unsafe { + extern "system" fn get_device_acceleration_structure_compatibility_khr( + _device: Device, + _version: *const AccelerationStructureVersionKHR, + 
) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_device_acceleration_structure_compatibility_khr) + )) + } + let raw_name = stringify!(vkGetDeviceAccelerationStructureCompatibilityKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_acceleration_structure_compatibility_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn create_acceleration_structure_khr( + &self, + device: Device, + p_create_info: *const AccelerationStructureCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_acceleration_structure: *mut AccelerationStructureKHR, + ) -> Result { + (self.create_acceleration_structure_khr)( + device, + p_create_info, + p_allocator, + p_acceleration_structure, + ) + } + #[doc = ""] + pub unsafe fn destroy_acceleration_structure_khr( + &self, + device: Device, + acceleration_structure: AccelerationStructureKHR, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_acceleration_structure_khr)(device, acceleration_structure, p_allocator) + } + #[doc = ""] + pub unsafe fn get_acceleration_structure_memory_requirements_khr( + &self, + device: Device, + p_info: *const AccelerationStructureMemoryRequirementsInfoKHR, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + (self.get_acceleration_structure_memory_requirements_khr)( + device, + p_info, + p_memory_requirements, + ) + } + #[doc = ""] + pub unsafe fn bind_acceleration_structure_memory_khr( + &self, + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindAccelerationStructureMemoryInfoKHR, + ) -> Result { + (self.bind_acceleration_structure_memory_khr)(device, bind_info_count, p_bind_infos) + } + #[doc = ""] + pub unsafe fn cmd_build_acceleration_structure_khr( + &self, + command_buffer: CommandBuffer, + info_count: u32, + p_infos: *const AccelerationStructureBuildGeometryInfoKHR, + pp_offset_infos: *const *const 
AccelerationStructureBuildOffsetInfoKHR, + ) -> c_void { + (self.cmd_build_acceleration_structure_khr)( + command_buffer, + info_count, + p_infos, + pp_offset_infos, + ) + } + #[doc = ""] + pub unsafe fn cmd_build_acceleration_structure_indirect_khr( + &self, + command_buffer: CommandBuffer, + p_info: *const AccelerationStructureBuildGeometryInfoKHR, + indirect_buffer: Buffer, + indirect_offset: DeviceSize, + indirect_stride: u32, + ) -> c_void { + (self.cmd_build_acceleration_structure_indirect_khr)( + command_buffer, + p_info, + indirect_buffer, + indirect_offset, + indirect_stride, + ) + } + #[doc = ""] + pub unsafe fn build_acceleration_structure_khr( + &self, + device: Device, + info_count: u32, + p_infos: *const AccelerationStructureBuildGeometryInfoKHR, + pp_offset_infos: *const *const AccelerationStructureBuildOffsetInfoKHR, + ) -> Result { + (self.build_acceleration_structure_khr)(device, info_count, p_infos, pp_offset_infos) + } + #[doc = ""] + pub unsafe fn copy_acceleration_structure_khr( + &self, + device: Device, + p_info: *const CopyAccelerationStructureInfoKHR, + ) -> Result { + (self.copy_acceleration_structure_khr)(device, p_info) + } + #[doc = ""] + pub unsafe fn copy_acceleration_structure_to_memory_khr( + &self, + device: Device, + p_info: *const CopyAccelerationStructureToMemoryInfoKHR, + ) -> Result { + (self.copy_acceleration_structure_to_memory_khr)(device, p_info) + } + #[doc = ""] + pub unsafe fn copy_memory_to_acceleration_structure_khr( + &self, + device: Device, + p_info: *const CopyMemoryToAccelerationStructureInfoKHR, + ) -> Result { + (self.copy_memory_to_acceleration_structure_khr)(device, p_info) + } + #[doc = ""] + pub unsafe fn write_acceleration_structures_properties_khr( + &self, + device: Device, + acceleration_structure_count: u32, + p_acceleration_structures: *const AccelerationStructureKHR, + query_type: QueryType, + data_size: usize, + p_data: *mut c_void, + stride: usize, + ) -> Result { + 
(self.write_acceleration_structures_properties_khr)( + device, + acceleration_structure_count, + p_acceleration_structures, + query_type, + data_size, + p_data, + stride, + ) + } + #[doc = ""] + pub unsafe fn cmd_copy_acceleration_structure_khr( + &self, + command_buffer: CommandBuffer, + p_info: *const CopyAccelerationStructureInfoKHR, + ) -> c_void { + (self.cmd_copy_acceleration_structure_khr)(command_buffer, p_info) + } + #[doc = ""] + pub unsafe fn cmd_copy_acceleration_structure_to_memory_khr( + &self, + command_buffer: CommandBuffer, + p_info: *const CopyAccelerationStructureToMemoryInfoKHR, + ) -> c_void { + (self.cmd_copy_acceleration_structure_to_memory_khr)(command_buffer, p_info) + } + #[doc = ""] + pub unsafe fn cmd_copy_memory_to_acceleration_structure_khr( + &self, + command_buffer: CommandBuffer, + p_info: *const CopyMemoryToAccelerationStructureInfoKHR, + ) -> c_void { + (self.cmd_copy_memory_to_acceleration_structure_khr)(command_buffer, p_info) + } + #[doc = ""] + pub unsafe fn cmd_trace_rays_khr( + &self, + command_buffer: CommandBuffer, + p_raygen_shader_binding_table: *const StridedBufferRegionKHR, + p_miss_shader_binding_table: *const StridedBufferRegionKHR, + p_hit_shader_binding_table: *const StridedBufferRegionKHR, + p_callable_shader_binding_table: *const StridedBufferRegionKHR, + width: u32, + height: u32, + depth: u32, + ) -> c_void { + (self.cmd_trace_rays_khr)( + command_buffer, + p_raygen_shader_binding_table, + p_miss_shader_binding_table, + p_hit_shader_binding_table, + p_callable_shader_binding_table, + width, + height, + depth, + ) + } + #[doc = ""] + pub unsafe fn create_ray_tracing_pipelines_khr( + &self, + device: Device, + pipeline_cache: PipelineCache, + create_info_count: u32, + p_create_infos: *const RayTracingPipelineCreateInfoKHR, + p_allocator: *const AllocationCallbacks, + p_pipelines: *mut Pipeline, + ) -> Result { + (self.create_ray_tracing_pipelines_khr)( + device, + pipeline_cache, + create_info_count, + 
p_create_infos, + p_allocator, + p_pipelines, + ) + } + #[doc = ""] + pub unsafe fn get_ray_tracing_shader_group_handles_khr( + &self, + device: Device, + pipeline: Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + p_data: *mut c_void, + ) -> Result { + (self.get_ray_tracing_shader_group_handles_khr)( + device, + pipeline, + first_group, + group_count, + data_size, + p_data, + ) + } + #[doc = ""] + pub unsafe fn get_acceleration_structure_device_address_khr( + &self, + device: Device, + p_info: *const AccelerationStructureDeviceAddressInfoKHR, + ) -> DeviceAddress { + (self.get_acceleration_structure_device_address_khr)(device, p_info) + } + #[doc = ""] + pub unsafe fn get_ray_tracing_capture_replay_shader_group_handles_khr( + &self, + device: Device, + pipeline: Pipeline, + first_group: u32, + group_count: u32, + data_size: usize, + p_data: *mut c_void, + ) -> Result { + (self.get_ray_tracing_capture_replay_shader_group_handles_khr)( + device, + pipeline, + first_group, + group_count, + data_size, + p_data, + ) + } + #[doc = ""] + pub unsafe fn cmd_write_acceleration_structures_properties_khr( + &self, + command_buffer: CommandBuffer, + acceleration_structure_count: u32, + p_acceleration_structures: *const AccelerationStructureKHR, + query_type: QueryType, + query_pool: QueryPool, + first_query: u32, + ) -> c_void { + (self.cmd_write_acceleration_structures_properties_khr)( + command_buffer, + acceleration_structure_count, + p_acceleration_structures, + query_type, + query_pool, + first_query, + ) + } + #[doc = ""] + pub unsafe fn cmd_trace_rays_indirect_khr( + &self, + command_buffer: CommandBuffer, + p_raygen_shader_binding_table: *const StridedBufferRegionKHR, + p_miss_shader_binding_table: *const StridedBufferRegionKHR, + p_hit_shader_binding_table: *const StridedBufferRegionKHR, + p_callable_shader_binding_table: *const StridedBufferRegionKHR, + buffer: Buffer, + offset: DeviceSize, + ) -> c_void { + (self.cmd_trace_rays_indirect_khr)( + 
command_buffer, + p_raygen_shader_binding_table, + p_miss_shader_binding_table, + p_hit_shader_binding_table, + p_callable_shader_binding_table, + buffer, + offset, + ) + } + #[doc = ""] + pub unsafe fn get_device_acceleration_structure_compatibility_khr( + &self, + device: Device, + version: *const AccelerationStructureVersionKHR, + ) -> Result { + (self.get_device_acceleration_structure_compatibility_khr)(device, version) + } +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR: Self = StructureType(1_000_165_006); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR: Self = StructureType(1_000_165_007); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR: Self = StructureType(1_000_150_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR: Self = + StructureType(1_000_150_001); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR: Self = StructureType(1_000_150_002); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR: Self = StructureType(1_000_150_003); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR: Self = + StructureType(1_000_150_004); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR: Self = + StructureType(1_000_150_005); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_GEOMETRY_KHR: Self = StructureType(1_000_150_006); +} +#[doc = 
"Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_INFO_KHR: Self = StructureType(1_000_150_007); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR: Self = + StructureType(1_000_150_008); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_VERSION_KHR: Self = StructureType(1_000_150_009); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const COPY_ACCELERATION_STRUCTURE_INFO_KHR: Self = StructureType(1_000_150_010); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR: Self = StructureType(1_000_150_011); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR: Self = StructureType(1_000_150_012); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR: Self = StructureType(1_000_150_013); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR: Self = StructureType(1_000_150_014); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const RAY_TRACING_PIPELINE_CREATE_INFO_KHR: Self = StructureType(1_000_150_015); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR: Self = StructureType(1_000_150_016); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const ACCELERATION_STRUCTURE_CREATE_INFO_KHR: Self = StructureType(1_000_150_017); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl StructureType { + pub const RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR: Self = StructureType(1_000_150_018); +} +#[doc = 
"Generated from \'VK_KHR_ray_tracing\'"] +impl ShaderStageFlags { + pub const RAYGEN_KHR: Self = ShaderStageFlags(0b1_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl ShaderStageFlags { + pub const ANY_HIT_KHR: Self = ShaderStageFlags(0b10_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl ShaderStageFlags { + pub const CLOSEST_HIT_KHR: Self = ShaderStageFlags(0b100_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl ShaderStageFlags { + pub const MISS_KHR: Self = ShaderStageFlags(0b1000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl ShaderStageFlags { + pub const INTERSECTION_KHR: Self = ShaderStageFlags(0b1_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl ShaderStageFlags { + pub const CALLABLE_KHR: Self = ShaderStageFlags(0b10_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl PipelineStageFlags { + pub const RAY_TRACING_SHADER_KHR: Self = PipelineStageFlags(0b10_0000_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl PipelineStageFlags { + pub const ACCELERATION_STRUCTURE_BUILD_KHR: Self = + PipelineStageFlags(0b10_0000_0000_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl BufferUsageFlags { + pub const RAY_TRACING_KHR: Self = BufferUsageFlags(0b100_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl PipelineBindPoint { + pub const RAY_TRACING_KHR: Self = PipelineBindPoint(1_000_165_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl DescriptorType { + pub const ACCELERATION_STRUCTURE_KHR: Self = DescriptorType(1_000_165_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl AccessFlags { + pub const ACCELERATION_STRUCTURE_READ_KHR: Self = AccessFlags(0b10_0000_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl AccessFlags { + pub const ACCELERATION_STRUCTURE_WRITE_KHR: Self = 
AccessFlags(0b100_0000_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl QueryType { + pub const ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR: Self = QueryType(1_000_165_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl QueryType { + pub const ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR: Self = QueryType(1_000_150_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl ObjectType { + pub const ACCELERATION_STRUCTURE_KHR: Self = ObjectType(1_000_165_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl DebugReportObjectTypeEXT { + pub const ACCELERATION_STRUCTURE_KHR: Self = DebugReportObjectTypeEXT(1_000_165_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl IndexType { + pub const NONE_KHR: Self = IndexType(1_000_165_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl GeometryTypeKHR { + pub const INSTANCES: Self = GeometryTypeKHR(1_000_150_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl Result { + pub const ERROR_INCOMPATIBLE_VERSION_KHR: Self = Result(-1_000_150_000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl FormatFeatureFlags { + pub const ACCELERATION_STRUCTURE_VERTEX_BUFFER_KHR: Self = + FormatFeatureFlags(0b10_0000_0000_0000_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl PipelineCreateFlags { + pub const RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_KHR: Self = + PipelineCreateFlags(0b100_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl PipelineCreateFlags { + pub const RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_KHR: Self = + PipelineCreateFlags(0b1000_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl PipelineCreateFlags { + pub const RAY_TRACING_NO_NULL_MISS_SHADERS_KHR: Self = + PipelineCreateFlags(0b1_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl PipelineCreateFlags { + pub const 
RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_KHR: Self = + PipelineCreateFlags(0b10_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl PipelineCreateFlags { + pub const RAY_TRACING_SKIP_TRIANGLES_KHR: Self = PipelineCreateFlags(0b1_0000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_ray_tracing\'"] +impl PipelineCreateFlags { + pub const RAY_TRACING_SKIP_AABBS_KHR: Self = PipelineCreateFlags(0b10_0000_0000_0000); } impl NvExtension152Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -54415,7 +68702,8 @@ impl NvFramebufferMixedSamplesFn { } #[doc = "Generated from \'VK_NV_framebuffer_mixed_samples\'"] impl StructureType { - pub const PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV: Self = StructureType(1000152000); + pub const PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV: Self = + StructureType(1_000_152_000); } impl NvFillRectangleFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -54441,30 +68729,38 @@ impl NvFillRectangleFn { } #[doc = "Generated from \'VK_NV_fill_rectangle\'"] impl PolygonMode { - pub const FILL_RECTANGLE_NV: Self = PolygonMode(1000153000); + pub const FILL_RECTANGLE_NV: Self = PolygonMode(1_000_153_000); } -impl NvExtension155Fn { +impl NvShaderSmBuiltinsFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_155\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_shader_sm_builtins\0") .expect("Wrong extension string") } } -pub struct NvExtension155Fn {} -unsafe impl Send for NvExtension155Fn {} -unsafe impl Sync for NvExtension155Fn {} -impl ::std::clone::Clone for NvExtension155Fn { +pub struct NvShaderSmBuiltinsFn {} +unsafe impl Send for NvShaderSmBuiltinsFn {} +unsafe impl Sync for NvShaderSmBuiltinsFn {} +impl ::std::clone::Clone for NvShaderSmBuiltinsFn { fn clone(&self) -> Self { - NvExtension155Fn {} + NvShaderSmBuiltinsFn {} } } -impl NvExtension155Fn { +impl NvShaderSmBuiltinsFn { pub fn load(mut _f: F) -> Self where F: 
FnMut(&::std::ffi::CStr) -> *const c_void, { - NvExtension155Fn {} + NvShaderSmBuiltinsFn {} } } +#[doc = "Generated from \'VK_NV_shader_sm_builtins\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV: Self = StructureType(1_000_154_000); +} +#[doc = "Generated from \'VK_NV_shader_sm_builtins\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV: Self = StructureType(1_000_154_001); +} impl ExtPostDepthCoverageFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_post_depth_coverage\0") @@ -54493,12 +68789,40 @@ impl KhrSamplerYcbcrConversionFn { .expect("Wrong extension string") } } -pub struct KhrSamplerYcbcrConversionFn {} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateSamplerYcbcrConversion = extern "system" fn( + device: Device, + p_create_info: *const SamplerYcbcrConversionCreateInfo, + p_allocator: *const AllocationCallbacks, + p_ycbcr_conversion: *mut SamplerYcbcrConversion, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroySamplerYcbcrConversion = extern "system" fn( + device: Device, + ycbcr_conversion: SamplerYcbcrConversion, + p_allocator: *const AllocationCallbacks, +) -> c_void; +pub struct KhrSamplerYcbcrConversionFn { + pub create_sampler_ycbcr_conversion_khr: extern "system" fn( + device: Device, + p_create_info: *const SamplerYcbcrConversionCreateInfo, + p_allocator: *const AllocationCallbacks, + p_ycbcr_conversion: *mut SamplerYcbcrConversion, + ) -> Result, + pub destroy_sampler_ycbcr_conversion_khr: extern "system" fn( + device: Device, + ycbcr_conversion: SamplerYcbcrConversion, + p_allocator: *const AllocationCallbacks, + ) -> c_void, +} unsafe impl Send for KhrSamplerYcbcrConversionFn {} unsafe impl Sync for KhrSamplerYcbcrConversionFn {} impl ::std::clone::Clone for KhrSamplerYcbcrConversionFn { fn clone(&self) -> Self { - KhrSamplerYcbcrConversionFn {} + KhrSamplerYcbcrConversionFn { + 
create_sampler_ycbcr_conversion_khr: self.create_sampler_ycbcr_conversion_khr, + destroy_sampler_ycbcr_conversion_khr: self.destroy_sampler_ycbcr_conversion_khr, + } } } impl KhrSamplerYcbcrConversionFn { @@ -54506,8 +68830,348 @@ impl KhrSamplerYcbcrConversionFn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrSamplerYcbcrConversionFn {} + KhrSamplerYcbcrConversionFn { + create_sampler_ycbcr_conversion_khr: unsafe { + extern "system" fn create_sampler_ycbcr_conversion_khr( + _device: Device, + _p_create_info: *const SamplerYcbcrConversionCreateInfo, + _p_allocator: *const AllocationCallbacks, + _p_ycbcr_conversion: *mut SamplerYcbcrConversion, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_sampler_ycbcr_conversion_khr) + )) + } + let raw_name = stringify!(vkCreateSamplerYcbcrConversionKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_sampler_ycbcr_conversion_khr + } else { + ::std::mem::transmute(val) + } + }, + destroy_sampler_ycbcr_conversion_khr: unsafe { + extern "system" fn destroy_sampler_ycbcr_conversion_khr( + _device: Device, + _ycbcr_conversion: SamplerYcbcrConversion, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_sampler_ycbcr_conversion_khr) + )) + } + let raw_name = stringify!(vkDestroySamplerYcbcrConversionKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_sampler_ycbcr_conversion_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn create_sampler_ycbcr_conversion_khr( + &self, + device: Device, + p_create_info: *const SamplerYcbcrConversionCreateInfo, + p_allocator: *const AllocationCallbacks, + p_ycbcr_conversion: *mut SamplerYcbcrConversion, + ) -> Result { + (self.create_sampler_ycbcr_conversion_khr)( + device, + p_create_info, + p_allocator, + p_ycbcr_conversion, 
+ ) + } + #[doc = ""] + pub unsafe fn destroy_sampler_ycbcr_conversion_khr( + &self, + device: Device, + ycbcr_conversion: SamplerYcbcrConversion, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_sampler_ycbcr_conversion_khr)(device, ycbcr_conversion, p_allocator) + } +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl StructureType { + pub const SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR: Self = + StructureType::SAMPLER_YCBCR_CONVERSION_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl StructureType { + pub const SAMPLER_YCBCR_CONVERSION_INFO_KHR: Self = + StructureType::SAMPLER_YCBCR_CONVERSION_INFO; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl StructureType { + pub const BIND_IMAGE_PLANE_MEMORY_INFO_KHR: Self = StructureType::BIND_IMAGE_PLANE_MEMORY_INFO; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl StructureType { + pub const IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR: Self = + StructureType::IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl StructureType { + pub const SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR: Self = + StructureType::SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl DebugReportObjectTypeEXT { + pub const SAMPLER_YCBCR_CONVERSION_KHR: Self = + DebugReportObjectTypeEXT::SAMPLER_YCBCR_CONVERSION; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl ObjectType { + pub const SAMPLER_YCBCR_CONVERSION_KHR: Self = ObjectType::SAMPLER_YCBCR_CONVERSION; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub 
const G8B8G8R8_422_UNORM_KHR: Self = Format::G8B8G8R8_422_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const B8G8R8G8_422_UNORM_KHR: Self = Format::B8G8R8G8_422_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G8_B8_R8_3PLANE_420_UNORM_KHR: Self = Format::G8_B8_R8_3PLANE_420_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G8_B8R8_2PLANE_420_UNORM_KHR: Self = Format::G8_B8R8_2PLANE_420_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G8_B8_R8_3PLANE_422_UNORM_KHR: Self = Format::G8_B8_R8_3PLANE_422_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G8_B8R8_2PLANE_422_UNORM_KHR: Self = Format::G8_B8R8_2PLANE_422_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G8_B8_R8_3PLANE_444_UNORM_KHR: Self = Format::G8_B8_R8_3PLANE_444_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const R10X6_UNORM_PACK16_KHR: Self = Format::R10X6_UNORM_PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const R10X6G10X6_UNORM_2PACK16_KHR: Self = Format::R10X6G10X6_UNORM_2PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR: Self = + Format::R10X6G10X6B10X6A10X6_UNORM_4PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR: Self = + Format::G10X6B10X6G10X6R10X6_422_UNORM_4PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR: Self = + Format::B10X6G10X6R10X6G10X6_422_UNORM_4PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] 
+impl Format { + pub const G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR: Self = + Format::G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR: Self = + Format::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR: Self = + Format::G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR: Self = + Format::G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR: Self = + Format::G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const R12X4_UNORM_PACK16_KHR: Self = Format::R12X4_UNORM_PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const R12X4G12X4_UNORM_2PACK16_KHR: Self = Format::R12X4G12X4_UNORM_2PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR: Self = + Format::R12X4G12X4B12X4A12X4_UNORM_4PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR: Self = + Format::G12X4B12X4G12X4R12X4_422_UNORM_4PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR: Self = + Format::B12X4G12X4R12X4G12X4_422_UNORM_4PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR: Self = + 
Format::G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR: Self = + Format::G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR: Self = + Format::G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR: Self = + Format::G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR: Self = + Format::G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G16B16G16R16_422_UNORM_KHR: Self = Format::G16B16G16R16_422_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const B16G16R16G16_422_UNORM_KHR: Self = Format::B16G16R16G16_422_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G16_B16_R16_3PLANE_420_UNORM_KHR: Self = Format::G16_B16_R16_3PLANE_420_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G16_B16R16_2PLANE_420_UNORM_KHR: Self = Format::G16_B16R16_2PLANE_420_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G16_B16_R16_3PLANE_422_UNORM_KHR: Self = Format::G16_B16_R16_3PLANE_422_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G16_B16R16_2PLANE_422_UNORM_KHR: Self = Format::G16_B16R16_2PLANE_422_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl Format { + pub const G16_B16_R16_3PLANE_444_UNORM_KHR: Self = 
Format::G16_B16_R16_3PLANE_444_UNORM; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl ImageAspectFlags { + pub const PLANE_0_KHR: Self = ImageAspectFlags::PLANE_0; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl ImageAspectFlags { + pub const PLANE_1_KHR: Self = ImageAspectFlags::PLANE_1; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl ImageAspectFlags { + pub const PLANE_2_KHR: Self = ImageAspectFlags::PLANE_2; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl ImageCreateFlags { + pub const DISJOINT_KHR: Self = ImageCreateFlags::DISJOINT; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl FormatFeatureFlags { + pub const MIDPOINT_CHROMA_SAMPLES_KHR: Self = FormatFeatureFlags::MIDPOINT_CHROMA_SAMPLES; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_KHR: Self = + FormatFeatureFlags::SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_KHR: Self = + FormatFeatureFlags::SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_KHR: Self = + FormatFeatureFlags::SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_KHR: Self = + FormatFeatureFlags::SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl FormatFeatureFlags { + pub const DISJOINT_KHR: Self = 
FormatFeatureFlags::DISJOINT; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl FormatFeatureFlags { + pub const COSITED_CHROMA_SAMPLES_KHR: Self = FormatFeatureFlags::COSITED_CHROMA_SAMPLES; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl SamplerYcbcrModelConversion { + pub const RGB_IDENTITY_KHR: Self = SamplerYcbcrModelConversion::RGB_IDENTITY; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl SamplerYcbcrModelConversion { + pub const YCBCR_IDENTITY_KHR: Self = SamplerYcbcrModelConversion::YCBCR_IDENTITY; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl SamplerYcbcrModelConversion { + pub const YCBCR_709_KHR: Self = SamplerYcbcrModelConversion::YCBCR_709; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl SamplerYcbcrModelConversion { + pub const YCBCR_601_KHR: Self = SamplerYcbcrModelConversion::YCBCR_601; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl SamplerYcbcrModelConversion { + pub const YCBCR_2020_KHR: Self = SamplerYcbcrModelConversion::YCBCR_2020; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl SamplerYcbcrRange { + pub const ITU_FULL_KHR: Self = SamplerYcbcrRange::ITU_FULL; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl SamplerYcbcrRange { + pub const ITU_NARROW_KHR: Self = SamplerYcbcrRange::ITU_NARROW; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl ChromaLocation { + pub const COSITED_EVEN_KHR: Self = ChromaLocation::COSITED_EVEN; +} +#[doc = "Generated from \'VK_KHR_sampler_ycbcr_conversion\'"] +impl ChromaLocation { + pub const MIDPOINT_KHR: Self = ChromaLocation::MIDPOINT; } impl KhrBindMemory2Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -54515,12 +69179,38 @@ impl KhrBindMemory2Fn { .expect("Wrong extension string") } } -pub struct KhrBindMemory2Fn {} +#[allow(non_camel_case_types)] +pub type PFN_vkBindBufferMemory2 = extern 
"system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindBufferMemoryInfo, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkBindImageMemory2 = extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindImageMemoryInfo, +) -> Result; +pub struct KhrBindMemory2Fn { + pub bind_buffer_memory2_khr: extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindBufferMemoryInfo, + ) -> Result, + pub bind_image_memory2_khr: extern "system" fn( + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindImageMemoryInfo, + ) -> Result, +} unsafe impl Send for KhrBindMemory2Fn {} unsafe impl Sync for KhrBindMemory2Fn {} impl ::std::clone::Clone for KhrBindMemory2Fn { fn clone(&self) -> Self { - KhrBindMemory2Fn {} + KhrBindMemory2Fn { + bind_buffer_memory2_khr: self.bind_buffer_memory2_khr, + bind_image_memory2_khr: self.bind_image_memory2_khr, + } } } impl KhrBindMemory2Fn { @@ -54528,8 +69218,79 @@ impl KhrBindMemory2Fn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrBindMemory2Fn {} + KhrBindMemory2Fn { + bind_buffer_memory2_khr: unsafe { + extern "system" fn bind_buffer_memory2_khr( + _device: Device, + _bind_info_count: u32, + _p_bind_infos: *const BindBufferMemoryInfo, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(bind_buffer_memory2_khr) + )) + } + let raw_name = stringify!(vkBindBufferMemory2KHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + bind_buffer_memory2_khr + } else { + ::std::mem::transmute(val) + } + }, + bind_image_memory2_khr: unsafe { + extern "system" fn bind_image_memory2_khr( + _device: Device, + _bind_info_count: u32, + _p_bind_infos: *const BindImageMemoryInfo, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(bind_image_memory2_khr) + )) + } + let raw_name = stringify!(vkBindImageMemory2KHR); + let cname = 
::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + bind_image_memory2_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn bind_buffer_memory2_khr( + &self, + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindBufferMemoryInfo, + ) -> Result { + (self.bind_buffer_memory2_khr)(device, bind_info_count, p_bind_infos) + } + #[doc = ""] + pub unsafe fn bind_image_memory2_khr( + &self, + device: Device, + bind_info_count: u32, + p_bind_infos: *const BindImageMemoryInfo, + ) -> Result { + (self.bind_image_memory2_khr)(device, bind_info_count, p_bind_infos) + } +} +#[doc = "Generated from \'VK_KHR_bind_memory2\'"] +impl StructureType { + pub const BIND_BUFFER_MEMORY_INFO_KHR: Self = StructureType::BIND_BUFFER_MEMORY_INFO; +} +#[doc = "Generated from \'VK_KHR_bind_memory2\'"] +impl StructureType { + pub const BIND_IMAGE_MEMORY_INFO_KHR: Self = StructureType::BIND_IMAGE_MEMORY_INFO; +} +#[doc = "Generated from \'VK_KHR_bind_memory2\'"] +impl ImageCreateFlags { + pub const ALIAS_KHR: Self = ImageCreateFlags::ALIAS; } impl ExtImageDrmFormatModifierFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -54588,7 +69349,7 @@ impl ExtImageDrmFormatModifierFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_image_drm_format_modifier_properties_ext( &self, device: Device, @@ -54600,51 +69361,53 @@ impl ExtImageDrmFormatModifierFn { } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl Result { - pub const ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT: Self = Result(-1000158000); + pub const ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT: Self = Result(-1_000_158_000); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl StructureType { - pub const DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT: Self = StructureType(1000158000); + pub const DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT: Self = StructureType(1_000_158_000); } #[doc = "Generated from 
\'VK_EXT_image_drm_format_modifier\'"] impl StructureType { - pub const DRM_FORMAT_MODIFIER_PROPERTIES_EXT: Self = StructureType(1000158001); + pub const DRM_FORMAT_MODIFIER_PROPERTIES_EXT: Self = StructureType(1_000_158_001); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl StructureType { - pub const PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT: Self = StructureType(1000158002); + pub const PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT: Self = + StructureType(1_000_158_002); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl StructureType { - pub const IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT: Self = StructureType(1000158003); + pub const IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT: Self = StructureType(1_000_158_003); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl StructureType { - pub const IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT: Self = StructureType(1000158004); + pub const IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT: Self = + StructureType(1_000_158_004); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl StructureType { - pub const IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT: Self = StructureType(1000158005); + pub const IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT: Self = StructureType(1_000_158_005); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl ImageTiling { - pub const DRM_FORMAT_MODIFIER_EXT: Self = ImageTiling(1000158000); + pub const DRM_FORMAT_MODIFIER_EXT: Self = ImageTiling(1_000_158_000); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl ImageAspectFlags { - pub const MEMORY_PLANE_0_EXT: Self = ImageAspectFlags(0b10000000); + pub const MEMORY_PLANE_0_EXT: Self = ImageAspectFlags(0b1000_0000); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl ImageAspectFlags { - pub const MEMORY_PLANE_1_EXT: Self = ImageAspectFlags(0b100000000); + pub const MEMORY_PLANE_1_EXT: Self = 
ImageAspectFlags(0b1_0000_0000); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl ImageAspectFlags { - pub const MEMORY_PLANE_2_EXT: Self = ImageAspectFlags(0b1000000000); + pub const MEMORY_PLANE_2_EXT: Self = ImageAspectFlags(0b10_0000_0000); } #[doc = "Generated from \'VK_EXT_image_drm_format_modifier\'"] impl ImageAspectFlags { - pub const MEMORY_PLANE_3_EXT: Self = ImageAspectFlags(0b10000000000); + pub const MEMORY_PLANE_3_EXT: Self = ImageAspectFlags(0b100_0000_0000); } impl ExtExtension160Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -54829,7 +69592,7 @@ impl ExtValidationCacheFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_validation_cache_ext( &self, device: Device, @@ -54839,7 +69602,7 @@ impl ExtValidationCacheFn { ) -> Result { (self.create_validation_cache_ext)(device, p_create_info, p_allocator, p_validation_cache) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_validation_cache_ext( &self, device: Device, @@ -54848,7 +69611,7 @@ impl ExtValidationCacheFn { ) -> c_void { (self.destroy_validation_cache_ext)(device, validation_cache, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn merge_validation_caches_ext( &self, device: Device, @@ -54858,7 +69621,7 @@ impl ExtValidationCacheFn { ) -> Result { (self.merge_validation_caches_ext)(device, dst_cache, src_cache_count, p_src_caches) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_validation_cache_data_ext( &self, device: Device, @@ -54871,15 +69634,15 @@ impl ExtValidationCacheFn { } #[doc = "Generated from \'VK_EXT_validation_cache\'"] impl StructureType { - pub const VALIDATION_CACHE_CREATE_INFO_EXT: Self = StructureType(1000160000); + pub const VALIDATION_CACHE_CREATE_INFO_EXT: Self = StructureType(1_000_160_000); } #[doc = "Generated from \'VK_EXT_validation_cache\'"] impl StructureType { - pub const SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT: Self = StructureType(1000160001); + pub const SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT: Self = 
StructureType(1_000_160_001); } #[doc = "Generated from \'VK_EXT_validation_cache\'"] impl ObjectType { - pub const VALIDATION_CACHE_EXT: Self = ObjectType(1000160000); + pub const VALIDATION_CACHE_EXT: Self = ObjectType(1_000_160_000); } impl ExtDescriptorIndexingFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -54905,37 +69668,59 @@ impl ExtDescriptorIndexingFn { } #[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] impl StructureType { - pub const DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT: Self = StructureType(1000161000); + pub const DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT: Self = + StructureType::DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO; } #[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] impl StructureType { - pub const PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: Self = StructureType(1000161001); + pub const PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: Self = + StructureType::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES; } #[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] impl StructureType { - pub const PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: Self = StructureType(1000161002); + pub const PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: Self = + StructureType::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES; } #[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] impl StructureType { pub const DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT: Self = - StructureType(1000161003); + StructureType::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO; } #[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] impl StructureType { pub const DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT: Self = - StructureType(1000161004); + StructureType::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT; +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl DescriptorBindingFlags { + pub const UPDATE_AFTER_BIND_EXT: Self = 
DescriptorBindingFlags::UPDATE_AFTER_BIND; +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl DescriptorBindingFlags { + pub const UPDATE_UNUSED_WHILE_PENDING_EXT: Self = + DescriptorBindingFlags::UPDATE_UNUSED_WHILE_PENDING; +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl DescriptorBindingFlags { + pub const PARTIALLY_BOUND_EXT: Self = DescriptorBindingFlags::PARTIALLY_BOUND; +} +#[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] +impl DescriptorBindingFlags { + pub const VARIABLE_DESCRIPTOR_COUNT_EXT: Self = + DescriptorBindingFlags::VARIABLE_DESCRIPTOR_COUNT; } #[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] impl DescriptorPoolCreateFlags { - pub const UPDATE_AFTER_BIND_EXT: Self = DescriptorPoolCreateFlags(0b10); + pub const UPDATE_AFTER_BIND_EXT: Self = DescriptorPoolCreateFlags::UPDATE_AFTER_BIND; } #[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] impl DescriptorSetLayoutCreateFlags { - pub const UPDATE_AFTER_BIND_POOL_EXT: Self = DescriptorSetLayoutCreateFlags(0b10); + pub const UPDATE_AFTER_BIND_POOL_EXT: Self = + DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND_POOL; } #[doc = "Generated from \'VK_EXT_descriptor_indexing\'"] impl Result { - pub const ERROR_FRAGMENTATION_EXT: Self = Result(-1000161000); + pub const ERROR_FRAGMENTATION_EXT: Self = Result::ERROR_FRAGMENTATION; } impl ExtShaderViewportIndexLayerFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -55107,7 +69892,7 @@ impl NvShadingRateImageFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_bind_shading_rate_image_nv( &self, command_buffer: CommandBuffer, @@ -55116,7 +69901,7 @@ impl NvShadingRateImageFn { ) -> c_void { (self.cmd_bind_shading_rate_image_nv)(command_buffer, image_view, image_layout) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_viewport_shading_rate_palette_nv( &self, command_buffer: CommandBuffer, @@ -55131,7 +69916,7 @@ impl NvShadingRateImageFn { p_shading_rate_palettes, ) } - #[doc = ""] + #[doc = ""] 
pub unsafe fn cmd_set_coarse_sample_order_nv( &self, command_buffer: CommandBuffer, @@ -55150,44 +69935,44 @@ impl NvShadingRateImageFn { #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl StructureType { pub const PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV: Self = - StructureType(1000164000); + StructureType(1_000_164_000); } #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV: Self = StructureType(1000164001); + pub const PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV: Self = StructureType(1_000_164_001); } #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV: Self = StructureType(1000164002); + pub const PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV: Self = StructureType(1_000_164_002); } #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl ImageLayout { - pub const SHADING_RATE_OPTIMAL_NV: Self = ImageLayout(1000164003); + pub const SHADING_RATE_OPTIMAL_NV: Self = ImageLayout(1_000_164_003); } #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl DynamicState { - pub const VIEWPORT_SHADING_RATE_PALETTE_NV: Self = DynamicState(1000164004); + pub const VIEWPORT_SHADING_RATE_PALETTE_NV: Self = DynamicState(1_000_164_004); } #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl AccessFlags { - pub const SHADING_RATE_IMAGE_READ_NV: Self = AccessFlags(0b100000000000000000000000); + pub const SHADING_RATE_IMAGE_READ_NV: Self = AccessFlags(0b1000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl ImageUsageFlags { - pub const SHADING_RATE_IMAGE_NV: Self = ImageUsageFlags(0b100000000); + pub const SHADING_RATE_IMAGE_NV: Self = ImageUsageFlags(0b1_0000_0000); } #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl PipelineStageFlags { - pub const SHADING_RATE_IMAGE_NV: Self = 
PipelineStageFlags(0b10000000000000000000000); + pub const SHADING_RATE_IMAGE_NV: Self = PipelineStageFlags(0b100_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl StructureType { pub const PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV: Self = - StructureType(1000164005); + StructureType(1_000_164_005); } #[doc = "Generated from \'VK_NV_shading_rate_image\'"] impl DynamicState { - pub const VIEWPORT_COARSE_SAMPLE_ORDER_NV: Self = DynamicState(1000164006); + pub const VIEWPORT_COARSE_SAMPLE_ORDER_NV: Self = DynamicState(1_000_164_006); } impl NvRayTracingFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -55203,44 +69988,44 @@ pub type PFN_vkCreateAccelerationStructureNV = extern "system" fn( p_acceleration_structure: *mut AccelerationStructureNV, ) -> Result; #[allow(non_camel_case_types)] -pub type PFN_vkDestroyAccelerationStructureNV = extern "system" fn( +pub type PFN_vkGetAccelerationStructureMemoryRequirementsNV = extern "system" fn( device: Device, - acceleration_structure: AccelerationStructureNV, + acceleration_structure: AccelerationStructureKHR, p_allocator: *const AllocationCallbacks, ) -> c_void; #[allow(non_camel_case_types)] -pub type PFN_vkGetAccelerationStructureMemoryRequirementsNV = extern "system" fn( +pub type PFN_vkCmdBuildAccelerationStructureNV = extern "system" fn( device: Device, p_info: *const AccelerationStructureMemoryRequirementsInfoNV, p_memory_requirements: *mut MemoryRequirements2KHR, ) -> c_void; #[allow(non_camel_case_types)] -pub type PFN_vkBindAccelerationStructureMemoryNV = extern "system" fn( +pub type PFN_vkCmdCopyAccelerationStructureNV = extern "system" fn( device: Device, bind_info_count: u32, - p_bind_infos: *const BindAccelerationStructureMemoryInfoNV, + p_bind_infos: *const BindAccelerationStructureMemoryInfoKHR, ) -> Result; #[allow(non_camel_case_types)] -pub type PFN_vkCmdBuildAccelerationStructureNV = extern "system" fn( +pub type PFN_vkCmdTraceRaysNV = extern 
"system" fn( command_buffer: CommandBuffer, p_info: *const AccelerationStructureInfoNV, instance_data: Buffer, instance_offset: DeviceSize, update: Bool32, - dst: AccelerationStructureNV, - src: AccelerationStructureNV, + dst: AccelerationStructureKHR, + src: AccelerationStructureKHR, scratch: Buffer, scratch_offset: DeviceSize, ) -> c_void; #[allow(non_camel_case_types)] -pub type PFN_vkCmdCopyAccelerationStructureNV = extern "system" fn( +pub type PFN_vkCreateRayTracingPipelinesNV = extern "system" fn( command_buffer: CommandBuffer, - dst: AccelerationStructureNV, - src: AccelerationStructureNV, - mode: CopyAccelerationStructureModeNV, + dst: AccelerationStructureKHR, + src: AccelerationStructureKHR, + mode: CopyAccelerationStructureModeKHR, ) -> c_void; #[allow(non_camel_case_types)] -pub type PFN_vkCmdTraceRaysNV = extern "system" fn( +pub type PFN_vkGetAccelerationStructureHandleNV = extern "system" fn( command_buffer: CommandBuffer, raygen_shader_binding_table_buffer: Buffer, raygen_shader_binding_offset: DeviceSize, @@ -55258,7 +70043,7 @@ pub type PFN_vkCmdTraceRaysNV = extern "system" fn( depth: u32, ) -> c_void; #[allow(non_camel_case_types)] -pub type PFN_vkCreateRayTracingPipelinesNV = extern "system" fn( +pub type PFN_vkCompileDeferredNV = extern "system" fn( device: Device, pipeline_cache: PipelineCache, create_info_count: u32, @@ -55266,34 +70051,6 @@ pub type PFN_vkCreateRayTracingPipelinesNV = extern "system" fn( p_allocator: *const AllocationCallbacks, p_pipelines: *mut Pipeline, ) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkGetRayTracingShaderGroupHandlesNV = extern "system" fn( - device: Device, - pipeline: Pipeline, - first_group: u32, - group_count: u32, - data_size: usize, - p_data: *mut c_void, -) -> Result; -#[allow(non_camel_case_types)] -pub type PFN_vkGetAccelerationStructureHandleNV = extern "system" fn( - device: Device, - acceleration_structure: AccelerationStructureNV, - data_size: usize, - p_data: *mut c_void, -) -> 
Result; -#[allow(non_camel_case_types)] -pub type PFN_vkCmdWriteAccelerationStructuresPropertiesNV = extern "system" fn( - command_buffer: CommandBuffer, - acceleration_structure_count: u32, - p_acceleration_structures: *const AccelerationStructureNV, - query_type: QueryType, - query_pool: QueryPool, - first_query: u32, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkCompileDeferredNV = - extern "system" fn(device: Device, pipeline: Pipeline, shader: u32) -> Result; pub struct NvRayTracingFn { pub create_acceleration_structure_nv: extern "system" fn( device: Device, @@ -55303,7 +70060,7 @@ pub struct NvRayTracingFn { ) -> Result, pub destroy_acceleration_structure_nv: extern "system" fn( device: Device, - acceleration_structure: AccelerationStructureNV, + acceleration_structure: AccelerationStructureKHR, p_allocator: *const AllocationCallbacks, ) -> c_void, pub get_acceleration_structure_memory_requirements_nv: extern "system" fn( @@ -55314,7 +70071,7 @@ pub struct NvRayTracingFn { pub bind_acceleration_structure_memory_nv: extern "system" fn( device: Device, bind_info_count: u32, - p_bind_infos: *const BindAccelerationStructureMemoryInfoNV, + p_bind_infos: *const BindAccelerationStructureMemoryInfoKHR, ) -> Result, pub cmd_build_acceleration_structure_nv: extern "system" fn( command_buffer: CommandBuffer, @@ -55322,16 +70079,16 @@ pub struct NvRayTracingFn { instance_data: Buffer, instance_offset: DeviceSize, update: Bool32, - dst: AccelerationStructureNV, - src: AccelerationStructureNV, + dst: AccelerationStructureKHR, + src: AccelerationStructureKHR, scratch: Buffer, scratch_offset: DeviceSize, ) -> c_void, pub cmd_copy_acceleration_structure_nv: extern "system" fn( command_buffer: CommandBuffer, - dst: AccelerationStructureNV, - src: AccelerationStructureNV, - mode: CopyAccelerationStructureModeNV, + dst: AccelerationStructureKHR, + src: AccelerationStructureKHR, + mode: CopyAccelerationStructureModeKHR, ) -> c_void, pub cmd_trace_rays_nv: extern 
"system" fn( command_buffer: CommandBuffer, @@ -55368,14 +70125,14 @@ pub struct NvRayTracingFn { ) -> Result, pub get_acceleration_structure_handle_nv: extern "system" fn( device: Device, - acceleration_structure: AccelerationStructureNV, + acceleration_structure: AccelerationStructureKHR, data_size: usize, p_data: *mut c_void, ) -> Result, pub cmd_write_acceleration_structures_properties_nv: extern "system" fn( command_buffer: CommandBuffer, acceleration_structure_count: u32, - p_acceleration_structures: *const AccelerationStructureNV, + p_acceleration_structures: *const AccelerationStructureKHR, query_type: QueryType, query_pool: QueryPool, first_query: u32, @@ -55435,7 +70192,7 @@ impl NvRayTracingFn { destroy_acceleration_structure_nv: unsafe { extern "system" fn destroy_acceleration_structure_nv( _device: Device, - _acceleration_structure: AccelerationStructureNV, + _acceleration_structure: AccelerationStructureKHR, _p_allocator: *const AllocationCallbacks, ) -> c_void { panic!(concat!( @@ -55476,7 +70233,7 @@ impl NvRayTracingFn { extern "system" fn bind_acceleration_structure_memory_nv( _device: Device, _bind_info_count: u32, - _p_bind_infos: *const BindAccelerationStructureMemoryInfoNV, + _p_bind_infos: *const BindAccelerationStructureMemoryInfoKHR, ) -> Result { panic!(concat!( "Unable to load ", @@ -55499,8 +70256,8 @@ impl NvRayTracingFn { _instance_data: Buffer, _instance_offset: DeviceSize, _update: Bool32, - _dst: AccelerationStructureNV, - _src: AccelerationStructureNV, + _dst: AccelerationStructureKHR, + _src: AccelerationStructureKHR, _scratch: Buffer, _scratch_offset: DeviceSize, ) -> c_void { @@ -55521,9 +70278,9 @@ impl NvRayTracingFn { cmd_copy_acceleration_structure_nv: unsafe { extern "system" fn cmd_copy_acceleration_structure_nv( _command_buffer: CommandBuffer, - _dst: AccelerationStructureNV, - _src: AccelerationStructureNV, - _mode: CopyAccelerationStructureModeNV, + _dst: AccelerationStructureKHR, + _src: AccelerationStructureKHR, + 
_mode: CopyAccelerationStructureModeKHR, ) -> c_void { panic!(concat!( "Unable to load ", @@ -55617,7 +70374,7 @@ impl NvRayTracingFn { get_acceleration_structure_handle_nv: unsafe { extern "system" fn get_acceleration_structure_handle_nv( _device: Device, - _acceleration_structure: AccelerationStructureNV, + _acceleration_structure: AccelerationStructureKHR, _data_size: usize, _p_data: *mut c_void, ) -> Result { @@ -55639,7 +70396,7 @@ impl NvRayTracingFn { extern "system" fn cmd_write_acceleration_structures_properties_nv( _command_buffer: CommandBuffer, _acceleration_structure_count: u32, - _p_acceleration_structures: *const AccelerationStructureNV, + _p_acceleration_structures: *const AccelerationStructureKHR, _query_type: QueryType, _query_pool: QueryPool, _first_query: u32, @@ -55677,7 +70434,7 @@ impl NvRayTracingFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_acceleration_structure_nv( &self, device: Device, @@ -55692,16 +70449,16 @@ impl NvRayTracingFn { p_acceleration_structure, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn destroy_acceleration_structure_nv( &self, device: Device, - acceleration_structure: AccelerationStructureNV, + acceleration_structure: AccelerationStructureKHR, p_allocator: *const AllocationCallbacks, ) -> c_void { (self.destroy_acceleration_structure_nv)(device, acceleration_structure, p_allocator) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_acceleration_structure_memory_requirements_nv( &self, device: Device, @@ -55714,16 +70471,16 @@ impl NvRayTracingFn { p_memory_requirements, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn bind_acceleration_structure_memory_nv( &self, device: Device, bind_info_count: u32, - p_bind_infos: *const BindAccelerationStructureMemoryInfoNV, + p_bind_infos: *const BindAccelerationStructureMemoryInfoKHR, ) -> Result { (self.bind_acceleration_structure_memory_nv)(device, bind_info_count, p_bind_infos) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_build_acceleration_structure_nv( &self, 
command_buffer: CommandBuffer, @@ -55731,8 +70488,8 @@ impl NvRayTracingFn { instance_data: Buffer, instance_offset: DeviceSize, update: Bool32, - dst: AccelerationStructureNV, - src: AccelerationStructureNV, + dst: AccelerationStructureKHR, + src: AccelerationStructureKHR, scratch: Buffer, scratch_offset: DeviceSize, ) -> c_void { @@ -55748,17 +70505,17 @@ impl NvRayTracingFn { scratch_offset, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_copy_acceleration_structure_nv( &self, command_buffer: CommandBuffer, - dst: AccelerationStructureNV, - src: AccelerationStructureNV, - mode: CopyAccelerationStructureModeNV, + dst: AccelerationStructureKHR, + src: AccelerationStructureKHR, + mode: CopyAccelerationStructureModeKHR, ) -> c_void { (self.cmd_copy_acceleration_structure_nv)(command_buffer, dst, src, mode) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_trace_rays_nv( &self, command_buffer: CommandBuffer, @@ -55795,7 +70552,7 @@ impl NvRayTracingFn { depth, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn create_ray_tracing_pipelines_nv( &self, device: Device, @@ -55814,7 +70571,7 @@ impl NvRayTracingFn { p_pipelines, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_ray_tracing_shader_group_handles_nv( &self, device: Device, @@ -55833,11 +70590,11 @@ impl NvRayTracingFn { p_data, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_acceleration_structure_handle_nv( &self, device: Device, - acceleration_structure: AccelerationStructureNV, + acceleration_structure: AccelerationStructureKHR, data_size: usize, p_data: *mut c_void, ) -> Result { @@ -55848,12 +70605,12 @@ impl NvRayTracingFn { p_data, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_write_acceleration_structures_properties_nv( &self, command_buffer: CommandBuffer, acceleration_structure_count: u32, - p_acceleration_structures: *const AccelerationStructureNV, + p_acceleration_structures: *const AccelerationStructureKHR, query_type: QueryType, query_pool: QueryPool, first_query: u32, @@ -55867,7 +70624,7 @@ 
impl NvRayTracingFn { first_query, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn compile_deferred_nv( &self, device: Device, @@ -55879,120 +70636,222 @@ impl NvRayTracingFn { } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const RAY_TRACING_PIPELINE_CREATE_INFO_NV: Self = StructureType(1000165000); + pub const RAY_TRACING_PIPELINE_CREATE_INFO_NV: Self = StructureType(1_000_165_000); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const ACCELERATION_STRUCTURE_CREATE_INFO_NV: Self = StructureType(1000165001); + pub const ACCELERATION_STRUCTURE_CREATE_INFO_NV: Self = StructureType(1_000_165_001); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const GEOMETRY_NV: Self = StructureType(1000165003); + pub const GEOMETRY_NV: Self = StructureType(1_000_165_003); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const GEOMETRY_TRIANGLES_NV: Self = StructureType(1000165004); + pub const GEOMETRY_TRIANGLES_NV: Self = StructureType(1_000_165_004); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const GEOMETRY_AABB_NV: Self = StructureType(1000165005); + pub const GEOMETRY_AABB_NV: Self = StructureType(1_000_165_005); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV: Self = StructureType(1000165006); + pub const BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV: Self = + StructureType::BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV: Self = StructureType(1000165007); + pub const WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV: Self = + StructureType::WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV: Self = 
StructureType(1000165008); + pub const ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV: Self = + StructureType(1_000_165_008); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV: Self = StructureType(1000165009); + pub const PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV: Self = StructureType(1_000_165_009); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV: Self = StructureType(1000165011); + pub const RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV: Self = StructureType(1_000_165_011); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl StructureType { - pub const ACCELERATION_STRUCTURE_INFO_NV: Self = StructureType(1000165012); + pub const ACCELERATION_STRUCTURE_INFO_NV: Self = StructureType(1_000_165_012); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl ShaderStageFlags { - pub const RAYGEN_NV: Self = ShaderStageFlags(0b100000000); + pub const RAYGEN_NV: Self = ShaderStageFlags::RAYGEN_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl ShaderStageFlags { - pub const ANY_HIT_NV: Self = ShaderStageFlags(0b1000000000); + pub const ANY_HIT_NV: Self = ShaderStageFlags::ANY_HIT_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl ShaderStageFlags { - pub const CLOSEST_HIT_NV: Self = ShaderStageFlags(0b10000000000); + pub const CLOSEST_HIT_NV: Self = ShaderStageFlags::CLOSEST_HIT_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl ShaderStageFlags { - pub const MISS_NV: Self = ShaderStageFlags(0b100000000000); + pub const MISS_NV: Self = ShaderStageFlags::MISS_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl ShaderStageFlags { - pub const INTERSECTION_NV: Self = ShaderStageFlags(0b1000000000000); + pub const INTERSECTION_NV: Self = ShaderStageFlags::INTERSECTION_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl ShaderStageFlags { - pub const CALLABLE_NV: Self = 
ShaderStageFlags(0b10000000000000); + pub const CALLABLE_NV: Self = ShaderStageFlags::CALLABLE_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl PipelineStageFlags { - pub const RAY_TRACING_SHADER_NV: Self = PipelineStageFlags(0b1000000000000000000000); + pub const RAY_TRACING_SHADER_NV: Self = PipelineStageFlags::RAY_TRACING_SHADER_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl PipelineStageFlags { pub const ACCELERATION_STRUCTURE_BUILD_NV: Self = - PipelineStageFlags(0b10000000000000000000000000); + PipelineStageFlags::ACCELERATION_STRUCTURE_BUILD_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl BufferUsageFlags { - pub const RAY_TRACING_NV: Self = BufferUsageFlags(0b10000000000); + pub const RAY_TRACING_NV: Self = BufferUsageFlags::RAY_TRACING_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl PipelineBindPoint { - pub const RAY_TRACING_NV: Self = PipelineBindPoint(1000165000); + pub const RAY_TRACING_NV: Self = PipelineBindPoint::RAY_TRACING_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl DescriptorType { - pub const ACCELERATION_STRUCTURE_NV: Self = DescriptorType(1000165000); + pub const ACCELERATION_STRUCTURE_NV: Self = DescriptorType::ACCELERATION_STRUCTURE_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl AccessFlags { - pub const ACCELERATION_STRUCTURE_READ_NV: Self = AccessFlags(0b1000000000000000000000); + pub const ACCELERATION_STRUCTURE_READ_NV: Self = AccessFlags::ACCELERATION_STRUCTURE_READ_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl AccessFlags { - pub const ACCELERATION_STRUCTURE_WRITE_NV: Self = AccessFlags(0b10000000000000000000000); + pub const ACCELERATION_STRUCTURE_WRITE_NV: Self = AccessFlags::ACCELERATION_STRUCTURE_WRITE_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl QueryType { - pub const ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV: Self = QueryType(1000165000); + pub const ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV: Self = + 
QueryType::ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl PipelineCreateFlags { - pub const DEFER_COMPILE_NV: Self = PipelineCreateFlags(0b100000); + pub const DEFER_COMPILE_NV: Self = PipelineCreateFlags(0b10_0000); } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl ObjectType { - pub const ACCELERATION_STRUCTURE_NV: Self = ObjectType(1000165000); + pub const ACCELERATION_STRUCTURE_NV: Self = ObjectType::ACCELERATION_STRUCTURE_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl DebugReportObjectTypeEXT { - pub const ACCELERATION_STRUCTURE_NV: Self = DebugReportObjectTypeEXT(1000165000); + pub const ACCELERATION_STRUCTURE_NV: Self = + DebugReportObjectTypeEXT::ACCELERATION_STRUCTURE_KHR; } #[doc = "Generated from \'VK_NV_ray_tracing\'"] impl IndexType { - pub const NONE_NV: Self = IndexType(1000165000); + pub const NONE_NV: Self = IndexType::NONE_KHR; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl RayTracingShaderGroupTypeKHR { + pub const GENERAL_NV: Self = RayTracingShaderGroupTypeKHR::GENERAL; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl RayTracingShaderGroupTypeKHR { + pub const TRIANGLES_HIT_GROUP_NV: Self = RayTracingShaderGroupTypeKHR::TRIANGLES_HIT_GROUP; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl RayTracingShaderGroupTypeKHR { + pub const PROCEDURAL_HIT_GROUP_NV: Self = RayTracingShaderGroupTypeKHR::PROCEDURAL_HIT_GROUP; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl GeometryTypeKHR { + pub const TRIANGLES_NV: Self = GeometryTypeKHR::TRIANGLES; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl GeometryTypeKHR { + pub const AABBS_NV: Self = GeometryTypeKHR::AABBS; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl AccelerationStructureTypeKHR { + pub const TOP_LEVEL_NV: Self = AccelerationStructureTypeKHR::TOP_LEVEL; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl AccelerationStructureTypeKHR { + pub const 
BOTTOM_LEVEL_NV: Self = AccelerationStructureTypeKHR::BOTTOM_LEVEL; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl GeometryFlagsKHR { + pub const OPAQUE_NV: Self = GeometryFlagsKHR::OPAQUE; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl GeometryFlagsKHR { + pub const NO_DUPLICATE_ANY_HIT_INVOCATION_NV: Self = + GeometryFlagsKHR::NO_DUPLICATE_ANY_HIT_INVOCATION; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl GeometryInstanceFlagsKHR { + pub const TRIANGLE_CULL_DISABLE_NV: Self = + GeometryInstanceFlagsKHR::TRIANGLE_FACING_CULL_DISABLE; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl GeometryInstanceFlagsKHR { + pub const TRIANGLE_FRONT_COUNTERCLOCKWISE_NV: Self = + GeometryInstanceFlagsKHR::TRIANGLE_FRONT_COUNTERCLOCKWISE; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl GeometryInstanceFlagsKHR { + pub const FORCE_OPAQUE_NV: Self = GeometryInstanceFlagsKHR::FORCE_OPAQUE; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl GeometryInstanceFlagsKHR { + pub const FORCE_NO_OPAQUE_NV: Self = GeometryInstanceFlagsKHR::FORCE_NO_OPAQUE; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl BuildAccelerationStructureFlagsKHR { + pub const ALLOW_UPDATE_NV: Self = BuildAccelerationStructureFlagsKHR::ALLOW_UPDATE; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl BuildAccelerationStructureFlagsKHR { + pub const ALLOW_COMPACTION_NV: Self = BuildAccelerationStructureFlagsKHR::ALLOW_COMPACTION; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl BuildAccelerationStructureFlagsKHR { + pub const PREFER_FAST_TRACE_NV: Self = BuildAccelerationStructureFlagsKHR::PREFER_FAST_TRACE; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl BuildAccelerationStructureFlagsKHR { + pub const PREFER_FAST_BUILD_NV: Self = BuildAccelerationStructureFlagsKHR::PREFER_FAST_BUILD; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl BuildAccelerationStructureFlagsKHR { + pub const LOW_MEMORY_NV: Self = 
BuildAccelerationStructureFlagsKHR::LOW_MEMORY; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl CopyAccelerationStructureModeKHR { + pub const CLONE_NV: Self = CopyAccelerationStructureModeKHR::CLONE; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl CopyAccelerationStructureModeKHR { + pub const COMPACT_NV: Self = CopyAccelerationStructureModeKHR::COMPACT; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl AccelerationStructureMemoryRequirementsTypeKHR { + pub const OBJECT_NV: Self = AccelerationStructureMemoryRequirementsTypeKHR::OBJECT; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl AccelerationStructureMemoryRequirementsTypeKHR { + pub const BUILD_SCRATCH_NV: Self = + AccelerationStructureMemoryRequirementsTypeKHR::BUILD_SCRATCH; +} +#[doc = "Generated from \'VK_NV_ray_tracing\'"] +impl AccelerationStructureMemoryRequirementsTypeKHR { + pub const UPDATE_SCRATCH_NV: Self = + AccelerationStructureMemoryRequirementsTypeKHR::UPDATE_SCRATCH; } impl NvRepresentativeFragmentTestFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -56019,12 +70878,12 @@ impl NvRepresentativeFragmentTestFn { #[doc = "Generated from \'VK_NV_representative_fragment_test\'"] impl StructureType { pub const PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV: Self = - StructureType(1000166000); + StructureType(1_000_166_000); } #[doc = "Generated from \'VK_NV_representative_fragment_test\'"] impl StructureType { pub const PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV: Self = - StructureType(1000166001); + StructureType(1_000_166_001); } impl NvExtension168Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -56054,12 +70913,26 @@ impl KhrMaintenance3Fn { .expect("Wrong extension string") } } -pub struct KhrMaintenance3Fn {} +#[allow(non_camel_case_types)] +pub type PFN_vkGetDescriptorSetLayoutSupport = extern "system" fn( + device: Device, + p_create_info: *const DescriptorSetLayoutCreateInfo, + p_support: *mut DescriptorSetLayoutSupport, 
+) -> c_void; +pub struct KhrMaintenance3Fn { + pub get_descriptor_set_layout_support_khr: extern "system" fn( + device: Device, + p_create_info: *const DescriptorSetLayoutCreateInfo, + p_support: *mut DescriptorSetLayoutSupport, + ) -> c_void, +} unsafe impl Send for KhrMaintenance3Fn {} unsafe impl Sync for KhrMaintenance3Fn {} impl ::std::clone::Clone for KhrMaintenance3Fn { fn clone(&self) -> Self { - KhrMaintenance3Fn {} + KhrMaintenance3Fn { + get_descriptor_set_layout_support_khr: self.get_descriptor_set_layout_support_khr, + } } } impl KhrMaintenance3Fn { @@ -56067,8 +70940,48 @@ impl KhrMaintenance3Fn { where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrMaintenance3Fn {} + KhrMaintenance3Fn { + get_descriptor_set_layout_support_khr: unsafe { + extern "system" fn get_descriptor_set_layout_support_khr( + _device: Device, + _p_create_info: *const DescriptorSetLayoutCreateInfo, + _p_support: *mut DescriptorSetLayoutSupport, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_descriptor_set_layout_support_khr) + )) + } + let raw_name = stringify!(vkGetDescriptorSetLayoutSupportKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_descriptor_set_layout_support_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn get_descriptor_set_layout_support_khr( + &self, + device: Device, + p_create_info: *const DescriptorSetLayoutCreateInfo, + p_support: *mut DescriptorSetLayoutSupport, + ) -> c_void { + (self.get_descriptor_set_layout_support_khr)(device, p_create_info, p_support) + } +} +#[doc = "Generated from \'VK_KHR_maintenance3\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR: Self = + StructureType::PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_maintenance3\'"] +impl StructureType { + pub const DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR: Self = + 
StructureType::DESCRIPTOR_SET_LAYOUT_SUPPORT; } impl KhrDrawIndirectCountFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -56076,26 +70989,6 @@ impl KhrDrawIndirectCountFn { .expect("Wrong extension string") } } -#[allow(non_camel_case_types)] -pub type PFN_vkCmdDrawIndirectCountKHR = extern "system" fn( - command_buffer: CommandBuffer, - buffer: Buffer, - offset: DeviceSize, - count_buffer: Buffer, - count_buffer_offset: DeviceSize, - max_draw_count: u32, - stride: u32, -) -> c_void; -#[allow(non_camel_case_types)] -pub type PFN_vkCmdDrawIndexedIndirectCountKHR = extern "system" fn( - command_buffer: CommandBuffer, - buffer: Buffer, - offset: DeviceSize, - count_buffer: Buffer, - count_buffer_offset: DeviceSize, - max_draw_count: u32, - stride: u32, -) -> c_void; pub struct KhrDrawIndirectCountFn { pub cmd_draw_indirect_count_khr: extern "system" fn( command_buffer: CommandBuffer, @@ -56182,7 +71075,7 @@ impl KhrDrawIndirectCountFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_indirect_count_khr( &self, command_buffer: CommandBuffer, @@ -56203,7 +71096,7 @@ impl KhrDrawIndirectCountFn { stride, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_indexed_indirect_count_khr( &self, command_buffer: CommandBuffer, @@ -56225,28 +71118,46 @@ impl KhrDrawIndirectCountFn { ) } } -impl QcomExtension171Fn { +impl ExtFilterCubicFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_171\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_filter_cubic\0") .expect("Wrong extension string") } } -pub struct QcomExtension171Fn {} -unsafe impl Send for QcomExtension171Fn {} -unsafe impl Sync for QcomExtension171Fn {} -impl ::std::clone::Clone for QcomExtension171Fn { +pub struct ExtFilterCubicFn {} +unsafe impl Send for ExtFilterCubicFn {} +unsafe impl Sync for ExtFilterCubicFn {} +impl ::std::clone::Clone for ExtFilterCubicFn { fn clone(&self) -> Self { - QcomExtension171Fn {} + ExtFilterCubicFn {} } } 
-impl QcomExtension171Fn { +impl ExtFilterCubicFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - QcomExtension171Fn {} + ExtFilterCubicFn {} } } +#[doc = "Generated from \'VK_EXT_filter_cubic\'"] +impl Filter { + pub const CUBIC_EXT: Self = Filter::CUBIC_IMG; +} +#[doc = "Generated from \'VK_EXT_filter_cubic\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_FILTER_CUBIC_EXT: Self = + FormatFeatureFlags::SAMPLED_IMAGE_FILTER_CUBIC_IMG; +} +#[doc = "Generated from \'VK_EXT_filter_cubic\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT: Self = StructureType(1_000_170_000); +} +#[doc = "Generated from \'VK_EXT_filter_cubic\'"] +impl StructureType { + pub const FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT: Self = + StructureType(1_000_170_001); +} impl QcomExtension172Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_172\0") @@ -56269,6 +71180,14 @@ impl QcomExtension172Fn { QcomExtension172Fn {} } } +#[doc = "Generated from \'VK_QCOM_extension_172\'"] +impl SubpassDescriptionFlags { + pub const RESERVED_2_QCOM: Self = SubpassDescriptionFlags(0b100); +} +#[doc = "Generated from \'VK_QCOM_extension_172\'"] +impl SubpassDescriptionFlags { + pub const RESERVED_3_QCOM: Self = SubpassDescriptionFlags(0b1000); +} impl QcomExtension173Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_173\0") @@ -56291,6 +71210,18 @@ impl QcomExtension173Fn { QcomExtension173Fn {} } } +#[doc = "Generated from \'VK_QCOM_extension_173\'"] +impl BufferUsageFlags { + pub const RESERVED_18_QCOM: Self = BufferUsageFlags(0b100_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_QCOM_extension_173\'"] +impl ImageUsageFlags { + pub const RESERVED_16_QCOM: Self = ImageUsageFlags(0b1_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_QCOM_extension_173\'"] +impl ImageUsageFlags { + pub const 
RESERVED_17_QCOM: Self = ImageUsageFlags(0b10_0000_0000_0000_0000); +} impl QcomExtension174Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_174\0") @@ -56337,34 +71268,39 @@ impl ExtGlobalPriorityFn { } #[doc = "Generated from \'VK_EXT_global_priority\'"] impl StructureType { - pub const DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT: Self = StructureType(1000174000); + pub const DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT: Self = StructureType(1_000_174_000); } #[doc = "Generated from \'VK_EXT_global_priority\'"] impl Result { - pub const ERROR_NOT_PERMITTED_EXT: Self = Result(-1000174001); + pub const ERROR_NOT_PERMITTED_EXT: Self = Result(-1_000_174_001); } -impl ExtExtension176Fn { +impl KhrShaderSubgroupExtendedTypesFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_176\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_shader_subgroup_extended_types\0") .expect("Wrong extension string") } } -pub struct ExtExtension176Fn {} -unsafe impl Send for ExtExtension176Fn {} -unsafe impl Sync for ExtExtension176Fn {} -impl ::std::clone::Clone for ExtExtension176Fn { +pub struct KhrShaderSubgroupExtendedTypesFn {} +unsafe impl Send for KhrShaderSubgroupExtendedTypesFn {} +unsafe impl Sync for KhrShaderSubgroupExtendedTypesFn {} +impl ::std::clone::Clone for KhrShaderSubgroupExtendedTypesFn { fn clone(&self) -> Self { - ExtExtension176Fn {} + KhrShaderSubgroupExtendedTypesFn {} } } -impl ExtExtension176Fn { +impl KhrShaderSubgroupExtendedTypesFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - ExtExtension176Fn {} + KhrShaderSubgroupExtendedTypesFn {} } } +#[doc = "Generated from \'VK_KHR_shader_subgroup_extended_types\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES; +} impl 
ExtExtension177Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_177\0") @@ -56411,7 +71347,8 @@ impl Khr8bitStorageFn { } #[doc = "Generated from \'VK_KHR_8bit_storage\'"] impl StructureType { - pub const PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: Self = StructureType(1000177000); + pub const PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES; } impl ExtExternalMemoryHostFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -56472,7 +71409,7 @@ impl ExtExternalMemoryHostFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_memory_host_pointer_properties_ext( &self, device: Device, @@ -56490,25 +71427,26 @@ impl ExtExternalMemoryHostFn { } #[doc = "Generated from \'VK_EXT_external_memory_host\'"] impl StructureType { - pub const IMPORT_MEMORY_HOST_POINTER_INFO_EXT: Self = StructureType(1000178000); + pub const IMPORT_MEMORY_HOST_POINTER_INFO_EXT: Self = StructureType(1_000_178_000); } #[doc = "Generated from \'VK_EXT_external_memory_host\'"] impl StructureType { - pub const MEMORY_HOST_POINTER_PROPERTIES_EXT: Self = StructureType(1000178001); + pub const MEMORY_HOST_POINTER_PROPERTIES_EXT: Self = StructureType(1_000_178_001); } #[doc = "Generated from \'VK_EXT_external_memory_host\'"] impl StructureType { - pub const PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: Self = StructureType(1000178002); + pub const PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: Self = + StructureType(1_000_178_002); } #[doc = "Generated from \'VK_EXT_external_memory_host\'"] impl ExternalMemoryHandleTypeFlags { pub const EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION: Self = - ExternalMemoryHandleTypeFlags(0b10000000); + ExternalMemoryHandleTypeFlags(0b1000_0000); } #[doc = "Generated from \'VK_EXT_external_memory_host\'"] impl ExternalMemoryHandleTypeFlags { pub const EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY: Self = - 
ExternalMemoryHandleTypeFlags(0b100000000); + ExternalMemoryHandleTypeFlags(0b1_0000_0000); } impl AmdBufferMarkerFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -56572,7 +71510,7 @@ impl AmdBufferMarkerFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_write_buffer_marker_amd( &self, command_buffer: CommandBuffer, @@ -56614,30 +71552,35 @@ impl KhrShaderAtomicInt64Fn { } #[doc = "Generated from \'VK_KHR_shader_atomic_int64\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: Self = StructureType(1000180000); + pub const PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES; } -impl AmdExtension182Fn { +impl KhrShaderClockFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_182\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_shader_clock\0") .expect("Wrong extension string") } } -pub struct AmdExtension182Fn {} -unsafe impl Send for AmdExtension182Fn {} -unsafe impl Sync for AmdExtension182Fn {} -impl ::std::clone::Clone for AmdExtension182Fn { +pub struct KhrShaderClockFn {} +unsafe impl Send for KhrShaderClockFn {} +unsafe impl Sync for KhrShaderClockFn {} +impl ::std::clone::Clone for KhrShaderClockFn { fn clone(&self) -> Self { - AmdExtension182Fn {} + KhrShaderClockFn {} } } -impl AmdExtension182Fn { +impl KhrShaderClockFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - AmdExtension182Fn {} + KhrShaderClockFn {} } } +#[doc = "Generated from \'VK_KHR_shader_clock\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR: Self = StructureType(1_000_181_000); +} impl AmdExtension183Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_183\0") @@ -56660,28 +71603,32 @@ impl AmdExtension183Fn { AmdExtension183Fn {} } } -impl AmdExtension184Fn { +impl AmdPipelineCompilerControlFn { 
pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_184\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_pipeline_compiler_control\0") .expect("Wrong extension string") } } -pub struct AmdExtension184Fn {} -unsafe impl Send for AmdExtension184Fn {} -unsafe impl Sync for AmdExtension184Fn {} -impl ::std::clone::Clone for AmdExtension184Fn { +pub struct AmdPipelineCompilerControlFn {} +unsafe impl Send for AmdPipelineCompilerControlFn {} +unsafe impl Sync for AmdPipelineCompilerControlFn {} +impl ::std::clone::Clone for AmdPipelineCompilerControlFn { fn clone(&self) -> Self { - AmdExtension184Fn {} + AmdPipelineCompilerControlFn {} } } -impl AmdExtension184Fn { +impl AmdPipelineCompilerControlFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - AmdExtension184Fn {} + AmdPipelineCompilerControlFn {} } } +#[doc = "Generated from \'VK_AMD_pipeline_compiler_control\'"] +impl StructureType { + pub const PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD: Self = StructureType(1_000_183_000); +} impl ExtCalibratedTimestampsFn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_calibrated_timestamps\0") @@ -56777,7 +71724,7 @@ impl ExtCalibratedTimestampsFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_physical_device_calibrateable_time_domains_ext( &self, physical_device: PhysicalDevice, @@ -56790,7 +71737,7 @@ impl ExtCalibratedTimestampsFn { p_time_domains, ) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_calibrated_timestamps_ext( &self, device: Device, @@ -56810,7 +71757,7 @@ impl ExtCalibratedTimestampsFn { } #[doc = "Generated from \'VK_EXT_calibrated_timestamps\'"] impl StructureType { - pub const CALIBRATED_TIMESTAMP_INFO_EXT: Self = StructureType(1000184000); + pub const CALIBRATED_TIMESTAMP_INFO_EXT: Self = StructureType(1_000_184_000); } impl AmdShaderCorePropertiesFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -56836,7 
+71783,7 @@ impl AmdShaderCorePropertiesFn { } #[doc = "Generated from \'VK_AMD_shader_core_properties\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD: Self = StructureType(1000185000); + pub const PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD: Self = StructureType(1_000_185_000); } impl AmdExtension187Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -56928,7 +71875,7 @@ impl AmdMemoryOverallocationBehaviorFn { } #[doc = "Generated from \'VK_AMD_memory_overallocation_behavior\'"] impl StructureType { - pub const DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: Self = StructureType(1000189000); + pub const DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: Self = StructureType(1_000_189_000); } impl ExtVertexAttributeDivisorFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -56955,61 +71902,70 @@ impl ExtVertexAttributeDivisorFn { #[doc = "Generated from \'VK_EXT_vertex_attribute_divisor\'"] impl StructureType { pub const PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: Self = - StructureType(1000190000); + StructureType(1_000_190_000); } #[doc = "Generated from \'VK_EXT_vertex_attribute_divisor\'"] impl StructureType { - pub const PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: Self = StructureType(1000190001); + pub const PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: Self = + StructureType(1_000_190_001); } #[doc = "Generated from \'VK_EXT_vertex_attribute_divisor\'"] impl StructureType { pub const PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: Self = - StructureType(1000190002); + StructureType(1_000_190_002); } -impl GoogleExtension192Fn { +impl GgpFrameTokenFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_192\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GGP_frame_token\0") .expect("Wrong extension string") } } -pub struct GoogleExtension192Fn {} -unsafe impl Send for GoogleExtension192Fn {} -unsafe impl Sync for GoogleExtension192Fn {} -impl 
::std::clone::Clone for GoogleExtension192Fn { +pub struct GgpFrameTokenFn {} +unsafe impl Send for GgpFrameTokenFn {} +unsafe impl Sync for GgpFrameTokenFn {} +impl ::std::clone::Clone for GgpFrameTokenFn { fn clone(&self) -> Self { - GoogleExtension192Fn {} + GgpFrameTokenFn {} } } -impl GoogleExtension192Fn { +impl GgpFrameTokenFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - GoogleExtension192Fn {} + GgpFrameTokenFn {} } } -impl GoogleExtension193Fn { +#[doc = "Generated from \'VK_GGP_frame_token\'"] +impl StructureType { + pub const PRESENT_FRAME_TOKEN_GGP: Self = StructureType(1_000_191_000); +} +impl ExtPipelineCreationFeedbackFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_193\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_pipeline_creation_feedback\0") .expect("Wrong extension string") } } -pub struct GoogleExtension193Fn {} -unsafe impl Send for GoogleExtension193Fn {} -unsafe impl Sync for GoogleExtension193Fn {} -impl ::std::clone::Clone for GoogleExtension193Fn { +pub struct ExtPipelineCreationFeedbackFn {} +unsafe impl Send for ExtPipelineCreationFeedbackFn {} +unsafe impl Sync for ExtPipelineCreationFeedbackFn {} +impl ::std::clone::Clone for ExtPipelineCreationFeedbackFn { fn clone(&self) -> Self { - GoogleExtension193Fn {} + ExtPipelineCreationFeedbackFn {} } } -impl GoogleExtension193Fn { +impl ExtPipelineCreationFeedbackFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - GoogleExtension193Fn {} + ExtPipelineCreationFeedbackFn {} } } +#[doc = "Generated from \'VK_EXT_pipeline_creation_feedback\'"] +impl StructureType { + pub const PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT: Self = StructureType(1_000_192_000); +} impl GoogleExtension194Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_extension_194\0") @@ -57100,7 +72056,56 @@ impl 
KhrDriverPropertiesFn { } #[doc = "Generated from \'VK_KHR_driver_properties\'"] impl StructureType { - pub const PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: Self = StructureType(1000196000); + pub const PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: Self = + StructureType::PHYSICAL_DEVICE_DRIVER_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const AMD_PROPRIETARY_KHR: Self = DriverId::AMD_PROPRIETARY; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const AMD_OPEN_SOURCE_KHR: Self = DriverId::AMD_OPEN_SOURCE; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const MESA_RADV_KHR: Self = DriverId::MESA_RADV; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const NVIDIA_PROPRIETARY_KHR: Self = DriverId::NVIDIA_PROPRIETARY; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const INTEL_PROPRIETARY_WINDOWS_KHR: Self = DriverId::INTEL_PROPRIETARY_WINDOWS; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const INTEL_OPEN_SOURCE_MESA_KHR: Self = DriverId::INTEL_OPEN_SOURCE_MESA; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const IMAGINATION_PROPRIETARY_KHR: Self = DriverId::IMAGINATION_PROPRIETARY; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const QUALCOMM_PROPRIETARY_KHR: Self = DriverId::QUALCOMM_PROPRIETARY; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const ARM_PROPRIETARY_KHR: Self = DriverId::ARM_PROPRIETARY; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const GOOGLE_SWIFTSHADER_KHR: Self = DriverId::GOOGLE_SWIFTSHADER; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl DriverId { + pub const GGP_PROPRIETARY_KHR: Self = DriverId::GGP_PROPRIETARY; +} +#[doc = "Generated from \'VK_KHR_driver_properties\'"] +impl 
DriverId { + pub const BROADCOM_PROPRIETARY_KHR: Self = DriverId::BROADCOM_PROPRIETARY; } impl KhrShaderFloatControlsFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57126,7 +72131,20 @@ impl KhrShaderFloatControlsFn { } #[doc = "Generated from \'VK_KHR_shader_float_controls\'"] impl StructureType { - pub const PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR: Self = StructureType(1000197000); + pub const PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR: Self = + StructureType::PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_shader_float_controls\'"] +impl ShaderFloatControlsIndependence { + pub const TYPE_32_ONLY_KHR: Self = ShaderFloatControlsIndependence::TYPE_32_ONLY; +} +#[doc = "Generated from \'VK_KHR_shader_float_controls\'"] +impl ShaderFloatControlsIndependence { + pub const ALL_KHR: Self = ShaderFloatControlsIndependence::ALL; +} +#[doc = "Generated from \'VK_KHR_shader_float_controls\'"] +impl ShaderFloatControlsIndependence { + pub const NONE_KHR: Self = ShaderFloatControlsIndependence::NONE; } impl NvShaderSubgroupPartitionedFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57152,7 +72170,7 @@ impl NvShaderSubgroupPartitionedFn { } #[doc = "Generated from \'VK_NV_shader_subgroup_partitioned\'"] impl SubgroupFeatureFlags { - pub const PARTITIONED_NV: Self = SubgroupFeatureFlags(0b100000000); + pub const PARTITIONED_NV: Self = SubgroupFeatureFlags(0b1_0000_0000); } impl KhrDepthStencilResolveFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57179,11 +72197,32 @@ impl KhrDepthStencilResolveFn { #[doc = "Generated from \'VK_KHR_depth_stencil_resolve\'"] impl StructureType { pub const PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: Self = - StructureType(1000199000); + StructureType::PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES; } #[doc = "Generated from \'VK_KHR_depth_stencil_resolve\'"] impl StructureType { - pub const SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR: Self = StructureType(1000199001); + pub 
const SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR: Self = + StructureType::SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE; +} +#[doc = "Generated from \'VK_KHR_depth_stencil_resolve\'"] +impl ResolveModeFlags { + pub const NONE_KHR: Self = ResolveModeFlags::NONE; +} +#[doc = "Generated from \'VK_KHR_depth_stencil_resolve\'"] +impl ResolveModeFlags { + pub const SAMPLE_ZERO_KHR: Self = ResolveModeFlags::SAMPLE_ZERO; +} +#[doc = "Generated from \'VK_KHR_depth_stencil_resolve\'"] +impl ResolveModeFlags { + pub const AVERAGE_KHR: Self = ResolveModeFlags::AVERAGE; +} +#[doc = "Generated from \'VK_KHR_depth_stencil_resolve\'"] +impl ResolveModeFlags { + pub const MIN_KHR: Self = ResolveModeFlags::MIN; +} +#[doc = "Generated from \'VK_KHR_depth_stencil_resolve\'"] +impl ResolveModeFlags { + pub const MAX_KHR: Self = ResolveModeFlags::MAX; } impl KhrSwapchainMutableFormatFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57236,7 +72275,7 @@ impl NvComputeShaderDerivativesFn { #[doc = "Generated from \'VK_NV_compute_shader_derivatives\'"] impl StructureType { pub const PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: Self = - StructureType(1000201000); + StructureType(1_000_201_000); } impl NvMeshShaderFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57373,7 +72412,7 @@ impl NvMeshShaderFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_mesh_tasks_nv( &self, command_buffer: CommandBuffer, @@ -57382,7 +72421,7 @@ impl NvMeshShaderFn { ) -> c_void { (self.cmd_draw_mesh_tasks_nv)(command_buffer, task_count, first_task) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_mesh_tasks_indirect_nv( &self, command_buffer: CommandBuffer, @@ -57393,7 +72432,7 @@ impl NvMeshShaderFn { ) -> c_void { (self.cmd_draw_mesh_tasks_indirect_nv)(command_buffer, buffer, offset, draw_count, stride) } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_draw_mesh_tasks_indirect_count_nv( &self, command_buffer: CommandBuffer, @@ -57417,27 +72456,27 @@ impl NvMeshShaderFn { } #[doc 
= "Generated from \'VK_NV_mesh_shader\'"] impl StructureType { - pub const PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV: Self = StructureType(1000202000); + pub const PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV: Self = StructureType(1_000_202_000); } #[doc = "Generated from \'VK_NV_mesh_shader\'"] impl StructureType { - pub const PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV: Self = StructureType(1000202001); + pub const PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV: Self = StructureType(1_000_202_001); } #[doc = "Generated from \'VK_NV_mesh_shader\'"] impl ShaderStageFlags { - pub const TASK_NV: Self = ShaderStageFlags(0b1000000); + pub const TASK_NV: Self = ShaderStageFlags(0b100_0000); } #[doc = "Generated from \'VK_NV_mesh_shader\'"] impl ShaderStageFlags { - pub const MESH_NV: Self = ShaderStageFlags(0b10000000); + pub const MESH_NV: Self = ShaderStageFlags(0b1000_0000); } #[doc = "Generated from \'VK_NV_mesh_shader\'"] impl PipelineStageFlags { - pub const TASK_SHADER_NV: Self = PipelineStageFlags(0b10000000000000000000); + pub const TASK_SHADER_NV: Self = PipelineStageFlags(0b1000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_NV_mesh_shader\'"] impl PipelineStageFlags { - pub const MESH_SHADER_NV: Self = PipelineStageFlags(0b100000000000000000000); + pub const MESH_SHADER_NV: Self = PipelineStageFlags(0b1_0000_0000_0000_0000_0000); } impl NvFragmentShaderBarycentricFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57464,7 +72503,7 @@ impl NvFragmentShaderBarycentricFn { #[doc = "Generated from \'VK_NV_fragment_shader_barycentric\'"] impl StructureType { pub const PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV: Self = - StructureType(1000203000); + StructureType(1_000_203_000); } impl NvShaderImageFootprintFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57490,7 +72529,8 @@ impl NvShaderImageFootprintFn { } #[doc = "Generated from \'VK_NV_shader_image_footprint\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV: 
Self = StructureType(1000204000); + pub const PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV: Self = + StructureType(1_000_204_000); } impl NvScissorExclusiveFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57551,7 +72591,7 @@ impl NvScissorExclusiveFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_exclusive_scissor_nv( &self, command_buffer: CommandBuffer, @@ -57570,15 +72610,15 @@ impl NvScissorExclusiveFn { #[doc = "Generated from \'VK_NV_scissor_exclusive\'"] impl StructureType { pub const PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV: Self = - StructureType(1000205000); + StructureType(1_000_205_000); } #[doc = "Generated from \'VK_NV_scissor_exclusive\'"] impl DynamicState { - pub const EXCLUSIVE_SCISSOR_NV: Self = DynamicState(1000205001); + pub const EXCLUSIVE_SCISSOR_NV: Self = DynamicState(1_000_205_001); } #[doc = "Generated from \'VK_NV_scissor_exclusive\'"] impl StructureType { - pub const PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV: Self = StructureType(1000205002); + pub const PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV: Self = StructureType(1_000_205_002); } impl NvDeviceDiagnosticCheckpointsFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57663,7 +72703,7 @@ impl NvDeviceDiagnosticCheckpointsFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn cmd_set_checkpoint_nv( &self, command_buffer: CommandBuffer, @@ -57671,7 +72711,7 @@ impl NvDeviceDiagnosticCheckpointsFn { ) -> c_void { (self.cmd_set_checkpoint_nv)(command_buffer, p_checkpoint_marker) } - #[doc = ""] + #[doc = ""] pub unsafe fn get_queue_checkpoint_data_nv( &self, queue: Queue, @@ -57683,33 +72723,178 @@ impl NvDeviceDiagnosticCheckpointsFn { } #[doc = "Generated from \'VK_NV_device_diagnostic_checkpoints\'"] impl StructureType { - pub const CHECKPOINT_DATA_NV: Self = StructureType(1000206000); + pub const CHECKPOINT_DATA_NV: Self = StructureType(1_000_206_000); } #[doc = "Generated from \'VK_NV_device_diagnostic_checkpoints\'"] impl StructureType 
{ - pub const QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV: Self = StructureType(1000206001); + pub const QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV: Self = StructureType(1_000_206_001); } -impl KhrExtension208Fn { +impl KhrTimelineSemaphoreFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_208\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_timeline_semaphore\0") .expect("Wrong extension string") } } -pub struct KhrExtension208Fn {} -unsafe impl Send for KhrExtension208Fn {} -unsafe impl Sync for KhrExtension208Fn {} -impl ::std::clone::Clone for KhrExtension208Fn { +#[allow(non_camel_case_types)] +pub type PFN_vkGetSemaphoreCounterValue = + extern "system" fn(device: Device, semaphore: Semaphore, p_value: *mut u64) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkWaitSemaphores = extern "system" fn( + device: Device, + p_wait_info: *const SemaphoreWaitInfo, + timeout: u64, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkSignalSemaphore = + extern "system" fn(device: Device, p_signal_info: *const SemaphoreSignalInfo) -> Result; +pub struct KhrTimelineSemaphoreFn { + pub get_semaphore_counter_value_khr: + extern "system" fn(device: Device, semaphore: Semaphore, p_value: *mut u64) -> Result, + pub wait_semaphores_khr: extern "system" fn( + device: Device, + p_wait_info: *const SemaphoreWaitInfo, + timeout: u64, + ) -> Result, + pub signal_semaphore_khr: + extern "system" fn(device: Device, p_signal_info: *const SemaphoreSignalInfo) -> Result, +} +unsafe impl Send for KhrTimelineSemaphoreFn {} +unsafe impl Sync for KhrTimelineSemaphoreFn {} +impl ::std::clone::Clone for KhrTimelineSemaphoreFn { fn clone(&self) -> Self { - KhrExtension208Fn {} + KhrTimelineSemaphoreFn { + get_semaphore_counter_value_khr: self.get_semaphore_counter_value_khr, + wait_semaphores_khr: self.wait_semaphores_khr, + signal_semaphore_khr: self.signal_semaphore_khr, + } } } -impl KhrExtension208Fn { +impl 
KhrTimelineSemaphoreFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrExtension208Fn {} + KhrTimelineSemaphoreFn { + get_semaphore_counter_value_khr: unsafe { + extern "system" fn get_semaphore_counter_value_khr( + _device: Device, + _semaphore: Semaphore, + _p_value: *mut u64, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_semaphore_counter_value_khr) + )) + } + let raw_name = stringify!(vkGetSemaphoreCounterValueKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_semaphore_counter_value_khr + } else { + ::std::mem::transmute(val) + } + }, + wait_semaphores_khr: unsafe { + extern "system" fn wait_semaphores_khr( + _device: Device, + _p_wait_info: *const SemaphoreWaitInfo, + _timeout: u64, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(wait_semaphores_khr))) + } + let raw_name = stringify!(vkWaitSemaphoresKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + wait_semaphores_khr + } else { + ::std::mem::transmute(val) + } + }, + signal_semaphore_khr: unsafe { + extern "system" fn signal_semaphore_khr( + _device: Device, + _p_signal_info: *const SemaphoreSignalInfo, + ) -> Result { + panic!(concat!("Unable to load ", stringify!(signal_semaphore_khr))) + } + let raw_name = stringify!(vkSignalSemaphoreKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + signal_semaphore_khr + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn get_semaphore_counter_value_khr( + &self, + device: Device, + semaphore: Semaphore, + p_value: *mut u64, + ) -> Result { + (self.get_semaphore_counter_value_khr)(device, semaphore, p_value) + } + #[doc = ""] + pub unsafe fn wait_semaphores_khr( + &self, + device: Device, + p_wait_info: *const SemaphoreWaitInfo, + timeout: u64, + ) -> Result { + 
(self.wait_semaphores_khr)(device, p_wait_info, timeout) + } + #[doc = ""] + pub unsafe fn signal_semaphore_khr( + &self, + device: Device, + p_signal_info: *const SemaphoreSignalInfo, + ) -> Result { + (self.signal_semaphore_khr)(device, p_signal_info) + } +} +#[doc = "Generated from \'VK_KHR_timeline_semaphore\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES; +} +#[doc = "Generated from \'VK_KHR_timeline_semaphore\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR: Self = + StructureType::PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES; +} +#[doc = "Generated from \'VK_KHR_timeline_semaphore\'"] +impl StructureType { + pub const SEMAPHORE_TYPE_CREATE_INFO_KHR: Self = StructureType::SEMAPHORE_TYPE_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_timeline_semaphore\'"] +impl StructureType { + pub const TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR: Self = + StructureType::TIMELINE_SEMAPHORE_SUBMIT_INFO; +} +#[doc = "Generated from \'VK_KHR_timeline_semaphore\'"] +impl StructureType { + pub const SEMAPHORE_WAIT_INFO_KHR: Self = StructureType::SEMAPHORE_WAIT_INFO; +} +#[doc = "Generated from \'VK_KHR_timeline_semaphore\'"] +impl StructureType { + pub const SEMAPHORE_SIGNAL_INFO_KHR: Self = StructureType::SEMAPHORE_SIGNAL_INFO; +} +#[doc = "Generated from \'VK_KHR_timeline_semaphore\'"] +impl SemaphoreType { + pub const BINARY_KHR: Self = SemaphoreType::BINARY; +} +#[doc = "Generated from \'VK_KHR_timeline_semaphore\'"] +impl SemaphoreType { + pub const TIMELINE_KHR: Self = SemaphoreType::TIMELINE; +} +#[doc = "Generated from \'VK_KHR_timeline_semaphore\'"] +impl SemaphoreWaitFlags { + pub const ANY_KHR: Self = SemaphoreWaitFlags::ANY; } impl KhrExtension209Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57733,49 +72918,410 @@ impl KhrExtension209Fn { KhrExtension209Fn {} } } -impl IntelExtension210Fn { +impl 
IntelShaderIntegerFunctions2Fn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_210\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_shader_integer_functions2\0") .expect("Wrong extension string") } } -pub struct IntelExtension210Fn {} -unsafe impl Send for IntelExtension210Fn {} -unsafe impl Sync for IntelExtension210Fn {} -impl ::std::clone::Clone for IntelExtension210Fn { +pub struct IntelShaderIntegerFunctions2Fn {} +unsafe impl Send for IntelShaderIntegerFunctions2Fn {} +unsafe impl Sync for IntelShaderIntegerFunctions2Fn {} +impl ::std::clone::Clone for IntelShaderIntegerFunctions2Fn { fn clone(&self) -> Self { - IntelExtension210Fn {} + IntelShaderIntegerFunctions2Fn {} } } -impl IntelExtension210Fn { +impl IntelShaderIntegerFunctions2Fn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - IntelExtension210Fn {} + IntelShaderIntegerFunctions2Fn {} } } -impl IntelExtension211Fn { +#[doc = "Generated from \'VK_INTEL_shader_integer_functions2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL: Self = + StructureType(1_000_209_000); +} +impl IntelPerformanceQueryFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_211\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_performance_query\0") .expect("Wrong extension string") } } -pub struct IntelExtension211Fn {} -unsafe impl Send for IntelExtension211Fn {} -unsafe impl Sync for IntelExtension211Fn {} -impl ::std::clone::Clone for IntelExtension211Fn { +#[allow(non_camel_case_types)] +pub type PFN_vkInitializePerformanceApiINTEL = extern "system" fn( + device: Device, + p_initialize_info: *const InitializePerformanceApiInfoINTEL, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkUninitializePerformanceApiINTEL = extern "system" fn(device: Device) -> c_void; +#[allow(non_camel_case_types)] +pub type 
PFN_vkCmdSetPerformanceMarkerINTEL = extern "system" fn( + command_buffer: CommandBuffer, + p_marker_info: *const PerformanceMarkerInfoINTEL, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetPerformanceStreamMarkerINTEL = extern "system" fn( + command_buffer: CommandBuffer, + p_marker_info: *const PerformanceStreamMarkerInfoINTEL, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetPerformanceOverrideINTEL = extern "system" fn( + command_buffer: CommandBuffer, + p_override_info: *const PerformanceOverrideInfoINTEL, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkAcquirePerformanceConfigurationINTEL = extern "system" fn( + device: Device, + p_acquire_info: *const PerformanceConfigurationAcquireInfoINTEL, + p_configuration: *mut PerformanceConfigurationINTEL, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkReleasePerformanceConfigurationINTEL = + extern "system" fn(device: Device, configuration: PerformanceConfigurationINTEL) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkQueueSetPerformanceConfigurationINTEL = + extern "system" fn(queue: Queue, configuration: PerformanceConfigurationINTEL) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPerformanceParameterINTEL = extern "system" fn( + device: Device, + parameter: PerformanceParameterTypeINTEL, + p_value: *mut PerformanceValueINTEL, +) -> Result; +pub struct IntelPerformanceQueryFn { + pub initialize_performance_api_intel: extern "system" fn( + device: Device, + p_initialize_info: *const InitializePerformanceApiInfoINTEL, + ) -> Result, + pub uninitialize_performance_api_intel: extern "system" fn(device: Device) -> c_void, + pub cmd_set_performance_marker_intel: extern "system" fn( + command_buffer: CommandBuffer, + p_marker_info: *const PerformanceMarkerInfoINTEL, + ) -> Result, + pub cmd_set_performance_stream_marker_intel: extern "system" fn( + command_buffer: CommandBuffer, + p_marker_info: *const 
PerformanceStreamMarkerInfoINTEL, + ) -> Result, + pub cmd_set_performance_override_intel: extern "system" fn( + command_buffer: CommandBuffer, + p_override_info: *const PerformanceOverrideInfoINTEL, + ) -> Result, + pub acquire_performance_configuration_intel: extern "system" fn( + device: Device, + p_acquire_info: *const PerformanceConfigurationAcquireInfoINTEL, + p_configuration: *mut PerformanceConfigurationINTEL, + ) -> Result, + pub release_performance_configuration_intel: + extern "system" fn(device: Device, configuration: PerformanceConfigurationINTEL) -> Result, + pub queue_set_performance_configuration_intel: + extern "system" fn(queue: Queue, configuration: PerformanceConfigurationINTEL) -> Result, + pub get_performance_parameter_intel: extern "system" fn( + device: Device, + parameter: PerformanceParameterTypeINTEL, + p_value: *mut PerformanceValueINTEL, + ) -> Result, +} +unsafe impl Send for IntelPerformanceQueryFn {} +unsafe impl Sync for IntelPerformanceQueryFn {} +impl ::std::clone::Clone for IntelPerformanceQueryFn { fn clone(&self) -> Self { - IntelExtension211Fn {} + IntelPerformanceQueryFn { + initialize_performance_api_intel: self.initialize_performance_api_intel, + uninitialize_performance_api_intel: self.uninitialize_performance_api_intel, + cmd_set_performance_marker_intel: self.cmd_set_performance_marker_intel, + cmd_set_performance_stream_marker_intel: self.cmd_set_performance_stream_marker_intel, + cmd_set_performance_override_intel: self.cmd_set_performance_override_intel, + acquire_performance_configuration_intel: self.acquire_performance_configuration_intel, + release_performance_configuration_intel: self.release_performance_configuration_intel, + queue_set_performance_configuration_intel: self + .queue_set_performance_configuration_intel, + get_performance_parameter_intel: self.get_performance_parameter_intel, + } } } -impl IntelExtension211Fn { +impl IntelPerformanceQueryFn { pub fn load(mut _f: F) -> Self where F: 
FnMut(&::std::ffi::CStr) -> *const c_void, { - IntelExtension211Fn {} + IntelPerformanceQueryFn { + initialize_performance_api_intel: unsafe { + extern "system" fn initialize_performance_api_intel( + _device: Device, + _p_initialize_info: *const InitializePerformanceApiInfoINTEL, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(initialize_performance_api_intel) + )) + } + let raw_name = stringify!(vkInitializePerformanceApiINTEL); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + initialize_performance_api_intel + } else { + ::std::mem::transmute(val) + } + }, + uninitialize_performance_api_intel: unsafe { + extern "system" fn uninitialize_performance_api_intel(_device: Device) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(uninitialize_performance_api_intel) + )) + } + let raw_name = stringify!(vkUninitializePerformanceApiINTEL); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + uninitialize_performance_api_intel + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_performance_marker_intel: unsafe { + extern "system" fn cmd_set_performance_marker_intel( + _command_buffer: CommandBuffer, + _p_marker_info: *const PerformanceMarkerInfoINTEL, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_performance_marker_intel) + )) + } + let raw_name = stringify!(vkCmdSetPerformanceMarkerINTEL); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_performance_marker_intel + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_performance_stream_marker_intel: unsafe { + extern "system" fn cmd_set_performance_stream_marker_intel( + _command_buffer: CommandBuffer, + _p_marker_info: *const PerformanceStreamMarkerInfoINTEL, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_performance_stream_marker_intel) + )) + } + 
let raw_name = stringify!(vkCmdSetPerformanceStreamMarkerINTEL); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_performance_stream_marker_intel + } else { + ::std::mem::transmute(val) + } + }, + cmd_set_performance_override_intel: unsafe { + extern "system" fn cmd_set_performance_override_intel( + _command_buffer: CommandBuffer, + _p_override_info: *const PerformanceOverrideInfoINTEL, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_performance_override_intel) + )) + } + let raw_name = stringify!(vkCmdSetPerformanceOverrideINTEL); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_performance_override_intel + } else { + ::std::mem::transmute(val) + } + }, + acquire_performance_configuration_intel: unsafe { + extern "system" fn acquire_performance_configuration_intel( + _device: Device, + _p_acquire_info: *const PerformanceConfigurationAcquireInfoINTEL, + _p_configuration: *mut PerformanceConfigurationINTEL, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(acquire_performance_configuration_intel) + )) + } + let raw_name = stringify!(vkAcquirePerformanceConfigurationINTEL); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + acquire_performance_configuration_intel + } else { + ::std::mem::transmute(val) + } + }, + release_performance_configuration_intel: unsafe { + extern "system" fn release_performance_configuration_intel( + _device: Device, + _configuration: PerformanceConfigurationINTEL, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(release_performance_configuration_intel) + )) + } + let raw_name = stringify!(vkReleasePerformanceConfigurationINTEL); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + release_performance_configuration_intel + } else { + 
::std::mem::transmute(val) + } + }, + queue_set_performance_configuration_intel: unsafe { + extern "system" fn queue_set_performance_configuration_intel( + _queue: Queue, + _configuration: PerformanceConfigurationINTEL, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(queue_set_performance_configuration_intel) + )) + } + let raw_name = stringify!(vkQueueSetPerformanceConfigurationINTEL); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + queue_set_performance_configuration_intel + } else { + ::std::mem::transmute(val) + } + }, + get_performance_parameter_intel: unsafe { + extern "system" fn get_performance_parameter_intel( + _device: Device, + _parameter: PerformanceParameterTypeINTEL, + _p_value: *mut PerformanceValueINTEL, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_performance_parameter_intel) + )) + } + let raw_name = stringify!(vkGetPerformanceParameterINTEL); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_performance_parameter_intel + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn initialize_performance_api_intel( + &self, + device: Device, + p_initialize_info: *const InitializePerformanceApiInfoINTEL, + ) -> Result { + (self.initialize_performance_api_intel)(device, p_initialize_info) + } + #[doc = ""] + pub unsafe fn uninitialize_performance_api_intel(&self, device: Device) -> c_void { + (self.uninitialize_performance_api_intel)(device) + } + #[doc = ""] + pub unsafe fn cmd_set_performance_marker_intel( + &self, + command_buffer: CommandBuffer, + p_marker_info: *const PerformanceMarkerInfoINTEL, + ) -> Result { + (self.cmd_set_performance_marker_intel)(command_buffer, p_marker_info) + } + #[doc = ""] + pub unsafe fn cmd_set_performance_stream_marker_intel( + &self, + command_buffer: CommandBuffer, + p_marker_info: *const PerformanceStreamMarkerInfoINTEL, + ) 
-> Result { + (self.cmd_set_performance_stream_marker_intel)(command_buffer, p_marker_info) + } + #[doc = ""] + pub unsafe fn cmd_set_performance_override_intel( + &self, + command_buffer: CommandBuffer, + p_override_info: *const PerformanceOverrideInfoINTEL, + ) -> Result { + (self.cmd_set_performance_override_intel)(command_buffer, p_override_info) + } + #[doc = ""] + pub unsafe fn acquire_performance_configuration_intel( + &self, + device: Device, + p_acquire_info: *const PerformanceConfigurationAcquireInfoINTEL, + p_configuration: *mut PerformanceConfigurationINTEL, + ) -> Result { + (self.acquire_performance_configuration_intel)(device, p_acquire_info, p_configuration) + } + #[doc = ""] + pub unsafe fn release_performance_configuration_intel( + &self, + device: Device, + configuration: PerformanceConfigurationINTEL, + ) -> Result { + (self.release_performance_configuration_intel)(device, configuration) + } + #[doc = ""] + pub unsafe fn queue_set_performance_configuration_intel( + &self, + queue: Queue, + configuration: PerformanceConfigurationINTEL, + ) -> Result { + (self.queue_set_performance_configuration_intel)(queue, configuration) + } + #[doc = ""] + pub unsafe fn get_performance_parameter_intel( + &self, + device: Device, + parameter: PerformanceParameterTypeINTEL, + p_value: *mut PerformanceValueINTEL, + ) -> Result { + (self.get_performance_parameter_intel)(device, parameter, p_value) + } +} +#[doc = "Generated from \'VK_INTEL_performance_query\'"] +impl StructureType { + pub const QUERY_POOL_CREATE_INFO_INTEL: Self = StructureType(1_000_210_000); +} +#[doc = "Generated from \'VK_INTEL_performance_query\'"] +impl StructureType { + pub const INITIALIZE_PERFORMANCE_API_INFO_INTEL: Self = StructureType(1_000_210_001); +} +#[doc = "Generated from \'VK_INTEL_performance_query\'"] +impl StructureType { + pub const PERFORMANCE_MARKER_INFO_INTEL: Self = StructureType(1_000_210_002); +} +#[doc = "Generated from \'VK_INTEL_performance_query\'"] +impl 
StructureType { + pub const PERFORMANCE_STREAM_MARKER_INFO_INTEL: Self = StructureType(1_000_210_003); +} +#[doc = "Generated from \'VK_INTEL_performance_query\'"] +impl StructureType { + pub const PERFORMANCE_OVERRIDE_INFO_INTEL: Self = StructureType(1_000_210_004); +} +#[doc = "Generated from \'VK_INTEL_performance_query\'"] +impl StructureType { + pub const PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL: Self = StructureType(1_000_210_005); +} +#[doc = "Generated from \'VK_INTEL_performance_query\'"] +impl QueryType { + pub const PERFORMANCE_QUERY_INTEL: Self = QueryType(1_000_210_000); +} +#[doc = "Generated from \'VK_INTEL_performance_query\'"] +impl ObjectType { + pub const PERFORMANCE_CONFIGURATION_INTEL: Self = ObjectType(1_000_210_000); } impl KhrVulkanMemoryModelFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57801,7 +73347,8 @@ impl KhrVulkanMemoryModelFn { } #[doc = "Generated from \'VK_KHR_vulkan_memory_model\'"] impl StructureType { - pub const PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: Self = StructureType(1000211000); + pub const PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES; } impl ExtPciBusInfoFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57827,29 +73374,85 @@ impl ExtPciBusInfoFn { } #[doc = "Generated from \'VK_EXT_pci_bus_info\'"] impl StructureType { - pub const PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: Self = StructureType(1000212000); + pub const PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: Self = StructureType(1_000_212_000); } -impl AmdExtension214Fn { +impl AmdDisplayNativeHdrFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_214\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_display_native_hdr\0") .expect("Wrong extension string") } } -pub struct AmdExtension214Fn {} -unsafe impl Send for AmdExtension214Fn {} -unsafe impl Sync for AmdExtension214Fn {} -impl ::std::clone::Clone for 
AmdExtension214Fn { +#[allow(non_camel_case_types)] +pub type PFN_vkSetLocalDimmingAMD = extern "system" fn( + device: Device, + swap_chain: SwapchainKHR, + local_dimming_enable: Bool32, +) -> c_void; +pub struct AmdDisplayNativeHdrFn { + pub set_local_dimming_amd: extern "system" fn( + device: Device, + swap_chain: SwapchainKHR, + local_dimming_enable: Bool32, + ) -> c_void, +} +unsafe impl Send for AmdDisplayNativeHdrFn {} +unsafe impl Sync for AmdDisplayNativeHdrFn {} +impl ::std::clone::Clone for AmdDisplayNativeHdrFn { fn clone(&self) -> Self { - AmdExtension214Fn {} + AmdDisplayNativeHdrFn { + set_local_dimming_amd: self.set_local_dimming_amd, + } } } -impl AmdExtension214Fn { +impl AmdDisplayNativeHdrFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - AmdExtension214Fn {} + AmdDisplayNativeHdrFn { + set_local_dimming_amd: unsafe { + extern "system" fn set_local_dimming_amd( + _device: Device, + _swap_chain: SwapchainKHR, + _local_dimming_enable: Bool32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(set_local_dimming_amd) + )) + } + let raw_name = stringify!(vkSetLocalDimmingAMD); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + set_local_dimming_amd + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn set_local_dimming_amd( + &self, + device: Device, + swap_chain: SwapchainKHR, + local_dimming_enable: Bool32, + ) -> c_void { + (self.set_local_dimming_amd)(device, swap_chain, local_dimming_enable) + } +} +#[doc = "Generated from \'VK_AMD_display_native_hdr\'"] +impl StructureType { + pub const DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD: Self = StructureType(1_000_213_000); +} +#[doc = "Generated from \'VK_AMD_display_native_hdr\'"] +impl StructureType { + pub const SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD: Self = StructureType(1_000_213_001); +} +#[doc = "Generated from \'VK_AMD_display_native_hdr\'"] 
+impl ColorSpaceKHR { + pub const DISPLAY_NATIVE_AMD: Self = ColorSpaceKHR(1_000_213_000); } impl FuchsiaImagepipeSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57910,7 +73513,7 @@ impl FuchsiaImagepipeSurfaceFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn create_image_pipe_surface_fuchsia( &self, instance: Instance, @@ -57923,7 +73526,7 @@ impl FuchsiaImagepipeSurfaceFn { } #[doc = "Generated from \'VK_FUCHSIA_imagepipe_surface\'"] impl StructureType { - pub const IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA: Self = StructureType(1000214000); + pub const IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA: Self = StructureType(1_000_214_000); } impl GoogleExtension216Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -57969,27 +73572,79 @@ impl GoogleExtension217Fn { GoogleExtension217Fn {} } } -impl ExtMacosIosWindowFn { +impl ExtMetalSurfaceFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_macos_ios_window\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_metal_surface\0") .expect("Wrong extension string") } } -pub struct ExtMacosIosWindowFn {} -unsafe impl Send for ExtMacosIosWindowFn {} -unsafe impl Sync for ExtMacosIosWindowFn {} -impl ::std::clone::Clone for ExtMacosIosWindowFn { +#[allow(non_camel_case_types)] +pub type PFN_vkCreateMetalSurfaceEXT = extern "system" fn( + instance: Instance, + p_create_info: *const MetalSurfaceCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, +) -> Result; +pub struct ExtMetalSurfaceFn { + pub create_metal_surface_ext: extern "system" fn( + instance: Instance, + p_create_info: *const MetalSurfaceCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, +} +unsafe impl Send for ExtMetalSurfaceFn {} +unsafe impl Sync for ExtMetalSurfaceFn {} +impl ::std::clone::Clone for ExtMetalSurfaceFn { fn clone(&self) -> Self { - ExtMacosIosWindowFn {} + ExtMetalSurfaceFn { + create_metal_surface_ext: 
self.create_metal_surface_ext, + } } } -impl ExtMacosIosWindowFn { +impl ExtMetalSurfaceFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - ExtMacosIosWindowFn {} + ExtMetalSurfaceFn { + create_metal_surface_ext: unsafe { + extern "system" fn create_metal_surface_ext( + _instance: Instance, + _p_create_info: *const MetalSurfaceCreateInfoEXT, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_metal_surface_ext) + )) + } + let raw_name = stringify!(vkCreateMetalSurfaceEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_metal_surface_ext + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn create_metal_surface_ext( + &self, + instance: Instance, + p_create_info: *const MetalSurfaceCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_metal_surface_ext)(instance, p_create_info, p_allocator, p_surface) + } +} +#[doc = "Generated from \'VK_EXT_metal_surface\'"] +impl StructureType { + pub const METAL_SURFACE_CREATE_INFO_EXT: Self = StructureType(1_000_217_000); } impl ExtFragmentDensityMapFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -58015,35 +73670,38 @@ impl ExtFragmentDensityMapFn { } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl StructureType { - pub const PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT: Self = StructureType(1000218000); + pub const PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT: Self = + StructureType(1_000_218_000); } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl StructureType { - pub const PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT: Self = StructureType(1000218001); + pub const PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT: Self = + StructureType(1_000_218_001); } #[doc = "Generated from 
\'VK_EXT_fragment_density_map\'"] impl StructureType { - pub const RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT: Self = StructureType(1000218002); + pub const RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT: Self = StructureType(1_000_218_002); } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl ImageCreateFlags { - pub const SUBSAMPLED_EXT: Self = ImageCreateFlags(0b100000000000000); + pub const SUBSAMPLED_EXT: Self = ImageCreateFlags(0b100_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl ImageLayout { - pub const FRAGMENT_DENSITY_MAP_OPTIMAL_EXT: Self = ImageLayout(1000218000); + pub const FRAGMENT_DENSITY_MAP_OPTIMAL_EXT: Self = ImageLayout(1_000_218_000); } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl AccessFlags { - pub const FRAGMENT_DENSITY_MAP_READ_EXT: Self = AccessFlags(0b1000000000000000000000000); + pub const FRAGMENT_DENSITY_MAP_READ_EXT: Self = AccessFlags(0b1_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl FormatFeatureFlags { - pub const FRAGMENT_DENSITY_MAP_EXT: Self = FormatFeatureFlags(0b1000000000000000000000000); + pub const FRAGMENT_DENSITY_MAP_EXT: Self = + FormatFeatureFlags(0b1_0000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl ImageUsageFlags { - pub const FRAGMENT_DENSITY_MAP_EXT: Self = ImageUsageFlags(0b1000000000); + pub const FRAGMENT_DENSITY_MAP_EXT: Self = ImageUsageFlags(0b10_0000_0000); } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl ImageViewCreateFlags { @@ -58051,7 +73709,8 @@ impl ImageViewCreateFlags { } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl PipelineStageFlags { - pub const FRAGMENT_DENSITY_PROCESS_EXT: Self = PipelineStageFlags(0b100000000000000000000000); + pub const FRAGMENT_DENSITY_PROCESS_EXT: Self = + PipelineStageFlags(0b1000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_EXT_fragment_density_map\'"] impl 
SamplerCreateFlags { @@ -58133,7 +73792,8 @@ impl ExtScalarBlockLayoutFn { } #[doc = "Generated from \'VK_EXT_scalar_block_layout\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: Self = StructureType(1000221000); + pub const PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: Self = + StructureType::PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES; } impl ExtExtension223Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -58201,28 +73861,51 @@ impl GoogleDecorateStringFn { GoogleDecorateStringFn {} } } -impl AmdExtension226Fn { +impl ExtSubgroupSizeControlFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_226\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_subgroup_size_control\0") .expect("Wrong extension string") } } -pub struct AmdExtension226Fn {} -unsafe impl Send for AmdExtension226Fn {} -unsafe impl Sync for AmdExtension226Fn {} -impl ::std::clone::Clone for AmdExtension226Fn { +pub struct ExtSubgroupSizeControlFn {} +unsafe impl Send for ExtSubgroupSizeControlFn {} +unsafe impl Sync for ExtSubgroupSizeControlFn {} +impl ::std::clone::Clone for ExtSubgroupSizeControlFn { fn clone(&self) -> Self { - AmdExtension226Fn {} + ExtSubgroupSizeControlFn {} } } -impl AmdExtension226Fn { +impl ExtSubgroupSizeControlFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - AmdExtension226Fn {} + ExtSubgroupSizeControlFn {} } } +#[doc = "Generated from \'VK_EXT_subgroup_size_control\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT: Self = + StructureType(1_000_225_000); +} +#[doc = "Generated from \'VK_EXT_subgroup_size_control\'"] +impl StructureType { + pub const PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT: Self = + StructureType(1_000_225_001); +} +#[doc = "Generated from \'VK_EXT_subgroup_size_control\'"] +impl StructureType { + pub const 
PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT: Self = + StructureType(1_000_225_002); +} +#[doc = "Generated from \'VK_EXT_subgroup_size_control\'"] +impl PipelineShaderStageCreateFlags { + pub const ALLOW_VARYING_SUBGROUP_SIZE_EXT: Self = PipelineShaderStageCreateFlags(0b1); +} +#[doc = "Generated from \'VK_EXT_subgroup_size_control\'"] +impl PipelineShaderStageCreateFlags { + pub const REQUIRE_FULL_SUBGROUPS_EXT: Self = PipelineShaderStageCreateFlags(0b10); +} impl AmdExtension227Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_227\0") @@ -58245,28 +73928,32 @@ impl AmdExtension227Fn { AmdExtension227Fn {} } } -impl AmdExtension228Fn { +impl AmdShaderCoreProperties2Fn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_228\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_shader_core_properties2\0") .expect("Wrong extension string") } } -pub struct AmdExtension228Fn {} -unsafe impl Send for AmdExtension228Fn {} -unsafe impl Sync for AmdExtension228Fn {} -impl ::std::clone::Clone for AmdExtension228Fn { +pub struct AmdShaderCoreProperties2Fn {} +unsafe impl Send for AmdShaderCoreProperties2Fn {} +unsafe impl Sync for AmdShaderCoreProperties2Fn {} +impl ::std::clone::Clone for AmdShaderCoreProperties2Fn { fn clone(&self) -> Self { - AmdExtension228Fn {} + AmdShaderCoreProperties2Fn {} } } -impl AmdExtension228Fn { +impl AmdShaderCoreProperties2Fn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - AmdExtension228Fn {} + AmdShaderCoreProperties2Fn {} } } +#[doc = "Generated from \'VK_AMD_shader_core_properties2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD: Self = StructureType(1_000_227_000); +} impl AmdExtension229Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_229\0") @@ -58289,28 +73976,40 @@ impl 
AmdExtension229Fn { AmdExtension229Fn {} } } -impl AmdExtension230Fn { +impl AmdDeviceCoherentMemoryFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_230\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_device_coherent_memory\0") .expect("Wrong extension string") } } -pub struct AmdExtension230Fn {} -unsafe impl Send for AmdExtension230Fn {} -unsafe impl Sync for AmdExtension230Fn {} -impl ::std::clone::Clone for AmdExtension230Fn { +pub struct AmdDeviceCoherentMemoryFn {} +unsafe impl Send for AmdDeviceCoherentMemoryFn {} +unsafe impl Sync for AmdDeviceCoherentMemoryFn {} +impl ::std::clone::Clone for AmdDeviceCoherentMemoryFn { fn clone(&self) -> Self { - AmdExtension230Fn {} + AmdDeviceCoherentMemoryFn {} } } -impl AmdExtension230Fn { +impl AmdDeviceCoherentMemoryFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - AmdExtension230Fn {} + AmdDeviceCoherentMemoryFn {} } } +#[doc = "Generated from \'VK_AMD_device_coherent_memory\'"] +impl MemoryPropertyFlags { + pub const DEVICE_COHERENT_AMD: Self = MemoryPropertyFlags(0b100_0000); +} +#[doc = "Generated from \'VK_AMD_device_coherent_memory\'"] +impl MemoryPropertyFlags { + pub const DEVICE_UNCACHED_AMD: Self = MemoryPropertyFlags(0b1000_0000); +} +#[doc = "Generated from \'VK_AMD_device_coherent_memory\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD: Self = StructureType(1_000_229_000); +} impl AmdExtension231Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_231\0") @@ -58443,26 +74142,26 @@ impl AmdExtension236Fn { AmdExtension236Fn {} } } -impl KhrExtension237Fn { +impl KhrSpirv14Fn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_237\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_spirv_1_4\0") .expect("Wrong extension string") } } -pub struct 
KhrExtension237Fn {} -unsafe impl Send for KhrExtension237Fn {} -unsafe impl Sync for KhrExtension237Fn {} -impl ::std::clone::Clone for KhrExtension237Fn { +pub struct KhrSpirv14Fn {} +unsafe impl Send for KhrSpirv14Fn {} +unsafe impl Sync for KhrSpirv14Fn {} +impl ::std::clone::Clone for KhrSpirv14Fn { fn clone(&self) -> Self { - KhrExtension237Fn {} + KhrSpirv14Fn {} } } -impl KhrExtension237Fn { +impl KhrSpirv14Fn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrExtension237Fn {} + KhrSpirv14Fn {} } } impl ExtMemoryBudgetFn { @@ -58489,7 +74188,7 @@ impl ExtMemoryBudgetFn { } #[doc = "Generated from \'VK_EXT_memory_budget\'"] impl StructureType { - pub const PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT: Self = StructureType(1000237000); + pub const PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT: Self = StructureType(1_000_237_000); } impl ExtMemoryPriorityFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -58515,78 +74214,118 @@ impl ExtMemoryPriorityFn { } #[doc = "Generated from \'VK_EXT_memory_priority\'"] impl StructureType { - pub const PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT: Self = StructureType(1000238000); + pub const PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT: Self = StructureType(1_000_238_000); } #[doc = "Generated from \'VK_EXT_memory_priority\'"] impl StructureType { - pub const MEMORY_PRIORITY_ALLOCATE_INFO_EXT: Self = StructureType(1000238001); + pub const MEMORY_PRIORITY_ALLOCATE_INFO_EXT: Self = StructureType(1_000_238_001); } -impl KhrExtension240Fn { +impl KhrSurfaceProtectedCapabilitiesFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_240\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_surface_protected_capabilities\0") .expect("Wrong extension string") } } -pub struct KhrExtension240Fn {} -unsafe impl Send for KhrExtension240Fn {} -unsafe impl Sync for KhrExtension240Fn {} -impl ::std::clone::Clone for KhrExtension240Fn { +pub 
struct KhrSurfaceProtectedCapabilitiesFn {} +unsafe impl Send for KhrSurfaceProtectedCapabilitiesFn {} +unsafe impl Sync for KhrSurfaceProtectedCapabilitiesFn {} +impl ::std::clone::Clone for KhrSurfaceProtectedCapabilitiesFn { fn clone(&self) -> Self { - KhrExtension240Fn {} + KhrSurfaceProtectedCapabilitiesFn {} } } -impl KhrExtension240Fn { +impl KhrSurfaceProtectedCapabilitiesFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - KhrExtension240Fn {} + KhrSurfaceProtectedCapabilitiesFn {} } } -impl NvExtension241Fn { +#[doc = "Generated from \'VK_KHR_surface_protected_capabilities\'"] +impl StructureType { + pub const SURFACE_PROTECTED_CAPABILITIES_KHR: Self = StructureType(1_000_239_000); +} +impl NvDedicatedAllocationImageAliasingFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_241\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_dedicated_allocation_image_aliasing\0") .expect("Wrong extension string") } } -pub struct NvExtension241Fn {} -unsafe impl Send for NvExtension241Fn {} -unsafe impl Sync for NvExtension241Fn {} -impl ::std::clone::Clone for NvExtension241Fn { +pub struct NvDedicatedAllocationImageAliasingFn {} +unsafe impl Send for NvDedicatedAllocationImageAliasingFn {} +unsafe impl Sync for NvDedicatedAllocationImageAliasingFn {} +impl ::std::clone::Clone for NvDedicatedAllocationImageAliasingFn { fn clone(&self) -> Self { - NvExtension241Fn {} + NvDedicatedAllocationImageAliasingFn {} } } -impl NvExtension241Fn { +impl NvDedicatedAllocationImageAliasingFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - NvExtension241Fn {} + NvDedicatedAllocationImageAliasingFn {} } } -impl NvExtension242Fn { +#[doc = "Generated from \'VK_NV_dedicated_allocation_image_aliasing\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV: Self = + StructureType(1_000_240_000); +} +impl 
KhrSeparateDepthStencilLayoutsFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_242\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_separate_depth_stencil_layouts\0") .expect("Wrong extension string") } } -pub struct NvExtension242Fn {} -unsafe impl Send for NvExtension242Fn {} -unsafe impl Sync for NvExtension242Fn {} -impl ::std::clone::Clone for NvExtension242Fn { +pub struct KhrSeparateDepthStencilLayoutsFn {} +unsafe impl Send for KhrSeparateDepthStencilLayoutsFn {} +unsafe impl Sync for KhrSeparateDepthStencilLayoutsFn {} +impl ::std::clone::Clone for KhrSeparateDepthStencilLayoutsFn { fn clone(&self) -> Self { - NvExtension242Fn {} + KhrSeparateDepthStencilLayoutsFn {} } } -impl NvExtension242Fn { +impl KhrSeparateDepthStencilLayoutsFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - NvExtension242Fn {} + KhrSeparateDepthStencilLayoutsFn {} } } +#[doc = "Generated from \'VK_KHR_separate_depth_stencil_layouts\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES; +} +#[doc = "Generated from \'VK_KHR_separate_depth_stencil_layouts\'"] +impl StructureType { + pub const ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR: Self = + StructureType::ATTACHMENT_REFERENCE_STENCIL_LAYOUT; +} +#[doc = "Generated from \'VK_KHR_separate_depth_stencil_layouts\'"] +impl StructureType { + pub const ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR: Self = + StructureType::ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT; +} +#[doc = "Generated from \'VK_KHR_separate_depth_stencil_layouts\'"] +impl ImageLayout { + pub const DEPTH_ATTACHMENT_OPTIMAL_KHR: Self = ImageLayout::DEPTH_ATTACHMENT_OPTIMAL; +} +#[doc = "Generated from \'VK_KHR_separate_depth_stencil_layouts\'"] +impl ImageLayout { + pub const DEPTH_READ_ONLY_OPTIMAL_KHR: Self = ImageLayout::DEPTH_READ_ONLY_OPTIMAL; +} +#[doc 
= "Generated from \'VK_KHR_separate_depth_stencil_layouts\'"] +impl ImageLayout { + pub const STENCIL_ATTACHMENT_OPTIMAL_KHR: Self = ImageLayout::STENCIL_ATTACHMENT_OPTIMAL; +} +#[doc = "Generated from \'VK_KHR_separate_depth_stencil_layouts\'"] +impl ImageLayout { + pub const STENCIL_READ_ONLY_OPTIMAL_KHR: Self = ImageLayout::STENCIL_READ_ONLY_OPTIMAL; +} impl IntelExtension243Fn { pub fn name() -> &'static ::std::ffi::CStr { ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_243\0") @@ -58638,13 +74377,11 @@ impl ExtBufferDeviceAddressFn { } } #[allow(non_camel_case_types)] -pub type PFN_vkGetBufferDeviceAddressEXT = - extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfoEXT) -> DeviceAddress; +pub type PFN_vkGetBufferDeviceAddress = + extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfo) -> DeviceAddress; pub struct ExtBufferDeviceAddressFn { - pub get_buffer_device_address_ext: extern "system" fn( - device: Device, - p_info: *const BufferDeviceAddressInfoEXT, - ) -> DeviceAddress, + pub get_buffer_device_address_ext: + extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfo) -> DeviceAddress, } unsafe impl Send for ExtBufferDeviceAddressFn {} unsafe impl Sync for ExtBufferDeviceAddressFn {} @@ -58664,7 +74401,7 @@ impl ExtBufferDeviceAddressFn { get_buffer_device_address_ext: unsafe { extern "system" fn get_buffer_device_address_ext( _device: Device, - _p_info: *const BufferDeviceAddressInfoEXT, + _p_info: *const BufferDeviceAddressInfo, ) -> DeviceAddress { panic!(concat!( "Unable to load ", @@ -58682,60 +74419,127 @@ impl ExtBufferDeviceAddressFn { }, } } - #[doc = ""] + #[doc = ""] pub unsafe fn get_buffer_device_address_ext( &self, device: Device, - p_info: *const BufferDeviceAddressInfoEXT, + p_info: *const BufferDeviceAddressInfo, ) -> DeviceAddress { (self.get_buffer_device_address_ext)(device, p_info) } } #[doc = "Generated from \'VK_EXT_buffer_device_address\'"] impl StructureType { - 
pub const PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT: Self = StructureType(1000244000); + pub const PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: Self = + StructureType(1_000_244_000); } #[doc = "Generated from \'VK_EXT_buffer_device_address\'"] impl StructureType { - pub const BUFFER_DEVICE_ADDRESS_INFO_EXT: Self = StructureType(1000244001); + pub const PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT: Self = + StructureType::PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT; } #[doc = "Generated from \'VK_EXT_buffer_device_address\'"] impl StructureType { - pub const BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT: Self = StructureType(1000244002); + pub const BUFFER_DEVICE_ADDRESS_INFO_EXT: Self = StructureType::BUFFER_DEVICE_ADDRESS_INFO; +} +#[doc = "Generated from \'VK_EXT_buffer_device_address\'"] +impl StructureType { + pub const BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT: Self = StructureType(1_000_244_002); } #[doc = "Generated from \'VK_EXT_buffer_device_address\'"] impl BufferUsageFlags { - pub const SHADER_DEVICE_ADDRESS_EXT: Self = BufferUsageFlags(0b100000000000000000); + pub const SHADER_DEVICE_ADDRESS_EXT: Self = BufferUsageFlags::SHADER_DEVICE_ADDRESS; } #[doc = "Generated from \'VK_EXT_buffer_device_address\'"] impl BufferCreateFlags { - pub const DEVICE_ADDRESS_CAPTURE_REPLAY_EXT: Self = BufferCreateFlags(0b10000); + pub const DEVICE_ADDRESS_CAPTURE_REPLAY_EXT: Self = + BufferCreateFlags::DEVICE_ADDRESS_CAPTURE_REPLAY; } #[doc = "Generated from \'VK_EXT_buffer_device_address\'"] impl Result { - pub const ERROR_INVALID_DEVICE_ADDRESS_EXT: Self = Result(-1000244000); + pub const ERROR_INVALID_DEVICE_ADDRESS_EXT: Self = Result::ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS; } -impl ExtExtension246Fn { +impl ExtToolingInfoFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_246\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_tooling_info\0") .expect("Wrong extension string") } } -pub struct ExtExtension246Fn 
{} -unsafe impl Send for ExtExtension246Fn {} -unsafe impl Sync for ExtExtension246Fn {} -impl ::std::clone::Clone for ExtExtension246Fn { +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceToolPropertiesEXT = extern "system" fn( + physical_device: PhysicalDevice, + p_tool_count: *mut u32, + p_tool_properties: *mut PhysicalDeviceToolPropertiesEXT, +) -> Result; +pub struct ExtToolingInfoFn { + pub get_physical_device_tool_properties_ext: extern "system" fn( + physical_device: PhysicalDevice, + p_tool_count: *mut u32, + p_tool_properties: *mut PhysicalDeviceToolPropertiesEXT, + ) -> Result, +} +unsafe impl Send for ExtToolingInfoFn {} +unsafe impl Sync for ExtToolingInfoFn {} +impl ::std::clone::Clone for ExtToolingInfoFn { fn clone(&self) -> Self { - ExtExtension246Fn {} + ExtToolingInfoFn { + get_physical_device_tool_properties_ext: self.get_physical_device_tool_properties_ext, + } } } -impl ExtExtension246Fn { +impl ExtToolingInfoFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - ExtExtension246Fn {} + ExtToolingInfoFn { + get_physical_device_tool_properties_ext: unsafe { + extern "system" fn get_physical_device_tool_properties_ext( + _physical_device: PhysicalDevice, + _p_tool_count: *mut u32, + _p_tool_properties: *mut PhysicalDeviceToolPropertiesEXT, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_tool_properties_ext) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceToolPropertiesEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_tool_properties_ext + } else { + ::std::mem::transmute(val) + } + }, + } } + #[doc = ""] + pub unsafe fn get_physical_device_tool_properties_ext( + &self, + physical_device: PhysicalDevice, + p_tool_count: *mut u32, + p_tool_properties: *mut PhysicalDeviceToolPropertiesEXT, + ) -> Result { + (self.get_physical_device_tool_properties_ext)( + 
physical_device, + p_tool_count, + p_tool_properties, + ) + } +} +#[doc = "Generated from \'VK_EXT_tooling_info\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT: Self = StructureType(1_000_245_000); +} +#[doc = "Generated from \'VK_EXT_tooling_info\'"] +impl ToolPurposeFlagsEXT { + pub const DEBUG_REPORTING: Self = ToolPurposeFlagsEXT(0b10_0000); +} +#[doc = "Generated from \'VK_EXT_tooling_info\'"] +impl ToolPurposeFlagsEXT { + pub const DEBUG_MARKERS: Self = ToolPurposeFlagsEXT(0b100_0000); } impl ExtSeparateStencilUsageFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -58761,7 +74565,8 @@ impl ExtSeparateStencilUsageFn { } #[doc = "Generated from \'VK_EXT_separate_stencil_usage\'"] impl StructureType { - pub const IMAGE_STENCIL_USAGE_CREATE_INFO_EXT: Self = StructureType(1000246000); + pub const IMAGE_STENCIL_USAGE_CREATE_INFO_EXT: Self = + StructureType::IMAGE_STENCIL_USAGE_CREATE_INFO; } impl ExtValidationFeaturesFn { pub fn name() -> &'static ::std::ffi::CStr { @@ -58787,7 +74592,7 @@ impl ExtValidationFeaturesFn { } #[doc = "Generated from \'VK_EXT_validation_features\'"] impl StructureType { - pub const VALIDATION_FEATURES_EXT: Self = StructureType(1000247000); + pub const VALIDATION_FEATURES_EXT: Self = StructureType(1_000_247_000); } impl KhrExtension249Fn { pub fn name() -> &'static ::std::ffi::CStr { @@ -58811,75 +74616,3117 @@ impl KhrExtension249Fn { KhrExtension249Fn {} } } -impl NvExtension250Fn { +impl NvCooperativeMatrixFn { pub fn name() -> &'static ::std::ffi::CStr { - ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_250\0") + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_cooperative_matrix\0") .expect("Wrong extension string") } } -pub struct NvExtension250Fn {} -unsafe impl Send for NvExtension250Fn {} -unsafe impl Sync for NvExtension250Fn {} -impl ::std::clone::Clone for NvExtension250Fn { +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV = extern "system" fn( + 
physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut CooperativeMatrixPropertiesNV, +) -> Result; +pub struct NvCooperativeMatrixFn { + pub get_physical_device_cooperative_matrix_properties_nv: extern "system" fn( + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut CooperativeMatrixPropertiesNV, + ) -> Result, +} +unsafe impl Send for NvCooperativeMatrixFn {} +unsafe impl Sync for NvCooperativeMatrixFn {} +impl ::std::clone::Clone for NvCooperativeMatrixFn { fn clone(&self) -> Self { - NvExtension250Fn {} + NvCooperativeMatrixFn { + get_physical_device_cooperative_matrix_properties_nv: self + .get_physical_device_cooperative_matrix_properties_nv, + } } } -impl NvExtension250Fn { +impl NvCooperativeMatrixFn { pub fn load(mut _f: F) -> Self where F: FnMut(&::std::ffi::CStr) -> *const c_void, { - NvExtension250Fn {} + NvCooperativeMatrixFn { + get_physical_device_cooperative_matrix_properties_nv: unsafe { + extern "system" fn get_physical_device_cooperative_matrix_properties_nv( + _physical_device: PhysicalDevice, + _p_property_count: *mut u32, + _p_properties: *mut CooperativeMatrixPropertiesNV, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_cooperative_matrix_properties_nv) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceCooperativeMatrixPropertiesNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_cooperative_matrix_properties_nv + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_physical_device_cooperative_matrix_properties_nv( + &self, + physical_device: PhysicalDevice, + p_property_count: *mut u32, + p_properties: *mut CooperativeMatrixPropertiesNV, + ) -> Result { + (self.get_physical_device_cooperative_matrix_properties_nv)( + physical_device, + p_property_count, + p_properties, + ) + } +} +#[doc = "Generated from 
\'VK_NV_cooperative_matrix\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV: Self = StructureType(1_000_249_000); +} +#[doc = "Generated from \'VK_NV_cooperative_matrix\'"] +impl StructureType { + pub const COOPERATIVE_MATRIX_PROPERTIES_NV: Self = StructureType(1_000_249_001); +} +#[doc = "Generated from \'VK_NV_cooperative_matrix\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV: Self = StructureType(1_000_249_002); +} +impl NvCoverageReductionModeFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_coverage_reduction_mode\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = + extern "system" fn( + physical_device: PhysicalDevice, + p_combination_count: *mut u32, + p_combinations: *mut FramebufferMixedSamplesCombinationNV, + ) -> Result; +pub struct NvCoverageReductionModeFn { + pub get_physical_device_supported_framebuffer_mixed_samples_combinations_nv: + extern "system" fn( + physical_device: PhysicalDevice, + p_combination_count: *mut u32, + p_combinations: *mut FramebufferMixedSamplesCombinationNV, + ) -> Result, +} +unsafe impl Send for NvCoverageReductionModeFn {} +unsafe impl Sync for NvCoverageReductionModeFn {} +impl ::std::clone::Clone for NvCoverageReductionModeFn { + fn clone(&self) -> Self { + NvCoverageReductionModeFn { + get_physical_device_supported_framebuffer_mixed_samples_combinations_nv: self + .get_physical_device_supported_framebuffer_mixed_samples_combinations_nv, + } + } +} +impl NvCoverageReductionModeFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvCoverageReductionModeFn { + get_physical_device_supported_framebuffer_mixed_samples_combinations_nv: unsafe { + extern "system" fn get_physical_device_supported_framebuffer_mixed_samples_combinations_nv( + 
_physical_device: PhysicalDevice, + _p_combination_count: *mut u32, + _p_combinations: *mut FramebufferMixedSamplesCombinationNV, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!( + get_physical_device_supported_framebuffer_mixed_samples_combinations_nv + ) + )) + } + let raw_name = + stringify!(vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_supported_framebuffer_mixed_samples_combinations_nv + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_physical_device_supported_framebuffer_mixed_samples_combinations_nv( + &self, + physical_device: PhysicalDevice, + p_combination_count: *mut u32, + p_combinations: *mut FramebufferMixedSamplesCombinationNV, + ) -> Result { + (self.get_physical_device_supported_framebuffer_mixed_samples_combinations_nv)( + physical_device, + p_combination_count, + p_combinations, + ) + } +} +#[doc = "Generated from \'VK_NV_coverage_reduction_mode\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV: Self = + StructureType(1_000_250_000); +} +#[doc = "Generated from \'VK_NV_coverage_reduction_mode\'"] +impl StructureType { + pub const PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV: Self = StructureType(1_000_250_001); +} +#[doc = "Generated from \'VK_NV_coverage_reduction_mode\'"] +impl StructureType { + pub const FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV: Self = StructureType(1_000_250_002); +} +impl ExtFragmentShaderInterlockFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_fragment_shader_interlock\0") + .expect("Wrong extension string") + } +} +pub struct ExtFragmentShaderInterlockFn {} +unsafe impl Send for ExtFragmentShaderInterlockFn {} +unsafe impl Sync for ExtFragmentShaderInterlockFn {} +impl ::std::clone::Clone for ExtFragmentShaderInterlockFn { 
+ fn clone(&self) -> Self { + ExtFragmentShaderInterlockFn {} + } +} +impl ExtFragmentShaderInterlockFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtFragmentShaderInterlockFn {} + } +} +#[doc = "Generated from \'VK_EXT_fragment_shader_interlock\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT: Self = + StructureType(1_000_251_000); +} +impl ExtYcbcrImageArraysFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_ycbcr_image_arrays\0") + .expect("Wrong extension string") + } +} +pub struct ExtYcbcrImageArraysFn {} +unsafe impl Send for ExtYcbcrImageArraysFn {} +unsafe impl Sync for ExtYcbcrImageArraysFn {} +impl ::std::clone::Clone for ExtYcbcrImageArraysFn { + fn clone(&self) -> Self { + ExtYcbcrImageArraysFn {} + } +} +impl ExtYcbcrImageArraysFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtYcbcrImageArraysFn {} + } +} +#[doc = "Generated from \'VK_EXT_ycbcr_image_arrays\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: Self = StructureType(1_000_252_000); +} +impl KhrUniformBufferStandardLayoutFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_uniform_buffer_standard_layout\0") + .expect("Wrong extension string") + } +} +pub struct KhrUniformBufferStandardLayoutFn {} +unsafe impl Send for KhrUniformBufferStandardLayoutFn {} +unsafe impl Sync for KhrUniformBufferStandardLayoutFn {} +impl ::std::clone::Clone for KhrUniformBufferStandardLayoutFn { + fn clone(&self) -> Self { + KhrUniformBufferStandardLayoutFn {} + } +} +impl KhrUniformBufferStandardLayoutFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrUniformBufferStandardLayoutFn {} + } +} +#[doc = "Generated from \'VK_KHR_uniform_buffer_standard_layout\'"] +impl StructureType { 
+ pub const PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES; +} +impl ExtExtension255Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_255\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension255Fn {} +unsafe impl Send for ExtExtension255Fn {} +unsafe impl Sync for ExtExtension255Fn {} +impl ::std::clone::Clone for ExtExtension255Fn { + fn clone(&self) -> Self { + ExtExtension255Fn {} + } +} +impl ExtExtension255Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension255Fn {} + } +} +impl ExtFullScreenExclusiveFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_full_screen_exclusive\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT = extern "system" fn( + physical_device: PhysicalDevice, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_present_mode_count: *mut u32, + p_present_modes: *mut PresentModeKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkAcquireFullScreenExclusiveModeEXT = + extern "system" fn(device: Device, swapchain: SwapchainKHR) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkReleaseFullScreenExclusiveModeEXT = + extern "system" fn(device: Device, swapchain: SwapchainKHR) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceGroupSurfacePresentModes2EXT = extern "system" fn( + device: Device, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_modes: *mut DeviceGroupPresentModeFlagsKHR, +) -> Result; +pub struct ExtFullScreenExclusiveFn { + pub get_physical_device_surface_present_modes2_ext: extern "system" fn( + physical_device: PhysicalDevice, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_present_mode_count: *mut u32, 
+ p_present_modes: *mut PresentModeKHR, + ) -> Result, + pub acquire_full_screen_exclusive_mode_ext: + extern "system" fn(device: Device, swapchain: SwapchainKHR) -> Result, + pub release_full_screen_exclusive_mode_ext: + extern "system" fn(device: Device, swapchain: SwapchainKHR) -> Result, + pub get_device_group_surface_present_modes2_ext: extern "system" fn( + device: Device, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_modes: *mut DeviceGroupPresentModeFlagsKHR, + ) -> Result, +} +unsafe impl Send for ExtFullScreenExclusiveFn {} +unsafe impl Sync for ExtFullScreenExclusiveFn {} +impl ::std::clone::Clone for ExtFullScreenExclusiveFn { + fn clone(&self) -> Self { + ExtFullScreenExclusiveFn { + get_physical_device_surface_present_modes2_ext: self + .get_physical_device_surface_present_modes2_ext, + acquire_full_screen_exclusive_mode_ext: self.acquire_full_screen_exclusive_mode_ext, + release_full_screen_exclusive_mode_ext: self.release_full_screen_exclusive_mode_ext, + get_device_group_surface_present_modes2_ext: self + .get_device_group_surface_present_modes2_ext, + } + } +} +impl ExtFullScreenExclusiveFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtFullScreenExclusiveFn { + get_physical_device_surface_present_modes2_ext: unsafe { + extern "system" fn get_physical_device_surface_present_modes2_ext( + _physical_device: PhysicalDevice, + _p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + _p_present_mode_count: *mut u32, + _p_present_modes: *mut PresentModeKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_physical_device_surface_present_modes2_ext) + )) + } + let raw_name = stringify!(vkGetPhysicalDeviceSurfacePresentModes2EXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_physical_device_surface_present_modes2_ext + } else { + ::std::mem::transmute(val) + } + }, + 
acquire_full_screen_exclusive_mode_ext: unsafe { + extern "system" fn acquire_full_screen_exclusive_mode_ext( + _device: Device, + _swapchain: SwapchainKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(acquire_full_screen_exclusive_mode_ext) + )) + } + let raw_name = stringify!(vkAcquireFullScreenExclusiveModeEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + acquire_full_screen_exclusive_mode_ext + } else { + ::std::mem::transmute(val) + } + }, + release_full_screen_exclusive_mode_ext: unsafe { + extern "system" fn release_full_screen_exclusive_mode_ext( + _device: Device, + _swapchain: SwapchainKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(release_full_screen_exclusive_mode_ext) + )) + } + let raw_name = stringify!(vkReleaseFullScreenExclusiveModeEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + release_full_screen_exclusive_mode_ext + } else { + ::std::mem::transmute(val) + } + }, + get_device_group_surface_present_modes2_ext: unsafe { + extern "system" fn get_device_group_surface_present_modes2_ext( + _device: Device, + _p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + _p_modes: *mut DeviceGroupPresentModeFlagsKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_device_group_surface_present_modes2_ext) + )) + } + let raw_name = stringify!(vkGetDeviceGroupSurfacePresentModes2EXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_group_surface_present_modes2_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_physical_device_surface_present_modes2_ext( + &self, + physical_device: PhysicalDevice, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_present_mode_count: *mut u32, + p_present_modes: *mut PresentModeKHR, + ) -> Result { + 
(self.get_physical_device_surface_present_modes2_ext)( + physical_device, + p_surface_info, + p_present_mode_count, + p_present_modes, + ) + } + #[doc = ""] + pub unsafe fn acquire_full_screen_exclusive_mode_ext( + &self, + device: Device, + swapchain: SwapchainKHR, + ) -> Result { + (self.acquire_full_screen_exclusive_mode_ext)(device, swapchain) + } + #[doc = ""] + pub unsafe fn release_full_screen_exclusive_mode_ext( + &self, + device: Device, + swapchain: SwapchainKHR, + ) -> Result { + (self.release_full_screen_exclusive_mode_ext)(device, swapchain) + } + #[doc = ""] + pub unsafe fn get_device_group_surface_present_modes2_ext( + &self, + device: Device, + p_surface_info: *const PhysicalDeviceSurfaceInfo2KHR, + p_modes: *mut DeviceGroupPresentModeFlagsKHR, + ) -> Result { + (self.get_device_group_surface_present_modes2_ext)(device, p_surface_info, p_modes) + } +} +#[doc = "Generated from \'VK_EXT_full_screen_exclusive\'"] +impl StructureType { + pub const SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT: Self = StructureType(1_000_255_000); +} +#[doc = "Generated from \'VK_EXT_full_screen_exclusive\'"] +impl StructureType { + pub const SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT: Self = StructureType(1_000_255_002); +} +#[doc = "Generated from \'VK_EXT_full_screen_exclusive\'"] +impl Result { + pub const ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT: Self = Result(-1_000_255_000); +} +#[doc = "Generated from \'VK_EXT_full_screen_exclusive\'"] +impl StructureType { + pub const SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT: Self = StructureType(1_000_255_001); +} +impl ExtHeadlessSurfaceFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_headless_surface\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateHeadlessSurfaceEXT = extern "system" fn( + instance: Instance, + p_create_info: *const HeadlessSurfaceCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_surface: 
*mut SurfaceKHR, +) -> Result; +pub struct ExtHeadlessSurfaceFn { + pub create_headless_surface_ext: extern "system" fn( + instance: Instance, + p_create_info: *const HeadlessSurfaceCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result, +} +unsafe impl Send for ExtHeadlessSurfaceFn {} +unsafe impl Sync for ExtHeadlessSurfaceFn {} +impl ::std::clone::Clone for ExtHeadlessSurfaceFn { + fn clone(&self) -> Self { + ExtHeadlessSurfaceFn { + create_headless_surface_ext: self.create_headless_surface_ext, + } + } +} +impl ExtHeadlessSurfaceFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtHeadlessSurfaceFn { + create_headless_surface_ext: unsafe { + extern "system" fn create_headless_surface_ext( + _instance: Instance, + _p_create_info: *const HeadlessSurfaceCreateInfoEXT, + _p_allocator: *const AllocationCallbacks, + _p_surface: *mut SurfaceKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_headless_surface_ext) + )) + } + let raw_name = stringify!(vkCreateHeadlessSurfaceEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_headless_surface_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_headless_surface_ext( + &self, + instance: Instance, + p_create_info: *const HeadlessSurfaceCreateInfoEXT, + p_allocator: *const AllocationCallbacks, + p_surface: *mut SurfaceKHR, + ) -> Result { + (self.create_headless_surface_ext)(instance, p_create_info, p_allocator, p_surface) + } +} +#[doc = "Generated from \'VK_EXT_headless_surface\'"] +impl StructureType { + pub const HEADLESS_SURFACE_CREATE_INFO_EXT: Self = StructureType(1_000_256_000); +} +impl KhrBufferDeviceAddressFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_buffer_device_address\0") + .expect("Wrong extension string") + } +} 
+#[allow(non_camel_case_types)] +pub type PFN_vkGetBufferOpaqueCaptureAddress = + extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfo) -> DeviceAddress; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeviceMemoryOpaqueCaptureAddress = + extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfo) -> u64; +pub struct KhrBufferDeviceAddressFn { + pub get_buffer_device_address_khr: + extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfo) -> DeviceAddress, + pub get_buffer_opaque_capture_address_khr: + extern "system" fn(device: Device, p_info: *const BufferDeviceAddressInfo) -> u64, + pub get_device_memory_opaque_capture_address_khr: extern "system" fn( + device: Device, + p_info: *const DeviceMemoryOpaqueCaptureAddressInfo, + ) -> u64, +} +unsafe impl Send for KhrBufferDeviceAddressFn {} +unsafe impl Sync for KhrBufferDeviceAddressFn {} +impl ::std::clone::Clone for KhrBufferDeviceAddressFn { + fn clone(&self) -> Self { + KhrBufferDeviceAddressFn { + get_buffer_device_address_khr: self.get_buffer_device_address_khr, + get_buffer_opaque_capture_address_khr: self.get_buffer_opaque_capture_address_khr, + get_device_memory_opaque_capture_address_khr: self + .get_device_memory_opaque_capture_address_khr, + } + } +} +impl KhrBufferDeviceAddressFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrBufferDeviceAddressFn { + get_buffer_device_address_khr: unsafe { + extern "system" fn get_buffer_device_address_khr( + _device: Device, + _p_info: *const BufferDeviceAddressInfo, + ) -> DeviceAddress { + panic!(concat!( + "Unable to load ", + stringify!(get_buffer_device_address_khr) + )) + } + let raw_name = stringify!(vkGetBufferDeviceAddressKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_buffer_device_address_khr + } else { + ::std::mem::transmute(val) + } + }, + get_buffer_opaque_capture_address_khr: 
unsafe { + extern "system" fn get_buffer_opaque_capture_address_khr( + _device: Device, + _p_info: *const BufferDeviceAddressInfo, + ) -> u64 { + panic!(concat!( + "Unable to load ", + stringify!(get_buffer_opaque_capture_address_khr) + )) + } + let raw_name = stringify!(vkGetBufferOpaqueCaptureAddressKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_buffer_opaque_capture_address_khr + } else { + ::std::mem::transmute(val) + } + }, + get_device_memory_opaque_capture_address_khr: unsafe { + extern "system" fn get_device_memory_opaque_capture_address_khr( + _device: Device, + _p_info: *const DeviceMemoryOpaqueCaptureAddressInfo, + ) -> u64 { + panic!(concat!( + "Unable to load ", + stringify!(get_device_memory_opaque_capture_address_khr) + )) + } + let raw_name = stringify!(vkGetDeviceMemoryOpaqueCaptureAddressKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_device_memory_opaque_capture_address_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_buffer_device_address_khr( + &self, + device: Device, + p_info: *const BufferDeviceAddressInfo, + ) -> DeviceAddress { + (self.get_buffer_device_address_khr)(device, p_info) + } + #[doc = ""] + pub unsafe fn get_buffer_opaque_capture_address_khr( + &self, + device: Device, + p_info: *const BufferDeviceAddressInfo, + ) -> u64 { + (self.get_buffer_opaque_capture_address_khr)(device, p_info) + } + #[doc = ""] + pub unsafe fn get_device_memory_opaque_capture_address_khr( + &self, + device: Device, + p_info: *const DeviceMemoryOpaqueCaptureAddressInfo, + ) -> u64 { + (self.get_device_memory_opaque_capture_address_khr)(device, p_info) + } +} +#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR: Self = + StructureType::PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES; +} 
+#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl StructureType { + pub const BUFFER_DEVICE_ADDRESS_INFO_KHR: Self = StructureType::BUFFER_DEVICE_ADDRESS_INFO; +} +#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl StructureType { + pub const BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR: Self = + StructureType::BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO; +} +#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl StructureType { + pub const MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR: Self = + StructureType::MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO; +} +#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl StructureType { + pub const DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR: Self = + StructureType::DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO; +} +#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl BufferUsageFlags { + pub const SHADER_DEVICE_ADDRESS_KHR: Self = BufferUsageFlags::SHADER_DEVICE_ADDRESS; +} +#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl BufferCreateFlags { + pub const DEVICE_ADDRESS_CAPTURE_REPLAY_KHR: Self = + BufferCreateFlags::DEVICE_ADDRESS_CAPTURE_REPLAY; +} +#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl MemoryAllocateFlags { + pub const DEVICE_ADDRESS_KHR: Self = MemoryAllocateFlags::DEVICE_ADDRESS; +} +#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl MemoryAllocateFlags { + pub const DEVICE_ADDRESS_CAPTURE_REPLAY_KHR: Self = + MemoryAllocateFlags::DEVICE_ADDRESS_CAPTURE_REPLAY; +} +#[doc = "Generated from \'VK_KHR_buffer_device_address\'"] +impl Result { + pub const ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR: Self = + Result::ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS; +} +impl ExtExtension259Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_259\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension259Fn {} +unsafe impl Send for ExtExtension259Fn {} 
+unsafe impl Sync for ExtExtension259Fn {} +impl ::std::clone::Clone for ExtExtension259Fn { + fn clone(&self) -> Self { + ExtExtension259Fn {} + } +} +impl ExtExtension259Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension259Fn {} + } +} +impl ExtLineRasterizationFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_line_rasterization\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCmdSetLineStippleEXT = extern "system" fn( + command_buffer: CommandBuffer, + line_stipple_factor: u32, + line_stipple_pattern: u16, +) -> c_void; +pub struct ExtLineRasterizationFn { + pub cmd_set_line_stipple_ext: extern "system" fn( + command_buffer: CommandBuffer, + line_stipple_factor: u32, + line_stipple_pattern: u16, + ) -> c_void, +} +unsafe impl Send for ExtLineRasterizationFn {} +unsafe impl Sync for ExtLineRasterizationFn {} +impl ::std::clone::Clone for ExtLineRasterizationFn { + fn clone(&self) -> Self { + ExtLineRasterizationFn { + cmd_set_line_stipple_ext: self.cmd_set_line_stipple_ext, + } + } +} +impl ExtLineRasterizationFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtLineRasterizationFn { + cmd_set_line_stipple_ext: unsafe { + extern "system" fn cmd_set_line_stipple_ext( + _command_buffer: CommandBuffer, + _line_stipple_factor: u32, + _line_stipple_pattern: u16, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_set_line_stipple_ext) + )) + } + let raw_name = stringify!(vkCmdSetLineStippleEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_set_line_stipple_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn cmd_set_line_stipple_ext( + &self, + command_buffer: CommandBuffer, + line_stipple_factor: u32, + line_stipple_pattern: u16, + ) -> 
c_void { + (self.cmd_set_line_stipple_ext)(command_buffer, line_stipple_factor, line_stipple_pattern) + } +} +#[doc = "Generated from \'VK_EXT_line_rasterization\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT: Self = StructureType(1_000_259_000); +} +#[doc = "Generated from \'VK_EXT_line_rasterization\'"] +impl StructureType { + pub const PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT: Self = + StructureType(1_000_259_001); +} +#[doc = "Generated from \'VK_EXT_line_rasterization\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT: Self = + StructureType(1_000_259_002); +} +#[doc = "Generated from \'VK_EXT_line_rasterization\'"] +impl DynamicState { + pub const LINE_STIPPLE_EXT: Self = DynamicState(1_000_259_000); +} +impl NvExtension261Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_261\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension261Fn {} +unsafe impl Send for NvExtension261Fn {} +unsafe impl Sync for NvExtension261Fn {} +impl ::std::clone::Clone for NvExtension261Fn { + fn clone(&self) -> Self { + NvExtension261Fn {} + } +} +impl NvExtension261Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension261Fn {} + } +} +impl ExtHostQueryResetFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_host_query_reset\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkResetQueryPool = extern "system" fn( + device: Device, + query_pool: QueryPool, + first_query: u32, + query_count: u32, +) -> c_void; +pub struct ExtHostQueryResetFn { + pub reset_query_pool_ext: extern "system" fn( + device: Device, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + ) -> c_void, +} +unsafe impl Send for ExtHostQueryResetFn {} +unsafe impl Sync for ExtHostQueryResetFn {} 
+impl ::std::clone::Clone for ExtHostQueryResetFn { + fn clone(&self) -> Self { + ExtHostQueryResetFn { + reset_query_pool_ext: self.reset_query_pool_ext, + } + } +} +impl ExtHostQueryResetFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtHostQueryResetFn { + reset_query_pool_ext: unsafe { + extern "system" fn reset_query_pool_ext( + _device: Device, + _query_pool: QueryPool, + _first_query: u32, + _query_count: u32, + ) -> c_void { + panic!(concat!("Unable to load ", stringify!(reset_query_pool_ext))) + } + let raw_name = stringify!(vkResetQueryPoolEXT); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + reset_query_pool_ext + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn reset_query_pool_ext( + &self, + device: Device, + query_pool: QueryPool, + first_query: u32, + query_count: u32, + ) -> c_void { + (self.reset_query_pool_ext)(device, query_pool, first_query, query_count) + } +} +#[doc = "Generated from \'VK_EXT_host_query_reset\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT: Self = + StructureType::PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES; +} +impl GgpExtension263Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GGP_extension_263\0") + .expect("Wrong extension string") + } +} +pub struct GgpExtension263Fn {} +unsafe impl Send for GgpExtension263Fn {} +unsafe impl Sync for GgpExtension263Fn {} +impl ::std::clone::Clone for GgpExtension263Fn { + fn clone(&self) -> Self { + GgpExtension263Fn {} + } +} +impl GgpExtension263Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + GgpExtension263Fn {} + } +} +impl BrcmExtension264Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_BRCM_extension_264\0") + .expect("Wrong extension string") + } +} +pub 
struct BrcmExtension264Fn {} +unsafe impl Send for BrcmExtension264Fn {} +unsafe impl Sync for BrcmExtension264Fn {} +impl ::std::clone::Clone for BrcmExtension264Fn { + fn clone(&self) -> Self { + BrcmExtension264Fn {} + } +} +impl BrcmExtension264Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + BrcmExtension264Fn {} + } +} +impl BrcmExtension265Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_BRCM_extension_265\0") + .expect("Wrong extension string") + } +} +pub struct BrcmExtension265Fn {} +unsafe impl Send for BrcmExtension265Fn {} +unsafe impl Sync for BrcmExtension265Fn {} +impl ::std::clone::Clone for BrcmExtension265Fn { + fn clone(&self) -> Self { + BrcmExtension265Fn {} + } +} +impl BrcmExtension265Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + BrcmExtension265Fn {} + } +} +impl ExtIndexTypeUint8Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_index_type_uint8\0") + .expect("Wrong extension string") + } +} +pub struct ExtIndexTypeUint8Fn {} +unsafe impl Send for ExtIndexTypeUint8Fn {} +unsafe impl Sync for ExtIndexTypeUint8Fn {} +impl ::std::clone::Clone for ExtIndexTypeUint8Fn { + fn clone(&self) -> Self { + ExtIndexTypeUint8Fn {} + } +} +impl ExtIndexTypeUint8Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtIndexTypeUint8Fn {} + } +} +#[doc = "Generated from \'VK_EXT_index_type_uint8\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: Self = StructureType(1_000_265_000); +} +#[doc = "Generated from \'VK_EXT_index_type_uint8\'"] +impl IndexType { + pub const UINT8_EXT: Self = IndexType(1_000_265_000); +} +impl ExtExtension267Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_267\0") + .expect("Wrong extension 
string") + } +} +pub struct ExtExtension267Fn {} +unsafe impl Send for ExtExtension267Fn {} +unsafe impl Sync for ExtExtension267Fn {} +impl ::std::clone::Clone for ExtExtension267Fn { + fn clone(&self) -> Self { + ExtExtension267Fn {} + } +} +impl ExtExtension267Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension267Fn {} + } +} +impl KhrExtension268Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_268\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension268Fn {} +unsafe impl Send for KhrExtension268Fn {} +unsafe impl Sync for KhrExtension268Fn {} +impl ::std::clone::Clone for KhrExtension268Fn { + fn clone(&self) -> Self { + KhrExtension268Fn {} + } +} +impl KhrExtension268Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension268Fn {} + } +} +impl KhrDeferredHostOperationsFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_deferred_host_operations\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkCreateDeferredOperationKHR = extern "system" fn( + device: Device, + p_allocator: *const AllocationCallbacks, + p_deferred_operation: *mut DeferredOperationKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyDeferredOperationKHR = extern "system" fn( + device: Device, + operation: DeferredOperationKHR, + p_allocator: *const AllocationCallbacks, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeferredOperationMaxConcurrencyKHR = + extern "system" fn(device: Device, operation: DeferredOperationKHR) -> u32; +#[allow(non_camel_case_types)] +pub type PFN_vkGetDeferredOperationResultKHR = + extern "system" fn(device: Device, operation: DeferredOperationKHR) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDeferredOperationJoinKHR = + extern 
"system" fn(device: Device, operation: DeferredOperationKHR) -> Result; +pub struct KhrDeferredHostOperationsFn { + pub create_deferred_operation_khr: extern "system" fn( + device: Device, + p_allocator: *const AllocationCallbacks, + p_deferred_operation: *mut DeferredOperationKHR, + ) -> Result, + pub destroy_deferred_operation_khr: extern "system" fn( + device: Device, + operation: DeferredOperationKHR, + p_allocator: *const AllocationCallbacks, + ) -> c_void, + pub get_deferred_operation_max_concurrency_khr: + extern "system" fn(device: Device, operation: DeferredOperationKHR) -> u32, + pub get_deferred_operation_result_khr: + extern "system" fn(device: Device, operation: DeferredOperationKHR) -> Result, + pub deferred_operation_join_khr: + extern "system" fn(device: Device, operation: DeferredOperationKHR) -> Result, +} +unsafe impl Send for KhrDeferredHostOperationsFn {} +unsafe impl Sync for KhrDeferredHostOperationsFn {} +impl ::std::clone::Clone for KhrDeferredHostOperationsFn { + fn clone(&self) -> Self { + KhrDeferredHostOperationsFn { + create_deferred_operation_khr: self.create_deferred_operation_khr, + destroy_deferred_operation_khr: self.destroy_deferred_operation_khr, + get_deferred_operation_max_concurrency_khr: self + .get_deferred_operation_max_concurrency_khr, + get_deferred_operation_result_khr: self.get_deferred_operation_result_khr, + deferred_operation_join_khr: self.deferred_operation_join_khr, + } + } +} +impl KhrDeferredHostOperationsFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrDeferredHostOperationsFn { + create_deferred_operation_khr: unsafe { + extern "system" fn create_deferred_operation_khr( + _device: Device, + _p_allocator: *const AllocationCallbacks, + _p_deferred_operation: *mut DeferredOperationKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_deferred_operation_khr) + )) + } + let raw_name = stringify!(vkCreateDeferredOperationKHR); + let 
cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_deferred_operation_khr + } else { + ::std::mem::transmute(val) + } + }, + destroy_deferred_operation_khr: unsafe { + extern "system" fn destroy_deferred_operation_khr( + _device: Device, + _operation: DeferredOperationKHR, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_deferred_operation_khr) + )) + } + let raw_name = stringify!(vkDestroyDeferredOperationKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_deferred_operation_khr + } else { + ::std::mem::transmute(val) + } + }, + get_deferred_operation_max_concurrency_khr: unsafe { + extern "system" fn get_deferred_operation_max_concurrency_khr( + _device: Device, + _operation: DeferredOperationKHR, + ) -> u32 { + panic!(concat!( + "Unable to load ", + stringify!(get_deferred_operation_max_concurrency_khr) + )) + } + let raw_name = stringify!(vkGetDeferredOperationMaxConcurrencyKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_deferred_operation_max_concurrency_khr + } else { + ::std::mem::transmute(val) + } + }, + get_deferred_operation_result_khr: unsafe { + extern "system" fn get_deferred_operation_result_khr( + _device: Device, + _operation: DeferredOperationKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_deferred_operation_result_khr) + )) + } + let raw_name = stringify!(vkGetDeferredOperationResultKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_deferred_operation_result_khr + } else { + ::std::mem::transmute(val) + } + }, + deferred_operation_join_khr: unsafe { + extern "system" fn deferred_operation_join_khr( + _device: Device, + _operation: DeferredOperationKHR, + ) -> Result { + panic!(concat!( + "Unable 
to load ", + stringify!(deferred_operation_join_khr) + )) + } + let raw_name = stringify!(vkDeferredOperationJoinKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + deferred_operation_join_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn create_deferred_operation_khr( + &self, + device: Device, + p_allocator: *const AllocationCallbacks, + p_deferred_operation: *mut DeferredOperationKHR, + ) -> Result { + (self.create_deferred_operation_khr)(device, p_allocator, p_deferred_operation) + } + #[doc = ""] + pub unsafe fn destroy_deferred_operation_khr( + &self, + device: Device, + operation: DeferredOperationKHR, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_deferred_operation_khr)(device, operation, p_allocator) + } + #[doc = ""] + pub unsafe fn get_deferred_operation_max_concurrency_khr( + &self, + device: Device, + operation: DeferredOperationKHR, + ) -> u32 { + (self.get_deferred_operation_max_concurrency_khr)(device, operation) + } + #[doc = ""] + pub unsafe fn get_deferred_operation_result_khr( + &self, + device: Device, + operation: DeferredOperationKHR, + ) -> Result { + (self.get_deferred_operation_result_khr)(device, operation) + } + #[doc = ""] + pub unsafe fn deferred_operation_join_khr( + &self, + device: Device, + operation: DeferredOperationKHR, + ) -> Result { + (self.deferred_operation_join_khr)(device, operation) + } +} +#[doc = "Generated from \'VK_KHR_deferred_host_operations\'"] +impl StructureType { + pub const DEFERRED_OPERATION_INFO_KHR: Self = StructureType(1_000_268_000); +} +#[doc = "Generated from \'VK_KHR_deferred_host_operations\'"] +impl ObjectType { + pub const DEFERRED_OPERATION_KHR: Self = ObjectType(1_000_268_000); +} +#[doc = "Generated from \'VK_KHR_deferred_host_operations\'"] +impl Result { + pub const THREAD_IDLE_KHR: Self = Result(1_000_268_000); +} +#[doc = "Generated from 
\'VK_KHR_deferred_host_operations\'"] +impl Result { + pub const THREAD_DONE_KHR: Self = Result(1_000_268_001); +} +#[doc = "Generated from \'VK_KHR_deferred_host_operations\'"] +impl Result { + pub const OPERATION_DEFERRED_KHR: Self = Result(1_000_268_002); +} +#[doc = "Generated from \'VK_KHR_deferred_host_operations\'"] +impl Result { + pub const OPERATION_NOT_DEFERRED_KHR: Self = Result(1_000_268_003); +} +impl KhrPipelineExecutablePropertiesFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_pipeline_executable_properties\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetPipelineExecutablePropertiesKHR = extern "system" fn( + device: Device, + p_pipeline_info: *const PipelineInfoKHR, + p_executable_count: *mut u32, + p_properties: *mut PipelineExecutablePropertiesKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPipelineExecutableStatisticsKHR = extern "system" fn( + device: Device, + p_executable_info: *const PipelineExecutableInfoKHR, + p_statistic_count: *mut u32, + p_statistics: *mut PipelineExecutableStatisticKHR, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkGetPipelineExecutableInternalRepresentationsKHR = extern "system" fn( + device: Device, + p_executable_info: *const PipelineExecutableInfoKHR, + p_internal_representation_count: *mut u32, + p_internal_representations: *mut PipelineExecutableInternalRepresentationKHR, +) -> Result; +pub struct KhrPipelineExecutablePropertiesFn { + pub get_pipeline_executable_properties_khr: extern "system" fn( + device: Device, + p_pipeline_info: *const PipelineInfoKHR, + p_executable_count: *mut u32, + p_properties: *mut PipelineExecutablePropertiesKHR, + ) -> Result, + pub get_pipeline_executable_statistics_khr: extern "system" fn( + device: Device, + p_executable_info: *const PipelineExecutableInfoKHR, + p_statistic_count: *mut u32, + p_statistics: *mut PipelineExecutableStatisticKHR, 
+ ) -> Result, + pub get_pipeline_executable_internal_representations_khr: extern "system" fn( + device: Device, + p_executable_info: *const PipelineExecutableInfoKHR, + p_internal_representation_count: *mut u32, + p_internal_representations: *mut PipelineExecutableInternalRepresentationKHR, + ) -> Result, +} +unsafe impl Send for KhrPipelineExecutablePropertiesFn {} +unsafe impl Sync for KhrPipelineExecutablePropertiesFn {} +impl ::std::clone::Clone for KhrPipelineExecutablePropertiesFn { + fn clone(&self) -> Self { + KhrPipelineExecutablePropertiesFn { + get_pipeline_executable_properties_khr: self.get_pipeline_executable_properties_khr, + get_pipeline_executable_statistics_khr: self.get_pipeline_executable_statistics_khr, + get_pipeline_executable_internal_representations_khr: self + .get_pipeline_executable_internal_representations_khr, + } + } +} +impl KhrPipelineExecutablePropertiesFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrPipelineExecutablePropertiesFn { + get_pipeline_executable_properties_khr: unsafe { + extern "system" fn get_pipeline_executable_properties_khr( + _device: Device, + _p_pipeline_info: *const PipelineInfoKHR, + _p_executable_count: *mut u32, + _p_properties: *mut PipelineExecutablePropertiesKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_pipeline_executable_properties_khr) + )) + } + let raw_name = stringify!(vkGetPipelineExecutablePropertiesKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_pipeline_executable_properties_khr + } else { + ::std::mem::transmute(val) + } + }, + get_pipeline_executable_statistics_khr: unsafe { + extern "system" fn get_pipeline_executable_statistics_khr( + _device: Device, + _p_executable_info: *const PipelineExecutableInfoKHR, + _p_statistic_count: *mut u32, + _p_statistics: *mut PipelineExecutableStatisticKHR, + ) -> Result { + panic!(concat!( + "Unable to 
load ", + stringify!(get_pipeline_executable_statistics_khr) + )) + } + let raw_name = stringify!(vkGetPipelineExecutableStatisticsKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_pipeline_executable_statistics_khr + } else { + ::std::mem::transmute(val) + } + }, + get_pipeline_executable_internal_representations_khr: unsafe { + extern "system" fn get_pipeline_executable_internal_representations_khr( + _device: Device, + _p_executable_info: *const PipelineExecutableInfoKHR, + _p_internal_representation_count: *mut u32, + _p_internal_representations: *mut PipelineExecutableInternalRepresentationKHR, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(get_pipeline_executable_internal_representations_khr) + )) + } + let raw_name = stringify!(vkGetPipelineExecutableInternalRepresentationsKHR); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_pipeline_executable_internal_representations_khr + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_pipeline_executable_properties_khr( + &self, + device: Device, + p_pipeline_info: *const PipelineInfoKHR, + p_executable_count: *mut u32, + p_properties: *mut PipelineExecutablePropertiesKHR, + ) -> Result { + (self.get_pipeline_executable_properties_khr)( + device, + p_pipeline_info, + p_executable_count, + p_properties, + ) + } + #[doc = ""] + pub unsafe fn get_pipeline_executable_statistics_khr( + &self, + device: Device, + p_executable_info: *const PipelineExecutableInfoKHR, + p_statistic_count: *mut u32, + p_statistics: *mut PipelineExecutableStatisticKHR, + ) -> Result { + (self.get_pipeline_executable_statistics_khr)( + device, + p_executable_info, + p_statistic_count, + p_statistics, + ) + } + #[doc = ""] + pub unsafe fn get_pipeline_executable_internal_representations_khr( + &self, + device: Device, + p_executable_info: *const 
PipelineExecutableInfoKHR, + p_internal_representation_count: *mut u32, + p_internal_representations: *mut PipelineExecutableInternalRepresentationKHR, + ) -> Result { + (self.get_pipeline_executable_internal_representations_khr)( + device, + p_executable_info, + p_internal_representation_count, + p_internal_representations, + ) + } +} +#[doc = "Generated from \'VK_KHR_pipeline_executable_properties\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR: Self = + StructureType(1_000_269_000); +} +#[doc = "Generated from \'VK_KHR_pipeline_executable_properties\'"] +impl StructureType { + pub const PIPELINE_INFO_KHR: Self = StructureType(1_000_269_001); +} +#[doc = "Generated from \'VK_KHR_pipeline_executable_properties\'"] +impl StructureType { + pub const PIPELINE_EXECUTABLE_PROPERTIES_KHR: Self = StructureType(1_000_269_002); +} +#[doc = "Generated from \'VK_KHR_pipeline_executable_properties\'"] +impl StructureType { + pub const PIPELINE_EXECUTABLE_INFO_KHR: Self = StructureType(1_000_269_003); +} +#[doc = "Generated from \'VK_KHR_pipeline_executable_properties\'"] +impl StructureType { + pub const PIPELINE_EXECUTABLE_STATISTIC_KHR: Self = StructureType(1_000_269_004); +} +#[doc = "Generated from \'VK_KHR_pipeline_executable_properties\'"] +impl StructureType { + pub const PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR: Self = StructureType(1_000_269_005); +} +#[doc = "Generated from \'VK_KHR_pipeline_executable_properties\'"] +impl PipelineCreateFlags { + pub const CAPTURE_STATISTICS_KHR: Self = PipelineCreateFlags(0b100_0000); +} +#[doc = "Generated from \'VK_KHR_pipeline_executable_properties\'"] +impl PipelineCreateFlags { + pub const CAPTURE_INTERNAL_REPRESENTATIONS_KHR: Self = PipelineCreateFlags(0b1000_0000); +} +impl IntelExtension271Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_271\0") + .expect("Wrong extension string") + } +} +pub struct 
IntelExtension271Fn {} +unsafe impl Send for IntelExtension271Fn {} +unsafe impl Sync for IntelExtension271Fn {} +impl ::std::clone::Clone for IntelExtension271Fn { + fn clone(&self) -> Self { + IntelExtension271Fn {} + } +} +impl IntelExtension271Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + IntelExtension271Fn {} + } +} +impl IntelExtension272Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_272\0") + .expect("Wrong extension string") + } +} +pub struct IntelExtension272Fn {} +unsafe impl Send for IntelExtension272Fn {} +unsafe impl Sync for IntelExtension272Fn {} +impl ::std::clone::Clone for IntelExtension272Fn { + fn clone(&self) -> Self { + IntelExtension272Fn {} + } +} +impl IntelExtension272Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + IntelExtension272Fn {} + } +} +impl IntelExtension273Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_273\0") + .expect("Wrong extension string") + } +} +pub struct IntelExtension273Fn {} +unsafe impl Send for IntelExtension273Fn {} +unsafe impl Sync for IntelExtension273Fn {} +impl ::std::clone::Clone for IntelExtension273Fn { + fn clone(&self) -> Self { + IntelExtension273Fn {} + } +} +impl IntelExtension273Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + IntelExtension273Fn {} + } +} +impl IntelExtension274Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_INTEL_extension_274\0") + .expect("Wrong extension string") + } +} +pub struct IntelExtension274Fn {} +unsafe impl Send for IntelExtension274Fn {} +unsafe impl Sync for IntelExtension274Fn {} +impl ::std::clone::Clone for IntelExtension274Fn { + fn clone(&self) -> Self { + IntelExtension274Fn {} + } +} +impl IntelExtension274Fn { + pub fn 
load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + IntelExtension274Fn {} + } +} +impl KhrExtension275Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_275\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension275Fn {} +unsafe impl Send for KhrExtension275Fn {} +unsafe impl Sync for KhrExtension275Fn {} +impl ::std::clone::Clone for KhrExtension275Fn { + fn clone(&self) -> Self { + KhrExtension275Fn {} + } +} +impl KhrExtension275Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension275Fn {} + } +} +impl KhrExtension276Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_276\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension276Fn {} +unsafe impl Send for KhrExtension276Fn {} +unsafe impl Sync for KhrExtension276Fn {} +impl ::std::clone::Clone for KhrExtension276Fn { + fn clone(&self) -> Self { + KhrExtension276Fn {} + } +} +impl KhrExtension276Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension276Fn {} + } +} +impl ExtShaderDemoteToHelperInvocationFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_shader_demote_to_helper_invocation\0") + .expect("Wrong extension string") + } +} +pub struct ExtShaderDemoteToHelperInvocationFn {} +unsafe impl Send for ExtShaderDemoteToHelperInvocationFn {} +unsafe impl Sync for ExtShaderDemoteToHelperInvocationFn {} +impl ::std::clone::Clone for ExtShaderDemoteToHelperInvocationFn { + fn clone(&self) -> Self { + ExtShaderDemoteToHelperInvocationFn {} + } +} +impl ExtShaderDemoteToHelperInvocationFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtShaderDemoteToHelperInvocationFn {} + } +} +#[doc = "Generated from 
\'VK_EXT_shader_demote_to_helper_invocation\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: Self = + StructureType(1_000_276_000); +} +impl NvDeviceGeneratedCommandsFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_device_generated_commands\0") + .expect("Wrong extension string") + } +} +#[allow(non_camel_case_types)] +pub type PFN_vkGetGeneratedCommandsMemoryRequirementsNV = extern "system" fn( + device: Device, + p_info: *const GeneratedCommandsMemoryRequirementsInfoNV, + p_memory_requirements: *mut MemoryRequirements2, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdPreprocessGeneratedCommandsNV = extern "system" fn( + command_buffer: CommandBuffer, + p_generated_commands_info: *const GeneratedCommandsInfoNV, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdExecuteGeneratedCommandsNV = extern "system" fn( + command_buffer: CommandBuffer, + is_preprocessed: Bool32, + p_generated_commands_info: *const GeneratedCommandsInfoNV, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCmdBindPipelineShaderGroupNV = extern "system" fn( + command_buffer: CommandBuffer, + pipeline_bind_point: PipelineBindPoint, + pipeline: Pipeline, + group_index: u32, +) -> c_void; +#[allow(non_camel_case_types)] +pub type PFN_vkCreateIndirectCommandsLayoutNV = extern "system" fn( + device: Device, + p_create_info: *const IndirectCommandsLayoutCreateInfoNV, + p_allocator: *const AllocationCallbacks, + p_indirect_commands_layout: *mut IndirectCommandsLayoutNV, +) -> Result; +#[allow(non_camel_case_types)] +pub type PFN_vkDestroyIndirectCommandsLayoutNV = extern "system" fn( + device: Device, + indirect_commands_layout: IndirectCommandsLayoutNV, + p_allocator: *const AllocationCallbacks, +) -> c_void; +pub struct NvDeviceGeneratedCommandsFn { + pub get_generated_commands_memory_requirements_nv: extern "system" fn( + device: Device, + p_info: 
*const GeneratedCommandsMemoryRequirementsInfoNV, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void, + pub cmd_preprocess_generated_commands_nv: extern "system" fn( + command_buffer: CommandBuffer, + p_generated_commands_info: *const GeneratedCommandsInfoNV, + ) -> c_void, + pub cmd_execute_generated_commands_nv: extern "system" fn( + command_buffer: CommandBuffer, + is_preprocessed: Bool32, + p_generated_commands_info: *const GeneratedCommandsInfoNV, + ) -> c_void, + pub cmd_bind_pipeline_shader_group_nv: extern "system" fn( + command_buffer: CommandBuffer, + pipeline_bind_point: PipelineBindPoint, + pipeline: Pipeline, + group_index: u32, + ) -> c_void, + pub create_indirect_commands_layout_nv: extern "system" fn( + device: Device, + p_create_info: *const IndirectCommandsLayoutCreateInfoNV, + p_allocator: *const AllocationCallbacks, + p_indirect_commands_layout: *mut IndirectCommandsLayoutNV, + ) -> Result, + pub destroy_indirect_commands_layout_nv: extern "system" fn( + device: Device, + indirect_commands_layout: IndirectCommandsLayoutNV, + p_allocator: *const AllocationCallbacks, + ) -> c_void, +} +unsafe impl Send for NvDeviceGeneratedCommandsFn {} +unsafe impl Sync for NvDeviceGeneratedCommandsFn {} +impl ::std::clone::Clone for NvDeviceGeneratedCommandsFn { + fn clone(&self) -> Self { + NvDeviceGeneratedCommandsFn { + get_generated_commands_memory_requirements_nv: self + .get_generated_commands_memory_requirements_nv, + cmd_preprocess_generated_commands_nv: self.cmd_preprocess_generated_commands_nv, + cmd_execute_generated_commands_nv: self.cmd_execute_generated_commands_nv, + cmd_bind_pipeline_shader_group_nv: self.cmd_bind_pipeline_shader_group_nv, + create_indirect_commands_layout_nv: self.create_indirect_commands_layout_nv, + destroy_indirect_commands_layout_nv: self.destroy_indirect_commands_layout_nv, + } + } +} +impl NvDeviceGeneratedCommandsFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + 
{ + NvDeviceGeneratedCommandsFn { + get_generated_commands_memory_requirements_nv: unsafe { + extern "system" fn get_generated_commands_memory_requirements_nv( + _device: Device, + _p_info: *const GeneratedCommandsMemoryRequirementsInfoNV, + _p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(get_generated_commands_memory_requirements_nv) + )) + } + let raw_name = stringify!(vkGetGeneratedCommandsMemoryRequirementsNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + get_generated_commands_memory_requirements_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_preprocess_generated_commands_nv: unsafe { + extern "system" fn cmd_preprocess_generated_commands_nv( + _command_buffer: CommandBuffer, + _p_generated_commands_info: *const GeneratedCommandsInfoNV, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_preprocess_generated_commands_nv) + )) + } + let raw_name = stringify!(vkCmdPreprocessGeneratedCommandsNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_preprocess_generated_commands_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_execute_generated_commands_nv: unsafe { + extern "system" fn cmd_execute_generated_commands_nv( + _command_buffer: CommandBuffer, + _is_preprocessed: Bool32, + _p_generated_commands_info: *const GeneratedCommandsInfoNV, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_execute_generated_commands_nv) + )) + } + let raw_name = stringify!(vkCmdExecuteGeneratedCommandsNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_execute_generated_commands_nv + } else { + ::std::mem::transmute(val) + } + }, + cmd_bind_pipeline_shader_group_nv: unsafe { + extern "system" fn cmd_bind_pipeline_shader_group_nv( + _command_buffer: CommandBuffer, + 
_pipeline_bind_point: PipelineBindPoint, + _pipeline: Pipeline, + _group_index: u32, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(cmd_bind_pipeline_shader_group_nv) + )) + } + let raw_name = stringify!(vkCmdBindPipelineShaderGroupNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + cmd_bind_pipeline_shader_group_nv + } else { + ::std::mem::transmute(val) + } + }, + create_indirect_commands_layout_nv: unsafe { + extern "system" fn create_indirect_commands_layout_nv( + _device: Device, + _p_create_info: *const IndirectCommandsLayoutCreateInfoNV, + _p_allocator: *const AllocationCallbacks, + _p_indirect_commands_layout: *mut IndirectCommandsLayoutNV, + ) -> Result { + panic!(concat!( + "Unable to load ", + stringify!(create_indirect_commands_layout_nv) + )) + } + let raw_name = stringify!(vkCreateIndirectCommandsLayoutNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + create_indirect_commands_layout_nv + } else { + ::std::mem::transmute(val) + } + }, + destroy_indirect_commands_layout_nv: unsafe { + extern "system" fn destroy_indirect_commands_layout_nv( + _device: Device, + _indirect_commands_layout: IndirectCommandsLayoutNV, + _p_allocator: *const AllocationCallbacks, + ) -> c_void { + panic!(concat!( + "Unable to load ", + stringify!(destroy_indirect_commands_layout_nv) + )) + } + let raw_name = stringify!(vkDestroyIndirectCommandsLayoutNV); + let cname = ::std::ffi::CString::new(raw_name).unwrap(); + let val = _f(&cname); + if val.is_null() { + destroy_indirect_commands_layout_nv + } else { + ::std::mem::transmute(val) + } + }, + } + } + #[doc = ""] + pub unsafe fn get_generated_commands_memory_requirements_nv( + &self, + device: Device, + p_info: *const GeneratedCommandsMemoryRequirementsInfoNV, + p_memory_requirements: *mut MemoryRequirements2, + ) -> c_void { + (self.get_generated_commands_memory_requirements_nv)(device, 
p_info, p_memory_requirements) + } + #[doc = ""] + pub unsafe fn cmd_preprocess_generated_commands_nv( + &self, + command_buffer: CommandBuffer, + p_generated_commands_info: *const GeneratedCommandsInfoNV, + ) -> c_void { + (self.cmd_preprocess_generated_commands_nv)(command_buffer, p_generated_commands_info) + } + #[doc = ""] + pub unsafe fn cmd_execute_generated_commands_nv( + &self, + command_buffer: CommandBuffer, + is_preprocessed: Bool32, + p_generated_commands_info: *const GeneratedCommandsInfoNV, + ) -> c_void { + (self.cmd_execute_generated_commands_nv)( + command_buffer, + is_preprocessed, + p_generated_commands_info, + ) + } + #[doc = ""] + pub unsafe fn cmd_bind_pipeline_shader_group_nv( + &self, + command_buffer: CommandBuffer, + pipeline_bind_point: PipelineBindPoint, + pipeline: Pipeline, + group_index: u32, + ) -> c_void { + (self.cmd_bind_pipeline_shader_group_nv)( + command_buffer, + pipeline_bind_point, + pipeline, + group_index, + ) + } + #[doc = ""] + pub unsafe fn create_indirect_commands_layout_nv( + &self, + device: Device, + p_create_info: *const IndirectCommandsLayoutCreateInfoNV, + p_allocator: *const AllocationCallbacks, + p_indirect_commands_layout: *mut IndirectCommandsLayoutNV, + ) -> Result { + (self.create_indirect_commands_layout_nv)( + device, + p_create_info, + p_allocator, + p_indirect_commands_layout, + ) + } + #[doc = ""] + pub unsafe fn destroy_indirect_commands_layout_nv( + &self, + device: Device, + indirect_commands_layout: IndirectCommandsLayoutNV, + p_allocator: *const AllocationCallbacks, + ) -> c_void { + (self.destroy_indirect_commands_layout_nv)(device, indirect_commands_layout, p_allocator) + } +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV: Self = + StructureType(1_000_277_000); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl StructureType { + pub const 
GRAPHICS_SHADER_GROUP_CREATE_INFO_NV: Self = StructureType(1_000_277_001); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl StructureType { + pub const GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV: Self = StructureType(1_000_277_002); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl StructureType { + pub const INDIRECT_COMMANDS_LAYOUT_TOKEN_NV: Self = StructureType(1_000_277_003); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl StructureType { + pub const INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV: Self = StructureType(1_000_277_004); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl StructureType { + pub const GENERATED_COMMANDS_INFO_NV: Self = StructureType(1_000_277_005); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl StructureType { + pub const GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV: Self = StructureType(1_000_277_006); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV: Self = + StructureType(1_000_277_007); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl PipelineCreateFlags { + pub const INDIRECT_BINDABLE_NV: Self = PipelineCreateFlags(0b100_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl PipelineStageFlags { + pub const COMMAND_PREPROCESS_NV: Self = PipelineStageFlags(0b10_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl AccessFlags { + pub const COMMAND_PREPROCESS_READ_NV: Self = AccessFlags(0b10_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl AccessFlags { + pub const COMMAND_PREPROCESS_WRITE_NV: Self = AccessFlags(0b100_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_NV_device_generated_commands\'"] +impl ObjectType { + pub const INDIRECT_COMMANDS_LAYOUT_NV: Self = 
ObjectType(1_000_277_000); +} +impl NvExtension279Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_279\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension279Fn {} +unsafe impl Send for NvExtension279Fn {} +unsafe impl Sync for NvExtension279Fn {} +impl ::std::clone::Clone for NvExtension279Fn { + fn clone(&self) -> Self { + NvExtension279Fn {} + } +} +impl NvExtension279Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension279Fn {} + } +} +impl KhrExtension280Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_280\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension280Fn {} +unsafe impl Send for KhrExtension280Fn {} +unsafe impl Sync for KhrExtension280Fn {} +impl ::std::clone::Clone for KhrExtension280Fn { + fn clone(&self) -> Self { + KhrExtension280Fn {} + } +} +impl KhrExtension280Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension280Fn {} + } +} +impl ArmExtension281Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_ARM_extension_281\0") + .expect("Wrong extension string") + } +} +pub struct ArmExtension281Fn {} +unsafe impl Send for ArmExtension281Fn {} +unsafe impl Sync for ArmExtension281Fn {} +impl ::std::clone::Clone for ArmExtension281Fn { + fn clone(&self) -> Self { + ArmExtension281Fn {} + } +} +impl ArmExtension281Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ArmExtension281Fn {} + } +} +impl ExtTexelBufferAlignmentFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_texel_buffer_alignment\0") + .expect("Wrong extension string") + } +} +pub struct ExtTexelBufferAlignmentFn {} +unsafe impl Send for ExtTexelBufferAlignmentFn {} +unsafe 
impl Sync for ExtTexelBufferAlignmentFn {} +impl ::std::clone::Clone for ExtTexelBufferAlignmentFn { + fn clone(&self) -> Self { + ExtTexelBufferAlignmentFn {} + } +} +impl ExtTexelBufferAlignmentFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtTexelBufferAlignmentFn {} + } +} +#[doc = "Generated from \'VK_EXT_texel_buffer_alignment\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT: Self = + StructureType(1_000_281_000); +} +#[doc = "Generated from \'VK_EXT_texel_buffer_alignment\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: Self = + StructureType(1_000_281_001); +} +impl QcomRenderPassTransformFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_render_pass_transform\0") + .expect("Wrong extension string") + } +} +pub struct QcomRenderPassTransformFn {} +unsafe impl Send for QcomRenderPassTransformFn {} +unsafe impl Sync for QcomRenderPassTransformFn {} +impl ::std::clone::Clone for QcomRenderPassTransformFn { + fn clone(&self) -> Self { + QcomRenderPassTransformFn {} + } +} +impl QcomRenderPassTransformFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomRenderPassTransformFn {} + } +} +#[doc = "Generated from \'VK_QCOM_render_pass_transform\'"] +impl StructureType { + pub const COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM: Self = + StructureType(1_000_282_000); +} +#[doc = "Generated from \'VK_QCOM_render_pass_transform\'"] +impl StructureType { + pub const RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM: Self = StructureType(1_000_282_001); +} +#[doc = "Generated from \'VK_QCOM_render_pass_transform\'"] +impl RenderPassCreateFlags { + pub const TRANSFORM_QCOM: Self = RenderPassCreateFlags(0b10); +} +impl ExtExtension284Fn { + pub fn name() -> &'static ::std::ffi::CStr { + 
::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_284\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension284Fn {} +unsafe impl Send for ExtExtension284Fn {} +unsafe impl Sync for ExtExtension284Fn {} +impl ::std::clone::Clone for ExtExtension284Fn { + fn clone(&self) -> Self { + ExtExtension284Fn {} + } +} +impl ExtExtension284Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension284Fn {} + } +} +impl ExtExtension285Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_285\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension285Fn {} +unsafe impl Send for ExtExtension285Fn {} +unsafe impl Sync for ExtExtension285Fn {} +impl ::std::clone::Clone for ExtExtension285Fn { + fn clone(&self) -> Self { + ExtExtension285Fn {} + } +} +impl ExtExtension285Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension285Fn {} + } +} +impl ExtExtension286Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_286\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension286Fn {} +unsafe impl Send for ExtExtension286Fn {} +unsafe impl Sync for ExtExtension286Fn {} +impl ::std::clone::Clone for ExtExtension286Fn { + fn clone(&self) -> Self { + ExtExtension286Fn {} + } +} +impl ExtExtension286Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension286Fn {} + } +} +impl NvxExtension287Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_extension_287\0") + .expect("Wrong extension string") + } +} +pub struct NvxExtension287Fn {} +unsafe impl Send for NvxExtension287Fn {} +unsafe impl Sync for NvxExtension287Fn {} +impl ::std::clone::Clone for NvxExtension287Fn { + fn clone(&self) -> Self { + 
NvxExtension287Fn {} + } +} +impl NvxExtension287Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvxExtension287Fn {} + } +} +impl NvxExtension288Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NVX_extension_288\0") + .expect("Wrong extension string") + } +} +pub struct NvxExtension288Fn {} +unsafe impl Send for NvxExtension288Fn {} +unsafe impl Sync for NvxExtension288Fn {} +impl ::std::clone::Clone for NvxExtension288Fn { + fn clone(&self) -> Self { + NvxExtension288Fn {} + } +} +impl NvxExtension288Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvxExtension288Fn {} + } +} +impl ExtExtension289Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_289\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension289Fn {} +unsafe impl Send for ExtExtension289Fn {} +unsafe impl Sync for ExtExtension289Fn {} +impl ::std::clone::Clone for ExtExtension289Fn { + fn clone(&self) -> Self { + ExtExtension289Fn {} + } +} +impl ExtExtension289Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension289Fn {} + } +} +impl GoogleUserTypeFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_GOOGLE_user_type\0") + .expect("Wrong extension string") + } +} +pub struct GoogleUserTypeFn {} +unsafe impl Send for GoogleUserTypeFn {} +unsafe impl Sync for GoogleUserTypeFn {} +impl ::std::clone::Clone for GoogleUserTypeFn { + fn clone(&self) -> Self { + GoogleUserTypeFn {} + } +} +impl GoogleUserTypeFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + GoogleUserTypeFn {} + } +} +impl KhrPipelineLibraryFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_pipeline_library\0") + 
.expect("Wrong extension string") + } +} +pub struct KhrPipelineLibraryFn {} +unsafe impl Send for KhrPipelineLibraryFn {} +unsafe impl Sync for KhrPipelineLibraryFn {} +impl ::std::clone::Clone for KhrPipelineLibraryFn { + fn clone(&self) -> Self { + KhrPipelineLibraryFn {} + } +} +impl KhrPipelineLibraryFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrPipelineLibraryFn {} + } +} +#[doc = "Generated from \'VK_KHR_pipeline_library\'"] +impl PipelineCreateFlags { + pub const LIBRARY_KHR: Self = PipelineCreateFlags(0b1000_0000_0000); +} +#[doc = "Generated from \'VK_KHR_pipeline_library\'"] +impl StructureType { + pub const PIPELINE_LIBRARY_CREATE_INFO_KHR: Self = StructureType(1_000_290_000); +} +impl NvExtension292Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_292\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension292Fn {} +unsafe impl Send for NvExtension292Fn {} +unsafe impl Sync for NvExtension292Fn {} +impl ::std::clone::Clone for NvExtension292Fn { + fn clone(&self) -> Self { + NvExtension292Fn {} + } +} +impl NvExtension292Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension292Fn {} + } +} +impl NvExtension293Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_293\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension293Fn {} +unsafe impl Send for NvExtension293Fn {} +unsafe impl Sync for NvExtension293Fn {} +impl ::std::clone::Clone for NvExtension293Fn { + fn clone(&self) -> Self { + NvExtension293Fn {} + } +} +impl NvExtension293Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension293Fn {} + } +} +impl KhrShaderNonSemanticInfoFn { + pub fn name() -> &'static ::std::ffi::CStr { + 
::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_shader_non_semantic_info\0") + .expect("Wrong extension string") + } +} +pub struct KhrShaderNonSemanticInfoFn {} +unsafe impl Send for KhrShaderNonSemanticInfoFn {} +unsafe impl Sync for KhrShaderNonSemanticInfoFn {} +impl ::std::clone::Clone for KhrShaderNonSemanticInfoFn { + fn clone(&self) -> Self { + KhrShaderNonSemanticInfoFn {} + } +} +impl KhrShaderNonSemanticInfoFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrShaderNonSemanticInfoFn {} + } +} +impl KhrExtension295Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_295\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension295Fn {} +unsafe impl Send for KhrExtension295Fn {} +unsafe impl Sync for KhrExtension295Fn {} +impl ::std::clone::Clone for KhrExtension295Fn { + fn clone(&self) -> Self { + KhrExtension295Fn {} + } +} +impl KhrExtension295Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension295Fn {} + } +} +impl NvExtension296Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_296\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension296Fn {} +unsafe impl Send for NvExtension296Fn {} +unsafe impl Sync for NvExtension296Fn {} +impl ::std::clone::Clone for NvExtension296Fn { + fn clone(&self) -> Self { + NvExtension296Fn {} + } +} +impl NvExtension296Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension296Fn {} + } +} +impl KhrExtension297Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_297\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension297Fn {} +unsafe impl Send for KhrExtension297Fn {} +unsafe impl Sync for KhrExtension297Fn {} +impl ::std::clone::Clone for 
KhrExtension297Fn { + fn clone(&self) -> Self { + KhrExtension297Fn {} + } +} +impl KhrExtension297Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension297Fn {} + } +} +#[doc = "Generated from \'VK_KHR_extension_297\'"] +impl PipelineShaderStageCreateFlags { + pub const RESERVED_3_KHR: Self = PipelineShaderStageCreateFlags(0b1000); +} +impl ExtPipelineCreationCacheControlFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_pipeline_creation_cache_control\0") + .expect("Wrong extension string") + } +} +pub struct ExtPipelineCreationCacheControlFn {} +unsafe impl Send for ExtPipelineCreationCacheControlFn {} +unsafe impl Sync for ExtPipelineCreationCacheControlFn {} +impl ::std::clone::Clone for ExtPipelineCreationCacheControlFn { + fn clone(&self) -> Self { + ExtPipelineCreationCacheControlFn {} + } +} +impl ExtPipelineCreationCacheControlFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtPipelineCreationCacheControlFn {} + } +} +#[doc = "Generated from \'VK_EXT_pipeline_creation_cache_control\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT: Self = + StructureType(1_000_297_000); +} +#[doc = "Generated from \'VK_EXT_pipeline_creation_cache_control\'"] +impl PipelineCreateFlags { + pub const FAIL_ON_PIPELINE_COMPILE_REQUIRED_EXT: Self = PipelineCreateFlags(0b1_0000_0000); +} +#[doc = "Generated from \'VK_EXT_pipeline_creation_cache_control\'"] +impl PipelineCreateFlags { + pub const EARLY_RETURN_ON_FAILURE_EXT: Self = PipelineCreateFlags(0b10_0000_0000); +} +#[doc = "Generated from \'VK_EXT_pipeline_creation_cache_control\'"] +impl Result { + pub const ERROR_PIPELINE_COMPILE_REQUIRED_EXT: Self = Result(1_000_297_000); +} +#[doc = "Generated from \'VK_EXT_pipeline_creation_cache_control\'"] +impl PipelineCacheCreateFlags { + pub const 
EXTERNALLY_SYNCHRONIZED_EXT: Self = PipelineCacheCreateFlags(0b1); +} +impl KhrExtension299Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_299\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension299Fn {} +unsafe impl Send for KhrExtension299Fn {} +unsafe impl Sync for KhrExtension299Fn {} +impl ::std::clone::Clone for KhrExtension299Fn { + fn clone(&self) -> Self { + KhrExtension299Fn {} + } +} +impl KhrExtension299Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension299Fn {} + } +} +impl KhrExtension300Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_300\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension300Fn {} +unsafe impl Send for KhrExtension300Fn {} +unsafe impl Sync for KhrExtension300Fn {} +impl ::std::clone::Clone for KhrExtension300Fn { + fn clone(&self) -> Self { + KhrExtension300Fn {} + } +} +impl KhrExtension300Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension300Fn {} + } +} +impl NvDeviceDiagnosticsConfigFn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_device_diagnostics_config\0") + .expect("Wrong extension string") + } +} +pub struct NvDeviceDiagnosticsConfigFn {} +unsafe impl Send for NvDeviceDiagnosticsConfigFn {} +unsafe impl Sync for NvDeviceDiagnosticsConfigFn {} +impl ::std::clone::Clone for NvDeviceDiagnosticsConfigFn { + fn clone(&self) -> Self { + NvDeviceDiagnosticsConfigFn {} + } +} +impl NvDeviceDiagnosticsConfigFn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvDeviceDiagnosticsConfigFn {} + } +} +#[doc = "Generated from \'VK_NV_device_diagnostics_config\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV: Self = 
StructureType(1_000_300_000); +} +#[doc = "Generated from \'VK_NV_device_diagnostics_config\'"] +impl StructureType { + pub const DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV: Self = StructureType(1_000_300_001); +} +impl QcomExtension302Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_302\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension302Fn {} +unsafe impl Send for QcomExtension302Fn {} +unsafe impl Sync for QcomExtension302Fn {} +impl ::std::clone::Clone for QcomExtension302Fn { + fn clone(&self) -> Self { + QcomExtension302Fn {} + } +} +impl QcomExtension302Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension302Fn {} + } +} +impl QcomExtension303Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_303\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension303Fn {} +unsafe impl Send for QcomExtension303Fn {} +unsafe impl Sync for QcomExtension303Fn {} +impl ::std::clone::Clone for QcomExtension303Fn { + fn clone(&self) -> Self { + QcomExtension303Fn {} + } +} +impl QcomExtension303Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension303Fn {} + } +} +impl QcomExtension304Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_304\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension304Fn {} +unsafe impl Send for QcomExtension304Fn {} +unsafe impl Sync for QcomExtension304Fn {} +impl ::std::clone::Clone for QcomExtension304Fn { + fn clone(&self) -> Self { + QcomExtension304Fn {} + } +} +impl QcomExtension304Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension304Fn {} + } +} +impl QcomExtension305Fn { + pub fn name() -> &'static ::std::ffi::CStr { + 
::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_305\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension305Fn {} +unsafe impl Send for QcomExtension305Fn {} +unsafe impl Sync for QcomExtension305Fn {} +impl ::std::clone::Clone for QcomExtension305Fn { + fn clone(&self) -> Self { + QcomExtension305Fn {} + } +} +impl QcomExtension305Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension305Fn {} + } +} +impl QcomExtension306Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_306\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension306Fn {} +unsafe impl Send for QcomExtension306Fn {} +unsafe impl Sync for QcomExtension306Fn {} +impl ::std::clone::Clone for QcomExtension306Fn { + fn clone(&self) -> Self { + QcomExtension306Fn {} + } +} +impl QcomExtension306Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension306Fn {} + } +} +impl QcomExtension307Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_307\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension307Fn {} +unsafe impl Send for QcomExtension307Fn {} +unsafe impl Sync for QcomExtension307Fn {} +impl ::std::clone::Clone for QcomExtension307Fn { + fn clone(&self) -> Self { + QcomExtension307Fn {} + } +} +impl QcomExtension307Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension307Fn {} + } +} +impl NvExtension308Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_308\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension308Fn {} +unsafe impl Send for NvExtension308Fn {} +unsafe impl Sync for NvExtension308Fn {} +impl ::std::clone::Clone for NvExtension308Fn { + fn clone(&self) -> Self 
{ + NvExtension308Fn {} + } +} +impl NvExtension308Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension308Fn {} + } +} +impl KhrExtension309Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_KHR_extension_309\0") + .expect("Wrong extension string") + } +} +pub struct KhrExtension309Fn {} +unsafe impl Send for KhrExtension309Fn {} +unsafe impl Sync for KhrExtension309Fn {} +impl ::std::clone::Clone for KhrExtension309Fn { + fn clone(&self) -> Self { + KhrExtension309Fn {} + } +} +impl KhrExtension309Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + KhrExtension309Fn {} + } +} +#[doc = "Generated from \'VK_KHR_extension_309\'"] +impl MemoryHeapFlags { + pub const RESERVED_2_KHR: Self = MemoryHeapFlags(0b100); +} +impl QcomExtension310Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_QCOM_extension_310\0") + .expect("Wrong extension string") + } +} +pub struct QcomExtension310Fn {} +unsafe impl Send for QcomExtension310Fn {} +unsafe impl Sync for QcomExtension310Fn {} +impl ::std::clone::Clone for QcomExtension310Fn { + fn clone(&self) -> Self { + QcomExtension310Fn {} + } +} +impl QcomExtension310Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + QcomExtension310Fn {} + } +} +#[doc = "Generated from \'VK_QCOM_extension_310\'"] +impl StructureType { + pub const RESERVED_QCOM: Self = StructureType(1_000_309_000); +} +impl NvExtension311Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_NV_extension_311\0") + .expect("Wrong extension string") + } +} +pub struct NvExtension311Fn {} +unsafe impl Send for NvExtension311Fn {} +unsafe impl Sync for NvExtension311Fn {} +impl ::std::clone::Clone for NvExtension311Fn { + fn clone(&self) -> Self { + NvExtension311Fn {} + } +} +impl 
NvExtension311Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + NvExtension311Fn {} + } +} +impl ExtExtension312Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_312\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension312Fn {} +unsafe impl Send for ExtExtension312Fn {} +unsafe impl Sync for ExtExtension312Fn {} +impl ::std::clone::Clone for ExtExtension312Fn { + fn clone(&self) -> Self { + ExtExtension312Fn {} + } +} +impl ExtExtension312Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension312Fn {} + } +} +impl ExtExtension313Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_EXT_extension_313\0") + .expect("Wrong extension string") + } +} +pub struct ExtExtension313Fn {} +unsafe impl Send for ExtExtension313Fn {} +unsafe impl Sync for ExtExtension313Fn {} +impl ::std::clone::Clone for ExtExtension313Fn { + fn clone(&self) -> Self { + ExtExtension313Fn {} + } +} +impl ExtExtension313Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + ExtExtension313Fn {} + } +} +impl AmdExtension314Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_314\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension314Fn {} +unsafe impl Send for AmdExtension314Fn {} +unsafe impl Sync for AmdExtension314Fn {} +impl ::std::clone::Clone for AmdExtension314Fn { + fn clone(&self) -> Self { + AmdExtension314Fn {} + } +} +impl AmdExtension314Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension314Fn {} + } +} +impl AmdExtension315Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_315\0") + .expect("Wrong extension string") + } 
+} +pub struct AmdExtension315Fn {} +unsafe impl Send for AmdExtension315Fn {} +unsafe impl Sync for AmdExtension315Fn {} +impl ::std::clone::Clone for AmdExtension315Fn { + fn clone(&self) -> Self { + AmdExtension315Fn {} + } +} +impl AmdExtension315Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension315Fn {} + } +} +impl AmdExtension316Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_316\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension316Fn {} +unsafe impl Send for AmdExtension316Fn {} +unsafe impl Sync for AmdExtension316Fn {} +impl ::std::clone::Clone for AmdExtension316Fn { + fn clone(&self) -> Self { + AmdExtension316Fn {} + } +} +impl AmdExtension316Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension316Fn {} + } +} +impl AmdExtension317Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_317\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension317Fn {} +unsafe impl Send for AmdExtension317Fn {} +unsafe impl Sync for AmdExtension317Fn {} +impl ::std::clone::Clone for AmdExtension317Fn { + fn clone(&self) -> Self { + AmdExtension317Fn {} + } +} +impl AmdExtension317Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension317Fn {} + } +} +impl AmdExtension318Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_318\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension318Fn {} +unsafe impl Send for AmdExtension318Fn {} +unsafe impl Sync for AmdExtension318Fn {} +impl ::std::clone::Clone for AmdExtension318Fn { + fn clone(&self) -> Self { + AmdExtension318Fn {} + } +} +impl AmdExtension318Fn { + pub fn load(mut _f: F) -> Self + where + F: 
FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension318Fn {} + } +} +impl AmdExtension319Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_319\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension319Fn {} +unsafe impl Send for AmdExtension319Fn {} +unsafe impl Sync for AmdExtension319Fn {} +impl ::std::clone::Clone for AmdExtension319Fn { + fn clone(&self) -> Self { + AmdExtension319Fn {} + } +} +impl AmdExtension319Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension319Fn {} + } +} +impl AmdExtension320Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_320\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension320Fn {} +unsafe impl Send for AmdExtension320Fn {} +unsafe impl Sync for AmdExtension320Fn {} +impl ::std::clone::Clone for AmdExtension320Fn { + fn clone(&self) -> Self { + AmdExtension320Fn {} + } +} +impl AmdExtension320Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension320Fn {} + } +} +impl AmdExtension321Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_321\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension321Fn {} +unsafe impl Send for AmdExtension321Fn {} +unsafe impl Sync for AmdExtension321Fn {} +impl ::std::clone::Clone for AmdExtension321Fn { + fn clone(&self) -> Self { + AmdExtension321Fn {} + } +} +impl AmdExtension321Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension321Fn {} + } +} +impl AmdExtension322Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_322\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension322Fn {} +unsafe impl Send for 
AmdExtension322Fn {} +unsafe impl Sync for AmdExtension322Fn {} +impl ::std::clone::Clone for AmdExtension322Fn { + fn clone(&self) -> Self { + AmdExtension322Fn {} + } +} +impl AmdExtension322Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension322Fn {} + } +} +impl AmdExtension323Fn { + pub fn name() -> &'static ::std::ffi::CStr { + ::std::ffi::CStr::from_bytes_with_nul(b"VK_AMD_extension_323\0") + .expect("Wrong extension string") + } +} +pub struct AmdExtension323Fn {} +unsafe impl Send for AmdExtension323Fn {} +unsafe impl Sync for AmdExtension323Fn {} +impl ::std::clone::Clone for AmdExtension323Fn { + fn clone(&self) -> Self { + AmdExtension323Fn {} + } +} +impl AmdExtension323Fn { + pub fn load(mut _f: F) -> Self + where + F: FnMut(&::std::ffi::CStr) -> *const c_void, + { + AmdExtension323Fn {} } } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: Self = StructureType(1000094000); + pub const PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: Self = StructureType(1_000_094_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const BIND_BUFFER_MEMORY_INFO: Self = StructureType(1000157000); + pub const BIND_BUFFER_MEMORY_INFO: Self = StructureType(1_000_157_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const BIND_IMAGE_MEMORY_INFO: Self = StructureType(1000157001); + pub const BIND_IMAGE_MEMORY_INFO: Self = StructureType(1_000_157_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageCreateFlags { - pub const ALIAS: Self = ImageCreateFlags(0b10000000000); + pub const ALIAS: Self = ImageCreateFlags(0b100_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: Self = StructureType(1000083000); + pub const PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: Self = StructureType(1_000_083_000); } #[doc = "Generated from 
\'VK_VERSION_1_1\'"] impl StructureType { - pub const MEMORY_DEDICATED_REQUIREMENTS: Self = StructureType(1000127000); + pub const MEMORY_DEDICATED_REQUIREMENTS: Self = StructureType(1_000_127_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const MEMORY_DEDICATED_ALLOCATE_INFO: Self = StructureType(1000127001); + pub const MEMORY_DEDICATED_ALLOCATE_INFO: Self = StructureType(1_000_127_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const MEMORY_ALLOCATE_FLAGS_INFO: Self = StructureType(1000060000); + pub const MEMORY_ALLOCATE_FLAGS_INFO: Self = StructureType(1_000_060_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const DEVICE_GROUP_RENDER_PASS_BEGIN_INFO: Self = StructureType(1000060003); + pub const DEVICE_GROUP_RENDER_PASS_BEGIN_INFO: Self = StructureType(1_000_060_003); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO: Self = StructureType(1000060004); + pub const DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO: Self = StructureType(1_000_060_004); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const DEVICE_GROUP_SUBMIT_INFO: Self = StructureType(1000060005); + pub const DEVICE_GROUP_SUBMIT_INFO: Self = StructureType(1_000_060_005); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const DEVICE_GROUP_BIND_SPARSE_INFO: Self = StructureType(1000060006); + pub const DEVICE_GROUP_BIND_SPARSE_INFO: Self = StructureType(1_000_060_006); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl PipelineCreateFlags { @@ -58887,7 +77734,7 @@ impl PipelineCreateFlags { } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl PipelineCreateFlags { - pub const DISPATCH_BASE: Self = PipelineCreateFlags(0b10000); + pub const DISPATCH_BASE: Self = PipelineCreateFlags(0b1_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl DependencyFlags { @@ -58895,23 +77742,23 @@ impl 
DependencyFlags { } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO: Self = StructureType(1000060013); + pub const BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO: Self = StructureType(1_000_060_013); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO: Self = StructureType(1000060014); + pub const BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO: Self = StructureType(1_000_060_014); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageCreateFlags { - pub const SPLIT_INSTANCE_BIND_REGIONS: Self = ImageCreateFlags(0b1000000); + pub const SPLIT_INSTANCE_BIND_REGIONS: Self = ImageCreateFlags(0b100_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_GROUP_PROPERTIES: Self = StructureType(1000070000); + pub const PHYSICAL_DEVICE_GROUP_PROPERTIES: Self = StructureType(1_000_070_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const DEVICE_GROUP_DEVICE_CREATE_INFO: Self = StructureType(1000070001); + pub const DEVICE_GROUP_DEVICE_CREATE_INFO: Self = StructureType(1_000_070_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl MemoryHeapFlags { @@ -58919,120 +77766,120 @@ impl MemoryHeapFlags { } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const BUFFER_MEMORY_REQUIREMENTS_INFO_2: Self = StructureType(1000146000); + pub const BUFFER_MEMORY_REQUIREMENTS_INFO_2: Self = StructureType(1_000_146_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const IMAGE_MEMORY_REQUIREMENTS_INFO_2: Self = StructureType(1000146001); + pub const IMAGE_MEMORY_REQUIREMENTS_INFO_2: Self = StructureType(1_000_146_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2: Self = StructureType(1000146002); + pub const IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2: Self = 
StructureType(1_000_146_002); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const MEMORY_REQUIREMENTS_2: Self = StructureType(1000146003); + pub const MEMORY_REQUIREMENTS_2: Self = StructureType(1_000_146_003); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const SPARSE_IMAGE_MEMORY_REQUIREMENTS_2: Self = StructureType(1000146004); + pub const SPARSE_IMAGE_MEMORY_REQUIREMENTS_2: Self = StructureType(1_000_146_004); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_FEATURES_2: Self = StructureType(1000059000); + pub const PHYSICAL_DEVICE_FEATURES_2: Self = StructureType(1_000_059_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_PROPERTIES_2: Self = StructureType(1000059001); + pub const PHYSICAL_DEVICE_PROPERTIES_2: Self = StructureType(1_000_059_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const FORMAT_PROPERTIES_2: Self = StructureType(1000059002); + pub const FORMAT_PROPERTIES_2: Self = StructureType(1_000_059_002); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const IMAGE_FORMAT_PROPERTIES_2: Self = StructureType(1000059003); + pub const IMAGE_FORMAT_PROPERTIES_2: Self = StructureType(1_000_059_003); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2: Self = StructureType(1000059004); + pub const PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2: Self = StructureType(1_000_059_004); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const QUEUE_FAMILY_PROPERTIES_2: Self = StructureType(1000059005); + pub const QUEUE_FAMILY_PROPERTIES_2: Self = StructureType(1_000_059_005); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_MEMORY_PROPERTIES_2: Self = StructureType(1000059006); + pub const PHYSICAL_DEVICE_MEMORY_PROPERTIES_2: Self = 
StructureType(1_000_059_006); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const SPARSE_IMAGE_FORMAT_PROPERTIES_2: Self = StructureType(1000059007); + pub const SPARSE_IMAGE_FORMAT_PROPERTIES_2: Self = StructureType(1_000_059_007); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2: Self = StructureType(1000059008); + pub const PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2: Self = StructureType(1_000_059_008); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Result { - pub const ERROR_OUT_OF_POOL_MEMORY: Self = Result(-1000069000); + pub const ERROR_OUT_OF_POOL_MEMORY: Self = Result(-1_000_069_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl FormatFeatureFlags { - pub const TRANSFER_SRC: Self = FormatFeatureFlags(0b100000000000000); + pub const TRANSFER_SRC: Self = FormatFeatureFlags(0b100_0000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl FormatFeatureFlags { - pub const TRANSFER_DST: Self = FormatFeatureFlags(0b1000000000000000); + pub const TRANSFER_DST: Self = FormatFeatureFlags(0b1000_0000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageCreateFlags { - pub const TYPE_2D_ARRAY_COMPATIBLE: Self = ImageCreateFlags(0b100000); + pub const TYPE_2D_ARRAY_COMPATIBLE: Self = ImageCreateFlags(0b10_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageCreateFlags { - pub const BLOCK_TEXEL_VIEW_COMPATIBLE: Self = ImageCreateFlags(0b10000000); + pub const BLOCK_TEXEL_VIEW_COMPATIBLE: Self = ImageCreateFlags(0b1000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageCreateFlags { - pub const EXTENDED_USAGE: Self = ImageCreateFlags(0b100000000); + pub const EXTENDED_USAGE: Self = ImageCreateFlags(0b1_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: Self = StructureType(1000117000); + pub const 
PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: Self = StructureType(1_000_117_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO: Self = StructureType(1000117001); + pub const RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO: Self = StructureType(1_000_117_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const IMAGE_VIEW_USAGE_CREATE_INFO: Self = StructureType(1000117002); + pub const IMAGE_VIEW_USAGE_CREATE_INFO: Self = StructureType(1_000_117_002); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { pub const PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO: Self = - StructureType(1000117003); + StructureType(1_000_117_003); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageLayout { - pub const DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: Self = ImageLayout(1000117000); + pub const DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: Self = ImageLayout(1_000_117_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageLayout { - pub const DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: Self = ImageLayout(1000117001); + pub const DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: Self = ImageLayout(1_000_117_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const RENDER_PASS_MULTIVIEW_CREATE_INFO: Self = StructureType(1000053000); + pub const RENDER_PASS_MULTIVIEW_CREATE_INFO: Self = StructureType(1_000_053_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_MULTIVIEW_FEATURES: Self = StructureType(1000053001); + pub const PHYSICAL_DEVICE_MULTIVIEW_FEATURES: Self = StructureType(1_000_053_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: Self = StructureType(1000053002); + pub const PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: Self = StructureType(1_000_053_002); } #[doc = "Generated from 
\'VK_VERSION_1_1\'"] impl DependencyFlags { @@ -59040,27 +77887,32 @@ impl DependencyFlags { } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES: Self = StructureType(1000120000); + pub const PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: Self = StructureType(1_000_120_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PROTECTED_SUBMIT_INFO: Self = StructureType(1000145000); + pub const PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES: Self = + StructureType::PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES; } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: Self = StructureType(1000145001); + pub const PROTECTED_SUBMIT_INFO: Self = StructureType(1_000_145_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: Self = StructureType(1000145002); + pub const PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: Self = StructureType(1_000_145_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const DEVICE_QUEUE_INFO_2: Self = StructureType(1000145003); + pub const PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: Self = StructureType(1_000_145_002); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const DEVICE_QUEUE_INFO_2: Self = StructureType(1_000_145_003); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl QueueFlags { - pub const PROTECTED: Self = QueueFlags(0b10000); + pub const PROTECTED: Self = QueueFlags(0b1_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl DeviceQueueCreateFlags { @@ -59068,7 +77920,7 @@ impl DeviceQueueCreateFlags { } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl MemoryPropertyFlags { - pub const PROTECTED: Self = MemoryPropertyFlags(0b100000); + pub const PROTECTED: Self = MemoryPropertyFlags(0b10_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] 
impl BufferCreateFlags { @@ -59076,7 +77928,7 @@ impl BufferCreateFlags { } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageCreateFlags { - pub const PROTECTED: Self = ImageCreateFlags(0b100000000000); + pub const PROTECTED: Self = ImageCreateFlags(0b1000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl CommandPoolCreateFlags { @@ -59084,295 +77936,562 @@ impl CommandPoolCreateFlags { } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const SAMPLER_YCBCR_CONVERSION_CREATE_INFO: Self = StructureType(1000156000); + pub const SAMPLER_YCBCR_CONVERSION_CREATE_INFO: Self = StructureType(1_000_156_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const SAMPLER_YCBCR_CONVERSION_INFO: Self = StructureType(1000156001); + pub const SAMPLER_YCBCR_CONVERSION_INFO: Self = StructureType(1_000_156_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const BIND_IMAGE_PLANE_MEMORY_INFO: Self = StructureType(1000156002); + pub const BIND_IMAGE_PLANE_MEMORY_INFO: Self = StructureType(1_000_156_002); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: Self = StructureType(1000156003); + pub const IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: Self = StructureType(1_000_156_003); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: Self = StructureType(1000156004); + pub const PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: Self = + StructureType(1_000_156_004); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES: Self = StructureType(1000156005); + pub const SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES: Self = StructureType(1_000_156_005); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ObjectType { - pub const SAMPLER_YCBCR_CONVERSION: Self = ObjectType(1000156000); 
+ pub const SAMPLER_YCBCR_CONVERSION: Self = ObjectType(1_000_156_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G8B8G8R8_422_UNORM: Self = Format(1000156000); + pub const G8B8G8R8_422_UNORM: Self = Format(1_000_156_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const B8G8R8G8_422_UNORM: Self = Format(1000156001); + pub const B8G8R8G8_422_UNORM: Self = Format(1_000_156_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G8_B8_R8_3PLANE_420_UNORM: Self = Format(1000156002); + pub const G8_B8_R8_3PLANE_420_UNORM: Self = Format(1_000_156_002); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G8_B8R8_2PLANE_420_UNORM: Self = Format(1000156003); + pub const G8_B8R8_2PLANE_420_UNORM: Self = Format(1_000_156_003); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G8_B8_R8_3PLANE_422_UNORM: Self = Format(1000156004); + pub const G8_B8_R8_3PLANE_422_UNORM: Self = Format(1_000_156_004); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G8_B8R8_2PLANE_422_UNORM: Self = Format(1000156005); + pub const G8_B8R8_2PLANE_422_UNORM: Self = Format(1_000_156_005); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G8_B8_R8_3PLANE_444_UNORM: Self = Format(1000156006); + pub const G8_B8_R8_3PLANE_444_UNORM: Self = Format(1_000_156_006); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const R10X6_UNORM_PACK16: Self = Format(1000156007); + pub const R10X6_UNORM_PACK16: Self = Format(1_000_156_007); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const R10X6G10X6_UNORM_2PACK16: Self = Format(1000156008); + pub const R10X6G10X6_UNORM_2PACK16: Self = Format(1_000_156_008); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const R10X6G10X6B10X6A10X6_UNORM_4PACK16: Self = Format(1000156009); + pub const R10X6G10X6B10X6A10X6_UNORM_4PACK16: Self = 
Format(1_000_156_009); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G10X6B10X6G10X6R10X6_422_UNORM_4PACK16: Self = Format(1000156010); + pub const G10X6B10X6G10X6R10X6_422_UNORM_4PACK16: Self = Format(1_000_156_010); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const B10X6G10X6R10X6G10X6_422_UNORM_4PACK16: Self = Format(1000156011); + pub const B10X6G10X6R10X6G10X6_422_UNORM_4PACK16: Self = Format(1_000_156_011); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16: Self = Format(1000156012); + pub const G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16: Self = Format(1_000_156_012); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16: Self = Format(1000156013); + pub const G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16: Self = Format(1_000_156_013); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16: Self = Format(1000156014); + pub const G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16: Self = Format(1_000_156_014); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16: Self = Format(1000156015); + pub const G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16: Self = Format(1_000_156_015); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16: Self = Format(1000156016); + pub const G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16: Self = Format(1_000_156_016); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const R12X4_UNORM_PACK16: Self = Format(1000156017); + pub const R12X4_UNORM_PACK16: Self = Format(1_000_156_017); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const R12X4G12X4_UNORM_2PACK16: Self = Format(1000156018); + pub const R12X4G12X4_UNORM_2PACK16: Self = Format(1_000_156_018); } #[doc = 
"Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const R12X4G12X4B12X4A12X4_UNORM_4PACK16: Self = Format(1000156019); + pub const R12X4G12X4B12X4A12X4_UNORM_4PACK16: Self = Format(1_000_156_019); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G12X4B12X4G12X4R12X4_422_UNORM_4PACK16: Self = Format(1000156020); + pub const G12X4B12X4G12X4R12X4_422_UNORM_4PACK16: Self = Format(1_000_156_020); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const B12X4G12X4R12X4G12X4_422_UNORM_4PACK16: Self = Format(1000156021); + pub const B12X4G12X4R12X4G12X4_422_UNORM_4PACK16: Self = Format(1_000_156_021); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16: Self = Format(1000156022); + pub const G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16: Self = Format(1_000_156_022); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16: Self = Format(1000156023); + pub const G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16: Self = Format(1_000_156_023); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16: Self = Format(1000156024); + pub const G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16: Self = Format(1_000_156_024); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16: Self = Format(1000156025); + pub const G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16: Self = Format(1_000_156_025); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16: Self = Format(1000156026); + pub const G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16: Self = Format(1_000_156_026); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G16B16G16R16_422_UNORM: Self = Format(1000156027); + pub const G16B16G16R16_422_UNORM: Self = Format(1_000_156_027); } #[doc = 
"Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const B16G16R16G16_422_UNORM: Self = Format(1000156028); + pub const B16G16R16G16_422_UNORM: Self = Format(1_000_156_028); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G16_B16_R16_3PLANE_420_UNORM: Self = Format(1000156029); + pub const G16_B16_R16_3PLANE_420_UNORM: Self = Format(1_000_156_029); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G16_B16R16_2PLANE_420_UNORM: Self = Format(1000156030); + pub const G16_B16R16_2PLANE_420_UNORM: Self = Format(1_000_156_030); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G16_B16_R16_3PLANE_422_UNORM: Self = Format(1000156031); + pub const G16_B16_R16_3PLANE_422_UNORM: Self = Format(1_000_156_031); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G16_B16R16_2PLANE_422_UNORM: Self = Format(1000156032); + pub const G16_B16R16_2PLANE_422_UNORM: Self = Format(1_000_156_032); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl Format { - pub const G16_B16_R16_3PLANE_444_UNORM: Self = Format(1000156033); + pub const G16_B16_R16_3PLANE_444_UNORM: Self = Format(1_000_156_033); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageAspectFlags { - pub const PLANE_0: Self = ImageAspectFlags(0b10000); + pub const PLANE_0: Self = ImageAspectFlags(0b1_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageAspectFlags { - pub const PLANE_1: Self = ImageAspectFlags(0b100000); + pub const PLANE_1: Self = ImageAspectFlags(0b10_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageAspectFlags { - pub const PLANE_2: Self = ImageAspectFlags(0b1000000); + pub const PLANE_2: Self = ImageAspectFlags(0b100_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ImageCreateFlags { - pub const DISJOINT: Self = ImageCreateFlags(0b1000000000); + pub const DISJOINT: Self = ImageCreateFlags(0b10_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl 
FormatFeatureFlags { - pub const MIDPOINT_CHROMA_SAMPLES: Self = FormatFeatureFlags(0b100000000000000000); + pub const MIDPOINT_CHROMA_SAMPLES: Self = FormatFeatureFlags(0b10_0000_0000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl FormatFeatureFlags { pub const SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER: Self = - FormatFeatureFlags(0b1000000000000000000); + FormatFeatureFlags(0b100_0000_0000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl FormatFeatureFlags { pub const SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER: Self = - FormatFeatureFlags(0b10000000000000000000); + FormatFeatureFlags(0b1000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl FormatFeatureFlags { pub const SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT: Self = - FormatFeatureFlags(0b100000000000000000000); + FormatFeatureFlags(0b1_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl FormatFeatureFlags { pub const SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE: Self = - FormatFeatureFlags(0b1000000000000000000000); + FormatFeatureFlags(0b10_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl FormatFeatureFlags { - pub const DISJOINT: Self = FormatFeatureFlags(0b10000000000000000000000); + pub const DISJOINT: Self = FormatFeatureFlags(0b100_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl FormatFeatureFlags { - pub const COSITED_CHROMA_SAMPLES: Self = FormatFeatureFlags(0b100000000000000000000000); + pub const COSITED_CHROMA_SAMPLES: Self = FormatFeatureFlags(0b1000_0000_0000_0000_0000_0000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO: Self = StructureType(1000085000); + pub const DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO: Self = StructureType(1_000_085_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl ObjectType { - pub 
const DESCRIPTOR_UPDATE_TEMPLATE: Self = ObjectType(1000085000); + pub const DESCRIPTOR_UPDATE_TEMPLATE: Self = ObjectType(1_000_085_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO: Self = StructureType(1000071000); + pub const PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO: Self = StructureType(1_000_071_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const EXTERNAL_IMAGE_FORMAT_PROPERTIES: Self = StructureType(1000071001); + pub const EXTERNAL_IMAGE_FORMAT_PROPERTIES: Self = StructureType(1_000_071_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO: Self = StructureType(1000071002); + pub const PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO: Self = StructureType(1_000_071_002); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const EXTERNAL_BUFFER_PROPERTIES: Self = StructureType(1000071003); + pub const EXTERNAL_BUFFER_PROPERTIES: Self = StructureType(1_000_071_003); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_ID_PROPERTIES: Self = StructureType(1000071004); + pub const PHYSICAL_DEVICE_ID_PROPERTIES: Self = StructureType(1_000_071_004); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const EXTERNAL_MEMORY_BUFFER_CREATE_INFO: Self = StructureType(1000072000); + pub const EXTERNAL_MEMORY_BUFFER_CREATE_INFO: Self = StructureType(1_000_072_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const EXTERNAL_MEMORY_IMAGE_CREATE_INFO: Self = StructureType(1000072001); + pub const EXTERNAL_MEMORY_IMAGE_CREATE_INFO: Self = StructureType(1_000_072_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const EXPORT_MEMORY_ALLOCATE_INFO: Self = StructureType(1000072002); + pub const EXPORT_MEMORY_ALLOCATE_INFO: Self = StructureType(1_000_072_002); } 
#[doc = "Generated from \'VK_VERSION_1_1\'"] impl Result { - pub const ERROR_INVALID_EXTERNAL_HANDLE: Self = Result(-1000072003); + pub const ERROR_INVALID_EXTERNAL_HANDLE: Self = Result(-1_000_072_003); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO: Self = StructureType(1000112000); + pub const PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO: Self = StructureType(1_000_112_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const EXTERNAL_FENCE_PROPERTIES: Self = StructureType(1000112001); + pub const EXTERNAL_FENCE_PROPERTIES: Self = StructureType(1_000_112_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const EXPORT_FENCE_CREATE_INFO: Self = StructureType(1000113000); + pub const EXPORT_FENCE_CREATE_INFO: Self = StructureType(1_000_113_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const EXPORT_SEMAPHORE_CREATE_INFO: Self = StructureType(1000077000); + pub const EXPORT_SEMAPHORE_CREATE_INFO: Self = StructureType(1_000_077_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO: Self = StructureType(1000076000); + pub const PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO: Self = StructureType(1_000_076_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const EXTERNAL_SEMAPHORE_PROPERTIES: Self = StructureType(1000076001); + pub const EXTERNAL_SEMAPHORE_PROPERTIES: Self = StructureType(1_000_076_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: Self = StructureType(1000168000); + pub const PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: Self = StructureType(1_000_168_000); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const DESCRIPTOR_SET_LAYOUT_SUPPORT: Self = StructureType(1000168001); + pub const 
DESCRIPTOR_SET_LAYOUT_SUPPORT: Self = StructureType(1_000_168_001); } #[doc = "Generated from \'VK_VERSION_1_1\'"] impl StructureType { - pub const PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: Self = StructureType(1000063000); + pub const PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: Self = StructureType(1_000_063_000); +} +#[doc = "Generated from \'VK_VERSION_1_1\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: Self = + StructureType::PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES; +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: Self = StructureType(49); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES: Self = StructureType(50); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: Self = StructureType(51); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES: Self = StructureType(52); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const IMAGE_FORMAT_LIST_CREATE_INFO: Self = StructureType(1_000_147_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const ATTACHMENT_DESCRIPTION_2: Self = StructureType(1_000_109_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const ATTACHMENT_REFERENCE_2: Self = StructureType(1_000_109_001); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const SUBPASS_DESCRIPTION_2: Self = StructureType(1_000_109_002); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const SUBPASS_DEPENDENCY_2: Self = StructureType(1_000_109_003); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const RENDER_PASS_CREATE_INFO_2: Self = StructureType(1_000_109_004); +} 
+#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const SUBPASS_BEGIN_INFO: Self = StructureType(1_000_109_005); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const SUBPASS_END_INFO: Self = StructureType(1_000_109_006); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES: Self = StructureType(1_000_177_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DRIVER_PROPERTIES: Self = StructureType(1_000_196_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES: Self = StructureType(1_000_180_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES: Self = StructureType(1_000_082_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES: Self = StructureType(1_000_197_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO: Self = StructureType(1_000_161_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES: Self = StructureType(1_000_161_001); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES: Self = StructureType(1_000_161_002); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO: Self = + StructureType(1_000_161_003); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT: Self = + StructureType(1_000_161_004); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl 
DescriptorPoolCreateFlags { + pub const UPDATE_AFTER_BIND: Self = DescriptorPoolCreateFlags(0b10); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl DescriptorSetLayoutCreateFlags { + pub const UPDATE_AFTER_BIND_POOL: Self = DescriptorSetLayoutCreateFlags(0b10); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl Result { + pub const ERROR_FRAGMENTATION: Self = Result(-1_000_161_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES: Self = StructureType(1_000_199_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE: Self = StructureType(1_000_199_001); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES: Self = StructureType(1_000_221_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const IMAGE_STENCIL_USAGE_CREATE_INFO: Self = StructureType(1_000_246_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: Self = StructureType(1_000_130_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const SAMPLER_REDUCTION_MODE_CREATE_INFO: Self = StructureType(1_000_130_001); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl FormatFeatureFlags { + pub const SAMPLED_IMAGE_FILTER_MINMAX: Self = FormatFeatureFlags(0b1_0000_0000_0000_0000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES: Self = StructureType(1_000_211_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES: Self = StructureType(1_000_108_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const FRAMEBUFFER_ATTACHMENTS_CREATE_INFO: 
Self = StructureType(1_000_108_001); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const FRAMEBUFFER_ATTACHMENT_IMAGE_INFO: Self = StructureType(1_000_108_002); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const RENDER_PASS_ATTACHMENT_BEGIN_INFO: Self = StructureType(1_000_108_003); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl FramebufferCreateFlags { + pub const IMAGELESS: Self = FramebufferCreateFlags(0b1); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES: Self = + StructureType(1_000_253_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES: Self = + StructureType(1_000_175_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES: Self = + StructureType(1_000_241_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const ATTACHMENT_REFERENCE_STENCIL_LAYOUT: Self = StructureType(1_000_241_001); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT: Self = StructureType(1_000_241_002); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl ImageLayout { + pub const DEPTH_ATTACHMENT_OPTIMAL: Self = ImageLayout(1_000_241_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl ImageLayout { + pub const DEPTH_READ_ONLY_OPTIMAL: Self = ImageLayout(1_000_241_001); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl ImageLayout { + pub const STENCIL_ATTACHMENT_OPTIMAL: Self = ImageLayout(1_000_241_002); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl ImageLayout { + pub const STENCIL_READ_ONLY_OPTIMAL: Self = ImageLayout(1_000_241_003); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const 
PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES: Self = StructureType(1_000_261_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES: Self = StructureType(1_000_207_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES: Self = StructureType(1_000_207_001); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const SEMAPHORE_TYPE_CREATE_INFO: Self = StructureType(1_000_207_002); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const TIMELINE_SEMAPHORE_SUBMIT_INFO: Self = StructureType(1_000_207_003); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const SEMAPHORE_WAIT_INFO: Self = StructureType(1_000_207_004); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const SEMAPHORE_SIGNAL_INFO: Self = StructureType(1_000_207_005); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES: Self = StructureType(1_000_257_000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const BUFFER_DEVICE_ADDRESS_INFO: Self = StructureType(1_000_244_001); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO: Self = StructureType(1_000_257_002); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO: Self = StructureType(1_000_257_003); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl StructureType { + pub const DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO: Self = StructureType(1_000_257_004); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl BufferUsageFlags { + pub const SHADER_DEVICE_ADDRESS: Self = BufferUsageFlags(0b10_0000_0000_0000_0000); +} +#[doc = "Generated from 
\'VK_VERSION_1_2\'"] +impl BufferCreateFlags { + pub const DEVICE_ADDRESS_CAPTURE_REPLAY: Self = BufferCreateFlags(0b1_0000); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl MemoryAllocateFlags { + pub const DEVICE_ADDRESS: Self = MemoryAllocateFlags(0b10); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl MemoryAllocateFlags { + pub const DEVICE_ADDRESS_CAPTURE_REPLAY: Self = MemoryAllocateFlags(0b100); +} +#[doc = "Generated from \'VK_VERSION_1_2\'"] +impl Result { + pub const ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS: Self = Result(-1_000_257_000); } pub(crate) fn debug_flags( f: &mut fmt::Formatter, @@ -59399,7 +78518,22 @@ pub(crate) fn debug_flags( } Ok(()) } -impl fmt::Debug for AccelerationStructureMemoryRequirementsTypeNV { +impl fmt::Debug for AccelerationStructureBuildTypeKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::HOST => Some("HOST"), + Self::DEVICE => Some("DEVICE"), + Self::HOST_OR_DEVICE => Some("HOST_OR_DEVICE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for AccelerationStructureMemoryRequirementsTypeKHR { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { Self::OBJECT => Some("OBJECT"), @@ -59414,7 +78548,7 @@ impl fmt::Debug for AccelerationStructureMemoryRequirementsTypeNV { } } } -impl fmt::Debug for AccelerationStructureTypeNV { +impl fmt::Debug for AccelerationStructureTypeKHR { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { Self::TOP_LEVEL => Some("TOP_LEVEL"), @@ -59489,38 +78623,44 @@ impl fmt::Debug for AccessFlags { AccessFlags::CONDITIONAL_RENDERING_READ_EXT.0, "CONDITIONAL_RENDERING_READ_EXT", ), - ( - AccessFlags::COMMAND_PROCESS_READ_NVX.0, - "COMMAND_PROCESS_READ_NVX", - ), - ( - AccessFlags::COMMAND_PROCESS_WRITE_NVX.0, - "COMMAND_PROCESS_WRITE_NVX", - ), ( AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT_EXT.0, 
"COLOR_ATTACHMENT_READ_NONCOHERENT_EXT", ), + ( + AccessFlags::ACCELERATION_STRUCTURE_READ_KHR.0, + "ACCELERATION_STRUCTURE_READ_KHR", + ), + ( + AccessFlags::ACCELERATION_STRUCTURE_WRITE_KHR.0, + "ACCELERATION_STRUCTURE_WRITE_KHR", + ), ( AccessFlags::SHADING_RATE_IMAGE_READ_NV.0, "SHADING_RATE_IMAGE_READ_NV", ), - ( - AccessFlags::ACCELERATION_STRUCTURE_READ_NV.0, - "ACCELERATION_STRUCTURE_READ_NV", - ), - ( - AccessFlags::ACCELERATION_STRUCTURE_WRITE_NV.0, - "ACCELERATION_STRUCTURE_WRITE_NV", - ), ( AccessFlags::FRAGMENT_DENSITY_MAP_READ_EXT.0, "FRAGMENT_DENSITY_MAP_READ_EXT", ), + ( + AccessFlags::COMMAND_PREPROCESS_READ_NV.0, + "COMMAND_PREPROCESS_READ_NV", + ), + ( + AccessFlags::COMMAND_PREPROCESS_WRITE_NV.0, + "COMMAND_PREPROCESS_WRITE_NV", + ), ]; debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for AcquireProfilingLockFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for AndroidSurfaceCreateFlagsKHR { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -59695,11 +78835,11 @@ impl fmt::Debug for BufferCreateFlags { (BufferCreateFlags::SPARSE_BINDING.0, "SPARSE_BINDING"), (BufferCreateFlags::SPARSE_RESIDENCY.0, "SPARSE_RESIDENCY"), (BufferCreateFlags::SPARSE_ALIASED.0, "SPARSE_ALIASED"), - ( - BufferCreateFlags::DEVICE_ADDRESS_CAPTURE_REPLAY_EXT.0, - "DEVICE_ADDRESS_CAPTURE_REPLAY_EXT", - ), (BufferCreateFlags::PROTECTED.0, "PROTECTED"), + ( + BufferCreateFlags::DEVICE_ADDRESS_CAPTURE_REPLAY.0, + "DEVICE_ADDRESS_CAPTURE_REPLAY", + ), ]; debug_flags(f, KNOWN, self.0) } @@ -59738,10 +78878,11 @@ impl fmt::Debug for BufferUsageFlags { BufferUsageFlags::CONDITIONAL_RENDERING_EXT.0, "CONDITIONAL_RENDERING_EXT", ), - (BufferUsageFlags::RAY_TRACING_NV.0, "RAY_TRACING_NV"), + (BufferUsageFlags::RAY_TRACING_KHR.0, "RAY_TRACING_KHR"), + (BufferUsageFlags::RESERVED_18_QCOM.0, "RESERVED_18_QCOM"), ( - 
BufferUsageFlags::SHADER_DEVICE_ADDRESS_EXT.0, - "SHADER_DEVICE_ADDRESS_EXT", + BufferUsageFlags::SHADER_DEVICE_ADDRESS.0, + "SHADER_DEVICE_ADDRESS", ), ]; debug_flags(f, KNOWN, self.0) @@ -59753,27 +78894,27 @@ impl fmt::Debug for BufferViewCreateFlags { debug_flags(f, KNOWN, self.0) } } -impl fmt::Debug for BuildAccelerationStructureFlagsNV { +impl fmt::Debug for BuildAccelerationStructureFlagsKHR { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ ( - BuildAccelerationStructureFlagsNV::ALLOW_UPDATE.0, + BuildAccelerationStructureFlagsKHR::ALLOW_UPDATE.0, "ALLOW_UPDATE", ), ( - BuildAccelerationStructureFlagsNV::ALLOW_COMPACTION.0, + BuildAccelerationStructureFlagsKHR::ALLOW_COMPACTION.0, "ALLOW_COMPACTION", ), ( - BuildAccelerationStructureFlagsNV::PREFER_FAST_TRACE.0, + BuildAccelerationStructureFlagsKHR::PREFER_FAST_TRACE.0, "PREFER_FAST_TRACE", ), ( - BuildAccelerationStructureFlagsNV::PREFER_FAST_BUILD.0, + BuildAccelerationStructureFlagsKHR::PREFER_FAST_BUILD.0, "PREFER_FAST_BUILD", ), ( - BuildAccelerationStructureFlagsNV::LOW_MEMORY.0, + BuildAccelerationStructureFlagsKHR::LOW_MEMORY.0, "LOW_MEMORY", ), ]; @@ -59827,7 +78968,7 @@ impl fmt::Debug for ColorSpaceKHR { Self::SRGB_NONLINEAR => Some("SRGB_NONLINEAR"), Self::DISPLAY_P3_NONLINEAR_EXT => Some("DISPLAY_P3_NONLINEAR_EXT"), Self::EXTENDED_SRGB_LINEAR_EXT => Some("EXTENDED_SRGB_LINEAR_EXT"), - Self::DCI_P3_LINEAR_EXT => Some("DCI_P3_LINEAR_EXT"), + Self::DISPLAY_P3_LINEAR_EXT => Some("DISPLAY_P3_LINEAR_EXT"), Self::DCI_P3_NONLINEAR_EXT => Some("DCI_P3_NONLINEAR_EXT"), Self::BT709_LINEAR_EXT => Some("BT709_LINEAR_EXT"), Self::BT709_NONLINEAR_EXT => Some("BT709_NONLINEAR_EXT"), @@ -59839,6 +78980,7 @@ impl fmt::Debug for ColorSpaceKHR { Self::ADOBERGB_NONLINEAR_EXT => Some("ADOBERGB_NONLINEAR_EXT"), Self::PASS_THROUGH_EXT => Some("PASS_THROUGH_EXT"), Self::EXTENDED_SRGB_NONLINEAR_EXT => Some("EXTENDED_SRGB_NONLINEAR_EXT"), + Self::DISPLAY_NATIVE_AMD => 
Some("DISPLAY_NATIVE_AMD"), _ => None, }; if let Some(x) = name { @@ -59957,6 +79099,29 @@ impl fmt::Debug for ComponentSwizzle { } } } +impl fmt::Debug for ComponentTypeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::FLOAT16 => Some("FLOAT16"), + Self::FLOAT32 => Some("FLOAT32"), + Self::FLOAT64 => Some("FLOAT64"), + Self::SINT8 => Some("SINT8"), + Self::SINT16 => Some("SINT16"), + Self::SINT32 => Some("SINT32"), + Self::SINT64 => Some("SINT64"), + Self::UINT8 => Some("UINT8"), + Self::UINT16 => Some("UINT16"), + Self::UINT32 => Some("UINT32"), + Self::UINT64 => Some("UINT64"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for CompositeAlphaFlagsKHR { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ @@ -59989,11 +79154,13 @@ impl fmt::Debug for ConservativeRasterizationModeEXT { } } } -impl fmt::Debug for CopyAccelerationStructureModeNV { +impl fmt::Debug for CopyAccelerationStructureModeKHR { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { Self::CLONE => Some("CLONE"), Self::COMPACT => Some("COMPACT"), + Self::SERIALIZE => Some("SERIALIZE"), + Self::DESERIALIZE => Some("DESERIALIZE"), _ => None, }; if let Some(x) = name { @@ -60019,6 +79186,20 @@ impl fmt::Debug for CoverageModulationModeNV { } } } +impl fmt::Debug for CoverageReductionModeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::MERGE => Some("MERGE"), + Self::TRUNCATE => Some("TRUNCATE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for CullModeFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ @@ -60079,12 +79260,10 @@ impl fmt::Debug for DebugReportObjectTypeEXT { Self::DEBUG_REPORT_CALLBACK => Some("DEBUG_REPORT_CALLBACK"), Self::DISPLAY_KHR => 
Some("DISPLAY_KHR"), Self::DISPLAY_MODE_KHR => Some("DISPLAY_MODE_KHR"), - Self::OBJECT_TABLE_NVX => Some("OBJECT_TABLE_NVX"), - Self::INDIRECT_COMMANDS_LAYOUT_NVX => Some("INDIRECT_COMMANDS_LAYOUT_NVX"), Self::VALIDATION_CACHE => Some("VALIDATION_CACHE"), Self::SAMPLER_YCBCR_CONVERSION => Some("SAMPLER_YCBCR_CONVERSION"), Self::DESCRIPTOR_UPDATE_TEMPLATE => Some("DESCRIPTOR_UPDATE_TEMPLATE"), - Self::ACCELERATION_STRUCTURE_NV => Some("ACCELERATION_STRUCTURE_NV"), + Self::ACCELERATION_STRUCTURE_KHR => Some("ACCELERATION_STRUCTURE_KHR"), _ => None, }; if let Some(x) = name { @@ -60137,23 +79316,20 @@ impl fmt::Debug for DependencyFlags { debug_flags(f, KNOWN, self.0) } } -impl fmt::Debug for DescriptorBindingFlagsEXT { +impl fmt::Debug for DescriptorBindingFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ ( - DescriptorBindingFlagsEXT::UPDATE_AFTER_BIND.0, + DescriptorBindingFlags::UPDATE_AFTER_BIND.0, "UPDATE_AFTER_BIND", ), ( - DescriptorBindingFlagsEXT::UPDATE_UNUSED_WHILE_PENDING.0, + DescriptorBindingFlags::UPDATE_UNUSED_WHILE_PENDING.0, "UPDATE_UNUSED_WHILE_PENDING", ), + (DescriptorBindingFlags::PARTIALLY_BOUND.0, "PARTIALLY_BOUND"), ( - DescriptorBindingFlagsEXT::PARTIALLY_BOUND.0, - "PARTIALLY_BOUND", - ), - ( - DescriptorBindingFlagsEXT::VARIABLE_DESCRIPTOR_COUNT.0, + DescriptorBindingFlags::VARIABLE_DESCRIPTOR_COUNT.0, "VARIABLE_DESCRIPTOR_COUNT", ), ]; @@ -60168,8 +79344,8 @@ impl fmt::Debug for DescriptorPoolCreateFlags { "FREE_DESCRIPTOR_SET", ), ( - DescriptorPoolCreateFlags::UPDATE_AFTER_BIND_EXT.0, - "UPDATE_AFTER_BIND_EXT", + DescriptorPoolCreateFlags::UPDATE_AFTER_BIND.0, + "UPDATE_AFTER_BIND", ), ]; debug_flags(f, KNOWN, self.0) @@ -60189,8 +79365,8 @@ impl fmt::Debug for DescriptorSetLayoutCreateFlags { "PUSH_DESCRIPTOR_KHR", ), ( - DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND_POOL_EXT.0, - "UPDATE_AFTER_BIND_POOL_EXT", + DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND_POOL.0, + 
"UPDATE_AFTER_BIND_POOL", ), ]; debug_flags(f, KNOWN, self.0) @@ -60211,7 +79387,7 @@ impl fmt::Debug for DescriptorType { Self::STORAGE_BUFFER_DYNAMIC => Some("STORAGE_BUFFER_DYNAMIC"), Self::INPUT_ATTACHMENT => Some("INPUT_ATTACHMENT"), Self::INLINE_UNIFORM_BLOCK_EXT => Some("INLINE_UNIFORM_BLOCK_EXT"), - Self::ACCELERATION_STRUCTURE_NV => Some("ACCELERATION_STRUCTURE_NV"), + Self::ACCELERATION_STRUCTURE_KHR => Some("ACCELERATION_STRUCTURE_KHR"), _ => None, }; if let Some(x) = name { @@ -60231,6 +79407,7 @@ impl fmt::Debug for DescriptorUpdateTemplateType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { Self::DESCRIPTOR_SET => Some("DESCRIPTOR_SET"), + Self::PUSH_DESCRIPTORS_KHR => Some("PUSH_DESCRIPTORS_KHR"), _ => None, }; if let Some(x) = name { @@ -60246,6 +79423,25 @@ impl fmt::Debug for DeviceCreateFlags { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for DeviceDiagnosticsConfigFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + DeviceDiagnosticsConfigFlagsNV::ENABLE_SHADER_DEBUG_INFO.0, + "ENABLE_SHADER_DEBUG_INFO", + ), + ( + DeviceDiagnosticsConfigFlagsNV::ENABLE_RESOURCE_TRACKING.0, + "ENABLE_RESOURCE_TRACKING", + ), + ( + DeviceDiagnosticsConfigFlagsNV::ENABLE_AUTOMATIC_CHECKPOINTS.0, + "ENABLE_AUTOMATIC_CHECKPOINTS", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for DeviceEventTypeEXT { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -60347,7 +79543,7 @@ impl fmt::Debug for DisplaySurfaceCreateFlagsKHR { debug_flags(f, KNOWN, self.0) } } -impl fmt::Debug for DriverIdKHR { +impl fmt::Debug for DriverId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { Self::AMD_PROPRIETARY => Some("AMD_PROPRIETARY"), @@ -60359,7 +79555,9 @@ impl fmt::Debug for DriverIdKHR { Self::IMAGINATION_PROPRIETARY => Some("IMAGINATION_PROPRIETARY"), Self::QUALCOMM_PROPRIETARY => 
Some("QUALCOMM_PROPRIETARY"), Self::ARM_PROPRIETARY => Some("ARM_PROPRIETARY"), - Self::GOOGLE_PASTEL => Some("GOOGLE_PASTEL"), + Self::GOOGLE_SWIFTSHADER => Some("GOOGLE_SWIFTSHADER"), + Self::GGP_PROPRIETARY => Some("GGP_PROPRIETARY"), + Self::BROADCOM_PROPRIETARY => Some("BROADCOM_PROPRIETARY"), _ => None, }; if let Some(x) = name { @@ -60387,6 +79585,7 @@ impl fmt::Debug for DynamicState { Self::VIEWPORT_SHADING_RATE_PALETTE_NV => Some("VIEWPORT_SHADING_RATE_PALETTE_NV"), Self::VIEWPORT_COARSE_SAMPLE_ORDER_NV => Some("VIEWPORT_COARSE_SAMPLE_ORDER_NV"), Self::EXCLUSIVE_SCISSOR_NV => Some("EXCLUSIVE_SCISSOR_NV"), + Self::LINE_STIPPLE_EXT => Some("LINE_STIPPLE_EXT"), _ => None, }; if let Some(x) = name { @@ -60772,6 +79971,20 @@ impl fmt::Debug for Format { Self::PVRTC1_4BPP_SRGB_BLOCK_IMG => Some("PVRTC1_4BPP_SRGB_BLOCK_IMG"), Self::PVRTC2_2BPP_SRGB_BLOCK_IMG => Some("PVRTC2_2BPP_SRGB_BLOCK_IMG"), Self::PVRTC2_4BPP_SRGB_BLOCK_IMG => Some("PVRTC2_4BPP_SRGB_BLOCK_IMG"), + Self::ASTC_4X4_SFLOAT_BLOCK_EXT => Some("ASTC_4X4_SFLOAT_BLOCK_EXT"), + Self::ASTC_5X4_SFLOAT_BLOCK_EXT => Some("ASTC_5X4_SFLOAT_BLOCK_EXT"), + Self::ASTC_5X5_SFLOAT_BLOCK_EXT => Some("ASTC_5X5_SFLOAT_BLOCK_EXT"), + Self::ASTC_6X5_SFLOAT_BLOCK_EXT => Some("ASTC_6X5_SFLOAT_BLOCK_EXT"), + Self::ASTC_6X6_SFLOAT_BLOCK_EXT => Some("ASTC_6X6_SFLOAT_BLOCK_EXT"), + Self::ASTC_8X5_SFLOAT_BLOCK_EXT => Some("ASTC_8X5_SFLOAT_BLOCK_EXT"), + Self::ASTC_8X6_SFLOAT_BLOCK_EXT => Some("ASTC_8X6_SFLOAT_BLOCK_EXT"), + Self::ASTC_8X8_SFLOAT_BLOCK_EXT => Some("ASTC_8X8_SFLOAT_BLOCK_EXT"), + Self::ASTC_10X5_SFLOAT_BLOCK_EXT => Some("ASTC_10X5_SFLOAT_BLOCK_EXT"), + Self::ASTC_10X6_SFLOAT_BLOCK_EXT => Some("ASTC_10X6_SFLOAT_BLOCK_EXT"), + Self::ASTC_10X8_SFLOAT_BLOCK_EXT => Some("ASTC_10X8_SFLOAT_BLOCK_EXT"), + Self::ASTC_10X10_SFLOAT_BLOCK_EXT => Some("ASTC_10X10_SFLOAT_BLOCK_EXT"), + Self::ASTC_12X10_SFLOAT_BLOCK_EXT => Some("ASTC_12X10_SFLOAT_BLOCK_EXT"), + Self::ASTC_12X12_SFLOAT_BLOCK_EXT => 
Some("ASTC_12X12_SFLOAT_BLOCK_EXT"), Self::G8B8G8R8_422_UNORM => Some("G8B8G8R8_422_UNORM"), Self::B8G8R8G8_422_UNORM => Some("B8G8R8G8_422_UNORM"), Self::G8_B8_R8_3PLANE_420_UNORM => Some("G8_B8_R8_3PLANE_420_UNORM"), @@ -60845,13 +80058,13 @@ impl fmt::Debug for Format { } impl fmt::Debug for FormatFeatureFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN : & [ ( Flags , & str ) ] = & [ ( FormatFeatureFlags :: SAMPLED_IMAGE . 0 , "SAMPLED_IMAGE" ) , ( FormatFeatureFlags :: STORAGE_IMAGE . 0 , "STORAGE_IMAGE" ) , ( FormatFeatureFlags :: STORAGE_IMAGE_ATOMIC . 0 , "STORAGE_IMAGE_ATOMIC" ) , ( FormatFeatureFlags :: UNIFORM_TEXEL_BUFFER . 0 , "UNIFORM_TEXEL_BUFFER" ) , ( FormatFeatureFlags :: STORAGE_TEXEL_BUFFER . 0 , "STORAGE_TEXEL_BUFFER" ) , ( FormatFeatureFlags :: STORAGE_TEXEL_BUFFER_ATOMIC . 0 , "STORAGE_TEXEL_BUFFER_ATOMIC" ) , ( FormatFeatureFlags :: VERTEX_BUFFER . 0 , "VERTEX_BUFFER" ) , ( FormatFeatureFlags :: COLOR_ATTACHMENT . 0 , "COLOR_ATTACHMENT" ) , ( FormatFeatureFlags :: COLOR_ATTACHMENT_BLEND . 0 , "COLOR_ATTACHMENT_BLEND" ) , ( FormatFeatureFlags :: DEPTH_STENCIL_ATTACHMENT . 0 , "DEPTH_STENCIL_ATTACHMENT" ) , ( FormatFeatureFlags :: BLIT_SRC . 0 , "BLIT_SRC" ) , ( FormatFeatureFlags :: BLIT_DST . 0 , "BLIT_DST" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_FILTER_LINEAR . 0 , "SAMPLED_IMAGE_FILTER_LINEAR" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_FILTER_CUBIC_IMG . 0 , "SAMPLED_IMAGE_FILTER_CUBIC_IMG" ) , ( FormatFeatureFlags :: RESERVED_27_KHR . 0 , "RESERVED_27_KHR" ) , ( FormatFeatureFlags :: RESERVED_28_KHR . 0 , "RESERVED_28_KHR" ) , ( FormatFeatureFlags :: RESERVED_25_KHR . 0 , "RESERVED_25_KHR" ) , ( FormatFeatureFlags :: RESERVED_26_KHR . 0 , "RESERVED_26_KHR" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_FILTER_MINMAX_EXT . 0 , "SAMPLED_IMAGE_FILTER_MINMAX_EXT" ) , ( FormatFeatureFlags :: FRAGMENT_DENSITY_MAP_EXT . 0 , "FRAGMENT_DENSITY_MAP_EXT" ) , ( FormatFeatureFlags :: TRANSFER_SRC . 
0 , "TRANSFER_SRC" ) , ( FormatFeatureFlags :: TRANSFER_DST . 0 , "TRANSFER_DST" ) , ( FormatFeatureFlags :: MIDPOINT_CHROMA_SAMPLES . 0 , "MIDPOINT_CHROMA_SAMPLES" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE" ) , ( FormatFeatureFlags :: DISJOINT . 0 , "DISJOINT" ) , ( FormatFeatureFlags :: COSITED_CHROMA_SAMPLES . 0 , "COSITED_CHROMA_SAMPLES" ) ] ; + const KNOWN : & [ ( Flags , & str ) ] = & [ ( FormatFeatureFlags :: SAMPLED_IMAGE . 0 , "SAMPLED_IMAGE" ) , ( FormatFeatureFlags :: STORAGE_IMAGE . 0 , "STORAGE_IMAGE" ) , ( FormatFeatureFlags :: STORAGE_IMAGE_ATOMIC . 0 , "STORAGE_IMAGE_ATOMIC" ) , ( FormatFeatureFlags :: UNIFORM_TEXEL_BUFFER . 0 , "UNIFORM_TEXEL_BUFFER" ) , ( FormatFeatureFlags :: STORAGE_TEXEL_BUFFER . 0 , "STORAGE_TEXEL_BUFFER" ) , ( FormatFeatureFlags :: STORAGE_TEXEL_BUFFER_ATOMIC . 0 , "STORAGE_TEXEL_BUFFER_ATOMIC" ) , ( FormatFeatureFlags :: VERTEX_BUFFER . 0 , "VERTEX_BUFFER" ) , ( FormatFeatureFlags :: COLOR_ATTACHMENT . 0 , "COLOR_ATTACHMENT" ) , ( FormatFeatureFlags :: COLOR_ATTACHMENT_BLEND . 0 , "COLOR_ATTACHMENT_BLEND" ) , ( FormatFeatureFlags :: DEPTH_STENCIL_ATTACHMENT . 0 , "DEPTH_STENCIL_ATTACHMENT" ) , ( FormatFeatureFlags :: BLIT_SRC . 0 , "BLIT_SRC" ) , ( FormatFeatureFlags :: BLIT_DST . 0 , "BLIT_DST" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_FILTER_LINEAR . 0 , "SAMPLED_IMAGE_FILTER_LINEAR" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_FILTER_CUBIC_IMG . 
0 , "SAMPLED_IMAGE_FILTER_CUBIC_IMG" ) , ( FormatFeatureFlags :: RESERVED_27_KHR . 0 , "RESERVED_27_KHR" ) , ( FormatFeatureFlags :: RESERVED_28_KHR . 0 , "RESERVED_28_KHR" ) , ( FormatFeatureFlags :: RESERVED_25_KHR . 0 , "RESERVED_25_KHR" ) , ( FormatFeatureFlags :: RESERVED_26_KHR . 0 , "RESERVED_26_KHR" ) , ( FormatFeatureFlags :: ACCELERATION_STRUCTURE_VERTEX_BUFFER_KHR . 0 , "ACCELERATION_STRUCTURE_VERTEX_BUFFER_KHR" ) , ( FormatFeatureFlags :: FRAGMENT_DENSITY_MAP_EXT . 0 , "FRAGMENT_DENSITY_MAP_EXT" ) , ( FormatFeatureFlags :: TRANSFER_SRC . 0 , "TRANSFER_SRC" ) , ( FormatFeatureFlags :: TRANSFER_DST . 0 , "TRANSFER_DST" ) , ( FormatFeatureFlags :: MIDPOINT_CHROMA_SAMPLES . 0 , "MIDPOINT_CHROMA_SAMPLES" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE . 0 , "SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE" ) , ( FormatFeatureFlags :: DISJOINT . 0 , "DISJOINT" ) , ( FormatFeatureFlags :: COSITED_CHROMA_SAMPLES . 0 , "COSITED_CHROMA_SAMPLES" ) , ( FormatFeatureFlags :: SAMPLED_IMAGE_FILTER_MINMAX . 
0 , "SAMPLED_IMAGE_FILTER_MINMAX" ) ] ; debug_flags(f, KNOWN, self.0) } } impl fmt::Debug for FramebufferCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[]; + const KNOWN: &[(Flags, &str)] = &[(FramebufferCreateFlags::IMAGELESS.0, "IMAGELESS")]; debug_flags(f, KNOWN, self.0) } } @@ -60869,43 +80082,13 @@ impl fmt::Debug for FrontFace { } } } -impl fmt::Debug for GeometryFlagsNV { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[ - (GeometryFlagsNV::OPAQUE.0, "OPAQUE"), - ( - GeometryFlagsNV::NO_DUPLICATE_ANY_HIT_INVOCATION.0, - "NO_DUPLICATE_ANY_HIT_INVOCATION", - ), - ]; - debug_flags(f, KNOWN, self.0) - } -} -impl fmt::Debug for GeometryInstanceFlagsNV { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[ - ( - GeometryInstanceFlagsNV::TRIANGLE_CULL_DISABLE.0, - "TRIANGLE_CULL_DISABLE", - ), - ( - GeometryInstanceFlagsNV::TRIANGLE_FRONT_COUNTERCLOCKWISE.0, - "TRIANGLE_FRONT_COUNTERCLOCKWISE", - ), - (GeometryInstanceFlagsNV::FORCE_OPAQUE.0, "FORCE_OPAQUE"), - ( - GeometryInstanceFlagsNV::FORCE_NO_OPAQUE.0, - "FORCE_NO_OPAQUE", - ), - ]; - debug_flags(f, KNOWN, self.0) - } -} -impl fmt::Debug for GeometryTypeNV { +impl fmt::Debug for FullScreenExclusiveEXT { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { - Self::TRIANGLES => Some("TRIANGLES"), - Self::AABBS => Some("AABBS"), + Self::DEFAULT => Some("DEFAULT"), + Self::ALLOWED => Some("ALLOWED"), + Self::DISALLOWED => Some("DISALLOWED"), + Self::APPLICATION_CONTROLLED => Some("APPLICATION_CONTROLLED"), _ => None, }; if let Some(x) = name { @@ -60915,6 +80098,59 @@ impl fmt::Debug for GeometryTypeNV { } } } +impl fmt::Debug for GeometryFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (GeometryFlagsKHR::OPAQUE.0, "OPAQUE"), + ( + GeometryFlagsKHR::NO_DUPLICATE_ANY_HIT_INVOCATION.0, + 
"NO_DUPLICATE_ANY_HIT_INVOCATION", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for GeometryInstanceFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + GeometryInstanceFlagsKHR::TRIANGLE_FACING_CULL_DISABLE.0, + "TRIANGLE_FACING_CULL_DISABLE", + ), + ( + GeometryInstanceFlagsKHR::TRIANGLE_FRONT_COUNTERCLOCKWISE.0, + "TRIANGLE_FRONT_COUNTERCLOCKWISE", + ), + (GeometryInstanceFlagsKHR::FORCE_OPAQUE.0, "FORCE_OPAQUE"), + ( + GeometryInstanceFlagsKHR::FORCE_NO_OPAQUE.0, + "FORCE_NO_OPAQUE", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for GeometryTypeKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::TRIANGLES => Some("TRIANGLES"), + Self::AABBS => Some("AABBS"), + Self::INSTANCES => Some("INSTANCES"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for HeadlessSurfaceCreateFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for IOSSurfaceCreateFlagsMVK { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -60995,6 +80231,10 @@ impl fmt::Debug for ImageLayout { Self::DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL => { Some("DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL") } + Self::DEPTH_ATTACHMENT_OPTIMAL => Some("DEPTH_ATTACHMENT_OPTIMAL"), + Self::DEPTH_READ_ONLY_OPTIMAL => Some("DEPTH_READ_ONLY_OPTIMAL"), + Self::STENCIL_ATTACHMENT_OPTIMAL => Some("STENCIL_ATTACHMENT_OPTIMAL"), + Self::STENCIL_READ_ONLY_OPTIMAL => Some("STENCIL_READ_ONLY_OPTIMAL"), _ => None, }; if let Some(x) = name { @@ -61067,6 +80307,8 @@ impl fmt::Debug for ImageUsageFlags { ImageUsageFlags::SHADING_RATE_IMAGE_NV.0, "SHADING_RATE_IMAGE_NV", ), + (ImageUsageFlags::RESERVED_16_QCOM.0, "RESERVED_16_QCOM"), + 
(ImageUsageFlags::RESERVED_17_QCOM.0, "RESERVED_17_QCOM"), ( ImageUsageFlags::FRAGMENT_DENSITY_MAP_EXT.0, "FRAGMENT_DENSITY_MAP_EXT", @@ -61108,7 +80350,8 @@ impl fmt::Debug for IndexType { let name = match *self { Self::UINT16 => Some("UINT16"), Self::UINT32 => Some("UINT32"), - Self::NONE_NV => Some("NONE_NV"), + Self::NONE_KHR => Some("NONE_KHR"), + Self::UINT8_EXT => Some("UINT8_EXT"), _ => None, }; if let Some(x) = name { @@ -61118,40 +80361,36 @@ impl fmt::Debug for IndexType { } } } -impl fmt::Debug for IndirectCommandsLayoutUsageFlagsNVX { +impl fmt::Debug for IndirectCommandsLayoutUsageFlagsNV { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ ( - IndirectCommandsLayoutUsageFlagsNVX::UNORDERED_SEQUENCES.0, - "UNORDERED_SEQUENCES", + IndirectCommandsLayoutUsageFlagsNV::EXPLICIT_PREPROCESS.0, + "EXPLICIT_PREPROCESS", ), ( - IndirectCommandsLayoutUsageFlagsNVX::SPARSE_SEQUENCES.0, - "SPARSE_SEQUENCES", - ), - ( - IndirectCommandsLayoutUsageFlagsNVX::EMPTY_EXECUTIONS.0, - "EMPTY_EXECUTIONS", - ), - ( - IndirectCommandsLayoutUsageFlagsNVX::INDEXED_SEQUENCES.0, + IndirectCommandsLayoutUsageFlagsNV::INDEXED_SEQUENCES.0, "INDEXED_SEQUENCES", ), + ( + IndirectCommandsLayoutUsageFlagsNV::UNORDERED_SEQUENCES.0, + "UNORDERED_SEQUENCES", + ), ]; debug_flags(f, KNOWN, self.0) } } -impl fmt::Debug for IndirectCommandsTokenTypeNVX { +impl fmt::Debug for IndirectCommandsTokenTypeNV { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { - Self::PIPELINE => Some("PIPELINE"), - Self::DESCRIPTOR_SET => Some("DESCRIPTOR_SET"), + Self::SHADER_GROUP => Some("SHADER_GROUP"), + Self::STATE_FLAGS => Some("STATE_FLAGS"), Self::INDEX_BUFFER => Some("INDEX_BUFFER"), Self::VERTEX_BUFFER => Some("VERTEX_BUFFER"), Self::PUSH_CONSTANT => Some("PUSH_CONSTANT"), Self::DRAW_INDEXED => Some("DRAW_INDEXED"), Self::DRAW => Some("DRAW"), - Self::DISPATCH => Some("DISPATCH"), + Self::DRAW_TASKS => Some("DRAW_TASKS"), _ => None, }; 
if let Some(x) = name { @@ -61161,6 +80400,13 @@ impl fmt::Debug for IndirectCommandsTokenTypeNVX { } } } +impl fmt::Debug for IndirectStateFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = + &[(IndirectStateFlagsNV::FLAG_FRONTFACE.0, "FLAG_FRONTFACE")]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for InstanceCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -61180,6 +80426,22 @@ impl fmt::Debug for InternalAllocationType { } } } +impl fmt::Debug for LineRasterizationModeEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::DEFAULT => Some("DEFAULT"), + Self::RECTANGULAR => Some("RECTANGULAR"), + Self::BRESENHAM => Some("BRESENHAM"), + Self::RECTANGULAR_SMOOTH => Some("RECTANGULAR_SMOOTH"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for LogicOp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -61216,7 +80478,14 @@ impl fmt::Debug for MacOSSurfaceCreateFlagsMVK { } impl fmt::Debug for MemoryAllocateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[(MemoryAllocateFlags::DEVICE_MASK.0, "DEVICE_MASK")]; + const KNOWN: &[(Flags, &str)] = &[ + (MemoryAllocateFlags::DEVICE_MASK.0, "DEVICE_MASK"), + (MemoryAllocateFlags::DEVICE_ADDRESS.0, "DEVICE_ADDRESS"), + ( + MemoryAllocateFlags::DEVICE_ADDRESS_CAPTURE_REPLAY.0, + "DEVICE_ADDRESS_CAPTURE_REPLAY", + ), + ]; debug_flags(f, KNOWN, self.0) } } @@ -61224,6 +80493,7 @@ impl fmt::Debug for MemoryHeapFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ (MemoryHeapFlags::DEVICE_LOCAL.0, "DEVICE_LOCAL"), + (MemoryHeapFlags::RESERVED_2_KHR.0, "RESERVED_2_KHR"), (MemoryHeapFlags::MULTI_INSTANCE.0, "MULTI_INSTANCE"), ]; debug_flags(f, KNOWN, self.0) @@ -61258,34 +80528,22 
@@ impl fmt::Debug for MemoryPropertyFlags { (MemoryPropertyFlags::HOST_COHERENT.0, "HOST_COHERENT"), (MemoryPropertyFlags::HOST_CACHED.0, "HOST_CACHED"), (MemoryPropertyFlags::LAZILY_ALLOCATED.0, "LAZILY_ALLOCATED"), + ( + MemoryPropertyFlags::DEVICE_COHERENT_AMD.0, + "DEVICE_COHERENT_AMD", + ), + ( + MemoryPropertyFlags::DEVICE_UNCACHED_AMD.0, + "DEVICE_UNCACHED_AMD", + ), (MemoryPropertyFlags::PROTECTED.0, "PROTECTED"), ]; debug_flags(f, KNOWN, self.0) } } -impl fmt::Debug for ObjectEntryTypeNVX { +impl fmt::Debug for MetalSurfaceCreateFlagsEXT { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let name = match *self { - Self::DESCRIPTOR_SET => Some("DESCRIPTOR_SET"), - Self::PIPELINE => Some("PIPELINE"), - Self::INDEX_BUFFER => Some("INDEX_BUFFER"), - Self::VERTEX_BUFFER => Some("VERTEX_BUFFER"), - Self::PUSH_CONSTANT => Some("PUSH_CONSTANT"), - _ => None, - }; - if let Some(x) = name { - f.write_str(x) - } else { - self.0.fmt(f) - } - } -} -impl fmt::Debug for ObjectEntryUsageFlagsNVX { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[ - (ObjectEntryUsageFlagsNVX::GRAPHICS.0, "GRAPHICS"), - (ObjectEntryUsageFlagsNVX::COMPUTE.0, "COMPUTE"), - ]; + const KNOWN: &[(Flags, &str)] = &[]; debug_flags(f, KNOWN, self.0) } } @@ -61323,11 +80581,12 @@ impl fmt::Debug for ObjectType { Self::DISPLAY_KHR => Some("DISPLAY_KHR"), Self::DISPLAY_MODE_KHR => Some("DISPLAY_MODE_KHR"), Self::DEBUG_REPORT_CALLBACK_EXT => Some("DEBUG_REPORT_CALLBACK_EXT"), - Self::OBJECT_TABLE_NVX => Some("OBJECT_TABLE_NVX"), - Self::INDIRECT_COMMANDS_LAYOUT_NVX => Some("INDIRECT_COMMANDS_LAYOUT_NVX"), Self::DEBUG_UTILS_MESSENGER_EXT => Some("DEBUG_UTILS_MESSENGER_EXT"), + Self::ACCELERATION_STRUCTURE_KHR => Some("ACCELERATION_STRUCTURE_KHR"), Self::VALIDATION_CACHE_EXT => Some("VALIDATION_CACHE_EXT"), - Self::ACCELERATION_STRUCTURE_NV => Some("ACCELERATION_STRUCTURE_NV"), + Self::PERFORMANCE_CONFIGURATION_INTEL => 
Some("PERFORMANCE_CONFIGURATION_INTEL"), + Self::DEFERRED_OPERATION_KHR => Some("DEFERRED_OPERATION_KHR"), + Self::INDIRECT_COMMANDS_LAYOUT_NV => Some("INDIRECT_COMMANDS_LAYOUT_NV"), Self::SAMPLER_YCBCR_CONVERSION => Some("SAMPLER_YCBCR_CONVERSION"), Self::DESCRIPTOR_UPDATE_TEMPLATE => Some("DESCRIPTOR_UPDATE_TEMPLATE"), _ => None, @@ -61350,6 +80609,146 @@ impl fmt::Debug for PeerMemoryFeatureFlags { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for PerformanceConfigurationTypeINTEL { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match * self { Self :: PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL => Some ( "PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL" ) , _ => None , } ; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PerformanceCounterDescriptionFlagsKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + ( + PerformanceCounterDescriptionFlagsKHR::PERFORMANCE_IMPACTING.0, + "PERFORMANCE_IMPACTING", + ), + ( + PerformanceCounterDescriptionFlagsKHR::CONCURRENTLY_IMPACTED.0, + "CONCURRENTLY_IMPACTED", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for PerformanceCounterScopeKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::COMMAND_BUFFER => Some("COMMAND_BUFFER"), + Self::RENDER_PASS => Some("RENDER_PASS"), + Self::COMMAND => Some("COMMAND"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PerformanceCounterStorageKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::INT32 => Some("INT32"), + Self::INT64 => Some("INT64"), + Self::UINT32 => Some("UINT32"), + Self::UINT64 => Some("UINT64"), + Self::FLOAT32 => Some("FLOAT32"), + Self::FLOAT64 => Some("FLOAT64"), + _ => None, + }; + if let 
Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PerformanceCounterUnitKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::GENERIC => Some("GENERIC"), + Self::PERCENTAGE => Some("PERCENTAGE"), + Self::NANOSECONDS => Some("NANOSECONDS"), + Self::BYTES => Some("BYTES"), + Self::BYTES_PER_SECOND => Some("BYTES_PER_SECOND"), + Self::KELVIN => Some("KELVIN"), + Self::WATTS => Some("WATTS"), + Self::VOLTS => Some("VOLTS"), + Self::AMPS => Some("AMPS"), + Self::HERTZ => Some("HERTZ"), + Self::CYCLES => Some("CYCLES"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PerformanceOverrideTypeINTEL { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL => { + Some("PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL") + } + Self::PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL => { + Some("PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL") + } + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PerformanceParameterTypeINTEL { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL => { + Some("PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL") + } + Self::PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALIDS_INTEL => { + Some("PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALIDS_INTEL") + } + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for PerformanceValueTypeINTEL { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::PERFORMANCE_VALUE_TYPE_UINT32_INTEL => { + Some("PERFORMANCE_VALUE_TYPE_UINT32_INTEL") + } + Self::PERFORMANCE_VALUE_TYPE_UINT64_INTEL => { + 
Some("PERFORMANCE_VALUE_TYPE_UINT64_INTEL") + } + Self::PERFORMANCE_VALUE_TYPE_FLOAT_INTEL => Some("PERFORMANCE_VALUE_TYPE_FLOAT_INTEL"), + Self::PERFORMANCE_VALUE_TYPE_BOOL_INTEL => Some("PERFORMANCE_VALUE_TYPE_BOOL_INTEL"), + Self::PERFORMANCE_VALUE_TYPE_STRING_INTEL => { + Some("PERFORMANCE_VALUE_TYPE_STRING_INTEL") + } + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for PhysicalDeviceType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -61372,7 +80771,7 @@ impl fmt::Debug for PipelineBindPoint { let name = match *self { Self::GRAPHICS => Some("GRAPHICS"), Self::COMPUTE => Some("COMPUTE"), - Self::RAY_TRACING_NV => Some("RAY_TRACING_NV"), + Self::RAY_TRACING_KHR => Some("RAY_TRACING_KHR"), _ => None, }; if let Some(x) = name { @@ -61384,7 +80783,10 @@ impl fmt::Debug for PipelineBindPoint { } impl fmt::Debug for PipelineCacheCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[]; + const KNOWN: &[(Flags, &str)] = &[( + PipelineCacheCreateFlags::EXTERNALLY_SYNCHRONIZED_EXT.0, + "EXTERNALLY_SYNCHRONIZED_EXT", + )]; debug_flags(f, KNOWN, self.0) } } @@ -61407,12 +80809,24 @@ impl fmt::Debug for PipelineColorBlendStateCreateFlags { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for PipelineCompilerControlFlagsAMD { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for PipelineCoverageModulationStateCreateFlagsNV { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for PipelineCoverageReductionStateCreateFlagsNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for PipelineCoverageToColorStateCreateFlagsNV { fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -61431,7 +80845,52 @@ impl fmt::Debug for PipelineCreateFlags { "ALLOW_DERIVATIVES", ), (PipelineCreateFlags::DERIVATIVE.0, "DERIVATIVE"), + ( + PipelineCreateFlags::RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_KHR.0, + "RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_KHR", + ), + ( + PipelineCreateFlags::RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_KHR.0, + "RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_KHR", + ), + ( + PipelineCreateFlags::RAY_TRACING_NO_NULL_MISS_SHADERS_KHR.0, + "RAY_TRACING_NO_NULL_MISS_SHADERS_KHR", + ), + ( + PipelineCreateFlags::RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_KHR.0, + "RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_KHR", + ), + ( + PipelineCreateFlags::RAY_TRACING_SKIP_TRIANGLES_KHR.0, + "RAY_TRACING_SKIP_TRIANGLES_KHR", + ), + ( + PipelineCreateFlags::RAY_TRACING_SKIP_AABBS_KHR.0, + "RAY_TRACING_SKIP_AABBS_KHR", + ), (PipelineCreateFlags::DEFER_COMPILE_NV.0, "DEFER_COMPILE_NV"), + ( + PipelineCreateFlags::CAPTURE_STATISTICS_KHR.0, + "CAPTURE_STATISTICS_KHR", + ), + ( + PipelineCreateFlags::CAPTURE_INTERNAL_REPRESENTATIONS_KHR.0, + "CAPTURE_INTERNAL_REPRESENTATIONS_KHR", + ), + ( + PipelineCreateFlags::INDIRECT_BINDABLE_NV.0, + "INDIRECT_BINDABLE_NV", + ), + (PipelineCreateFlags::LIBRARY_KHR.0, "LIBRARY_KHR"), + ( + PipelineCreateFlags::FAIL_ON_PIPELINE_COMPILE_REQUIRED_EXT.0, + "FAIL_ON_PIPELINE_COMPILE_REQUIRED_EXT", + ), + ( + PipelineCreateFlags::EARLY_RETURN_ON_FAILURE_EXT.0, + "EARLY_RETURN_ON_FAILURE_EXT", + ), ( PipelineCreateFlags::VIEW_INDEX_FROM_DEVICE_INDEX.0, "VIEW_INDEX_FROM_DEVICE_INDEX", @@ -61441,6 +80900,22 @@ impl fmt::Debug for PipelineCreateFlags { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for PipelineCreationFeedbackFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (PipelineCreationFeedbackFlagsEXT::VALID.0, "VALID"), + ( + PipelineCreationFeedbackFlagsEXT::APPLICATION_PIPELINE_CACHE_HIT.0, 
+ "APPLICATION_PIPELINE_CACHE_HIT", + ), + ( + PipelineCreationFeedbackFlagsEXT::BASE_PIPELINE_ACCELERATION.0, + "BASE_PIPELINE_ACCELERATION", + ), + ]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for PipelineDepthStencilStateCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -61459,6 +80934,22 @@ impl fmt::Debug for PipelineDynamicStateCreateFlags { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for PipelineExecutableStatisticFormatKHR { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::BOOL32 => Some("BOOL32"), + Self::INT64 => Some("INT64"), + Self::UINT64 => Some("UINT64"), + Self::FLOAT64 => Some("FLOAT64"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for PipelineInputAssemblyStateCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -61483,6 +80974,12 @@ impl fmt::Debug for PipelineRasterizationConservativeStateCreateFlagsEXT { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for PipelineRasterizationDepthClipStateCreateFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for PipelineRasterizationStateCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -61497,7 +80994,24 @@ impl fmt::Debug for PipelineRasterizationStateStreamCreateFlagsEXT { } impl fmt::Debug for PipelineShaderStageCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[]; + const KNOWN: &[(Flags, &str)] = &[ + ( + PipelineShaderStageCreateFlags::RESERVED_2_NV.0, + "RESERVED_2_NV", + ), + ( + PipelineShaderStageCreateFlags::ALLOW_VARYING_SUBGROUP_SIZE_EXT.0, + "ALLOW_VARYING_SUBGROUP_SIZE_EXT", + ), + ( + 
PipelineShaderStageCreateFlags::REQUIRE_FULL_SUBGROUPS_EXT.0, + "REQUIRE_FULL_SUBGROUPS_EXT", + ), + ( + PipelineShaderStageCreateFlags::RESERVED_3_KHR.0, + "RESERVED_3_KHR", + ), + ]; debug_flags(f, KNOWN, self.0) } } @@ -61547,27 +81061,27 @@ impl fmt::Debug for PipelineStageFlags { "CONDITIONAL_RENDERING_EXT", ), ( - PipelineStageFlags::COMMAND_PROCESS_NVX.0, - "COMMAND_PROCESS_NVX", + PipelineStageFlags::RAY_TRACING_SHADER_KHR.0, + "RAY_TRACING_SHADER_KHR", + ), + ( + PipelineStageFlags::ACCELERATION_STRUCTURE_BUILD_KHR.0, + "ACCELERATION_STRUCTURE_BUILD_KHR", ), ( PipelineStageFlags::SHADING_RATE_IMAGE_NV.0, "SHADING_RATE_IMAGE_NV", ), - ( - PipelineStageFlags::RAY_TRACING_SHADER_NV.0, - "RAY_TRACING_SHADER_NV", - ), - ( - PipelineStageFlags::ACCELERATION_STRUCTURE_BUILD_NV.0, - "ACCELERATION_STRUCTURE_BUILD_NV", - ), (PipelineStageFlags::TASK_SHADER_NV.0, "TASK_SHADER_NV"), (PipelineStageFlags::MESH_SHADER_NV.0, "MESH_SHADER_NV"), ( PipelineStageFlags::FRAGMENT_DENSITY_PROCESS_EXT.0, "FRAGMENT_DENSITY_PROCESS_EXT", ), + ( + PipelineStageFlags::COMMAND_PREPROCESS_NV.0, + "COMMAND_PREPROCESS_NV", + ), ]; debug_flags(f, KNOWN, self.0) } @@ -61730,6 +81244,21 @@ impl fmt::Debug for QueryPoolCreateFlags { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for QueryPoolSamplingModeINTEL { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL => { + Some("QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL") + } + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for QueryResultFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ @@ -61750,9 +81279,14 @@ impl fmt::Debug for QueryType { Self::RESERVED_8 => Some("RESERVED_8"), Self::RESERVED_4 => Some("RESERVED_4"), Self::TRANSFORM_FEEDBACK_STREAM_EXT => Some("TRANSFORM_FEEDBACK_STREAM_EXT"), - Self::ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV => { - 
Some("ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV") + Self::PERFORMANCE_QUERY_KHR => Some("PERFORMANCE_QUERY_KHR"), + Self::ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR => { + Some("ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR") } + Self::ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR => { + Some("ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR") + } + Self::PERFORMANCE_QUERY_INTEL => Some("PERFORMANCE_QUERY_INTEL"), _ => None, }; if let Some(x) = name { @@ -61806,7 +81340,7 @@ impl fmt::Debug for RasterizationOrderAMD { } } } -impl fmt::Debug for RayTracingShaderGroupTypeNV { +impl fmt::Debug for RayTracingShaderGroupTypeKHR { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { Self::GENERAL => Some("GENERAL"), @@ -61823,19 +81357,21 @@ impl fmt::Debug for RayTracingShaderGroupTypeNV { } impl fmt::Debug for RenderPassCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = - &[(RenderPassCreateFlags::RESERVED_0_KHR.0, "RESERVED_0_KHR")]; + const KNOWN: &[(Flags, &str)] = &[ + (RenderPassCreateFlags::RESERVED_0_KHR.0, "RESERVED_0_KHR"), + (RenderPassCreateFlags::TRANSFORM_QCOM.0, "TRANSFORM_QCOM"), + ]; debug_flags(f, KNOWN, self.0) } } -impl fmt::Debug for ResolveModeFlagsKHR { +impl fmt::Debug for ResolveModeFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[ - (ResolveModeFlagsKHR::NONE.0, "NONE"), - (ResolveModeFlagsKHR::SAMPLE_ZERO.0, "SAMPLE_ZERO"), - (ResolveModeFlagsKHR::AVERAGE.0, "AVERAGE"), - (ResolveModeFlagsKHR::MIN.0, "MIN"), - (ResolveModeFlagsKHR::MAX.0, "MAX"), + (ResolveModeFlags::NONE.0, "NONE"), + (ResolveModeFlags::SAMPLE_ZERO.0, "SAMPLE_ZERO"), + (ResolveModeFlags::AVERAGE.0, "AVERAGE"), + (ResolveModeFlags::MIN.0, "MIN"), + (ResolveModeFlags::MAX.0, "MAX"), ]; debug_flags(f, KNOWN, self.0) } @@ -61861,6 +81397,7 @@ impl fmt::Debug for Result { Self::ERROR_TOO_MANY_OBJECTS => Some("ERROR_TOO_MANY_OBJECTS"), 
Self::ERROR_FORMAT_NOT_SUPPORTED => Some("ERROR_FORMAT_NOT_SUPPORTED"), Self::ERROR_FRAGMENTED_POOL => Some("ERROR_FRAGMENTED_POOL"), + Self::ERROR_UNKNOWN => Some("ERROR_UNKNOWN"), Self::ERROR_SURFACE_LOST_KHR => Some("ERROR_SURFACE_LOST_KHR"), Self::ERROR_NATIVE_WINDOW_IN_USE_KHR => Some("ERROR_NATIVE_WINDOW_IN_USE_KHR"), Self::SUBOPTIMAL_KHR => Some("SUBOPTIMAL_KHR"), @@ -61868,14 +81405,27 @@ impl fmt::Debug for Result { Self::ERROR_INCOMPATIBLE_DISPLAY_KHR => Some("ERROR_INCOMPATIBLE_DISPLAY_KHR"), Self::ERROR_VALIDATION_FAILED_EXT => Some("ERROR_VALIDATION_FAILED_EXT"), Self::ERROR_INVALID_SHADER_NV => Some("ERROR_INVALID_SHADER_NV"), + Self::ERROR_INCOMPATIBLE_VERSION_KHR => Some("ERROR_INCOMPATIBLE_VERSION_KHR"), Self::ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT => { Some("ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT") } - Self::ERROR_FRAGMENTATION_EXT => Some("ERROR_FRAGMENTATION_EXT"), Self::ERROR_NOT_PERMITTED_EXT => Some("ERROR_NOT_PERMITTED_EXT"), - Self::ERROR_INVALID_DEVICE_ADDRESS_EXT => Some("ERROR_INVALID_DEVICE_ADDRESS_EXT"), + Self::ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT => { + Some("ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT") + } + Self::THREAD_IDLE_KHR => Some("THREAD_IDLE_KHR"), + Self::THREAD_DONE_KHR => Some("THREAD_DONE_KHR"), + Self::OPERATION_DEFERRED_KHR => Some("OPERATION_DEFERRED_KHR"), + Self::OPERATION_NOT_DEFERRED_KHR => Some("OPERATION_NOT_DEFERRED_KHR"), + Self::ERROR_PIPELINE_COMPILE_REQUIRED_EXT => { + Some("ERROR_PIPELINE_COMPILE_REQUIRED_EXT") + } Self::ERROR_OUT_OF_POOL_MEMORY => Some("ERROR_OUT_OF_POOL_MEMORY"), Self::ERROR_INVALID_EXTERNAL_HANDLE => Some("ERROR_INVALID_EXTERNAL_HANDLE"), + Self::ERROR_FRAGMENTATION => Some("ERROR_FRAGMENTATION"), + Self::ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => { + Some("ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS") + } _ => None, }; if let Some(x) = name { @@ -61906,6 +81456,7 @@ impl fmt::Debug for SamplerAddressMode { Self::MIRRORED_REPEAT => Some("MIRRORED_REPEAT"), 
Self::CLAMP_TO_EDGE => Some("CLAMP_TO_EDGE"), Self::CLAMP_TO_BORDER => Some("CLAMP_TO_BORDER"), + Self::MIRROR_CLAMP_TO_EDGE => Some("MIRROR_CLAMP_TO_EDGE"), _ => None, }; if let Some(x) = name { @@ -61941,7 +81492,7 @@ impl fmt::Debug for SamplerMipmapMode { } } } -impl fmt::Debug for SamplerReductionModeEXT { +impl fmt::Debug for SamplerReductionMode { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { Self::WEIGHTED_AVERAGE => Some("WEIGHTED_AVERAGE"), @@ -61987,6 +81538,22 @@ impl fmt::Debug for SamplerYcbcrRange { } } } +impl fmt::Debug for ScopeNV { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::DEVICE => Some("DEVICE"), + Self::WORKGROUP => Some("WORKGROUP"), + Self::SUBGROUP => Some("SUBGROUP"), + Self::QUEUE_FAMILY => Some("QUEUE_FAMILY"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for SemaphoreCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -61999,6 +81566,47 @@ impl fmt::Debug for SemaphoreImportFlags { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for SemaphoreType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + Self::BINARY => Some("BINARY"), + Self::TIMELINE => Some("TIMELINE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} +impl fmt::Debug for SemaphoreWaitFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(SemaphoreWaitFlags::ANY.0, "ANY")]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ShaderCorePropertiesFlagsAMD { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} +impl fmt::Debug for ShaderFloatControlsIndependence { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match *self { + 
Self::TYPE_32_ONLY => Some("TYPE_32_ONLY"), + Self::ALL => Some("ALL"), + Self::NONE => Some("NONE"), + _ => None, + }; + if let Some(x) = name { + f.write_str(x) + } else { + self.0.fmt(f) + } + } +} impl fmt::Debug for ShaderInfoTypeAMD { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -62016,7 +81624,8 @@ impl fmt::Debug for ShaderInfoTypeAMD { } impl fmt::Debug for ShaderModuleCreateFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - const KNOWN: &[(Flags, &str)] = &[]; + const KNOWN: &[(Flags, &str)] = + &[(ShaderModuleCreateFlags::RESERVED_0_NV.0, "RESERVED_0_NV")]; debug_flags(f, KNOWN, self.0) } } @@ -62037,12 +81646,12 @@ impl fmt::Debug for ShaderStageFlags { (ShaderStageFlags::COMPUTE.0, "COMPUTE"), (ShaderStageFlags::ALL_GRAPHICS.0, "ALL_GRAPHICS"), (ShaderStageFlags::ALL.0, "ALL"), - (ShaderStageFlags::RAYGEN_NV.0, "RAYGEN_NV"), - (ShaderStageFlags::ANY_HIT_NV.0, "ANY_HIT_NV"), - (ShaderStageFlags::CLOSEST_HIT_NV.0, "CLOSEST_HIT_NV"), - (ShaderStageFlags::MISS_NV.0, "MISS_NV"), - (ShaderStageFlags::INTERSECTION_NV.0, "INTERSECTION_NV"), - (ShaderStageFlags::CALLABLE_NV.0, "CALLABLE_NV"), + (ShaderStageFlags::RAYGEN_KHR.0, "RAYGEN_KHR"), + (ShaderStageFlags::ANY_HIT_KHR.0, "ANY_HIT_KHR"), + (ShaderStageFlags::CLOSEST_HIT_KHR.0, "CLOSEST_HIT_KHR"), + (ShaderStageFlags::MISS_KHR.0, "MISS_KHR"), + (ShaderStageFlags::INTERSECTION_KHR.0, "INTERSECTION_KHR"), + (ShaderStageFlags::CALLABLE_KHR.0, "CALLABLE_KHR"), (ShaderStageFlags::TASK_NV.0, "TASK_NV"), (ShaderStageFlags::MESH_NV.0, "MESH_NV"), ]; @@ -62114,10 +81723,7 @@ impl fmt::Debug for StencilFaceFlags { const KNOWN: &[(Flags, &str)] = &[ (StencilFaceFlags::FRONT.0, "FRONT"), (StencilFaceFlags::BACK.0, "BACK"), - ( - StencilFaceFlags::STENCIL_FRONT_AND_BACK.0, - "STENCIL_FRONT_AND_BACK", - ), + (StencilFaceFlags::FRONT_AND_BACK.0, "FRONT_AND_BACK"), ]; debug_flags(f, KNOWN, self.0) } @@ -62142,6 +81748,12 @@ impl fmt::Debug for StencilOp { } } } +impl 
fmt::Debug for StreamDescriptorSurfaceCreateFlagsGGP { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for StructureType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -62233,6 +81845,12 @@ impl fmt::Debug for StructureType { Self::ANDROID_SURFACE_CREATE_INFO_KHR => Some("ANDROID_SURFACE_CREATE_INFO_KHR"), Self::WIN32_SURFACE_CREATE_INFO_KHR => Some("WIN32_SURFACE_CREATE_INFO_KHR"), Self::NATIVE_BUFFER_ANDROID => Some("NATIVE_BUFFER_ANDROID"), + Self::SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID => { + Some("SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID") + } + Self::PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID => { + Some("PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID") + } Self::DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT => { Some("DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT") } @@ -62260,9 +81878,13 @@ impl fmt::Debug for StructureType { Self::PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT => { Some("PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT") } + Self::IMAGE_VIEW_HANDLE_INFO_NVX => Some("IMAGE_VIEW_HANDLE_INFO_NVX"), Self::TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD => { Some("TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD") } + Self::STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP => { + Some("STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP") + } Self::PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV => { Some("PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV") } @@ -62277,6 +81899,9 @@ impl fmt::Debug for StructureType { } Self::VALIDATION_FLAGS_EXT => Some("VALIDATION_FLAGS_EXT"), Self::VI_SURFACE_CREATE_INFO_NN => Some("VI_SURFACE_CREATE_INFO_NN"), + Self::PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT") + } Self::IMAGE_VIEW_ASTC_DECODE_MODE_EXT => Some("IMAGE_VIEW_ASTC_DECODE_MODE_EXT"), Self::PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT => { 
Some("PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT") @@ -62319,24 +81944,7 @@ impl fmt::Debug for StructureType { Self::CONDITIONAL_RENDERING_BEGIN_INFO_EXT => { Some("CONDITIONAL_RENDERING_BEGIN_INFO_EXT") } - Self::PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR => { - Some("PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR") - } Self::PRESENT_REGIONS_KHR => Some("PRESENT_REGIONS_KHR"), - Self::OBJECT_TABLE_CREATE_INFO_NVX => Some("OBJECT_TABLE_CREATE_INFO_NVX"), - Self::INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX => { - Some("INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX") - } - Self::CMD_PROCESS_COMMANDS_INFO_NVX => Some("CMD_PROCESS_COMMANDS_INFO_NVX"), - Self::CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX => { - Some("CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX") - } - Self::DEVICE_GENERATED_COMMANDS_LIMITS_NVX => { - Some("DEVICE_GENERATED_COMMANDS_LIMITS_NVX") - } - Self::DEVICE_GENERATED_COMMANDS_FEATURES_NVX => { - Some("DEVICE_GENERATED_COMMANDS_FEATURES_NVX") - } Self::PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV => { Some("PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV") } @@ -62364,14 +81972,13 @@ impl fmt::Debug for StructureType { Self::PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT => { Some("PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT") } + Self::PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT") + } + Self::PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT => { + Some("PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT") + } Self::HDR_METADATA_EXT => Some("HDR_METADATA_EXT"), - Self::ATTACHMENT_DESCRIPTION_2_KHR => Some("ATTACHMENT_DESCRIPTION_2_KHR"), - Self::ATTACHMENT_REFERENCE_2_KHR => Some("ATTACHMENT_REFERENCE_2_KHR"), - Self::SUBPASS_DESCRIPTION_2_KHR => Some("SUBPASS_DESCRIPTION_2_KHR"), - Self::SUBPASS_DEPENDENCY_2_KHR => Some("SUBPASS_DEPENDENCY_2_KHR"), - Self::RENDER_PASS_CREATE_INFO_2_KHR => Some("RENDER_PASS_CREATE_INFO_2_KHR"), - Self::SUBPASS_BEGIN_INFO_KHR => 
Some("SUBPASS_BEGIN_INFO_KHR"), - Self::SUBPASS_END_INFO_KHR => Some("SUBPASS_END_INFO_KHR"), Self::SHARED_PRESENT_SURFACE_CAPABILITIES_KHR => { Some("SHARED_PRESENT_SURFACE_CAPABILITIES_KHR") } @@ -62380,6 +81987,21 @@ impl fmt::Debug for StructureType { Self::FENCE_GET_WIN32_HANDLE_INFO_KHR => Some("FENCE_GET_WIN32_HANDLE_INFO_KHR"), Self::IMPORT_FENCE_FD_INFO_KHR => Some("IMPORT_FENCE_FD_INFO_KHR"), Self::FENCE_GET_FD_INFO_KHR => Some("FENCE_GET_FD_INFO_KHR"), + Self::PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR => { + Some("PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR") + } + Self::PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR => { + Some("PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR") + } + Self::QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR => { + Some("QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR") + } + Self::PERFORMANCE_QUERY_SUBMIT_INFO_KHR => Some("PERFORMANCE_QUERY_SUBMIT_INFO_KHR"), + Self::ACQUIRE_PROFILING_LOCK_INFO_KHR => Some("ACQUIRE_PROFILING_LOCK_INFO_KHR"), + Self::PERFORMANCE_COUNTER_KHR => Some("PERFORMANCE_COUNTER_KHR"), + Self::PERFORMANCE_COUNTER_DESCRIPTION_KHR => { + Some("PERFORMANCE_COUNTER_DESCRIPTION_KHR") + } Self::PHYSICAL_DEVICE_SURFACE_INFO_2_KHR => Some("PHYSICAL_DEVICE_SURFACE_INFO_2_KHR"), Self::SURFACE_CAPABILITIES_2_KHR => Some("SURFACE_CAPABILITIES_2_KHR"), Self::SURFACE_FORMAT_2_KHR => Some("SURFACE_FORMAT_2_KHR"), @@ -62415,12 +82037,6 @@ impl fmt::Debug for StructureType { Some("MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID") } Self::EXTERNAL_FORMAT_ANDROID => Some("EXTERNAL_FORMAT_ANDROID"), - Self::PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT => { - Some("PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT") - } - Self::SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT => { - Some("SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT") - } Self::PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT => { Some("PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT") } @@ -62444,7 +82060,6 @@ impl fmt::Debug for StructureType { 
Some("PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT") } Self::MULTISAMPLE_PROPERTIES_EXT => Some("MULTISAMPLE_PROPERTIES_EXT"), - Self::IMAGE_FORMAT_LIST_CREATE_INFO_KHR => Some("IMAGE_FORMAT_LIST_CREATE_INFO_KHR"), Self::PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT => { Some("PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT") } @@ -62457,9 +82072,74 @@ impl fmt::Debug for StructureType { Self::PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV => { Some("PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV") } + Self::BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR => { + Some("BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR") + } + Self::WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR => { + Some("WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR") + } + Self::ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR => { + Some("ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR") + } + Self::ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR => { + Some("ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR") + } + Self::ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR => { + Some("ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR") + } + Self::ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR => { + Some("ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR") + } + Self::ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR => { + Some("ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR") + } + Self::ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR => { + Some("ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR") + } + Self::ACCELERATION_STRUCTURE_GEOMETRY_KHR => { + Some("ACCELERATION_STRUCTURE_GEOMETRY_KHR") + } + Self::ACCELERATION_STRUCTURE_INFO_KHR => Some("ACCELERATION_STRUCTURE_INFO_KHR"), + Self::ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR => { + Some("ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR") + } + Self::ACCELERATION_STRUCTURE_VERSION_KHR => Some("ACCELERATION_STRUCTURE_VERSION_KHR"), + Self::COPY_ACCELERATION_STRUCTURE_INFO_KHR => { + 
Some("COPY_ACCELERATION_STRUCTURE_INFO_KHR") + } + Self::COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR => { + Some("COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR") + } + Self::COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR => { + Some("COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR") + } + Self::PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR => { + Some("PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR") + } + Self::PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR => { + Some("PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR") + } + Self::RAY_TRACING_PIPELINE_CREATE_INFO_KHR => { + Some("RAY_TRACING_PIPELINE_CREATE_INFO_KHR") + } + Self::RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR => { + Some("RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR") + } + Self::ACCELERATION_STRUCTURE_CREATE_INFO_KHR => { + Some("ACCELERATION_STRUCTURE_CREATE_INFO_KHR") + } + Self::RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR => { + Some("RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR") + } Self::PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV => { Some("PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV") } + Self::PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV => { + Some("PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV") + } + Self::PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV => { + Some("PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV") + } Self::DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT => { Some("DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT") } @@ -62480,21 +82160,6 @@ impl fmt::Debug for StructureType { Self::SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT => { Some("SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT") } - Self::DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT => { - Some("DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT") - } - Self::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT => { - Some("PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT") - } - Self::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT => { - Some("PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT") - } - 
Self::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT => { - Some("DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT") - } - Self::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT => { - Some("DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT") - } Self::PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV => { Some("PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV") } @@ -62516,12 +82181,6 @@ impl fmt::Debug for StructureType { Self::GEOMETRY_NV => Some("GEOMETRY_NV"), Self::GEOMETRY_TRIANGLES_NV => Some("GEOMETRY_TRIANGLES_NV"), Self::GEOMETRY_AABB_NV => Some("GEOMETRY_AABB_NV"), - Self::BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV => { - Some("BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV") - } - Self::WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV => { - Some("WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV") - } Self::ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV => { Some("ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV") } @@ -62538,12 +82197,15 @@ impl fmt::Debug for StructureType { Self::PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV => { Some("PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV") } + Self::PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT => { + Some("PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT") + } + Self::FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT => { + Some("FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT") + } Self::DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT => { Some("DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT") } - Self::PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR => { - Some("PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR") - } Self::IMPORT_MEMORY_HOST_POINTER_INFO_EXT => { Some("IMPORT_MEMORY_HOST_POINTER_INFO_EXT") } @@ -62551,8 +82213,11 @@ impl fmt::Debug for StructureType { Self::PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT => { Some("PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT") } - 
Self::PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR => { - Some("PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR") + Self::PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR => { + Some("PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR") + } + Self::PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD => { + Some("PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD") } Self::CALIBRATED_TIMESTAMP_INFO_EXT => Some("CALIBRATED_TIMESTAMP_INFO_EXT"), Self::PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD => { @@ -62570,17 +82235,9 @@ impl fmt::Debug for StructureType { Self::PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT => { Some("PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT") } - Self::PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR => { - Some("PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR") - } - Self::PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR => { - Some("PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR") - } - Self::PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR => { - Some("PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR") - } - Self::SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR => { - Some("SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR") + Self::PRESENT_FRAME_TOKEN_GGP => Some("PRESENT_FRAME_TOKEN_GGP"), + Self::PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT => { + Some("PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT") } Self::PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV => { Some("PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV") @@ -62607,15 +82264,34 @@ impl fmt::Debug for StructureType { Self::QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV => { Some("QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV") } - Self::PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR => { - Some("PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR") + Self::PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL => { + Some("PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL") + } + Self::QUERY_POOL_CREATE_INFO_INTEL => Some("QUERY_POOL_CREATE_INFO_INTEL"), + Self::INITIALIZE_PERFORMANCE_API_INFO_INTEL => { + 
Some("INITIALIZE_PERFORMANCE_API_INFO_INTEL") + } + Self::PERFORMANCE_MARKER_INFO_INTEL => Some("PERFORMANCE_MARKER_INFO_INTEL"), + Self::PERFORMANCE_STREAM_MARKER_INFO_INTEL => { + Some("PERFORMANCE_STREAM_MARKER_INFO_INTEL") + } + Self::PERFORMANCE_OVERRIDE_INFO_INTEL => Some("PERFORMANCE_OVERRIDE_INFO_INTEL"), + Self::PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL => { + Some("PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL") } Self::PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT => { Some("PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT") } + Self::DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD => { + Some("DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD") + } + Self::SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD => { + Some("SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD") + } Self::IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA => { Some("IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA") } + Self::METAL_SURFACE_CREATE_INFO_EXT => Some("METAL_SURFACE_CREATE_INFO_EXT"), Self::PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT => { Some("PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT") } @@ -62625,8 +82301,20 @@ impl fmt::Debug for StructureType { Self::RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT => { Some("RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT") } - Self::PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT => { - Some("PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT") + Self::PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT") + } + Self::PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT => { + Some("PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT") + } + Self::PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD => { + Some("PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD") + } + Self::PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD => { + 
Some("PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD") } Self::PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT => { Some("PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT") @@ -62635,17 +82323,121 @@ impl fmt::Debug for StructureType { Some("PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT") } Self::MEMORY_PRIORITY_ALLOCATE_INFO_EXT => Some("MEMORY_PRIORITY_ALLOCATE_INFO_EXT"), - Self::PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT => { - Some("PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT") + Self::SURFACE_PROTECTED_CAPABILITIES_KHR => Some("SURFACE_PROTECTED_CAPABILITIES_KHR"), + Self::PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV => { + Some("PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV") + } + Self::PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT") } - Self::BUFFER_DEVICE_ADDRESS_INFO_EXT => Some("BUFFER_DEVICE_ADDRESS_INFO_EXT"), Self::BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT => { Some("BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT") } - Self::IMAGE_STENCIL_USAGE_CREATE_INFO_EXT => { - Some("IMAGE_STENCIL_USAGE_CREATE_INFO_EXT") + Self::PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT") } Self::VALIDATION_FEATURES_EXT => Some("VALIDATION_FEATURES_EXT"), + Self::PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV => { + Some("PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV") + } + Self::COOPERATIVE_MATRIX_PROPERTIES_NV => Some("COOPERATIVE_MATRIX_PROPERTIES_NV"), + Self::PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV => { + Some("PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV") + } + Self::PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV => { + Some("PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV") + } + Self::PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV => { + Some("PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV") + } + Self::FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV => { + Some("FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV") + } + 
Self::PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT") + } + Self::SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT => { + Some("SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT") + } + Self::SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT => { + Some("SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT") + } + Self::SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT => { + Some("SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT") + } + Self::HEADLESS_SURFACE_CREATE_INFO_EXT => Some("HEADLESS_SURFACE_CREATE_INFO_EXT"), + Self::PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT") + } + Self::PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT => { + Some("PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT") + } + Self::PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT") + } + Self::PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT") + } + Self::DEFERRED_OPERATION_INFO_KHR => Some("DEFERRED_OPERATION_INFO_KHR"), + Self::PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR => { + Some("PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR") + } + Self::PIPELINE_INFO_KHR => Some("PIPELINE_INFO_KHR"), + Self::PIPELINE_EXECUTABLE_PROPERTIES_KHR => Some("PIPELINE_EXECUTABLE_PROPERTIES_KHR"), + Self::PIPELINE_EXECUTABLE_INFO_KHR => Some("PIPELINE_EXECUTABLE_INFO_KHR"), + Self::PIPELINE_EXECUTABLE_STATISTIC_KHR => Some("PIPELINE_EXECUTABLE_STATISTIC_KHR"), + Self::PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR => { + Some("PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR") + } + Self::PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT") + } + 
Self::PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV => { + Some("PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV") + } + Self::GRAPHICS_SHADER_GROUP_CREATE_INFO_NV => { + Some("GRAPHICS_SHADER_GROUP_CREATE_INFO_NV") + } + Self::GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV => { + Some("GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV") + } + Self::INDIRECT_COMMANDS_LAYOUT_TOKEN_NV => Some("INDIRECT_COMMANDS_LAYOUT_TOKEN_NV"), + Self::INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV => { + Some("INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV") + } + Self::GENERATED_COMMANDS_INFO_NV => Some("GENERATED_COMMANDS_INFO_NV"), + Self::GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV => { + Some("GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV") + } + Self::PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV => { + Some("PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV") + } + Self::PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT => { + Some("PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT") + } + Self::COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM => { + Some("COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM") + } + Self::RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM => { + Some("RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM") + } + Self::PIPELINE_LIBRARY_CREATE_INFO_KHR => Some("PIPELINE_LIBRARY_CREATE_INFO_KHR"), + Self::PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT => { + Some("PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT") + } + Self::PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV => { + Some("PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV") + } + Self::DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV => { + Some("DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV") + } + Self::RESERVED_QCOM => Some("RESERVED_QCOM"), Self::PHYSICAL_DEVICE_SUBGROUP_PROPERTIES => { 
Some("PHYSICAL_DEVICE_SUBGROUP_PROPERTIES") } @@ -62710,8 +82502,8 @@ impl fmt::Debug for StructureType { Self::PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES => { Some("PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES") } - Self::PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES => { - Some("PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES") + Self::PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES => { + Some("PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES") } Self::PROTECTED_SUBMIT_INFO => Some("PROTECTED_SUBMIT_INFO"), Self::PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES => { @@ -62764,8 +82556,122 @@ impl fmt::Debug for StructureType { Some("PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES") } Self::DESCRIPTOR_SET_LAYOUT_SUPPORT => Some("DESCRIPTOR_SET_LAYOUT_SUPPORT"), - Self::PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES => { - Some("PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES") + Self::PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES => { + Some("PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES") + } + Self::PHYSICAL_DEVICE_VULKAN_1_1_FEATURES => { + Some("PHYSICAL_DEVICE_VULKAN_1_1_FEATURES") + } + Self::PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES => { + Some("PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES") + } + Self::PHYSICAL_DEVICE_VULKAN_1_2_FEATURES => { + Some("PHYSICAL_DEVICE_VULKAN_1_2_FEATURES") + } + Self::PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES => { + Some("PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES") + } + Self::IMAGE_FORMAT_LIST_CREATE_INFO => Some("IMAGE_FORMAT_LIST_CREATE_INFO"), + Self::ATTACHMENT_DESCRIPTION_2 => Some("ATTACHMENT_DESCRIPTION_2"), + Self::ATTACHMENT_REFERENCE_2 => Some("ATTACHMENT_REFERENCE_2"), + Self::SUBPASS_DESCRIPTION_2 => Some("SUBPASS_DESCRIPTION_2"), + Self::SUBPASS_DEPENDENCY_2 => Some("SUBPASS_DEPENDENCY_2"), + Self::RENDER_PASS_CREATE_INFO_2 => Some("RENDER_PASS_CREATE_INFO_2"), + Self::SUBPASS_BEGIN_INFO => Some("SUBPASS_BEGIN_INFO"), + Self::SUBPASS_END_INFO => Some("SUBPASS_END_INFO"), + Self::PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES => { + Some("PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES") + } + 
Self::PHYSICAL_DEVICE_DRIVER_PROPERTIES => Some("PHYSICAL_DEVICE_DRIVER_PROPERTIES"), + Self::PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES => { + Some("PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES") + } + Self::PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES => { + Some("PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES") + } + Self::PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES => { + Some("PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES") + } + Self::DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO => { + Some("DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO") + } + Self::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES => { + Some("PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES") + } + Self::PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES => { + Some("PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES") + } + Self::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO => { + Some("DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO") + } + Self::DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT => { + Some("DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT") + } + Self::PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES => { + Some("PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES") + } + Self::SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE => { + Some("SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE") + } + Self::PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES => { + Some("PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES") + } + Self::IMAGE_STENCIL_USAGE_CREATE_INFO => Some("IMAGE_STENCIL_USAGE_CREATE_INFO"), + Self::PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES => { + Some("PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES") + } + Self::SAMPLER_REDUCTION_MODE_CREATE_INFO => Some("SAMPLER_REDUCTION_MODE_CREATE_INFO"), + Self::PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES => { + Some("PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES") + } + Self::PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES => { + Some("PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES") + } + 
Self::FRAMEBUFFER_ATTACHMENTS_CREATE_INFO => { + Some("FRAMEBUFFER_ATTACHMENTS_CREATE_INFO") + } + Self::FRAMEBUFFER_ATTACHMENT_IMAGE_INFO => Some("FRAMEBUFFER_ATTACHMENT_IMAGE_INFO"), + Self::RENDER_PASS_ATTACHMENT_BEGIN_INFO => Some("RENDER_PASS_ATTACHMENT_BEGIN_INFO"), + Self::PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES => { + Some("PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES") + } + Self::PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES => { + Some("PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES") + } + Self::PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES => { + Some("PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES") + } + Self::ATTACHMENT_REFERENCE_STENCIL_LAYOUT => { + Some("ATTACHMENT_REFERENCE_STENCIL_LAYOUT") + } + Self::ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT => { + Some("ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT") + } + Self::PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES => { + Some("PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES") + } + Self::PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES => { + Some("PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES") + } + Self::PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES => { + Some("PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES") + } + Self::SEMAPHORE_TYPE_CREATE_INFO => Some("SEMAPHORE_TYPE_CREATE_INFO"), + Self::TIMELINE_SEMAPHORE_SUBMIT_INFO => Some("TIMELINE_SEMAPHORE_SUBMIT_INFO"), + Self::SEMAPHORE_WAIT_INFO => Some("SEMAPHORE_WAIT_INFO"), + Self::SEMAPHORE_SIGNAL_INFO => Some("SEMAPHORE_SIGNAL_INFO"), + Self::PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES => { + Some("PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES") + } + Self::BUFFER_DEVICE_ADDRESS_INFO => Some("BUFFER_DEVICE_ADDRESS_INFO"), + Self::BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO => { + Some("BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO") + } + Self::MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO => { + Some("MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO") + } + Self::DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO => { + 
Some("DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO") } _ => None, }; @@ -62817,6 +82723,14 @@ impl fmt::Debug for SubpassDescriptionFlags { SubpassDescriptionFlags::PER_VIEW_POSITION_X_ONLY_NVX.0, "PER_VIEW_POSITION_X_ONLY_NVX", ), + ( + SubpassDescriptionFlags::RESERVED_2_QCOM.0, + "RESERVED_2_QCOM", + ), + ( + SubpassDescriptionFlags::RESERVED_3_QCOM.0, + "RESERVED_3_QCOM", + ), ]; debug_flags(f, KNOWN, self.0) } @@ -62868,6 +82782,12 @@ impl fmt::Debug for SwapchainCreateFlagsKHR { debug_flags(f, KNOWN, self.0) } } +impl fmt::Debug for SwapchainImageUsageFlagsANDROID { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[(SwapchainImageUsageFlagsANDROID::SHARED.0, "SHARED")]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for SystemAllocationScope { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { @@ -62915,6 +82835,26 @@ impl fmt::Debug for TimeDomainEXT { } } } +impl fmt::Debug for ToolPurposeFlagsEXT { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const KNOWN: &[(Flags, &str)] = &[ + (ToolPurposeFlagsEXT::VALIDATION.0, "VALIDATION"), + (ToolPurposeFlagsEXT::PROFILING.0, "PROFILING"), + (ToolPurposeFlagsEXT::TRACING.0, "TRACING"), + ( + ToolPurposeFlagsEXT::ADDITIONAL_FEATURES.0, + "ADDITIONAL_FEATURES", + ), + ( + ToolPurposeFlagsEXT::MODIFYING_FEATURES.0, + "MODIFYING_FEATURES", + ), + (ToolPurposeFlagsEXT::DEBUG_REPORTING.0, "DEBUG_REPORTING"), + (ToolPurposeFlagsEXT::DEBUG_MARKERS.0, "DEBUG_MARKERS"), + ]; + debug_flags(f, KNOWN, self.0) + } +} impl fmt::Debug for ValidationCacheCreateFlagsEXT { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const KNOWN: &[(Flags, &str)] = &[]; @@ -62972,6 +82912,8 @@ impl fmt::Debug for ValidationFeatureEnableEXT { let name = match *self { Self::GPU_ASSISTED => Some("GPU_ASSISTED"), Self::GPU_ASSISTED_RESERVE_BINDING_SLOT => Some("GPU_ASSISTED_RESERVE_BINDING_SLOT"), + Self::BEST_PRACTICES => Some("BEST_PRACTICES"), + 
Self::DEBUG_PRINTF => Some("DEBUG_PRINTF"), _ => None, }; if let Some(x) = name { @@ -62987,6 +82929,7 @@ impl fmt::Debug for VendorId { Self::VIV => Some("VIV"), Self::VSI => Some("VSI"), Self::KAZAN => Some("KAZAN"), + Self::CODEPLAY => Some("CODEPLAY"), _ => None, }; if let Some(x) = name { @@ -63060,7 +83003,11 @@ impl fmt::Debug for XlibSurfaceCreateFlagsKHR { debug_flags(f, KNOWN, self.0) } } +pub type GeometryFlagsNV = GeometryFlagsKHR; +pub type GeometryInstanceFlagsNV = GeometryInstanceFlagsKHR; +pub type BuildAccelerationStructureFlagsNV = BuildAccelerationStructureFlagsKHR; pub type DescriptorUpdateTemplateCreateFlagsKHR = DescriptorUpdateTemplateCreateFlags; +pub type SemaphoreWaitFlagsKHR = SemaphoreWaitFlags; pub type PeerMemoryFeatureFlagsKHR = PeerMemoryFeatureFlags; pub type MemoryAllocateFlagsKHR = MemoryAllocateFlags; pub type CommandPoolTrimFlagsKHR = CommandPoolTrimFlags; @@ -63072,14 +83019,27 @@ pub type SemaphoreImportFlagsKHR = SemaphoreImportFlags; pub type ExternalFenceHandleTypeFlagsKHR = ExternalFenceHandleTypeFlags; pub type ExternalFenceFeatureFlagsKHR = ExternalFenceFeatureFlags; pub type FenceImportFlagsKHR = FenceImportFlags; +pub type DescriptorBindingFlagsEXT = DescriptorBindingFlags; +pub type ResolveModeFlagsKHR = ResolveModeFlags; pub type DescriptorUpdateTemplateKHR = DescriptorUpdateTemplate; pub type SamplerYcbcrConversionKHR = SamplerYcbcrConversion; +pub type AccelerationStructureNV = AccelerationStructureKHR; pub type DescriptorUpdateTemplateTypeKHR = DescriptorUpdateTemplateType; pub type PointClippingBehaviorKHR = PointClippingBehavior; +pub type SemaphoreTypeKHR = SemaphoreType; +pub type CopyAccelerationStructureModeNV = CopyAccelerationStructureModeKHR; +pub type AccelerationStructureTypeNV = AccelerationStructureTypeKHR; +pub type GeometryTypeNV = GeometryTypeKHR; +pub type RayTracingShaderGroupTypeNV = RayTracingShaderGroupTypeKHR; +pub type AccelerationStructureMemoryRequirementsTypeNV = + 
AccelerationStructureMemoryRequirementsTypeKHR; pub type TessellationDomainOriginKHR = TessellationDomainOrigin; pub type SamplerYcbcrModelConversionKHR = SamplerYcbcrModelConversion; pub type SamplerYcbcrRangeKHR = SamplerYcbcrRange; pub type ChromaLocationKHR = ChromaLocation; +pub type SamplerReductionModeEXT = SamplerReductionMode; +pub type ShaderFloatControlsIndependenceKHR = ShaderFloatControlsIndependence; +pub type DriverIdKHR = DriverId; pub type PhysicalDeviceFeatures2KHR = PhysicalDeviceFeatures2; pub type PhysicalDeviceProperties2KHR = PhysicalDeviceProperties2; pub type FormatProperties2KHR = FormatProperties2; @@ -63089,7 +83049,11 @@ pub type QueueFamilyProperties2KHR = QueueFamilyProperties2; pub type PhysicalDeviceMemoryProperties2KHR = PhysicalDeviceMemoryProperties2; pub type SparseImageFormatProperties2KHR = SparseImageFormatProperties2; pub type PhysicalDeviceSparseImageFormatInfo2KHR = PhysicalDeviceSparseImageFormatInfo2; -pub type PhysicalDeviceVariablePointerFeaturesKHR = PhysicalDeviceVariablePointerFeatures; +pub type ConformanceVersionKHR = ConformanceVersion; +pub type PhysicalDeviceDriverPropertiesKHR = PhysicalDeviceDriverProperties; +pub type PhysicalDeviceVariablePointersFeaturesKHR = PhysicalDeviceVariablePointersFeatures; +pub type PhysicalDeviceVariablePointerFeaturesKHR = PhysicalDeviceVariablePointersFeatures; +pub type PhysicalDeviceVariablePointerFeatures = PhysicalDeviceVariablePointersFeatures; pub type ExternalMemoryPropertiesKHR = ExternalMemoryProperties; pub type PhysicalDeviceExternalImageFormatInfoKHR = PhysicalDeviceExternalImageFormatInfo; pub type ExternalImageFormatPropertiesKHR = ExternalImageFormatProperties; @@ -63124,6 +83088,8 @@ pub type DescriptorUpdateTemplateCreateInfoKHR = DescriptorUpdateTemplateCreateI pub type InputAttachmentAspectReferenceKHR = InputAttachmentAspectReference; pub type RenderPassInputAttachmentAspectCreateInfoKHR = RenderPassInputAttachmentAspectCreateInfo; pub type 
PhysicalDevice16BitStorageFeaturesKHR = PhysicalDevice16BitStorageFeatures; +pub type PhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR = + PhysicalDeviceShaderSubgroupExtendedTypesFeatures; pub type BufferMemoryRequirementsInfo2KHR = BufferMemoryRequirementsInfo2; pub type ImageMemoryRequirementsInfo2KHR = ImageMemoryRequirementsInfo2; pub type ImageSparseMemoryRequirementsInfo2KHR = ImageSparseMemoryRequirementsInfo2; @@ -63143,5 +83109,65 @@ pub type PhysicalDeviceSamplerYcbcrConversionFeaturesKHR = PhysicalDeviceSamplerYcbcrConversionFeatures; pub type SamplerYcbcrConversionImageFormatPropertiesKHR = SamplerYcbcrConversionImageFormatProperties; +pub type PhysicalDeviceSamplerFilterMinmaxPropertiesEXT = + PhysicalDeviceSamplerFilterMinmaxProperties; +pub type SamplerReductionModeCreateInfoEXT = SamplerReductionModeCreateInfo; +pub type ImageFormatListCreateInfoKHR = ImageFormatListCreateInfo; pub type PhysicalDeviceMaintenance3PropertiesKHR = PhysicalDeviceMaintenance3Properties; pub type DescriptorSetLayoutSupportKHR = DescriptorSetLayoutSupport; +pub type PhysicalDeviceShaderDrawParameterFeatures = PhysicalDeviceShaderDrawParametersFeatures; +pub type PhysicalDeviceShaderFloat16Int8FeaturesKHR = PhysicalDeviceShaderFloat16Int8Features; +pub type PhysicalDeviceFloat16Int8FeaturesKHR = PhysicalDeviceShaderFloat16Int8Features; +pub type PhysicalDeviceFloatControlsPropertiesKHR = PhysicalDeviceFloatControlsProperties; +pub type PhysicalDeviceHostQueryResetFeaturesEXT = PhysicalDeviceHostQueryResetFeatures; +pub type PhysicalDeviceDescriptorIndexingFeaturesEXT = PhysicalDeviceDescriptorIndexingFeatures; +pub type PhysicalDeviceDescriptorIndexingPropertiesEXT = PhysicalDeviceDescriptorIndexingProperties; +pub type DescriptorSetLayoutBindingFlagsCreateInfoEXT = DescriptorSetLayoutBindingFlagsCreateInfo; +pub type DescriptorSetVariableDescriptorCountAllocateInfoEXT = + DescriptorSetVariableDescriptorCountAllocateInfo; +pub type 
DescriptorSetVariableDescriptorCountLayoutSupportEXT = + DescriptorSetVariableDescriptorCountLayoutSupport; +pub type AttachmentDescription2KHR = AttachmentDescription2; +pub type AttachmentReference2KHR = AttachmentReference2; +pub type SubpassDescription2KHR = SubpassDescription2; +pub type SubpassDependency2KHR = SubpassDependency2; +pub type RenderPassCreateInfo2KHR = RenderPassCreateInfo2; +pub type SubpassBeginInfoKHR = SubpassBeginInfo; +pub type SubpassEndInfoKHR = SubpassEndInfo; +pub type PhysicalDeviceTimelineSemaphoreFeaturesKHR = PhysicalDeviceTimelineSemaphoreFeatures; +pub type PhysicalDeviceTimelineSemaphorePropertiesKHR = PhysicalDeviceTimelineSemaphoreProperties; +pub type SemaphoreTypeCreateInfoKHR = SemaphoreTypeCreateInfo; +pub type TimelineSemaphoreSubmitInfoKHR = TimelineSemaphoreSubmitInfo; +pub type SemaphoreWaitInfoKHR = SemaphoreWaitInfo; +pub type SemaphoreSignalInfoKHR = SemaphoreSignalInfo; +pub type PhysicalDevice8BitStorageFeaturesKHR = PhysicalDevice8BitStorageFeatures; +pub type PhysicalDeviceVulkanMemoryModelFeaturesKHR = PhysicalDeviceVulkanMemoryModelFeatures; +pub type PhysicalDeviceShaderAtomicInt64FeaturesKHR = PhysicalDeviceShaderAtomicInt64Features; +pub type PhysicalDeviceDepthStencilResolvePropertiesKHR = + PhysicalDeviceDepthStencilResolveProperties; +pub type SubpassDescriptionDepthStencilResolveKHR = SubpassDescriptionDepthStencilResolve; +pub type BindAccelerationStructureMemoryInfoNV = BindAccelerationStructureMemoryInfoKHR; +pub type WriteDescriptorSetAccelerationStructureNV = WriteDescriptorSetAccelerationStructureKHR; +pub type ImageStencilUsageCreateInfoEXT = ImageStencilUsageCreateInfo; +pub type PhysicalDeviceScalarBlockLayoutFeaturesEXT = PhysicalDeviceScalarBlockLayoutFeatures; +pub type PhysicalDeviceUniformBufferStandardLayoutFeaturesKHR = + PhysicalDeviceUniformBufferStandardLayoutFeatures; +pub type PhysicalDeviceBufferDeviceAddressFeaturesKHR = PhysicalDeviceBufferDeviceAddressFeatures; +pub type 
PhysicalDeviceBufferAddressFeaturesEXT = PhysicalDeviceBufferDeviceAddressFeaturesEXT; +pub type BufferDeviceAddressInfoKHR = BufferDeviceAddressInfo; +pub type BufferDeviceAddressInfoEXT = BufferDeviceAddressInfo; +pub type BufferOpaqueCaptureAddressCreateInfoKHR = BufferOpaqueCaptureAddressCreateInfo; +pub type PhysicalDeviceImagelessFramebufferFeaturesKHR = PhysicalDeviceImagelessFramebufferFeatures; +pub type FramebufferAttachmentsCreateInfoKHR = FramebufferAttachmentsCreateInfo; +pub type FramebufferAttachmentImageInfoKHR = FramebufferAttachmentImageInfo; +pub type RenderPassAttachmentBeginInfoKHR = RenderPassAttachmentBeginInfo; +pub type QueryPoolCreateInfoINTEL = QueryPoolPerformanceQueryCreateInfoINTEL; +pub type PhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR = + PhysicalDeviceSeparateDepthStencilLayoutsFeatures; +pub type AttachmentReferenceStencilLayoutKHR = AttachmentReferenceStencilLayout; +pub type AttachmentDescriptionStencilLayoutKHR = AttachmentDescriptionStencilLayout; +pub type MemoryOpaqueCaptureAddressAllocateInfoKHR = MemoryOpaqueCaptureAddressAllocateInfo; +pub type DeviceMemoryOpaqueCaptureAddressInfoKHR = DeviceMemoryOpaqueCaptureAddressInfo; +pub type AabbPositionsNV = AabbPositionsKHR; +pub type TransformMatrixNV = TransformMatrixKHR; +pub type AccelerationStructureInstanceNV = AccelerationStructureInstanceKHR; diff --git a/third_party/rust/ash/tests/constant_size_arrays.rs b/third_party/rust/ash/tests/constant_size_arrays.rs index f736c0a4fe14..c58cd5e111c7 100644 --- a/third_party/rust/ash/tests/constant_size_arrays.rs +++ b/third_party/rust/ash/tests/constant_size_arrays.rs @@ -1,4 +1,4 @@ -extern crate ash; +use ash; use ash::vk::{PhysicalDeviceProperties, PipelineColorBlendStateCreateInfo}; @@ -27,15 +27,10 @@ fn assert_struct_field_is_array() { #[allow(dead_code)] fn assert_ffi_array_param_is_pointer() { use ash::version::DeviceV1_0; - unsafe { - // don't run it, just make sure it compiles - if false { - let device: 
ash::Device = std::mem::uninitialized(); - let cmd_buffer = std::mem::uninitialized(); + // don't run it, just make sure it compiles + unsafe fn dummy(device: &ash::Device, cmd_buffer: ash::vk::CommandBuffer) { + let blend_constants: [f32; 4] = [0.0, 0.0, 0.0, 0.0]; - let blend_constants: [f32; 4] = [0.0, 0.0, 0.0, 0.0]; - - device.cmd_set_blend_constants(cmd_buffer, &blend_constants); - } + device.cmd_set_blend_constants(cmd_buffer, &blend_constants); } } diff --git a/third_party/rust/ash/tests/display.rs b/third_party/rust/ash/tests/display.rs index 87460226de54..f4131fdc7c57 100644 --- a/third_party/rust/ash/tests/display.rs +++ b/third_party/rust/ash/tests/display.rs @@ -1,4 +1,3 @@ -extern crate ash; use ash::vk; #[test] diff --git a/third_party/rust/colorful/.cargo-checksum.json b/third_party/rust/colorful/.cargo-checksum.json deleted file mode 100644 index a1a56cb23c2e..000000000000 --- a/third_party/rust/colorful/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"4b57cc26ea5fe95d54cd5ddaa036a435b1f996cd283283efd6675f1f4b77fed1","CodeOfConduct.md":"9a97bc250bc21e1b3fe7d7217296a33b7379e082e293f206afb6bcc93f2d3cce","README.md":"0059d0114dc6283f54eacf7cc285e0b7f3fe5db35107f5841844c8949c67000e","license":"48da2f39e100d4085767e94966b43f4fa95ff6a0698fba57ed460914e35f94a0","rustfmt.toml":"2e13002b5bbc7c434e8b6e669bd5e9417c58607bacf2f3c8711e9fc1745dd302","src/core/color_string.rs":"e860320fdce6d9590eceee040eda4e769a100e9f3a3ef24c3620bd523482fff8","src/core/colors.rs":"d22bf7c763994259ba02320ec64d6349a4f596cbf68c2161c09130f2fad16219","src/core/hsl.rs":"7308dd6b02b74b1e13eb61d4f960d52a50c9e01621404fe7468bedcf29596de5","src/core/mod.rs":"331493703e3c09b36cbf7a51d7c2b6b2455f34b7558fac172a847470b1e31fd9","src/core/rgb.rs":"c5dda4eb726a1d137658b22aa5d69958c4c710a595dff6a966395003918c1e95","src/core/style.rs":"315def912b8df5f4d6efa3b92adf5c5d21caaa8b3688dab79348140909551d9c","src/core/symbols.rs":"4925401f864d7c9b40bebf9f1a565c5650e7475dcc05a4d43240c3
2fd7ea38da","src/lib.rs":"f423d55cd70f7b4d4a31172f0ac3b74123ef7ed925f6be33913a8222da092279","tests/test_all_color.rs":"83d8ff40812200682360e59c9ac8ced14af65adb558699958e280e05d359933d","tests/test_animation.rs":"1b6db4c29c7b2727337c9096b53108475bf12cea0ffda9e26aa86bd7956885e2","tests/test_basic.rs":"8884ac1fb3b6749d94429ce57336a43a1a84f6c2c8c9c7ea9cdf224e2bc230df","tests/test_extra.rs":"c46c7f4fd45851565359fa20d9fce3216a19e000b66d08b1af8dc8f1e0b2282c","tests/test_gradient.rs":"bedd7a0afedbca9f606acfa3ae3bc718fab03b5f69fced8c9fbf0d499ad9d991","tests/test_hsl.rs":"668a7db4f84b555210f47cac4183141703aae679d3343bcbdb6fa75c1b3057b2"},"package":"0bca1619ff57dd7a56b58a8e25ef4199f123e78e503fe1653410350a1b98ae65"} \ No newline at end of file diff --git a/third_party/rust/colorful/Cargo.toml b/third_party/rust/colorful/Cargo.toml deleted file mode 100644 index 67abb1367dfb..000000000000 --- a/third_party/rust/colorful/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "colorful" -version = "0.2.1" -authors = ["da2018 "] -exclude = ["images/*", "examples"] -description = "Make your terminal output colorful" -homepage = "https://github.com/rocketsman/colorful" -readme = "README.md" -keywords = ["cli", "colors", "terminal"] -categories = ["cli", "colors", "terminal"] -license = "MIT" -repository = "https://github.com/rocketsman/colorful" - -[dependencies] diff --git a/third_party/rust/colorful/CodeOfConduct.md b/third_party/rust/colorful/CodeOfConduct.md deleted file mode 100644 index b6b0dea7f2f6..000000000000 --- a/third_party/rust/colorful/CodeOfConduct.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of -experience, nationality, personal appearance, race, religion, or sexual identity -and orientation. 
- -## Our Standards - -Examples of behaviour that contributes to creating a positive environment -include: - -- Using welcoming and inclusive language -- Being respectful of differing viewpoints and experiences -- Gracefully accepting constructive criticism -- Focusing on what is best for the community -- Showing empathy towards other community members - -Examples of unacceptable behaviour by participants include: - -- The use of sexualised language or imagery and unwelcome sexual - attention or advances -- Trolling, insulting/derogatory comments, and personal or political attacks -- Public or private harassment -- Publishing others’ private information, such as a physical or electronic - address, without explicit permission -- Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behaviour and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behaviour. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviours that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may -be further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behaviour may be -reported by contacting the project team at tituswormer@gmail.com. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an -incident. Further details of specific enforcement policies may be posted -separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project’s leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.4, available at . - -[homepage]: https://www.contributor-covenant.org diff --git a/third_party/rust/colorful/README.md b/third_party/rust/colorful/README.md deleted file mode 100644 index 4abec5ee21aa..000000000000 --- a/third_party/rust/colorful/README.md +++ /dev/null @@ -1,196 +0,0 @@ -

- Colorful -
-
-

- -[![Build Status](https://travis-ci.org/rocketsman/colorful.svg?branch=master)](https://travis-ci.org/rocketsman/colorful) [![Coverage Status](https://coveralls.io/repos/github/rocketsman/colorful/badge.svg?branch=master)](https://coveralls.io/github/rocketsman/colorful?branch=master) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/37a45510f41445eea0168f0f07e8f7cb)](https://app.codacy.com/app/rocketsman/colorful_2?utm_source=github.com&utm_medium=referral&utm_content=rocketsman/colorful&utm_campaign=Badge_Grade_Dashboard) - -## Usage - -### Basic Usage - -```Rust -extern crate colorful; - -use colorful::Color; -use colorful::Colorful; -//use colorful::HSL; -//use colorful::RGB; - -fn main() { - let s = "Hello world"; - println!("{}", s.color(Color::Blue).bg_color(Color::Yellow).bold()); - // println!("{}", s.color(HSL::new(1.0, 1.0, 0.5)).bold()); - // println!("{}", s.color(RGB::new(255, 0, 0)).bold()); - println!("{}", s.blue().bg_yellow()); -} -``` - -### Gradient - -```Rust -extern crate colorful; - -use colorful::Color; -use colorful::Colorful; - -fn main() { - println!("{}", "This code is editable and runnable!".gradient(Color::Red)); - println!("{}", "¡Este código es editable y ejecutable!".gradient(Color::Green)); - println!("{}", "Ce code est modifiable et exécutable !".gradient(Color::Yellow)); - println!("{}", "Questo codice è modificabile ed eseguibile!".gradient(Color::Blue)); - println!("{}", "このコードは編集して実行出来ます!".gradient(Color::Magenta)); - println!("{}", "여기에서 코드를 수정하고 실행할 수 있습니다!".gradient(Color::Cyan)); - println!("{}", "Ten kod można edytować oraz uruchomić!".gradient(Color::LightGray)); - println!("{}", "Este código é editável e executável!".gradient(Color::DarkGray)); - println!("{}", "Этот код можно отредактировать и запустить!".gradient(Color::LightRed)); - println!("{}", "Bạn có thể edit và run code trực tiếp!".gradient(Color::LightGreen)); - println!("{}", "这段代码是可以编辑并且能够运行的!".gradient(Color::LightYellow)); - println!("{}", 
"Dieser Code kann bearbeitet und ausgeführt werden!".gradient(Color::LightBlue)); - println!("{}", "Den här koden kan redigeras och köras!".gradient(Color::LightMagenta)); - println!("{}", "Tento kód můžete upravit a spustit".gradient(Color::LightCyan)); - println!("{}", "این کد قابلیت ویرایش و اجرا دارد!".gradient(Color::White)); - println!("{}", "โค้ดนี้สามารถแก้ไขได้และรันได้".gradient(Color::Grey0)); -} - -``` -
- -
- -### Gradient with style - -```Rust -extern crate colorful; - -use colorful::Colorful; - -fn main() { - println!("{}", "言葉にできず 凍えたままで 人前ではやさしく生きていた しわよせで こんなふうに雑に 雨の夜にきみを 抱きしめてた".gradient_with_color(HSL::new(0.0, 1.0, 0.5), HSL::new(0.833, 1.0, 0.5)).underlined()); -} -``` - -
- -
- -### Bar chart - -```Rust -extern crate colorful; - -use colorful::Colorful; -use colorful::HSL; - -fn main() { - let s = "█"; - println!("{}\n", "Most Loved, Dreaded, and Wanted Languages".red()); - let values = vec![78.9, 75.1, 68.0, 67.0, 65.6, 65.1, 61.9, 60.4]; - let languages = vec!["Rust", "Kotlin", "Python", "TypeScript", "Go", "Swift", "JavaScript", "C#"]; - let c = languages.iter().max_by_key(|x| x.len()).unwrap(); - - for (i, value) in values.iter().enumerate() { - let h = (*value as f32 * 15.0 % 360.0) / 360.0; - let length = (value - 30.0) as usize; - println!("{: - - - -### Animation - -#### Rainbow - -```Rust -extern crate colorful; - -use colorful::Colorful; - -fn main() { - let text = format!("{:^50}\n{}\r\n{}", "岳飞 小重山", "昨夜寒蛩不住鸣 惊回千里梦 已三更 起身独自绕阶行 人悄悄 帘外月胧明", - "白首为功名 旧山松竹老 阻归程 欲将心事付瑶琴 知音少 弦断有谁听"); - text.rainbow(); -} -``` -Output - -
- -
- -#### Neon - -```Rust -extern crate colorful; - -use colorful::Colorful; - -fn main() { - let text = format!("{:^28}\n{}", "WARNING", "BIG BROTHER IS WATCHING YOU!!!"); - text.neon(RGB::new(226, 14, 14), RGB::new(158, 158, 158)); - // or you can use text.warn(); -} - -``` -Output - -
- -
- - -## Terminals compatibility - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TerminalFormattingColor
BoldDimUnderlinedBlinkInvertHidden81688256
aTerm ~
Eterm ~ ~
GNOME Terminal
Guake
Konsole
Nautilus Terminal
rxvt ~
Terminator
Tilda
XFCE4 Terminal
XTerm
xvt
Linux TTY ~
VTE Terminal
- -~: Supported in a special way by the terminal. - -## Todo - -- [x] Basic 16 color -- [ ] Extra 240 color -- [x] HSL support -- [x] RGB support -- [x] Gradient mode -- [x] Rainbow mode -- [x] Animation mode -- [ ] Document -- [x] Terminals compatibility - -## License - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fda2018%2Fcolorful.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fda2018%2Fcolorful?ref=badge_large) diff --git a/third_party/rust/colorful/license b/third_party/rust/colorful/license deleted file mode 100644 index e7af2f77107d..000000000000 --- a/third_party/rust/colorful/license +++ /dev/null @@ -1,9 +0,0 @@ -MIT License - -Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/third_party/rust/colorful/rustfmt.toml b/third_party/rust/colorful/rustfmt.toml deleted file mode 100644 index 4fff285e7dfd..000000000000 --- a/third_party/rust/colorful/rustfmt.toml +++ /dev/null @@ -1,5 +0,0 @@ -max_width = 89 -reorder_imports = true -#wrap_comments = true -fn_args_density = "Compressed" -#use_small_heuristics = false diff --git a/third_party/rust/colorful/src/core/color_string.rs b/third_party/rust/colorful/src/core/color_string.rs deleted file mode 100644 index a4f0a1968524..000000000000 --- a/third_party/rust/colorful/src/core/color_string.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::fmt::Display; -use std::fmt::Formatter; -use std::fmt::Result as FmtResult; - -use core::ColorInterface; -use core::colors::Colorado; -use core::colors::ColorMode; -use core::StrMarker; -use core::symbols::Symbol; -use Style; - -// Support multiple style -#[derive(Clone)] -pub struct CString { - text: String, - fg_color: Option, - bg_color: Option, - styles: Option>, -} - -impl StrMarker for CString { - fn to_str(&self) -> String { - self.text.to_owned() - } - fn get_fg_color(&self) -> Option { - self.fg_color.clone() - } - fn get_bg_color(&self) -> Option { - self.bg_color.clone() - } - fn get_style(&self) -> Option> { - self.styles.clone() - } -} - - -impl CString { - pub fn new(cs: S) -> CString { - CString { - text: cs.to_str(), - fg_color: cs.get_fg_color(), - bg_color: cs.get_bg_color(), - styles: cs.get_style(), - } - } - pub fn create_by_text(cs: S, t: String) -> CString { - CString { text: t, ..CString::new(cs) } - } - pub fn create_by_fg(cs: S, color: C) -> CString { - CString { fg_color: Some(Colorado::new(color)), ..CString::new(cs) } - } - pub fn create_by_bg(cs: S, color: C) -> CString { - CString { bg_color: Some(Colorado::new(color)), ..CString::new(cs) } - } - pub fn create_by_style(cs: S, style: Style) -> CString { - CString { - text: cs.to_str(), - styles: match cs.get_style() { - Some(mut v) => { - v.push(style); - Some(v) - } - _ 
=> { Some(vec![style]) } - }, - fg_color: cs.get_fg_color(), - bg_color: cs.get_bg_color(), - } - } -} - -impl Display for CString { - fn fmt(&self, f: &mut Formatter) -> FmtResult { - let mut is_colored = false; - - if self.bg_color.is_none() && self.fg_color.is_none() && self.styles.is_none() { - write!(f, "{}", self.text)?; - Ok(()) - } else { - match &self.fg_color { - Some(v) => { - is_colored = true; - match v.get_mode() { - ColorMode::SIMPLE => { - f.write_str(Symbol::Simple256Foreground.to_str())?; - } - ColorMode::RGB => { - f.write_str(Symbol::RgbForeground.to_str())?; - } - _ => {} - } - write!(f, "{}", v.get_color())?; - } - _ => {} - } - match &self.bg_color { - Some(v) => { - if is_colored { - f.write_str(Symbol::Mode.to_str())?; - } else { - is_colored = true; - } - match v.get_mode() { - ColorMode::SIMPLE => { - f.write_str(Symbol::Simple256Background.to_str())?; - } - ColorMode::RGB => { - f.write_str(Symbol::RgbBackground.to_str())?; - } - _ => {} - } - write!(f, "{}", v.get_color())?; - } - _ => {} - } - - match &self.styles { - Some(v) => { - if !is_colored { // pure style without color - write!(f, "{}{}", Symbol::Esc, Symbol::LeftBrackets)?; - } else { - f.write_str(Symbol::Semicolon.to_str())?; - } - let t: Vec = v.into_iter().map(|x| x.to_string()).collect(); - f.write_str(&t.join(";")[..])?; - } - _ => {} - } - f.write_str(Symbol::Mode.to_str())?; - write!(f, "{}", self.text)?; - f.write_str(Symbol::Reset.to_str())?; - Ok(()) - } - } -} diff --git a/third_party/rust/colorful/src/core/colors.rs b/third_party/rust/colorful/src/core/colors.rs deleted file mode 100644 index ca4194b1b20d..000000000000 --- a/third_party/rust/colorful/src/core/colors.rs +++ /dev/null @@ -1,847 +0,0 @@ -use std::slice::Iter; - -use core::ColorInterface; -use HSL; - -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum Color { - Black, - Red, - Green, - Yellow, - Blue, - Magenta, - Cyan, - LightGray, - DarkGray, - LightRed, - LightGreen, - LightYellow, - LightBlue, 
- LightMagenta, - LightCyan, - White, - Grey0, - NavyBlue, - DarkBlue, - Blue3a, - Blue3b, - Blue1, - DarkGreen, - DeepSkyBlue4a, - DeepSkyBlue4b, - DeepSkyBlue4c, - DodgerBlue3, - DodgerBlue2, - Green4, - SpringGreen4, - Turquoise4, - DeepSkyBlue3a, - DeepSkyBlue3b, - DodgerBlue1, - Green3a, - SpringGreen3a, - DarkCyan, - LightSeaGreen, - DeepSkyBlue2, - DeepSkyBlue1, - Green3b, - SpringGreen3b, - SpringGreen2a, - Cyan3, - DarkTurquoise, - Turquoise2, - Green1, - SpringGreen2b, - SpringGreen1, - MediumSpringGreen, - Cyan2, - Cyan1, - DarkRed1, - DeepPink4a, - Purple4a, - Purple4b, - Purple3, - BlueViolet, - Orange4a, - Grey37, - MediumPurple4, - SlateBlue3a, - SlateBlue3b, - RoyalBlue1, - Chartreuse4, - DarkSeaGreen4a, - PaleTurquoise4, - SteelBlue, - SteelBlue3, - CornflowerBlue, - Chartreuse3a, - DarkSeaGreen4b, - CadetBlue2, - CadetBlue1, - SkyBlue3, - SteelBlue1a, - Chartreuse3b, - PaleGreen3a, - SeaGreen3, - Aquamarine3, - MediumTurquoise, - SteelBlue1b, - Chartreuse2a, - SeaGreen2, - SeaGreen1a, - SeaGreen1b, - Aquamarine1a, - DarkSlateGray2, - DarkRed2, - DeepPink4b, - DarkMagenta1, - DarkMagenta2, - DarkViolet1a, - Purple1a, - Orange4b, - LightPink4, - Plum4, - MediumPurple3a, - MediumPurple3b, - SlateBlue1, - Yellow4a, - Wheat4, - Grey53, - LightSlateGrey, - MediumPurple, - LightSlateBlue, - Yellow4b, - DarkOliveGreen3a, - DarkGreenSea, - LightSkyBlue3a, - LightSkyBlue3b, - SkyBlue2, - Chartreuse2b, - DarkOliveGreen3b, - PaleGreen3b, - DarkSeaGreen3a, - DarkSlateGray3, - SkyBlue1, - Chartreuse1, - LightGreen2, - LightGreen3, - PaleGreen1a, - Aquamarine1b, - DarkSlateGray1, - Red3a, - DeepPink4c, - MediumVioletRed, - Magenta3a, - DarkViolet1b, - Purple1b, - DarkOrange3a, - IndianRed1a, - HotPink3a, - MediumOrchid3, - MediumOrchid, - MediumPurple2a, - DarkGoldenrod, - LightSalmon3a, - RosyBrown, - Grey63, - MediumPurple2b, - MediumPurple1, - Gold3a, - DarkKhaki, - NavajoWhite3, - Grey69, - LightSteelBlue3, - LightSteelBlue, - Yellow3a, - DarkOliveGreen3, - 
DarkSeaGreen3b, - DarkSeaGreen2, - LightCyan3, - LightSkyBlue1, - GreenYellow, - DarkOliveGreen2, - PaleGreen1b, - DarkSeaGreen5b, - DarkSeaGreen5a, - PaleTurquoise1, - Red3b, - DeepPink3a, - DeepPink3b, - Magenta3b, - Magenta3c, - Magenta2a, - DarkOrange3b, - IndianRed1b, - HotPink3b, - HotPink2, - Orchid, - MediumOrchid1a, - Orange3, - LightSalmon3b, - LightPink3, - Pink3, - Plum3, - Violet, - Gold3b, - LightGoldenrod3, - Tan, - MistyRose3, - Thistle3, - Plum2, - Yellow3b, - Khaki3, - LightGoldenrod2a, - LightYellow3, - Grey84, - LightSteelBlue1, - Yellow2, - DarkOliveGreen1a, - DarkOliveGreen1b, - DarkSeaGreen1, - Honeydew2, - LightCyan1, - Red1, - DeepPink2, - DeepPink1a, - DeepPink1b, - Magenta2b, - Magenta1, - OrangeRed1, - IndianRed1c, - IndianRed1d, - HotPink1a, - HotPink1b, - MediumOrchid1b, - DarkOrange, - Salmon1, - LightCoral, - PaleVioletRed1, - Orchid2, - Orchid1, - Orange1, - SandyBrown, - LightSalmon1, - LightPink1, - Pink1, - Plum1, - Gold1, - LightGoldenrod2b, - LightGoldenrod2c, - NavajoWhite1, - MistyRose1, - Thistle1, - Yellow1, - LightGoldenrod1, - Khaki1, - Wheat1, - CornSilk1, - Grey100, - Grey3, - Grey7, - Grey11, - Grey15, - Grey19, - Grey23, - Grey27, - Grey30, - Grey35, - Grey39, - Grey42, - Grey46, - Grey50, - Grey54, - Grey58, - Grey62, - Grey66, - Grey70, - Grey74, - Grey78, - Grey82, - Grey85, - Grey89, - Grey93, -} - -impl ColorInterface for Color { - fn to_color_str(&self) -> String { - format!("{}", - match self { - Color::Black => "0", - Color::Red => "1", - Color::Green => "2", - Color::Yellow => "3", - Color::Blue => "4", - Color::Magenta => "5", - Color::Cyan => "6", - Color::LightGray => "7", - Color::DarkGray => "8", - Color::LightRed => "9", - Color::LightGreen => "10", - Color::LightYellow => "11", - Color::LightBlue => "12", - Color::LightMagenta => "13", - Color::LightCyan => "14", - Color::White => "15", - Color::Grey0 => "16", - Color::NavyBlue => "17", - Color::DarkBlue => "18", - Color::Blue3a => "19", - 
Color::Blue3b => "20", - Color::Blue1 => "21", - Color::DarkGreen => "22", - Color::DeepSkyBlue4a => "23", - Color::DeepSkyBlue4b => "24", - Color::DeepSkyBlue4c => "25", - Color::DodgerBlue3 => "26", - Color::DodgerBlue2 => "27", - Color::Green4 => "28", - Color::SpringGreen4 => "29", - Color::Turquoise4 => "30", - Color::DeepSkyBlue3a => "31", - Color::DeepSkyBlue3b => "32", - Color::DodgerBlue1 => "33", - Color::Green3a => "34", - Color::SpringGreen3a => "35", - Color::DarkCyan => "36", - Color::LightSeaGreen => "37", - Color::DeepSkyBlue2 => "38", - Color::DeepSkyBlue1 => "39", - Color::Green3b => "40", - Color::SpringGreen3b => "41", - Color::SpringGreen2a => "42", - Color::Cyan3 => "43", - Color::DarkTurquoise => "44", - Color::Turquoise2 => "45", - Color::Green1 => "46", - Color::SpringGreen2b => "47", - Color::SpringGreen1 => "48", - Color::MediumSpringGreen => "49", - Color::Cyan2 => "50", - Color::Cyan1 => "51", - Color::DarkRed1 => "52", - Color::DeepPink4a => "53", - Color::Purple4a => "54", - Color::Purple4b => "55", - Color::Purple3 => "56", - Color::BlueViolet => "57", - Color::Orange4a => "58", - Color::Grey37 => "59", - Color::MediumPurple4 => "60", - Color::SlateBlue3a => "61", - Color::SlateBlue3b => "62", - Color::RoyalBlue1 => "63", - Color::Chartreuse4 => "64", - Color::DarkSeaGreen4a => "65", - Color::PaleTurquoise4 => "66", - Color::SteelBlue => "67", - Color::SteelBlue3 => "68", - Color::CornflowerBlue => "69", - Color::Chartreuse3a => "70", - Color::DarkSeaGreen4b => "71", - Color::CadetBlue2 => "72", - Color::CadetBlue1 => "73", - Color::SkyBlue3 => "74", - Color::SteelBlue1a => "75", - Color::Chartreuse3b => "76", - Color::PaleGreen3a => "77", - Color::SeaGreen3 => "78", - Color::Aquamarine3 => "79", - Color::MediumTurquoise => "80", - Color::SteelBlue1b => "81", - Color::Chartreuse2a => "82", - Color::SeaGreen2 => "83", - Color::SeaGreen1a => "84", - Color::SeaGreen1b => "85", - Color::Aquamarine1a => "86", - Color::DarkSlateGray2 => 
"87", - Color::DarkRed2 => "88", - Color::DeepPink4b => "89", - Color::DarkMagenta1 => "90", - Color::DarkMagenta2 => "91", - Color::DarkViolet1a => "92", - Color::Purple1a => "93", - Color::Orange4b => "94", - Color::LightPink4 => "95", - Color::Plum4 => "96", - Color::MediumPurple3a => "97", - Color::MediumPurple3b => "98", - Color::SlateBlue1 => "99", - Color::Yellow4a => "100", - Color::Wheat4 => "101", - Color::Grey53 => "102", - Color::LightSlateGrey => "103", - Color::MediumPurple => "104", - Color::LightSlateBlue => "105", - Color::Yellow4b => "106", - Color::DarkOliveGreen3a => "107", - Color::DarkGreenSea => "108", - Color::LightSkyBlue3a => "109", - Color::LightSkyBlue3b => "110", - Color::SkyBlue2 => "111", - Color::Chartreuse2b => "112", - Color::DarkOliveGreen3b => "113", - Color::PaleGreen3b => "114", - Color::DarkSeaGreen3a => "115", - Color::DarkSlateGray3 => "116", - Color::SkyBlue1 => "117", - Color::Chartreuse1 => "118", - Color::LightGreen2 => "119", - Color::LightGreen3 => "120", - Color::PaleGreen1a => "121", - Color::Aquamarine1b => "122", - Color::DarkSlateGray1 => "123", - Color::Red3a => "124", - Color::DeepPink4c => "125", - Color::MediumVioletRed => "126", - Color::Magenta3a => "127", - Color::DarkViolet1b => "128", - Color::Purple1b => "129", - Color::DarkOrange3a => "130", - Color::IndianRed1a => "131", - Color::HotPink3a => "132", - Color::MediumOrchid3 => "133", - Color::MediumOrchid => "134", - Color::MediumPurple2a => "135", - Color::DarkGoldenrod => "136", - Color::LightSalmon3a => "137", - Color::RosyBrown => "138", - Color::Grey63 => "139", - Color::MediumPurple2b => "140", - Color::MediumPurple1 => "141", - Color::Gold3a => "142", - Color::DarkKhaki => "143", - Color::NavajoWhite3 => "144", - Color::Grey69 => "145", - Color::LightSteelBlue3 => "146", - Color::LightSteelBlue => "147", - Color::Yellow3a => "148", - Color::DarkOliveGreen3 => "149", - Color::DarkSeaGreen3b => "150", - Color::DarkSeaGreen2 => "151", - 
Color::LightCyan3 => "152", - Color::LightSkyBlue1 => "153", - Color::GreenYellow => "154", - Color::DarkOliveGreen2 => "155", - Color::PaleGreen1b => "156", - Color::DarkSeaGreen5b => "157", - Color::DarkSeaGreen5a => "158", - Color::PaleTurquoise1 => "159", - Color::Red3b => "160", - Color::DeepPink3a => "161", - Color::DeepPink3b => "162", - Color::Magenta3b => "163", - Color::Magenta3c => "164", - Color::Magenta2a => "165", - Color::DarkOrange3b => "166", - Color::IndianRed1b => "167", - Color::HotPink3b => "168", - Color::HotPink2 => "169", - Color::Orchid => "170", - Color::MediumOrchid1a => "171", - Color::Orange3 => "172", - Color::LightSalmon3b => "173", - Color::LightPink3 => "174", - Color::Pink3 => "175", - Color::Plum3 => "176", - Color::Violet => "177", - Color::Gold3b => "178", - Color::LightGoldenrod3 => "179", - Color::Tan => "180", - Color::MistyRose3 => "181", - Color::Thistle3 => "182", - Color::Plum2 => "183", - Color::Yellow3b => "184", - Color::Khaki3 => "185", - Color::LightGoldenrod2a => "186", - Color::LightYellow3 => "187", - Color::Grey84 => "188", - Color::LightSteelBlue1 => "189", - Color::Yellow2 => "190", - Color::DarkOliveGreen1a => "191", - Color::DarkOliveGreen1b => "192", - Color::DarkSeaGreen1 => "193", - Color::Honeydew2 => "194", - Color::LightCyan1 => "195", - Color::Red1 => "196", - Color::DeepPink2 => "197", - Color::DeepPink1a => "198", - Color::DeepPink1b => "199", - Color::Magenta2b => "200", - Color::Magenta1 => "201", - Color::OrangeRed1 => "202", - Color::IndianRed1c => "203", - Color::IndianRed1d => "204", - Color::HotPink1a => "205", - Color::HotPink1b => "206", - Color::MediumOrchid1b => "207", - Color::DarkOrange => "208", - Color::Salmon1 => "209", - Color::LightCoral => "210", - Color::PaleVioletRed1 => "211", - Color::Orchid2 => "212", - Color::Orchid1 => "213", - Color::Orange1 => "214", - Color::SandyBrown => "215", - Color::LightSalmon1 => "216", - Color::LightPink1 => "217", - Color::Pink1 => "218", - 
Color::Plum1 => "219", - Color::Gold1 => "220", - Color::LightGoldenrod2b => "221", - Color::LightGoldenrod2c => "222", - Color::NavajoWhite1 => "223", - Color::MistyRose1 => "224", - Color::Thistle1 => "225", - Color::Yellow1 => "226", - Color::LightGoldenrod1 => "227", - Color::Khaki1 => "228", - Color::Wheat1 => "229", - Color::CornSilk1 => "230", - Color::Grey100 => "231", - Color::Grey3 => "232", - Color::Grey7 => "233", - Color::Grey11 => "234", - Color::Grey15 => "235", - Color::Grey19 => "236", - Color::Grey23 => "237", - Color::Grey27 => "238", - Color::Grey30 => "239", - Color::Grey35 => "240", - Color::Grey39 => "241", - Color::Grey42 => "242", - Color::Grey46 => "243", - Color::Grey50 => "244", - Color::Grey54 => "245", - Color::Grey58 => "246", - Color::Grey62 => "247", - Color::Grey66 => "248", - Color::Grey70 => "249", - Color::Grey74 => "250", - Color::Grey78 => "251", - Color::Grey82 => "252", - Color::Grey85 => "253", - Color::Grey89 => "254", - Color::Grey93 => "255", - } - ) - } - fn to_hsl(&self) -> HSL { - match self { - Color::Black => HSL::new(0.0, 0.0, 0.0), - Color::Red => HSL::new(0.0, 1.0, 0.25), - Color::Green => HSL::new(0.3333333333333333, 1.0, 0.25), - Color::Yellow => HSL::new(0.16666666666666666, 1.0, 0.25), - Color::Blue => HSL::new(0.6666666666666666, 1.0, 0.25), - Color::Magenta => HSL::new(0.8333333333333334, 1.0, 0.25), - Color::Cyan => HSL::new(0.5, 1.0, 0.25), - Color::LightGray => HSL::new(0.0, 0.0, 0.75), - Color::DarkGray => HSL::new(0.0, 0.0, 0.5), - Color::LightRed => HSL::new(0.0, 1.0, 0.5), - Color::LightGreen => HSL::new(0.3333333333333333, 1.0, 0.5), - Color::LightYellow => HSL::new(0.16666666666666666, 1.0, 0.5), - Color::LightBlue => HSL::new(0.6666666666666666, 1.0, 0.5), - Color::LightMagenta => HSL::new(0.8333333333333334, 1.0, 0.5), - Color::LightCyan => HSL::new(0.5, 1.0, 0.5), - Color::White => HSL::new(0.0, 0.0, 1.0), - Color::Grey0 => HSL::new(0.0, 0.0, 0.0), - Color::NavyBlue => 
HSL::new(0.6666666666666666, 1.0, 0.18), - Color::DarkBlue => HSL::new(0.6666666666666666, 1.0, 0.26), - Color::Blue3a => HSL::new(0.6666666666666666, 1.0, 0.34), - Color::Blue3b => HSL::new(0.6666666666666666, 1.0, 0.42), - Color::Blue1 => HSL::new(0.6666666666666666, 1.0, 0.5), - Color::DarkGreen => HSL::new(0.3333333333333333, 1.0, 0.18), - Color::DeepSkyBlue4a => HSL::new(0.5, 1.0, 0.18), - Color::DeepSkyBlue4b => HSL::new(0.5493827160493834, 1.0, 0.26), - Color::DeepSkyBlue4c => HSL::new(0.5761904761904749, 1.0, 0.34), - Color::DodgerBlue3 => HSL::new(0.5930232558139528, 1.0, 0.42), - Color::DodgerBlue2 => HSL::new(0.6045751633986917, 1.0, 0.5), - Color::Green4 => HSL::new(0.3333333333333333, 1.0, 0.26), - Color::SpringGreen4 => HSL::new(0.4506172839506167, 1.0, 0.26), - Color::Turquoise4 => HSL::new(0.5, 1.0, 0.26), - Color::DeepSkyBlue3a => HSL::new(0.538095238095239, 1.0, 0.34), - Color::DeepSkyBlue3b => HSL::new(0.5620155038759694, 1.0, 0.42), - Color::DodgerBlue1 => HSL::new(0.5784313725490194, 1.0, 0.5), - Color::Green3a => HSL::new(0.3333333333333333, 1.0, 0.34), - Color::SpringGreen3a => HSL::new(0.423809523809525, 1.0, 0.34), - Color::DarkCyan => HSL::new(0.4619047619047611, 1.0, 0.34), - Color::LightSeaGreen => HSL::new(0.5, 1.0, 0.34), - Color::DeepSkyBlue2 => HSL::new(0.5310077519379833, 1.0, 0.42), - Color::DeepSkyBlue1 => HSL::new(0.5522875816993472, 1.0, 0.5), - Color::Green3b => HSL::new(0.3333333333333333, 1.0, 0.42), - Color::SpringGreen3b => HSL::new(0.40697674418604723, 1.0, 0.42), - Color::SpringGreen2a => HSL::new(0.43798449612403056, 1.0, 0.42), - Color::Cyan3 => HSL::new(0.4689922480620166, 1.0, 0.42), - Color::DarkTurquoise => HSL::new(0.5, 1.0, 0.42), - Color::Turquoise2 => HSL::new(0.5261437908496722, 1.0, 0.5), - Color::Green1 => HSL::new(0.3333333333333333, 1.0, 0.5), - Color::SpringGreen2b => HSL::new(0.39542483660130834, 1.0, 0.5), - Color::SpringGreen1 => HSL::new(0.4215686274509806, 1.0, 0.5), - Color::MediumSpringGreen => 
HSL::new(0.4477124183006528, 1.0, 0.5), - Color::Cyan2 => HSL::new(0.4738562091503278, 1.0, 0.5), - Color::Cyan1 => HSL::new(0.5, 1.0, 0.5), - Color::DarkRed1 => HSL::new(0.0, 1.0, 0.18), - Color::DeepPink4a => HSL::new(0.8333333333333334, 1.0, 0.18), - Color::Purple4a => HSL::new(0.78395061728395, 1.0, 0.26), - Color::Purple4b => HSL::new(0.7571428571428583, 1.0, 0.34), - Color::Purple3 => HSL::new(0.7403100775193806, 1.0, 0.42), - Color::BlueViolet => HSL::new(0.7287581699346417, 1.0, 0.5), - Color::Orange4a => HSL::new(0.16666666666666666, 1.0, 0.18), - Color::Grey37 => HSL::new(0.0, 0.0, 0.37), - Color::MediumPurple4 => HSL::new(0.6666666666666666, 0.17, 0.45), - Color::SlateBlue3a => HSL::new(0.6666666666666666, 0.33, 0.52), - Color::SlateBlue3b => HSL::new(0.6666666666666666, 0.6, 0.6), - Color::RoyalBlue1 => HSL::new(0.6666666666666666, 1.0, 0.68), - Color::Chartreuse4 => HSL::new(0.21604938271604945, 1.0, 0.26), - Color::DarkSeaGreen4a => HSL::new(0.3333333333333333, 0.17, 0.45), - Color::PaleTurquoise4 => HSL::new(0.5, 0.17, 0.45), - Color::SteelBlue => HSL::new(0.5833333333333334, 0.33, 0.52), - Color::SteelBlue3 => HSL::new(0.6111111111111112, 0.6, 0.6), - Color::CornflowerBlue => HSL::new(0.625, 1.0, 0.68), - Color::Chartreuse3a => HSL::new(0.24285714285714277, 1.0, 0.34), - Color::DarkSeaGreen4b => HSL::new(0.3333333333333333, 0.33, 0.52), - Color::CadetBlue2 => HSL::new(0.4166666666666667, 0.33, 0.52), - Color::CadetBlue1 => HSL::new(0.5, 0.33, 0.52), - Color::SkyBlue3 => HSL::new(0.5555555555555556, 0.6, 0.6), - Color::SteelBlue1a => HSL::new(0.5833333333333334, 1.0, 0.68), - Color::Chartreuse3b => HSL::new(0.2596899224806203, 1.0, 0.42), - Color::PaleGreen3a => HSL::new(0.3333333333333333, 0.6, 0.6), - Color::SeaGreen3 => HSL::new(0.3888888888888889, 0.6, 0.6), - Color::Aquamarine3 => HSL::new(0.4444444444444444, 0.6, 0.6), - Color::MediumTurquoise => HSL::new(0.5, 0.6, 0.6), - Color::SteelBlue1b => HSL::new(0.5416666666666666, 1.0, 0.68), - 
Color::Chartreuse2a => HSL::new(0.27124183006535946, 1.0, 0.5), - Color::SeaGreen2 => HSL::new(0.3333333333333333, 1.0, 0.68), - Color::SeaGreen1a => HSL::new(0.375, 1.0, 0.68), - Color::SeaGreen1b => HSL::new(0.4166666666666667, 1.0, 0.68), - Color::Aquamarine1a => HSL::new(0.4583333333333333, 1.0, 0.68), - Color::DarkSlateGray2 => HSL::new(0.5, 1.0, 0.68), - Color::DarkRed2 => HSL::new(0.0, 1.0, 0.26), - Color::DeepPink4b => HSL::new(0.8827160493827166, 1.0, 0.26), - Color::DarkMagenta1 => HSL::new(0.8333333333333334, 1.0, 0.26), - Color::DarkMagenta2 => HSL::new(0.7952380952380944, 1.0, 0.34), - Color::DarkViolet1a => HSL::new(0.7713178294573639, 1.0, 0.42), - Color::Purple1a => HSL::new(0.7549019607843138, 1.0, 0.5), - Color::Orange4b => HSL::new(0.11728395061728389, 1.0, 0.26), - Color::LightPink4 => HSL::new(0.0, 0.17, 0.45), - Color::Plum4 => HSL::new(0.8333333333333334, 0.17, 0.45), - Color::MediumPurple3a => HSL::new(0.75, 0.33, 0.52), - Color::MediumPurple3b => HSL::new(0.7222222222222222, 0.6, 0.6), - Color::SlateBlue1 => HSL::new(0.7083333333333334, 1.0, 0.68), - Color::Yellow4a => HSL::new(0.16666666666666666, 1.0, 0.26), - Color::Wheat4 => HSL::new(0.16666666666666666, 0.17, 0.45), - Color::Grey53 => HSL::new(0.0, 0.0, 0.52), - Color::LightSlateGrey => HSL::new(0.6666666666666666, 0.2, 0.6), - Color::MediumPurple => HSL::new(0.6666666666666666, 0.5, 0.68), - Color::LightSlateBlue => HSL::new(0.6666666666666666, 1.0, 0.76), - Color::Yellow4b => HSL::new(0.2047619047619047, 1.0, 0.34), - Color::DarkOliveGreen3a => HSL::new(0.25, 0.33, 0.52), - Color::DarkGreenSea => HSL::new(0.3333333333333333, 0.2, 0.6), - Color::LightSkyBlue3a => HSL::new(0.5, 0.2, 0.6), - Color::LightSkyBlue3b => HSL::new(0.5833333333333334, 0.5, 0.68), - Color::SkyBlue2 => HSL::new(0.6111111111111112, 1.0, 0.76), - Color::Chartreuse2b => HSL::new(0.22868217054263557, 1.0, 0.42), - Color::DarkOliveGreen3b => HSL::new(0.2777777777777778, 0.6, 0.6), - Color::PaleGreen3b => 
HSL::new(0.3333333333333333, 0.5, 0.68), - Color::DarkSeaGreen3a => HSL::new(0.4166666666666667, 0.5, 0.68), - Color::DarkSlateGray3 => HSL::new(0.5, 0.5, 0.68), - Color::SkyBlue1 => HSL::new(0.5555555555555556, 1.0, 0.76), - Color::Chartreuse1 => HSL::new(0.2450980392156864, 1.0, 0.5), - Color::LightGreen2 => HSL::new(0.2916666666666667, 1.0, 0.68), - Color::LightGreen3 => HSL::new(0.3333333333333333, 1.0, 0.76), - Color::PaleGreen1a => HSL::new(0.3888888888888889, 1.0, 0.76), - Color::Aquamarine1b => HSL::new(0.4444444444444444, 1.0, 0.76), - Color::DarkSlateGray1 => HSL::new(0.5, 1.0, 0.76), - Color::Red3a => HSL::new(0.0, 1.0, 0.34), - Color::DeepPink4c => HSL::new(0.9095238095238083, 1.0, 0.34), - Color::MediumVioletRed => HSL::new(0.8714285714285722, 1.0, 0.34), - Color::Magenta3a => HSL::new(0.8333333333333334, 1.0, 0.34), - Color::DarkViolet1b => HSL::new(0.80232558139535, 1.0, 0.42), - Color::Purple1b => HSL::new(0.7810457516339862, 1.0, 0.5), - Color::DarkOrange3a => HSL::new(0.09047619047619054, 1.0, 0.34), - Color::IndianRed1a => HSL::new(0.0, 0.33, 0.52), - Color::HotPink3a => HSL::new(0.9166666666666666, 0.33, 0.52), - Color::MediumOrchid3 => HSL::new(0.8333333333333334, 0.33, 0.52), - Color::MediumOrchid => HSL::new(0.7777777777777778, 0.6, 0.6), - Color::MediumPurple2a => HSL::new(0.75, 1.0, 0.68), - Color::DarkGoldenrod => HSL::new(0.12857142857142861, 1.0, 0.34), - Color::LightSalmon3a => HSL::new(0.08333333333333333, 0.33, 0.52), - Color::RosyBrown => HSL::new(0.0, 0.2, 0.6), - Color::Grey63 => HSL::new(0.8333333333333334, 0.2, 0.6), - Color::MediumPurple2b => HSL::new(0.75, 0.5, 0.68), - Color::MediumPurple1 => HSL::new(0.7222222222222222, 1.0, 0.76), - Color::Gold3a => HSL::new(0.16666666666666666, 1.0, 0.34), - Color::DarkKhaki => HSL::new(0.16666666666666666, 0.33, 0.52), - Color::NavajoWhite3 => HSL::new(0.16666666666666666, 0.2, 0.6), - Color::Grey69 => HSL::new(0.0, 0.0, 0.68), - Color::LightSteelBlue3 => HSL::new(0.6666666666666666, 0.33, 
0.76), - Color::LightSteelBlue => HSL::new(0.6666666666666666, 1.0, 0.84), - Color::Yellow3a => HSL::new(0.1976744186046511, 1.0, 0.42), - Color::DarkOliveGreen3 => HSL::new(0.2222222222222222, 0.6, 0.6), - Color::DarkSeaGreen3b => HSL::new(0.25, 0.5, 0.68), - Color::DarkSeaGreen2 => HSL::new(0.3333333333333333, 0.33, 0.76), - Color::LightCyan3 => HSL::new(0.5, 0.33, 0.76), - Color::LightSkyBlue1 => HSL::new(0.5833333333333334, 1.0, 0.84), - Color::GreenYellow => HSL::new(0.21895424836601304, 1.0, 0.5), - Color::DarkOliveGreen2 => HSL::new(0.25, 1.0, 0.68), - Color::PaleGreen1b => HSL::new(0.2777777777777778, 1.0, 0.76), - Color::DarkSeaGreen5b => HSL::new(0.3333333333333333, 1.0, 0.84), - Color::DarkSeaGreen5a => HSL::new(0.4166666666666667, 1.0, 0.84), - Color::PaleTurquoise1 => HSL::new(0.5, 1.0, 0.84), - Color::Red3b => HSL::new(0.0, 1.0, 0.42), - Color::DeepPink3a => HSL::new(0.926356589147286, 1.0, 0.42), - Color::DeepPink3b => HSL::new(0.8953488372093028, 1.0, 0.42), - Color::Magenta3b => HSL::new(0.8643410852713166, 1.0, 0.42), - Color::Magenta3c => HSL::new(0.8333333333333334, 1.0, 0.42), - Color::Magenta2a => HSL::new(0.8071895424836611, 1.0, 0.5), - Color::DarkOrange3b => HSL::new(0.07364341085271306, 1.0, 0.42), - Color::IndianRed1b => HSL::new(0.0, 0.6, 0.6), - Color::HotPink3b => HSL::new(0.9444444444444444, 0.6, 0.6), - Color::HotPink2 => HSL::new(0.8888888888888888, 0.6, 0.6), - Color::Orchid => HSL::new(0.8333333333333334, 0.6, 0.6), - Color::MediumOrchid1a => HSL::new(0.7916666666666666, 1.0, 0.68), - Color::Orange3 => HSL::new(0.10465116279069778, 1.0, 0.42), - Color::LightSalmon3b => HSL::new(0.05555555555555555, 0.6, 0.6), - Color::LightPink3 => HSL::new(0.0, 0.5, 0.68), - Color::Pink3 => HSL::new(0.9166666666666666, 0.5, 0.68), - Color::Plum3 => HSL::new(0.8333333333333334, 0.5, 0.68), - Color::Violet => HSL::new(0.7777777777777778, 1.0, 0.76), - Color::Gold3b => HSL::new(0.13565891472868222, 1.0, 0.42), - Color::LightGoldenrod3 => 
HSL::new(0.1111111111111111, 0.6, 0.6), - Color::Tan => HSL::new(0.08333333333333333, 0.5, 0.68), - Color::MistyRose3 => HSL::new(0.0, 0.33, 0.76), - Color::Thistle3 => HSL::new(0.8333333333333334, 0.33, 0.76), - Color::Plum2 => HSL::new(0.75, 1.0, 0.84), - Color::Yellow3b => HSL::new(0.16666666666666666, 1.0, 0.42), - Color::Khaki3 => HSL::new(0.16666666666666666, 0.6, 0.6), - Color::LightGoldenrod2a => HSL::new(0.16666666666666666, 0.5, 0.68), - Color::LightYellow3 => HSL::new(0.16666666666666666, 0.33, 0.76), - Color::Grey84 => HSL::new(0.0, 0.0, 0.84), - Color::LightSteelBlue1 => HSL::new(0.6666666666666666, 1.0, 0.92), - Color::Yellow2 => HSL::new(0.19281045751633974, 1.0, 0.5), - Color::DarkOliveGreen1a => HSL::new(0.20833333333333334, 1.0, 0.68), - Color::DarkOliveGreen1b => HSL::new(0.2222222222222222, 1.0, 0.76), - Color::DarkSeaGreen1 => HSL::new(0.25, 1.0, 0.84), - Color::Honeydew2 => HSL::new(0.3333333333333333, 1.0, 0.92), - Color::LightCyan1 => HSL::new(0.5, 1.0, 0.92), - Color::Red1 => HSL::new(0.0, 1.0, 0.5), - Color::DeepPink2 => HSL::new(0.937908496732025, 1.0, 0.5), - Color::DeepPink1a => HSL::new(0.9117647058823528, 1.0, 0.5), - Color::DeepPink1b => HSL::new(0.8856209150326805, 1.0, 0.5), - Color::Magenta2b => HSL::new(0.8594771241830055, 1.0, 0.5), - Color::Magenta1 => HSL::new(0.8333333333333334, 1.0, 0.5), - Color::OrangeRed1 => HSL::new(0.06209150326797389, 1.0, 0.5), - Color::IndianRed1c => HSL::new(0.0, 1.0, 0.68), - Color::IndianRed1d => HSL::new(0.9583333333333334, 1.0, 0.68), - Color::HotPink1a => HSL::new(0.9166666666666666, 1.0, 0.68), - Color::HotPink1b => HSL::new(0.875, 1.0, 0.68), - Color::MediumOrchid1b => HSL::new(0.8333333333333334, 1.0, 0.68), - Color::DarkOrange => HSL::new(0.08823529411764694, 1.0, 0.5), - Color::Salmon1 => HSL::new(0.041666666666666664, 1.0, 0.68), - Color::LightCoral => HSL::new(0.0, 1.0, 0.76), - Color::PaleVioletRed1 => HSL::new(0.9444444444444444, 1.0, 0.76), - Color::Orchid2 => 
HSL::new(0.8888888888888888, 1.0, 0.76), - Color::Orchid1 => HSL::new(0.8333333333333334, 1.0, 0.76), - Color::Orange1 => HSL::new(0.11437908496732027, 1.0, 0.5), - Color::SandyBrown => HSL::new(0.08333333333333333, 1.0, 0.68), - Color::LightSalmon1 => HSL::new(0.05555555555555555, 1.0, 0.76), - Color::LightPink1 => HSL::new(0.0, 1.0, 0.84), - Color::Pink1 => HSL::new(0.9166666666666666, 1.0, 0.84), - Color::Plum1 => HSL::new(0.8333333333333334, 1.0, 0.84), - Color::Gold1 => HSL::new(0.14052287581699335, 1.0, 0.5), - Color::LightGoldenrod2b => HSL::new(0.125, 1.0, 0.68), - Color::LightGoldenrod2c => HSL::new(0.1111111111111111, 1.0, 0.76), - Color::NavajoWhite1 => HSL::new(0.08333333333333333, 1.0, 0.84), - Color::MistyRose1 => HSL::new(0.0, 1.0, 0.92), - Color::Thistle1 => HSL::new(0.8333333333333334, 1.0, 0.92), - Color::Yellow1 => HSL::new(0.16666666666666666, 1.0, 0.5), - Color::LightGoldenrod1 => HSL::new(0.16666666666666666, 1.0, 0.68), - Color::Khaki1 => HSL::new(0.16666666666666666, 1.0, 0.76), - Color::Wheat1 => HSL::new(0.16666666666666666, 1.0, 0.84), - Color::CornSilk1 => HSL::new(0.16666666666666666, 1.0, 0.92), - Color::Grey100 => HSL::new(0.0, 0.0, 1.0), - Color::Grey3 => HSL::new(0.0, 0.0, 0.03), - Color::Grey7 => HSL::new(0.0, 0.0, 0.07), - Color::Grey11 => HSL::new(0.0, 0.0, 0.1), - Color::Grey15 => HSL::new(0.0, 0.0, 0.14), - Color::Grey19 => HSL::new(0.0, 0.0, 0.18), - Color::Grey23 => HSL::new(0.0, 0.0, 0.22), - Color::Grey27 => HSL::new(0.0, 0.0, 0.26), - Color::Grey30 => HSL::new(0.0, 0.0, 0.3), - Color::Grey35 => HSL::new(0.0, 0.0, 0.34), - Color::Grey39 => HSL::new(0.0, 0.0, 0.37), - Color::Grey42 => HSL::new(0.0, 0.0, 0.4), - Color::Grey46 => HSL::new(0.0, 0.0, 0.46), - Color::Grey50 => HSL::new(0.0, 0.0, 0.5), - Color::Grey54 => HSL::new(0.0, 0.0, 0.54), - Color::Grey58 => HSL::new(0.0, 0.0, 0.58), - Color::Grey62 => HSL::new(0.0, 0.0, 0.61), - Color::Grey66 => HSL::new(0.0, 0.0, 0.65), - Color::Grey70 => HSL::new(0.0, 0.0, 0.69), - 
Color::Grey74 => HSL::new(0.0, 0.0, 0.73), - Color::Grey78 => HSL::new(0.0, 0.0, 0.77), - Color::Grey82 => HSL::new(0.0, 0.0, 0.81), - Color::Grey85 => HSL::new(0.0, 0.0, 0.85), - Color::Grey89 => HSL::new(0.0, 0.0, 0.89), - Color::Grey93 => HSL::new(0.0, 0.0, 0.93), - } - } -} - -impl Color { - pub fn iterator() -> Iter<'static, Color> { - use Color::*; - static ITEMS: [Color; 256] = [Black, Red, Green, Yellow, Blue, Magenta, Cyan, LightGray, DarkGray, LightRed, LightGreen, LightYellow, LightBlue, LightMagenta, LightCyan, White, Grey0, NavyBlue, DarkBlue, Blue3a, Blue3b, Blue1, DarkGreen, DeepSkyBlue4a, DeepSkyBlue4b, DeepSkyBlue4c, DodgerBlue3, DodgerBlue2, Green4, SpringGreen4, Turquoise4, DeepSkyBlue3a, DeepSkyBlue3b, DodgerBlue1, Green3a, SpringGreen3a, DarkCyan, LightSeaGreen, DeepSkyBlue2, DeepSkyBlue1, Green3b, SpringGreen3b, SpringGreen2a, Cyan3, DarkTurquoise, Turquoise2, Green1, SpringGreen2b, SpringGreen1, MediumSpringGreen, Cyan2, Cyan1, DarkRed1, DeepPink4a, Purple4a, Purple4b, Purple3, BlueViolet, Orange4a, Grey37, MediumPurple4, SlateBlue3a, SlateBlue3b, RoyalBlue1, Chartreuse4, DarkSeaGreen4a, PaleTurquoise4, SteelBlue, SteelBlue3, CornflowerBlue, Chartreuse3a, DarkSeaGreen4b, CadetBlue2, CadetBlue1, SkyBlue3, SteelBlue1a, Chartreuse3b, PaleGreen3a, SeaGreen3, Aquamarine3, MediumTurquoise, SteelBlue1b, Chartreuse2a, SeaGreen2, SeaGreen1a, SeaGreen1b, Aquamarine1a, DarkSlateGray2, DarkRed2, DeepPink4b, DarkMagenta1, DarkMagenta2, DarkViolet1a, Purple1a, Orange4b, LightPink4, Plum4, MediumPurple3a, MediumPurple3b, SlateBlue1, Yellow4a, Wheat4, Grey53, LightSlateGrey, MediumPurple, LightSlateBlue, Yellow4b, DarkOliveGreen3a, DarkGreenSea, LightSkyBlue3a, LightSkyBlue3b, SkyBlue2, Chartreuse2b, DarkOliveGreen3b, PaleGreen3b, DarkSeaGreen3a, DarkSlateGray3, SkyBlue1, Chartreuse1, LightGreen2, LightGreen3, PaleGreen1a, Aquamarine1b, DarkSlateGray1, Red3a, DeepPink4c, MediumVioletRed, Magenta3a, DarkViolet1b, Purple1b, DarkOrange3a, IndianRed1a, 
HotPink3a, MediumOrchid3, MediumOrchid, MediumPurple2a, DarkGoldenrod, LightSalmon3a, RosyBrown, Grey63, MediumPurple2b, MediumPurple1, Gold3a, DarkKhaki, NavajoWhite3, Grey69, LightSteelBlue3, LightSteelBlue, Yellow3a, DarkOliveGreen3, DarkSeaGreen3b, DarkSeaGreen2, LightCyan3, LightSkyBlue1, GreenYellow, DarkOliveGreen2, PaleGreen1b, DarkSeaGreen5b, DarkSeaGreen5a, PaleTurquoise1, Red3b, DeepPink3a, DeepPink3b, Magenta3b, Magenta3c, Magenta2a, DarkOrange3b, IndianRed1b, HotPink3b, HotPink2, Orchid, MediumOrchid1a, Orange3, LightSalmon3b, LightPink3, Pink3, Plum3, Violet, Gold3b, LightGoldenrod3, Tan, MistyRose3, Thistle3, Plum2, Yellow3b, Khaki3, LightGoldenrod2a, LightYellow3, Grey84, LightSteelBlue1, Yellow2, DarkOliveGreen1a, DarkOliveGreen1b, DarkSeaGreen1, Honeydew2, LightCyan1, Red1, DeepPink2, DeepPink1a, DeepPink1b, Magenta2b, Magenta1, OrangeRed1, IndianRed1c, IndianRed1d, HotPink1a, HotPink1b, MediumOrchid1b, DarkOrange, Salmon1, LightCoral, PaleVioletRed1, Orchid2, Orchid1, Orange1, SandyBrown, LightSalmon1, LightPink1, Pink1, Plum1, Gold1, LightGoldenrod2b, LightGoldenrod2c, NavajoWhite1, MistyRose1, Thistle1, Yellow1, LightGoldenrod1, Khaki1, Wheat1, CornSilk1, Grey100, Grey3, Grey7, Grey11, Grey15, Grey19, Grey23, Grey27, Grey30, Grey35, Grey39, Grey42, Grey46, Grey50, Grey54, Grey58, Grey62, Grey66, Grey70, Grey74, Grey78, Grey82, Grey85, Grey89, Grey93]; - ITEMS.iter() - } -} - -#[derive(Copy, Clone)] -pub enum ColorMode { - SIMPLE, - RGB, - HSL, -} - -#[derive(Clone)] -pub struct Colorado { - mode: ColorMode, - color: String, -} - - -impl Default for Colorado { - fn default() -> Colorado { - Colorado { - mode: ColorMode::SIMPLE, - color: String::default(), - } - } -} - -impl Colorado { - pub fn new(color: C) -> Colorado { - let c = format!("{}", color.to_color_str()); - Colorado { - color: c.clone(), - mode: if c.contains(";") { - ColorMode::RGB - } else { - ColorMode::SIMPLE - }, - } - } - pub fn get_color(&self) -> String { self.color.clone() } 
- pub fn get_mode(&self) -> ColorMode { self.mode } -} - - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_rgb_iterator() { - for _ in Color::iterator() {} - } -} \ No newline at end of file diff --git a/third_party/rust/colorful/src/core/hsl.rs b/third_party/rust/colorful/src/core/hsl.rs deleted file mode 100644 index 39d54c8064e9..000000000000 --- a/third_party/rust/colorful/src/core/hsl.rs +++ /dev/null @@ -1,105 +0,0 @@ -use core::ColorInterface; -use core::rgb::RGB; - -#[derive(Debug, Copy, Clone, PartialEq)] -pub struct HSL { - // range 0 ≤ H < 1.0, 0 ≤ S ≤ 1.0 and 0 ≤ L ≤ 1.0: - pub h: f32, - s: f32, - l: f32, -} - -impl HSL { - pub fn new(h: f32, s: f32, l: f32) -> HSL { - HSL { h: _round(h), s: _round(s), l: _round(l) } - } - - pub fn hsl_to_rgb(&self) -> RGB { - let red: f32; - let green: f32; - let blue: f32; - let var_1: f32; - let var_2: f32; - if self.s == 0.0 { - let tmp = self.l * 255.0; - red = tmp; - green = tmp; - blue = tmp; - } else { - if self.l < 0.5 { - var_2 = self.l * (1.0 + self.s); - } else { - var_2 = (self.l + self.s) - (self.s * self.l); - } - var_1 = 2.0 * self.l - var_2; - red = 255.0 * hue_2_rgb(var_1, var_2, &mut (self.h + (1.0 / 3.0))); - green = 255.0 * hue_2_rgb(var_1, var_2, &mut self.h.clone()); - blue = 255.0 * hue_2_rgb(var_1, var_2, &mut (self.h - (1.0 / 3.0))); - } - RGB::new(red.round() as u8, green.round() as u8, blue.round() as u8) - } -} - -impl ColorInterface for HSL { - fn to_color_str(&self) -> String { - self.hsl_to_rgb().to_color_str() - } - fn to_hsl(&self) -> HSL { *self } -} - -fn hue_2_rgb(v1: f32, v2: f32, vh: &mut f32) -> f32 { - if *vh < 0.0 { - *vh += 1.0; - } - if *vh > 1.0 { - *vh -= 1.0; - } - if 6.0 * *vh < 1.0 { - return v1 + (v2 - v1) * 6.0 * *vh; - } - if 2.0 * *vh < 1.0 { - return v2; - } - if 3.0 * *vh < 2.0 { - return v1 + (v2 - v1) * (2.0 / 3.0 - *vh) * 6.0; - } - v1 -} - -fn _round(value: f32) -> f32 { - if value < 0.0 { - 0.0 - } else if value >= 1.0 { - 1.0 - } else { - 
value - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_hsl_2_rgb_1() { - let hsl = HSL::new(0.7, 0.50, 0.60); - let rgb = RGB::new(122, 102, 204); - - assert_eq!(rgb, hsl.hsl_to_rgb()); - } - - #[test] - fn test_hsl_2_rgb_2() { - let hsl = HSL::new(0.7, 0.0, 0.60); - let rgb = RGB::new(153, 153, 153); - assert_eq!(rgb, hsl.hsl_to_rgb()); - } - - #[test] - fn test_hsl_2_rgb_3() { - let hsl = HSL::new(0.7, 0.50, 0.30); - let rgb = RGB::new(54, 38, 115); - - assert_eq!(rgb, hsl.hsl_to_rgb()); - } -} \ No newline at end of file diff --git a/third_party/rust/colorful/src/core/mod.rs b/third_party/rust/colorful/src/core/mod.rs deleted file mode 100644 index 69e215d92d84..000000000000 --- a/third_party/rust/colorful/src/core/mod.rs +++ /dev/null @@ -1,36 +0,0 @@ -use core::colors::Colorado; -use core::style::Style; -use HSL; - - -pub mod colors; -pub mod symbols; -pub mod style; -pub mod color_string; -pub mod rgb; -pub mod hsl; - -pub trait StrMarker { - fn to_str(&self) -> String; - fn get_fg_color(&self) -> Option { None } - fn get_bg_color(&self) -> Option { None } - fn get_style(&self) -> Option> { None } -} - -impl<'a> StrMarker for &'a str { - fn to_str(&self) -> String { - String::from(*self) - } -} - -impl StrMarker for String { - fn to_str(&self) -> String { - self.clone() - } -} - -/// `ColorInterface` is for basic trait for `Colorful`, `RGB`, `HSL` and `Color` implement this trait. 
-pub trait ColorInterface: Clone { - fn to_color_str(&self) -> String; - fn to_hsl(&self) -> HSL; -} diff --git a/third_party/rust/colorful/src/core/rgb.rs b/third_party/rust/colorful/src/core/rgb.rs deleted file mode 100644 index 560a83daded3..000000000000 --- a/third_party/rust/colorful/src/core/rgb.rs +++ /dev/null @@ -1,90 +0,0 @@ -use core::ColorInterface; -use HSL; - -#[derive(Debug, Copy, Clone, PartialEq)] -pub struct RGB { - // range 0 -255 - r: u8, - g: u8, - b: u8, -} - -impl RGB { - pub fn new(r: u8, g: u8, b: u8) -> RGB { - RGB { r, g, b } - } - - pub fn unpack(&self) -> (u8, u8, u8) { - (self.r, self.g, self.b) - } - - pub fn rgb_to_hsl(&self) -> HSL { - let (r, g, b) = self.unpack(); - let r = r as f32 / 255.0; - let g = g as f32 / 255.0; - let b = b as f32 / 255.0; - - let max = r.max(g).max(b); - let min = r.min(g).min(b); - let mut h: f32 = 0.0; - let mut s: f32 = 0.0; - let l = (max + min) / 2.0; - - if max != min { - let d = max - min; - s = if l > 0.5 { d / (2.0 - max - min) } else { d / (max + min) }; - if max == r { - h = (g - b) / d + (if g < b { 6.0 } else { 0.0 }); - } else if max == g { - h = (b - r) / d + 2.0; - } else { - h = (r - g) / d + 4.0; - } - h /= 6.0; - } - return HSL::new(h, s, l); - } -} - -impl ColorInterface for RGB { - fn to_color_str(&self) -> String { - format!("{};{};{}", self.r, self.g, self.b) - } - fn to_hsl(&self) -> HSL { self.rgb_to_hsl() } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_rgb_2_hsl_1() { - let hsl = HSL::new(0.69934636, 0.49999997, 0.60); - let rgb = RGB::new(122, 102, 204); - - assert_eq!(hsl, rgb.rgb_to_hsl()); - } - - #[test] - fn test_rgb_2_hsl_2() { - let hsl = HSL::new(0.0, 0.0, 0.60); - let rgb = RGB::new(153, 153, 153); - assert_eq!(hsl, rgb.rgb_to_hsl()); - } - - #[test] - fn test_rgb_2_hsl_3() { - let hsl = HSL::new(0.7012987, 0.50326794, 0.30); - let rgb = RGB::new(54, 38, 115); - - assert_eq!(hsl, rgb.rgb_to_hsl()); - } - - #[test] - fn test_rgb_2_hsl_4() { - let 
hsl = HSL::new(0.08333334, 1.0, 0.6862745); - let rgb = RGB::new(255, 175, 95); - - assert_eq!(hsl, rgb.rgb_to_hsl()); - } -} \ No newline at end of file diff --git a/third_party/rust/colorful/src/core/style.rs b/third_party/rust/colorful/src/core/style.rs deleted file mode 100644 index 11f7c0878429..000000000000 --- a/third_party/rust/colorful/src/core/style.rs +++ /dev/null @@ -1,24 +0,0 @@ -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum Style { - Bold, - Dim, - Underlined, - Blink, - // invert the foreground and background colors - Reverse, - // useful for passwords - Hidden, -} - -impl Style { - pub fn to_string(&self) -> String { - match self { - Style::Bold => String::from("1"), - Style::Dim => String::from("2"), - Style::Underlined => String::from("4"), - Style::Blink => String::from("5"), - Style::Reverse => String::from("7"), - Style::Hidden => String::from("8"), - } - } -} diff --git a/third_party/rust/colorful/src/core/symbols.rs b/third_party/rust/colorful/src/core/symbols.rs deleted file mode 100644 index fddfe317eea7..000000000000 --- a/third_party/rust/colorful/src/core/symbols.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::fmt::Display; -use std::fmt::Formatter; -use std::fmt::Result as FmtResult; - -pub enum Symbol { - Mode, - Semicolon, - LeftBrackets, - Esc, - Reset, - Simple256Foreground, - Simple256Background, - RgbForeground, - RgbBackground, - ResetStyle, - ResetForeground, - ResetBackground, - ClearScreenFromCursorToEnd, - ClearScreenUpToCursor, - ClearEntireScreen, - ClearLineFromCursorToEnd, - ClearLineUpToCursor, - ClearEntireLine -} - -impl Symbol { - pub fn to_str<'a>(&self) -> &'a str { - match self { - Symbol::Mode => "m", - Symbol::Semicolon => ";", - Symbol::LeftBrackets => "[", - Symbol::Esc => "\x1B", - Symbol::Reset => "\x1B[0m", - Symbol::Simple256Foreground => "\x1B[38;5;", - Symbol::Simple256Background => "\x1B[48;5;", - Symbol::RgbForeground => "\x1B[38;2;", - Symbol::RgbBackground => "\x1B[48;2;", - Symbol::ResetStyle 
=> "\x1B[20m", - Symbol::ResetForeground => "\x1B[39m", - Symbol::ResetBackground => "\x1B[49m", - Symbol::ClearScreenFromCursorToEnd => "\x1B[0J", - Symbol::ClearScreenUpToCursor => "\x1B[1J", - Symbol::ClearEntireScreen => "\x1B[2J", - Symbol::ClearLineFromCursorToEnd => "\x1B[0K", - Symbol::ClearLineUpToCursor => "\x1B[1K", - Symbol::ClearEntireLine => "\x1B[2K", - } - } -} - -impl Display for Symbol { - fn fmt(&self, f: &mut Formatter) -> FmtResult { - write!(f, "{}", self.to_str()) - } -} \ No newline at end of file diff --git a/third_party/rust/colorful/src/lib.rs b/third_party/rust/colorful/src/lib.rs deleted file mode 100644 index 31927e331890..000000000000 --- a/third_party/rust/colorful/src/lib.rs +++ /dev/null @@ -1,316 +0,0 @@ -//! Colored your terminal. -//! You can use this package to make your string colorful in terminal. -//! Platform support: -//! - Linux -//! - macOS -//! - -/// It is recommended to use `Color` enum item to set foreground color not literal string. -/// Literal string makes program uncontrollable, you can use `Color` `RGB::new(1,1,1)` or `HSL::new(0.5,0.5,0.5)` -/// to create color and pass the variable to method. -/// # Examples -/// ``` -/// extern crate colorful; -/// -/// use colorful::Colorful; -/// use colorful::Color; -/// -/// fn main(){ -/// let s = "Hello world"; -/// println!("{}",s.color(Color::Red).bg_color(Color::Yellow).bold().to_string()); -/// } -/// ``` - -use std::{thread, time}; - -use core::color_string::CString; -use core::ColorInterface; -pub use core::colors::Color; -pub use core::hsl::HSL; -pub use core::rgb::RGB; -use core::StrMarker; -pub use core::style::Style; - -pub mod core; - -/// Support `&str` and `String`, you can use `"text".red()` and `s.red()` for s:String -pub trait Colorful { - /// Set foreground color. Support Color enum and HSL, RGB mode. 
- /// ```Rust - /// extern crate colorful; - /// - /// use colorful::Colorful; - /// use colorful::Color; - /// - /// fn main() { - /// let a = "Hello world"; - /// println!("{}", a.color(Color::Red)); - /// println!("{}", a.blue()); - /// let b = String::from("Hello world"); - /// println!("{}", b.blue()); - /// } - /// ``` - fn color(self, color: C) -> CString; - fn black(self) -> CString; - fn red(self) -> CString; - fn green(self) -> CString; - fn yellow(self) -> CString; - fn blue(self) -> CString; - fn magenta(self) -> CString; - fn cyan(self) -> CString; - fn light_gray(self) -> CString; - fn dark_gray(self) -> CString; - fn light_red(self) -> CString; - fn light_green(self) -> CString; - fn light_yellow(self) -> CString; - fn light_blue(self) -> CString; - fn light_magenta(self) -> CString; - fn light_cyan(self) -> CString; - fn white(self) -> CString; - /// Set background color. Support Color enum and HSL, RGB mode. - /// ```Rust - /// extern crate colorful; - /// - /// use colorful::Colorful; - /// use colorful::Color; - /// - /// fn main() { - /// let a = "Hello world"; - /// println!("{}", a.bg_color(Color::Red)); - /// } - /// ``` - fn bg_color(self, color: C) -> CString; - fn bg_black(self) -> CString; - fn bg_red(self) -> CString; - fn bg_green(self) -> CString; - fn bg_yellow(self) -> CString; - fn bg_blue(self) -> CString; - fn bg_magenta(self) -> CString; - fn bg_cyan(self) -> CString; - fn bg_light_gray(self) -> CString; - fn bg_dark_gray(self) -> CString; - fn bg_light_red(self) -> CString; - fn bg_light_green(self) -> CString; - fn bg_light_yellow(self) -> CString; - fn bg_light_blue(self) -> CString; - fn bg_light_magenta(self) -> CString; - fn bg_light_cyan(self) -> CString; - fn bg_white(self) -> CString; - /// Support RGB color and HSL mode - /// ```Rust - /// extern crate colorful; - /// - /// use colorful::Colorful; - /// - /// fn main() { - /// let a = "Hello world"; - /// println!("{}", a.rgb(100, 100, 100).bg_rgb(100, 100, 100); - /// 
println!("{}", a.hsl(0.5, 0.5, 0.5)).bg_hsl(0.5, 0.5, 0.5)); - /// } - /// ``` - fn rgb(self, r: u8, g: u8, b: u8) -> CString; - fn bg_rgb(self, r: u8, g: u8, b: u8) -> CString; - fn hsl(self, h: f32, s: f32, l: f32) -> CString; - fn bg_hsl(self, h: f32, s: f32, l: f32) -> CString; - /// Terminal effect - /// See [ANSI escape code](https://en.wikipedia.org/wiki/ANSI_escape_code) - /// For terminals compatibility, check [Terminals compatibility](https://github.com/rocketsman/colorful#terminals-compatibility) - fn style(self, style: Style) -> CString; - /// Turn bold mode on. - fn bold(self) -> CString; - /// Turn blinking mode on. Blink doesn't work in many terminal emulators ,and it will still work on the console. - fn blink(self) -> CString; - /// Turn low intensity mode on. - fn dim(self) -> CString; - /// Turn underline mode on. - fn underlined(self) -> CString; - /// Turn reverse mode on (invert the foreground and background colors). - fn reverse(self) -> CString; - /// Turn invisible text mode on (useful for passwords). - fn hidden(self) -> CString; - /// Apply gradient color to sentences, support multiple lines. - /// You can use `use colorful::Color;` or `use colorful::HSL;` or `use colorful::RGB;` - /// to import colors and create gradient string. 
- /// ```Rust - /// extern crate colorful; - /// - /// use colorful::Color; - /// use colorful::Colorful; - /// - /// fn main() { - /// println!("{}", "This code is editable and runnable!".gradient(Color::Red)); - /// println!("{}", "¡Este código es editable y ejecutable!".gradient(Color::Green)); - /// println!("{}", "Ce code est modifiable et exécutable !".gradient(Color::Yellow)); - /// println!("{}", "Questo codice è modificabile ed eseguibile!".gradient(Color::Blue)); - /// println!("{}", "このコードは編集して実行出来ます!".gradient(Color::Magenta)); - /// println!("{}", "여기에서 코드를 수정하고 실행할 수 있습니다!".gradient(Color::Cyan)); - /// println!("{}", "Ten kod można edytować oraz uruchomić!".gradient(Color::LightGray)); - /// println!("{}", "Este código é editável e executável!".gradient(Color::DarkGray)); - /// println!("{}", "Этот код можно отредактировать и запустить!".gradient(Color::LightRed)); - /// println!("{}", "Bạn có thể edit và run code trực tiếp!".gradient(Color::LightGreen)); - /// println!("{}", "这段代码是可以编辑并且能够运行的!".gradient(Color::LightYellow)); - /// println!("{}", "Dieser Code kann bearbeitet und ausgeführt werden!".gradient(Color::LightBlue)); - /// println!("{}", "Den här koden kan redigeras och köras!".gradient(Color::LightMagenta)); - /// println!("{}", "Tento kód můžete upravit a spustit".gradient(Color::LightCyan)); - /// println!("{}", "این کد قابلیت ویرایش و اجرا دارد!".gradient(Color::White)); - /// println!("{}", "โค้ดนี้สามารถแก้ไขได้และรันได้".gradient(Color::Grey0)); - /// } - /// ``` - /// - fn gradient_with_step(self, color: C, step: f32) -> CString; - fn gradient_with_color(self, start: C, stop: C) -> CString; - fn gradient(self, color: C) -> CString; - fn rainbow_with_speed(self, speed: i32); - /// Rainbow mode, support multiple lines - /// - fn rainbow(self); - fn neon_with_speed(self, low: C, high: C, speed: i32); - /// Neon mode - fn neon(self, low: C, high: C); - /// Show some warning words. 
- /// - fn warn(self); -} - -impl Colorful for T where T: StrMarker { - fn color(self, color: C) -> CString { CString::create_by_fg(self, color) } - fn black(self) -> CString { self.color(Color::Black) } - fn red(self) -> CString { self.color(Color::Red) } - fn green(self) -> CString { self.color(Color::Green) } - fn yellow(self) -> CString { self.color(Color::Yellow) } - fn blue(self) -> CString { self.color(Color::Blue) } - fn magenta(self) -> CString { self.color(Color::Magenta) } - fn cyan(self) -> CString { self.color(Color::Cyan) } - fn light_gray(self) -> CString { self.color(Color::LightGray) } - fn dark_gray(self) -> CString { self.color(Color::DarkGray) } - fn light_red(self) -> CString { self.color(Color::LightRed) } - fn light_green(self) -> CString { self.color(Color::LightGreen) } - fn light_yellow(self) -> CString { self.color(Color::LightYellow) } - fn light_blue(self) -> CString { self.color(Color::LightBlue) } - fn light_magenta(self) -> CString { self.color(Color::LightMagenta) } - fn light_cyan(self) -> CString { self.color(Color::LightCyan) } - fn white(self) -> CString { self.color(Color::White) } - fn bg_color(self, color: C) -> CString { CString::create_by_bg(self, color) } - fn bg_black(self) -> CString { self.bg_color(Color::Black) } - fn bg_red(self) -> CString { self.bg_color(Color::Red) } - fn bg_green(self) -> CString { self.bg_color(Color::Green) } - fn bg_yellow(self) -> CString { self.bg_color(Color::Yellow) } - fn bg_blue(self) -> CString { self.bg_color(Color::Blue) } - fn bg_magenta(self) -> CString { self.bg_color(Color::Magenta) } - fn bg_cyan(self) -> CString { self.bg_color(Color::Cyan) } - fn bg_light_gray(self) -> CString { self.bg_color(Color::LightGray) } - fn bg_dark_gray(self) -> CString { self.bg_color(Color::DarkGray) } - fn bg_light_red(self) -> CString { self.bg_color(Color::LightRed) } - fn bg_light_green(self) -> CString { self.bg_color(Color::LightGreen) } - fn bg_light_yellow(self) -> CString { 
self.bg_color(Color::LightYellow) } - fn bg_light_blue(self) -> CString { self.bg_color(Color::LightBlue) } - fn bg_light_magenta(self) -> CString { self.bg_color(Color::LightMagenta) } - fn bg_light_cyan(self) -> CString { self.bg_color(Color::LightCyan) } - fn bg_white(self) -> CString { self.bg_color(Color::White) } - fn rgb(self, r: u8, g: u8, b: u8) -> CString { CString::create_by_fg(self, RGB::new(r, g, b)) } - fn bg_rgb(self, r: u8, g: u8, b: u8) -> CString { CString::create_by_bg(self, RGB::new(r, g, b)) } - fn hsl(self, h: f32, s: f32, l: f32) -> CString { CString::create_by_fg(self, HSL::new(h, s, l)) } - fn bg_hsl(self, h: f32, s: f32, l: f32) -> CString { CString::create_by_bg(self, HSL::new(h, s, l)) } - fn style(self, style: Style) -> CString { CString::create_by_style(self, style) } - fn bold(self) -> CString { self.style(Style::Bold) } - fn blink(self) -> CString { self.style(Style::Blink) } - fn dim(self) -> CString { self.style(Style::Dim) } - fn underlined(self) -> CString { self.style(Style::Underlined) } - fn reverse(self) -> CString { self.style(Style::Reverse) } - fn hidden(self) -> CString { self.style(Style::Hidden) } - fn gradient_with_step(self, color: C, step: f32) -> CString { - let mut t = vec![]; - let mut start = color.to_hsl().h; - let s = self.to_str(); - let c = s.chars(); - let length = c.clone().count() - 1; - for (index, i) in c.enumerate() { - let b = i.to_string(); - let tmp = b.hsl(start, 1.0, 0.5).to_string(); - t.push(format!("{}", &tmp[..tmp.len() - if index != length { 4 } else { 0 }])); - start = (start + step) % 1.0; - } - CString::create_by_text(self, t.join("")) - } - fn gradient_with_color(self, start: C, stop: C) -> CString { - let mut t = vec![]; - let c = self.to_str(); - let s = c.chars(); - let length = s.clone().count() - 1; - let mut start = start.to_hsl().h; - let stop = stop.to_hsl().h; - let step = (stop - start) / length as f32; - for (index, i) in s.enumerate() { - let b = i.to_string(); - let tmp = 
b.hsl(start, 1.0, 0.5).to_string(); - t.push(format!("{}", &tmp[..tmp.len() - if index != length { 4 } else { 0 }])); - start = (start + step) % 1.0; - } - CString::create_by_text(self, t.join("")) - } - fn gradient(self, color: C) -> CString { - let text = self.to_str(); - let lines: Vec<_> = text.lines().collect(); - let mut tmp = vec![]; - for sub_str in lines.iter() { - tmp.push(sub_str.gradient_with_step(color.clone(), 1.5 / 360.0).to_string()); - } - CString::new(tmp.join("\n")) - } - fn rainbow_with_speed(self, speed: i32) { - let respite: u64 = match speed { - 3 => { 10 } - 2 => { 5 } - 1 => { 2 } - _ => { 0 } - }; - let text = self.to_str(); - let lines: Vec<_> = text.lines().collect(); - for i in 0..360 { - let mut tmp = vec![]; - for sub_str in lines.iter() { - tmp.push(sub_str.gradient_with_step(HSL::new(i as f32 / 360.0, 1.0, 0.5), 0.02).to_string()); - } - println!("{}\x1B[{}F\x1B[G\x1B[2K", tmp.join("\n"), lines.len()); - let ten_millis = time::Duration::from_millis(respite); - thread::sleep(ten_millis); - } - } - fn rainbow(self) { - self.rainbow_with_speed(3); - } - fn neon_with_speed(self, high: C, low: C, speed: i32) { - let respite: u64 = match speed { - 3 => { 500 } - 2 => { 200 } - 1 => { 100 } - _ => { 0 } - }; - let text = self.to_str(); - let lines: Vec<_> = text.lines().collect(); - let mut coin = true; - let positive = format!("{}\x1B[{}F\x1B[2K", text.clone().color(high), lines.len()); - let negative = format!("{}\x1B[{}F\x1B[2K", text.clone().color(low), lines.len()); - for _ in 0..360 { - if coin { println!("{}", positive) } else { println!("{}", negative) }; - let ten_millis = time::Duration::from_millis(respite); - thread::sleep(ten_millis); - coin = !coin; - } - } - fn neon(self, high: C, low: C) { - self.neon_with_speed(high, low, 3); - } - fn warn(self) { - self.neon_with_speed(RGB::new(226, 14, 14), RGB::new(158, 158, 158), 3); - } -} - -pub trait ExtraColorInterface { - fn grey0(self) -> CString; -} - -impl ExtraColorInterface 
for T where T: Colorful { - fn grey0(self) -> CString { self.color(Color::Grey0) } -} - diff --git a/third_party/rust/colorful/tests/test_all_color.rs b/third_party/rust/colorful/tests/test_all_color.rs deleted file mode 100644 index e2803734e6c9..000000000000 --- a/third_party/rust/colorful/tests/test_all_color.rs +++ /dev/null @@ -1,280 +0,0 @@ -extern crate colorful; -extern crate core; - -use colorful::Colorful; -use colorful::Color; -use colorful::core::ColorInterface; -use colorful::HSL; - -#[test] -fn test_color() { - let s = "Hello world"; - for (i, color) in Color::iterator().enumerate() { - assert_eq!(format!("\x1B[38;5;{}mHello world\x1B[0m", i.to_owned()), s.color(*color).to_string()); - } -} - - -#[test] -fn test_color_to_hsl() { - let l = vec![ - HSL::new(0.0, 0.0, 0.0), - HSL::new(0.0, 1.0, 0.25), - HSL::new(0.3333333333333333, 1.0, 0.25), - HSL::new(0.16666666666666666, 1.0, 0.25), - HSL::new(0.6666666666666666, 1.0, 0.25), - HSL::new(0.8333333333333334, 1.0, 0.25), - HSL::new(0.5, 1.0, 0.25), - HSL::new(0.0, 0.0, 0.75), - HSL::new(0.0, 0.0, 0.5), - HSL::new(0.0, 1.0, 0.5), - HSL::new(0.3333333333333333, 1.0, 0.5), - HSL::new(0.16666666666666666, 1.0, 0.5), - HSL::new(0.6666666666666666, 1.0, 0.5), - HSL::new(0.8333333333333334, 1.0, 0.5), - HSL::new(0.5, 1.0, 0.5), - HSL::new(0.0, 0.0, 1.0), - HSL::new(0.0, 0.0, 0.0), - HSL::new(0.6666666666666666, 1.0, 0.18), - HSL::new(0.6666666666666666, 1.0, 0.26), - HSL::new(0.6666666666666666, 1.0, 0.34), - HSL::new(0.6666666666666666, 1.0, 0.42), - HSL::new(0.6666666666666666, 1.0, 0.5), - HSL::new(0.3333333333333333, 1.0, 0.18), - HSL::new(0.5, 1.0, 0.18), - HSL::new(0.5493827160493834, 1.0, 0.26), - HSL::new(0.5761904761904749, 1.0, 0.34), - HSL::new(0.5930232558139528, 1.0, 0.42), - HSL::new(0.6045751633986917, 1.0, 0.5), - HSL::new(0.3333333333333333, 1.0, 0.26), - HSL::new(0.4506172839506167, 1.0, 0.26), - HSL::new(0.5, 1.0, 0.26), - HSL::new(0.538095238095239, 1.0, 0.34), - HSL::new(0.5620155038759694, 
1.0, 0.42), - HSL::new(0.5784313725490194, 1.0, 0.5), - HSL::new(0.3333333333333333, 1.0, 0.34), - HSL::new(0.423809523809525, 1.0, 0.34), - HSL::new(0.4619047619047611, 1.0, 0.34), - HSL::new(0.5, 1.0, 0.34), - HSL::new(0.5310077519379833, 1.0, 0.42), - HSL::new(0.5522875816993472, 1.0, 0.5), - HSL::new(0.3333333333333333, 1.0, 0.42), - HSL::new(0.40697674418604723, 1.0, 0.42), - HSL::new(0.43798449612403056, 1.0, 0.42), - HSL::new(0.4689922480620166, 1.0, 0.42), - HSL::new(0.5, 1.0, 0.42), - HSL::new(0.5261437908496722, 1.0, 0.5), - HSL::new(0.3333333333333333, 1.0, 0.5), - HSL::new(0.39542483660130834, 1.0, 0.5), - HSL::new(0.4215686274509806, 1.0, 0.5), - HSL::new(0.4477124183006528, 1.0, 0.5), - HSL::new(0.4738562091503278, 1.0, 0.5), - HSL::new(0.5, 1.0, 0.5), - HSL::new(0.0, 1.0, 0.18), - HSL::new(0.8333333333333334, 1.0, 0.18), - HSL::new(0.78395061728395, 1.0, 0.26), - HSL::new(0.7571428571428583, 1.0, 0.34), - HSL::new(0.7403100775193806, 1.0, 0.42), - HSL::new(0.7287581699346417, 1.0, 0.5), - HSL::new(0.16666666666666666, 1.0, 0.18), - HSL::new(0.0, 0.0, 0.37), - HSL::new(0.6666666666666666, 0.17, 0.45), - HSL::new(0.6666666666666666, 0.33, 0.52), - HSL::new(0.6666666666666666, 0.6, 0.6), - HSL::new(0.6666666666666666, 1.0, 0.68), - HSL::new(0.21604938271604945, 1.0, 0.26), - HSL::new(0.3333333333333333, 0.17, 0.45), - HSL::new(0.5, 0.17, 0.45), - HSL::new(0.5833333333333334, 0.33, 0.52), - HSL::new(0.6111111111111112, 0.6, 0.6), - HSL::new(0.625, 1.0, 0.68), - HSL::new(0.24285714285714277, 1.0, 0.34), - HSL::new(0.3333333333333333, 0.33, 0.52), - HSL::new(0.4166666666666667, 0.33, 0.52), - HSL::new(0.5, 0.33, 0.52), - HSL::new(0.5555555555555556, 0.6, 0.6), - HSL::new(0.5833333333333334, 1.0, 0.68), - HSL::new(0.2596899224806203, 1.0, 0.42), - HSL::new(0.3333333333333333, 0.6, 0.6), - HSL::new(0.3888888888888889, 0.6, 0.6), - HSL::new(0.4444444444444444, 0.6, 0.6), - HSL::new(0.5, 0.6, 0.6), - HSL::new(0.5416666666666666, 1.0, 0.68), - 
HSL::new(0.27124183006535946, 1.0, 0.5), - HSL::new(0.3333333333333333, 1.0, 0.68), - HSL::new(0.375, 1.0, 0.68), - HSL::new(0.4166666666666667, 1.0, 0.68), - HSL::new(0.4583333333333333, 1.0, 0.68), - HSL::new(0.5, 1.0, 0.68), - HSL::new(0.0, 1.0, 0.26), - HSL::new(0.8827160493827166, 1.0, 0.26), - HSL::new(0.8333333333333334, 1.0, 0.26), - HSL::new(0.7952380952380944, 1.0, 0.34), - HSL::new(0.7713178294573639, 1.0, 0.42), - HSL::new(0.7549019607843138, 1.0, 0.5), - HSL::new(0.11728395061728389, 1.0, 0.26), - HSL::new(0.0, 0.17, 0.45), - HSL::new(0.8333333333333334, 0.17, 0.45), - HSL::new(0.75, 0.33, 0.52), - HSL::new(0.7222222222222222, 0.6, 0.6), - HSL::new(0.7083333333333334, 1.0, 0.68), - HSL::new(0.16666666666666666, 1.0, 0.26), - HSL::new(0.16666666666666666, 0.17, 0.45), - HSL::new(0.0, 0.0, 0.52), - HSL::new(0.6666666666666666, 0.2, 0.6), - HSL::new(0.6666666666666666, 0.5, 0.68), - HSL::new(0.6666666666666666, 1.0, 0.76), - HSL::new(0.2047619047619047, 1.0, 0.34), - HSL::new(0.25, 0.33, 0.52), - HSL::new(0.3333333333333333, 0.2, 0.6), - HSL::new(0.5, 0.2, 0.6), - HSL::new(0.5833333333333334, 0.5, 0.68), - HSL::new(0.6111111111111112, 1.0, 0.76), - HSL::new(0.22868217054263557, 1.0, 0.42), - HSL::new(0.2777777777777778, 0.6, 0.6), - HSL::new(0.3333333333333333, 0.5, 0.68), - HSL::new(0.4166666666666667, 0.5, 0.68), - HSL::new(0.5, 0.5, 0.68), - HSL::new(0.5555555555555556, 1.0, 0.76), - HSL::new(0.2450980392156864, 1.0, 0.5), - HSL::new(0.2916666666666667, 1.0, 0.68), - HSL::new(0.3333333333333333, 1.0, 0.76), - HSL::new(0.3888888888888889, 1.0, 0.76), - HSL::new(0.4444444444444444, 1.0, 0.76), - HSL::new(0.5, 1.0, 0.76), - HSL::new(0.0, 1.0, 0.34), - HSL::new(0.9095238095238083, 1.0, 0.34), - HSL::new(0.8714285714285722, 1.0, 0.34), - HSL::new(0.8333333333333334, 1.0, 0.34), - HSL::new(0.80232558139535, 1.0, 0.42), - HSL::new(0.7810457516339862, 1.0, 0.5), - HSL::new(0.09047619047619054, 1.0, 0.34), - HSL::new(0.0, 0.33, 0.52), - 
HSL::new(0.9166666666666666, 0.33, 0.52), - HSL::new(0.8333333333333334, 0.33, 0.52), - HSL::new(0.7777777777777778, 0.6, 0.6), - HSL::new(0.75, 1.0, 0.68), - HSL::new(0.12857142857142861, 1.0, 0.34), - HSL::new(0.08333333333333333, 0.33, 0.52), - HSL::new(0.0, 0.2, 0.6), - HSL::new(0.8333333333333334, 0.2, 0.6), - HSL::new(0.75, 0.5, 0.68), - HSL::new(0.7222222222222222, 1.0, 0.76), - HSL::new(0.16666666666666666, 1.0, 0.34), - HSL::new(0.16666666666666666, 0.33, 0.52), - HSL::new(0.16666666666666666, 0.2, 0.6), - HSL::new(0.0, 0.0, 0.68), - HSL::new(0.6666666666666666, 0.33, 0.76), - HSL::new(0.6666666666666666, 1.0, 0.84), - HSL::new(0.1976744186046511, 1.0, 0.42), - HSL::new(0.2222222222222222, 0.6, 0.6), - HSL::new(0.25, 0.5, 0.68), - HSL::new(0.3333333333333333, 0.33, 0.76), - HSL::new(0.5, 0.33, 0.76), - HSL::new(0.5833333333333334, 1.0, 0.84), - HSL::new(0.21895424836601304, 1.0, 0.5), - HSL::new(0.25, 1.0, 0.68), - HSL::new(0.2777777777777778, 1.0, 0.76), - HSL::new(0.3333333333333333, 1.0, 0.84), - HSL::new(0.4166666666666667, 1.0, 0.84), - HSL::new(0.5, 1.0, 0.84), - HSL::new(0.0, 1.0, 0.42), - HSL::new(0.926356589147286, 1.0, 0.42), - HSL::new(0.8953488372093028, 1.0, 0.42), - HSL::new(0.8643410852713166, 1.0, 0.42), - HSL::new(0.8333333333333334, 1.0, 0.42), - HSL::new(0.8071895424836611, 1.0, 0.5), - HSL::new(0.07364341085271306, 1.0, 0.42), - HSL::new(0.0, 0.6, 0.6), - HSL::new(0.9444444444444444, 0.6, 0.6), - HSL::new(0.8888888888888888, 0.6, 0.6), - HSL::new(0.8333333333333334, 0.6, 0.6), - HSL::new(0.7916666666666666, 1.0, 0.68), - HSL::new(0.10465116279069778, 1.0, 0.42), - HSL::new(0.05555555555555555, 0.6, 0.6), - HSL::new(0.0, 0.5, 0.68), - HSL::new(0.9166666666666666, 0.5, 0.68), - HSL::new(0.8333333333333334, 0.5, 0.68), - HSL::new(0.7777777777777778, 1.0, 0.76), - HSL::new(0.13565891472868222, 1.0, 0.42), - HSL::new(0.1111111111111111, 0.6, 0.6), - HSL::new(0.08333333333333333, 0.5, 0.68), - HSL::new(0.0, 0.33, 0.76), - 
HSL::new(0.8333333333333334, 0.33, 0.76), - HSL::new(0.75, 1.0, 0.84), - HSL::new(0.16666666666666666, 1.0, 0.42), - HSL::new(0.16666666666666666, 0.6, 0.6), - HSL::new(0.16666666666666666, 0.5, 0.68), - HSL::new(0.16666666666666666, 0.33, 0.76), - HSL::new(0.0, 0.0, 0.84), - HSL::new(0.6666666666666666, 1.0, 0.92), - HSL::new(0.19281045751633974, 1.0, 0.5), - HSL::new(0.20833333333333334, 1.0, 0.68), - HSL::new(0.2222222222222222, 1.0, 0.76), - HSL::new(0.25, 1.0, 0.84), - HSL::new(0.3333333333333333, 1.0, 0.92), - HSL::new(0.5, 1.0, 0.92), - HSL::new(0.0, 1.0, 0.5), - HSL::new(0.937908496732025, 1.0, 0.5), - HSL::new(0.9117647058823528, 1.0, 0.5), - HSL::new(0.8856209150326805, 1.0, 0.5), - HSL::new(0.8594771241830055, 1.0, 0.5), - HSL::new(0.8333333333333334, 1.0, 0.5), - HSL::new(0.06209150326797389, 1.0, 0.5), - HSL::new(0.0, 1.0, 0.68), - HSL::new(0.9583333333333334, 1.0, 0.68), - HSL::new(0.9166666666666666, 1.0, 0.68), - HSL::new(0.875, 1.0, 0.68), - HSL::new(0.8333333333333334, 1.0, 0.68), - HSL::new(0.08823529411764694, 1.0, 0.5), - HSL::new(0.041666666666666664, 1.0, 0.68), - HSL::new(0.0, 1.0, 0.76), - HSL::new(0.9444444444444444, 1.0, 0.76), - HSL::new(0.8888888888888888, 1.0, 0.76), - HSL::new(0.8333333333333334, 1.0, 0.76), - HSL::new(0.11437908496732027, 1.0, 0.5), - HSL::new(0.08333333333333333, 1.0, 0.68), - HSL::new(0.05555555555555555, 1.0, 0.76), - HSL::new(0.0, 1.0, 0.84), - HSL::new(0.9166666666666666, 1.0, 0.84), - HSL::new(0.8333333333333334, 1.0, 0.84), - HSL::new(0.14052287581699335, 1.0, 0.5), - HSL::new(0.125, 1.0, 0.68), - HSL::new(0.1111111111111111, 1.0, 0.76), - HSL::new(0.08333333333333333, 1.0, 0.84), - HSL::new(0.0, 1.0, 0.92), - HSL::new(0.8333333333333334, 1.0, 0.92), - HSL::new(0.16666666666666666, 1.0, 0.5), - HSL::new(0.16666666666666666, 1.0, 0.68), - HSL::new(0.16666666666666666, 1.0, 0.76), - HSL::new(0.16666666666666666, 1.0, 0.84), - HSL::new(0.16666666666666666, 1.0, 0.92), - HSL::new(0.0, 0.0, 1.0), - HSL::new(0.0, 
0.0, 0.03), - HSL::new(0.0, 0.0, 0.07), - HSL::new(0.0, 0.0, 0.1), - HSL::new(0.0, 0.0, 0.14), - HSL::new(0.0, 0.0, 0.18), - HSL::new(0.0, 0.0, 0.22), - HSL::new(0.0, 0.0, 0.26), - HSL::new(0.0, 0.0, 0.3), - HSL::new(0.0, 0.0, 0.34), - HSL::new(0.0, 0.0, 0.37), - HSL::new(0.0, 0.0, 0.4), - HSL::new(0.0, 0.0, 0.46), - HSL::new(0.0, 0.0, 0.5), - HSL::new(0.0, 0.0, 0.54), - HSL::new(0.0, 0.0, 0.58), - HSL::new(0.0, 0.0, 0.61), - HSL::new(0.0, 0.0, 0.65), - HSL::new(0.0, 0.0, 0.69), - HSL::new(0.0, 0.0, 0.73), - HSL::new(0.0, 0.0, 0.77), - HSL::new(0.0, 0.0, 0.81), - HSL::new(0.0, 0.0, 0.85), - HSL::new(0.0, 0.0, 0.89), - HSL::new(0.0, 0.0, 0.93), ]; - for (i, color) in Color::iterator().enumerate() { - assert_eq!(color.to_hsl(), l[i]); - } -} \ No newline at end of file diff --git a/third_party/rust/colorful/tests/test_animation.rs b/third_party/rust/colorful/tests/test_animation.rs deleted file mode 100644 index fd40781a8c97..000000000000 --- a/third_party/rust/colorful/tests/test_animation.rs +++ /dev/null @@ -1,31 +0,0 @@ -extern crate colorful; - -use colorful::Color; -use colorful::Colorful; -use colorful::HSL; -use colorful::RGB; - -#[test] -fn test_rainbow() { - let s = "Hello world"; - s.rainbow_with_speed(0); -} - -#[test] -fn test_neon_1() { - let s = "Hello world"; - s.neon_with_speed(Color::Grey0, Color::Grey0, 0); -} - -#[test] -fn test_neon_2() { - let s = "Hello world"; - s.neon_with_speed(HSL::new(1.0, 1.0, 0.4), HSL::new(0.5, 1.0, 0.4), 0); -} - -#[test] -fn test_neon_3() { - let s = "Hello world"; - s.neon_with_speed(RGB::new(122, 122, 122), RGB::new(222, 222, 222), 0); -} - diff --git a/third_party/rust/colorful/tests/test_basic.rs b/third_party/rust/colorful/tests/test_basic.rs deleted file mode 100644 index 9e05eb43f818..000000000000 --- a/third_party/rust/colorful/tests/test_basic.rs +++ /dev/null @@ -1,53 +0,0 @@ -extern crate colorful; -extern crate core; - -use colorful::Colorful; -use colorful::Color; -use colorful::Style; - -#[test] -fn 
test_1() { - assert_eq!("\u{1b}", "\x1B"); -} - -#[test] -fn test_color() { - let s = "Hello world"; - assert_eq!("\x1B[38;5;1mHello world\x1B[0m".to_owned(), s.color(Color::Red).to_string()); - assert_eq!("\x1B[38;5;220mHello world\x1B[0m".to_owned(), s.color(Color::Red).color(Color::Gold1).to_string()); -} - -#[test] -fn test_bg_color() { - let s = "Hello world"; - assert_eq!("\x1B[38;5;1m\x1B[48;5;16mHello world\x1B[0m".to_owned(), s.color(Color::Red).bg_color(Color::Grey0).to_string()); - assert_eq!("\x1B[38;5;1m\x1B[48;5;6mHello world\x1B[0m".to_owned(), s.color(Color::Red).bg_cyan().to_string()); - assert_eq!("\x1B[38;5;220m\x1B[48;5;6mHello world\x1B[0m".to_owned(), s.color(Color::Red).color(Color::Gold1).bg_color(Color::Cyan).to_string()); -} - - -#[test] -fn test_style() { - let s = "Hello world"; - assert_eq!("\x1B[1mHello world\x1B[0m".to_owned(), s.style(Style::Bold).to_string()); - assert_eq!("\x1B[1;5mHello world\x1B[0m".to_owned(), s.style(Style::Bold).style(Style::Blink).to_string()); -} - -#[test] -fn test_interface() { - let s = "Hello world"; - assert_eq!("\x1B[1mHello world\x1B[0m".to_owned(), s.bold().to_string()); - assert_eq!("\x1B[1;5mHello world\x1B[0m".to_owned(), s.bold().blink().to_string()); - assert_eq!("\x1B[38;5;1mHello world\x1B[0m".to_owned(), s.red().to_string()); - assert_eq!("\x1B[38;5;2mHello world\x1B[0m".to_owned(), s.red().green().to_string()); -} - -#[test] -fn test_mix() { - let s = "Hello world"; - assert_eq!("\x1B[38;5;1;5mHello world\x1B[0m".to_owned(), s.color(Color::Red).blink().to_string()); - assert_eq!("\x1B[38;5;220;1mHello world\x1B[0m".to_owned(), s.bold().color(Color::Gold1).to_string()); - - assert_eq!("\x1B[38;5;2;5;1mHello world\x1B[0m".to_owned(), s.color(Color::Green).blink().bold().to_string()); - assert_eq!("\x1B[38;5;220;1;5mHello world\x1B[0m".to_owned(), s.bold().blink().color(Color::Gold1).to_string()); -} diff --git a/third_party/rust/colorful/tests/test_extra.rs 
b/third_party/rust/colorful/tests/test_extra.rs deleted file mode 100644 index c8a79888c996..000000000000 --- a/third_party/rust/colorful/tests/test_extra.rs +++ /dev/null @@ -1,10 +0,0 @@ -extern crate colorful; -extern crate core; - - -#[test] -fn test_extra_color() { - use colorful::ExtraColorInterface; - let s = "Hello world"; - assert_eq!("\x1B[38;5;16mHello world\x1B[0m".to_owned(), s.grey0().to_string()); -} diff --git a/third_party/rust/colorful/tests/test_gradient.rs b/third_party/rust/colorful/tests/test_gradient.rs deleted file mode 100644 index ddebc1ff6844..000000000000 --- a/third_party/rust/colorful/tests/test_gradient.rs +++ /dev/null @@ -1,16 +0,0 @@ -extern crate colorful; - -use colorful::Color; -use colorful::Colorful; - -#[test] -fn test_gradient_color() { - let s = "Hello world"; - assert_eq!("\u{1b}[38;2;255;0;0mH\u{1b}[38;2;255;6;0me\u{1b}[38;2;255;13;0ml\u{1b}[38;2;255;19;0ml\u{1b}[38;2;255;26;0mo\u{1b}[38;2;255;32;0m \u{1b}[38;2;255;38;0mw\u{1b}[38;2;255;45;0mo\u{1b}[38;2;255;51;0mr\u{1b}[38;2;255;57;0ml\u{1b}[38;2;255;64;0md\u{1b}[0m".to_owned(), s.gradient(Color::Red).to_string()); -} - -#[test] -fn test_gradient_multiple_lines() { - let s = "a\nb"; - assert_eq!("\u{1b}[38;2;255;0;0ma\u{1b}[0m\n\u{1b}[38;2;255;0;0mb\u{1b}[0m".to_owned(), s.gradient(Color::Red).to_string()); -} \ No newline at end of file diff --git a/third_party/rust/colorful/tests/test_hsl.rs b/third_party/rust/colorful/tests/test_hsl.rs deleted file mode 100644 index 57222c2598e9..000000000000 --- a/third_party/rust/colorful/tests/test_hsl.rs +++ /dev/null @@ -1,10 +0,0 @@ -extern crate colorful; -extern crate core; - -use colorful::Colorful; - -#[test] -fn test_hsl_color() { - let s = "Hello world"; - assert_eq!("\x1B[38;2;19;205;94mHello world\x1B[0m", s.hsl(0.4, 0.83, 0.44).to_string()); -} diff --git a/third_party/rust/gfx-auxil/.cargo-checksum.json b/third_party/rust/gfx-auxil/.cargo-checksum.json index af040e124d16..cd79e03f071f 100644 --- 
a/third_party/rust/gfx-auxil/.cargo-checksum.json +++ b/third_party/rust/gfx-auxil/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"f06acc2f73f07e900bfdb0539ecac85b0481993babe8c5fdf7e079c54179c5a4","src/lib.rs":"3a200e513417044eaa986ac61810d9087193369afa1aeb4469a7306fd64249ec"},"package":"572eee952a9a23c99cfe3e4fd95d277784058a89ac3c77ff6fa3d80a4e321919"} \ No newline at end of file +{"files":{"Cargo.toml":"d9bacdc7fb6c94a0f98f766118b320b037be66cf28896b1edd348f900006ff49","src/lib.rs":"090ca4b005fe34762215078e30b7bca58f47fd6c43267a29d032ebe806428df9"},"package":"3b46e6f0031330a0be08d17820f2dcaaa91cb36710a97a9500cb4f1c36e785c8"} \ No newline at end of file diff --git a/third_party/rust/gfx-auxil/Cargo.toml b/third_party/rust/gfx-auxil/Cargo.toml index 8220e42388d1..ce22b99acebb 100644 --- a/third_party/rust/gfx-auxil/Cargo.toml +++ b/third_party/rust/gfx-auxil/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "gfx-auxil" -version = "0.1.0" +version = "0.3.0" authors = ["The Gfx-rs Developers"] description = "Implementation details shared between gfx-rs backends" homepage = "https://github.com/gfx-rs/gfx" @@ -28,8 +28,9 @@ name = "gfx_auxil" version = "0.2.1" [dependencies.hal] -version = "0.4" +version = "0.5" package = "gfx-hal" [dependencies.spirv_cross] -version = "0.16" +version = "0.18" +optional = true diff --git a/third_party/rust/gfx-auxil/src/lib.rs b/third_party/rust/gfx-auxil/src/lib.rs old mode 100755 new mode 100644 index 3af795c8b74a..626aec4def50 --- a/third_party/rust/gfx-auxil/src/lib.rs +++ b/third_party/rust/gfx-auxil/src/lib.rs @@ -1,50 +1,55 @@ -use { - hal::{device::ShaderError, pso}, - spirv_cross::spirv, -}; - -/// Fast hash map used internally. 
-pub type FastHashMap = std::collections::HashMap>; - -pub fn spirv_cross_specialize_ast( - ast: &mut spirv::Ast, - specialization: &pso::Specialization, -) -> Result<(), ShaderError> -where - T: spirv::Target, - spirv::Ast: spirv::Compile + spirv::Parse, -{ - let spec_constants = ast.get_specialization_constants().map_err(|err| { - ShaderError::CompilationFailed(match err { - spirv_cross::ErrorCode::CompilationError(msg) => msg, - spirv_cross::ErrorCode::Unhandled => "Unexpected specialization constant error".into(), - }) - })?; - - for spec_constant in spec_constants { - if let Some(constant) = specialization - .constants - .iter() - .find(|c| c.id == spec_constant.constant_id) - { - // Override specialization constant values - let value = specialization.data - [constant.range.start as usize .. constant.range.end as usize] - .iter() - .rev() - .fold(0u64, |u, &b| (u << 8) + b as u64); - - ast.set_scalar_constant(spec_constant.id, value) - .map_err(|err| { - ShaderError::CompilationFailed(match err { - spirv_cross::ErrorCode::CompilationError(msg) => msg, - spirv_cross::ErrorCode::Unhandled => { - "Unexpected specialization constant error".into() - } - }) - })?; - } - } - - Ok(()) -} +#[cfg(feature = "spirv_cross")] +use { + hal::{device::ShaderError, pso}, + spirv_cross::spirv, +}; + +/// Fast hash map used internally. 
+pub type FastHashMap = + std::collections::HashMap>; +pub type FastHashSet = + std::collections::HashSet>; + +#[cfg(feature = "spirv_cross")] +pub fn spirv_cross_specialize_ast( + ast: &mut spirv::Ast, + specialization: &pso::Specialization, +) -> Result<(), ShaderError> +where + T: spirv::Target, + spirv::Ast: spirv::Compile + spirv::Parse, +{ + let spec_constants = ast.get_specialization_constants().map_err(|err| { + ShaderError::CompilationFailed(match err { + spirv_cross::ErrorCode::CompilationError(msg) => msg, + spirv_cross::ErrorCode::Unhandled => "Unexpected specialization constant error".into(), + }) + })?; + + for spec_constant in spec_constants { + if let Some(constant) = specialization + .constants + .iter() + .find(|c| c.id == spec_constant.constant_id) + { + // Override specialization constant values + let value = specialization.data + [constant.range.start as usize .. constant.range.end as usize] + .iter() + .rev() + .fold(0u64, |u, &b| (u << 8) + b as u64); + + ast.set_scalar_constant(spec_constant.id, value) + .map_err(|err| { + ShaderError::CompilationFailed(match err { + spirv_cross::ErrorCode::CompilationError(msg) => msg, + spirv_cross::ErrorCode::Unhandled => { + "Unexpected specialization constant error".into() + } + }) + })?; + } + } + + Ok(()) +} diff --git a/third_party/rust/gfx-backend-dx11/.cargo-checksum.json b/third_party/rust/gfx-backend-dx11/.cargo-checksum.json index 982daa25b291..3e43798796fa 100644 --- a/third_party/rust/gfx-backend-dx11/.cargo-checksum.json +++ b/third_party/rust/gfx-backend-dx11/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"9395d7e87f0c91d4900088cdbeb0ca8a0d4526911ece1de9cfc746974a9eec4e","README.md":"5c66af7bc110525a57c757859b9b93468ae54222e6ce9ce5ffd55b2a6ca596b9","shaders/blit.hlsl":"92a8b404ee956ceff2728ec8dd68969fba4c32a79f4d879f069a294f245a867c","shaders/clear.hlsl":"b715a0d8ccebd858531de845fdb3f1b31f25d3f62266238cd1d417006a07957c","shaders/copy.hlsl":"13ca6a1826eb5d252332d2bd75cc7f2e13c029a72006d141118d10ea5fb9856b","src/conv.rs":"69845245e8100921c14d3c9ac7b66e734cb167f16d397b15d723e42b93f9ecf0","src/debug.rs":"f51aa06d7eec0eb352ca7d401c474a075a10a01bbc31ea7e9c62973d7105112f","src/device.rs":"c41ba79b013c8dccd6e37fd0e713341066e4e4d16b337fdc108b4c6c09d55898","src/dxgi.rs":"8587c85d6d4223d23143b1b2dcf52d22615ad93852957bc0a0d13dd9bf057d25","src/internal.rs":"d2440eb4734f0765b86c6a3f6ef82005af4b998c1b449a48bd262a76b62845b4","src/lib.rs":"5f25d95df828ce8c3c3a463d80b39a4c981ae0ba50496a51229fa9e9894e9026","src/shader.rs":"58f9ccb451eb9e0db4b27b3b2901c7a9008e95279abb341a0bd30fdf1b45879c"},"package":"d7527cfcd7d1eec6b99f81891293bdd2a41d044ace009717264e5f3b10ce5b86"} \ No newline at end of file 
+{"files":{"Cargo.toml":"a7e447d8532e3ad094eada6105bdcc305ff62e4f4861b381e4ab182023a13cae","README.md":"aa7ff84146655d3957c043b5f71dc439712392c3a18c8c397d8c179bc43f75c0","shaders/blit.hlsl":"a00c57d25b6704a57cd17923c5b7a47608b3ab17b96e7e2ab1172283dc841194","shaders/clear.hlsl":"9b6747a76dabe37ff8e069cdbb8a9c22f6cf71a6d3041d358cd1569d1bb8e10f","shaders/copy.hlsl":"0a164e64b28e62e1d8895159c13e5aa9c74891f61d54939c0f79b08a2a5223c9","src/conv.rs":"1e6ccaa053c4b6d64c861b794364b3043b04b2506e214f854b15d5f055d98f36","src/debug.rs":"d0047b032491e190a939a6fb8920f7ce7c22dcec12449f16061537f65e5ef81f","src/device.rs":"d02f5efccd66563e0b1c1565e39178f2e9605070ffc2f65b45c19b6029b5b711","src/dxgi.rs":"1f3db576380f824682f7c1a8fc55ce0df277f3201e97dcf59e6d8dbe8ec4f851","src/internal.rs":"eb45d808dda1d94974e45377c0cd84d79b33fb4dd3ef1f83ed647b73908a6613","src/lib.rs":"a2039cb3390079901c2ec391cc47d2b113ad247a9532c579a39053eec6608d25","src/shader.rs":"a6ce3acf4731e99b48209afcc2a5136ce937f967bf5decad0903282e69215594"},"package":"b148219292624126f78245e50a9720d95ea149a415ce8ce73ab7014205301b88"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-dx11/Cargo.toml b/third_party/rust/gfx-backend-dx11/Cargo.toml index 50d12afdf9a1..cedc5e6b0073 100644 --- a/third_party/rust/gfx-backend-dx11/Cargo.toml +++ b/third_party/rust/gfx-backend-dx11/Cargo.toml @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "gfx-backend-dx11" -version = "0.4.6" +version = "0.5.0" authors = ["The Gfx-rs Developers"] description = "DirectX-11 API backend for gfx-rs" homepage = "https://github.com/gfx-rs/gfx" @@ -27,14 +28,16 @@ default-target = "x86_64-pc-windows-msvc" [lib] name = "gfx_backend_dx11" [dependencies.auxil] -version = "0.1" +version = "0.3" +features = ["spirv_cross"] package = "gfx-auxil" [dependencies.bitflags] version = "1" -[dependencies.gfx-hal] -version = "0.4" +[dependencies.hal] +version = "0.5" +package = "gfx-hal" 
[dependencies.libloading] version = "0.5" @@ -52,10 +55,10 @@ version = "0.1" version = "0.3" [dependencies.smallvec] -version = "0.6" +version = "1.0" [dependencies.spirv_cross] -version = "0.16" +version = "0.18" features = ["hlsl"] [dependencies.winapi] diff --git a/third_party/rust/gfx-backend-dx11/README.md b/third_party/rust/gfx-backend-dx11/README.md index 921c43c22c6a..5b65cd56bbfc 100644 --- a/third_party/rust/gfx-backend-dx11/README.md +++ b/third_party/rust/gfx-backend-dx11/README.md @@ -1,13 +1,13 @@ -# gfx_device_dx11 - -[DX11](https://msdn.microsoft.com/en-us/library/windows/desktop/ff476080(v=vs.85).aspx) backend for gfx. - -## Normalized Coordinates - -Render | Depth | Texture --------|-------|-------- -![render_coordinates](../../../info/gl_render_coordinates.png) | ![depth_coordinates](../../../info/dx_depth_coordinates.png) | ![texture_coordinates](../../../info/dx_texture_coordinates.png) - -## Mirroring - -TODO +# gfx_device_dx11 + +[DX11](https://msdn.microsoft.com/en-us/library/windows/desktop/ff476080(v=vs.85).aspx) backend for gfx. 
+ +## Normalized Coordinates + +Render | Depth | Texture +-------|-------|-------- +![render_coordinates](../../../info/gl_render_coordinates.png) | ![depth_coordinates](../../../info/dx_depth_coordinates.png) | ![texture_coordinates](../../../info/dx_texture_coordinates.png) + +## Mirroring + +TODO diff --git a/third_party/rust/gfx-backend-dx11/shaders/blit.hlsl b/third_party/rust/gfx-backend-dx11/shaders/blit.hlsl index f627e5c9b92e..4bdd006abed7 100644 --- a/third_party/rust/gfx-backend-dx11/shaders/blit.hlsl +++ b/third_party/rust/gfx-backend-dx11/shaders/blit.hlsl @@ -1,63 +1,63 @@ -cbuffer Region : register(b0) { - float2 offset; - float2 extent; - float z; - float level; -}; - -struct VsOutput { - float4 pos: SV_POSITION; - float4 uv: TEXCOORD0; -}; - -// Create a screen filling triangle -VsOutput vs_blit_2d(uint id: SV_VertexID) { - float2 coord = float2((id << 1) & 2, id & 2); - VsOutput output = { - float4(float2(-1.0, 1.0) + coord * float2(2.0, -2.0), 0.0, 1.0), - float4(offset + coord * extent, z, level) - }; - return output; -} - -SamplerState BlitSampler : register(s0); - -Texture2DArray BlitSrc_Uint : register(t0); -Texture2DArray BlitSrc_Sint : register(t0); -Texture2DArray BlitSrc_Float : register(t0); - -// TODO: get rid of GetDimensions call -uint4 Nearest_Uint(float4 uv) -{ - float4 size; - BlitSrc_Uint.GetDimensions(0, size.x, size.y, size.z, size.w); - - float2 pix = uv.xy * size.xy; - - return BlitSrc_Uint.Load(int4(int2(pix), uv.zw)); -} - -int4 Nearest_Sint(float4 uv) -{ - float4 size; - BlitSrc_Sint.GetDimensions(0, size.x, size.y, size.z, size.w); - - float2 pix = uv.xy * size.xy; - - return BlitSrc_Sint.Load(int4(int2(pix), uv.zw)); -} - -uint4 ps_blit_2d_uint(VsOutput input) : SV_Target -{ - return Nearest_Uint(input.uv); -} - -int4 ps_blit_2d_int(VsOutput input) : SV_Target -{ - return Nearest_Sint(input.uv); -} - -float4 ps_blit_2d_float(VsOutput input) : SV_Target -{ - return BlitSrc_Float.SampleLevel(BlitSampler, input.uv.xyz, 
input.uv.w); -} +cbuffer Region : register(b0) { + float2 offset; + float2 extent; + float z; + float level; +}; + +struct VsOutput { + float4 pos: SV_POSITION; + float4 uv: TEXCOORD0; +}; + +// Create a screen filling triangle +VsOutput vs_blit_2d(uint id: SV_VertexID) { + float2 coord = float2((id << 1) & 2, id & 2); + VsOutput output = { + float4(float2(-1.0, 1.0) + coord * float2(2.0, -2.0), 0.0, 1.0), + float4(offset + coord * extent, z, level) + }; + return output; +} + +SamplerState BlitSampler : register(s0); + +Texture2DArray BlitSrc_Uint : register(t0); +Texture2DArray BlitSrc_Sint : register(t0); +Texture2DArray BlitSrc_Float : register(t0); + +// TODO: get rid of GetDimensions call +uint4 Nearest_Uint(float4 uv) +{ + float4 size; + BlitSrc_Uint.GetDimensions(0, size.x, size.y, size.z, size.w); + + float2 pix = uv.xy * size.xy; + + return BlitSrc_Uint.Load(int4(int2(pix), uv.zw)); +} + +int4 Nearest_Sint(float4 uv) +{ + float4 size; + BlitSrc_Sint.GetDimensions(0, size.x, size.y, size.z, size.w); + + float2 pix = uv.xy * size.xy; + + return BlitSrc_Sint.Load(int4(int2(pix), uv.zw)); +} + +uint4 ps_blit_2d_uint(VsOutput input) : SV_Target +{ + return Nearest_Uint(input.uv); +} + +int4 ps_blit_2d_int(VsOutput input) : SV_Target +{ + return Nearest_Sint(input.uv); +} + +float4 ps_blit_2d_float(VsOutput input) : SV_Target +{ + return BlitSrc_Float.SampleLevel(BlitSampler, input.uv.xyz, input.uv.w); +} diff --git a/third_party/rust/gfx-backend-dx11/shaders/clear.hlsl b/third_party/rust/gfx-backend-dx11/shaders/clear.hlsl index 5c91fe6b4900..3f8f3a4e87dd 100644 --- a/third_party/rust/gfx-backend-dx11/shaders/clear.hlsl +++ b/third_party/rust/gfx-backend-dx11/shaders/clear.hlsl @@ -1,22 +1,22 @@ -cbuffer ClearColorF32 : register(b0) { float4 ClearF32; }; -cbuffer ClearColorU32 : register(b0) { uint4 ClearU32; }; -cbuffer ClearColorI32 : register(b0) { int4 ClearI32; }; -cbuffer ClearColorDepth : register(b0) { float ClearDepth; }; - -// fullscreen triangle 
-float4 vs_partial_clear(uint id : SV_VertexID) : SV_Position -{ - return float4( - float(id / 2) * 4.0 - 1.0, - float(id % 2) * 4.0 - 1.0, - 0.0, - 1.0 - ); -} - -// TODO: send constants through VS as flat attributes -float4 ps_partial_clear_float() : SV_Target0 { return ClearF32; } -uint4 ps_partial_clear_uint() : SV_Target0 { return ClearU32; } -int4 ps_partial_clear_int() : SV_Target0 { return ClearI32; } -float ps_partial_clear_depth() : SV_Depth { return ClearDepth; } -void ps_partial_clear_stencil() { } +cbuffer ClearColorF32 : register(b0) { float4 ClearF32; }; +cbuffer ClearColorU32 : register(b0) { uint4 ClearU32; }; +cbuffer ClearColorI32 : register(b0) { int4 ClearI32; }; +cbuffer ClearColorDepth : register(b0) { float ClearDepth; }; + +// fullscreen triangle +float4 vs_partial_clear(uint id : SV_VertexID) : SV_Position +{ + return float4( + float(id / 2) * 4.0 - 1.0, + float(id % 2) * 4.0 - 1.0, + 0.0, + 1.0 + ); +} + +// TODO: send constants through VS as flat attributes +float4 ps_partial_clear_float() : SV_Target0 { return ClearF32; } +uint4 ps_partial_clear_uint() : SV_Target0 { return ClearU32; } +int4 ps_partial_clear_int() : SV_Target0 { return ClearI32; } +float ps_partial_clear_depth() : SV_Depth { return ClearDepth; } +void ps_partial_clear_stencil() { } diff --git a/third_party/rust/gfx-backend-dx11/shaders/copy.hlsl b/third_party/rust/gfx-backend-dx11/shaders/copy.hlsl index 6034561b1eb7..95dcfc46602f 100644 --- a/third_party/rust/gfx-backend-dx11/shaders/copy.hlsl +++ b/third_party/rust/gfx-backend-dx11/shaders/copy.hlsl @@ -1,517 +1,517 @@ -struct BufferCopy { - uint4 SrcDst; -}; - -struct ImageCopy { - uint4 Src; - uint4 Dst; -}; - -struct BufferImageCopy { - // x=offset, yz=size - uint4 BufferVars; - uint4 ImageOffset; - uint4 ImageExtent; - uint4 ImageSize; -}; - -cbuffer CopyConstants : register(b0) { - BufferCopy BufferCopies; - ImageCopy ImageCopies; - BufferImageCopy BufferImageCopies; -}; - - -uint3 GetDestBounds() -{ - return 
min( - BufferImageCopies.ImageOffset + BufferImageCopies.ImageExtent, - BufferImageCopies.ImageSize - ); -} - -uint3 GetImageCopyDst(uint3 dispatch_thread_id) -{ - return uint3(ImageCopies.Dst.xy + dispatch_thread_id.xy, ImageCopies.Dst.z); -} - -uint3 GetImageCopySrc(uint3 dispatch_thread_id) -{ - return uint3(ImageCopies.Src.xy + dispatch_thread_id.xy, ImageCopies.Src.z); -} - -uint3 GetImageDst(uint3 dispatch_thread_id) -{ - return uint3(BufferImageCopies.ImageOffset.xy + dispatch_thread_id.xy, BufferImageCopies.ImageOffset.z); -} - -uint3 GetImageSrc(uint3 dispatch_thread_id) -{ - return uint3(BufferImageCopies.ImageOffset.xy + dispatch_thread_id.xy, BufferImageCopies.ImageOffset.z); -} - -uint GetBufferDst128(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 16 + dispatch_thread_id.y * 16 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); -} -uint GetBufferSrc128(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 16 + dispatch_thread_id.y * 16 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); -} - -uint GetBufferDst64(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 8 + dispatch_thread_id.y * 8 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); -} -uint GetBufferSrc64(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 8 + dispatch_thread_id.y * 8 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); -} - -uint GetBufferDst32(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 4 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); -} -uint GetBufferSrc32(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 4 * max(BufferImageCopies.BufferVars.y, 
BufferImageCopies.ImageExtent.x); -} - -uint GetBufferDst16(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 2 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); -} -uint GetBufferSrc16(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 2 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); -} - -uint GetBufferDst8(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); -} -uint GetBufferSrc8(uint3 dispatch_thread_id) -{ - return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); -} - - -uint4 Uint32ToUint8x4(uint data) -{ - return (data >> uint4(0, 8, 16, 24)) & 0xFF; -} - -uint2 Uint32ToUint16x2(uint data) -{ - return (data >> uint2(0, 16)) & 0xFFFF; -} - -uint Uint8x4ToUint32(uint4 data) -{ - return dot(min(data, 0xFF), 1 << uint4(0, 8, 16, 24)); -} - -uint Uint16x2ToUint32(uint2 data) -{ - return dot(min(data, 0xFFFF), 1 << uint2(0, 16)); -} - -uint2 Uint16ToUint8x2(uint data) -{ - return (data >> uint2(0, 8)) & 0xFF; -} - -uint Uint8x2ToUint16(uint2 data) -{ - return dot(min(data, 0xFF), 1 << uint2(0, 8)); -} - -uint4 Float4ToUint8x4(float4 data) -{ - return uint4(data * 255 + .5f); -} - -// Buffers are always R32-aligned -ByteAddressBuffer BufferCopySrc : register(t0); -RWByteAddressBuffer BufferCopyDst : register(u0); - -Texture2DArray ImageCopySrc : register(t0); -RWTexture2DArray ImageCopyDstR : register(u0); -RWTexture2DArray ImageCopyDstRg : register(u0); -RWTexture2DArray ImageCopyDstRgba : register(u0); - -Texture2DArray ImageCopySrcBgra : register(t0); - -// Image<->Image copies -[numthreads(1, 1, 1)] -void 
cs_copy_image2d_r8g8_image2d_r16(uint3 dispatch_thread_id : SV_DispatchThreadID) -{ - uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); - uint3 src_idx = GetImageCopySrc(dispatch_thread_id); - - ImageCopyDstR[dst_idx] = Uint8x2ToUint16(ImageCopySrc[src_idx]); -} - -[numthreads(1, 1, 1)] -void cs_copy_image2d_r16_image2d_r8g8(uint3 dispatch_thread_id : SV_DispatchThreadID) -{ - uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); - uint3 src_idx = GetImageCopySrc(dispatch_thread_id); - - ImageCopyDstRg[dst_idx] = Uint16ToUint8x2(ImageCopySrc[src_idx]); -} - -[numthreads(1, 1, 1)] -void cs_copy_image2d_r8g8b8a8_image2d_r32(uint3 dispatch_thread_id : SV_DispatchThreadID) -{ - uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); - uint3 src_idx = GetImageCopySrc(dispatch_thread_id); - - ImageCopyDstR[dst_idx] = Uint8x4ToUint32(ImageCopySrc[src_idx]); -} - -[numthreads(1, 1, 1)] -void cs_copy_image2d_r8g8b8a8_image2d_r16g16(uint3 dispatch_thread_id : SV_DispatchThreadID) -{ - uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); - uint3 src_idx = GetImageCopySrc(dispatch_thread_id); - - ImageCopyDstRg[dst_idx] = Uint32ToUint16x2(Uint8x4ToUint32(ImageCopySrc[src_idx])); -} - -[numthreads(1, 1, 1)] -void cs_copy_image2d_r16g16_image2d_r32(uint3 dispatch_thread_id : SV_DispatchThreadID) -{ - uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); - uint3 src_idx = GetImageCopySrc(dispatch_thread_id); - - ImageCopyDstR[dst_idx] = Uint16x2ToUint32(ImageCopySrc[src_idx]); -} - -[numthreads(1, 1, 1)] -void cs_copy_image2d_r16g16_image2d_r8g8b8a8(uint3 dispatch_thread_id : SV_DispatchThreadID) -{ - uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); - uint3 src_idx = GetImageCopySrc(dispatch_thread_id); - - ImageCopyDstRgba[dst_idx] = Uint32ToUint8x4(Uint16x2ToUint32(ImageCopySrc[src_idx])); -} - -[numthreads(1, 1, 1)] -void cs_copy_image2d_r32_image2d_r16g16(uint3 dispatch_thread_id : SV_DispatchThreadID) -{ - uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); - uint3 
src_idx = GetImageCopySrc(dispatch_thread_id); - - ImageCopyDstRg[dst_idx] = Uint32ToUint16x2(ImageCopySrc[src_idx]); -} - -[numthreads(1, 1, 1)] -void cs_copy_image2d_r32_image2d_r8g8b8a8(uint3 dispatch_thread_id : SV_DispatchThreadID) -{ - uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); - uint3 src_idx = GetImageCopySrc(dispatch_thread_id); - - ImageCopyDstRgba[dst_idx] = Uint32ToUint8x4(ImageCopySrc[src_idx]); -} - -#define COPY_NUM_THREAD_X 8 -#define COPY_NUM_THREAD_Y 8 - -// Buffer<->Image copies - -// R32G32B32A32 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_buffer_image2d_r32g32b32a32(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 dst_idx = GetImageDst(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { - return; - } - - uint src_idx = GetBufferSrc128(dispatch_thread_id); - - ImageCopyDstRgba[dst_idx] = uint4( - BufferCopySrc.Load(src_idx), - BufferCopySrc.Load(src_idx + 1 * 4), - BufferCopySrc.Load(src_idx + 2 * 4), - BufferCopySrc.Load(src_idx + 3 * 4) - ); -} - -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_r32g32b32a32_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 src_idx = GetImageSrc(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint4 data = ImageCopySrc[src_idx]; - uint dst_idx = GetBufferDst128(dispatch_thread_id); - - BufferCopyDst.Store(dst_idx, data.x); - BufferCopyDst.Store(dst_idx + 1 * 4, data.y); - BufferCopyDst.Store(dst_idx + 2 * 4, data.z); - BufferCopyDst.Store(dst_idx + 3 * 4, data.w); -} - -// R32G32 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_buffer_image2d_r32g32(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 dst_idx = GetImageDst(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { - return; - } - - uint src_idx = 
GetBufferSrc64(dispatch_thread_id); - - ImageCopyDstRg[dst_idx] = uint2( - BufferCopySrc.Load(src_idx), - BufferCopySrc.Load(src_idx + 1 * 4) - ); -} - -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_r32g32_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 src_idx = GetImageSrc(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint2 data = ImageCopySrc[src_idx].rg; - uint dst_idx = GetBufferDst64(dispatch_thread_id); - - BufferCopyDst.Store(dst_idx , data.x); - BufferCopyDst.Store(dst_idx + 1 * 4, data.y); -} - -// R16G16B16A16 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_buffer_image2d_r16g16b16a16(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 dst_idx = GetImageDst(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { - return; - } - - uint src_idx = GetBufferSrc64(dispatch_thread_id); - - ImageCopyDstRgba[dst_idx] = uint4( - Uint32ToUint16x2(BufferCopySrc.Load(src_idx)), - Uint32ToUint16x2(BufferCopySrc.Load(src_idx + 1 * 4)) - ); -} - -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_r16g16b16a16_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 src_idx = GetImageSrc(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint4 data = ImageCopySrc[src_idx]; - uint dst_idx = GetBufferDst64(dispatch_thread_id); - - BufferCopyDst.Store(dst_idx, Uint16x2ToUint32(data.xy)); - BufferCopyDst.Store(dst_idx + 1 * 4, Uint16x2ToUint32(data.zw)); -} - -// R32 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_buffer_image2d_r32(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 dst_idx = GetImageDst(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { - return; - } - 
- uint src_idx = GetBufferSrc32(dispatch_thread_id); - - ImageCopyDstR[dst_idx] = BufferCopySrc.Load(src_idx); -} - -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_r32_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 src_idx = GetImageSrc(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint dst_idx = GetBufferDst32(dispatch_thread_id); - - BufferCopyDst.Store(dst_idx, ImageCopySrc[src_idx].r); -} - -// R16G16 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_buffer_image2d_r16g16(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 dst_idx = GetImageDst(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { - return; - } - - uint src_idx = GetBufferSrc32(dispatch_thread_id); - - ImageCopyDstRg[dst_idx] = Uint32ToUint16x2(BufferCopySrc.Load(src_idx)); -} - -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_r16g16_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 src_idx = GetImageSrc(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint dst_idx = GetBufferDst32(dispatch_thread_id); - - BufferCopyDst.Store(dst_idx, Uint16x2ToUint32(ImageCopySrc[src_idx].xy)); -} - -// R8G8B8A8 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_buffer_image2d_r8g8b8a8(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 dst_idx = GetImageDst(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { - return; - } - - uint src_idx = GetBufferSrc32(dispatch_thread_id); - - ImageCopyDstRgba[dst_idx] = Uint32ToUint8x4(BufferCopySrc.Load(src_idx)); -} - -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_r8g8b8a8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - 
uint3 src_idx = GetImageSrc(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint dst_idx = GetBufferDst32(dispatch_thread_id); - - BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(ImageCopySrc[src_idx])); -} - -// B8G8R8A8 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_b8g8r8a8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 src_idx = GetImageSrc(dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint dst_idx = GetBufferDst32(dispatch_thread_id); - - BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(Float4ToUint8x4(ImageCopySrcBgra[src_idx].bgra))); -} - -// R16 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_buffer_image2d_r16(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 dst_idx = GetImageDst(uint3(2, 1, 0) * dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { - return; - } - - uint src_idx = GetBufferSrc16(dispatch_thread_id); - uint2 data = Uint32ToUint16x2(BufferCopySrc.Load(src_idx)); - - ImageCopyDstR[dst_idx ] = data.x; - ImageCopyDstR[dst_idx + uint3(1, 0, 0)] = data.y; -} - -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_r16_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 src_idx = GetImageSrc(uint3(2, 1, 0) * dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint dst_idx = GetBufferDst16(dispatch_thread_id); - - uint upper = ImageCopySrc[src_idx].r; - uint lower = ImageCopySrc[src_idx + uint3(1, 0, 0)].r; - - BufferCopyDst.Store(dst_idx, Uint16x2ToUint32(uint2(upper, lower))); -} - -// R8G8 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_buffer_image2d_r8g8(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 dst_idx = 
GetImageDst(uint3(2, 1, 0) * dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { - return; - } - - uint src_idx = GetBufferSrc16(dispatch_thread_id); - - uint4 data = Uint32ToUint8x4(BufferCopySrc.Load(src_idx)); - - ImageCopyDstRg[dst_idx ] = data.xy; - ImageCopyDstRg[dst_idx + uint3(1, 0, 0)] = data.zw; -} - -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_r8g8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 src_idx = GetImageSrc(uint3(2, 1, 0) * dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint dst_idx = GetBufferDst16(dispatch_thread_id); - - uint2 lower = ImageCopySrc[src_idx].xy; - uint2 upper = ImageCopySrc[src_idx + uint3(1, 0, 0)].xy; - - BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(uint4(lower.x, lower.y, upper.x, upper.y))); -} - -// R8 -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_buffer_image2d_r8(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 dst_idx = GetImageDst(uint3(4, 1, 0) * dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { - return; - } - - uint src_idx = GetBufferSrc8(dispatch_thread_id); - uint4 data = Uint32ToUint8x4(BufferCopySrc.Load(src_idx)); - - ImageCopyDstR[dst_idx ] = data.x; - ImageCopyDstR[dst_idx + uint3(1, 0, 0)] = data.y; - ImageCopyDstR[dst_idx + uint3(2, 0, 0)] = data.z; - ImageCopyDstR[dst_idx + uint3(3, 0, 0)] = data.w; -} - -[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] -void cs_copy_image2d_r8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { - uint3 src_idx = GetImageSrc(uint3(4, 1, 0) * dispatch_thread_id); - uint3 bounds = GetDestBounds(); - if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { - return; - } - - uint dst_idx = GetBufferDst8(dispatch_thread_id); - - BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(uint4( - 
ImageCopySrc[src_idx].r, - ImageCopySrc[src_idx + uint3(1, 0, 0)].r, - ImageCopySrc[src_idx + uint3(2, 0, 0)].r, - ImageCopySrc[src_idx + uint3(3, 0, 0)].r - ))); -} +struct BufferCopy { + uint4 SrcDst; +}; + +struct ImageCopy { + uint4 Src; + uint4 Dst; +}; + +struct BufferImageCopy { + // x=offset, yz=size + uint4 BufferVars; + uint4 ImageOffset; + uint4 ImageExtent; + uint4 ImageSize; +}; + +cbuffer CopyConstants : register(b0) { + BufferCopy BufferCopies; + ImageCopy ImageCopies; + BufferImageCopy BufferImageCopies; +}; + + +uint3 GetDestBounds() +{ + return min( + BufferImageCopies.ImageOffset + BufferImageCopies.ImageExtent, + BufferImageCopies.ImageSize + ); +} + +uint3 GetImageCopyDst(uint3 dispatch_thread_id) +{ + return uint3(ImageCopies.Dst.xy + dispatch_thread_id.xy, ImageCopies.Dst.z); +} + +uint3 GetImageCopySrc(uint3 dispatch_thread_id) +{ + return uint3(ImageCopies.Src.xy + dispatch_thread_id.xy, ImageCopies.Src.z); +} + +uint3 GetImageDst(uint3 dispatch_thread_id) +{ + return uint3(BufferImageCopies.ImageOffset.xy + dispatch_thread_id.xy, BufferImageCopies.ImageOffset.z); +} + +uint3 GetImageSrc(uint3 dispatch_thread_id) +{ + return uint3(BufferImageCopies.ImageOffset.xy + dispatch_thread_id.xy, BufferImageCopies.ImageOffset.z); +} + +uint GetBufferDst128(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 16 + dispatch_thread_id.y * 16 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc128(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 16 + dispatch_thread_id.y * 16 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + +uint GetBufferDst64(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 8 + dispatch_thread_id.y * 8 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc64(uint3 dispatch_thread_id) +{ + return 
BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 8 + dispatch_thread_id.y * 8 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + +uint GetBufferDst32(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 4 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc32(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 4 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + +uint GetBufferDst16(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 2 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc16(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * 2 * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + +uint GetBufferDst8(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} +uint GetBufferSrc8(uint3 dispatch_thread_id) +{ + return BufferImageCopies.BufferVars.x + dispatch_thread_id.x * 4 + dispatch_thread_id.y * max(BufferImageCopies.BufferVars.y, BufferImageCopies.ImageExtent.x); +} + + +uint4 Uint32ToUint8x4(uint data) +{ + return (data >> uint4(0, 8, 16, 24)) & 0xFF; +} + +uint2 Uint32ToUint16x2(uint data) +{ + return (data >> uint2(0, 16)) & 0xFFFF; +} + +uint Uint8x4ToUint32(uint4 data) +{ + return dot(min(data, 0xFF), 1 << uint4(0, 8, 16, 24)); +} + +uint Uint16x2ToUint32(uint2 data) +{ + return dot(min(data, 0xFFFF), 1 << uint2(0, 16)); +} + +uint2 Uint16ToUint8x2(uint data) +{ + return (data >> uint2(0, 8)) & 0xFF; +} + +uint Uint8x2ToUint16(uint2 data) +{ + return dot(min(data, 0xFF), 1 << 
uint2(0, 8)); +} + +uint4 Float4ToUint8x4(float4 data) +{ + return uint4(data * 255 + .5f); +} + +// Buffers are always R32-aligned +ByteAddressBuffer BufferCopySrc : register(t0); +RWByteAddressBuffer BufferCopyDst : register(u0); + +Texture2DArray ImageCopySrc : register(t0); +RWTexture2DArray ImageCopyDstR : register(u0); +RWTexture2DArray ImageCopyDstRg : register(u0); +RWTexture2DArray ImageCopyDstRgba : register(u0); + +Texture2DArray ImageCopySrcBgra : register(t0); + +// Image<->Image copies +[numthreads(1, 1, 1)] +void cs_copy_image2d_r8g8_image2d_r16(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstR[dst_idx] = Uint8x2ToUint16(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r16_image2d_r8g8(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = Uint16ToUint8x2(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r8g8b8a8_image2d_r32(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstR[dst_idx] = Uint8x4ToUint32(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r8g8b8a8_image2d_r16g16(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = Uint32ToUint16x2(Uint8x4ToUint32(ImageCopySrc[src_idx])); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r16g16_image2d_r32(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstR[dst_idx] = 
Uint16x2ToUint32(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r16g16_image2d_r8g8b8a8(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = Uint32ToUint8x4(Uint16x2ToUint32(ImageCopySrc[src_idx])); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r32_image2d_r16g16(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = Uint32ToUint16x2(ImageCopySrc[src_idx]); +} + +[numthreads(1, 1, 1)] +void cs_copy_image2d_r32_image2d_r8g8b8a8(uint3 dispatch_thread_id : SV_DispatchThreadID) +{ + uint3 dst_idx = GetImageCopyDst(dispatch_thread_id); + uint3 src_idx = GetImageCopySrc(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = Uint32ToUint8x4(ImageCopySrc[src_idx]); +} + +#define COPY_NUM_THREAD_X 8 +#define COPY_NUM_THREAD_Y 8 + +// Buffer<->Image copies + +// R32G32B32A32 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r32g32b32a32(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc128(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = uint4( + BufferCopySrc.Load(src_idx), + BufferCopySrc.Load(src_idx + 1 * 4), + BufferCopySrc.Load(src_idx + 2 * 4), + BufferCopySrc.Load(src_idx + 3 * 4) + ); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r32g32b32a32_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint4 data = ImageCopySrc[src_idx]; + uint dst_idx = 
GetBufferDst128(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, data.x); + BufferCopyDst.Store(dst_idx + 1 * 4, data.y); + BufferCopyDst.Store(dst_idx + 2 * 4, data.z); + BufferCopyDst.Store(dst_idx + 3 * 4, data.w); +} + +// R32G32 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r32g32(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc64(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = uint2( + BufferCopySrc.Load(src_idx), + BufferCopySrc.Load(src_idx + 1 * 4) + ); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r32g32_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint2 data = ImageCopySrc[src_idx].rg; + uint dst_idx = GetBufferDst64(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx , data.x); + BufferCopyDst.Store(dst_idx + 1 * 4, data.y); +} + +// R16G16B16A16 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r16g16b16a16(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc64(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = uint4( + Uint32ToUint16x2(BufferCopySrc.Load(src_idx)), + Uint32ToUint16x2(BufferCopySrc.Load(src_idx + 1 * 4)) + ); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r16g16b16a16_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= 
bounds.y) { + return; + } + + uint4 data = ImageCopySrc[src_idx]; + uint dst_idx = GetBufferDst64(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, Uint16x2ToUint32(data.xy)); + BufferCopyDst.Store(dst_idx + 1 * 4, Uint16x2ToUint32(data.zw)); +} + +// R32 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r32(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc32(dispatch_thread_id); + + ImageCopyDstR[dst_idx] = BufferCopySrc.Load(src_idx); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r32_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst32(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, ImageCopySrc[src_idx].r); +} + +// R16G16 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r16g16(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc32(dispatch_thread_id); + + ImageCopyDstRg[dst_idx] = Uint32ToUint16x2(BufferCopySrc.Load(src_idx)); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r16g16_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst32(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, Uint16x2ToUint32(ImageCopySrc[src_idx].xy)); +} + +// R8G8B8A8 
+[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r8g8b8a8(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc32(dispatch_thread_id); + + ImageCopyDstRgba[dst_idx] = Uint32ToUint8x4(BufferCopySrc.Load(src_idx)); +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r8g8b8a8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst32(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(ImageCopySrc[src_idx])); +} + +// B8G8R8A8 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_b8g8r8a8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst32(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(Float4ToUint8x4(ImageCopySrcBgra[src_idx].bgra))); +} + +// R16 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r16(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(uint3(2, 1, 0) * dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc16(dispatch_thread_id); + uint2 data = Uint32ToUint16x2(BufferCopySrc.Load(src_idx)); + + ImageCopyDstR[dst_idx ] = data.x; + ImageCopyDstR[dst_idx + uint3(1, 0, 0)] = data.y; +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r16_buffer(uint3 dispatch_thread_id : 
SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(uint3(2, 1, 0) * dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst16(dispatch_thread_id); + + uint upper = ImageCopySrc[src_idx].r; + uint lower = ImageCopySrc[src_idx + uint3(1, 0, 0)].r; + + BufferCopyDst.Store(dst_idx, Uint16x2ToUint32(uint2(upper, lower))); +} + +// R8G8 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r8g8(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(uint3(2, 1, 0) * dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc16(dispatch_thread_id); + + uint4 data = Uint32ToUint8x4(BufferCopySrc.Load(src_idx)); + + ImageCopyDstRg[dst_idx ] = data.xy; + ImageCopyDstRg[dst_idx + uint3(1, 0, 0)] = data.zw; +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r8g8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(uint3(2, 1, 0) * dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst16(dispatch_thread_id); + + uint2 lower = ImageCopySrc[src_idx].xy; + uint2 upper = ImageCopySrc[src_idx + uint3(1, 0, 0)].xy; + + BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(uint4(lower.x, lower.y, upper.x, upper.y))); +} + +// R8 +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_buffer_image2d_r8(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 dst_idx = GetImageDst(uint3(4, 1, 0) * dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (dst_idx.x >= bounds.x || dst_idx.y >= bounds.y) { + return; + } + + uint src_idx = GetBufferSrc8(dispatch_thread_id); + uint4 data = Uint32ToUint8x4(BufferCopySrc.Load(src_idx)); + + ImageCopyDstR[dst_idx ] = 
data.x; + ImageCopyDstR[dst_idx + uint3(1, 0, 0)] = data.y; + ImageCopyDstR[dst_idx + uint3(2, 0, 0)] = data.z; + ImageCopyDstR[dst_idx + uint3(3, 0, 0)] = data.w; +} + +[numthreads(COPY_NUM_THREAD_X, COPY_NUM_THREAD_Y, 1)] +void cs_copy_image2d_r8_buffer(uint3 dispatch_thread_id : SV_DispatchThreadID) { + uint3 src_idx = GetImageSrc(uint3(4, 1, 0) * dispatch_thread_id); + uint3 bounds = GetDestBounds(); + if (src_idx.x >= bounds.x || src_idx.y >= bounds.y) { + return; + } + + uint dst_idx = GetBufferDst8(dispatch_thread_id); + + BufferCopyDst.Store(dst_idx, Uint8x4ToUint32(uint4( + ImageCopySrc[src_idx].r, + ImageCopySrc[src_idx + uint3(1, 0, 0)].r, + ImageCopySrc[src_idx + uint3(2, 0, 0)].r, + ImageCopySrc[src_idx + uint3(3, 0, 0)].r + ))); +} diff --git a/third_party/rust/gfx-backend-dx11/src/conv.rs b/third_party/rust/gfx-backend-dx11/src/conv.rs index e29ce7c1dc32..d454d25522a6 100644 --- a/third_party/rust/gfx-backend-dx11/src/conv.rs +++ b/third_party/rust/gfx-backend-dx11/src/conv.rs @@ -1,824 +1,827 @@ -use hal::format::Format; -use hal::image::{Anisotropic, Filter, WrapMode}; -use hal::pso::{ - BlendDesc, - BlendOp, - ColorBlendDesc, - Comparison, - DepthBias, - DepthStencilDesc, - Face, - Factor, - FrontFace, - InputAssemblerDesc, - PolygonMode, - Rasterizer, - Rect, - Sided, - Stage, - State, - StencilFace, - StencilOp, - StencilValue, - Viewport, -}; -use hal::IndexType; - -use spirv_cross::spirv; - -use winapi::shared::dxgiformat::*; -use winapi::shared::minwindef::{FALSE, INT, TRUE}; - -use winapi::um::d3d11::*; -use winapi::um::d3dcommon::*; - -use std::mem; - -pub fn map_index_type(ty: IndexType) -> DXGI_FORMAT { - match ty { - IndexType::U16 => DXGI_FORMAT_R16_UINT, - IndexType::U32 => DXGI_FORMAT_R32_UINT, - } -} - -// TODO: add aspect parameter -pub fn viewable_format(format: DXGI_FORMAT) -> DXGI_FORMAT { - match format { - DXGI_FORMAT_D32_FLOAT_S8X24_UINT => DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS, - DXGI_FORMAT_D32_FLOAT => DXGI_FORMAT_R32_FLOAT, 
- DXGI_FORMAT_D16_UNORM => DXGI_FORMAT_R16_UNORM, - _ => format, - } -} - -// TODO: stolen from d3d12 backend, maybe share function somehow? -pub fn map_format(format: Format) -> Option { - use hal::format::Format::*; - - let format = match format { - R5g6b5Unorm => DXGI_FORMAT_B5G6R5_UNORM, - R5g5b5a1Unorm => DXGI_FORMAT_B5G5R5A1_UNORM, - R8Unorm => DXGI_FORMAT_R8_UNORM, - R8Snorm => DXGI_FORMAT_R8_SNORM, - R8Uint => DXGI_FORMAT_R8_UINT, - R8Sint => DXGI_FORMAT_R8_SINT, - Rg8Unorm => DXGI_FORMAT_R8G8_UNORM, - Rg8Snorm => DXGI_FORMAT_R8G8_SNORM, - Rg8Uint => DXGI_FORMAT_R8G8_UINT, - Rg8Sint => DXGI_FORMAT_R8G8_SINT, - Rgba8Unorm => DXGI_FORMAT_R8G8B8A8_UNORM, - Rgba8Snorm => DXGI_FORMAT_R8G8B8A8_SNORM, - Rgba8Uint => DXGI_FORMAT_R8G8B8A8_UINT, - Rgba8Sint => DXGI_FORMAT_R8G8B8A8_SINT, - Rgba8Srgb => DXGI_FORMAT_R8G8B8A8_UNORM_SRGB, - Bgra8Unorm => DXGI_FORMAT_B8G8R8A8_UNORM, - Bgra8Srgb => DXGI_FORMAT_B8G8R8A8_UNORM_SRGB, - A2b10g10r10Unorm => DXGI_FORMAT_R10G10B10A2_UNORM, - A2b10g10r10Uint => DXGI_FORMAT_R10G10B10A2_UINT, - R16Unorm => DXGI_FORMAT_R16_UNORM, - R16Snorm => DXGI_FORMAT_R16_SNORM, - R16Uint => DXGI_FORMAT_R16_UINT, - R16Sint => DXGI_FORMAT_R16_SINT, - R16Sfloat => DXGI_FORMAT_R16_FLOAT, - Rg16Unorm => DXGI_FORMAT_R16G16_UNORM, - Rg16Snorm => DXGI_FORMAT_R16G16_SNORM, - Rg16Uint => DXGI_FORMAT_R16G16_UINT, - Rg16Sint => DXGI_FORMAT_R16G16_SINT, - Rg16Sfloat => DXGI_FORMAT_R16G16_FLOAT, - Rgba16Unorm => DXGI_FORMAT_R16G16B16A16_UNORM, - Rgba16Snorm => DXGI_FORMAT_R16G16B16A16_SNORM, - Rgba16Uint => DXGI_FORMAT_R16G16B16A16_UINT, - Rgba16Sint => DXGI_FORMAT_R16G16B16A16_SINT, - Rgba16Sfloat => DXGI_FORMAT_R16G16B16A16_FLOAT, - R32Uint => DXGI_FORMAT_R32_UINT, - R32Sint => DXGI_FORMAT_R32_SINT, - R32Sfloat => DXGI_FORMAT_R32_FLOAT, - Rg32Uint => DXGI_FORMAT_R32G32_UINT, - Rg32Sint => DXGI_FORMAT_R32G32_SINT, - Rg32Sfloat => DXGI_FORMAT_R32G32_FLOAT, - Rgb32Uint => DXGI_FORMAT_R32G32B32_UINT, - Rgb32Sint => DXGI_FORMAT_R32G32B32_SINT, - Rgb32Sfloat => 
DXGI_FORMAT_R32G32B32_FLOAT, - Rgba32Uint => DXGI_FORMAT_R32G32B32A32_UINT, - Rgba32Sint => DXGI_FORMAT_R32G32B32A32_SINT, - Rgba32Sfloat => DXGI_FORMAT_R32G32B32A32_FLOAT, - B10g11r11Ufloat => DXGI_FORMAT_R11G11B10_FLOAT, - E5b9g9r9Ufloat => DXGI_FORMAT_R9G9B9E5_SHAREDEXP, - D16Unorm => DXGI_FORMAT_D16_UNORM, - D32Sfloat => DXGI_FORMAT_D32_FLOAT, - D32SfloatS8Uint => DXGI_FORMAT_D32_FLOAT_S8X24_UINT, - Bc1RgbUnorm => DXGI_FORMAT_BC1_UNORM, - Bc1RgbSrgb => DXGI_FORMAT_BC1_UNORM_SRGB, - Bc2Unorm => DXGI_FORMAT_BC2_UNORM, - Bc2Srgb => DXGI_FORMAT_BC2_UNORM_SRGB, - Bc3Unorm => DXGI_FORMAT_BC3_UNORM, - Bc3Srgb => DXGI_FORMAT_BC3_UNORM_SRGB, - Bc4Unorm => DXGI_FORMAT_BC4_UNORM, - Bc4Snorm => DXGI_FORMAT_BC4_SNORM, - Bc5Unorm => DXGI_FORMAT_BC5_UNORM, - Bc5Snorm => DXGI_FORMAT_BC5_SNORM, - Bc6hUfloat => DXGI_FORMAT_BC6H_UF16, - Bc6hSfloat => DXGI_FORMAT_BC6H_SF16, - Bc7Unorm => DXGI_FORMAT_BC7_UNORM, - Bc7Srgb => DXGI_FORMAT_BC7_UNORM_SRGB, - - _ => return None, - }; - - Some(format) -} - -pub fn map_format_nosrgb(format: Format) -> Option { - // NOTE: DXGI doesn't allow sRGB format on the swapchain, but - // creating RTV of swapchain buffers with sRGB works - match format { - Format::Bgra8Srgb => Some(DXGI_FORMAT_B8G8R8A8_UNORM), - Format::Rgba8Srgb => Some(DXGI_FORMAT_R8G8B8A8_UNORM), - _ => map_format(format), - } -} - -#[derive(Debug, Clone)] -pub struct DecomposedDxgiFormat { - pub typeless: DXGI_FORMAT, - pub srv: Option, - pub rtv: Option, - pub uav: Option, - pub dsv: Option, - // the format we use internally for operating on textures (eg. 
Rgba8 uses R32 internally for - // copies) - pub copy_uav: Option, - pub copy_srv: Option, -} - -impl DecomposedDxgiFormat { - pub const UNKNOWN: DecomposedDxgiFormat = DecomposedDxgiFormat { - typeless: DXGI_FORMAT_UNKNOWN, - srv: None, - rtv: None, - uav: None, - dsv: None, - copy_uav: None, - copy_srv: None, - }; - - // TODO: we probably want to pass in usage flags or similar to allow for our `typeless_format` - // field to only contain the input format (eg. depth only rather than typeless likely - // improves perf since the driver doesn't need to expose internals) - // - // TODO: we also want aspect for determining depth/stencil - pub fn from_dxgi_format(format: DXGI_FORMAT) -> DecomposedDxgiFormat { - match format { - DXGI_FORMAT_R8G8B8A8_UNORM - | DXGI_FORMAT_R8G8B8A8_SNORM - | DXGI_FORMAT_R8G8B8A8_UINT - | DXGI_FORMAT_R8G8B8A8_SINT - | DXGI_FORMAT_R8G8B8A8_UNORM_SRGB => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R8G8B8A8_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(DXGI_FORMAT_R32_UINT), - copy_srv: Some(DXGI_FORMAT_R8G8B8A8_UINT), - }, - - DXGI_FORMAT_B8G8R8A8_UNORM | DXGI_FORMAT_B8G8R8A8_UNORM_SRGB => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_B8G8R8A8_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(DXGI_FORMAT_B8G8R8A8_UNORM), - dsv: None, - copy_uav: Some(DXGI_FORMAT_R32_UINT), - copy_srv: Some(DXGI_FORMAT_B8G8R8A8_UNORM), - }, - - DXGI_FORMAT_A8_UNORM => DecomposedDxgiFormat { - typeless: format, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(format), - copy_srv: Some(format), - }, - - DXGI_FORMAT_R8_UNORM | DXGI_FORMAT_R8_SNORM | DXGI_FORMAT_R8_UINT - | DXGI_FORMAT_R8_SINT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R8_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(DXGI_FORMAT_R8_UINT), - copy_srv: Some(DXGI_FORMAT_R8_UINT), - }, - - DXGI_FORMAT_R8G8_UNORM - | 
DXGI_FORMAT_R8G8_SNORM - | DXGI_FORMAT_R8G8_UINT - | DXGI_FORMAT_R8G8_SINT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R8G8_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(DXGI_FORMAT_R8G8_UINT), - copy_srv: Some(DXGI_FORMAT_R8G8_UINT), - }, - - DXGI_FORMAT_D16_UNORM => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R16_TYPELESS, - srv: Some(DXGI_FORMAT_R16_FLOAT), - rtv: Some(DXGI_FORMAT_R16_FLOAT), - uav: Some(DXGI_FORMAT_R16_FLOAT), - dsv: Some(format), - copy_uav: Some(DXGI_FORMAT_R16_UINT), - copy_srv: Some(DXGI_FORMAT_R16_UINT), - }, - - DXGI_FORMAT_R16_UNORM - | DXGI_FORMAT_R16_SNORM - | DXGI_FORMAT_R16_UINT - | DXGI_FORMAT_R16_SINT - | DXGI_FORMAT_R16_FLOAT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R16_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: Some(DXGI_FORMAT_D16_UNORM), - copy_uav: Some(DXGI_FORMAT_R16_UINT), - copy_srv: Some(DXGI_FORMAT_R16_UINT), - }, - - DXGI_FORMAT_R16G16_UNORM - | DXGI_FORMAT_R16G16_SNORM - | DXGI_FORMAT_R16G16_UINT - | DXGI_FORMAT_R16G16_SINT - | DXGI_FORMAT_R16G16_FLOAT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R16G16_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(DXGI_FORMAT_R32_UINT), - copy_srv: Some(DXGI_FORMAT_R16G16_UINT), - }, - - DXGI_FORMAT_R16G16B16A16_UNORM - | DXGI_FORMAT_R16G16B16A16_SNORM - | DXGI_FORMAT_R16G16B16A16_UINT - | DXGI_FORMAT_R16G16B16A16_SINT - | DXGI_FORMAT_R16G16B16A16_FLOAT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R16G16B16A16_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(DXGI_FORMAT_R16G16B16A16_UINT), - copy_srv: Some(DXGI_FORMAT_R16G16B16A16_UINT), - }, - - DXGI_FORMAT_D32_FLOAT_S8X24_UINT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R32G8X24_TYPELESS, - // TODO: depth or stencil? 
- srv: Some(DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS), - rtv: None, - uav: None, - dsv: Some(format), - copy_uav: None, - copy_srv: Some(DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS), - }, - - DXGI_FORMAT_D32_FLOAT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R32_TYPELESS, - srv: Some(DXGI_FORMAT_R32_FLOAT), - rtv: None, - uav: None, - dsv: Some(format), - copy_uav: Some(DXGI_FORMAT_R32_UINT), - copy_srv: Some(DXGI_FORMAT_R32_UINT), - }, - - DXGI_FORMAT_R32_UINT | DXGI_FORMAT_R32_SINT | DXGI_FORMAT_R32_FLOAT => { - DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R32_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: Some(DXGI_FORMAT_D32_FLOAT), - copy_uav: Some(DXGI_FORMAT_R32_UINT), - copy_srv: Some(DXGI_FORMAT_R32_UINT), - } - } - - DXGI_FORMAT_R32G32_UINT | DXGI_FORMAT_R32G32_SINT | DXGI_FORMAT_R32G32_FLOAT => { - DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R32G32_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(DXGI_FORMAT_R32G32_UINT), - copy_srv: Some(DXGI_FORMAT_R32G32_UINT), - } - } - - // TODO: should we just convert to Rgba32 internally? 
- DXGI_FORMAT_R32G32B32_UINT - | DXGI_FORMAT_R32G32B32_SINT - | DXGI_FORMAT_R32G32B32_FLOAT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R32G32_TYPELESS, - srv: Some(format), - rtv: None, - uav: None, - dsv: None, - copy_uav: Some(DXGI_FORMAT_R32G32B32_UINT), - copy_srv: Some(DXGI_FORMAT_R32G32B32_UINT), - }, - - DXGI_FORMAT_R32G32B32A32_UINT - | DXGI_FORMAT_R32G32B32A32_SINT - | DXGI_FORMAT_R32G32B32A32_FLOAT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R32G32B32A32_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(DXGI_FORMAT_R32G32B32A32_UINT), - copy_srv: Some(DXGI_FORMAT_R32G32B32A32_UINT), - }, - - DXGI_FORMAT_R10G10B10A2_UNORM | DXGI_FORMAT_R10G10B10A2_UINT => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_R10G10B10A2_TYPELESS, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(DXGI_FORMAT_R32_UINT), - copy_srv: Some(DXGI_FORMAT_R10G10B10A2_UINT), - }, - - DXGI_FORMAT_R11G11B10_FLOAT => DecomposedDxgiFormat { - typeless: format, - srv: Some(format), - rtv: Some(format), - uav: Some(format), - dsv: None, - copy_uav: Some(format), - copy_srv: Some(format), - }, - - DXGI_FORMAT_R9G9B9E5_SHAREDEXP => DecomposedDxgiFormat { - typeless: format, - srv: Some(format), - rtv: None, - uav: None, - dsv: None, - // NOTE: read only - copy_uav: None, - copy_srv: Some(format), - }, - - DXGI_FORMAT_BC1_UNORM | DXGI_FORMAT_BC1_UNORM_SRGB => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_BC1_TYPELESS, - srv: Some(format), - rtv: None, - uav: None, - dsv: None, - // NOTE: read only - copy_uav: None, - copy_srv: Some(format), - }, - - DXGI_FORMAT_BC2_UNORM | DXGI_FORMAT_BC2_UNORM_SRGB => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_BC2_TYPELESS, - srv: Some(format), - rtv: None, - uav: None, - dsv: None, - // NOTE: read only - copy_uav: None, - copy_srv: Some(format), - }, - - DXGI_FORMAT_BC3_UNORM | DXGI_FORMAT_BC3_UNORM_SRGB => DecomposedDxgiFormat { - typeless: 
DXGI_FORMAT_BC3_TYPELESS, - srv: Some(format), - rtv: None, - uav: None, - dsv: None, - // NOTE: read only - copy_uav: None, - copy_srv: Some(format), - }, - - DXGI_FORMAT_BC4_UNORM | DXGI_FORMAT_BC4_SNORM => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_BC4_TYPELESS, - srv: Some(format), - rtv: None, - uav: None, - dsv: None, - // NOTE: read only - copy_uav: None, - copy_srv: Some(format), - }, - - DXGI_FORMAT_BC5_UNORM | DXGI_FORMAT_BC5_SNORM => DecomposedDxgiFormat { - typeless: format, - srv: Some(format), - rtv: None, - uav: None, - dsv: None, - // NOTE: read only - copy_uav: None, - copy_srv: Some(format), - }, - - DXGI_FORMAT_BC6H_UF16 | DXGI_FORMAT_BC6H_SF16 => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_BC6H_TYPELESS, - srv: Some(format), - rtv: None, - uav: None, - dsv: None, - // NOTE: read only - copy_uav: None, - copy_srv: Some(format), - }, - - // TODO: srgb craziness - DXGI_FORMAT_BC7_UNORM | DXGI_FORMAT_BC7_UNORM_SRGB => DecomposedDxgiFormat { - typeless: DXGI_FORMAT_BC7_TYPELESS, - srv: Some(format), - rtv: None, - uav: None, - dsv: None, - // NOTE: read only - copy_uav: None, - copy_srv: Some(format), - }, - - _ => unimplemented!(), - } - } -} - -pub fn map_viewport(viewport: &Viewport) -> D3D11_VIEWPORT { - D3D11_VIEWPORT { - TopLeftX: viewport.rect.x as _, - TopLeftY: viewport.rect.y as _, - Width: viewport.rect.w as _, - Height: viewport.rect.h as _, - MinDepth: viewport.depth.start, - MaxDepth: viewport.depth.end, - } -} - -pub fn map_rect(rect: &Rect) -> D3D11_RECT { - D3D11_RECT { - left: rect.x as _, - top: rect.y as _, - right: (rect.x + rect.w) as _, - bottom: (rect.y + rect.h) as _, - } -} - -pub fn map_topology(ia: &InputAssemblerDesc) -> D3D11_PRIMITIVE_TOPOLOGY { - use hal::pso::Primitive::*; - match (ia.primitive, ia.with_adjacency) { - (PointList, false) => D3D_PRIMITIVE_TOPOLOGY_POINTLIST, - (PointList, true) => panic!("Points can't have adjacency info"), - (LineList, false) => D3D_PRIMITIVE_TOPOLOGY_LINELIST, - (LineList, 
true) => D3D_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, - (LineStrip, false) => D3D_PRIMITIVE_TOPOLOGY_LINESTRIP, - (LineStrip, true) => D3D_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ, - (TriangleList, false) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST, - (TriangleList, true) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ, - (TriangleStrip, false) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, - (TriangleStrip, true) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ, - (PatchList(num), false) => { - assert!(num != 0); - D3D_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST + (num as u32) - 1 - } - (_, true) => panic!("Patches can't have adjacency info"), - } -} - -fn map_fill_mode(mode: PolygonMode) -> D3D11_FILL_MODE { - match mode { - PolygonMode::Fill => D3D11_FILL_SOLID, - PolygonMode::Line(_) => D3D11_FILL_WIREFRAME, - // TODO: return error - _ => unimplemented!(), - } -} - -fn map_cull_mode(mode: Face) -> D3D11_CULL_MODE { - match mode { - Face::NONE => D3D11_CULL_NONE, - Face::FRONT => D3D11_CULL_FRONT, - Face::BACK => D3D11_CULL_BACK, - _ => panic!("Culling both front and back faces is not supported"), - } -} - -pub(crate) fn map_rasterizer_desc(desc: &Rasterizer) -> D3D11_RASTERIZER_DESC { - let bias = match desc.depth_bias { - //TODO: support dynamic depth bias - Some(State::Static(db)) => db, - Some(_) | None => DepthBias::default(), - }; - D3D11_RASTERIZER_DESC { - FillMode: map_fill_mode(desc.polygon_mode), - CullMode: map_cull_mode(desc.cull_face), - FrontCounterClockwise: match desc.front_face { - FrontFace::Clockwise => FALSE, - FrontFace::CounterClockwise => TRUE, - }, - DepthBias: bias.const_factor as INT, - DepthBiasClamp: bias.clamp, - SlopeScaledDepthBias: bias.slope_factor, - DepthClipEnable: !desc.depth_clamping as _, - // TODO: - ScissorEnable: TRUE, - // TODO: msaa - MultisampleEnable: FALSE, - // TODO: line aa? 
- AntialiasedLineEnable: FALSE, - // TODO: conservative raster in >=11.x - } -} - -fn map_blend_factor(factor: Factor) -> D3D11_BLEND { - match factor { - Factor::Zero => D3D11_BLEND_ZERO, - Factor::One => D3D11_BLEND_ONE, - Factor::SrcColor => D3D11_BLEND_SRC_COLOR, - Factor::OneMinusSrcColor => D3D11_BLEND_INV_SRC_COLOR, - Factor::DstColor => D3D11_BLEND_DEST_COLOR, - Factor::OneMinusDstColor => D3D11_BLEND_INV_DEST_COLOR, - Factor::SrcAlpha => D3D11_BLEND_SRC_ALPHA, - Factor::OneMinusSrcAlpha => D3D11_BLEND_INV_SRC_ALPHA, - Factor::DstAlpha => D3D11_BLEND_DEST_ALPHA, - Factor::OneMinusDstAlpha => D3D11_BLEND_INV_DEST_ALPHA, - Factor::ConstColor | Factor::ConstAlpha => D3D11_BLEND_BLEND_FACTOR, - Factor::OneMinusConstColor | Factor::OneMinusConstAlpha => D3D11_BLEND_INV_BLEND_FACTOR, - Factor::SrcAlphaSaturate => D3D11_BLEND_SRC_ALPHA_SAT, - Factor::Src1Color => D3D11_BLEND_SRC1_COLOR, - Factor::OneMinusSrc1Color => D3D11_BLEND_INV_SRC1_COLOR, - Factor::Src1Alpha => D3D11_BLEND_SRC1_ALPHA, - Factor::OneMinusSrc1Alpha => D3D11_BLEND_INV_SRC1_ALPHA, - } -} - -fn map_alpha_blend_factor(factor: Factor) -> D3D11_BLEND { - match factor { - Factor::Zero => D3D11_BLEND_ZERO, - Factor::One => D3D11_BLEND_ONE, - Factor::SrcColor | Factor::SrcAlpha => D3D11_BLEND_SRC_ALPHA, - Factor::DstColor | Factor::DstAlpha => D3D11_BLEND_DEST_ALPHA, - Factor::OneMinusSrcColor | Factor::OneMinusSrcAlpha => D3D11_BLEND_INV_SRC_ALPHA, - Factor::OneMinusDstColor | Factor::OneMinusDstAlpha => D3D11_BLEND_INV_DEST_ALPHA, - Factor::ConstColor | Factor::ConstAlpha => D3D11_BLEND_BLEND_FACTOR, - Factor::OneMinusConstColor | Factor::OneMinusConstAlpha => D3D11_BLEND_INV_BLEND_FACTOR, - Factor::SrcAlphaSaturate => D3D11_BLEND_SRC_ALPHA_SAT, - Factor::Src1Color | Factor::Src1Alpha => D3D11_BLEND_SRC1_ALPHA, - Factor::OneMinusSrc1Color | Factor::OneMinusSrc1Alpha => D3D11_BLEND_INV_SRC1_ALPHA, - } -} - -fn map_blend_op(operation: BlendOp) -> (D3D11_BLEND_OP, D3D11_BLEND, D3D11_BLEND) { - match 
operation { - BlendOp::Add { src, dst } => ( - D3D11_BLEND_OP_ADD, - map_blend_factor(src), - map_blend_factor(dst), - ), - BlendOp::Sub { src, dst } => ( - D3D11_BLEND_OP_SUBTRACT, - map_blend_factor(src), - map_blend_factor(dst), - ), - BlendOp::RevSub { src, dst } => ( - D3D11_BLEND_OP_REV_SUBTRACT, - map_blend_factor(src), - map_blend_factor(dst), - ), - BlendOp::Min => (D3D11_BLEND_OP_MIN, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), - BlendOp::Max => (D3D11_BLEND_OP_MAX, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), - } -} - -fn map_alpha_blend_op(operation: BlendOp) -> (D3D11_BLEND_OP, D3D11_BLEND, D3D11_BLEND) { - match operation { - BlendOp::Add { src, dst } => ( - D3D11_BLEND_OP_ADD, - map_alpha_blend_factor(src), - map_alpha_blend_factor(dst), - ), - BlendOp::Sub { src, dst } => ( - D3D11_BLEND_OP_SUBTRACT, - map_alpha_blend_factor(src), - map_alpha_blend_factor(dst), - ), - BlendOp::RevSub { src, dst } => ( - D3D11_BLEND_OP_REV_SUBTRACT, - map_alpha_blend_factor(src), - map_alpha_blend_factor(dst), - ), - BlendOp::Min => (D3D11_BLEND_OP_MIN, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), - BlendOp::Max => (D3D11_BLEND_OP_MAX, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), - } -} - -fn map_blend_targets( - render_target_blends: &[ColorBlendDesc], -) -> [D3D11_RENDER_TARGET_BLEND_DESC; 8] { - let mut targets: [D3D11_RENDER_TARGET_BLEND_DESC; 8] = [unsafe { mem::zeroed() }; 8]; - - for (mut target, color_desc) in targets.iter_mut().zip(render_target_blends.iter()) { - target.RenderTargetWriteMask = color_desc.mask.bits() as _; - if let Some(ref blend) = color_desc.blend { - let (color_op, color_src, color_dst) = map_blend_op(blend.color); - let (alpha_op, alpha_src, alpha_dst) = map_alpha_blend_op(blend.alpha); - target.BlendEnable = TRUE; - target.BlendOp = color_op; - target.SrcBlend = color_src; - target.DestBlend = color_dst; - target.BlendOpAlpha = alpha_op; - target.SrcBlendAlpha = alpha_src; - target.DestBlendAlpha = alpha_dst; - } - } - - targets -} - -pub(crate) fn 
map_blend_desc(desc: &BlendDesc) -> D3D11_BLEND_DESC { - D3D11_BLEND_DESC { - // TODO: msaa - AlphaToCoverageEnable: FALSE, - IndependentBlendEnable: TRUE, - RenderTarget: map_blend_targets(&desc.targets), - } -} - -pub fn map_comparison(func: Comparison) -> D3D11_COMPARISON_FUNC { - match func { - Comparison::Never => D3D11_COMPARISON_NEVER, - Comparison::Less => D3D11_COMPARISON_LESS, - Comparison::LessEqual => D3D11_COMPARISON_LESS_EQUAL, - Comparison::Equal => D3D11_COMPARISON_EQUAL, - Comparison::GreaterEqual => D3D11_COMPARISON_GREATER_EQUAL, - Comparison::Greater => D3D11_COMPARISON_GREATER, - Comparison::NotEqual => D3D11_COMPARISON_NOT_EQUAL, - Comparison::Always => D3D11_COMPARISON_ALWAYS, - } -} - -fn map_stencil_op(op: StencilOp) -> D3D11_STENCIL_OP { - match op { - StencilOp::Keep => D3D11_STENCIL_OP_KEEP, - StencilOp::Zero => D3D11_STENCIL_OP_ZERO, - StencilOp::Replace => D3D11_STENCIL_OP_REPLACE, - StencilOp::IncrementClamp => D3D11_STENCIL_OP_INCR_SAT, - StencilOp::IncrementWrap => D3D11_STENCIL_OP_INCR, - StencilOp::DecrementClamp => D3D11_STENCIL_OP_DECR_SAT, - StencilOp::DecrementWrap => D3D11_STENCIL_OP_DECR, - StencilOp::Invert => D3D11_STENCIL_OP_INVERT, - } -} - -fn map_stencil_side(side: &StencilFace) -> D3D11_DEPTH_STENCILOP_DESC { - D3D11_DEPTH_STENCILOP_DESC { - StencilFailOp: map_stencil_op(side.op_fail), - StencilDepthFailOp: map_stencil_op(side.op_depth_fail), - StencilPassOp: map_stencil_op(side.op_pass), - StencilFunc: map_comparison(side.fun), - } -} - -pub(crate) fn map_depth_stencil_desc( - desc: &DepthStencilDesc, -) -> (D3D11_DEPTH_STENCIL_DESC, State) { - let (depth_on, depth_write, depth_func) = match desc.depth { - Some(ref depth) => (TRUE, depth.write, map_comparison(depth.fun)), - None => unsafe { mem::zeroed() }, - }; - - let (stencil_on, front, back, read_mask, write_mask, stencil_ref) = match desc.stencil { - Some(ref stencil) => { - let read_masks = stencil.read_masks.static_or(Sided::new(!0)); - let write_masks = 
stencil.read_masks.static_or(Sided::new(!0)); - let reference_value = match stencil.reference_values { - State::Static(ref values) => { - if values.front != values.back { - error!("Different reference values for front ({}) and back ({}) of the stencil", - values.front, values.back); - } - State::Static(values.front) - } - State::Dynamic => State::Dynamic, - }; - // TODO: cascade to create_pipeline - if read_masks.front != read_masks.back || write_masks.front != write_masks.back { - error!( - "Different sides are specified for read ({:?} and write ({:?}) stencil masks", - read_masks, write_masks - ); - } - ( - TRUE, - map_stencil_side(&stencil.faces.front), - map_stencil_side(&stencil.faces.back), - read_masks.front, - write_masks.front, - reference_value, - ) - } - None => unsafe { mem::zeroed() }, - }; - - ( - D3D11_DEPTH_STENCIL_DESC { - DepthEnable: depth_on, - DepthWriteMask: if depth_write { - D3D11_DEPTH_WRITE_MASK_ALL - } else { - D3D11_DEPTH_WRITE_MASK_ZERO - }, - DepthFunc: depth_func, - StencilEnable: stencil_on, - StencilReadMask: read_mask as _, - StencilWriteMask: write_mask as _, - FrontFace: front, - BackFace: back, - }, - stencil_ref, - ) -} - -pub fn map_execution_model(model: spirv::ExecutionModel) -> Stage { - match model { - spirv::ExecutionModel::Vertex => Stage::Vertex, - spirv::ExecutionModel::Fragment => Stage::Fragment, - spirv::ExecutionModel::Geometry => Stage::Geometry, - spirv::ExecutionModel::GlCompute => Stage::Compute, - spirv::ExecutionModel::TessellationControl => Stage::Hull, - spirv::ExecutionModel::TessellationEvaluation => Stage::Domain, - spirv::ExecutionModel::Kernel => panic!("Kernel is not a valid execution model."), - } -} - -pub fn map_stage(stage: Stage) -> spirv::ExecutionModel { - match stage { - Stage::Vertex => spirv::ExecutionModel::Vertex, - Stage::Fragment => spirv::ExecutionModel::Fragment, - Stage::Geometry => spirv::ExecutionModel::Geometry, - Stage::Compute => spirv::ExecutionModel::GlCompute, - Stage::Hull => 
spirv::ExecutionModel::TessellationControl, - Stage::Domain => spirv::ExecutionModel::TessellationEvaluation, - } -} - -pub fn map_wrapping(wrap: WrapMode) -> D3D11_TEXTURE_ADDRESS_MODE { - match wrap { - WrapMode::Tile => D3D11_TEXTURE_ADDRESS_WRAP, - WrapMode::Mirror => D3D11_TEXTURE_ADDRESS_MIRROR, - WrapMode::Clamp => D3D11_TEXTURE_ADDRESS_CLAMP, - WrapMode::Border => D3D11_TEXTURE_ADDRESS_BORDER, - } -} - -pub fn map_anisotropic(anisotropic: Anisotropic) -> D3D11_FILTER { - match anisotropic { - Anisotropic::On(_) => D3D11_FILTER_ANISOTROPIC, - Anisotropic::Off => 0, - } -} - -fn map_filter_type(filter: Filter) -> D3D11_FILTER_TYPE { - match filter { - Filter::Nearest => D3D11_FILTER_TYPE_POINT, - Filter::Linear => D3D11_FILTER_TYPE_LINEAR, - } -} - -// Hopefully works just as well in d3d11 :) -pub fn map_filter( - mag_filter: Filter, - min_filter: Filter, - mip_filter: Filter, - reduction: D3D11_FILTER_REDUCTION_TYPE, - anisotropic: Anisotropic, -) -> D3D11_FILTER { - let mag = map_filter_type(mag_filter); - let min = map_filter_type(min_filter); - let mip = map_filter_type(mip_filter); - - (min & D3D11_FILTER_TYPE_MASK) << D3D11_MIN_FILTER_SHIFT - | (mag & D3D11_FILTER_TYPE_MASK) << D3D11_MAG_FILTER_SHIFT - | (mip & D3D11_FILTER_TYPE_MASK) << D3D11_MIP_FILTER_SHIFT - | (reduction & D3D11_FILTER_REDUCTION_TYPE_MASK) << D3D11_FILTER_REDUCTION_TYPE_SHIFT - | map_anisotropic(anisotropic) -} +use hal::{ + format::Format, + image::{Filter, WrapMode}, + pso::{ + BlendDesc, + BlendOp, + ColorBlendDesc, + Comparison, + DepthBias, + DepthStencilDesc, + Face, + Factor, + FrontFace, + InputAssemblerDesc, + PolygonMode, + Rasterizer, + Rect, + Sided, + Stage, + State, + StencilFace, + StencilOp, + StencilValue, + Viewport, + }, + IndexType, +}; + +use spirv_cross::spirv; + +use winapi::{ + shared::{ + dxgiformat::*, + minwindef::{FALSE, INT, TRUE}, + }, + um::{d3d11::*, d3dcommon::*}, +}; + +use std::mem; + +pub fn map_index_type(ty: IndexType) -> DXGI_FORMAT { + match 
ty { + IndexType::U16 => DXGI_FORMAT_R16_UINT, + IndexType::U32 => DXGI_FORMAT_R32_UINT, + } +} + +// TODO: add aspect parameter +pub fn viewable_format(format: DXGI_FORMAT) -> DXGI_FORMAT { + match format { + DXGI_FORMAT_D32_FLOAT_S8X24_UINT => DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS, + DXGI_FORMAT_D32_FLOAT => DXGI_FORMAT_R32_FLOAT, + DXGI_FORMAT_D16_UNORM => DXGI_FORMAT_R16_UNORM, + _ => format, + } +} + +// TODO: stolen from d3d12 backend, maybe share function somehow? +pub fn map_format(format: Format) -> Option { + use hal::format::Format::*; + + let format = match format { + R5g6b5Unorm => DXGI_FORMAT_B5G6R5_UNORM, + R5g5b5a1Unorm => DXGI_FORMAT_B5G5R5A1_UNORM, + R8Unorm => DXGI_FORMAT_R8_UNORM, + R8Snorm => DXGI_FORMAT_R8_SNORM, + R8Uint => DXGI_FORMAT_R8_UINT, + R8Sint => DXGI_FORMAT_R8_SINT, + Rg8Unorm => DXGI_FORMAT_R8G8_UNORM, + Rg8Snorm => DXGI_FORMAT_R8G8_SNORM, + Rg8Uint => DXGI_FORMAT_R8G8_UINT, + Rg8Sint => DXGI_FORMAT_R8G8_SINT, + Rgba8Unorm => DXGI_FORMAT_R8G8B8A8_UNORM, + Rgba8Snorm => DXGI_FORMAT_R8G8B8A8_SNORM, + Rgba8Uint => DXGI_FORMAT_R8G8B8A8_UINT, + Rgba8Sint => DXGI_FORMAT_R8G8B8A8_SINT, + Rgba8Srgb => DXGI_FORMAT_R8G8B8A8_UNORM_SRGB, + Bgra8Unorm => DXGI_FORMAT_B8G8R8A8_UNORM, + Bgra8Srgb => DXGI_FORMAT_B8G8R8A8_UNORM_SRGB, + A2b10g10r10Unorm => DXGI_FORMAT_R10G10B10A2_UNORM, + A2b10g10r10Uint => DXGI_FORMAT_R10G10B10A2_UINT, + R16Unorm => DXGI_FORMAT_R16_UNORM, + R16Snorm => DXGI_FORMAT_R16_SNORM, + R16Uint => DXGI_FORMAT_R16_UINT, + R16Sint => DXGI_FORMAT_R16_SINT, + R16Sfloat => DXGI_FORMAT_R16_FLOAT, + Rg16Unorm => DXGI_FORMAT_R16G16_UNORM, + Rg16Snorm => DXGI_FORMAT_R16G16_SNORM, + Rg16Uint => DXGI_FORMAT_R16G16_UINT, + Rg16Sint => DXGI_FORMAT_R16G16_SINT, + Rg16Sfloat => DXGI_FORMAT_R16G16_FLOAT, + Rgba16Unorm => DXGI_FORMAT_R16G16B16A16_UNORM, + Rgba16Snorm => DXGI_FORMAT_R16G16B16A16_SNORM, + Rgba16Uint => DXGI_FORMAT_R16G16B16A16_UINT, + Rgba16Sint => DXGI_FORMAT_R16G16B16A16_SINT, + Rgba16Sfloat => DXGI_FORMAT_R16G16B16A16_FLOAT, 
+ R32Uint => DXGI_FORMAT_R32_UINT, + R32Sint => DXGI_FORMAT_R32_SINT, + R32Sfloat => DXGI_FORMAT_R32_FLOAT, + Rg32Uint => DXGI_FORMAT_R32G32_UINT, + Rg32Sint => DXGI_FORMAT_R32G32_SINT, + Rg32Sfloat => DXGI_FORMAT_R32G32_FLOAT, + Rgb32Uint => DXGI_FORMAT_R32G32B32_UINT, + Rgb32Sint => DXGI_FORMAT_R32G32B32_SINT, + Rgb32Sfloat => DXGI_FORMAT_R32G32B32_FLOAT, + Rgba32Uint => DXGI_FORMAT_R32G32B32A32_UINT, + Rgba32Sint => DXGI_FORMAT_R32G32B32A32_SINT, + Rgba32Sfloat => DXGI_FORMAT_R32G32B32A32_FLOAT, + B10g11r11Ufloat => DXGI_FORMAT_R11G11B10_FLOAT, + E5b9g9r9Ufloat => DXGI_FORMAT_R9G9B9E5_SHAREDEXP, + D16Unorm => DXGI_FORMAT_D16_UNORM, + D32Sfloat => DXGI_FORMAT_D32_FLOAT, + D32SfloatS8Uint => DXGI_FORMAT_D32_FLOAT_S8X24_UINT, + Bc1RgbUnorm => DXGI_FORMAT_BC1_UNORM, + Bc1RgbSrgb => DXGI_FORMAT_BC1_UNORM_SRGB, + Bc2Unorm => DXGI_FORMAT_BC2_UNORM, + Bc2Srgb => DXGI_FORMAT_BC2_UNORM_SRGB, + Bc3Unorm => DXGI_FORMAT_BC3_UNORM, + Bc3Srgb => DXGI_FORMAT_BC3_UNORM_SRGB, + Bc4Unorm => DXGI_FORMAT_BC4_UNORM, + Bc4Snorm => DXGI_FORMAT_BC4_SNORM, + Bc5Unorm => DXGI_FORMAT_BC5_UNORM, + Bc5Snorm => DXGI_FORMAT_BC5_SNORM, + Bc6hUfloat => DXGI_FORMAT_BC6H_UF16, + Bc6hSfloat => DXGI_FORMAT_BC6H_SF16, + Bc7Unorm => DXGI_FORMAT_BC7_UNORM, + Bc7Srgb => DXGI_FORMAT_BC7_UNORM_SRGB, + + _ => return None, + }; + + Some(format) +} + +pub fn map_format_nosrgb(format: Format) -> Option { + // NOTE: DXGI doesn't allow sRGB format on the swapchain, but + // creating RTV of swapchain buffers with sRGB works + match format { + Format::Bgra8Srgb => Some(DXGI_FORMAT_B8G8R8A8_UNORM), + Format::Rgba8Srgb => Some(DXGI_FORMAT_R8G8B8A8_UNORM), + _ => map_format(format), + } +} + +#[derive(Debug, Clone)] +pub struct DecomposedDxgiFormat { + pub typeless: DXGI_FORMAT, + pub srv: Option, + pub rtv: Option, + pub uav: Option, + pub dsv: Option, + // the format we use internally for operating on textures (eg. 
Rgba8 uses R32 internally for + // copies) + pub copy_uav: Option, + pub copy_srv: Option, +} + +impl DecomposedDxgiFormat { + pub const UNKNOWN: DecomposedDxgiFormat = DecomposedDxgiFormat { + typeless: DXGI_FORMAT_UNKNOWN, + srv: None, + rtv: None, + uav: None, + dsv: None, + copy_uav: None, + copy_srv: None, + }; + + // TODO: we probably want to pass in usage flags or similar to allow for our `typeless_format` + // field to only contain the input format (eg. depth only rather than typeless likely + // improves perf since the driver doesn't need to expose internals) + // + // TODO: we also want aspect for determining depth/stencil + pub fn from_dxgi_format(format: DXGI_FORMAT) -> DecomposedDxgiFormat { + match format { + DXGI_FORMAT_R8G8B8A8_UNORM + | DXGI_FORMAT_R8G8B8A8_SNORM + | DXGI_FORMAT_R8G8B8A8_UINT + | DXGI_FORMAT_R8G8B8A8_SINT + | DXGI_FORMAT_R8G8B8A8_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R8G8B8A8_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R8G8B8A8_UINT), + }, + + DXGI_FORMAT_B8G8R8A8_UNORM | DXGI_FORMAT_B8G8R8A8_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_B8G8R8A8_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(DXGI_FORMAT_B8G8R8A8_UNORM), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_B8G8R8A8_UNORM), + }, + + DXGI_FORMAT_A8_UNORM => DecomposedDxgiFormat { + typeless: format, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(format), + copy_srv: Some(format), + }, + + DXGI_FORMAT_R8_UNORM | DXGI_FORMAT_R8_SNORM | DXGI_FORMAT_R8_UINT + | DXGI_FORMAT_R8_SINT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R8_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R8_UINT), + copy_srv: Some(DXGI_FORMAT_R8_UINT), + }, + + DXGI_FORMAT_R8G8_UNORM + | 
DXGI_FORMAT_R8G8_SNORM + | DXGI_FORMAT_R8G8_UINT + | DXGI_FORMAT_R8G8_SINT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R8G8_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R8G8_UINT), + copy_srv: Some(DXGI_FORMAT_R8G8_UINT), + }, + + DXGI_FORMAT_D16_UNORM => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R16_TYPELESS, + srv: Some(DXGI_FORMAT_R16_FLOAT), + rtv: Some(DXGI_FORMAT_R16_FLOAT), + uav: Some(DXGI_FORMAT_R16_FLOAT), + dsv: Some(format), + copy_uav: Some(DXGI_FORMAT_R16_UINT), + copy_srv: Some(DXGI_FORMAT_R16_UINT), + }, + + DXGI_FORMAT_R16_UNORM + | DXGI_FORMAT_R16_SNORM + | DXGI_FORMAT_R16_UINT + | DXGI_FORMAT_R16_SINT + | DXGI_FORMAT_R16_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R16_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: Some(DXGI_FORMAT_D16_UNORM), + copy_uav: Some(DXGI_FORMAT_R16_UINT), + copy_srv: Some(DXGI_FORMAT_R16_UINT), + }, + + DXGI_FORMAT_R16G16_UNORM + | DXGI_FORMAT_R16G16_SNORM + | DXGI_FORMAT_R16G16_UINT + | DXGI_FORMAT_R16G16_SINT + | DXGI_FORMAT_R16G16_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R16G16_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R16G16_UINT), + }, + + DXGI_FORMAT_R16G16B16A16_UNORM + | DXGI_FORMAT_R16G16B16A16_SNORM + | DXGI_FORMAT_R16G16B16A16_UINT + | DXGI_FORMAT_R16G16B16A16_SINT + | DXGI_FORMAT_R16G16B16A16_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R16G16B16A16_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R16G16B16A16_UINT), + copy_srv: Some(DXGI_FORMAT_R16G16B16A16_UINT), + }, + + DXGI_FORMAT_D32_FLOAT_S8X24_UINT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32G8X24_TYPELESS, + // TODO: depth or stencil? 
+ srv: Some(DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS), + rtv: None, + uav: None, + dsv: Some(format), + copy_uav: None, + copy_srv: Some(DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS), + }, + + DXGI_FORMAT_D32_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32_TYPELESS, + srv: Some(DXGI_FORMAT_R32_FLOAT), + rtv: None, + uav: None, + dsv: Some(format), + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R32_UINT), + }, + + DXGI_FORMAT_R32_UINT | DXGI_FORMAT_R32_SINT | DXGI_FORMAT_R32_FLOAT => { + DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: Some(DXGI_FORMAT_D32_FLOAT), + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R32_UINT), + } + } + + DXGI_FORMAT_R32G32_UINT | DXGI_FORMAT_R32G32_SINT | DXGI_FORMAT_R32G32_FLOAT => { + DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32G32_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32G32_UINT), + copy_srv: Some(DXGI_FORMAT_R32G32_UINT), + } + } + + // TODO: should we just convert to Rgba32 internally? 
+ DXGI_FORMAT_R32G32B32_UINT + | DXGI_FORMAT_R32G32B32_SINT + | DXGI_FORMAT_R32G32B32_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32G32_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32G32B32_UINT), + copy_srv: Some(DXGI_FORMAT_R32G32B32_UINT), + }, + + DXGI_FORMAT_R32G32B32A32_UINT + | DXGI_FORMAT_R32G32B32A32_SINT + | DXGI_FORMAT_R32G32B32A32_FLOAT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R32G32B32A32_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32G32B32A32_UINT), + copy_srv: Some(DXGI_FORMAT_R32G32B32A32_UINT), + }, + + DXGI_FORMAT_R10G10B10A2_UNORM | DXGI_FORMAT_R10G10B10A2_UINT => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_R10G10B10A2_TYPELESS, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(DXGI_FORMAT_R32_UINT), + copy_srv: Some(DXGI_FORMAT_R10G10B10A2_UINT), + }, + + DXGI_FORMAT_R11G11B10_FLOAT => DecomposedDxgiFormat { + typeless: format, + srv: Some(format), + rtv: Some(format), + uav: Some(format), + dsv: None, + copy_uav: Some(format), + copy_srv: Some(format), + }, + + DXGI_FORMAT_R9G9B9E5_SHAREDEXP => DecomposedDxgiFormat { + typeless: format, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + // NOTE: read only + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC1_UNORM | DXGI_FORMAT_BC1_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC1_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + // NOTE: read only + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC2_UNORM | DXGI_FORMAT_BC2_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC2_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + // NOTE: read only + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC3_UNORM | DXGI_FORMAT_BC3_UNORM_SRGB => DecomposedDxgiFormat { + typeless: 
DXGI_FORMAT_BC3_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + // NOTE: read only + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC4_UNORM | DXGI_FORMAT_BC4_SNORM => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC4_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + // NOTE: read only + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC5_UNORM | DXGI_FORMAT_BC5_SNORM => DecomposedDxgiFormat { + typeless: format, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + // NOTE: read only + copy_uav: None, + copy_srv: Some(format), + }, + + DXGI_FORMAT_BC6H_UF16 | DXGI_FORMAT_BC6H_SF16 => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC6H_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + // NOTE: read only + copy_uav: None, + copy_srv: Some(format), + }, + + // TODO: srgb craziness + DXGI_FORMAT_BC7_UNORM | DXGI_FORMAT_BC7_UNORM_SRGB => DecomposedDxgiFormat { + typeless: DXGI_FORMAT_BC7_TYPELESS, + srv: Some(format), + rtv: None, + uav: None, + dsv: None, + // NOTE: read only + copy_uav: None, + copy_srv: Some(format), + }, + + _ => unimplemented!(), + } + } +} + +pub fn map_viewport(viewport: &Viewport) -> D3D11_VIEWPORT { + D3D11_VIEWPORT { + TopLeftX: viewport.rect.x as _, + TopLeftY: viewport.rect.y as _, + Width: viewport.rect.w as _, + Height: viewport.rect.h as _, + MinDepth: viewport.depth.start, + MaxDepth: viewport.depth.end, + } +} + +pub fn map_rect(rect: &Rect) -> D3D11_RECT { + D3D11_RECT { + left: rect.x as _, + top: rect.y as _, + right: (rect.x + rect.w) as _, + bottom: (rect.y + rect.h) as _, + } +} + +pub fn map_topology(ia: &InputAssemblerDesc) -> D3D11_PRIMITIVE_TOPOLOGY { + use hal::pso::Primitive::*; + match (ia.primitive, ia.with_adjacency) { + (PointList, false) => D3D_PRIMITIVE_TOPOLOGY_POINTLIST, + (PointList, true) => panic!("Points can't have adjacency info"), + (LineList, false) => D3D_PRIMITIVE_TOPOLOGY_LINELIST, + (LineList, 
true) => D3D_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, + (LineStrip, false) => D3D_PRIMITIVE_TOPOLOGY_LINESTRIP, + (LineStrip, true) => D3D_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ, + (TriangleList, false) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST, + (TriangleList, true) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ, + (TriangleStrip, false) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, + (TriangleStrip, true) => D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ, + (PatchList(num), false) => { + assert!(num != 0); + D3D_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST + (num as u32) - 1 + } + (_, true) => panic!("Patches can't have adjacency info"), + } +} + +fn map_fill_mode(mode: PolygonMode) -> D3D11_FILL_MODE { + match mode { + PolygonMode::Fill => D3D11_FILL_SOLID, + PolygonMode::Line => D3D11_FILL_WIREFRAME, + // TODO: return error + _ => unimplemented!(), + } +} + +fn map_cull_mode(mode: Face) -> D3D11_CULL_MODE { + match mode { + Face::NONE => D3D11_CULL_NONE, + Face::FRONT => D3D11_CULL_FRONT, + Face::BACK => D3D11_CULL_BACK, + _ => panic!("Culling both front and back faces is not supported"), + } +} + +pub(crate) fn map_rasterizer_desc(desc: &Rasterizer) -> D3D11_RASTERIZER_DESC { + let bias = match desc.depth_bias { + //TODO: support dynamic depth bias + Some(State::Static(db)) => db, + Some(_) | None => DepthBias::default(), + }; + if let State::Static(w) = desc.line_width { + super::validate_line_width(w); + } + D3D11_RASTERIZER_DESC { + FillMode: map_fill_mode(desc.polygon_mode), + CullMode: map_cull_mode(desc.cull_face), + FrontCounterClockwise: match desc.front_face { + FrontFace::Clockwise => FALSE, + FrontFace::CounterClockwise => TRUE, + }, + DepthBias: bias.const_factor as INT, + DepthBiasClamp: bias.clamp, + SlopeScaledDepthBias: bias.slope_factor, + DepthClipEnable: !desc.depth_clamping as _, + // TODO: + ScissorEnable: TRUE, + // TODO: msaa + MultisampleEnable: FALSE, + // TODO: line aa? 
+ AntialiasedLineEnable: FALSE, + // TODO: conservative raster in >=11.x + } +} + +fn map_blend_factor(factor: Factor) -> D3D11_BLEND { + match factor { + Factor::Zero => D3D11_BLEND_ZERO, + Factor::One => D3D11_BLEND_ONE, + Factor::SrcColor => D3D11_BLEND_SRC_COLOR, + Factor::OneMinusSrcColor => D3D11_BLEND_INV_SRC_COLOR, + Factor::DstColor => D3D11_BLEND_DEST_COLOR, + Factor::OneMinusDstColor => D3D11_BLEND_INV_DEST_COLOR, + Factor::SrcAlpha => D3D11_BLEND_SRC_ALPHA, + Factor::OneMinusSrcAlpha => D3D11_BLEND_INV_SRC_ALPHA, + Factor::DstAlpha => D3D11_BLEND_DEST_ALPHA, + Factor::OneMinusDstAlpha => D3D11_BLEND_INV_DEST_ALPHA, + Factor::ConstColor | Factor::ConstAlpha => D3D11_BLEND_BLEND_FACTOR, + Factor::OneMinusConstColor | Factor::OneMinusConstAlpha => D3D11_BLEND_INV_BLEND_FACTOR, + Factor::SrcAlphaSaturate => D3D11_BLEND_SRC_ALPHA_SAT, + Factor::Src1Color => D3D11_BLEND_SRC1_COLOR, + Factor::OneMinusSrc1Color => D3D11_BLEND_INV_SRC1_COLOR, + Factor::Src1Alpha => D3D11_BLEND_SRC1_ALPHA, + Factor::OneMinusSrc1Alpha => D3D11_BLEND_INV_SRC1_ALPHA, + } +} + +fn map_alpha_blend_factor(factor: Factor) -> D3D11_BLEND { + match factor { + Factor::Zero => D3D11_BLEND_ZERO, + Factor::One => D3D11_BLEND_ONE, + Factor::SrcColor | Factor::SrcAlpha => D3D11_BLEND_SRC_ALPHA, + Factor::DstColor | Factor::DstAlpha => D3D11_BLEND_DEST_ALPHA, + Factor::OneMinusSrcColor | Factor::OneMinusSrcAlpha => D3D11_BLEND_INV_SRC_ALPHA, + Factor::OneMinusDstColor | Factor::OneMinusDstAlpha => D3D11_BLEND_INV_DEST_ALPHA, + Factor::ConstColor | Factor::ConstAlpha => D3D11_BLEND_BLEND_FACTOR, + Factor::OneMinusConstColor | Factor::OneMinusConstAlpha => D3D11_BLEND_INV_BLEND_FACTOR, + Factor::SrcAlphaSaturate => D3D11_BLEND_SRC_ALPHA_SAT, + Factor::Src1Color | Factor::Src1Alpha => D3D11_BLEND_SRC1_ALPHA, + Factor::OneMinusSrc1Color | Factor::OneMinusSrc1Alpha => D3D11_BLEND_INV_SRC1_ALPHA, + } +} + +fn map_blend_op(operation: BlendOp) -> (D3D11_BLEND_OP, D3D11_BLEND, D3D11_BLEND) { + match 
operation { + BlendOp::Add { src, dst } => ( + D3D11_BLEND_OP_ADD, + map_blend_factor(src), + map_blend_factor(dst), + ), + BlendOp::Sub { src, dst } => ( + D3D11_BLEND_OP_SUBTRACT, + map_blend_factor(src), + map_blend_factor(dst), + ), + BlendOp::RevSub { src, dst } => ( + D3D11_BLEND_OP_REV_SUBTRACT, + map_blend_factor(src), + map_blend_factor(dst), + ), + BlendOp::Min => (D3D11_BLEND_OP_MIN, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), + BlendOp::Max => (D3D11_BLEND_OP_MAX, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), + } +} + +fn map_alpha_blend_op(operation: BlendOp) -> (D3D11_BLEND_OP, D3D11_BLEND, D3D11_BLEND) { + match operation { + BlendOp::Add { src, dst } => ( + D3D11_BLEND_OP_ADD, + map_alpha_blend_factor(src), + map_alpha_blend_factor(dst), + ), + BlendOp::Sub { src, dst } => ( + D3D11_BLEND_OP_SUBTRACT, + map_alpha_blend_factor(src), + map_alpha_blend_factor(dst), + ), + BlendOp::RevSub { src, dst } => ( + D3D11_BLEND_OP_REV_SUBTRACT, + map_alpha_blend_factor(src), + map_alpha_blend_factor(dst), + ), + BlendOp::Min => (D3D11_BLEND_OP_MIN, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), + BlendOp::Max => (D3D11_BLEND_OP_MAX, D3D11_BLEND_ZERO, D3D11_BLEND_ZERO), + } +} + +fn map_blend_targets( + render_target_blends: &[ColorBlendDesc], +) -> [D3D11_RENDER_TARGET_BLEND_DESC; 8] { + let mut targets: [D3D11_RENDER_TARGET_BLEND_DESC; 8] = [unsafe { mem::zeroed() }; 8]; + + for (mut target, color_desc) in targets.iter_mut().zip(render_target_blends.iter()) { + target.RenderTargetWriteMask = color_desc.mask.bits() as _; + if let Some(ref blend) = color_desc.blend { + let (color_op, color_src, color_dst) = map_blend_op(blend.color); + let (alpha_op, alpha_src, alpha_dst) = map_alpha_blend_op(blend.alpha); + target.BlendEnable = TRUE; + target.BlendOp = color_op; + target.SrcBlend = color_src; + target.DestBlend = color_dst; + target.BlendOpAlpha = alpha_op; + target.SrcBlendAlpha = alpha_src; + target.DestBlendAlpha = alpha_dst; + } + } + + targets +} + +pub(crate) fn 
map_blend_desc(desc: &BlendDesc) -> D3D11_BLEND_DESC { + D3D11_BLEND_DESC { + // TODO: msaa + AlphaToCoverageEnable: FALSE, + IndependentBlendEnable: TRUE, + RenderTarget: map_blend_targets(&desc.targets), + } +} + +pub fn map_comparison(func: Comparison) -> D3D11_COMPARISON_FUNC { + match func { + Comparison::Never => D3D11_COMPARISON_NEVER, + Comparison::Less => D3D11_COMPARISON_LESS, + Comparison::LessEqual => D3D11_COMPARISON_LESS_EQUAL, + Comparison::Equal => D3D11_COMPARISON_EQUAL, + Comparison::GreaterEqual => D3D11_COMPARISON_GREATER_EQUAL, + Comparison::Greater => D3D11_COMPARISON_GREATER, + Comparison::NotEqual => D3D11_COMPARISON_NOT_EQUAL, + Comparison::Always => D3D11_COMPARISON_ALWAYS, + } +} + +fn map_stencil_op(op: StencilOp) -> D3D11_STENCIL_OP { + match op { + StencilOp::Keep => D3D11_STENCIL_OP_KEEP, + StencilOp::Zero => D3D11_STENCIL_OP_ZERO, + StencilOp::Replace => D3D11_STENCIL_OP_REPLACE, + StencilOp::IncrementClamp => D3D11_STENCIL_OP_INCR_SAT, + StencilOp::IncrementWrap => D3D11_STENCIL_OP_INCR, + StencilOp::DecrementClamp => D3D11_STENCIL_OP_DECR_SAT, + StencilOp::DecrementWrap => D3D11_STENCIL_OP_DECR, + StencilOp::Invert => D3D11_STENCIL_OP_INVERT, + } +} + +fn map_stencil_side(side: &StencilFace) -> D3D11_DEPTH_STENCILOP_DESC { + D3D11_DEPTH_STENCILOP_DESC { + StencilFailOp: map_stencil_op(side.op_fail), + StencilDepthFailOp: map_stencil_op(side.op_depth_fail), + StencilPassOp: map_stencil_op(side.op_pass), + StencilFunc: map_comparison(side.fun), + } +} + +pub(crate) fn map_depth_stencil_desc( + desc: &DepthStencilDesc, +) -> (D3D11_DEPTH_STENCIL_DESC, State) { + let (depth_on, depth_write, depth_func) = match desc.depth { + Some(ref depth) => (TRUE, depth.write, map_comparison(depth.fun)), + None => unsafe { mem::zeroed() }, + }; + + let (stencil_on, front, back, read_mask, write_mask, stencil_ref) = match desc.stencil { + Some(ref stencil) => { + let read_masks = stencil.read_masks.static_or(Sided::new(!0)); + let write_masks = 
stencil.read_masks.static_or(Sided::new(!0)); + let reference_value = match stencil.reference_values { + State::Static(ref values) => { + if values.front != values.back { + error!("Different reference values for front ({}) and back ({}) of the stencil", + values.front, values.back); + } + State::Static(values.front) + } + State::Dynamic => State::Dynamic, + }; + // TODO: cascade to create_pipeline + if read_masks.front != read_masks.back || write_masks.front != write_masks.back { + error!( + "Different sides are specified for read ({:?} and write ({:?}) stencil masks", + read_masks, write_masks + ); + } + ( + TRUE, + map_stencil_side(&stencil.faces.front), + map_stencil_side(&stencil.faces.back), + read_masks.front, + write_masks.front, + reference_value, + ) + } + None => unsafe { mem::zeroed() }, + }; + + ( + D3D11_DEPTH_STENCIL_DESC { + DepthEnable: depth_on, + DepthWriteMask: if depth_write { + D3D11_DEPTH_WRITE_MASK_ALL + } else { + D3D11_DEPTH_WRITE_MASK_ZERO + }, + DepthFunc: depth_func, + StencilEnable: stencil_on, + StencilReadMask: read_mask as _, + StencilWriteMask: write_mask as _, + FrontFace: front, + BackFace: back, + }, + stencil_ref, + ) +} + +pub fn map_execution_model(model: spirv::ExecutionModel) -> Stage { + match model { + spirv::ExecutionModel::Vertex => Stage::Vertex, + spirv::ExecutionModel::Fragment => Stage::Fragment, + spirv::ExecutionModel::Geometry => Stage::Geometry, + spirv::ExecutionModel::GlCompute => Stage::Compute, + spirv::ExecutionModel::TessellationControl => Stage::Hull, + spirv::ExecutionModel::TessellationEvaluation => Stage::Domain, + spirv::ExecutionModel::Kernel => panic!("Kernel is not a valid execution model."), + } +} + +pub fn map_stage(stage: Stage) -> spirv::ExecutionModel { + match stage { + Stage::Vertex => spirv::ExecutionModel::Vertex, + Stage::Fragment => spirv::ExecutionModel::Fragment, + Stage::Geometry => spirv::ExecutionModel::Geometry, + Stage::Compute => spirv::ExecutionModel::GlCompute, + Stage::Hull => 
spirv::ExecutionModel::TessellationControl, + Stage::Domain => spirv::ExecutionModel::TessellationEvaluation, + } +} + +pub fn map_wrapping(wrap: WrapMode) -> D3D11_TEXTURE_ADDRESS_MODE { + match wrap { + WrapMode::Tile => D3D11_TEXTURE_ADDRESS_WRAP, + WrapMode::Mirror => D3D11_TEXTURE_ADDRESS_MIRROR, + WrapMode::Clamp => D3D11_TEXTURE_ADDRESS_CLAMP, + WrapMode::Border => D3D11_TEXTURE_ADDRESS_BORDER, + WrapMode::MirrorClamp => D3D11_TEXTURE_ADDRESS_MIRROR_ONCE, + } +} + +fn map_filter_type(filter: Filter) -> D3D11_FILTER_TYPE { + match filter { + Filter::Nearest => D3D11_FILTER_TYPE_POINT, + Filter::Linear => D3D11_FILTER_TYPE_LINEAR, + } +} + +// Hopefully works just as well in d3d11 :) +pub fn map_filter( + mag_filter: Filter, + min_filter: Filter, + mip_filter: Filter, + reduction: D3D11_FILTER_REDUCTION_TYPE, + anisotropy_clamp: Option, +) -> D3D11_FILTER { + let mag = map_filter_type(mag_filter); + let min = map_filter_type(min_filter); + let mip = map_filter_type(mip_filter); + + (min & D3D11_FILTER_TYPE_MASK) << D3D11_MIN_FILTER_SHIFT + | (mag & D3D11_FILTER_TYPE_MASK) << D3D11_MAG_FILTER_SHIFT + | (mip & D3D11_FILTER_TYPE_MASK) << D3D11_MIP_FILTER_SHIFT + | (reduction & D3D11_FILTER_REDUCTION_TYPE_MASK) << D3D11_FILTER_REDUCTION_TYPE_SHIFT + | anisotropy_clamp + .map(|_| D3D11_FILTER_ANISOTROPIC) + .unwrap_or(0) +} diff --git a/third_party/rust/gfx-backend-dx11/src/debug.rs b/third_party/rust/gfx-backend-dx11/src/debug.rs index a9b4c1e3d883..1170524f7f61 100644 --- a/third_party/rust/gfx-backend-dx11/src/debug.rs +++ b/third_party/rust/gfx-backend-dx11/src/debug.rs @@ -1,92 +1,94 @@ -use winapi::um::d3d11; - -use wio::com::ComPtr; -use wio::wide::ToWide; - -use std::ffi::OsStr; -use std::{env, fmt}; - -// TODO: replace with new winapi version when available -#[allow(bad_style, unused)] -mod temp { - use winapi::shared::minwindef::{BOOL, INT}; - use winapi::um::unknwnbase::{IUnknown, IUnknownVtbl}; - use winapi::um::winnt::LPCWSTR; - - RIDL! 
{#[uuid(0xb2daad8b, 0x03d4, 0x4dbf, 0x95, 0xeb, 0x32, 0xab, 0x4b, 0x63, 0xd0, 0xab)] - interface ID3DUserDefinedAnnotation(ID3DUserDefinedAnnotationVtbl): - IUnknown(IUnknownVtbl) { - fn BeginEvent( - Name: LPCWSTR, - ) -> INT, - fn EndEvent() -> INT, - fn SetMarker( - Name: LPCWSTR, - ) -> (), - fn GetStatus() -> BOOL, - }} -} - -#[must_use] -#[cfg(debug_assertions)] -pub struct DebugScope { - annotation: ComPtr, -} - -#[cfg(debug_assertions)] -impl DebugScope { - pub fn with_name( - context: &ComPtr, - args: fmt::Arguments, - ) -> Option { - let name = format!("{}", args); - - // debugging with visual studio and its ilk *really* doesn't like calling this on a - // deferred context when replaying a capture, compared to renderdoc - if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED { - // TODO: find a better way to detect either if RD or VS is active debugger - if env::var("GFX_NO_RENDERDOC").is_ok() { - return None; - } - } - - let annotation = context.cast::().unwrap(); - let msg: &OsStr = name.as_ref(); - let msg: Vec = msg.to_wide_null(); - - unsafe { - annotation.BeginEvent(msg.as_ptr() as _); - } - - Some(DebugScope { annotation }) - } -} - -#[cfg(debug_assertions)] -impl Drop for DebugScope { - fn drop(&mut self) { - unsafe { - self.annotation.EndEvent(); - } - } -} - -#[cfg(debug_assertions)] -pub fn debug_marker(context: &ComPtr, args: fmt::Arguments) { - let name = format!("{}", args); - - // same here - if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED { - if env::var("GFX_NO_RENDERDOC").is_ok() { - return; - } - } - - let annotation = context.cast::().unwrap(); - let msg: &OsStr = name.as_ref(); - let msg: Vec = msg.to_wide_null(); - - unsafe { - annotation.SetMarker(msg.as_ptr() as _); - } -} +use winapi::um::d3d11; + +use wio::{com::ComPtr, wide::ToWide}; + +use std::{env, ffi::OsStr, fmt}; + +// TODO: replace with new winapi version when available +#[allow(bad_style, unused)] +mod temp { + use winapi::{ + 
shared::minwindef::{BOOL, INT}, + um::{ + unknwnbase::{IUnknown, IUnknownVtbl}, + winnt::LPCWSTR, + }, + }; + + RIDL! {#[uuid(0xb2daad8b, 0x03d4, 0x4dbf, 0x95, 0xeb, 0x32, 0xab, 0x4b, 0x63, 0xd0, 0xab)] + interface ID3DUserDefinedAnnotation(ID3DUserDefinedAnnotationVtbl): + IUnknown(IUnknownVtbl) { + fn BeginEvent( + Name: LPCWSTR, + ) -> INT, + fn EndEvent() -> INT, + fn SetMarker( + Name: LPCWSTR, + ) -> (), + fn GetStatus() -> BOOL, + }} +} + +#[must_use] +#[cfg(debug_assertions)] +pub struct DebugScope { + annotation: ComPtr, +} + +#[cfg(debug_assertions)] +impl DebugScope { + pub fn with_name( + context: &ComPtr, + args: fmt::Arguments, + ) -> Option { + let name = format!("{}", args); + + // debugging with visual studio and its ilk *really* doesn't like calling this on a + // deferred context when replaying a capture, compared to renderdoc + if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED { + // TODO: find a better way to detect either if RD or VS is active debugger + if env::var("GFX_NO_RENDERDOC").is_ok() { + return None; + } + } + + let annotation = context.cast::().unwrap(); + let msg: &OsStr = name.as_ref(); + let msg: Vec = msg.to_wide_null(); + + unsafe { + annotation.BeginEvent(msg.as_ptr() as _); + } + + Some(DebugScope { annotation }) + } +} + +#[cfg(debug_assertions)] +impl Drop for DebugScope { + fn drop(&mut self) { + unsafe { + self.annotation.EndEvent(); + } + } +} + +#[cfg(debug_assertions)] +pub fn debug_marker(context: &ComPtr, args: fmt::Arguments) { + let name = format!("{}", args); + + // same here + if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED { + if env::var("GFX_NO_RENDERDOC").is_ok() { + return; + } + } + + let annotation = context.cast::().unwrap(); + let msg: &OsStr = name.as_ref(); + let msg: Vec = msg.to_wide_null(); + + unsafe { + annotation.SetMarker(msg.as_ptr() as _); + } +} diff --git a/third_party/rust/gfx-backend-dx11/src/device.rs 
b/third_party/rust/gfx-backend-dx11/src/device.rs index 1083e4f482bd..5bf3cef90c58 100644 --- a/third_party/rust/gfx-backend-dx11/src/device.rs +++ b/third_party/rust/gfx-backend-dx11/src/device.rs @@ -1,2338 +1,2363 @@ -use hal::adapter::MemoryProperties; -use hal::pso::VertexInputRate; -use hal::queue::QueueFamilyId; -use hal::range::RangeArg; -use hal::{buffer, device, format, image, memory, pass, pool, pso, query, window}; - -use winapi::shared::dxgi::{ - IDXGIFactory, - IDXGISwapChain, - DXGI_SWAP_CHAIN_DESC, - DXGI_SWAP_EFFECT_DISCARD, -}; -use winapi::shared::minwindef::TRUE; -use winapi::shared::windef::HWND; -use winapi::shared::{dxgiformat, dxgitype, winerror}; -use winapi::um::{d3d11, d3d11sdklayers, d3dcommon}; -use winapi::Interface as _; - -use wio::com::ComPtr; - -use std::borrow::Borrow; -use std::cell::RefCell; -use std::ops::Range; -use std::sync::Arc; -use std::{fmt, mem, ptr}; - -use parking_lot::{Condvar, Mutex}; - -use { - Backend, - Buffer, - BufferView, - CommandBuffer, - CommandPool, - ComputePipeline, - DescriptorContent, - DescriptorIndex, - DescriptorPool, - DescriptorSet, - DescriptorSetInfo, - DescriptorSetLayout, - Fence, - Framebuffer, - GraphicsPipeline, - Image, - ImageView, - InternalBuffer, - InternalImage, - Memory, - MultiStageData, - PipelineLayout, - QueryPool, - RawFence, - RegisterData, - RegisterAccumulator, - RenderPass, - ResourceIndex, - Sampler, - Semaphore, - ShaderModule, - SubpassDesc, - Surface, - Swapchain, - ViewInfo, -}; - -use {conv, internal, shader}; - - -//TODO: expose coherent type 0x2 when it's properly supported -const BUFFER_TYPE_MASK: u64 = 0x1 | 0x4; - -struct InputLayout { - raw: ComPtr, - required_bindings: u32, - max_vertex_bindings: u32, - topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY, - vertex_strides: Vec, -} - -pub struct Device { - raw: ComPtr, - pub(crate) context: ComPtr, - memory_properties: MemoryProperties, - pub(crate) internal: internal::Internal, -} - -impl fmt::Debug for Device { - fn 
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Device") - } -} - -impl Drop for Device { - fn drop(&mut self) { - if let Ok(debug) = self.raw.cast::() { - unsafe { - debug.ReportLiveDeviceObjects(d3d11sdklayers::D3D11_RLDO_DETAIL); - } - } - } -} - -unsafe impl Send for Device {} -unsafe impl Sync for Device {} - -impl Device { - pub fn as_raw(&self) -> *mut d3d11::ID3D11Device { - self.raw.as_raw() - } - - pub fn new( - device: ComPtr, - context: ComPtr, - memory_properties: MemoryProperties, - ) -> Self { - Device { - raw: device.clone(), - context, - memory_properties, - internal: internal::Internal::new(&device), - } - } - - fn create_rasterizer_state( - &self, - rasterizer_desc: &pso::Rasterizer, - ) -> Result, pso::CreationError> { - let mut rasterizer = ptr::null_mut(); - let desc = conv::map_rasterizer_desc(rasterizer_desc); - - let hr = unsafe { - self.raw - .CreateRasterizerState(&desc, &mut rasterizer as *mut *mut _ as *mut *mut _) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(rasterizer) }) - } else { - Err(pso::CreationError::Other) - } - } - - fn create_blend_state( - &self, - blend_desc: &pso::BlendDesc, - ) -> Result, pso::CreationError> { - let mut blend = ptr::null_mut(); - let desc = conv::map_blend_desc(blend_desc); - - let hr = unsafe { - self.raw - .CreateBlendState(&desc, &mut blend as *mut *mut _ as *mut *mut _) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(blend) }) - } else { - Err(pso::CreationError::Other) - } - } - - fn create_depth_stencil_state( - &self, - depth_desc: &pso::DepthStencilDesc, - ) -> Result< - ( - ComPtr, - pso::State, - ), - pso::CreationError, - > { - let mut depth = ptr::null_mut(); - let (desc, stencil_ref) = conv::map_depth_stencil_desc(depth_desc); - - let hr = unsafe { - self.raw - .CreateDepthStencilState(&desc, &mut depth as *mut *mut _ as *mut *mut _) - }; - - if winerror::SUCCEEDED(hr) { - Ok((unsafe { ComPtr::from_raw(depth) }, 
stencil_ref)) - } else { - Err(pso::CreationError::Other) - } - } - - fn create_input_layout( - &self, - vs: ComPtr, - vertex_buffers: &[pso::VertexBufferDesc], - attributes: &[pso::AttributeDesc], - input_assembler: &pso::InputAssemblerDesc, - ) -> Result { - let mut layout = ptr::null_mut(); - - let mut vertex_strides = Vec::new(); - let mut required_bindings = 0u32; - let mut max_vertex_bindings = 0u32; - for buffer in vertex_buffers { - required_bindings |= 1 << buffer.binding as u32; - max_vertex_bindings = max_vertex_bindings.max(1u32 + buffer.binding as u32); - - while vertex_strides.len() <= buffer.binding as usize { - vertex_strides.push(0); - } - - vertex_strides[buffer.binding as usize] = buffer.stride; - } - - let input_elements = attributes - .iter() - .filter_map(|attrib| { - let buffer_desc = match vertex_buffers - .iter() - .find(|buffer_desc| buffer_desc.binding == attrib.binding) - { - Some(buffer_desc) => buffer_desc, - None => { - // TODO: - // L - // error!("Couldn't find associated vertex buffer description {:?}", attrib.binding); - return Some(Err(pso::CreationError::Other)); - } - }; - - let (slot_class, step_rate) = match buffer_desc.rate { - VertexInputRate::Vertex => (d3d11::D3D11_INPUT_PER_VERTEX_DATA, 0), - VertexInputRate::Instance(divisor) => { - (d3d11::D3D11_INPUT_PER_INSTANCE_DATA, divisor) - } - }; - let format = attrib.element.format; - - Some(Ok(d3d11::D3D11_INPUT_ELEMENT_DESC { - SemanticName: "TEXCOORD\0".as_ptr() as *const _, // Semantic name used by SPIRV-Cross - SemanticIndex: attrib.location, - Format: match conv::map_format(format) { - Some(fm) => fm, - None => { - // TODO: - // error!("Unable to find DXGI format for {:?}", format); - return Some(Err(pso::CreationError::Other)); - } - }, - InputSlot: attrib.binding as _, - AlignedByteOffset: attrib.element.offset, - InputSlotClass: slot_class, - InstanceDataStepRate: step_rate as _, - })) - }) - .collect::, _>>()?; - - let hr = unsafe { - self.raw.CreateInputLayout( - 
input_elements.as_ptr(), - input_elements.len() as _, - vs.GetBufferPointer(), - vs.GetBufferSize(), - &mut layout as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - let topology = conv::map_topology(input_assembler); - - Ok(InputLayout { - raw: unsafe { ComPtr::from_raw(layout) }, - required_bindings, - max_vertex_bindings, - topology, - vertex_strides, - }) - } else { - Err(pso::CreationError::Other) - } - } - - fn create_vertex_shader( - &self, - blob: ComPtr, - ) -> Result, pso::CreationError> { - let mut vs = ptr::null_mut(); - - let hr = unsafe { - self.raw.CreateVertexShader( - blob.GetBufferPointer(), - blob.GetBufferSize(), - ptr::null_mut(), - &mut vs as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(vs) }) - } else { - Err(pso::CreationError::Other) - } - } - - fn create_pixel_shader( - &self, - blob: ComPtr, - ) -> Result, pso::CreationError> { - let mut ps = ptr::null_mut(); - - let hr = unsafe { - self.raw.CreatePixelShader( - blob.GetBufferPointer(), - blob.GetBufferSize(), - ptr::null_mut(), - &mut ps as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(ps) }) - } else { - Err(pso::CreationError::Other) - } - } - - fn create_geometry_shader( - &self, - blob: ComPtr, - ) -> Result, pso::CreationError> { - let mut gs = ptr::null_mut(); - - let hr = unsafe { - self.raw.CreateGeometryShader( - blob.GetBufferPointer(), - blob.GetBufferSize(), - ptr::null_mut(), - &mut gs as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(gs) }) - } else { - Err(pso::CreationError::Other) - } - } - - fn create_hull_shader( - &self, - blob: ComPtr, - ) -> Result, pso::CreationError> { - let mut hs = ptr::null_mut(); - - let hr = unsafe { - self.raw.CreateHullShader( - blob.GetBufferPointer(), - blob.GetBufferSize(), - ptr::null_mut(), - &mut hs as *mut *mut _ as *mut *mut _, - ) - }; - - if 
winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(hs) }) - } else { - Err(pso::CreationError::Other) - } - } - - fn create_domain_shader( - &self, - blob: ComPtr, - ) -> Result, pso::CreationError> { - let mut ds = ptr::null_mut(); - - let hr = unsafe { - self.raw.CreateDomainShader( - blob.GetBufferPointer(), - blob.GetBufferSize(), - ptr::null_mut(), - &mut ds as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(ds) }) - } else { - Err(pso::CreationError::Other) - } - } - - fn create_compute_shader( - &self, - blob: ComPtr, - ) -> Result, pso::CreationError> { - let mut cs = ptr::null_mut(); - - let hr = unsafe { - self.raw.CreateComputeShader( - blob.GetBufferPointer(), - blob.GetBufferSize(), - ptr::null_mut(), - &mut cs as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(cs) }) - } else { - Err(pso::CreationError::Other) - } - } - - // TODO: fix return type.. - fn extract_entry_point( - stage: pso::Stage, - source: &pso::EntryPoint, - layout: &PipelineLayout, - ) -> Result>, device::ShaderError> { - // TODO: entrypoint stuff - match *source.module { - ShaderModule::Dxbc(ref _shader) => { - unimplemented!() - - // Ok(Some(shader)) - } - ShaderModule::Spirv(ref raw_data) => Ok(shader::compile_spirv_entrypoint( - raw_data, stage, source, layout, - )?), - } - } - - fn view_image_as_shader_resource( - &self, - info: &ViewInfo, - ) -> Result, image::ViewError> { - let mut desc: d3d11::D3D11_SHADER_RESOURCE_VIEW_DESC = unsafe { mem::zeroed() }; - desc.Format = info.format; - if desc.Format == dxgiformat::DXGI_FORMAT_D32_FLOAT_S8X24_UINT { - desc.Format = dxgiformat::DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS; - } - - #[allow(non_snake_case)] - let MostDetailedMip = info.range.levels.start as _; - #[allow(non_snake_case)] - let MipLevels = (info.range.levels.end - info.range.levels.start) as _; - #[allow(non_snake_case)] - let FirstArraySlice = 
info.range.layers.start as _; - #[allow(non_snake_case)] - let ArraySize = (info.range.layers.end - info.range.layers.start) as _; - - match info.view_kind { - image::ViewKind::D1 => { - desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1D; - *unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_SRV { - MostDetailedMip, - MipLevels, - } - } - image::ViewKind::D1Array => { - desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1DARRAY; - *unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_SRV { - MostDetailedMip, - MipLevels, - FirstArraySlice, - ArraySize, - } - } - image::ViewKind::D2 => { - desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2D; - *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_SRV { - MostDetailedMip, - MipLevels, - } - } - image::ViewKind::D2Array => { - desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2DARRAY; - *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_SRV { - MostDetailedMip, - MipLevels, - FirstArraySlice, - ArraySize, - } - } - image::ViewKind::D3 => { - desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE3D; - *unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_SRV { - MostDetailedMip, - MipLevels, - } - } - image::ViewKind::Cube => { - desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBE; - *unsafe { desc.u.TextureCube_mut() } = d3d11::D3D11_TEXCUBE_SRV { - MostDetailedMip, - MipLevels, - } - } - image::ViewKind::CubeArray => { - desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBEARRAY; - *unsafe { desc.u.TextureCubeArray_mut() } = d3d11::D3D11_TEXCUBE_ARRAY_SRV { - MostDetailedMip, - MipLevels, - First2DArrayFace: FirstArraySlice, - NumCubes: ArraySize / 6, - } - } - } - - let mut srv = ptr::null_mut(); - let hr = unsafe { - self.raw.CreateShaderResourceView( - info.resource, - &desc, - &mut srv as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(srv) }) - } else { - 
Err(image::ViewError::Unsupported) - } - } - - fn view_image_as_unordered_access( - &self, - info: &ViewInfo, - ) -> Result, image::ViewError> { - let mut desc: d3d11::D3D11_UNORDERED_ACCESS_VIEW_DESC = unsafe { mem::zeroed() }; - desc.Format = info.format; - - #[allow(non_snake_case)] - let MipSlice = info.range.levels.start as _; - #[allow(non_snake_case)] - let FirstArraySlice = info.range.layers.start as _; - #[allow(non_snake_case)] - let ArraySize = (info.range.layers.end - info.range.layers.start) as _; - - match info.view_kind { - image::ViewKind::D1 => { - desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1D; - *unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_UAV { - MipSlice: info.range.levels.start as _, - } - } - image::ViewKind::D1Array => { - desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1DARRAY; - *unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_UAV { - MipSlice, - FirstArraySlice, - ArraySize, - } - } - image::ViewKind::D2 => { - desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2D; - *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_UAV { - MipSlice: info.range.levels.start as _, - } - } - image::ViewKind::D2Array => { - desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2DARRAY; - *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_UAV { - MipSlice, - FirstArraySlice, - ArraySize, - } - } - image::ViewKind::D3 => { - desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE3D; - *unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_UAV { - MipSlice, - FirstWSlice: FirstArraySlice, - WSize: ArraySize, - } - } - _ => unimplemented!(), - } - - let mut uav = ptr::null_mut(); - let hr = unsafe { - self.raw.CreateUnorderedAccessView( - info.resource, - &desc, - &mut uav as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(uav) }) - } else { - Err(image::ViewError::Unsupported) - } - } - - pub(crate) fn view_image_as_render_target( - 
&self, - info: &ViewInfo, - ) -> Result, image::ViewError> { - let mut desc: d3d11::D3D11_RENDER_TARGET_VIEW_DESC = unsafe { mem::zeroed() }; - desc.Format = info.format; - - #[allow(non_snake_case)] - let MipSlice = info.range.levels.start as _; - #[allow(non_snake_case)] - let FirstArraySlice = info.range.layers.start as _; - #[allow(non_snake_case)] - let ArraySize = (info.range.layers.end - info.range.layers.start) as _; - - match info.view_kind { - image::ViewKind::D1 => { - desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE1D; - *unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_RTV { MipSlice } - } - image::ViewKind::D1Array => { - desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE1DARRAY; - *unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_RTV { - MipSlice, - FirstArraySlice, - ArraySize, - } - } - image::ViewKind::D2 => { - desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2D; - *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_RTV { MipSlice } - } - image::ViewKind::D2Array => { - desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2DARRAY; - *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_RTV { - MipSlice, - FirstArraySlice, - ArraySize, - } - } - image::ViewKind::D3 => { - desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE3D; - *unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_RTV { - MipSlice, - FirstWSlice: FirstArraySlice, - WSize: ArraySize, - } - } - _ => unimplemented!(), - } - - let mut rtv = ptr::null_mut(); - let hr = unsafe { - self.raw.CreateRenderTargetView( - info.resource, - &desc, - &mut rtv as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(rtv) }) - } else { - Err(image::ViewError::Unsupported) - } - } - - fn view_image_as_depth_stencil( - &self, - info: &ViewInfo, - ) -> Result, image::ViewError> { - #![allow(non_snake_case)] - - let MipSlice = info.range.levels.start as _; - let FirstArraySlice = 
info.range.layers.start as _; - let ArraySize = (info.range.layers.end - info.range.layers.start) as _; - assert_eq!(info.range.levels.start + 1, info.range.levels.end); - assert!(info.range.layers.end <= info.kind.num_layers()); - - let mut desc: d3d11::D3D11_DEPTH_STENCIL_VIEW_DESC = unsafe { mem::zeroed() }; - desc.Format = info.format; - - match info.view_kind { - image::ViewKind::D2 => { - desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2D; - *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_DSV { MipSlice } - } - image::ViewKind::D2Array => { - desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2DARRAY; - *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_DSV { - MipSlice, - FirstArraySlice, - ArraySize, - } - } - _ => unimplemented!(), - } - - let mut dsv = ptr::null_mut(); - let hr = unsafe { - self.raw.CreateDepthStencilView( - info.resource, - &desc, - &mut dsv as *mut *mut _ as *mut *mut _, - ) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(dsv) }) - } else { - Err(image::ViewError::Unsupported) - } - } - - pub(crate) fn create_swapchain_impl( - &self, - config: &window::SwapchainConfig, - window_handle: HWND, - factory: ComPtr, - ) -> Result<(ComPtr, dxgiformat::DXGI_FORMAT), window::CreationError> { - // TODO: use IDXGIFactory2 for >=11.1 - // TODO: this function should be able to fail (Result)? - - debug!("{:#?}", config); - let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap(); - - let mut desc = DXGI_SWAP_CHAIN_DESC { - BufferDesc: dxgitype::DXGI_MODE_DESC { - Width: config.extent.width, - Height: config.extent.height, - // TODO: should this grab max value of all monitor hz? vsync - // will clamp to current monitor anyways? 
- RefreshRate: dxgitype::DXGI_RATIONAL { - Numerator: 1, - Denominator: 60, - }, - Format: non_srgb_format, - ScanlineOrdering: dxgitype::DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED, - Scaling: dxgitype::DXGI_MODE_SCALING_UNSPECIFIED, - }, - // TODO: msaa on backbuffer? - SampleDesc: dxgitype::DXGI_SAMPLE_DESC { - Count: 1, - Quality: 0, - }, - BufferUsage: dxgitype::DXGI_USAGE_RENDER_TARGET_OUTPUT - | dxgitype::DXGI_USAGE_SHADER_INPUT, - BufferCount: config.image_count, - OutputWindow: window_handle, - // TODO: - Windowed: TRUE, - // TODO: - SwapEffect: DXGI_SWAP_EFFECT_DISCARD, - Flags: 0, - }; - - let dxgi_swapchain = { - let mut swapchain: *mut IDXGISwapChain = ptr::null_mut(); - let hr = unsafe { - factory.CreateSwapChain( - self.raw.as_raw() as *mut _, - &mut desc as *mut _, - &mut swapchain as *mut *mut _ as *mut *mut _, - ) - }; - assert_eq!(hr, winerror::S_OK); - - unsafe { ComPtr::from_raw(swapchain) } - }; - Ok((dxgi_swapchain, non_srgb_format)) - } -} - -impl device::Device for Device { - unsafe fn allocate_memory( - &self, - mem_type: hal::MemoryTypeId, - size: u64, - ) -> Result { - let vec = Vec::with_capacity(size as usize); - Ok(Memory { - properties: self.memory_properties.memory_types[mem_type.0].properties, - size, - mapped_ptr: vec.as_ptr() as *mut _, - host_visible: Some(RefCell::new(vec)), - local_buffers: RefCell::new(Vec::new()), - _local_images: RefCell::new(Vec::new()), - }) - } - - unsafe fn create_command_pool( - &self, - _family: QueueFamilyId, - _create_flags: pool::CommandPoolCreateFlags, - ) -> Result { - // TODO: - Ok(CommandPool { - device: self.raw.clone(), - internal: self.internal.clone(), - }) - } - - unsafe fn destroy_command_pool(&self, _pool: CommandPool) { - // automatic - } - - unsafe fn create_render_pass<'a, IA, IS, ID>( - &self, - attachments: IA, - subpasses: IS, - _dependencies: ID, - ) -> Result - where - IA: IntoIterator, - IA::Item: Borrow, - IS: IntoIterator, - IS::Item: Borrow>, - ID: IntoIterator, - ID::Item: Borrow, 
- { - Ok(RenderPass { - attachments: attachments - .into_iter() - .map(|attachment| attachment.borrow().clone()) - .collect(), - subpasses: subpasses - .into_iter() - .map(|desc| { - let desc = desc.borrow(); - SubpassDesc { - color_attachments: desc - .colors - .iter() - .map(|color| color.borrow().clone()) - .collect(), - depth_stencil_attachment: desc.depth_stencil.map(|d| *d), - input_attachments: desc - .inputs - .iter() - .map(|input| input.borrow().clone()) - .collect(), - resolve_attachments: desc - .resolves - .iter() - .map(|resolve| resolve.borrow().clone()) - .collect(), - } - }) - .collect(), - }) - } - - unsafe fn create_pipeline_layout( - &self, - set_layouts: IS, - _push_constant_ranges: IR, - ) -> Result - where - IS: IntoIterator, - IS::Item: Borrow, - IR: IntoIterator, - IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, - { - let mut res_offsets = MultiStageData::>::default(); - let mut sets = Vec::new(); - for set_layout in set_layouts { - let layout = set_layout.borrow(); - sets.push(DescriptorSetInfo { - bindings: Arc::clone(&layout.bindings), - registers: res_offsets.advance(&layout.pool_mapping), - }); - }; - - //TODO: assert that res_offsets are within supported range - - Ok(PipelineLayout { - sets, - }) - } - - unsafe fn create_pipeline_cache( - &self, - _data: Option<&[u8]>, - ) -> Result<(), device::OutOfMemory> { - Ok(()) - } - - unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result, device::OutOfMemory> { - //empty - Ok(Vec::new()) - } - - unsafe fn destroy_pipeline_cache(&self, _: ()) { - //empty - } - - unsafe fn merge_pipeline_caches(&self, _: &(), _: I) -> Result<(), device::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<()>, - { - //empty - Ok(()) - } - - unsafe fn create_graphics_pipeline<'a>( - &self, - desc: &pso::GraphicsPipelineDesc<'a, Backend>, - _cache: Option<&()>, - ) -> Result { - let build_shader = |stage: pso::Stage, source: Option<&pso::EntryPoint<'a, Backend>>| { - let source = match source { - 
Some(src) => src, - None => return Ok(None), - }; - - Self::extract_entry_point(stage, source, desc.layout) - .map_err(|err| pso::CreationError::Shader(err)) - }; - - let vs = build_shader(pso::Stage::Vertex, Some(&desc.shaders.vertex))?.unwrap(); - let ps = build_shader(pso::Stage::Fragment, desc.shaders.fragment.as_ref())?; - let gs = build_shader(pso::Stage::Geometry, desc.shaders.geometry.as_ref())?; - let ds = build_shader(pso::Stage::Domain, desc.shaders.domain.as_ref())?; - let hs = build_shader(pso::Stage::Hull, desc.shaders.hull.as_ref())?; - - let layout = self.create_input_layout( - vs.clone(), - &desc.vertex_buffers, - &desc.attributes, - &desc.input_assembler, - )?; - let rasterizer_state = self.create_rasterizer_state(&desc.rasterizer)?; - let blend_state = self.create_blend_state(&desc.blender)?; - let depth_stencil_state = Some(self.create_depth_stencil_state(&desc.depth_stencil)?); - - let vs = self.create_vertex_shader(vs)?; - let ps = if let Some(blob) = ps { - Some(self.create_pixel_shader(blob)?) - } else { - None - }; - let gs = if let Some(blob) = gs { - Some(self.create_geometry_shader(blob)?) - } else { - None - }; - let ds = if let Some(blob) = ds { - Some(self.create_domain_shader(blob)?) - } else { - None - }; - let hs = if let Some(blob) = hs { - Some(self.create_hull_shader(blob)?) 
- } else { - None - }; - - Ok(GraphicsPipeline { - vs, - gs, - ds, - hs, - ps, - topology: layout.topology, - input_layout: layout.raw, - rasterizer_state, - blend_state, - depth_stencil_state, - baked_states: desc.baked_states.clone(), - required_bindings: layout.required_bindings, - max_vertex_bindings: layout.max_vertex_bindings, - strides: layout.vertex_strides, - }) - } - - unsafe fn create_compute_pipeline<'a>( - &self, - desc: &pso::ComputePipelineDesc<'a, Backend>, - _cache: Option<&()>, - ) -> Result { - let build_shader = |stage: pso::Stage, source: Option<&pso::EntryPoint<'a, Backend>>| { - let source = match source { - Some(src) => src, - None => return Ok(None), - }; - - Self::extract_entry_point(stage, source, desc.layout) - .map_err(|err| pso::CreationError::Shader(err)) - }; - - let cs = build_shader(pso::Stage::Compute, Some(&desc.shader))?.unwrap(); - let cs = self.create_compute_shader(cs)?; - - Ok(ComputePipeline { cs }) - } - - unsafe fn create_framebuffer( - &self, - _renderpass: &RenderPass, - attachments: I, - extent: image::Extent, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow, - { - Ok(Framebuffer { - attachments: attachments - .into_iter() - .map(|att| att.borrow().clone()) - .collect(), - layers: extent.depth as _, - }) - } - - unsafe fn create_shader_module( - &self, - raw_data: &[u32], - ) -> Result { - Ok(ShaderModule::Spirv(raw_data.into())) - } - - unsafe fn create_buffer( - &self, - size: u64, - usage: buffer::Usage, - ) -> Result { - use buffer::Usage; - - let mut bind = 0; - - if usage.contains(Usage::UNIFORM) { - bind |= d3d11::D3D11_BIND_CONSTANT_BUFFER; - } - if usage.contains(Usage::VERTEX) { - bind |= d3d11::D3D11_BIND_VERTEX_BUFFER; - } - if usage.contains(Usage::INDEX) { - bind |= d3d11::D3D11_BIND_INDEX_BUFFER; - } - - // TODO: >=11.1 - if usage.intersects(Usage::UNIFORM_TEXEL | Usage::STORAGE_TEXEL | Usage::TRANSFER_SRC) { - bind |= d3d11::D3D11_BIND_SHADER_RESOURCE; - } - - if 
usage.intersects(Usage::TRANSFER_DST | Usage::STORAGE) { - bind |= d3d11::D3D11_BIND_UNORDERED_ACCESS; - } - - // if `D3D11_BIND_CONSTANT_BUFFER` intersects with any other bind flag, we need to handle - // it by creating two buffers. one with `D3D11_BIND_CONSTANT_BUFFER` and one with the rest - let needs_disjoint_cb = bind & d3d11::D3D11_BIND_CONSTANT_BUFFER != 0 - && bind != d3d11::D3D11_BIND_CONSTANT_BUFFER; - - if needs_disjoint_cb { - bind ^= d3d11::D3D11_BIND_CONSTANT_BUFFER; - } - - fn up_align(x: u64, alignment: u64) -> u64 { - (x + alignment - 1) & !(alignment - 1) - } - - // constant buffer size need to be divisible by 16 - let size = if usage.contains(Usage::UNIFORM) { - up_align(size, 16) - } else { - up_align(size, 4) - }; - - Ok(Buffer { - internal: InternalBuffer { - raw: ptr::null_mut(), - disjoint_cb: if needs_disjoint_cb { - Some(ptr::null_mut()) - } else { - None - }, - srv: None, - uav: None, - usage, - }, - properties: memory::Properties::empty(), - bound_range: 0 .. 
0, - host_ptr: ptr::null_mut(), - bind, - requirements: memory::Requirements { - size, - alignment: 1, - type_mask: BUFFER_TYPE_MASK, - }, - }) - } - - unsafe fn get_buffer_requirements(&self, buffer: &Buffer) -> memory::Requirements { - buffer.requirements - } - - unsafe fn bind_buffer_memory( - &self, - memory: &Memory, - offset: u64, - buffer: &mut Buffer, - ) -> Result<(), device::BindError> { - debug!( - "usage={:?}, props={:b}", - buffer.internal.usage, memory.properties - ); - - #[allow(non_snake_case)] - let MiscFlags = if buffer.bind - & (d3d11::D3D11_BIND_SHADER_RESOURCE | d3d11::D3D11_BIND_UNORDERED_ACCESS) - != 0 - { - d3d11::D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS - } else { - 0 - }; - - let initial_data = memory - .host_visible - .as_ref() - .map(|p| d3d11::D3D11_SUBRESOURCE_DATA { - pSysMem: p.borrow().as_ptr().offset(offset as isize) as _, - SysMemPitch: 0, - SysMemSlicePitch: 0, - }); - - let raw = if memory.properties.contains(memory::Properties::DEVICE_LOCAL) { - // device local memory - let desc = d3d11::D3D11_BUFFER_DESC { - ByteWidth: buffer.requirements.size as _, - Usage: d3d11::D3D11_USAGE_DEFAULT, - BindFlags: buffer.bind, - CPUAccessFlags: 0, - MiscFlags, - StructureByteStride: if buffer - .internal - .usage - .contains(buffer::Usage::TRANSFER_SRC) - { - 4 - } else { - 0 - }, - }; - - let mut buffer: *mut d3d11::ID3D11Buffer = ptr::null_mut(); - let hr = self.raw.CreateBuffer( - &desc, - if let Some(data) = initial_data { - &data - } else { - ptr::null_mut() - }, - &mut buffer as *mut *mut _ as *mut *mut _, - ); - - if !winerror::SUCCEEDED(hr) { - return Err(device::BindError::WrongMemory); - } - - ComPtr::from_raw(buffer) - } else { - let desc = d3d11::D3D11_BUFFER_DESC { - ByteWidth: buffer.requirements.size as _, - // TODO: dynamic? 
- Usage: d3d11::D3D11_USAGE_DEFAULT, - BindFlags: buffer.bind, - CPUAccessFlags: 0, - MiscFlags, - StructureByteStride: if buffer - .internal - .usage - .contains(buffer::Usage::TRANSFER_SRC) - { - 4 - } else { - 0 - }, - }; - - let mut buffer: *mut d3d11::ID3D11Buffer = ptr::null_mut(); - let hr = self.raw.CreateBuffer( - &desc, - if let Some(data) = initial_data { - &data - } else { - ptr::null_mut() - }, - &mut buffer as *mut *mut _ as *mut *mut _, - ); - - if !winerror::SUCCEEDED(hr) { - return Err(device::BindError::WrongMemory); - } - - ComPtr::from_raw(buffer) - }; - - let disjoint_cb = if buffer.internal.disjoint_cb.is_some() { - let desc = d3d11::D3D11_BUFFER_DESC { - ByteWidth: buffer.requirements.size as _, - Usage: d3d11::D3D11_USAGE_DEFAULT, - BindFlags: d3d11::D3D11_BIND_CONSTANT_BUFFER, - CPUAccessFlags: 0, - MiscFlags: 0, - StructureByteStride: 0, - }; - - let mut buffer: *mut d3d11::ID3D11Buffer = ptr::null_mut(); - let hr = self.raw.CreateBuffer( - &desc, - if let Some(data) = initial_data { - &data - } else { - ptr::null_mut() - }, - &mut buffer as *mut *mut _ as *mut *mut _, - ); - - if !winerror::SUCCEEDED(hr) { - return Err(device::BindError::WrongMemory); - } - - Some(buffer) - } else { - None - }; - - let srv = if buffer.bind & d3d11::D3D11_BIND_SHADER_RESOURCE != 0 { - let mut desc = mem::zeroed::(); - desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS; - desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_BUFFEREX; - *desc.u.BufferEx_mut() = d3d11::D3D11_BUFFEREX_SRV { - FirstElement: 0, - // TODO: enforce alignment through HAL limits - NumElements: buffer.requirements.size as u32 / 4, - Flags: d3d11::D3D11_BUFFEREX_SRV_FLAG_RAW, - }; - - let mut srv = ptr::null_mut(); - let hr = self.raw.CreateShaderResourceView( - raw.as_raw() as *mut _, - &desc, - &mut srv as *mut *mut _ as *mut *mut _, - ); - - if !winerror::SUCCEEDED(hr) { - error!("CreateShaderResourceView failed: 0x{:x}", hr); - - return Err(device::BindError::WrongMemory); - } - - 
Some(srv) - } else { - None - }; - - let uav = if buffer.bind & d3d11::D3D11_BIND_UNORDERED_ACCESS != 0 { - let mut desc = mem::zeroed::(); - desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS; - desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_BUFFER; - *desc.u.Buffer_mut() = d3d11::D3D11_BUFFER_UAV { - FirstElement: 0, - NumElements: buffer.requirements.size as u32 / 4, - Flags: d3d11::D3D11_BUFFER_UAV_FLAG_RAW, - }; - - let mut uav = ptr::null_mut(); - let hr = self.raw.CreateUnorderedAccessView( - raw.as_raw() as *mut _, - &desc, - &mut uav as *mut *mut _ as *mut *mut _, - ); - - if !winerror::SUCCEEDED(hr) { - error!("CreateUnorderedAccessView failed: 0x{:x}", hr); - - return Err(device::BindError::WrongMemory); - } - - Some(uav) - } else { - None - }; - - let internal = InternalBuffer { - raw: raw.into_raw(), - disjoint_cb, - srv, - uav, - usage: buffer.internal.usage, - }; - let range = offset .. offset + buffer.requirements.size; - - memory.bind_buffer(range.clone(), internal.clone()); - - let host_ptr = if let Some(vec) = &memory.host_visible { - vec.borrow().as_ptr() as *mut _ - } else { - ptr::null_mut() - }; - - buffer.internal = internal; - buffer.properties = memory.properties; - buffer.host_ptr = host_ptr; - buffer.bound_range = range; - - Ok(()) - } - - unsafe fn create_buffer_view>( - &self, - _buffer: &Buffer, - _format: Option, - _range: R, - ) -> Result { - unimplemented!() - } - - unsafe fn create_image( - &self, - kind: image::Kind, - mip_levels: image::Level, - format: format::Format, - _tiling: image::Tiling, - usage: image::Usage, - view_caps: image::ViewCapabilities, - ) -> Result { - use image::Usage; - // - // TODO: create desc - - let surface_desc = format.base_format().0.desc(); - let bytes_per_texel = surface_desc.bits / 8; - let ext = kind.extent(); - let size = (ext.width * ext.height * ext.depth) as u64 * bytes_per_texel as u64; - let compressed = surface_desc.is_compressed(); - let depth = format.is_depth(); - - let mut bind = 0; - 
- if usage.intersects(Usage::TRANSFER_SRC | Usage::SAMPLED | Usage::STORAGE) { - bind |= d3d11::D3D11_BIND_SHADER_RESOURCE; - } - - // we cant get RTVs or UAVs on compressed & depth formats - if !compressed && !depth { - if usage.intersects(Usage::COLOR_ATTACHMENT | Usage::TRANSFER_DST) { - bind |= d3d11::D3D11_BIND_RENDER_TARGET; - } - - if usage.intersects(Usage::TRANSFER_DST | Usage::STORAGE) { - bind |= d3d11::D3D11_BIND_UNORDERED_ACCESS; - } - } - - if usage.contains(Usage::DEPTH_STENCIL_ATTACHMENT) { - bind |= d3d11::D3D11_BIND_DEPTH_STENCIL; - } - - debug!("{:b}", bind); - - Ok(Image { - internal: InternalImage { - raw: ptr::null_mut(), - copy_srv: None, - srv: None, - unordered_access_views: Vec::new(), - depth_stencil_views: Vec::new(), - render_target_views: Vec::new(), - }, - decomposed_format: conv::DecomposedDxgiFormat::UNKNOWN, - kind, - mip_levels, - format, - usage, - view_caps, - bind, - requirements: memory::Requirements { - size: size, - alignment: 1, - type_mask: 0x1, // device-local only - }, - }) - } - - unsafe fn get_image_requirements(&self, image: &Image) -> memory::Requirements { - image.requirements - } - - unsafe fn get_image_subresource_footprint( - &self, - _image: &Image, - _sub: image::Subresource, - ) -> image::SubresourceFootprint { - unimplemented!() - } - - unsafe fn bind_image_memory( - &self, - memory: &Memory, - offset: u64, - image: &mut Image, - ) -> Result<(), device::BindError> { - use image::Usage; - use memory::Properties; - - let base_format = image.format.base_format(); - let format_desc = base_format.0.desc(); - - let compressed = format_desc.is_compressed(); - let depth = image.format.is_depth(); - let stencil = image.format.is_stencil(); - - let (bind, usage, cpu) = if memory.properties == Properties::DEVICE_LOCAL { - (image.bind, d3d11::D3D11_USAGE_DEFAULT, 0) - } else if memory.properties - == (Properties::DEVICE_LOCAL | Properties::CPU_VISIBLE | Properties::CPU_CACHED) - { - ( - image.bind, - 
d3d11::D3D11_USAGE_DYNAMIC, - d3d11::D3D11_CPU_ACCESS_WRITE, - ) - } else if memory.properties == (Properties::CPU_VISIBLE | Properties::CPU_CACHED) { - ( - 0, - d3d11::D3D11_USAGE_STAGING, - d3d11::D3D11_CPU_ACCESS_READ | d3d11::D3D11_CPU_ACCESS_WRITE, - ) - } else { - unimplemented!() - }; - - let dxgi_format = conv::map_format(image.format).unwrap(); - let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(dxgi_format); - let bpp = format_desc.bits as u32 / 8; - - let (view_kind, resource) = match image.kind { - image::Kind::D1(width, layers) => { - let initial_data = - memory - .host_visible - .as_ref() - .map(|_p| d3d11::D3D11_SUBRESOURCE_DATA { - pSysMem: memory.mapped_ptr.offset(offset as isize) as _, - SysMemPitch: 0, - SysMemSlicePitch: 0, - }); - - let desc = d3d11::D3D11_TEXTURE1D_DESC { - Width: width, - MipLevels: image.mip_levels as _, - ArraySize: layers as _, - Format: decomposed.typeless, - Usage: usage, - BindFlags: bind, - CPUAccessFlags: cpu, - MiscFlags: 0, - }; - - let mut resource = ptr::null_mut(); - let hr = self.raw.CreateTexture1D( - &desc, - if let Some(data) = initial_data { - &data - } else { - ptr::null_mut() - }, - &mut resource as *mut *mut _ as *mut *mut _, - ); - - if !winerror::SUCCEEDED(hr) { - error!("CreateTexture1D failed: 0x{:x}", hr); - - return Err(device::BindError::WrongMemory); - } - - (image::ViewKind::D1Array, resource) - } - image::Kind::D2(width, height, layers, _) => { - let mut initial_datas = Vec::new(); - - for _layer in 0 .. layers { - for level in 0 .. image.mip_levels { - let width = image.kind.extent().at_level(level).width; - - // TODO: layer offset? 
- initial_datas.push(d3d11::D3D11_SUBRESOURCE_DATA { - pSysMem: memory.mapped_ptr.offset(offset as isize) as _, - SysMemPitch: width * bpp, - SysMemSlicePitch: 0, - }); - } - } - - let desc = d3d11::D3D11_TEXTURE2D_DESC { - Width: width, - Height: height, - MipLevels: image.mip_levels as _, - ArraySize: layers as _, - Format: decomposed.typeless, - SampleDesc: dxgitype::DXGI_SAMPLE_DESC { - Count: 1, - Quality: 0, - }, - Usage: usage, - BindFlags: bind, - CPUAccessFlags: cpu, - MiscFlags: if image.view_caps.contains(image::ViewCapabilities::KIND_CUBE) { - d3d11::D3D11_RESOURCE_MISC_TEXTURECUBE - } else { - 0 - }, - }; - - let mut resource = ptr::null_mut(); - let hr = self.raw.CreateTexture2D( - &desc, - if !depth { - initial_datas.as_ptr() - } else { - ptr::null_mut() - }, - &mut resource as *mut *mut _ as *mut *mut _, - ); - - if !winerror::SUCCEEDED(hr) { - error!("CreateTexture2D failed: 0x{:x}", hr); - - return Err(device::BindError::WrongMemory); - } - - (image::ViewKind::D2Array, resource) - } - image::Kind::D3(width, height, depth) => { - let initial_data = - memory - .host_visible - .as_ref() - .map(|_p| d3d11::D3D11_SUBRESOURCE_DATA { - pSysMem: memory.mapped_ptr.offset(offset as isize) as _, - SysMemPitch: width * bpp, - SysMemSlicePitch: width * height * bpp, - }); - - let desc = d3d11::D3D11_TEXTURE3D_DESC { - Width: width, - Height: height, - Depth: depth, - MipLevels: image.mip_levels as _, - Format: decomposed.typeless, - Usage: usage, - BindFlags: bind, - CPUAccessFlags: cpu, - MiscFlags: 0, - }; - - let mut resource = ptr::null_mut(); - let hr = self.raw.CreateTexture3D( - &desc, - if let Some(data) = initial_data { - &data - } else { - ptr::null_mut() - }, - &mut resource as *mut *mut _ as *mut *mut _, - ); - - if !winerror::SUCCEEDED(hr) { - error!("CreateTexture3D failed: 0x{:x}", hr); - - return Err(device::BindError::WrongMemory); - } - - (image::ViewKind::D3, resource) - } - }; - - let mut unordered_access_views = Vec::new(); - - if 
image.usage.contains(Usage::TRANSFER_DST) && !compressed && !depth { - for mip in 0 .. image.mip_levels { - let view = ViewInfo { - resource: resource, - kind: image.kind, - caps: image::ViewCapabilities::empty(), - view_kind, - // TODO: we should be using `uav_format` rather than `copy_uav_format`, and share - // the UAVs when the formats are identical - format: decomposed.copy_uav.unwrap(), - range: image::SubresourceRange { - aspects: format::Aspects::COLOR, - levels: mip .. (mip + 1), - layers: 0 .. image.kind.num_layers(), - }, - }; - - unordered_access_views.push( - self.view_image_as_unordered_access(&view) - .map_err(|_| device::BindError::WrongMemory)?, - ); - } - } - - let (copy_srv, srv) = if image.usage.contains(image::Usage::TRANSFER_SRC) { - let mut view = ViewInfo { - resource: resource, - kind: image.kind, - caps: image::ViewCapabilities::empty(), - view_kind, - format: decomposed.copy_srv.unwrap(), - range: image::SubresourceRange { - aspects: format::Aspects::COLOR, - levels: 0 .. image.mip_levels, - layers: 0 .. image.kind.num_layers(), - }, - }; - - let copy_srv = if !compressed { - Some( - self.view_image_as_shader_resource(&view) - .map_err(|_| device::BindError::WrongMemory)?, - ) - } else { - None - }; - - view.format = decomposed.srv.unwrap(); - - let srv = if !depth && !stencil { - Some( - self.view_image_as_shader_resource(&view) - .map_err(|_| device::BindError::WrongMemory)?, - ) - } else { - None - }; - - (copy_srv, srv) - } else { - (None, None) - }; - - let mut render_target_views = Vec::new(); - - if (image.usage.contains(image::Usage::COLOR_ATTACHMENT) - || image.usage.contains(image::Usage::TRANSFER_DST)) - && !compressed - && !depth - { - for layer in 0 .. image.kind.num_layers() { - for mip in 0 .. 
image.mip_levels { - let view = ViewInfo { - resource: resource, - kind: image.kind, - caps: image::ViewCapabilities::empty(), - view_kind, - format: decomposed.rtv.unwrap(), - range: image::SubresourceRange { - aspects: format::Aspects::COLOR, - levels: mip .. (mip + 1), - layers: layer .. (layer + 1), - }, - }; - - render_target_views.push( - self.view_image_as_render_target(&view) - .map_err(|_| device::BindError::WrongMemory)?, - ); - } - } - }; - - let mut depth_stencil_views = Vec::new(); - - if depth { - for layer in 0 .. image.kind.num_layers() { - for mip in 0 .. image.mip_levels { - let view = ViewInfo { - resource: resource, - kind: image.kind, - caps: image::ViewCapabilities::empty(), - view_kind, - format: decomposed.dsv.unwrap(), - range: image::SubresourceRange { - aspects: format::Aspects::COLOR, - levels: mip .. (mip + 1), - layers: layer .. (layer + 1), - }, - }; - - depth_stencil_views.push( - self.view_image_as_depth_stencil(&view) - .map_err(|_| device::BindError::WrongMemory)?, - ); - } - } - } - - let internal = InternalImage { - raw: resource, - copy_srv, - srv, - unordered_access_views, - depth_stencil_views, - render_target_views, - }; - - image.decomposed_format = decomposed; - image.internal = internal; - - Ok(()) - } - - unsafe fn create_image_view( - &self, - image: &Image, - view_kind: image::ViewKind, - format: format::Format, - _swizzle: format::Swizzle, - range: image::SubresourceRange, - ) -> Result { - let is_array = image.kind.num_layers() > 1; - - let info = ViewInfo { - resource: image.internal.raw, - kind: image.kind, - caps: image.view_caps, - // D3D11 doesn't allow looking at a single slice of an array as a non-array - view_kind: if is_array && view_kind == image::ViewKind::D2 { - image::ViewKind::D2Array - } else if is_array && view_kind == image::ViewKind::D1 { - image::ViewKind::D1Array - } else { - view_kind - }, - format: conv::map_format(format).ok_or(image::ViewError::BadFormat(format))?, - range, - }; - - let 
srv_info = ViewInfo { - format: conv::viewable_format(info.format), - ..info.clone() - }; - - Ok(ImageView { - format, - srv_handle: if image.usage.intersects(image::Usage::SAMPLED) { - Some(self.view_image_as_shader_resource(&srv_info)?) - } else { - None - }, - rtv_handle: if image.usage.contains(image::Usage::COLOR_ATTACHMENT) { - Some(self.view_image_as_render_target(&info)?) - } else { - None - }, - uav_handle: if image.usage.contains(image::Usage::STORAGE) { - Some(self.view_image_as_unordered_access(&info)?) - } else { - None - }, - dsv_handle: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT) { - Some(self.view_image_as_depth_stencil(&info)?) - } else { - None - }, - }) - } - - unsafe fn create_sampler( - &self, - info: &image::SamplerDesc, - ) -> Result { - assert!(info.normalized); - - let op = match info.comparison { - Some(_) => d3d11::D3D11_FILTER_REDUCTION_TYPE_COMPARISON, - None => d3d11::D3D11_FILTER_REDUCTION_TYPE_STANDARD, - }; - - let desc = d3d11::D3D11_SAMPLER_DESC { - Filter: conv::map_filter( - info.min_filter, - info.mag_filter, - info.mip_filter, - op, - info.anisotropic, - ), - AddressU: conv::map_wrapping(info.wrap_mode.0), - AddressV: conv::map_wrapping(info.wrap_mode.1), - AddressW: conv::map_wrapping(info.wrap_mode.2), - MipLODBias: info.lod_bias.0, - MaxAnisotropy: match info.anisotropic { - image::Anisotropic::Off => 0, - image::Anisotropic::On(aniso) => aniso as _, - }, - ComparisonFunc: info.comparison.map_or(0, |comp| conv::map_comparison(comp)), - BorderColor: info.border.into(), - MinLOD: info.lod_range.start.0, - MaxLOD: info.lod_range.end.0, - }; - - let mut sampler = ptr::null_mut(); - let hr = self - .raw - .CreateSamplerState(&desc, &mut sampler as *mut *mut _ as *mut *mut _); - - assert_eq!(true, winerror::SUCCEEDED(hr)); - - Ok(Sampler { - sampler_handle: ComPtr::from_raw(sampler), - }) - } - - unsafe fn create_descriptor_pool( - &self, - _max_sets: usize, - ranges: I, - _flags: 
pso::DescriptorPoolCreateFlags, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow, - { - let mut total = RegisterData::default(); - for range in ranges { - let r = range.borrow(); - let content = DescriptorContent::from(r.ty); - total.add_content_many(content, r.count as DescriptorIndex); - } - - let max_stages = 6; - let count = total.sum() * max_stages; - Ok(DescriptorPool::with_capacity(count)) - } - - unsafe fn create_descriptor_set_layout( - &self, - layout_bindings: I, - _immutable_samplers: J, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow, - { - let mut total = MultiStageData::>::default(); - let mut bindings = layout_bindings - .into_iter() - .map(|b| b.borrow().clone()) - .collect::>(); - - for binding in bindings.iter() { - let content = DescriptorContent::from(binding.ty); - total.add_content(content, binding.stage_flags); - } - - bindings.sort_by_key(|a| a.binding); - - let accum = total.map_register(|count| RegisterAccumulator { - res_index: *count as ResourceIndex, - }); - - Ok(DescriptorSetLayout { - bindings: Arc::new(bindings), - pool_mapping: accum.to_mapping(), - }) - } - - unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) - where - I: IntoIterator>, - J: IntoIterator, - J::Item: Borrow>, - { - for write in write_iter { - let mut mapping = write.set.layout.pool_mapping - .map_register(|mapping| mapping.offset); - let binding_start = write.set.layout.bindings - .iter() - .position(|binding| binding.binding == write.binding) - .unwrap(); - for binding in &write.set.layout.bindings[.. binding_start] { - let content = DescriptorContent::from(binding.ty); - mapping.add_content(content, binding.stage_flags); - } - - for (binding, descriptor) in write.set.layout.bindings[binding_start ..] 
- .iter() - .zip(write.descriptors) - { - let handles = match *descriptor.borrow() { - pso::Descriptor::Buffer(buffer, ref _range) => RegisterData { - c: match buffer.internal.disjoint_cb { - Some(dj_buf) => dj_buf as *mut _, - None => buffer.internal.raw as *mut _, - }, - t: buffer.internal.srv.map_or(ptr::null_mut(), |p| p as *mut _), - u: buffer.internal.uav.map_or(ptr::null_mut(), |p| p as *mut _), - s: ptr::null_mut(), - }, - pso::Descriptor::Image(image, _layout) => RegisterData { - c: ptr::null_mut(), - t: image.srv_handle.clone().map_or(ptr::null_mut(), |h| h.as_raw() as *mut _), - u: image.uav_handle.clone().map_or(ptr::null_mut(), |h| h.as_raw() as *mut _), - s: ptr::null_mut(), - }, - pso::Descriptor::Sampler(sampler) => RegisterData { - c: ptr::null_mut(), - t: ptr::null_mut(), - u: ptr::null_mut(), - s: sampler.sampler_handle.as_raw() as *mut _, - }, - pso::Descriptor::CombinedImageSampler(image, _layout, sampler) => RegisterData { - c: ptr::null_mut(), - t: image.srv_handle.clone().map_or(ptr::null_mut(), |h| h.as_raw() as *mut _), - u: image.uav_handle.clone().map_or(ptr::null_mut(), |h| h.as_raw() as *mut _), - s: sampler.sampler_handle.as_raw() as *mut _, - }, - pso::Descriptor::UniformTexelBuffer(_buffer_view) => unimplemented!(), - pso::Descriptor::StorageTexelBuffer(_buffer_view) => unimplemented!(), - }; - - let content = DescriptorContent::from(binding.ty); - if content.contains(DescriptorContent::CBV) { - let offsets = mapping.map_other(|map| map.c); - write.set.assign_stages(&offsets, binding.stage_flags, handles.c); - }; - if content.contains(DescriptorContent::SRV) { - let offsets = mapping.map_other(|map| map.t); - write.set.assign_stages(&offsets, binding.stage_flags, handles.t); - }; - if content.contains(DescriptorContent::UAV) { - let offsets = mapping.map_other(|map| map.u); - write.set.assign_stages(&offsets, binding.stage_flags, handles.u); - }; - if content.contains(DescriptorContent::SAMPLER) { - let offsets = 
mapping.map_other(|map| map.s); - write.set.assign_stages(&offsets, binding.stage_flags, handles.s); - }; - - mapping.add_content(content, binding.stage_flags); - } - } - } - - unsafe fn copy_descriptor_sets<'a, I>(&self, copy_iter: I) - where - I: IntoIterator, - I::Item: Borrow>, - { - for copy in copy_iter { - let _copy = copy.borrow(); - //TODO - /* - for offset in 0 .. copy.count { - let (dst_ty, dst_handle_offset, dst_second_handle_offset) = copy - .dst_set - .get_handle_offset(copy.dst_binding + offset as u32); - let (src_ty, src_handle_offset, src_second_handle_offset) = copy - .src_set - .get_handle_offset(copy.src_binding + offset as u32); - assert_eq!(dst_ty, src_ty); - - let dst_handle = copy.dst_set.handles.offset(dst_handle_offset as isize); - let src_handle = copy.dst_set.handles.offset(src_handle_offset as isize); - - match dst_ty { - pso::DescriptorType::CombinedImageSampler => { - let dst_second_handle = copy - .dst_set - .handles - .offset(dst_second_handle_offset as isize); - let src_second_handle = copy - .dst_set - .handles - .offset(src_second_handle_offset as isize); - - *dst_handle = *src_handle; - *dst_second_handle = *src_second_handle; - } - _ => *dst_handle = *src_handle, - } - }*/ - } - } - - unsafe fn map_memory(&self, memory: &Memory, range: R) -> Result<*mut u8, device::MapError> - where - R: RangeArg, - { - assert_eq!(memory.host_visible.is_some(), true); - - Ok(memory - .mapped_ptr - .offset(*range.start().unwrap_or(&0) as isize)) - } - - unsafe fn unmap_memory(&self, memory: &Memory) { - assert_eq!(memory.host_visible.is_some(), true); - } - - unsafe fn flush_mapped_memory_ranges<'a, I, R>( - &self, - ranges: I, - ) -> Result<(), device::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<(&'a Memory, R)>, - R: RangeArg, - { - let _scope = debug_scope!(&self.context, "FlushMappedRanges"); - - // go through every range we wrote to - for range in ranges.into_iter() { - let &(memory, ref range) = range.borrow(); - let range = 
memory.resolve(range); - - let _scope = debug_scope!(&self.context, "Range({:?})", range); - memory.flush(&self.context, range); - } - - Ok(()) - } - - unsafe fn invalidate_mapped_memory_ranges<'a, I, R>( - &self, - ranges: I, - ) -> Result<(), device::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<(&'a Memory, R)>, - R: RangeArg, - { - let _scope = debug_scope!(&self.context, "InvalidateMappedRanges"); - - // go through every range we want to read from - for range in ranges.into_iter() { - let &(memory, ref range) = range.borrow(); - let range = *range.start().unwrap_or(&0) .. *range.end().unwrap_or(&memory.size); - - let _scope = debug_scope!(&self.context, "Range({:?})", range); - memory.invalidate( - &self.context, - range, - self.internal.working_buffer.clone(), - self.internal.working_buffer_size, - ); - } - - Ok(()) - } - - fn create_semaphore(&self) -> Result { - // TODO: - Ok(Semaphore) - } - - fn create_fence(&self, signalled: bool) -> Result { - Ok(Arc::new(RawFence { - mutex: Mutex::new(signalled), - condvar: Condvar::new(), - })) - } - - unsafe fn reset_fence(&self, fence: &Fence) -> Result<(), device::OutOfMemory> { - *fence.mutex.lock() = false; - Ok(()) - } - - unsafe fn wait_for_fence( - &self, - fence: &Fence, - timeout_ns: u64, - ) -> Result { - use std::time::{Duration, Instant}; - - debug!("wait_for_fence {:?} for {} ns", fence, timeout_ns); - let mut guard = fence.mutex.lock(); - match timeout_ns { - 0 => Ok(*guard), - 0xFFFFFFFFFFFFFFFF => { - while !*guard { - fence.condvar.wait(&mut guard); - } - Ok(true) - } - _ => { - let total = Duration::from_nanos(timeout_ns as u64); - let now = Instant::now(); - while !*guard { - let duration = match total.checked_sub(now.elapsed()) { - Some(dur) => dur, - None => return Ok(false), - }; - let result = fence.condvar.wait_for(&mut guard, duration); - if result.timed_out() { - return Ok(false); - } - } - Ok(true) - } - } - } - - unsafe fn get_fence_status(&self, fence: &Fence) -> Result { - 
Ok(*fence.mutex.lock()) - } - - fn create_event(&self) -> Result<(), device::OutOfMemory> { - unimplemented!() - } - - unsafe fn get_event_status(&self, _event: &()) -> Result { - unimplemented!() - } - - unsafe fn set_event(&self, _event: &()) -> Result<(), device::OutOfMemory> { - unimplemented!() - } - - unsafe fn reset_event(&self, _event: &()) -> Result<(), device::OutOfMemory> { - unimplemented!() - } - - unsafe fn free_memory(&self, memory: Memory) { - for (_range, internal) in memory.local_buffers.borrow_mut().iter() { - (*internal.raw).Release(); - if let Some(srv) = internal.srv { - (*srv).Release(); - } - } - } - - unsafe fn create_query_pool( - &self, - _query_ty: query::Type, - _count: query::Id, - ) -> Result { - unimplemented!() - } - - unsafe fn destroy_query_pool(&self, _pool: QueryPool) { - unimplemented!() - } - - unsafe fn get_query_pool_results( - &self, - _pool: &QueryPool, - _queries: Range, - _data: &mut [u8], - _stride: buffer::Offset, - _flags: query::ResultFlags, - ) -> Result { - unimplemented!() - } - - unsafe fn destroy_shader_module(&self, _shader_lib: ShaderModule) {} - - unsafe fn destroy_render_pass(&self, _rp: RenderPass) { - //unimplemented!() - } - - unsafe fn destroy_pipeline_layout(&self, _layout: PipelineLayout) { - //unimplemented!() - } - - unsafe fn destroy_graphics_pipeline(&self, _pipeline: GraphicsPipeline) {} - - unsafe fn destroy_compute_pipeline(&self, _pipeline: ComputePipeline) {} - - unsafe fn destroy_framebuffer(&self, _fb: Framebuffer) {} - - unsafe fn destroy_buffer(&self, _buffer: Buffer) {} - - unsafe fn destroy_buffer_view(&self, _view: BufferView) { - unimplemented!() - } - - unsafe fn destroy_image(&self, _image: Image) { - // TODO: - // unimplemented!() - } - - unsafe fn destroy_image_view(&self, _view: ImageView) { - //unimplemented!() - } - - unsafe fn destroy_sampler(&self, _sampler: Sampler) {} - - unsafe fn destroy_descriptor_pool(&self, _pool: DescriptorPool) { - //unimplemented!() - } - - unsafe fn 
destroy_descriptor_set_layout(&self, _layout: DescriptorSetLayout) { - //unimplemented!() - } - - unsafe fn destroy_fence(&self, _fence: Fence) { - // unimplemented!() - } - - unsafe fn destroy_semaphore(&self, _semaphore: Semaphore) { - //unimplemented!() - } - - unsafe fn destroy_event(&self, _event: ()) { - //unimplemented!() - } - - unsafe fn create_swapchain( - &self, - surface: &mut Surface, - config: window::SwapchainConfig, - _old_swapchain: Option, - ) -> Result<(Swapchain, Vec), window::CreationError> { - let (dxgi_swapchain, non_srgb_format) = - self.create_swapchain_impl(&config, surface.wnd_handle, surface.factory.clone())?; - - let resource = { - let mut resource: *mut d3d11::ID3D11Resource = ptr::null_mut(); - assert_eq!( - winerror::S_OK, - dxgi_swapchain.GetBuffer( - 0 as _, - &d3d11::ID3D11Resource::uuidof(), - &mut resource as *mut *mut _ as *mut *mut _, - ) - ); - resource - }; - - let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1); - let decomposed = - conv::DecomposedDxgiFormat::from_dxgi_format(conv::map_format(config.format).unwrap()); - - let mut view_info = ViewInfo { - resource, - kind, - caps: image::ViewCapabilities::empty(), - view_kind: image::ViewKind::D2, - format: decomposed.rtv.unwrap(), - // TODO: can these ever differ for backbuffer? - range: image::SubresourceRange { - aspects: format::Aspects::COLOR, - levels: 0 .. 1, - layers: 0 .. 1, - }, - }; - let rtv = self.view_image_as_render_target(&view_info).unwrap(); - - view_info.format = non_srgb_format; - view_info.view_kind = image::ViewKind::D2Array; - let copy_srv = self.view_image_as_shader_resource(&view_info).unwrap(); - - let images = (0 .. config.image_count) - .map(|_i| { - // returning the 0th buffer for all images seems like the right thing to do. 
we can - // only get write access to the first buffer in the case of `_SEQUENTIAL` flip model, - // and read access to the rest - let internal = InternalImage { - raw: resource, - copy_srv: Some(copy_srv.clone()), - srv: None, - unordered_access_views: Vec::new(), - depth_stencil_views: Vec::new(), - render_target_views: vec![rtv.clone()], - }; - - Image { - kind, - usage: config.image_usage, - format: config.format, - view_caps: image::ViewCapabilities::empty(), - // NOTE: not the actual format of the backbuffer(s) - decomposed_format: decomposed.clone(), - mip_levels: 1, - internal, - bind: 0, // TODO: ? - requirements: memory::Requirements { - // values don't really matter - size: 0, - alignment: 0, - type_mask: 0, - }, - } - }) - .collect(); - - Ok((Swapchain { dxgi_swapchain }, images)) - } - - unsafe fn destroy_swapchain(&self, _swapchain: Swapchain) { - // automatic - } - - fn wait_idle(&self) -> Result<(), device::OutOfMemory> { - Ok(()) - // unimplemented!() - } - - unsafe fn set_image_name(&self, _image: &mut Image, _name: &str) { - // TODO - } - - unsafe fn set_buffer_name(&self, _buffer: &mut Buffer, _name: &str) { - // TODO - } - - unsafe fn set_command_buffer_name(&self, _command_buffer: &mut CommandBuffer, _name: &str) { - // TODO - } - - unsafe fn set_semaphore_name(&self, _semaphore: &mut Semaphore, _name: &str) { - // TODO - } - - unsafe fn set_fence_name(&self, _fence: &mut Fence, _name: &str) { - // TODO - } - - unsafe fn set_framebuffer_name(&self, _framebuffer: &mut Framebuffer, _name: &str) { - // TODO - } - - unsafe fn set_render_pass_name(&self, _render_pass: &mut RenderPass, _name: &str) { - // TODO - } - - unsafe fn set_descriptor_set_name(&self, _descriptor_set: &mut DescriptorSet, _name: &str) { - // TODO - } - - unsafe fn set_descriptor_set_layout_name( - &self, - _descriptor_set_layout: &mut DescriptorSetLayout, - _name: &str, - ) { - // TODO - } -} +use hal::{ + adapter::MemoryProperties, + buffer, + device, + format, + image, + 
memory, + pass, + pool, + pso, + pso::VertexInputRate, + query, + queue::QueueFamilyId, + window, +}; + +use winapi::{ + shared::{ + dxgi::{IDXGIFactory, IDXGISwapChain, DXGI_SWAP_CHAIN_DESC, DXGI_SWAP_EFFECT_DISCARD}, + dxgiformat, + dxgitype, + minwindef::TRUE, + windef::HWND, + winerror, + }, + um::{d3d11, d3d11sdklayers, d3dcommon}, + Interface as _, +}; + +use wio::com::ComPtr; + +use std::{borrow::Borrow, cell::RefCell, fmt, mem, ops::Range, ptr, sync::Arc}; + +use parking_lot::{Condvar, Mutex}; + +use crate::{ + conv, + internal, + shader, + Backend, + Buffer, + BufferView, + CommandBuffer, + CommandPool, + ComputePipeline, + DescriptorContent, + DescriptorIndex, + DescriptorPool, + DescriptorSet, + DescriptorSetInfo, + DescriptorSetLayout, + Fence, + Framebuffer, + GraphicsPipeline, + Image, + ImageView, + InternalBuffer, + InternalImage, + Memory, + MultiStageData, + PipelineLayout, + QueryPool, + RawFence, + RegisterAccumulator, + RegisterData, + RenderPass, + ResourceIndex, + Sampler, + Semaphore, + ShaderModule, + SubpassDesc, + Surface, + Swapchain, + ViewInfo, +}; + +//TODO: expose coherent type 0x2 when it's properly supported +const BUFFER_TYPE_MASK: u64 = 0x1 | 0x4; + +struct InputLayout { + raw: ComPtr, + required_bindings: u32, + max_vertex_bindings: u32, + topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY, + vertex_strides: Vec, +} + +pub struct Device { + raw: ComPtr, + pub(crate) context: ComPtr, + features: hal::Features, + memory_properties: MemoryProperties, + pub(crate) internal: internal::Internal, +} + +impl fmt::Debug for Device { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Device") + } +} + +impl Drop for Device { + fn drop(&mut self) { + if let Ok(debug) = self.raw.cast::() { + unsafe { + debug.ReportLiveDeviceObjects(d3d11sdklayers::D3D11_RLDO_DETAIL); + } + } + } +} + +unsafe impl Send for Device {} +unsafe impl Sync for Device {} + +impl Device { + pub fn new( + device: ComPtr, + context: ComPtr, + features: 
hal::Features, + memory_properties: MemoryProperties, + ) -> Self { + Device { + raw: device.clone(), + context, + features, + memory_properties, + internal: internal::Internal::new(&device), + } + } + + pub fn as_raw(&self) -> *mut d3d11::ID3D11Device { + self.raw.as_raw() + } + + fn create_rasterizer_state( + &self, + rasterizer_desc: &pso::Rasterizer, + ) -> Result, pso::CreationError> { + let mut rasterizer = ptr::null_mut(); + let desc = conv::map_rasterizer_desc(rasterizer_desc); + + let hr = unsafe { + self.raw + .CreateRasterizerState(&desc, &mut rasterizer as *mut *mut _ as *mut *mut _) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(rasterizer) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_blend_state( + &self, + blend_desc: &pso::BlendDesc, + ) -> Result, pso::CreationError> { + let mut blend = ptr::null_mut(); + let desc = conv::map_blend_desc(blend_desc); + + let hr = unsafe { + self.raw + .CreateBlendState(&desc, &mut blend as *mut *mut _ as *mut *mut _) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(blend) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_depth_stencil_state( + &self, + depth_desc: &pso::DepthStencilDesc, + ) -> Result< + ( + ComPtr, + pso::State, + ), + pso::CreationError, + > { + let mut depth = ptr::null_mut(); + let (desc, stencil_ref) = conv::map_depth_stencil_desc(depth_desc); + + let hr = unsafe { + self.raw + .CreateDepthStencilState(&desc, &mut depth as *mut *mut _ as *mut *mut _) + }; + + if winerror::SUCCEEDED(hr) { + Ok((unsafe { ComPtr::from_raw(depth) }, stencil_ref)) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_input_layout( + &self, + vs: ComPtr, + vertex_buffers: &[pso::VertexBufferDesc], + attributes: &[pso::AttributeDesc], + input_assembler: &pso::InputAssemblerDesc, + ) -> Result { + let mut layout = ptr::null_mut(); + + let mut vertex_strides = Vec::new(); + let mut required_bindings = 0u32; + let mut 
max_vertex_bindings = 0u32; + for buffer in vertex_buffers { + required_bindings |= 1 << buffer.binding as u32; + max_vertex_bindings = max_vertex_bindings.max(1u32 + buffer.binding as u32); + + while vertex_strides.len() <= buffer.binding as usize { + vertex_strides.push(0); + } + + vertex_strides[buffer.binding as usize] = buffer.stride; + } + + let input_elements = attributes + .iter() + .filter_map(|attrib| { + let buffer_desc = match vertex_buffers + .iter() + .find(|buffer_desc| buffer_desc.binding == attrib.binding) + { + Some(buffer_desc) => buffer_desc, + None => { + // TODO: + // L + // error!("Couldn't find associated vertex buffer description {:?}", attrib.binding); + return Some(Err(pso::CreationError::Other)); + } + }; + + let (slot_class, step_rate) = match buffer_desc.rate { + VertexInputRate::Vertex => (d3d11::D3D11_INPUT_PER_VERTEX_DATA, 0), + VertexInputRate::Instance(divisor) => { + (d3d11::D3D11_INPUT_PER_INSTANCE_DATA, divisor) + } + }; + let format = attrib.element.format; + + Some(Ok(d3d11::D3D11_INPUT_ELEMENT_DESC { + SemanticName: "TEXCOORD\0".as_ptr() as *const _, // Semantic name used by SPIRV-Cross + SemanticIndex: attrib.location, + Format: match conv::map_format(format) { + Some(fm) => fm, + None => { + // TODO: + // error!("Unable to find DXGI format for {:?}", format); + return Some(Err(pso::CreationError::Other)); + } + }, + InputSlot: attrib.binding as _, + AlignedByteOffset: attrib.element.offset, + InputSlotClass: slot_class, + InstanceDataStepRate: step_rate as _, + })) + }) + .collect::, _>>()?; + + let hr = unsafe { + self.raw.CreateInputLayout( + input_elements.as_ptr(), + input_elements.len() as _, + vs.GetBufferPointer(), + vs.GetBufferSize(), + &mut layout as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + let topology = conv::map_topology(input_assembler); + + Ok(InputLayout { + raw: unsafe { ComPtr::from_raw(layout) }, + required_bindings, + max_vertex_bindings, + topology, + vertex_strides, + }) 
+ } else { + Err(pso::CreationError::Other) + } + } + + fn create_vertex_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut vs = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateVertexShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut vs as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(vs) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_pixel_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut ps = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreatePixelShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut ps as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(ps) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_geometry_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut gs = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateGeometryShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut gs as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(gs) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_hull_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut hs = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateHullShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut hs as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(hs) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_domain_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut ds = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateDomainShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut ds as *mut *mut _ as 
*mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(ds) }) + } else { + Err(pso::CreationError::Other) + } + } + + fn create_compute_shader( + &self, + blob: ComPtr, + ) -> Result, pso::CreationError> { + let mut cs = ptr::null_mut(); + + let hr = unsafe { + self.raw.CreateComputeShader( + blob.GetBufferPointer(), + blob.GetBufferSize(), + ptr::null_mut(), + &mut cs as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(cs) }) + } else { + Err(pso::CreationError::Other) + } + } + + // TODO: fix return type.. + fn extract_entry_point( + stage: pso::Stage, + source: &pso::EntryPoint, + layout: &PipelineLayout, + features: &hal::Features, + ) -> Result>, device::ShaderError> { + // TODO: entrypoint stuff + match *source.module { + ShaderModule::Dxbc(ref _shader) => { + unimplemented!() + + // Ok(Some(shader)) + } + ShaderModule::Spirv(ref raw_data) => Ok(shader::compile_spirv_entrypoint( + raw_data, stage, source, layout, features, + )?), + } + } + + fn view_image_as_shader_resource( + &self, + info: &ViewInfo, + ) -> Result, image::ViewCreationError> { + let mut desc: d3d11::D3D11_SHADER_RESOURCE_VIEW_DESC = unsafe { mem::zeroed() }; + desc.Format = info.format; + if desc.Format == dxgiformat::DXGI_FORMAT_D32_FLOAT_S8X24_UINT { + desc.Format = dxgiformat::DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS; + } + + #[allow(non_snake_case)] + let MostDetailedMip = info.range.levels.start as _; + #[allow(non_snake_case)] + let MipLevels = (info.range.levels.end - info.range.levels.start) as _; + #[allow(non_snake_case)] + let FirstArraySlice = info.range.layers.start as _; + #[allow(non_snake_case)] + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + + match info.view_kind { + image::ViewKind::D1 => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_SRV { + MostDetailedMip, + MipLevels, + } + } + 
image::ViewKind::D1Array => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_SRV { + MostDetailedMip, + MipLevels, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2 => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_SRV { + MostDetailedMip, + MipLevels, + } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_SRV { + MostDetailedMip, + MipLevels, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D3 => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE3D; + *unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_SRV { + MostDetailedMip, + MipLevels, + } + } + image::ViewKind::Cube => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBE; + *unsafe { desc.u.TextureCube_mut() } = d3d11::D3D11_TEXCUBE_SRV { + MostDetailedMip, + MipLevels, + } + } + image::ViewKind::CubeArray => { + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBEARRAY; + *unsafe { desc.u.TextureCubeArray_mut() } = d3d11::D3D11_TEXCUBE_ARRAY_SRV { + MostDetailedMip, + MipLevels, + First2DArrayFace: FirstArraySlice, + NumCubes: ArraySize / 6, + } + } + } + + let mut srv = ptr::null_mut(); + let hr = unsafe { + self.raw.CreateShaderResourceView( + info.resource, + &desc, + &mut srv as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(srv) }) + } else { + Err(image::ViewCreationError::Unsupported) + } + } + + fn view_image_as_unordered_access( + &self, + info: &ViewInfo, + ) -> Result, image::ViewCreationError> { + let mut desc: d3d11::D3D11_UNORDERED_ACCESS_VIEW_DESC = unsafe { mem::zeroed() }; + desc.Format = info.format; + + #[allow(non_snake_case)] + let MipSlice = info.range.levels.start as _; + 
#[allow(non_snake_case)] + let FirstArraySlice = info.range.layers.start as _; + #[allow(non_snake_case)] + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + + match info.view_kind { + image::ViewKind::D1 => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_UAV { + MipSlice: info.range.levels.start as _, + } + } + image::ViewKind::D1Array => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_UAV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2 => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_UAV { + MipSlice: info.range.levels.start as _, + } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_UAV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D3 => { + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE3D; + *unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_UAV { + MipSlice, + FirstWSlice: FirstArraySlice, + WSize: ArraySize, + } + } + _ => unimplemented!(), + } + + let mut uav = ptr::null_mut(); + let hr = unsafe { + self.raw.CreateUnorderedAccessView( + info.resource, + &desc, + &mut uav as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(uav) }) + } else { + Err(image::ViewCreationError::Unsupported) + } + } + + pub(crate) fn view_image_as_render_target( + &self, + info: &ViewInfo, + ) -> Result, image::ViewCreationError> { + let mut desc: d3d11::D3D11_RENDER_TARGET_VIEW_DESC = unsafe { mem::zeroed() }; + desc.Format = info.format; + + #[allow(non_snake_case)] + let MipSlice = info.range.levels.start as _; + #[allow(non_snake_case)] + let FirstArraySlice = info.range.layers.start 
as _; + #[allow(non_snake_case)] + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + + match info.view_kind { + image::ViewKind::D1 => { + desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE1D; + *unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_RTV { MipSlice } + } + image::ViewKind::D1Array => { + desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE1DARRAY; + *unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_RTV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D2 => { + desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_RTV { MipSlice } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_RTV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + image::ViewKind::D3 => { + desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE3D; + *unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_RTV { + MipSlice, + FirstWSlice: FirstArraySlice, + WSize: ArraySize, + } + } + _ => unimplemented!(), + } + + let mut rtv = ptr::null_mut(); + let hr = unsafe { + self.raw.CreateRenderTargetView( + info.resource, + &desc, + &mut rtv as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(rtv) }) + } else { + Err(image::ViewCreationError::Unsupported) + } + } + + fn view_image_as_depth_stencil( + &self, + info: &ViewInfo, + ) -> Result, image::ViewCreationError> { + #![allow(non_snake_case)] + + let MipSlice = info.range.levels.start as _; + let FirstArraySlice = info.range.layers.start as _; + let ArraySize = (info.range.layers.end - info.range.layers.start) as _; + assert_eq!(info.range.levels.start + 1, info.range.levels.end); + assert!(info.range.layers.end <= info.kind.num_layers()); + + let mut desc: d3d11::D3D11_DEPTH_STENCIL_VIEW_DESC = unsafe { mem::zeroed() }; + 
desc.Format = info.format; + + match info.view_kind { + image::ViewKind::D2 => { + desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2D; + *unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_DSV { MipSlice } + } + image::ViewKind::D2Array => { + desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2DARRAY; + *unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_DSV { + MipSlice, + FirstArraySlice, + ArraySize, + } + } + _ => unimplemented!(), + } + + let mut dsv = ptr::null_mut(); + let hr = unsafe { + self.raw.CreateDepthStencilView( + info.resource, + &desc, + &mut dsv as *mut *mut _ as *mut *mut _, + ) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(dsv) }) + } else { + Err(image::ViewCreationError::Unsupported) + } + } + + pub(crate) fn create_swapchain_impl( + &self, + config: &window::SwapchainConfig, + window_handle: HWND, + factory: ComPtr, + ) -> Result<(ComPtr, dxgiformat::DXGI_FORMAT), window::CreationError> { + // TODO: use IDXGIFactory2 for >=11.1 + // TODO: this function should be able to fail (Result)? + + debug!("{:#?}", config); + let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap(); + + let mut desc = DXGI_SWAP_CHAIN_DESC { + BufferDesc: dxgitype::DXGI_MODE_DESC { + Width: config.extent.width, + Height: config.extent.height, + // TODO: should this grab max value of all monitor hz? vsync + // will clamp to current monitor anyways? + RefreshRate: dxgitype::DXGI_RATIONAL { + Numerator: 1, + Denominator: 60, + }, + Format: non_srgb_format, + ScanlineOrdering: dxgitype::DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED, + Scaling: dxgitype::DXGI_MODE_SCALING_UNSPECIFIED, + }, + // TODO: msaa on backbuffer? 
+ SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: 1, + Quality: 0, + }, + BufferUsage: dxgitype::DXGI_USAGE_RENDER_TARGET_OUTPUT + | dxgitype::DXGI_USAGE_SHADER_INPUT, + BufferCount: config.image_count, + OutputWindow: window_handle, + // TODO: + Windowed: TRUE, + // TODO: + SwapEffect: DXGI_SWAP_EFFECT_DISCARD, + Flags: 0, + }; + + let dxgi_swapchain = { + let mut swapchain: *mut IDXGISwapChain = ptr::null_mut(); + let hr = unsafe { + factory.CreateSwapChain( + self.raw.as_raw() as *mut _, + &mut desc as *mut _, + &mut swapchain as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(hr, winerror::S_OK); + + unsafe { ComPtr::from_raw(swapchain) } + }; + Ok((dxgi_swapchain, non_srgb_format)) + } +} + +impl device::Device for Device { + unsafe fn allocate_memory( + &self, + mem_type: hal::MemoryTypeId, + size: u64, + ) -> Result { + let vec = Vec::with_capacity(size as usize); + Ok(Memory { + properties: self.memory_properties.memory_types[mem_type.0].properties, + size, + mapped_ptr: vec.as_ptr() as *mut _, + host_visible: Some(RefCell::new(vec)), + local_buffers: RefCell::new(Vec::new()), + _local_images: RefCell::new(Vec::new()), + }) + } + + unsafe fn create_command_pool( + &self, + _family: QueueFamilyId, + _create_flags: pool::CommandPoolCreateFlags, + ) -> Result { + // TODO: + Ok(CommandPool { + device: self.raw.clone(), + internal: self.internal.clone(), + }) + } + + unsafe fn destroy_command_pool(&self, _pool: CommandPool) { + // automatic + } + + unsafe fn create_render_pass<'a, IA, IS, ID>( + &self, + attachments: IA, + subpasses: IS, + _dependencies: ID, + ) -> Result + where + IA: IntoIterator, + IA::Item: Borrow, + IS: IntoIterator, + IS::Item: Borrow>, + ID: IntoIterator, + ID::Item: Borrow, + { + Ok(RenderPass { + attachments: attachments + .into_iter() + .map(|attachment| attachment.borrow().clone()) + .collect(), + subpasses: subpasses + .into_iter() + .map(|desc| { + let desc = desc.borrow(); + SubpassDesc { + color_attachments: desc + .colors + 
.iter() + .map(|color| color.borrow().clone()) + .collect(), + depth_stencil_attachment: desc.depth_stencil.map(|d| *d), + input_attachments: desc + .inputs + .iter() + .map(|input| input.borrow().clone()) + .collect(), + resolve_attachments: desc + .resolves + .iter() + .map(|resolve| resolve.borrow().clone()) + .collect(), + } + }) + .collect(), + }) + } + + unsafe fn create_pipeline_layout( + &self, + set_layouts: IS, + _push_constant_ranges: IR, + ) -> Result + where + IS: IntoIterator, + IS::Item: Borrow, + IR: IntoIterator, + IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, + { + let mut res_offsets = MultiStageData::>::default(); + let mut sets = Vec::new(); + for set_layout in set_layouts { + let layout = set_layout.borrow(); + sets.push(DescriptorSetInfo { + bindings: Arc::clone(&layout.bindings), + registers: res_offsets.advance(&layout.pool_mapping), + }); + } + + //TODO: assert that res_offsets are within supported range + + Ok(PipelineLayout { sets }) + } + + unsafe fn create_pipeline_cache( + &self, + _data: Option<&[u8]>, + ) -> Result<(), device::OutOfMemory> { + Ok(()) + } + + unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result, device::OutOfMemory> { + //empty + Ok(Vec::new()) + } + + unsafe fn destroy_pipeline_cache(&self, _: ()) { + //empty + } + + unsafe fn merge_pipeline_caches(&self, _: &(), _: I) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<()>, + { + //empty + Ok(()) + } + + unsafe fn create_graphics_pipeline<'a>( + &self, + desc: &pso::GraphicsPipelineDesc<'a, Backend>, + _cache: Option<&()>, + ) -> Result { + let features = &self.features; + let build_shader = |stage: pso::Stage, source: Option<&pso::EntryPoint<'a, Backend>>| { + let source = match source { + Some(src) => src, + None => return Ok(None), + }; + + Self::extract_entry_point(stage, source, desc.layout, features) + .map_err(|err| pso::CreationError::Shader(err)) + }; + + let vs = build_shader(pso::Stage::Vertex, 
Some(&desc.shaders.vertex))?.unwrap(); + let ps = build_shader(pso::Stage::Fragment, desc.shaders.fragment.as_ref())?; + let gs = build_shader(pso::Stage::Geometry, desc.shaders.geometry.as_ref())?; + let ds = build_shader(pso::Stage::Domain, desc.shaders.domain.as_ref())?; + let hs = build_shader(pso::Stage::Hull, desc.shaders.hull.as_ref())?; + + let layout = self.create_input_layout( + vs.clone(), + &desc.vertex_buffers, + &desc.attributes, + &desc.input_assembler, + )?; + let rasterizer_state = self.create_rasterizer_state(&desc.rasterizer)?; + let blend_state = self.create_blend_state(&desc.blender)?; + let depth_stencil_state = Some(self.create_depth_stencil_state(&desc.depth_stencil)?); + + let vs = self.create_vertex_shader(vs)?; + let ps = if let Some(blob) = ps { + Some(self.create_pixel_shader(blob)?) + } else { + None + }; + let gs = if let Some(blob) = gs { + Some(self.create_geometry_shader(blob)?) + } else { + None + }; + let ds = if let Some(blob) = ds { + Some(self.create_domain_shader(blob)?) + } else { + None + }; + let hs = if let Some(blob) = hs { + Some(self.create_hull_shader(blob)?) 
+ } else { + None + }; + + Ok(GraphicsPipeline { + vs, + gs, + ds, + hs, + ps, + topology: layout.topology, + input_layout: layout.raw, + rasterizer_state, + blend_state, + depth_stencil_state, + baked_states: desc.baked_states.clone(), + required_bindings: layout.required_bindings, + max_vertex_bindings: layout.max_vertex_bindings, + strides: layout.vertex_strides, + }) + } + + unsafe fn create_compute_pipeline<'a>( + &self, + desc: &pso::ComputePipelineDesc<'a, Backend>, + _cache: Option<&()>, + ) -> Result { + let features = &self.features; + let build_shader = |stage: pso::Stage, source: Option<&pso::EntryPoint<'a, Backend>>| { + let source = match source { + Some(src) => src, + None => return Ok(None), + }; + + Self::extract_entry_point(stage, source, desc.layout, features) + .map_err(|err| pso::CreationError::Shader(err)) + }; + + let cs = build_shader(pso::Stage::Compute, Some(&desc.shader))?.unwrap(); + let cs = self.create_compute_shader(cs)?; + + Ok(ComputePipeline { cs }) + } + + unsafe fn create_framebuffer( + &self, + _renderpass: &RenderPass, + attachments: I, + extent: image::Extent, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + Ok(Framebuffer { + attachments: attachments + .into_iter() + .map(|att| att.borrow().clone()) + .collect(), + layers: extent.depth as _, + }) + } + + unsafe fn create_shader_module( + &self, + raw_data: &[u32], + ) -> Result { + Ok(ShaderModule::Spirv(raw_data.into())) + } + + unsafe fn create_buffer( + &self, + size: u64, + usage: buffer::Usage, + ) -> Result { + use buffer::Usage; + + let mut bind = 0; + + if usage.contains(Usage::UNIFORM) { + bind |= d3d11::D3D11_BIND_CONSTANT_BUFFER; + } + if usage.contains(Usage::VERTEX) { + bind |= d3d11::D3D11_BIND_VERTEX_BUFFER; + } + if usage.contains(Usage::INDEX) { + bind |= d3d11::D3D11_BIND_INDEX_BUFFER; + } + + // TODO: >=11.1 + if usage.intersects(Usage::UNIFORM_TEXEL | Usage::STORAGE_TEXEL | Usage::TRANSFER_SRC) { + bind |= 
d3d11::D3D11_BIND_SHADER_RESOURCE; + } + + if usage.intersects(Usage::TRANSFER_DST | Usage::STORAGE) { + bind |= d3d11::D3D11_BIND_UNORDERED_ACCESS; + } + + // if `D3D11_BIND_CONSTANT_BUFFER` intersects with any other bind flag, we need to handle + // it by creating two buffers. one with `D3D11_BIND_CONSTANT_BUFFER` and one with the rest + let needs_disjoint_cb = bind & d3d11::D3D11_BIND_CONSTANT_BUFFER != 0 + && bind != d3d11::D3D11_BIND_CONSTANT_BUFFER; + + if needs_disjoint_cb { + bind ^= d3d11::D3D11_BIND_CONSTANT_BUFFER; + } + + fn up_align(x: u64, alignment: u64) -> u64 { + (x + alignment - 1) & !(alignment - 1) + } + + // constant buffer size need to be divisible by 16 + let size = if usage.contains(Usage::UNIFORM) { + up_align(size, 16) + } else { + up_align(size, 4) + }; + + Ok(Buffer { + internal: InternalBuffer { + raw: ptr::null_mut(), + disjoint_cb: if needs_disjoint_cb { + Some(ptr::null_mut()) + } else { + None + }, + srv: None, + uav: None, + usage, + }, + properties: memory::Properties::empty(), + bound_range: 0 .. 
0, + host_ptr: ptr::null_mut(), + bind, + requirements: memory::Requirements { + size, + alignment: 1, + type_mask: BUFFER_TYPE_MASK, + }, + }) + } + + unsafe fn get_buffer_requirements(&self, buffer: &Buffer) -> memory::Requirements { + buffer.requirements + } + + unsafe fn bind_buffer_memory( + &self, + memory: &Memory, + offset: u64, + buffer: &mut Buffer, + ) -> Result<(), device::BindError> { + debug!( + "usage={:?}, props={:b}", + buffer.internal.usage, memory.properties + ); + + #[allow(non_snake_case)] + let MiscFlags = if buffer.bind + & (d3d11::D3D11_BIND_SHADER_RESOURCE | d3d11::D3D11_BIND_UNORDERED_ACCESS) + != 0 + { + d3d11::D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS + } else { + 0 + }; + + let initial_data = memory + .host_visible + .as_ref() + .map(|p| d3d11::D3D11_SUBRESOURCE_DATA { + pSysMem: p.borrow().as_ptr().offset(offset as isize) as _, + SysMemPitch: 0, + SysMemSlicePitch: 0, + }); + + let raw = if memory.properties.contains(memory::Properties::DEVICE_LOCAL) { + // device local memory + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: buffer.requirements.size as _, + Usage: d3d11::D3D11_USAGE_DEFAULT, + BindFlags: buffer.bind, + CPUAccessFlags: 0, + MiscFlags, + StructureByteStride: if buffer.internal.usage.contains(buffer::Usage::TRANSFER_SRC) + { + 4 + } else { + 0 + }, + }; + + let mut buffer: *mut d3d11::ID3D11Buffer = ptr::null_mut(); + let hr = self.raw.CreateBuffer( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut buffer as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + return Err(device::BindError::WrongMemory); + } + + ComPtr::from_raw(buffer) + } else { + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: buffer.requirements.size as _, + // TODO: dynamic? 
+ Usage: d3d11::D3D11_USAGE_DEFAULT, + BindFlags: buffer.bind, + CPUAccessFlags: 0, + MiscFlags, + StructureByteStride: if buffer.internal.usage.contains(buffer::Usage::TRANSFER_SRC) + { + 4 + } else { + 0 + }, + }; + + let mut buffer: *mut d3d11::ID3D11Buffer = ptr::null_mut(); + let hr = self.raw.CreateBuffer( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut buffer as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + return Err(device::BindError::WrongMemory); + } + + ComPtr::from_raw(buffer) + }; + + let disjoint_cb = if buffer.internal.disjoint_cb.is_some() { + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: buffer.requirements.size as _, + Usage: d3d11::D3D11_USAGE_DEFAULT, + BindFlags: d3d11::D3D11_BIND_CONSTANT_BUFFER, + CPUAccessFlags: 0, + MiscFlags: 0, + StructureByteStride: 0, + }; + + let mut buffer: *mut d3d11::ID3D11Buffer = ptr::null_mut(); + let hr = self.raw.CreateBuffer( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut buffer as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + return Err(device::BindError::WrongMemory); + } + + Some(buffer) + } else { + None + }; + + let srv = if buffer.bind & d3d11::D3D11_BIND_SHADER_RESOURCE != 0 { + let mut desc = mem::zeroed::(); + desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS; + desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_BUFFEREX; + *desc.u.BufferEx_mut() = d3d11::D3D11_BUFFEREX_SRV { + FirstElement: 0, + // TODO: enforce alignment through HAL limits + NumElements: buffer.requirements.size as u32 / 4, + Flags: d3d11::D3D11_BUFFEREX_SRV_FLAG_RAW, + }; + + let mut srv = ptr::null_mut(); + let hr = self.raw.CreateShaderResourceView( + raw.as_raw() as *mut _, + &desc, + &mut srv as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateShaderResourceView failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + Some(srv) 
+ } else { + None + }; + + let uav = if buffer.bind & d3d11::D3D11_BIND_UNORDERED_ACCESS != 0 { + let mut desc = mem::zeroed::(); + desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS; + desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_BUFFER; + *desc.u.Buffer_mut() = d3d11::D3D11_BUFFER_UAV { + FirstElement: 0, + NumElements: buffer.requirements.size as u32 / 4, + Flags: d3d11::D3D11_BUFFER_UAV_FLAG_RAW, + }; + + let mut uav = ptr::null_mut(); + let hr = self.raw.CreateUnorderedAccessView( + raw.as_raw() as *mut _, + &desc, + &mut uav as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateUnorderedAccessView failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + Some(uav) + } else { + None + }; + + let internal = InternalBuffer { + raw: raw.into_raw(), + disjoint_cb, + srv, + uav, + usage: buffer.internal.usage, + }; + let range = offset .. offset + buffer.requirements.size; + + memory.bind_buffer(range.clone(), internal.clone()); + + let host_ptr = if let Some(vec) = &memory.host_visible { + vec.borrow().as_ptr() as *mut _ + } else { + ptr::null_mut() + }; + + buffer.internal = internal; + buffer.properties = memory.properties; + buffer.host_ptr = host_ptr; + buffer.bound_range = range; + + Ok(()) + } + + unsafe fn create_buffer_view( + &self, + _buffer: &Buffer, + _format: Option, + _range: buffer::SubRange, + ) -> Result { + unimplemented!() + } + + unsafe fn create_image( + &self, + kind: image::Kind, + mip_levels: image::Level, + format: format::Format, + _tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Result { + use image::Usage; + // + // TODO: create desc + + let surface_desc = format.base_format().0.desc(); + let bytes_per_texel = surface_desc.bits / 8; + let ext = kind.extent(); + let size = (ext.width * ext.height * ext.depth) as u64 * bytes_per_texel as u64; + let compressed = surface_desc.is_compressed(); + let depth = format.is_depth(); + + let mut bind = 
0; + + if usage.intersects(Usage::TRANSFER_SRC | Usage::SAMPLED | Usage::STORAGE) { + bind |= d3d11::D3D11_BIND_SHADER_RESOURCE; + } + + // we cant get RTVs or UAVs on compressed & depth formats + if !compressed && !depth { + if usage.intersects(Usage::COLOR_ATTACHMENT | Usage::TRANSFER_DST) { + bind |= d3d11::D3D11_BIND_RENDER_TARGET; + } + + if usage.intersects(Usage::TRANSFER_DST | Usage::STORAGE) { + bind |= d3d11::D3D11_BIND_UNORDERED_ACCESS; + } + } + + if usage.contains(Usage::DEPTH_STENCIL_ATTACHMENT) { + bind |= d3d11::D3D11_BIND_DEPTH_STENCIL; + } + + debug!("{:b}", bind); + + Ok(Image { + internal: InternalImage { + raw: ptr::null_mut(), + copy_srv: None, + srv: None, + unordered_access_views: Vec::new(), + depth_stencil_views: Vec::new(), + render_target_views: Vec::new(), + }, + decomposed_format: conv::DecomposedDxgiFormat::UNKNOWN, + kind, + mip_levels, + format, + usage, + view_caps, + bind, + requirements: memory::Requirements { + size: size, + alignment: 1, + type_mask: 0x1, // device-local only + }, + }) + } + + unsafe fn get_image_requirements(&self, image: &Image) -> memory::Requirements { + image.requirements + } + + unsafe fn get_image_subresource_footprint( + &self, + _image: &Image, + _sub: image::Subresource, + ) -> image::SubresourceFootprint { + unimplemented!() + } + + unsafe fn bind_image_memory( + &self, + memory: &Memory, + offset: u64, + image: &mut Image, + ) -> Result<(), device::BindError> { + use image::Usage; + use memory::Properties; + + let base_format = image.format.base_format(); + let format_desc = base_format.0.desc(); + + let compressed = format_desc.is_compressed(); + let depth = image.format.is_depth(); + let stencil = image.format.is_stencil(); + + let (bind, usage, cpu) = if memory.properties == Properties::DEVICE_LOCAL { + (image.bind, d3d11::D3D11_USAGE_DEFAULT, 0) + } else if memory.properties + == (Properties::DEVICE_LOCAL | Properties::CPU_VISIBLE | Properties::CPU_CACHED) + { + ( + image.bind, + 
d3d11::D3D11_USAGE_DYNAMIC, + d3d11::D3D11_CPU_ACCESS_WRITE, + ) + } else if memory.properties == (Properties::CPU_VISIBLE | Properties::CPU_CACHED) { + ( + 0, + d3d11::D3D11_USAGE_STAGING, + d3d11::D3D11_CPU_ACCESS_READ | d3d11::D3D11_CPU_ACCESS_WRITE, + ) + } else { + unimplemented!() + }; + + let dxgi_format = conv::map_format(image.format).unwrap(); + let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(dxgi_format); + let bpp = format_desc.bits as u32 / 8; + + let (view_kind, resource) = match image.kind { + image::Kind::D1(width, layers) => { + let initial_data = + memory + .host_visible + .as_ref() + .map(|_p| d3d11::D3D11_SUBRESOURCE_DATA { + pSysMem: memory.mapped_ptr.offset(offset as isize) as _, + SysMemPitch: 0, + SysMemSlicePitch: 0, + }); + + let desc = d3d11::D3D11_TEXTURE1D_DESC { + Width: width, + MipLevels: image.mip_levels as _, + ArraySize: layers as _, + Format: decomposed.typeless, + Usage: usage, + BindFlags: bind, + CPUAccessFlags: cpu, + MiscFlags: 0, + }; + + let mut resource = ptr::null_mut(); + let hr = self.raw.CreateTexture1D( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut resource as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateTexture1D failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + (image::ViewKind::D1Array, resource) + } + image::Kind::D2(width, height, layers, _) => { + let mut initial_datas = Vec::new(); + + for _layer in 0 .. layers { + for level in 0 .. image.mip_levels { + let width = image.kind.extent().at_level(level).width; + + // TODO: layer offset? 
+ initial_datas.push(d3d11::D3D11_SUBRESOURCE_DATA { + pSysMem: memory.mapped_ptr.offset(offset as isize) as _, + SysMemPitch: width * bpp, + SysMemSlicePitch: 0, + }); + } + } + + let desc = d3d11::D3D11_TEXTURE2D_DESC { + Width: width, + Height: height, + MipLevels: image.mip_levels as _, + ArraySize: layers as _, + Format: decomposed.typeless, + SampleDesc: dxgitype::DXGI_SAMPLE_DESC { + Count: 1, + Quality: 0, + }, + Usage: usage, + BindFlags: bind, + CPUAccessFlags: cpu, + MiscFlags: if image.view_caps.contains(image::ViewCapabilities::KIND_CUBE) { + d3d11::D3D11_RESOURCE_MISC_TEXTURECUBE + } else { + 0 + }, + }; + + let mut resource = ptr::null_mut(); + let hr = self.raw.CreateTexture2D( + &desc, + if !depth { + initial_datas.as_ptr() + } else { + ptr::null_mut() + }, + &mut resource as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateTexture2D failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + (image::ViewKind::D2Array, resource) + } + image::Kind::D3(width, height, depth) => { + let initial_data = + memory + .host_visible + .as_ref() + .map(|_p| d3d11::D3D11_SUBRESOURCE_DATA { + pSysMem: memory.mapped_ptr.offset(offset as isize) as _, + SysMemPitch: width * bpp, + SysMemSlicePitch: width * height * bpp, + }); + + let desc = d3d11::D3D11_TEXTURE3D_DESC { + Width: width, + Height: height, + Depth: depth, + MipLevels: image.mip_levels as _, + Format: decomposed.typeless, + Usage: usage, + BindFlags: bind, + CPUAccessFlags: cpu, + MiscFlags: 0, + }; + + let mut resource = ptr::null_mut(); + let hr = self.raw.CreateTexture3D( + &desc, + if let Some(data) = initial_data { + &data + } else { + ptr::null_mut() + }, + &mut resource as *mut *mut _ as *mut *mut _, + ); + + if !winerror::SUCCEEDED(hr) { + error!("CreateTexture3D failed: 0x{:x}", hr); + + return Err(device::BindError::WrongMemory); + } + + (image::ViewKind::D3, resource) + } + }; + + let mut unordered_access_views = Vec::new(); + + if 
image.usage.contains(Usage::TRANSFER_DST) && !compressed && !depth { + for mip in 0 .. image.mip_levels { + let view = ViewInfo { + resource: resource, + kind: image.kind, + caps: image::ViewCapabilities::empty(), + view_kind, + // TODO: we should be using `uav_format` rather than `copy_uav_format`, and share + // the UAVs when the formats are identical + format: decomposed.copy_uav.unwrap(), + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: mip .. (mip + 1), + layers: 0 .. image.kind.num_layers(), + }, + }; + + unordered_access_views.push( + self.view_image_as_unordered_access(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ); + } + } + + let (copy_srv, srv) = if image.usage.contains(image::Usage::TRANSFER_SRC) { + let mut view = ViewInfo { + resource: resource, + kind: image.kind, + caps: image::ViewCapabilities::empty(), + view_kind, + format: decomposed.copy_srv.unwrap(), + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: 0 .. image.mip_levels, + layers: 0 .. image.kind.num_layers(), + }, + }; + + let copy_srv = if !compressed { + Some( + self.view_image_as_shader_resource(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ) + } else { + None + }; + + view.format = decomposed.srv.unwrap(); + + let srv = if !depth && !stencil { + Some( + self.view_image_as_shader_resource(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ) + } else { + None + }; + + (copy_srv, srv) + } else { + (None, None) + }; + + let mut render_target_views = Vec::new(); + + if (image.usage.contains(image::Usage::COLOR_ATTACHMENT) + || image.usage.contains(image::Usage::TRANSFER_DST)) + && !compressed + && !depth + { + for layer in 0 .. image.kind.num_layers() { + for mip in 0 .. 
image.mip_levels { + let view = ViewInfo { + resource: resource, + kind: image.kind, + caps: image::ViewCapabilities::empty(), + view_kind, + format: decomposed.rtv.unwrap(), + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: mip .. (mip + 1), + layers: layer .. (layer + 1), + }, + }; + + render_target_views.push( + self.view_image_as_render_target(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ); + } + } + }; + + let mut depth_stencil_views = Vec::new(); + + if depth { + for layer in 0 .. image.kind.num_layers() { + for mip in 0 .. image.mip_levels { + let view = ViewInfo { + resource: resource, + kind: image.kind, + caps: image::ViewCapabilities::empty(), + view_kind, + format: decomposed.dsv.unwrap(), + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: mip .. (mip + 1), + layers: layer .. (layer + 1), + }, + }; + + depth_stencil_views.push( + self.view_image_as_depth_stencil(&view) + .map_err(|_| device::BindError::WrongMemory)?, + ); + } + } + } + + let internal = InternalImage { + raw: resource, + copy_srv, + srv, + unordered_access_views, + depth_stencil_views, + render_target_views, + }; + + image.decomposed_format = decomposed; + image.internal = internal; + + Ok(()) + } + + unsafe fn create_image_view( + &self, + image: &Image, + view_kind: image::ViewKind, + format: format::Format, + _swizzle: format::Swizzle, + range: image::SubresourceRange, + ) -> Result { + let is_array = image.kind.num_layers() > 1; + + let info = ViewInfo { + resource: image.internal.raw, + kind: image.kind, + caps: image.view_caps, + // D3D11 doesn't allow looking at a single slice of an array as a non-array + view_kind: if is_array && view_kind == image::ViewKind::D2 { + image::ViewKind::D2Array + } else if is_array && view_kind == image::ViewKind::D1 { + image::ViewKind::D1Array + } else { + view_kind + }, + format: conv::map_format(format).ok_or(image::ViewCreationError::BadFormat(format))?, + range, + }; + + 
let srv_info = ViewInfo { + format: conv::viewable_format(info.format), + ..info.clone() + }; + + Ok(ImageView { + format, + srv_handle: if image.usage.intersects(image::Usage::SAMPLED) { + Some(self.view_image_as_shader_resource(&srv_info)?) + } else { + None + }, + rtv_handle: if image.usage.contains(image::Usage::COLOR_ATTACHMENT) { + Some(self.view_image_as_render_target(&info)?) + } else { + None + }, + uav_handle: if image.usage.contains(image::Usage::STORAGE) { + Some(self.view_image_as_unordered_access(&info)?) + } else { + None + }, + dsv_handle: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT) { + Some(self.view_image_as_depth_stencil(&info)?) + } else { + None + }, + }) + } + + unsafe fn create_sampler( + &self, + info: &image::SamplerDesc, + ) -> Result { + assert!(info.normalized); + + let op = match info.comparison { + Some(_) => d3d11::D3D11_FILTER_REDUCTION_TYPE_COMPARISON, + None => d3d11::D3D11_FILTER_REDUCTION_TYPE_STANDARD, + }; + + let desc = d3d11::D3D11_SAMPLER_DESC { + Filter: conv::map_filter( + info.min_filter, + info.mag_filter, + info.mip_filter, + op, + info.anisotropy_clamp, + ), + AddressU: conv::map_wrapping(info.wrap_mode.0), + AddressV: conv::map_wrapping(info.wrap_mode.1), + AddressW: conv::map_wrapping(info.wrap_mode.2), + MipLODBias: info.lod_bias.0, + MaxAnisotropy: info.anisotropy_clamp.map_or(0, |aniso| aniso as u32), + ComparisonFunc: info.comparison.map_or(0, |comp| conv::map_comparison(comp)), + BorderColor: info.border.into(), + MinLOD: info.lod_range.start.0, + MaxLOD: info.lod_range.end.0, + }; + + let mut sampler = ptr::null_mut(); + let hr = self + .raw + .CreateSamplerState(&desc, &mut sampler as *mut *mut _ as *mut *mut _); + + assert_eq!(true, winerror::SUCCEEDED(hr)); + + Ok(Sampler { + sampler_handle: ComPtr::from_raw(sampler), + }) + } + + unsafe fn create_descriptor_pool( + &self, + _max_sets: usize, + ranges: I, + _flags: pso::DescriptorPoolCreateFlags, + ) -> Result + where + I: IntoIterator, + 
I::Item: Borrow, + { + let mut total = RegisterData::default(); + for range in ranges { + let r = range.borrow(); + let content = DescriptorContent::from(r.ty); + total.add_content_many(content, r.count as DescriptorIndex); + } + + let max_stages = 6; + let count = total.sum() * max_stages; + Ok(DescriptorPool::with_capacity(count)) + } + + unsafe fn create_descriptor_set_layout( + &self, + layout_bindings: I, + _immutable_samplers: J, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let mut total = MultiStageData::>::default(); + let mut bindings = layout_bindings + .into_iter() + .map(|b| b.borrow().clone()) + .collect::>(); + + for binding in bindings.iter() { + let content = DescriptorContent::from(binding.ty); + total.add_content(content, binding.stage_flags); + } + + bindings.sort_by_key(|a| a.binding); + + let accum = total.map_register(|count| RegisterAccumulator { + res_index: *count as ResourceIndex, + }); + + Ok(DescriptorSetLayout { + bindings: Arc::new(bindings), + pool_mapping: accum.to_mapping(), + }) + } + + unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) + where + I: IntoIterator>, + J: IntoIterator, + J::Item: Borrow>, + { + for write in write_iter { + let mut mapping = write + .set + .layout + .pool_mapping + .map_register(|mapping| mapping.offset); + let binding_start = write + .set + .layout + .bindings + .iter() + .position(|binding| binding.binding == write.binding) + .unwrap(); + for binding in &write.set.layout.bindings[.. binding_start] { + let content = DescriptorContent::from(binding.ty); + mapping.add_content(content, binding.stage_flags); + } + + for (binding, descriptor) in write.set.layout.bindings[binding_start ..] 
+ .iter() + .zip(write.descriptors) + { + let handles = match *descriptor.borrow() { + pso::Descriptor::Buffer(buffer, ref _sub) => RegisterData { + c: match buffer.internal.disjoint_cb { + Some(dj_buf) => dj_buf as *mut _, + None => buffer.internal.raw as *mut _, + }, + t: buffer.internal.srv.map_or(ptr::null_mut(), |p| p as *mut _), + u: buffer.internal.uav.map_or(ptr::null_mut(), |p| p as *mut _), + s: ptr::null_mut(), + }, + pso::Descriptor::Image(image, _layout) => RegisterData { + c: ptr::null_mut(), + t: image + .srv_handle + .clone() + .map_or(ptr::null_mut(), |h| h.as_raw() as *mut _), + u: image + .uav_handle + .clone() + .map_or(ptr::null_mut(), |h| h.as_raw() as *mut _), + s: ptr::null_mut(), + }, + pso::Descriptor::Sampler(sampler) => RegisterData { + c: ptr::null_mut(), + t: ptr::null_mut(), + u: ptr::null_mut(), + s: sampler.sampler_handle.as_raw() as *mut _, + }, + pso::Descriptor::CombinedImageSampler(image, _layout, sampler) => { + RegisterData { + c: ptr::null_mut(), + t: image + .srv_handle + .clone() + .map_or(ptr::null_mut(), |h| h.as_raw() as *mut _), + u: image + .uav_handle + .clone() + .map_or(ptr::null_mut(), |h| h.as_raw() as *mut _), + s: sampler.sampler_handle.as_raw() as *mut _, + } + } + pso::Descriptor::TexelBuffer(_buffer_view) => unimplemented!(), + }; + + let content = DescriptorContent::from(binding.ty); + if content.contains(DescriptorContent::CBV) { + let offsets = mapping.map_other(|map| map.c); + write + .set + .assign_stages(&offsets, binding.stage_flags, handles.c); + }; + if content.contains(DescriptorContent::SRV) { + let offsets = mapping.map_other(|map| map.t); + write + .set + .assign_stages(&offsets, binding.stage_flags, handles.t); + }; + if content.contains(DescriptorContent::UAV) { + let offsets = mapping.map_other(|map| map.u); + write + .set + .assign_stages(&offsets, binding.stage_flags, handles.u); + }; + if content.contains(DescriptorContent::SAMPLER) { + let offsets = mapping.map_other(|map| map.s); + write 
+ .set + .assign_stages(&offsets, binding.stage_flags, handles.s); + }; + + mapping.add_content(content, binding.stage_flags); + } + } + } + + unsafe fn copy_descriptor_sets<'a, I>(&self, copy_iter: I) + where + I: IntoIterator, + I::Item: Borrow>, + { + for copy in copy_iter { + let _copy = copy.borrow(); + //TODO + /* + for offset in 0 .. copy.count { + let (dst_ty, dst_handle_offset, dst_second_handle_offset) = copy + .dst_set + .get_handle_offset(copy.dst_binding + offset as u32); + let (src_ty, src_handle_offset, src_second_handle_offset) = copy + .src_set + .get_handle_offset(copy.src_binding + offset as u32); + assert_eq!(dst_ty, src_ty); + + let dst_handle = copy.dst_set.handles.offset(dst_handle_offset as isize); + let src_handle = copy.dst_set.handles.offset(src_handle_offset as isize); + + match dst_ty { + pso::DescriptorType::Image { + ty: pso::ImageDescriptorType::Sampled { with_sampler: true } + } => { + let dst_second_handle = copy + .dst_set + .handles + .offset(dst_second_handle_offset as isize); + let src_second_handle = copy + .dst_set + .handles + .offset(src_second_handle_offset as isize); + + *dst_handle = *src_handle; + *dst_second_handle = *src_second_handle; + } + _ => *dst_handle = *src_handle, + } + }*/ + } + } + + unsafe fn map_memory( + &self, + memory: &Memory, + segment: memory::Segment, + ) -> Result<*mut u8, device::MapError> { + assert_eq!(memory.host_visible.is_some(), true); + + Ok(memory.mapped_ptr.offset(segment.offset as isize)) + } + + unsafe fn unmap_memory(&self, memory: &Memory) { + assert_eq!(memory.host_visible.is_some(), true); + } + + unsafe fn flush_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a Memory, memory::Segment)>, + { + let _scope = debug_scope!(&self.context, "FlushMappedRanges"); + + // go through every range we wrote to + for range in ranges.into_iter() { + let &(memory, ref segment) = range.borrow(); + let range = 
memory.resolve(segment); + + let _scope = debug_scope!(&self.context, "Range({:?})", range); + memory.flush(&self.context, range); + } + + Ok(()) + } + + unsafe fn invalidate_mapped_memory_ranges<'a, I>( + &self, + ranges: I, + ) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a Memory, memory::Segment)>, + { + let _scope = debug_scope!(&self.context, "InvalidateMappedRanges"); + + // go through every range we want to read from + for range in ranges.into_iter() { + let &(memory, ref segment) = range.borrow(); + let range = memory.resolve(segment); + + let _scope = debug_scope!(&self.context, "Range({:?})", range); + memory.invalidate( + &self.context, + range, + self.internal.working_buffer.clone(), + self.internal.working_buffer_size, + ); + } + + Ok(()) + } + + fn create_semaphore(&self) -> Result { + // TODO: + Ok(Semaphore) + } + + fn create_fence(&self, signalled: bool) -> Result { + Ok(Arc::new(RawFence { + mutex: Mutex::new(signalled), + condvar: Condvar::new(), + })) + } + + unsafe fn reset_fence(&self, fence: &Fence) -> Result<(), device::OutOfMemory> { + *fence.mutex.lock() = false; + Ok(()) + } + + unsafe fn wait_for_fence( + &self, + fence: &Fence, + timeout_ns: u64, + ) -> Result { + use std::time::{Duration, Instant}; + + debug!("wait_for_fence {:?} for {} ns", fence, timeout_ns); + let mut guard = fence.mutex.lock(); + match timeout_ns { + 0 => Ok(*guard), + 0xFFFFFFFFFFFFFFFF => { + while !*guard { + fence.condvar.wait(&mut guard); + } + Ok(true) + } + _ => { + let total = Duration::from_nanos(timeout_ns as u64); + let now = Instant::now(); + while !*guard { + let duration = match total.checked_sub(now.elapsed()) { + Some(dur) => dur, + None => return Ok(false), + }; + let result = fence.condvar.wait_for(&mut guard, duration); + if result.timed_out() { + return Ok(false); + } + } + Ok(true) + } + } + } + + unsafe fn get_fence_status(&self, fence: &Fence) -> Result { + Ok(*fence.mutex.lock()) + } + + fn 
create_event(&self) -> Result<(), device::OutOfMemory> { + unimplemented!() + } + + unsafe fn get_event_status(&self, _event: &()) -> Result { + unimplemented!() + } + + unsafe fn set_event(&self, _event: &()) -> Result<(), device::OutOfMemory> { + unimplemented!() + } + + unsafe fn reset_event(&self, _event: &()) -> Result<(), device::OutOfMemory> { + unimplemented!() + } + + unsafe fn free_memory(&self, memory: Memory) { + for (_range, internal) in memory.local_buffers.borrow_mut().iter() { + (*internal.raw).Release(); + if let Some(srv) = internal.srv { + (*srv).Release(); + } + } + } + + unsafe fn create_query_pool( + &self, + _query_ty: query::Type, + _count: query::Id, + ) -> Result { + unimplemented!() + } + + unsafe fn destroy_query_pool(&self, _pool: QueryPool) { + unimplemented!() + } + + unsafe fn get_query_pool_results( + &self, + _pool: &QueryPool, + _queries: Range, + _data: &mut [u8], + _stride: buffer::Offset, + _flags: query::ResultFlags, + ) -> Result { + unimplemented!() + } + + unsafe fn destroy_shader_module(&self, _shader_lib: ShaderModule) {} + + unsafe fn destroy_render_pass(&self, _rp: RenderPass) { + //unimplemented!() + } + + unsafe fn destroy_pipeline_layout(&self, _layout: PipelineLayout) { + //unimplemented!() + } + + unsafe fn destroy_graphics_pipeline(&self, _pipeline: GraphicsPipeline) {} + + unsafe fn destroy_compute_pipeline(&self, _pipeline: ComputePipeline) {} + + unsafe fn destroy_framebuffer(&self, _fb: Framebuffer) {} + + unsafe fn destroy_buffer(&self, _buffer: Buffer) {} + + unsafe fn destroy_buffer_view(&self, _view: BufferView) { + unimplemented!() + } + + unsafe fn destroy_image(&self, _image: Image) { + // TODO: + // unimplemented!() + } + + unsafe fn destroy_image_view(&self, _view: ImageView) { + //unimplemented!() + } + + unsafe fn destroy_sampler(&self, _sampler: Sampler) {} + + unsafe fn destroy_descriptor_pool(&self, _pool: DescriptorPool) { + //unimplemented!() + } + + unsafe fn 
destroy_descriptor_set_layout(&self, _layout: DescriptorSetLayout) { + //unimplemented!() + } + + unsafe fn destroy_fence(&self, _fence: Fence) { + // unimplemented!() + } + + unsafe fn destroy_semaphore(&self, _semaphore: Semaphore) { + //unimplemented!() + } + + unsafe fn destroy_event(&self, _event: ()) { + //unimplemented!() + } + + unsafe fn create_swapchain( + &self, + surface: &mut Surface, + config: window::SwapchainConfig, + _old_swapchain: Option, + ) -> Result<(Swapchain, Vec), window::CreationError> { + let (dxgi_swapchain, non_srgb_format) = + self.create_swapchain_impl(&config, surface.wnd_handle, surface.factory.clone())?; + + let resource = { + let mut resource: *mut d3d11::ID3D11Resource = ptr::null_mut(); + assert_eq!( + winerror::S_OK, + dxgi_swapchain.GetBuffer( + 0 as _, + &d3d11::ID3D11Resource::uuidof(), + &mut resource as *mut *mut _ as *mut *mut _, + ) + ); + resource + }; + + let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1); + let decomposed = + conv::DecomposedDxgiFormat::from_dxgi_format(conv::map_format(config.format).unwrap()); + + let mut view_info = ViewInfo { + resource, + kind, + caps: image::ViewCapabilities::empty(), + view_kind: image::ViewKind::D2, + format: decomposed.rtv.unwrap(), + // TODO: can these ever differ for backbuffer? + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: 0 .. 1, + layers: 0 .. 1, + }, + }; + let rtv = self.view_image_as_render_target(&view_info).unwrap(); + + view_info.format = non_srgb_format; + view_info.view_kind = image::ViewKind::D2Array; + let copy_srv = self.view_image_as_shader_resource(&view_info).unwrap(); + + let images = (0 .. config.image_count) + .map(|_i| { + // returning the 0th buffer for all images seems like the right thing to do. 
we can + // only get write access to the first buffer in the case of `_SEQUENTIAL` flip model, + // and read access to the rest + let internal = InternalImage { + raw: resource, + copy_srv: Some(copy_srv.clone()), + srv: None, + unordered_access_views: Vec::new(), + depth_stencil_views: Vec::new(), + render_target_views: vec![rtv.clone()], + }; + + Image { + kind, + usage: config.image_usage, + format: config.format, + view_caps: image::ViewCapabilities::empty(), + // NOTE: not the actual format of the backbuffer(s) + decomposed_format: decomposed.clone(), + mip_levels: 1, + internal, + bind: 0, // TODO: ? + requirements: memory::Requirements { + // values don't really matter + size: 0, + alignment: 0, + type_mask: 0, + }, + } + }) + .collect(); + + Ok((Swapchain { dxgi_swapchain }, images)) + } + + unsafe fn destroy_swapchain(&self, _swapchain: Swapchain) { + // automatic + } + + fn wait_idle(&self) -> Result<(), device::OutOfMemory> { + Ok(()) + // unimplemented!() + } + + unsafe fn set_image_name(&self, _image: &mut Image, _name: &str) { + // TODO + } + + unsafe fn set_buffer_name(&self, _buffer: &mut Buffer, _name: &str) { + // TODO + } + + unsafe fn set_command_buffer_name(&self, _command_buffer: &mut CommandBuffer, _name: &str) { + // TODO + } + + unsafe fn set_semaphore_name(&self, _semaphore: &mut Semaphore, _name: &str) { + // TODO + } + + unsafe fn set_fence_name(&self, _fence: &mut Fence, _name: &str) { + // TODO + } + + unsafe fn set_framebuffer_name(&self, _framebuffer: &mut Framebuffer, _name: &str) { + // TODO + } + + unsafe fn set_render_pass_name(&self, _render_pass: &mut RenderPass, _name: &str) { + // TODO + } + + unsafe fn set_descriptor_set_name(&self, _descriptor_set: &mut DescriptorSet, _name: &str) { + // TODO + } + + unsafe fn set_descriptor_set_layout_name( + &self, + _descriptor_set_layout: &mut DescriptorSetLayout, + _name: &str, + ) { + // TODO + } +} diff --git a/third_party/rust/gfx-backend-dx11/src/dxgi.rs 
b/third_party/rust/gfx-backend-dx11/src/dxgi.rs index 41b2d7163a60..ec562f61c693 100644 --- a/third_party/rust/gfx-backend-dx11/src/dxgi.rs +++ b/third_party/rust/gfx-backend-dx11/src/dxgi.rs @@ -1,220 +1,217 @@ -use hal::adapter::{AdapterInfo, DeviceType}; - -use winapi::{ - shared::{ - dxgi, - dxgi1_2, - dxgi1_3, - dxgi1_4, - dxgi1_5, - guiddef::{GUID, REFIID}, - winerror, - }, - um::unknwnbase::IUnknown, - Interface, -}; - -use wio::com::ComPtr; - -use std::ffi::OsString; -use std::mem; -use std::os::windows::ffi::OsStringExt; -use std::ptr; - -#[derive(Debug, Copy, Clone)] -pub(crate) enum DxgiVersion { - /// Capable of the following interfaces: - /// * IDXGIObject - /// * IDXGIDeviceSubObject - /// * IDXGIResource - /// * IDXGIKeyedMutex - /// * IDXGISurface - /// * IDXGISurface1 - /// * IDXGIOutput - /// * IDXGISwapChain - /// * IDXGIFactory - /// * IDXGIDevice - /// * IDXGIFactory1 - /// * IDXGIAdapter1 - /// * IDXGIDevice1 - Dxgi1_0, - - /// Capable of the following interfaces: - /// * IDXGIDisplayControl - /// * IDXGIOutputDuplication - /// * IDXGISurface2 - /// * IDXGIResource1 - /// * IDXGIDevice2 - /// * IDXGISwapChain1 - /// * IDXGIFactory2 - /// * IDXGIAdapter2 - /// * IDXGIOutput1 - Dxgi1_2, - - /// Capable of the following interfaces: - /// * IDXGIDevice3 - /// * IDXGISwapChain2 - /// * IDXGIOutput2 - /// * IDXGIDecodeSwapChain - /// * IDXGIFactoryMedia - /// * IDXGISwapChainMedia - /// * IDXGIOutput3 - Dxgi1_3, - - /// Capable of the following interfaces: - /// * IDXGISwapChain3 - /// * IDXGIOutput4 - /// * IDXGIFactory4 - /// * IDXGIAdapter3 - Dxgi1_4, - - /// Capable of the following interfaces: - /// * IDXGIOutput5 - /// * IDXGISwapChain4 - /// * IDXGIDevice4 - /// * IDXGIFactory5 - Dxgi1_5, -} - -type DxgiFun = - unsafe extern "system" fn(REFIID, *mut *mut winapi::ctypes::c_void) -> winerror::HRESULT; - -fn create_dxgi_factory1( - func: &DxgiFun, - guid: &GUID, -) -> Result, winerror::HRESULT> { - let mut factory: *mut IUnknown = 
ptr::null_mut(); - - let hr = unsafe { func(guid, &mut factory as *mut *mut _ as *mut *mut _) }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(factory as *mut _) }) - } else { - Err(hr) - } -} - -pub(crate) fn get_dxgi_factory( -) -> Result<(libloading::Library, ComPtr, DxgiVersion), winerror::HRESULT> { - // The returned Com-pointer is only safe to use for the lifetime of the Library. - let library = libloading::Library::new("dxgi.dll").map_err(|_| -1)?; - let func: libloading::Symbol = - unsafe { library.get(b"CreateDXGIFactory1") }.map_err(|_| -1)?; - - // TODO: do we even need `create_dxgi_factory2`? - if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_5::IDXGIFactory5::uuidof()) { - return Ok((library, factory, DxgiVersion::Dxgi1_5)); - } - - if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_4::IDXGIFactory4::uuidof()) { - return Ok((library, factory, DxgiVersion::Dxgi1_4)); - } - - if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_3::IDXGIFactory3::uuidof()) { - return Ok((library, factory, DxgiVersion::Dxgi1_3)); - } - - if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_2::IDXGIFactory2::uuidof()) { - return Ok((library, factory, DxgiVersion::Dxgi1_2)); - } - - if let Ok(factory) = create_dxgi_factory1(&func, &dxgi::IDXGIFactory1::uuidof()) { - return Ok((library, factory, DxgiVersion::Dxgi1_0)); - } - - // TODO: any reason why above would fail and this wouldnt? 
- match create_dxgi_factory1(&func, &dxgi::IDXGIFactory::uuidof()) { - Ok(factory) => Ok((library, factory, DxgiVersion::Dxgi1_0)), - Err(hr) => Err(hr), - } -} - -fn enum_adapters1( - idx: u32, - factory: *mut dxgi::IDXGIFactory, -) -> Result, winerror::HRESULT> { - let mut adapter: *mut dxgi::IDXGIAdapter = ptr::null_mut(); - - let hr = unsafe { - (*(factory as *mut dxgi::IDXGIFactory1)) - .EnumAdapters1(idx, &mut adapter as *mut *mut _ as *mut *mut _) - }; - - if winerror::SUCCEEDED(hr) { - Ok(unsafe { ComPtr::from_raw(adapter) }) - } else { - Err(hr) - } -} - -fn get_adapter_desc(adapter: *mut dxgi::IDXGIAdapter, version: DxgiVersion) -> AdapterInfo { - match version { - DxgiVersion::Dxgi1_0 => { - let mut desc: dxgi::DXGI_ADAPTER_DESC1 = unsafe { mem::zeroed() }; - unsafe { - (*(adapter as *mut dxgi::IDXGIAdapter1)).GetDesc1(&mut desc); - } - - let device_name = { - let len = desc.Description.iter().take_while(|&&c| c != 0).count(); - let name = ::from_wide(&desc.Description[.. len]); - name.to_string_lossy().into_owned() - }; - - AdapterInfo { - name: device_name, - vendor: desc.VendorId as usize, - device: desc.DeviceId as usize, - device_type: if (desc.Flags & dxgi::DXGI_ADAPTER_FLAG_SOFTWARE) != 0 { - DeviceType::VirtualGpu - } else { - DeviceType::DiscreteGpu - }, - } - } - DxgiVersion::Dxgi1_2 - | DxgiVersion::Dxgi1_3 - | DxgiVersion::Dxgi1_4 - | DxgiVersion::Dxgi1_5 => { - let mut desc: dxgi1_2::DXGI_ADAPTER_DESC2 = unsafe { mem::zeroed() }; - unsafe { - (*(adapter as *mut dxgi1_2::IDXGIAdapter2)).GetDesc2(&mut desc); - } - - let device_name = { - let len = desc.Description.iter().take_while(|&&c| c != 0).count(); - let name = ::from_wide(&desc.Description[.. 
len]); - name.to_string_lossy().into_owned() - }; - - AdapterInfo { - name: device_name, - vendor: desc.VendorId as usize, - device: desc.DeviceId as usize, - device_type: if (desc.Flags & dxgi::DXGI_ADAPTER_FLAG_SOFTWARE) != 0 { - DeviceType::VirtualGpu - } else { - DeviceType::DiscreteGpu - }, - } - } - } -} - -pub(crate) fn get_adapter( - idx: u32, - factory: *mut dxgi::IDXGIFactory, - version: DxgiVersion, -) -> Result<(ComPtr, AdapterInfo), winerror::HRESULT> { - let adapter = match version { - DxgiVersion::Dxgi1_0 - | DxgiVersion::Dxgi1_2 - | DxgiVersion::Dxgi1_3 - | DxgiVersion::Dxgi1_4 - | DxgiVersion::Dxgi1_5 => enum_adapters1(idx, factory)?, - }; - - let desc = get_adapter_desc(adapter.as_raw(), version); - - Ok((adapter, desc)) -} +use hal::adapter::{AdapterInfo, DeviceType}; + +use winapi::{ + shared::{ + dxgi, + dxgi1_2, + dxgi1_3, + dxgi1_4, + dxgi1_5, + guiddef::{GUID, REFIID}, + winerror, + }, + um::unknwnbase::IUnknown, + Interface, +}; + +use wio::com::ComPtr; + +use std::{ffi::OsString, mem, os::windows::ffi::OsStringExt, ptr}; + +#[derive(Debug, Copy, Clone)] +pub(crate) enum DxgiVersion { + /// Capable of the following interfaces: + /// * IDXGIObject + /// * IDXGIDeviceSubObject + /// * IDXGIResource + /// * IDXGIKeyedMutex + /// * IDXGISurface + /// * IDXGISurface1 + /// * IDXGIOutput + /// * IDXGISwapChain + /// * IDXGIFactory + /// * IDXGIDevice + /// * IDXGIFactory1 + /// * IDXGIAdapter1 + /// * IDXGIDevice1 + Dxgi1_0, + + /// Capable of the following interfaces: + /// * IDXGIDisplayControl + /// * IDXGIOutputDuplication + /// * IDXGISurface2 + /// * IDXGIResource1 + /// * IDXGIDevice2 + /// * IDXGISwapChain1 + /// * IDXGIFactory2 + /// * IDXGIAdapter2 + /// * IDXGIOutput1 + Dxgi1_2, + + /// Capable of the following interfaces: + /// * IDXGIDevice3 + /// * IDXGISwapChain2 + /// * IDXGIOutput2 + /// * IDXGIDecodeSwapChain + /// * IDXGIFactoryMedia + /// * IDXGISwapChainMedia + /// * IDXGIOutput3 + Dxgi1_3, + + /// Capable of the following 
interfaces: + /// * IDXGISwapChain3 + /// * IDXGIOutput4 + /// * IDXGIFactory4 + /// * IDXGIAdapter3 + Dxgi1_4, + + /// Capable of the following interfaces: + /// * IDXGIOutput5 + /// * IDXGISwapChain4 + /// * IDXGIDevice4 + /// * IDXGIFactory5 + Dxgi1_5, +} + +type DxgiFun = + unsafe extern "system" fn(REFIID, *mut *mut winapi::ctypes::c_void) -> winerror::HRESULT; + +fn create_dxgi_factory1( + func: &DxgiFun, + guid: &GUID, +) -> Result, winerror::HRESULT> { + let mut factory: *mut IUnknown = ptr::null_mut(); + + let hr = unsafe { func(guid, &mut factory as *mut *mut _ as *mut *mut _) }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(factory as *mut _) }) + } else { + Err(hr) + } +} + +pub(crate) fn get_dxgi_factory( +) -> Result<(libloading::Library, ComPtr, DxgiVersion), winerror::HRESULT> { + // The returned Com-pointer is only safe to use for the lifetime of the Library. + let library = libloading::Library::new("dxgi.dll").map_err(|_| -1)?; + let func: libloading::Symbol = + unsafe { library.get(b"CreateDXGIFactory1") }.map_err(|_| -1)?; + + // TODO: do we even need `create_dxgi_factory2`? + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_5::IDXGIFactory5::uuidof()) { + return Ok((library, factory, DxgiVersion::Dxgi1_5)); + } + + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_4::IDXGIFactory4::uuidof()) { + return Ok((library, factory, DxgiVersion::Dxgi1_4)); + } + + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_3::IDXGIFactory3::uuidof()) { + return Ok((library, factory, DxgiVersion::Dxgi1_3)); + } + + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_2::IDXGIFactory2::uuidof()) { + return Ok((library, factory, DxgiVersion::Dxgi1_2)); + } + + if let Ok(factory) = create_dxgi_factory1(&func, &dxgi::IDXGIFactory1::uuidof()) { + return Ok((library, factory, DxgiVersion::Dxgi1_0)); + } + + // TODO: any reason why above would fail and this wouldnt? 
+ match create_dxgi_factory1(&func, &dxgi::IDXGIFactory::uuidof()) { + Ok(factory) => Ok((library, factory, DxgiVersion::Dxgi1_0)), + Err(hr) => Err(hr), + } +} + +fn enum_adapters1( + idx: u32, + factory: *mut dxgi::IDXGIFactory, +) -> Result, winerror::HRESULT> { + let mut adapter: *mut dxgi::IDXGIAdapter = ptr::null_mut(); + + let hr = unsafe { + (*(factory as *mut dxgi::IDXGIFactory1)) + .EnumAdapters1(idx, &mut adapter as *mut *mut _ as *mut *mut _) + }; + + if winerror::SUCCEEDED(hr) { + Ok(unsafe { ComPtr::from_raw(adapter) }) + } else { + Err(hr) + } +} + +fn get_adapter_desc(adapter: *mut dxgi::IDXGIAdapter, version: DxgiVersion) -> AdapterInfo { + match version { + DxgiVersion::Dxgi1_0 => { + let mut desc: dxgi::DXGI_ADAPTER_DESC1 = unsafe { mem::zeroed() }; + unsafe { + (*(adapter as *mut dxgi::IDXGIAdapter1)).GetDesc1(&mut desc); + } + + let device_name = { + let len = desc.Description.iter().take_while(|&&c| c != 0).count(); + let name = ::from_wide(&desc.Description[.. len]); + name.to_string_lossy().into_owned() + }; + + AdapterInfo { + name: device_name, + vendor: desc.VendorId as usize, + device: desc.DeviceId as usize, + device_type: if (desc.Flags & dxgi::DXGI_ADAPTER_FLAG_SOFTWARE) != 0 { + DeviceType::VirtualGpu + } else { + DeviceType::DiscreteGpu + }, + } + } + DxgiVersion::Dxgi1_2 + | DxgiVersion::Dxgi1_3 + | DxgiVersion::Dxgi1_4 + | DxgiVersion::Dxgi1_5 => { + let mut desc: dxgi1_2::DXGI_ADAPTER_DESC2 = unsafe { mem::zeroed() }; + unsafe { + (*(adapter as *mut dxgi1_2::IDXGIAdapter2)).GetDesc2(&mut desc); + } + + let device_name = { + let len = desc.Description.iter().take_while(|&&c| c != 0).count(); + let name = ::from_wide(&desc.Description[.. 
len]); + name.to_string_lossy().into_owned() + }; + + AdapterInfo { + name: device_name, + vendor: desc.VendorId as usize, + device: desc.DeviceId as usize, + device_type: if (desc.Flags & dxgi::DXGI_ADAPTER_FLAG_SOFTWARE) != 0 { + DeviceType::VirtualGpu + } else { + DeviceType::DiscreteGpu + }, + } + } + } +} + +pub(crate) fn get_adapter( + idx: u32, + factory: *mut dxgi::IDXGIFactory, + version: DxgiVersion, +) -> Result<(ComPtr, AdapterInfo), winerror::HRESULT> { + let adapter = match version { + DxgiVersion::Dxgi1_0 + | DxgiVersion::Dxgi1_2 + | DxgiVersion::Dxgi1_3 + | DxgiVersion::Dxgi1_4 + | DxgiVersion::Dxgi1_5 => enum_adapters1(idx, factory)?, + }; + + let desc = get_adapter_desc(adapter.as_raw(), version); + + Ok((adapter, desc)) +} diff --git a/third_party/rust/gfx-backend-dx11/src/internal.rs b/third_party/rust/gfx-backend-dx11/src/internal.rs index e47aad8ea8c6..e120c003b5da 100644 --- a/third_party/rust/gfx-backend-dx11/src/internal.rs +++ b/third_party/rust/gfx-backend-dx11/src/internal.rs @@ -1,1308 +1,1312 @@ -use hal::pso::{Stage, Viewport}; -use hal::{command, image, pso}; - -use winapi::shared::dxgiformat; -use winapi::shared::minwindef::{FALSE, TRUE}; -use winapi::shared::winerror; -use winapi::um::d3d11; -use winapi::um::d3dcommon; - -use wio::com::ComPtr; - -use std::borrow::Borrow; -use std::{mem, ptr}; - -use smallvec::SmallVec; -use spirv_cross; - -use {conv, shader}; - -use {Buffer, Image, RenderPassCache}; - -#[repr(C)] -struct BufferCopy { - src: u32, - dst: u32, - _padding: [u32; 2], -} - -#[repr(C)] -struct ImageCopy { - src: [u32; 4], - dst: [u32; 4], -} - -#[repr(C)] -struct BufferImageCopy { - buffer_offset: u32, - buffer_size: [u32; 2], - _padding: u32, - image_offset: [u32; 4], - image_extent: [u32; 4], - // actual size of the target image - image_size: [u32; 4], -} - -#[repr(C)] -struct BufferImageCopyInfo { - buffer: BufferCopy, - image: ImageCopy, - buffer_image: BufferImageCopy, -} - -#[repr(C)] -struct BlitInfo { - offset: 
[f32; 2], - extent: [f32; 2], - z: f32, - level: f32, -} - -#[repr(C)] -struct PartialClearInfo { - // transmute between the types, easier than juggling all different kinds of fields.. - data: [u32; 4], -} - -// the threadgroup count we use in our copy shaders -const COPY_THREAD_GROUP_X: u32 = 8; -const COPY_THREAD_GROUP_Y: u32 = 8; - -// Holds everything we need for fallback implementations of features that are not in DX. -// -// TODO: maybe get rid of `Clone`? there's _a lot_ of refcounts here and it is used as a singleton -// anyway :s -// -// TODO: make struct fields more modular and group them up in structs depending on if it is a -// fallback version or not (eg. Option), should make struct definition and -// `new` function smaller -#[derive(Clone, Debug)] -pub struct Internal { - // partial clearing - vs_partial_clear: ComPtr, - ps_partial_clear_float: ComPtr, - ps_partial_clear_uint: ComPtr, - ps_partial_clear_int: ComPtr, - ps_partial_clear_depth: ComPtr, - ps_partial_clear_stencil: ComPtr, - partial_clear_depth_stencil_state: ComPtr, - partial_clear_depth_state: ComPtr, - partial_clear_stencil_state: ComPtr, - - // blitting - vs_blit_2d: ComPtr, - - sampler_nearest: ComPtr, - sampler_linear: ComPtr, - - ps_blit_2d_uint: ComPtr, - ps_blit_2d_int: ComPtr, - ps_blit_2d_float: ComPtr, - - // Image<->Image not covered by `CopySubresourceRegion` - cs_copy_image2d_r8g8_image2d_r16: ComPtr, - cs_copy_image2d_r16_image2d_r8g8: ComPtr, - - cs_copy_image2d_r8g8b8a8_image2d_r32: ComPtr, - cs_copy_image2d_r8g8b8a8_image2d_r16g16: ComPtr, - cs_copy_image2d_r16g16_image2d_r32: ComPtr, - cs_copy_image2d_r16g16_image2d_r8g8b8a8: ComPtr, - cs_copy_image2d_r32_image2d_r16g16: ComPtr, - cs_copy_image2d_r32_image2d_r8g8b8a8: ComPtr, - - // Image -> Buffer - cs_copy_image2d_r32g32b32a32_buffer: ComPtr, - cs_copy_image2d_r32g32_buffer: ComPtr, - cs_copy_image2d_r16g16b16a16_buffer: ComPtr, - cs_copy_image2d_r32_buffer: ComPtr, - cs_copy_image2d_r16g16_buffer: ComPtr, - 
cs_copy_image2d_r8g8b8a8_buffer: ComPtr, - cs_copy_image2d_r16_buffer: ComPtr, - cs_copy_image2d_r8g8_buffer: ComPtr, - cs_copy_image2d_r8_buffer: ComPtr, - cs_copy_image2d_b8g8r8a8_buffer: ComPtr, - - // Buffer -> Image - cs_copy_buffer_image2d_r32g32b32a32: ComPtr, - cs_copy_buffer_image2d_r32g32: ComPtr, - cs_copy_buffer_image2d_r16g16b16a16: ComPtr, - cs_copy_buffer_image2d_r32: ComPtr, - cs_copy_buffer_image2d_r16g16: ComPtr, - cs_copy_buffer_image2d_r8g8b8a8: ComPtr, - cs_copy_buffer_image2d_r16: ComPtr, - cs_copy_buffer_image2d_r8g8: ComPtr, - cs_copy_buffer_image2d_r8: ComPtr, - - // internal constant buffer that is used by internal shaders - internal_buffer: ComPtr, - - // public buffer that is used as intermediate storage for some operations (memory invalidation) - pub working_buffer: ComPtr, - pub working_buffer_size: u64, -} - -fn compile_blob(src: &[u8], entrypoint: &str, stage: Stage) -> ComPtr { - unsafe { - ComPtr::from_raw( - shader::compile_hlsl_shader( - stage, - spirv_cross::hlsl::ShaderModel::V5_0, - entrypoint, - src, - ) - .unwrap(), - ) - } -} - -fn compile_vs( - device: &ComPtr, - src: &[u8], - entrypoint: &str, -) -> ComPtr { - let bytecode = compile_blob(src, entrypoint, Stage::Vertex); - let mut shader = ptr::null_mut(); - let hr = unsafe { - device.CreateVertexShader( - bytecode.GetBufferPointer(), - bytecode.GetBufferSize(), - ptr::null_mut(), - &mut shader as *mut *mut _ as *mut *mut _, - ) - }; - assert_eq!(true, winerror::SUCCEEDED(hr)); - - unsafe { ComPtr::from_raw(shader) } -} - -fn compile_ps( - device: &ComPtr, - src: &[u8], - entrypoint: &str, -) -> ComPtr { - let bytecode = compile_blob(src, entrypoint, Stage::Fragment); - let mut shader = ptr::null_mut(); - let hr = unsafe { - device.CreatePixelShader( - bytecode.GetBufferPointer(), - bytecode.GetBufferSize(), - ptr::null_mut(), - &mut shader as *mut *mut _ as *mut *mut _, - ) - }; - assert_eq!(true, winerror::SUCCEEDED(hr)); - - unsafe { ComPtr::from_raw(shader) } -} - -fn 
compile_cs( - device: &ComPtr, - src: &[u8], - entrypoint: &str, -) -> ComPtr { - let bytecode = compile_blob(src, entrypoint, Stage::Compute); - let mut shader = ptr::null_mut(); - let hr = unsafe { - device.CreateComputeShader( - bytecode.GetBufferPointer(), - bytecode.GetBufferSize(), - ptr::null_mut(), - &mut shader as *mut *mut _ as *mut *mut _, - ) - }; - assert_eq!(true, winerror::SUCCEEDED(hr)); - - unsafe { ComPtr::from_raw(shader) } -} - -impl Internal { - pub fn new(device: &ComPtr) -> Self { - let internal_buffer = { - let desc = d3d11::D3D11_BUFFER_DESC { - ByteWidth: mem::size_of::() as _, - Usage: d3d11::D3D11_USAGE_DYNAMIC, - BindFlags: d3d11::D3D11_BIND_CONSTANT_BUFFER, - CPUAccessFlags: d3d11::D3D11_CPU_ACCESS_WRITE, - MiscFlags: 0, - StructureByteStride: 0, - }; - - let mut buffer = ptr::null_mut(); - let hr = unsafe { - device.CreateBuffer( - &desc, - ptr::null_mut(), - &mut buffer as *mut *mut _ as *mut *mut _, - ) - }; - assert_eq!(true, winerror::SUCCEEDED(hr)); - - unsafe { ComPtr::from_raw(buffer) } - }; - - let (depth_stencil_state, depth_state, stencil_state) = { - let mut depth_state = ptr::null_mut(); - let mut stencil_state = ptr::null_mut(); - let mut depth_stencil_state = ptr::null_mut(); - - let mut desc = d3d11::D3D11_DEPTH_STENCIL_DESC { - DepthEnable: TRUE, - DepthWriteMask: d3d11::D3D11_DEPTH_WRITE_MASK_ALL, - DepthFunc: d3d11::D3D11_COMPARISON_ALWAYS, - StencilEnable: TRUE, - StencilReadMask: 0, - StencilWriteMask: !0, - FrontFace: d3d11::D3D11_DEPTH_STENCILOP_DESC { - StencilFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, - StencilDepthFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, - StencilPassOp: d3d11::D3D11_STENCIL_OP_REPLACE, - StencilFunc: d3d11::D3D11_COMPARISON_ALWAYS, - }, - BackFace: d3d11::D3D11_DEPTH_STENCILOP_DESC { - StencilFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, - StencilDepthFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, - StencilPassOp: d3d11::D3D11_STENCIL_OP_REPLACE, - StencilFunc: d3d11::D3D11_COMPARISON_ALWAYS, - }, - }; - 
- let hr = unsafe { - device.CreateDepthStencilState( - &desc, - &mut depth_stencil_state as *mut *mut _ as *mut *mut _, - ) - }; - assert_eq!(winerror::S_OK, hr); - - desc.DepthEnable = TRUE; - desc.StencilEnable = FALSE; - - let hr = unsafe { - device - .CreateDepthStencilState(&desc, &mut depth_state as *mut *mut _ as *mut *mut _) - }; - assert_eq!(winerror::S_OK, hr); - - desc.DepthEnable = FALSE; - desc.StencilEnable = TRUE; - - let hr = unsafe { - device.CreateDepthStencilState( - &desc, - &mut stencil_state as *mut *mut _ as *mut *mut _, - ) - }; - assert_eq!(winerror::S_OK, hr); - - unsafe { - ( - ComPtr::from_raw(depth_stencil_state), - ComPtr::from_raw(depth_state), - ComPtr::from_raw(stencil_state), - ) - } - }; - - let (sampler_nearest, sampler_linear) = { - let mut desc = d3d11::D3D11_SAMPLER_DESC { - Filter: d3d11::D3D11_FILTER_MIN_MAG_MIP_POINT, - AddressU: d3d11::D3D11_TEXTURE_ADDRESS_CLAMP, - AddressV: d3d11::D3D11_TEXTURE_ADDRESS_CLAMP, - AddressW: d3d11::D3D11_TEXTURE_ADDRESS_CLAMP, - MipLODBias: 0f32, - MaxAnisotropy: 0, - ComparisonFunc: 0, - BorderColor: [0f32; 4], - MinLOD: 0f32, - MaxLOD: d3d11::D3D11_FLOAT32_MAX, - }; - - let mut nearest = ptr::null_mut(); - let mut linear = ptr::null_mut(); - - assert_eq!(winerror::S_OK, unsafe { - device.CreateSamplerState(&desc, &mut nearest as *mut *mut _ as *mut *mut _) - }); - - desc.Filter = d3d11::D3D11_FILTER_MIN_MAG_MIP_LINEAR; - - assert_eq!(winerror::S_OK, unsafe { - device.CreateSamplerState(&desc, &mut linear as *mut *mut _ as *mut *mut _) - }); - - unsafe { (ComPtr::from_raw(nearest), ComPtr::from_raw(linear)) } - }; - - let (working_buffer, working_buffer_size) = { - let working_buffer_size = 1 << 16; - - let desc = d3d11::D3D11_BUFFER_DESC { - ByteWidth: working_buffer_size, - Usage: d3d11::D3D11_USAGE_STAGING, - BindFlags: 0, - CPUAccessFlags: d3d11::D3D11_CPU_ACCESS_READ | d3d11::D3D11_CPU_ACCESS_WRITE, - MiscFlags: 0, - StructureByteStride: 0, - }; - let mut working_buffer = 
ptr::null_mut(); - - assert_eq!(winerror::S_OK, unsafe { - device.CreateBuffer( - &desc, - ptr::null_mut(), - &mut working_buffer as *mut *mut _ as *mut *mut _, - ) - }); - - ( - unsafe { ComPtr::from_raw(working_buffer) }, - working_buffer_size, - ) - }; - - let clear_shaders = include_bytes!("../shaders/clear.hlsl"); - let copy_shaders = include_bytes!("../shaders/copy.hlsl"); - let blit_shaders = include_bytes!("../shaders/blit.hlsl"); - - Internal { - vs_partial_clear: compile_vs(device, clear_shaders, "vs_partial_clear"), - ps_partial_clear_float: compile_ps(device, clear_shaders, "ps_partial_clear_float"), - ps_partial_clear_uint: compile_ps(device, clear_shaders, "ps_partial_clear_uint"), - ps_partial_clear_int: compile_ps(device, clear_shaders, "ps_partial_clear_int"), - ps_partial_clear_depth: compile_ps(device, clear_shaders, "ps_partial_clear_depth"), - ps_partial_clear_stencil: compile_ps(device, clear_shaders, "ps_partial_clear_stencil"), - partial_clear_depth_stencil_state: depth_stencil_state, - partial_clear_depth_state: depth_state, - partial_clear_stencil_state: stencil_state, - - vs_blit_2d: compile_vs(device, blit_shaders, "vs_blit_2d"), - - sampler_nearest, - sampler_linear, - - ps_blit_2d_uint: compile_ps(device, blit_shaders, "ps_blit_2d_uint"), - ps_blit_2d_int: compile_ps(device, blit_shaders, "ps_blit_2d_int"), - ps_blit_2d_float: compile_ps(device, blit_shaders, "ps_blit_2d_float"), - - cs_copy_image2d_r8g8_image2d_r16: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r8g8_image2d_r16", - ), - cs_copy_image2d_r16_image2d_r8g8: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r16_image2d_r8g8", - ), - - cs_copy_image2d_r8g8b8a8_image2d_r32: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r8g8b8a8_image2d_r32", - ), - cs_copy_image2d_r8g8b8a8_image2d_r16g16: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r8g8b8a8_image2d_r16g16", - ), - cs_copy_image2d_r16g16_image2d_r32: compile_cs( - device, - 
copy_shaders, - "cs_copy_image2d_r16g16_image2d_r32", - ), - cs_copy_image2d_r16g16_image2d_r8g8b8a8: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r16g16_image2d_r8g8b8a8", - ), - cs_copy_image2d_r32_image2d_r16g16: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r32_image2d_r16g16", - ), - cs_copy_image2d_r32_image2d_r8g8b8a8: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r32_image2d_r8g8b8a8", - ), - - cs_copy_image2d_r32g32b32a32_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r32g32b32a32_buffer", - ), - cs_copy_image2d_r32g32_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r32g32_buffer", - ), - cs_copy_image2d_r16g16b16a16_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r16g16b16a16_buffer", - ), - cs_copy_image2d_r32_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r32_buffer", - ), - cs_copy_image2d_r16g16_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r16g16_buffer", - ), - cs_copy_image2d_r8g8b8a8_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r8g8b8a8_buffer", - ), - cs_copy_image2d_r16_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r16_buffer", - ), - cs_copy_image2d_r8g8_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r8g8_buffer", - ), - cs_copy_image2d_r8_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_r8_buffer", - ), - cs_copy_image2d_b8g8r8a8_buffer: compile_cs( - device, - copy_shaders, - "cs_copy_image2d_b8g8r8a8_buffer", - ), - - cs_copy_buffer_image2d_r32g32b32a32: compile_cs( - device, - copy_shaders, - "cs_copy_buffer_image2d_r32g32b32a32", - ), - cs_copy_buffer_image2d_r32g32: compile_cs( - device, - copy_shaders, - "cs_copy_buffer_image2d_r32g32", - ), - cs_copy_buffer_image2d_r16g16b16a16: compile_cs( - device, - copy_shaders, - "cs_copy_buffer_image2d_r16g16b16a16", - ), - cs_copy_buffer_image2d_r32: compile_cs( - device, - copy_shaders, - 
"cs_copy_buffer_image2d_r32", - ), - cs_copy_buffer_image2d_r16g16: compile_cs( - device, - copy_shaders, - "cs_copy_buffer_image2d_r16g16", - ), - cs_copy_buffer_image2d_r8g8b8a8: compile_cs( - device, - copy_shaders, - "cs_copy_buffer_image2d_r8g8b8a8", - ), - cs_copy_buffer_image2d_r16: compile_cs( - device, - copy_shaders, - "cs_copy_buffer_image2d_r16", - ), - cs_copy_buffer_image2d_r8g8: compile_cs( - device, - copy_shaders, - "cs_copy_buffer_image2d_r8g8", - ), - cs_copy_buffer_image2d_r8: compile_cs( - device, - copy_shaders, - "cs_copy_buffer_image2d_r8", - ), - - internal_buffer, - working_buffer, - working_buffer_size: working_buffer_size as _, - } - } - - fn map(&mut self, context: &ComPtr) -> *mut u8 { - let mut mapped = unsafe { mem::zeroed::() }; - let hr = unsafe { - context.Map( - self.internal_buffer.as_raw() as _, - 0, - d3d11::D3D11_MAP_WRITE_DISCARD, - 0, - &mut mapped, - ) - }; - - assert_eq!(winerror::S_OK, hr); - - mapped.pData as _ - } - - fn unmap(&mut self, context: &ComPtr) { - unsafe { - context.Unmap(self.internal_buffer.as_raw() as _, 0); - } - } - - fn update_image( - &mut self, - context: &ComPtr, - info: &command::ImageCopy, - ) { - unsafe { - ptr::copy( - &BufferImageCopyInfo { - image: ImageCopy { - src: [ - info.src_offset.x as _, - info.src_offset.y as _, - info.src_offset.z as _, - 0, - ], - dst: [ - info.dst_offset.x as _, - info.dst_offset.y as _, - info.dst_offset.z as _, - 0, - ], - }, - ..mem::zeroed() - }, - self.map(context) as *mut _, - 1, - ) - }; - - self.unmap(context); - } - - fn update_buffer_image( - &mut self, - context: &ComPtr, - info: &command::BufferImageCopy, - image: &Image, - ) { - let size = image.kind.extent(); - - unsafe { - ptr::copy( - &BufferImageCopyInfo { - buffer_image: BufferImageCopy { - buffer_offset: info.buffer_offset as _, - buffer_size: [info.buffer_width, info.buffer_height], - _padding: 0, - image_offset: [ - info.image_offset.x as _, - info.image_offset.y as _, - (info.image_offset.z + 
info.image_layers.layers.start as i32) as _, - 0, - ], - image_extent: [ - info.image_extent.width, - info.image_extent.height, - info.image_extent.depth, - 0, - ], - image_size: [size.width, size.height, size.depth, 0], - }, - ..mem::zeroed() - }, - self.map(context) as *mut _, - 1, - ) - }; - - self.unmap(context); - } - - fn update_blit( - &mut self, - context: &ComPtr, - src: &Image, - info: &command::ImageBlit, - ) { - let (sx, dx) = if info.dst_bounds.start.x > info.dst_bounds.end.x { - ( - info.src_bounds.end.x, - info.src_bounds.start.x - info.src_bounds.end.x, - ) - } else { - ( - info.src_bounds.start.x, - info.src_bounds.end.x - info.src_bounds.start.x, - ) - }; - let (sy, dy) = if info.dst_bounds.start.y > info.dst_bounds.end.y { - ( - info.src_bounds.end.y, - info.src_bounds.start.y - info.src_bounds.end.y, - ) - } else { - ( - info.src_bounds.start.y, - info.src_bounds.end.y - info.src_bounds.start.y, - ) - }; - let image::Extent { width, height, .. } = src.kind.level_extent(info.src_subresource.level); - - unsafe { - ptr::copy( - &BlitInfo { - offset: [sx as f32 / width as f32, sy as f32 / height as f32], - extent: [dx as f32 / width as f32, dy as f32 / height as f32], - z: 0f32, // TODO - level: info.src_subresource.level as _, - }, - self.map(context) as *mut _, - 1, - ) - }; - - self.unmap(context); - } - - fn update_clear_color( - &mut self, - context: &ComPtr, - value: command::ClearColor, - ) { - unsafe { - ptr::copy( - &PartialClearInfo { - data: mem::transmute(value), - }, - self.map(context) as *mut _, - 1, - ) - }; - - self.unmap(context); - } - - fn update_clear_depth_stencil( - &mut self, - context: &ComPtr, - depth: Option, - stencil: Option, - ) { - unsafe { - ptr::copy( - &PartialClearInfo { - data: [ - mem::transmute(depth.unwrap_or(0f32)), - stencil.unwrap_or(0), - 0, - 0, - ], - }, - self.map(context) as *mut _, - 1, - ); - } - - self.unmap(context); - } - - fn find_image_copy_shader( - &self, - src: &Image, - dst: &Image, - ) -> 
Option<*mut d3d11::ID3D11ComputeShader> { - use dxgiformat::*; - - let src_format = src.decomposed_format.copy_srv.unwrap(); - let dst_format = dst.decomposed_format.copy_uav.unwrap(); - - match (src_format, dst_format) { - (DXGI_FORMAT_R8G8_UINT, DXGI_FORMAT_R16_UINT) => { - Some(self.cs_copy_image2d_r8g8_image2d_r16.as_raw()) - } - (DXGI_FORMAT_R16_UINT, DXGI_FORMAT_R8G8_UINT) => { - Some(self.cs_copy_image2d_r16_image2d_r8g8.as_raw()) - } - (DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R32_UINT) => { - Some(self.cs_copy_image2d_r8g8b8a8_image2d_r32.as_raw()) - } - (DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R16G16_UINT) => { - Some(self.cs_copy_image2d_r8g8b8a8_image2d_r16g16.as_raw()) - } - (DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R32_UINT) => { - Some(self.cs_copy_image2d_r16g16_image2d_r32.as_raw()) - } - (DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R8G8B8A8_UINT) => { - Some(self.cs_copy_image2d_r16g16_image2d_r8g8b8a8.as_raw()) - } - (DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R16G16_UINT) => { - Some(self.cs_copy_image2d_r32_image2d_r16g16.as_raw()) - } - (DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R8G8B8A8_UINT) => { - Some(self.cs_copy_image2d_r32_image2d_r8g8b8a8.as_raw()) - } - _ => None, - } - } - - pub fn copy_image_2d( - &mut self, - context: &ComPtr, - src: &Image, - dst: &Image, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - if let Some(shader) = self.find_image_copy_shader(src, dst) { - // Some formats cant go through default path, since they cant - // be cast between formats of different component types (eg. 
- // Rg16 <-> Rgba8) - - // TODO: subresources - let srv = src.internal.copy_srv.clone().unwrap().as_raw(); - - unsafe { - context.CSSetShader(shader, ptr::null_mut(), 0); - context.CSSetConstantBuffers(0, 1, &self.internal_buffer.as_raw()); - context.CSSetShaderResources(0, 1, [srv].as_ptr()); - - for region in regions.into_iter() { - let info = region.borrow(); - self.update_image(context, &info); - - let uav = dst.get_uav(info.dst_subresource.level, 0).unwrap().as_raw(); - context.CSSetUnorderedAccessViews(0, 1, [uav].as_ptr(), ptr::null_mut()); - - context.Dispatch(info.extent.width as u32, info.extent.height as u32, 1); - } - - // unbind external resources - context.CSSetShaderResources(0, 1, [ptr::null_mut(); 1].as_ptr()); - context.CSSetUnorderedAccessViews( - 0, - 1, - [ptr::null_mut(); 1].as_ptr(), - ptr::null_mut(), - ); - } - } else { - // Default copy path - for region in regions.into_iter() { - let info = region.borrow(); - - // TODO: layer subresources - unsafe { - context.CopySubresourceRegion( - dst.internal.raw, - src.calc_subresource(info.src_subresource.level as _, 0), - info.dst_offset.x as _, - info.dst_offset.y as _, - info.dst_offset.z as _, - src.internal.raw, - dst.calc_subresource(info.dst_subresource.level as _, 0), - &d3d11::D3D11_BOX { - left: info.src_offset.x as _, - top: info.src_offset.y as _, - front: info.src_offset.z as _, - right: info.src_offset.x as u32 + info.extent.width as u32, - bottom: info.src_offset.y as u32 + info.extent.height as u32, - back: info.src_offset.z as u32 + info.extent.depth as u32, - }, - ); - } - } - } - } - - fn find_image_to_buffer_shader( - &self, - format: dxgiformat::DXGI_FORMAT, - ) -> Option<(*mut d3d11::ID3D11ComputeShader, u32, u32)> { - use dxgiformat::*; - - match format { - DXGI_FORMAT_R32G32B32A32_UINT => { - Some((self.cs_copy_image2d_r32g32b32a32_buffer.as_raw(), 1, 1)) - } - DXGI_FORMAT_R32G32_UINT => Some((self.cs_copy_image2d_r32g32_buffer.as_raw(), 1, 1)), - 
DXGI_FORMAT_R16G16B16A16_UINT => { - Some((self.cs_copy_image2d_r16g16b16a16_buffer.as_raw(), 1, 1)) - } - DXGI_FORMAT_R32_UINT => Some((self.cs_copy_image2d_r32_buffer.as_raw(), 1, 1)), - DXGI_FORMAT_R16G16_UINT => Some((self.cs_copy_image2d_r16g16_buffer.as_raw(), 1, 1)), - DXGI_FORMAT_R8G8B8A8_UINT => { - Some((self.cs_copy_image2d_r8g8b8a8_buffer.as_raw(), 1, 1)) - } - DXGI_FORMAT_R16_UINT => Some((self.cs_copy_image2d_r16_buffer.as_raw(), 2, 1)), - DXGI_FORMAT_R8G8_UINT => Some((self.cs_copy_image2d_r8g8_buffer.as_raw(), 2, 1)), - DXGI_FORMAT_R8_UINT => Some((self.cs_copy_image2d_r8_buffer.as_raw(), 4, 1)), - DXGI_FORMAT_B8G8R8A8_UNORM => { - Some((self.cs_copy_image2d_b8g8r8a8_buffer.as_raw(), 1, 1)) - } - _ => None, - } - } - - pub fn copy_image_2d_to_buffer( - &mut self, - context: &ComPtr, - src: &Image, - dst: &Buffer, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - let _scope = debug_scope!( - context, - "Image (format={:?},kind={:?}) => Buffer", - src.format, - src.kind - ); - let (shader, scale_x, scale_y) = self - .find_image_to_buffer_shader(src.decomposed_format.copy_srv.unwrap()) - .unwrap(); - - let srv = src.internal.copy_srv.clone().unwrap().as_raw(); - let uav = dst.internal.uav.unwrap(); - let format_desc = src.format.base_format().0.desc(); - let bytes_per_texel = format_desc.bits as u32 / 8; - - unsafe { - context.CSSetShader(shader, ptr::null_mut(), 0); - context.CSSetConstantBuffers(0, 1, &self.internal_buffer.as_raw()); - - context.CSSetShaderResources(0, 1, [srv].as_ptr()); - context.CSSetUnorderedAccessViews(0, 1, [uav].as_ptr(), ptr::null_mut()); - - for copy in regions { - let copy = copy.borrow(); - self.update_buffer_image(context, ©, src); - - debug_marker!(context, "{:?}", copy); - - context.Dispatch( - ((copy.image_extent.width + (COPY_THREAD_GROUP_X - 1)) - / COPY_THREAD_GROUP_X - / scale_x) - .max(1), - ((copy.image_extent.height + (COPY_THREAD_GROUP_X - 1)) - / COPY_THREAD_GROUP_Y - / scale_y) - .max(1), - 
1, - ); - - if let Some(disjoint_cb) = dst.internal.disjoint_cb { - let total_size = copy.image_extent.depth - * (copy.buffer_height * copy.buffer_width * bytes_per_texel); - let copy_box = d3d11::D3D11_BOX { - left: copy.buffer_offset as u32, - top: 0, - front: 0, - right: copy.buffer_offset as u32 + total_size, - bottom: 1, - back: 1, - }; - - context.CopySubresourceRegion( - disjoint_cb as _, - 0, - copy.buffer_offset as _, - 0, - 0, - dst.internal.raw as _, - 0, - ©_box, - ); - } - } - - // unbind external resources - context.CSSetShaderResources(0, 1, [ptr::null_mut(); 1].as_ptr()); - context.CSSetUnorderedAccessViews(0, 1, [ptr::null_mut(); 1].as_ptr(), ptr::null_mut()); - } - } - - fn find_buffer_to_image_shader( - &self, - format: dxgiformat::DXGI_FORMAT, - ) -> Option<(*mut d3d11::ID3D11ComputeShader, u32, u32)> { - use dxgiformat::*; - - match format { - DXGI_FORMAT_R32G32B32A32_UINT => { - Some((self.cs_copy_buffer_image2d_r32g32b32a32.as_raw(), 1, 1)) - } - DXGI_FORMAT_R32G32_UINT => Some((self.cs_copy_buffer_image2d_r32g32.as_raw(), 1, 1)), - DXGI_FORMAT_R16G16B16A16_UINT => { - Some((self.cs_copy_buffer_image2d_r16g16b16a16.as_raw(), 1, 1)) - } - DXGI_FORMAT_R32_UINT => Some((self.cs_copy_buffer_image2d_r32.as_raw(), 1, 1)), - DXGI_FORMAT_R16G16_UINT => Some((self.cs_copy_buffer_image2d_r16g16.as_raw(), 1, 1)), - DXGI_FORMAT_R8G8B8A8_UINT => { - Some((self.cs_copy_buffer_image2d_r8g8b8a8.as_raw(), 1, 1)) - } - DXGI_FORMAT_R16_UINT => Some((self.cs_copy_buffer_image2d_r16.as_raw(), 2, 1)), - DXGI_FORMAT_R8G8_UINT => Some((self.cs_copy_buffer_image2d_r8g8.as_raw(), 2, 1)), - DXGI_FORMAT_R8_UINT => Some((self.cs_copy_buffer_image2d_r8.as_raw(), 4, 1)), - _ => None, - } - } - - pub fn copy_buffer_to_image_2d( - &mut self, - context: &ComPtr, - src: &Buffer, - dst: &Image, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - let _scope = debug_scope!( - context, - "Buffer => Image (format={:?},kind={:?})", - dst.format, - dst.kind - ); - // 
NOTE: we have two separate paths for Buffer -> Image transfers. we need to special case - // uploads to compressed formats through `UpdateSubresource` since we cannot get a - // UAV of any compressed format. - - let format_desc = dst.format.base_format().0.desc(); - if format_desc.is_compressed() { - // we dont really care about non-4x4 block formats.. - assert_eq!(format_desc.dim, (4, 4)); - assert!(!src.host_ptr.is_null()); - - for copy in regions { - let info = copy.borrow(); - - let bytes_per_texel = format_desc.bits as u32 / 8; - - let row_pitch = bytes_per_texel * info.image_extent.width / 4; - let depth_pitch = row_pitch * info.image_extent.height / 4; - - unsafe { - context.UpdateSubresource( - dst.internal.raw, - dst.calc_subresource( - info.image_layers.level as _, - info.image_layers.layers.start as _, - ), - &d3d11::D3D11_BOX { - left: info.image_offset.x as _, - top: info.image_offset.y as _, - front: info.image_offset.z as _, - right: info.image_offset.x as u32 + info.image_extent.width, - bottom: info.image_offset.y as u32 + info.image_extent.height, - back: info.image_offset.z as u32 + info.image_extent.depth, - }, - src.host_ptr - .offset(src.bound_range.start as isize + info.buffer_offset as isize) - as _, - row_pitch, - depth_pitch, - ); - } - } - } else { - let (shader, scale_x, scale_y) = self - .find_buffer_to_image_shader(dst.decomposed_format.copy_uav.unwrap()) - .unwrap(); - - let srv = src.internal.srv.unwrap(); - - unsafe { - context.CSSetShader(shader, ptr::null_mut(), 0); - context.CSSetConstantBuffers(0, 1, &self.internal_buffer.as_raw()); - context.CSSetShaderResources(0, 1, [srv].as_ptr()); - - for copy in regions { - let info = copy.borrow(); - self.update_buffer_image(context, &info, dst); - - debug_marker!(context, "{:?}", info); - - // TODO: multiple layers? do we introduce a stride and do multiple dispatch - // calls or handle this in the shader? 
(use z component in dispatch call - // - // NOTE: right now our copy UAV is a 2D array, and we set the layer in the - // `update_buffer_image` call above - let uav = dst - .get_uav( - info.image_layers.level, - 0, /*info.image_layers.layers.start*/ - ) - .unwrap() - .as_raw(); - context.CSSetUnorderedAccessViews(0, 1, [uav].as_ptr(), ptr::null_mut()); - - context.Dispatch( - ((info.image_extent.width + (COPY_THREAD_GROUP_X - 1)) - / COPY_THREAD_GROUP_X - / scale_x) - .max(1), - ((info.image_extent.height + (COPY_THREAD_GROUP_X - 1)) - / COPY_THREAD_GROUP_Y - / scale_y) - .max(1), - 1, - ); - } - - // unbind external resources - context.CSSetShaderResources(0, 1, [ptr::null_mut(); 1].as_ptr()); - context.CSSetUnorderedAccessViews( - 0, - 1, - [ptr::null_mut(); 1].as_ptr(), - ptr::null_mut(), - ); - } - } - } - - fn find_blit_shader(&self, src: &Image) -> Option<*mut d3d11::ID3D11PixelShader> { - use format::ChannelType as Ct; - - match src.format.base_format().1 { - Ct::Uint => Some(self.ps_blit_2d_uint.as_raw()), - Ct::Sint => Some(self.ps_blit_2d_int.as_raw()), - Ct::Unorm | Ct::Snorm | Ct::Sfloat | Ct::Srgb => Some(self.ps_blit_2d_float.as_raw()), - Ct::Ufloat | Ct::Uscaled | Ct::Sscaled => None, - } - } - - pub fn blit_2d_image( - &mut self, - context: &ComPtr, - src: &Image, - dst: &Image, - filter: image::Filter, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - use std::cmp; - - let _scope = debug_scope!( - context, - "Blit: Image (format={:?},kind={:?}) => Image (format={:?},kind={:?})", - src.format, - src.kind, - dst.format, - dst.kind - ); - - let shader = self.find_blit_shader(src).unwrap(); - - let srv = src.internal.srv.clone().unwrap().as_raw(); - - unsafe { - context.IASetPrimitiveTopology(d3dcommon::D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); - context.VSSetShader(self.vs_blit_2d.as_raw(), ptr::null_mut(), 0); - context.VSSetConstantBuffers(0, 1, [self.internal_buffer.as_raw()].as_ptr()); - context.PSSetShader(shader, ptr::null_mut(), 
0); - context.PSSetShaderResources(0, 1, [srv].as_ptr()); - context.PSSetSamplers( - 0, - 1, - match filter { - image::Filter::Nearest => [self.sampler_nearest.as_raw()], - image::Filter::Linear => [self.sampler_linear.as_raw()], - } - .as_ptr(), - ); - - for region in regions { - let region = region.borrow(); - self.update_blit(context, src, ®ion); - - // TODO: more layers - let rtv = dst - .get_rtv( - region.dst_subresource.level, - region.dst_subresource.layers.start, - ) - .unwrap() - .as_raw(); - - context.RSSetViewports( - 1, - [d3d11::D3D11_VIEWPORT { - TopLeftX: cmp::min(region.dst_bounds.start.x, region.dst_bounds.end.x) as _, - TopLeftY: cmp::min(region.dst_bounds.start.y, region.dst_bounds.end.y) as _, - Width: (region.dst_bounds.end.x - region.dst_bounds.start.x).abs() as _, - Height: (region.dst_bounds.end.y - region.dst_bounds.start.y).abs() as _, - MinDepth: 0.0f32, - MaxDepth: 1.0f32, - }] - .as_ptr(), - ); - context.OMSetRenderTargets(1, [rtv].as_ptr(), ptr::null_mut()); - context.Draw(3, 0); - } - - context.PSSetShaderResources(0, 1, [ptr::null_mut()].as_ptr()); - context.OMSetRenderTargets(1, [ptr::null_mut()].as_ptr(), ptr::null_mut()); - } - } - - pub fn clear_attachments( - &mut self, - context: &ComPtr, - clears: T, - rects: U, - cache: &RenderPassCache, - ) where - T: IntoIterator, - T::Item: Borrow, - U: IntoIterator, - U::Item: Borrow, - { - use hal::format::ChannelType as Ct; - let _scope = debug_scope!(context, "ClearAttachments"); - - let clear_rects: SmallVec<[pso::ClearRect; 8]> = rects - .into_iter() - .map(|rect| rect.borrow().clone()) - .collect(); - - unsafe { - context.IASetPrimitiveTopology(d3dcommon::D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); - context.IASetInputLayout(ptr::null_mut()); - context.VSSetShader(self.vs_partial_clear.as_raw(), ptr::null_mut(), 0); - context.PSSetConstantBuffers(0, 1, [self.internal_buffer.as_raw()].as_ptr()); - } - - let subpass = &cache.render_pass.subpasses[cache.current_subpass]; - - for clear in 
clears { - let clear = clear.borrow(); - - let _scope = debug_scope!(context, "{:?}", clear); - - match *clear { - command::AttachmentClear::Color { index, value } => { - self.update_clear_color(context, value); - - let attachment = { - let rtv_id = subpass.color_attachments[index]; - &cache.framebuffer.attachments[rtv_id.0] - }; - - unsafe { - context.OMSetRenderTargets( - 1, - [attachment.rtv_handle.clone().unwrap().as_raw()].as_ptr(), - ptr::null_mut(), - ); - } - - let shader = match attachment.format.base_format().1 { - Ct::Uint => self.ps_partial_clear_uint.as_raw(), - Ct::Sint => self.ps_partial_clear_int.as_raw(), - _ => self.ps_partial_clear_float.as_raw(), - }; - unsafe { context.PSSetShader(shader, ptr::null_mut(), 0) }; - - for clear_rect in &clear_rects { - let viewport = conv::map_viewport(&Viewport { - rect: clear_rect.rect, - depth: 0f32 .. 1f32, - }); - - debug_marker!(context, "{:?}", clear_rect.rect); - - unsafe { - context.RSSetViewports(1, [viewport].as_ptr()); - context.Draw(3, 0); - } - } - } - command::AttachmentClear::DepthStencil { depth, stencil } => { - self.update_clear_depth_stencil(context, depth, stencil); - - let attachment = { - let dsv_id = subpass.depth_stencil_attachment.unwrap(); - &cache.framebuffer.attachments[dsv_id.0] - }; - - unsafe { - match (depth, stencil) { - (Some(_), Some(stencil)) => { - context.OMSetDepthStencilState( - self.partial_clear_depth_stencil_state.as_raw(), - stencil, - ); - context.PSSetShader( - self.ps_partial_clear_depth.as_raw(), - ptr::null_mut(), - 0, - ); - } - - (Some(_), None) => { - context.OMSetDepthStencilState( - self.partial_clear_depth_state.as_raw(), - 0, - ); - context.PSSetShader( - self.ps_partial_clear_depth.as_raw(), - ptr::null_mut(), - 0, - ); - } - - (None, Some(stencil)) => { - context.OMSetDepthStencilState( - self.partial_clear_stencil_state.as_raw(), - stencil, - ); - context.PSSetShader( - self.ps_partial_clear_stencil.as_raw(), - ptr::null_mut(), - 0, - ); - } - (None, 
None) => {} - } - - context.OMSetRenderTargets( - 0, - ptr::null_mut(), - attachment.dsv_handle.clone().unwrap().as_raw(), - ); - context.PSSetShader( - self.ps_partial_clear_depth.as_raw(), - ptr::null_mut(), - 0, - ); - } - - for clear_rect in &clear_rects { - let viewport = conv::map_viewport(&Viewport { - rect: clear_rect.rect, - depth: 0f32 .. 1f32, - }); - - unsafe { - context.RSSetViewports(1, [viewport].as_ptr()); - context.Draw(3, 0); - } - } - } - } - } - } -} +use hal::{ + command, + image, + pso, + pso::{Stage, Viewport}, +}; + +use winapi::{ + shared::{ + dxgiformat, + minwindef::{FALSE, TRUE}, + winerror, + }, + um::{d3d11, d3dcommon}, +}; + +use wio::com::ComPtr; + +use std::{borrow::Borrow, mem, ptr}; + +use smallvec::SmallVec; +use spirv_cross; + +use crate::{conv, shader, Buffer, Image, RenderPassCache}; + +#[repr(C)] +struct BufferCopy { + src: u32, + dst: u32, + _padding: [u32; 2], +} + +#[repr(C)] +struct ImageCopy { + src: [u32; 4], + dst: [u32; 4], +} + +#[repr(C)] +struct BufferImageCopy { + buffer_offset: u32, + buffer_size: [u32; 2], + _padding: u32, + image_offset: [u32; 4], + image_extent: [u32; 4], + // actual size of the target image + image_size: [u32; 4], +} + +#[repr(C)] +struct BufferImageCopyInfo { + buffer: BufferCopy, + image: ImageCopy, + buffer_image: BufferImageCopy, +} + +#[repr(C)] +struct BlitInfo { + offset: [f32; 2], + extent: [f32; 2], + z: f32, + level: f32, +} + +#[repr(C)] +struct PartialClearInfo { + // transmute between the types, easier than juggling all different kinds of fields.. + data: [u32; 4], +} + +// the threadgroup count we use in our copy shaders +const COPY_THREAD_GROUP_X: u32 = 8; +const COPY_THREAD_GROUP_Y: u32 = 8; + +// Holds everything we need for fallback implementations of features that are not in DX. +// +// TODO: maybe get rid of `Clone`? 
there's _a lot_ of refcounts here and it is used as a singleton +// anyway :s +// +// TODO: make struct fields more modular and group them up in structs depending on if it is a +// fallback version or not (eg. Option), should make struct definition and +// `new` function smaller +#[derive(Clone, Debug)] +pub struct Internal { + // partial clearing + vs_partial_clear: ComPtr, + ps_partial_clear_float: ComPtr, + ps_partial_clear_uint: ComPtr, + ps_partial_clear_int: ComPtr, + ps_partial_clear_depth: ComPtr, + ps_partial_clear_stencil: ComPtr, + partial_clear_depth_stencil_state: ComPtr, + partial_clear_depth_state: ComPtr, + partial_clear_stencil_state: ComPtr, + + // blitting + vs_blit_2d: ComPtr, + + sampler_nearest: ComPtr, + sampler_linear: ComPtr, + + ps_blit_2d_uint: ComPtr, + ps_blit_2d_int: ComPtr, + ps_blit_2d_float: ComPtr, + + // Image<->Image not covered by `CopySubresourceRegion` + cs_copy_image2d_r8g8_image2d_r16: ComPtr, + cs_copy_image2d_r16_image2d_r8g8: ComPtr, + + cs_copy_image2d_r8g8b8a8_image2d_r32: ComPtr, + cs_copy_image2d_r8g8b8a8_image2d_r16g16: ComPtr, + cs_copy_image2d_r16g16_image2d_r32: ComPtr, + cs_copy_image2d_r16g16_image2d_r8g8b8a8: ComPtr, + cs_copy_image2d_r32_image2d_r16g16: ComPtr, + cs_copy_image2d_r32_image2d_r8g8b8a8: ComPtr, + + // Image -> Buffer + cs_copy_image2d_r32g32b32a32_buffer: ComPtr, + cs_copy_image2d_r32g32_buffer: ComPtr, + cs_copy_image2d_r16g16b16a16_buffer: ComPtr, + cs_copy_image2d_r32_buffer: ComPtr, + cs_copy_image2d_r16g16_buffer: ComPtr, + cs_copy_image2d_r8g8b8a8_buffer: ComPtr, + cs_copy_image2d_r16_buffer: ComPtr, + cs_copy_image2d_r8g8_buffer: ComPtr, + cs_copy_image2d_r8_buffer: ComPtr, + cs_copy_image2d_b8g8r8a8_buffer: ComPtr, + + // Buffer -> Image + cs_copy_buffer_image2d_r32g32b32a32: ComPtr, + cs_copy_buffer_image2d_r32g32: ComPtr, + cs_copy_buffer_image2d_r16g16b16a16: ComPtr, + cs_copy_buffer_image2d_r32: ComPtr, + cs_copy_buffer_image2d_r16g16: ComPtr, + cs_copy_buffer_image2d_r8g8b8a8: 
ComPtr, + cs_copy_buffer_image2d_r16: ComPtr, + cs_copy_buffer_image2d_r8g8: ComPtr, + cs_copy_buffer_image2d_r8: ComPtr, + + // internal constant buffer that is used by internal shaders + internal_buffer: ComPtr, + + // public buffer that is used as intermediate storage for some operations (memory invalidation) + pub working_buffer: ComPtr, + pub working_buffer_size: u64, +} + +fn compile_blob(src: &[u8], entrypoint: &str, stage: Stage) -> ComPtr { + unsafe { + ComPtr::from_raw( + shader::compile_hlsl_shader( + stage, + spirv_cross::hlsl::ShaderModel::V5_0, + entrypoint, + src, + ) + .unwrap(), + ) + } +} + +fn compile_vs( + device: &ComPtr, + src: &[u8], + entrypoint: &str, +) -> ComPtr { + let bytecode = compile_blob(src, entrypoint, Stage::Vertex); + let mut shader = ptr::null_mut(); + let hr = unsafe { + device.CreateVertexShader( + bytecode.GetBufferPointer(), + bytecode.GetBufferSize(), + ptr::null_mut(), + &mut shader as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(true, winerror::SUCCEEDED(hr)); + + unsafe { ComPtr::from_raw(shader) } +} + +fn compile_ps( + device: &ComPtr, + src: &[u8], + entrypoint: &str, +) -> ComPtr { + let bytecode = compile_blob(src, entrypoint, Stage::Fragment); + let mut shader = ptr::null_mut(); + let hr = unsafe { + device.CreatePixelShader( + bytecode.GetBufferPointer(), + bytecode.GetBufferSize(), + ptr::null_mut(), + &mut shader as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(true, winerror::SUCCEEDED(hr)); + + unsafe { ComPtr::from_raw(shader) } +} + +fn compile_cs( + device: &ComPtr, + src: &[u8], + entrypoint: &str, +) -> ComPtr { + let bytecode = compile_blob(src, entrypoint, Stage::Compute); + let mut shader = ptr::null_mut(); + let hr = unsafe { + device.CreateComputeShader( + bytecode.GetBufferPointer(), + bytecode.GetBufferSize(), + ptr::null_mut(), + &mut shader as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(true, winerror::SUCCEEDED(hr)); + + unsafe { ComPtr::from_raw(shader) } +} + +impl Internal { + 
pub fn new(device: &ComPtr) -> Self { + let internal_buffer = { + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: mem::size_of::() as _, + Usage: d3d11::D3D11_USAGE_DYNAMIC, + BindFlags: d3d11::D3D11_BIND_CONSTANT_BUFFER, + CPUAccessFlags: d3d11::D3D11_CPU_ACCESS_WRITE, + MiscFlags: 0, + StructureByteStride: 0, + }; + + let mut buffer = ptr::null_mut(); + let hr = unsafe { + device.CreateBuffer( + &desc, + ptr::null_mut(), + &mut buffer as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(true, winerror::SUCCEEDED(hr)); + + unsafe { ComPtr::from_raw(buffer) } + }; + + let (depth_stencil_state, depth_state, stencil_state) = { + let mut depth_state = ptr::null_mut(); + let mut stencil_state = ptr::null_mut(); + let mut depth_stencil_state = ptr::null_mut(); + + let mut desc = d3d11::D3D11_DEPTH_STENCIL_DESC { + DepthEnable: TRUE, + DepthWriteMask: d3d11::D3D11_DEPTH_WRITE_MASK_ALL, + DepthFunc: d3d11::D3D11_COMPARISON_ALWAYS, + StencilEnable: TRUE, + StencilReadMask: 0, + StencilWriteMask: !0, + FrontFace: d3d11::D3D11_DEPTH_STENCILOP_DESC { + StencilFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilDepthFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilPassOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilFunc: d3d11::D3D11_COMPARISON_ALWAYS, + }, + BackFace: d3d11::D3D11_DEPTH_STENCILOP_DESC { + StencilFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilDepthFailOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilPassOp: d3d11::D3D11_STENCIL_OP_REPLACE, + StencilFunc: d3d11::D3D11_COMPARISON_ALWAYS, + }, + }; + + let hr = unsafe { + device.CreateDepthStencilState( + &desc, + &mut depth_stencil_state as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(winerror::S_OK, hr); + + desc.DepthEnable = TRUE; + desc.StencilEnable = FALSE; + + let hr = unsafe { + device + .CreateDepthStencilState(&desc, &mut depth_state as *mut *mut _ as *mut *mut _) + }; + assert_eq!(winerror::S_OK, hr); + + desc.DepthEnable = FALSE; + desc.StencilEnable = TRUE; + + let hr = unsafe { + 
device.CreateDepthStencilState( + &desc, + &mut stencil_state as *mut *mut _ as *mut *mut _, + ) + }; + assert_eq!(winerror::S_OK, hr); + + unsafe { + ( + ComPtr::from_raw(depth_stencil_state), + ComPtr::from_raw(depth_state), + ComPtr::from_raw(stencil_state), + ) + } + }; + + let (sampler_nearest, sampler_linear) = { + let mut desc = d3d11::D3D11_SAMPLER_DESC { + Filter: d3d11::D3D11_FILTER_MIN_MAG_MIP_POINT, + AddressU: d3d11::D3D11_TEXTURE_ADDRESS_CLAMP, + AddressV: d3d11::D3D11_TEXTURE_ADDRESS_CLAMP, + AddressW: d3d11::D3D11_TEXTURE_ADDRESS_CLAMP, + MipLODBias: 0f32, + MaxAnisotropy: 0, + ComparisonFunc: 0, + BorderColor: [0f32; 4], + MinLOD: 0f32, + MaxLOD: d3d11::D3D11_FLOAT32_MAX, + }; + + let mut nearest = ptr::null_mut(); + let mut linear = ptr::null_mut(); + + assert_eq!(winerror::S_OK, unsafe { + device.CreateSamplerState(&desc, &mut nearest as *mut *mut _ as *mut *mut _) + }); + + desc.Filter = d3d11::D3D11_FILTER_MIN_MAG_MIP_LINEAR; + + assert_eq!(winerror::S_OK, unsafe { + device.CreateSamplerState(&desc, &mut linear as *mut *mut _ as *mut *mut _) + }); + + unsafe { (ComPtr::from_raw(nearest), ComPtr::from_raw(linear)) } + }; + + let (working_buffer, working_buffer_size) = { + let working_buffer_size = 1 << 16; + + let desc = d3d11::D3D11_BUFFER_DESC { + ByteWidth: working_buffer_size, + Usage: d3d11::D3D11_USAGE_STAGING, + BindFlags: 0, + CPUAccessFlags: d3d11::D3D11_CPU_ACCESS_READ | d3d11::D3D11_CPU_ACCESS_WRITE, + MiscFlags: 0, + StructureByteStride: 0, + }; + let mut working_buffer = ptr::null_mut(); + + assert_eq!(winerror::S_OK, unsafe { + device.CreateBuffer( + &desc, + ptr::null_mut(), + &mut working_buffer as *mut *mut _ as *mut *mut _, + ) + }); + + ( + unsafe { ComPtr::from_raw(working_buffer) }, + working_buffer_size, + ) + }; + + let clear_shaders = include_bytes!("../shaders/clear.hlsl"); + let copy_shaders = include_bytes!("../shaders/copy.hlsl"); + let blit_shaders = include_bytes!("../shaders/blit.hlsl"); + + Internal { + 
vs_partial_clear: compile_vs(device, clear_shaders, "vs_partial_clear"), + ps_partial_clear_float: compile_ps(device, clear_shaders, "ps_partial_clear_float"), + ps_partial_clear_uint: compile_ps(device, clear_shaders, "ps_partial_clear_uint"), + ps_partial_clear_int: compile_ps(device, clear_shaders, "ps_partial_clear_int"), + ps_partial_clear_depth: compile_ps(device, clear_shaders, "ps_partial_clear_depth"), + ps_partial_clear_stencil: compile_ps(device, clear_shaders, "ps_partial_clear_stencil"), + partial_clear_depth_stencil_state: depth_stencil_state, + partial_clear_depth_state: depth_state, + partial_clear_stencil_state: stencil_state, + + vs_blit_2d: compile_vs(device, blit_shaders, "vs_blit_2d"), + + sampler_nearest, + sampler_linear, + + ps_blit_2d_uint: compile_ps(device, blit_shaders, "ps_blit_2d_uint"), + ps_blit_2d_int: compile_ps(device, blit_shaders, "ps_blit_2d_int"), + ps_blit_2d_float: compile_ps(device, blit_shaders, "ps_blit_2d_float"), + + cs_copy_image2d_r8g8_image2d_r16: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8g8_image2d_r16", + ), + cs_copy_image2d_r16_image2d_r8g8: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16_image2d_r8g8", + ), + + cs_copy_image2d_r8g8b8a8_image2d_r32: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8g8b8a8_image2d_r32", + ), + cs_copy_image2d_r8g8b8a8_image2d_r16g16: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8g8b8a8_image2d_r16g16", + ), + cs_copy_image2d_r16g16_image2d_r32: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16g16_image2d_r32", + ), + cs_copy_image2d_r16g16_image2d_r8g8b8a8: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16g16_image2d_r8g8b8a8", + ), + cs_copy_image2d_r32_image2d_r16g16: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32_image2d_r16g16", + ), + cs_copy_image2d_r32_image2d_r8g8b8a8: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32_image2d_r8g8b8a8", + ), + + 
cs_copy_image2d_r32g32b32a32_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32g32b32a32_buffer", + ), + cs_copy_image2d_r32g32_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32g32_buffer", + ), + cs_copy_image2d_r16g16b16a16_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16g16b16a16_buffer", + ), + cs_copy_image2d_r32_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r32_buffer", + ), + cs_copy_image2d_r16g16_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16g16_buffer", + ), + cs_copy_image2d_r8g8b8a8_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8g8b8a8_buffer", + ), + cs_copy_image2d_r16_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r16_buffer", + ), + cs_copy_image2d_r8g8_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8g8_buffer", + ), + cs_copy_image2d_r8_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_r8_buffer", + ), + cs_copy_image2d_b8g8r8a8_buffer: compile_cs( + device, + copy_shaders, + "cs_copy_image2d_b8g8r8a8_buffer", + ), + + cs_copy_buffer_image2d_r32g32b32a32: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r32g32b32a32", + ), + cs_copy_buffer_image2d_r32g32: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r32g32", + ), + cs_copy_buffer_image2d_r16g16b16a16: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r16g16b16a16", + ), + cs_copy_buffer_image2d_r32: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r32", + ), + cs_copy_buffer_image2d_r16g16: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r16g16", + ), + cs_copy_buffer_image2d_r8g8b8a8: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r8g8b8a8", + ), + cs_copy_buffer_image2d_r16: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r16", + ), + cs_copy_buffer_image2d_r8g8: compile_cs( + device, + copy_shaders, + 
"cs_copy_buffer_image2d_r8g8", + ), + cs_copy_buffer_image2d_r8: compile_cs( + device, + copy_shaders, + "cs_copy_buffer_image2d_r8", + ), + + internal_buffer, + working_buffer, + working_buffer_size: working_buffer_size as _, + } + } + + fn map(&mut self, context: &ComPtr) -> *mut u8 { + let mut mapped = unsafe { mem::zeroed::() }; + let hr = unsafe { + context.Map( + self.internal_buffer.as_raw() as _, + 0, + d3d11::D3D11_MAP_WRITE_DISCARD, + 0, + &mut mapped, + ) + }; + + assert_eq!(winerror::S_OK, hr); + + mapped.pData as _ + } + + fn unmap(&mut self, context: &ComPtr) { + unsafe { + context.Unmap(self.internal_buffer.as_raw() as _, 0); + } + } + + fn update_image( + &mut self, + context: &ComPtr, + info: &command::ImageCopy, + ) { + unsafe { + ptr::copy( + &BufferImageCopyInfo { + image: ImageCopy { + src: [ + info.src_offset.x as _, + info.src_offset.y as _, + info.src_offset.z as _, + 0, + ], + dst: [ + info.dst_offset.x as _, + info.dst_offset.y as _, + info.dst_offset.z as _, + 0, + ], + }, + ..mem::zeroed() + }, + self.map(context) as *mut _, + 1, + ) + }; + + self.unmap(context); + } + + fn update_buffer_image( + &mut self, + context: &ComPtr, + info: &command::BufferImageCopy, + image: &Image, + ) { + let size = image.kind.extent(); + + unsafe { + ptr::copy( + &BufferImageCopyInfo { + buffer_image: BufferImageCopy { + buffer_offset: info.buffer_offset as _, + buffer_size: [info.buffer_width, info.buffer_height], + _padding: 0, + image_offset: [ + info.image_offset.x as _, + info.image_offset.y as _, + (info.image_offset.z + info.image_layers.layers.start as i32) as _, + 0, + ], + image_extent: [ + info.image_extent.width, + info.image_extent.height, + info.image_extent.depth, + 0, + ], + image_size: [size.width, size.height, size.depth, 0], + }, + ..mem::zeroed() + }, + self.map(context) as *mut _, + 1, + ) + }; + + self.unmap(context); + } + + fn update_blit( + &mut self, + context: &ComPtr, + src: &Image, + info: &command::ImageBlit, + ) { + let (sx, 
dx) = if info.dst_bounds.start.x > info.dst_bounds.end.x { + ( + info.src_bounds.end.x, + info.src_bounds.start.x - info.src_bounds.end.x, + ) + } else { + ( + info.src_bounds.start.x, + info.src_bounds.end.x - info.src_bounds.start.x, + ) + }; + let (sy, dy) = if info.dst_bounds.start.y > info.dst_bounds.end.y { + ( + info.src_bounds.end.y, + info.src_bounds.start.y - info.src_bounds.end.y, + ) + } else { + ( + info.src_bounds.start.y, + info.src_bounds.end.y - info.src_bounds.start.y, + ) + }; + let image::Extent { width, height, .. } = src.kind.level_extent(info.src_subresource.level); + + unsafe { + ptr::copy( + &BlitInfo { + offset: [sx as f32 / width as f32, sy as f32 / height as f32], + extent: [dx as f32 / width as f32, dy as f32 / height as f32], + z: 0f32, // TODO + level: info.src_subresource.level as _, + }, + self.map(context) as *mut _, + 1, + ) + }; + + self.unmap(context); + } + + fn update_clear_color( + &mut self, + context: &ComPtr, + value: command::ClearColor, + ) { + unsafe { + ptr::copy( + &PartialClearInfo { + data: mem::transmute(value), + }, + self.map(context) as *mut _, + 1, + ) + }; + + self.unmap(context); + } + + fn update_clear_depth_stencil( + &mut self, + context: &ComPtr, + depth: Option, + stencil: Option, + ) { + unsafe { + ptr::copy( + &PartialClearInfo { + data: [ + mem::transmute(depth.unwrap_or(0f32)), + stencil.unwrap_or(0), + 0, + 0, + ], + }, + self.map(context) as *mut _, + 1, + ); + } + + self.unmap(context); + } + + fn find_image_copy_shader( + &self, + src: &Image, + dst: &Image, + ) -> Option<*mut d3d11::ID3D11ComputeShader> { + use dxgiformat::*; + + let src_format = src.decomposed_format.copy_srv.unwrap(); + let dst_format = dst.decomposed_format.copy_uav.unwrap(); + + match (src_format, dst_format) { + (DXGI_FORMAT_R8G8_UINT, DXGI_FORMAT_R16_UINT) => { + Some(self.cs_copy_image2d_r8g8_image2d_r16.as_raw()) + } + (DXGI_FORMAT_R16_UINT, DXGI_FORMAT_R8G8_UINT) => { + 
Some(self.cs_copy_image2d_r16_image2d_r8g8.as_raw()) + } + (DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R32_UINT) => { + Some(self.cs_copy_image2d_r8g8b8a8_image2d_r32.as_raw()) + } + (DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R16G16_UINT) => { + Some(self.cs_copy_image2d_r8g8b8a8_image2d_r16g16.as_raw()) + } + (DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R32_UINT) => { + Some(self.cs_copy_image2d_r16g16_image2d_r32.as_raw()) + } + (DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R8G8B8A8_UINT) => { + Some(self.cs_copy_image2d_r16g16_image2d_r8g8b8a8.as_raw()) + } + (DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R16G16_UINT) => { + Some(self.cs_copy_image2d_r32_image2d_r16g16.as_raw()) + } + (DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R8G8B8A8_UINT) => { + Some(self.cs_copy_image2d_r32_image2d_r8g8b8a8.as_raw()) + } + _ => None, + } + } + + pub fn copy_image_2d( + &mut self, + context: &ComPtr, + src: &Image, + dst: &Image, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + if let Some(shader) = self.find_image_copy_shader(src, dst) { + // Some formats cant go through default path, since they cant + // be cast between formats of different component types (eg. 
+ // Rg16 <-> Rgba8) + + // TODO: subresources + let srv = src.internal.copy_srv.clone().unwrap().as_raw(); + + unsafe { + context.CSSetShader(shader, ptr::null_mut(), 0); + context.CSSetConstantBuffers(0, 1, &self.internal_buffer.as_raw()); + context.CSSetShaderResources(0, 1, [srv].as_ptr()); + + for region in regions.into_iter() { + let info = region.borrow(); + self.update_image(context, &info); + + let uav = dst.get_uav(info.dst_subresource.level, 0).unwrap().as_raw(); + context.CSSetUnorderedAccessViews(0, 1, [uav].as_ptr(), ptr::null_mut()); + + context.Dispatch(info.extent.width as u32, info.extent.height as u32, 1); + } + + // unbind external resources + context.CSSetShaderResources(0, 1, [ptr::null_mut(); 1].as_ptr()); + context.CSSetUnorderedAccessViews( + 0, + 1, + [ptr::null_mut(); 1].as_ptr(), + ptr::null_mut(), + ); + } + } else { + // Default copy path + for region in regions.into_iter() { + let info = region.borrow(); + + // TODO: layer subresources + unsafe { + context.CopySubresourceRegion( + dst.internal.raw, + src.calc_subresource(info.src_subresource.level as _, 0), + info.dst_offset.x as _, + info.dst_offset.y as _, + info.dst_offset.z as _, + src.internal.raw, + dst.calc_subresource(info.dst_subresource.level as _, 0), + &d3d11::D3D11_BOX { + left: info.src_offset.x as _, + top: info.src_offset.y as _, + front: info.src_offset.z as _, + right: info.src_offset.x as u32 + info.extent.width as u32, + bottom: info.src_offset.y as u32 + info.extent.height as u32, + back: info.src_offset.z as u32 + info.extent.depth as u32, + }, + ); + } + } + } + } + + fn find_image_to_buffer_shader( + &self, + format: dxgiformat::DXGI_FORMAT, + ) -> Option<(*mut d3d11::ID3D11ComputeShader, u32, u32)> { + use dxgiformat::*; + + match format { + DXGI_FORMAT_R32G32B32A32_UINT => { + Some((self.cs_copy_image2d_r32g32b32a32_buffer.as_raw(), 1, 1)) + } + DXGI_FORMAT_R32G32_UINT => Some((self.cs_copy_image2d_r32g32_buffer.as_raw(), 1, 1)), + 
DXGI_FORMAT_R16G16B16A16_UINT => { + Some((self.cs_copy_image2d_r16g16b16a16_buffer.as_raw(), 1, 1)) + } + DXGI_FORMAT_R32_UINT => Some((self.cs_copy_image2d_r32_buffer.as_raw(), 1, 1)), + DXGI_FORMAT_R16G16_UINT => Some((self.cs_copy_image2d_r16g16_buffer.as_raw(), 1, 1)), + DXGI_FORMAT_R8G8B8A8_UINT => { + Some((self.cs_copy_image2d_r8g8b8a8_buffer.as_raw(), 1, 1)) + } + DXGI_FORMAT_R16_UINT => Some((self.cs_copy_image2d_r16_buffer.as_raw(), 2, 1)), + DXGI_FORMAT_R8G8_UINT => Some((self.cs_copy_image2d_r8g8_buffer.as_raw(), 2, 1)), + DXGI_FORMAT_R8_UINT => Some((self.cs_copy_image2d_r8_buffer.as_raw(), 4, 1)), + DXGI_FORMAT_B8G8R8A8_UNORM => { + Some((self.cs_copy_image2d_b8g8r8a8_buffer.as_raw(), 1, 1)) + } + _ => None, + } + } + + pub fn copy_image_2d_to_buffer( + &mut self, + context: &ComPtr, + src: &Image, + dst: &Buffer, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let _scope = debug_scope!( + context, + "Image (format={:?},kind={:?}) => Buffer", + src.format, + src.kind + ); + let (shader, scale_x, scale_y) = self + .find_image_to_buffer_shader(src.decomposed_format.copy_srv.unwrap()) + .unwrap(); + + let srv = src.internal.copy_srv.clone().unwrap().as_raw(); + let uav = dst.internal.uav.unwrap(); + let format_desc = src.format.base_format().0.desc(); + let bytes_per_texel = format_desc.bits as u32 / 8; + + unsafe { + context.CSSetShader(shader, ptr::null_mut(), 0); + context.CSSetConstantBuffers(0, 1, &self.internal_buffer.as_raw()); + + context.CSSetShaderResources(0, 1, [srv].as_ptr()); + context.CSSetUnorderedAccessViews(0, 1, [uav].as_ptr(), ptr::null_mut()); + + for copy in regions { + let copy = copy.borrow(); + self.update_buffer_image(context, ©, src); + + debug_marker!(context, "{:?}", copy); + + context.Dispatch( + ((copy.image_extent.width + (COPY_THREAD_GROUP_X - 1)) + / COPY_THREAD_GROUP_X + / scale_x) + .max(1), + ((copy.image_extent.height + (COPY_THREAD_GROUP_X - 1)) + / COPY_THREAD_GROUP_Y + / scale_y) + .max(1), + 
1, + ); + + if let Some(disjoint_cb) = dst.internal.disjoint_cb { + let total_size = copy.image_extent.depth + * (copy.buffer_height * copy.buffer_width * bytes_per_texel); + let copy_box = d3d11::D3D11_BOX { + left: copy.buffer_offset as u32, + top: 0, + front: 0, + right: copy.buffer_offset as u32 + total_size, + bottom: 1, + back: 1, + }; + + context.CopySubresourceRegion( + disjoint_cb as _, + 0, + copy.buffer_offset as _, + 0, + 0, + dst.internal.raw as _, + 0, + ©_box, + ); + } + } + + // unbind external resources + context.CSSetShaderResources(0, 1, [ptr::null_mut(); 1].as_ptr()); + context.CSSetUnorderedAccessViews(0, 1, [ptr::null_mut(); 1].as_ptr(), ptr::null_mut()); + } + } + + fn find_buffer_to_image_shader( + &self, + format: dxgiformat::DXGI_FORMAT, + ) -> Option<(*mut d3d11::ID3D11ComputeShader, u32, u32)> { + use dxgiformat::*; + + match format { + DXGI_FORMAT_R32G32B32A32_UINT => { + Some((self.cs_copy_buffer_image2d_r32g32b32a32.as_raw(), 1, 1)) + } + DXGI_FORMAT_R32G32_UINT => Some((self.cs_copy_buffer_image2d_r32g32.as_raw(), 1, 1)), + DXGI_FORMAT_R16G16B16A16_UINT => { + Some((self.cs_copy_buffer_image2d_r16g16b16a16.as_raw(), 1, 1)) + } + DXGI_FORMAT_R32_UINT => Some((self.cs_copy_buffer_image2d_r32.as_raw(), 1, 1)), + DXGI_FORMAT_R16G16_UINT => Some((self.cs_copy_buffer_image2d_r16g16.as_raw(), 1, 1)), + DXGI_FORMAT_R8G8B8A8_UINT => { + Some((self.cs_copy_buffer_image2d_r8g8b8a8.as_raw(), 1, 1)) + } + DXGI_FORMAT_R16_UINT => Some((self.cs_copy_buffer_image2d_r16.as_raw(), 2, 1)), + DXGI_FORMAT_R8G8_UINT => Some((self.cs_copy_buffer_image2d_r8g8.as_raw(), 2, 1)), + DXGI_FORMAT_R8_UINT => Some((self.cs_copy_buffer_image2d_r8.as_raw(), 4, 1)), + _ => None, + } + } + + pub fn copy_buffer_to_image_2d( + &mut self, + context: &ComPtr, + src: &Buffer, + dst: &Image, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let _scope = debug_scope!( + context, + "Buffer => Image (format={:?},kind={:?})", + dst.format, + dst.kind + ); + // 
NOTE: we have two separate paths for Buffer -> Image transfers. we need to special case + // uploads to compressed formats through `UpdateSubresource` since we cannot get a + // UAV of any compressed format. + + let format_desc = dst.format.base_format().0.desc(); + if format_desc.is_compressed() { + // we dont really care about non-4x4 block formats.. + assert_eq!(format_desc.dim, (4, 4)); + assert!(!src.host_ptr.is_null()); + + for copy in regions { + let info = copy.borrow(); + + let bytes_per_texel = format_desc.bits as u32 / 8; + + let row_pitch = bytes_per_texel * info.image_extent.width / 4; + let depth_pitch = row_pitch * info.image_extent.height / 4; + + unsafe { + context.UpdateSubresource( + dst.internal.raw, + dst.calc_subresource( + info.image_layers.level as _, + info.image_layers.layers.start as _, + ), + &d3d11::D3D11_BOX { + left: info.image_offset.x as _, + top: info.image_offset.y as _, + front: info.image_offset.z as _, + right: info.image_offset.x as u32 + info.image_extent.width, + bottom: info.image_offset.y as u32 + info.image_extent.height, + back: info.image_offset.z as u32 + info.image_extent.depth, + }, + src.host_ptr + .offset(src.bound_range.start as isize + info.buffer_offset as isize) + as _, + row_pitch, + depth_pitch, + ); + } + } + } else { + let (shader, scale_x, scale_y) = self + .find_buffer_to_image_shader(dst.decomposed_format.copy_uav.unwrap()) + .unwrap(); + + let srv = src.internal.srv.unwrap(); + + unsafe { + context.CSSetShader(shader, ptr::null_mut(), 0); + context.CSSetConstantBuffers(0, 1, &self.internal_buffer.as_raw()); + context.CSSetShaderResources(0, 1, [srv].as_ptr()); + + for copy in regions { + let info = copy.borrow(); + self.update_buffer_image(context, &info, dst); + + debug_marker!(context, "{:?}", info); + + // TODO: multiple layers? do we introduce a stride and do multiple dispatch + // calls or handle this in the shader? 
(use z component in dispatch call + // + // NOTE: right now our copy UAV is a 2D array, and we set the layer in the + // `update_buffer_image` call above + let uav = dst + .get_uav( + info.image_layers.level, + 0, /*info.image_layers.layers.start*/ + ) + .unwrap() + .as_raw(); + context.CSSetUnorderedAccessViews(0, 1, [uav].as_ptr(), ptr::null_mut()); + + context.Dispatch( + ((info.image_extent.width + (COPY_THREAD_GROUP_X - 1)) + / COPY_THREAD_GROUP_X + / scale_x) + .max(1), + ((info.image_extent.height + (COPY_THREAD_GROUP_X - 1)) + / COPY_THREAD_GROUP_Y + / scale_y) + .max(1), + 1, + ); + } + + // unbind external resources + context.CSSetShaderResources(0, 1, [ptr::null_mut(); 1].as_ptr()); + context.CSSetUnorderedAccessViews( + 0, + 1, + [ptr::null_mut(); 1].as_ptr(), + ptr::null_mut(), + ); + } + } + } + + fn find_blit_shader(&self, src: &Image) -> Option<*mut d3d11::ID3D11PixelShader> { + use crate::format::ChannelType as Ct; + + match src.format.base_format().1 { + Ct::Uint => Some(self.ps_blit_2d_uint.as_raw()), + Ct::Sint => Some(self.ps_blit_2d_int.as_raw()), + Ct::Unorm | Ct::Snorm | Ct::Sfloat | Ct::Srgb => Some(self.ps_blit_2d_float.as_raw()), + Ct::Ufloat | Ct::Uscaled | Ct::Sscaled => None, + } + } + + pub fn blit_2d_image( + &mut self, + context: &ComPtr, + src: &Image, + dst: &Image, + filter: image::Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + use std::cmp; + + let _scope = debug_scope!( + context, + "Blit: Image (format={:?},kind={:?}) => Image (format={:?},kind={:?})", + src.format, + src.kind, + dst.format, + dst.kind + ); + + let shader = self.find_blit_shader(src).unwrap(); + + let srv = src.internal.srv.clone().unwrap().as_raw(); + + unsafe { + context.IASetPrimitiveTopology(d3dcommon::D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); + context.VSSetShader(self.vs_blit_2d.as_raw(), ptr::null_mut(), 0); + context.VSSetConstantBuffers(0, 1, [self.internal_buffer.as_raw()].as_ptr()); + context.PSSetShader(shader, 
ptr::null_mut(), 0); + context.PSSetShaderResources(0, 1, [srv].as_ptr()); + context.PSSetSamplers( + 0, + 1, + match filter { + image::Filter::Nearest => [self.sampler_nearest.as_raw()], + image::Filter::Linear => [self.sampler_linear.as_raw()], + } + .as_ptr(), + ); + + for region in regions { + let region = region.borrow(); + self.update_blit(context, src, ®ion); + + // TODO: more layers + let rtv = dst + .get_rtv( + region.dst_subresource.level, + region.dst_subresource.layers.start, + ) + .unwrap() + .as_raw(); + + context.RSSetViewports( + 1, + [d3d11::D3D11_VIEWPORT { + TopLeftX: cmp::min(region.dst_bounds.start.x, region.dst_bounds.end.x) as _, + TopLeftY: cmp::min(region.dst_bounds.start.y, region.dst_bounds.end.y) as _, + Width: (region.dst_bounds.end.x - region.dst_bounds.start.x).abs() as _, + Height: (region.dst_bounds.end.y - region.dst_bounds.start.y).abs() as _, + MinDepth: 0.0f32, + MaxDepth: 1.0f32, + }] + .as_ptr(), + ); + context.OMSetRenderTargets(1, [rtv].as_ptr(), ptr::null_mut()); + context.Draw(3, 0); + } + + context.PSSetShaderResources(0, 1, [ptr::null_mut()].as_ptr()); + context.OMSetRenderTargets(1, [ptr::null_mut()].as_ptr(), ptr::null_mut()); + } + } + + pub fn clear_attachments( + &mut self, + context: &ComPtr, + clears: T, + rects: U, + cache: &RenderPassCache, + ) where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + use hal::format::ChannelType as Ct; + let _scope = debug_scope!(context, "ClearAttachments"); + + let clear_rects: SmallVec<[pso::ClearRect; 8]> = rects + .into_iter() + .map(|rect| rect.borrow().clone()) + .collect(); + + unsafe { + context.IASetPrimitiveTopology(d3dcommon::D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); + context.IASetInputLayout(ptr::null_mut()); + context.VSSetShader(self.vs_partial_clear.as_raw(), ptr::null_mut(), 0); + context.PSSetConstantBuffers(0, 1, [self.internal_buffer.as_raw()].as_ptr()); + } + + let subpass = &cache.render_pass.subpasses[cache.current_subpass as 
usize]; + + for clear in clears { + let clear = clear.borrow(); + + let _scope = debug_scope!(context, "{:?}", clear); + + match *clear { + command::AttachmentClear::Color { index, value } => { + self.update_clear_color(context, value); + + let attachment = { + let rtv_id = subpass.color_attachments[index]; + &cache.framebuffer.attachments[rtv_id.0] + }; + + unsafe { + context.OMSetRenderTargets( + 1, + [attachment.rtv_handle.clone().unwrap().as_raw()].as_ptr(), + ptr::null_mut(), + ); + } + + let shader = match attachment.format.base_format().1 { + Ct::Uint => self.ps_partial_clear_uint.as_raw(), + Ct::Sint => self.ps_partial_clear_int.as_raw(), + _ => self.ps_partial_clear_float.as_raw(), + }; + unsafe { context.PSSetShader(shader, ptr::null_mut(), 0) }; + + for clear_rect in &clear_rects { + let viewport = conv::map_viewport(&Viewport { + rect: clear_rect.rect, + depth: 0f32 .. 1f32, + }); + + debug_marker!(context, "{:?}", clear_rect.rect); + + unsafe { + context.RSSetViewports(1, [viewport].as_ptr()); + context.Draw(3, 0); + } + } + } + command::AttachmentClear::DepthStencil { depth, stencil } => { + self.update_clear_depth_stencil(context, depth, stencil); + + let attachment = { + let dsv_id = subpass.depth_stencil_attachment.unwrap(); + &cache.framebuffer.attachments[dsv_id.0] + }; + + unsafe { + match (depth, stencil) { + (Some(_), Some(stencil)) => { + context.OMSetDepthStencilState( + self.partial_clear_depth_stencil_state.as_raw(), + stencil, + ); + context.PSSetShader( + self.ps_partial_clear_depth.as_raw(), + ptr::null_mut(), + 0, + ); + } + + (Some(_), None) => { + context.OMSetDepthStencilState( + self.partial_clear_depth_state.as_raw(), + 0, + ); + context.PSSetShader( + self.ps_partial_clear_depth.as_raw(), + ptr::null_mut(), + 0, + ); + } + + (None, Some(stencil)) => { + context.OMSetDepthStencilState( + self.partial_clear_stencil_state.as_raw(), + stencil, + ); + context.PSSetShader( + self.ps_partial_clear_stencil.as_raw(), + ptr::null_mut(), + 
0, + ); + } + (None, None) => {} + } + + context.OMSetRenderTargets( + 0, + ptr::null_mut(), + attachment.dsv_handle.clone().unwrap().as_raw(), + ); + context.PSSetShader( + self.ps_partial_clear_depth.as_raw(), + ptr::null_mut(), + 0, + ); + } + + for clear_rect in &clear_rects { + let viewport = conv::map_viewport(&Viewport { + rect: clear_rect.rect, + depth: 0f32 .. 1f32, + }); + + unsafe { + context.RSSetViewports(1, [viewport].as_ptr()); + context.Draw(3, 0); + } + } + } + } + } + } +} diff --git a/third_party/rust/gfx-backend-dx11/src/lib.rs b/third_party/rust/gfx-backend-dx11/src/lib.rs index 47567b1a3f83..8da590f06afd 100644 --- a/third_party/rust/gfx-backend-dx11/src/lib.rs +++ b/third_party/rust/gfx-backend-dx11/src/lib.rs @@ -1,3423 +1,3446 @@ -/*! -# DX11 backend internals. - -## Pipeline Layout - -In D3D11 there are tables of CBVs, SRVs, UAVs, and samplers. - -Each descriptor type can take 1 or two of those entry points. - -The descriptor pool is just and array of handles, belonging to descriptor set 1, descriptor set 2, etc. -Each range of descriptors in a descriptor set area of the pool is split into shader stages, -which in turn is split into CBS/SRV/UAV/Sampler parts. That allows binding a descriptor set as a list -of continuous descriptor ranges (per type, per shader stage). 
- -!*/ - -//#[deny(missing_docs)] - -extern crate auxil; -extern crate gfx_hal as hal; -extern crate range_alloc; -#[macro_use] -extern crate bitflags; -extern crate libloading; -#[macro_use] -extern crate log; -extern crate parking_lot; -extern crate smallvec; -extern crate spirv_cross; -#[macro_use] -extern crate winapi; -extern crate wio; - -use hal::{ - adapter, - buffer, - command, - format, - image, - memory, - pass, - pso, - query, - queue, - range::RangeArg, - window, - DrawCount, - IndexCount, - InstanceCount, - Limits, - VertexCount, - VertexOffset, - WorkGroupCount, -}; - -use range_alloc::RangeAllocator; - -use winapi::{ - shared::{ - dxgi::{IDXGIAdapter, IDXGIFactory, IDXGISwapChain}, - dxgiformat, - minwindef::{FALSE, HMODULE, UINT}, - windef::{HWND, RECT}, - winerror, - }, - um::{d3d11, d3dcommon, winuser::GetClientRect}, - Interface as _, -}; - -use wio::com::ComPtr; - -use parking_lot::{Condvar, Mutex}; - -use std::borrow::Borrow; -use std::cell::RefCell; -use std::fmt; -use std::mem; -use std::ops::Range; -use std::ptr; -use std::sync::Arc; - -use std::os::raw::c_void; - -macro_rules! debug_scope { - ($context:expr, $($arg:tt)+) => ({ - #[cfg(debug_assertions)] - { - $crate::debug::DebugScope::with_name( - $context, - format_args!($($arg)+), - ) - } - #[cfg(not(debug_assertions))] - { - () - } - }); -} - -macro_rules! 
debug_marker { - ($context:expr, $($arg:tt)+) => ({ - #[cfg(debug_assertions)] - { - $crate::debug::debug_marker( - $context, - format_args!($($arg)+), - ); - } - }); -} - -mod conv; -#[cfg(debug_assertions)] -mod debug; -mod device; -mod dxgi; -mod internal; -mod shader; - -type CreateFun = unsafe extern "system" fn( - *mut IDXGIAdapter, - UINT, - HMODULE, - UINT, - *const UINT, - UINT, - UINT, - *mut *mut d3d11::ID3D11Device, - *mut UINT, - *mut *mut d3d11::ID3D11DeviceContext, -) -> winerror::HRESULT; - -#[derive(Clone)] -pub(crate) struct ViewInfo { - resource: *mut d3d11::ID3D11Resource, - kind: image::Kind, - caps: image::ViewCapabilities, - view_kind: image::ViewKind, - format: dxgiformat::DXGI_FORMAT, - range: image::SubresourceRange, -} - -impl fmt::Debug for ViewInfo { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("ViewInfo") - } -} - -#[derive(Debug)] -pub struct Instance { - pub(crate) factory: ComPtr, - pub(crate) dxgi_version: dxgi::DxgiVersion, - library_d3d11: Arc, - library_dxgi: libloading::Library, -} - -unsafe impl Send for Instance {} -unsafe impl Sync for Instance {} - -impl Instance { - pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface { - Surface { - factory: self.factory.clone(), - wnd_handle: hwnd as *mut _, - presentation: None, - } - } -} - -fn get_features( - _device: ComPtr, - _feature_level: d3dcommon::D3D_FEATURE_LEVEL, -) -> hal::Features { - hal::Features::ROBUST_BUFFER_ACCESS - | hal::Features::FULL_DRAW_INDEX_U32 - | hal::Features::FORMAT_BC - | hal::Features::INSTANCE_RATE - | hal::Features::SAMPLER_MIP_LOD_BIAS -} - -fn get_format_properties( - device: ComPtr, -) -> [format::Properties; format::NUM_FORMATS] { - let mut format_properties = [format::Properties::default(); format::NUM_FORMATS]; - for (i, props) in &mut format_properties.iter_mut().enumerate().skip(1) { - let format: format::Format = unsafe { mem::transmute(i as u32) }; - - let dxgi_format = match 
conv::map_format(format) { - Some(format) => format, - None => continue, - }; - - let mut support = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT { - InFormat: dxgi_format, - OutFormatSupport: 0, - }; - let mut support_2 = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT2 { - InFormat: dxgi_format, - OutFormatSupport2: 0, - }; - - let hr = unsafe { - device.CheckFeatureSupport( - d3d11::D3D11_FEATURE_FORMAT_SUPPORT, - &mut support as *mut _ as *mut _, - mem::size_of::() as UINT, - ) - }; - - if hr == winerror::S_OK { - let can_buffer = 0 != support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BUFFER; - let can_image = 0 - != support.OutFormatSupport - & (d3d11::D3D11_FORMAT_SUPPORT_TEXTURE1D - | d3d11::D3D11_FORMAT_SUPPORT_TEXTURE2D - | d3d11::D3D11_FORMAT_SUPPORT_TEXTURE3D - | d3d11::D3D11_FORMAT_SUPPORT_TEXTURECUBE); - let can_linear = can_image && !format.surface_desc().is_compressed(); - if can_image { - props.optimal_tiling |= - format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC; - } - if can_linear { - props.linear_tiling |= - format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC; - } - if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER != 0 { - props.buffer_features |= format::BufferFeature::VERTEX; - } - if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_SAMPLE != 0 { - props.optimal_tiling |= format::ImageFeature::SAMPLED_LINEAR; - } - if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_RENDER_TARGET != 0 { - props.optimal_tiling |= - format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST; - if can_linear { - props.linear_tiling |= - format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST; - } - } - if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BLENDABLE != 0 { - props.optimal_tiling |= format::ImageFeature::COLOR_ATTACHMENT_BLEND; - } - if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_DEPTH_STENCIL != 0 { - props.optimal_tiling |= 
format::ImageFeature::DEPTH_STENCIL_ATTACHMENT; - } - if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_LOAD != 0 { - //TODO: check d3d12::D3D12_FORMAT_SUPPORT2_UAV_TYPED_LOAD ? - if can_buffer { - props.buffer_features |= format::BufferFeature::UNIFORM_TEXEL; - } - } - - let hr = unsafe { - device.CheckFeatureSupport( - d3d11::D3D11_FEATURE_FORMAT_SUPPORT2, - &mut support_2 as *mut _ as *mut _, - mem::size_of::() as UINT, - ) - }; - if hr == winerror::S_OK { - if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD != 0 { - //TODO: other atomic flags? - if can_buffer { - props.buffer_features |= format::BufferFeature::STORAGE_TEXEL_ATOMIC; - } - if can_image { - props.optimal_tiling |= format::ImageFeature::STORAGE_ATOMIC; - } - } - if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE != 0 { - if can_buffer { - props.buffer_features |= format::BufferFeature::STORAGE_TEXEL; - } - if can_image { - props.optimal_tiling |= format::ImageFeature::STORAGE; - } - } - } - } - - //TODO: blits, linear tiling - } - - format_properties -} - -impl hal::Instance for Instance { - fn create(_: &str, _: u32) -> Result { - // TODO: get the latest factory we can find - - match dxgi::get_dxgi_factory() { - Ok((library_dxgi, factory, dxgi_version)) => { - info!("DXGI version: {:?}", dxgi_version); - let library_d3d11 = Arc::new( - libloading::Library::new("d3d11.dll").map_err(|_| hal::UnsupportedBackend)?, - ); - Ok(Instance { - factory, - dxgi_version, - library_d3d11, - library_dxgi, - }) - } - Err(hr) => { - info!("Failed on factory creation: {:?}", hr); - Err(hal::UnsupportedBackend) - } - } - } - - fn enumerate_adapters(&self) -> Vec> { - let mut adapters = Vec::new(); - let mut idx = 0; - - let func: libloading::Symbol = - match unsafe { self.library_d3d11.get(b"D3D11CreateDevice") } { - Ok(func) => func, - Err(e) => { - error!("Unable to get device creation function: {:?}", e); - return Vec::new(); - } - }; - - while 
let Ok((adapter, info)) = - dxgi::get_adapter(idx, self.factory.as_raw(), self.dxgi_version) - { - idx += 1; - - use hal::memory::Properties; - - // TODO: move into function? - let (device, feature_level) = { - let feature_level = get_feature_level(&func, adapter.as_raw()); - - let mut device = ptr::null_mut(); - let hr = unsafe { - func( - adapter.as_raw() as *mut _, - d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, - ptr::null_mut(), - 0, - [feature_level].as_ptr(), - 1, - d3d11::D3D11_SDK_VERSION, - &mut device as *mut *mut _ as *mut *mut _, - ptr::null_mut(), - ptr::null_mut(), - ) - }; - - if !winerror::SUCCEEDED(hr) { - continue; - } - - ( - unsafe { ComPtr::::from_raw(device) }, - feature_level, - ) - }; - - let memory_properties = adapter::MemoryProperties { - memory_types: vec![ - adapter::MemoryType { - properties: Properties::DEVICE_LOCAL, - heap_index: 0, - }, - adapter::MemoryType { - properties: Properties::CPU_VISIBLE - | Properties::COHERENT - | Properties::CPU_CACHED, - heap_index: 1, - }, - adapter::MemoryType { - properties: Properties::CPU_VISIBLE | Properties::CPU_CACHED, - heap_index: 1, - }, - ], - // TODO: would using *VideoMemory and *SystemMemory from - // DXGI_ADAPTER_DESC be too optimistic? 
:) - memory_heaps: vec![!0, !0], - }; - - let limits = hal::Limits { - max_image_1d_size: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION as _, - max_image_2d_size: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, - max_image_3d_size: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION as _, - max_image_cube_size: d3d11::D3D11_REQ_TEXTURECUBE_DIMENSION as _, - max_image_array_layers: d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _, - max_texel_elements: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, //TODO - max_patch_size: 0, // TODO - max_viewports: d3d11::D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE as _, - max_viewport_dimensions: [d3d11::D3D11_VIEWPORT_BOUNDS_MAX; 2], - max_framebuffer_extent: hal::image::Extent { - //TODO - width: 4096, - height: 4096, - depth: 1, - }, - max_compute_work_group_count: [ - d3d11::D3D11_CS_THREAD_GROUP_MAX_X, - d3d11::D3D11_CS_THREAD_GROUP_MAX_Y, - d3d11::D3D11_CS_THREAD_GROUP_MAX_Z, - ], - max_compute_work_group_size: [ - d3d11::D3D11_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP, - 1, - 1, - ], // TODO - max_vertex_input_attribute_offset: 255, // TODO - max_vertex_input_attributes: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _, - max_vertex_input_binding_stride: - d3d11::D3D11_REQ_MULTI_ELEMENT_STRUCTURE_SIZE_IN_BYTES as _, - max_vertex_input_bindings: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _, // TODO: verify same as attributes - max_vertex_output_components: d3d11::D3D11_VS_OUTPUT_REGISTER_COUNT as _, // TODO - min_texel_buffer_offset_alignment: 1, // TODO - min_uniform_buffer_offset_alignment: 16, // TODO: verify - min_storage_buffer_offset_alignment: 1, // TODO - framebuffer_color_sample_counts: 1, // TODO - framebuffer_depth_sample_counts: 1, // TODO - framebuffer_stencil_sample_counts: 1, // TODO - max_color_attachments: d3d11::D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT as _, - buffer_image_granularity: 1, - non_coherent_atom_size: 1, // TODO - max_sampler_anisotropy: 16., - 
optimal_buffer_copy_offset_alignment: 1, // TODO - optimal_buffer_copy_pitch_alignment: 1, // TODO - min_vertex_input_binding_stride_alignment: 1, - ..hal::Limits::default() //TODO - }; - - let features = get_features(device.clone(), feature_level); - let format_properties = get_format_properties(device.clone()); - - let physical_device = PhysicalDevice { - adapter, - library_d3d11: Arc::clone(&self.library_d3d11), - features, - limits, - memory_properties, - format_properties, - }; - - info!("{:#?}", info); - - adapters.push(adapter::Adapter { - info, - physical_device, - queue_families: vec![QueueFamily], - }); - } - - adapters - } - - unsafe fn create_surface( - &self, - has_handle: &impl raw_window_handle::HasRawWindowHandle, - ) -> Result { - match has_handle.raw_window_handle() { - raw_window_handle::RawWindowHandle::Windows(handle) => { - Ok(self.create_surface_from_hwnd(handle.hwnd)) - } - _ => Err(hal::window::InitError::UnsupportedWindowHandle), - } - } - - unsafe fn destroy_surface(&self, _surface: Surface) { - // TODO: Implement Surface cleanup - } -} - -pub struct PhysicalDevice { - adapter: ComPtr, - library_d3d11: Arc, - features: hal::Features, - limits: hal::Limits, - memory_properties: adapter::MemoryProperties, - format_properties: [format::Properties; format::NUM_FORMATS], -} - -impl fmt::Debug for PhysicalDevice { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("PhysicalDevice") - } -} - -unsafe impl Send for PhysicalDevice {} -unsafe impl Sync for PhysicalDevice {} - -// TODO: does the adapter we get earlier matter for feature level? 
-fn get_feature_level(func: &CreateFun, adapter: *mut IDXGIAdapter) -> d3dcommon::D3D_FEATURE_LEVEL { - let requested_feature_levels = [ - d3dcommon::D3D_FEATURE_LEVEL_11_1, - d3dcommon::D3D_FEATURE_LEVEL_11_0, - d3dcommon::D3D_FEATURE_LEVEL_10_1, - d3dcommon::D3D_FEATURE_LEVEL_10_0, - d3dcommon::D3D_FEATURE_LEVEL_9_3, - d3dcommon::D3D_FEATURE_LEVEL_9_2, - d3dcommon::D3D_FEATURE_LEVEL_9_1, - ]; - - let mut feature_level = d3dcommon::D3D_FEATURE_LEVEL_9_1; - let hr = unsafe { - func( - adapter, - d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, - ptr::null_mut(), - 0, - requested_feature_levels[..].as_ptr(), - requested_feature_levels.len() as _, - d3d11::D3D11_SDK_VERSION, - ptr::null_mut(), - &mut feature_level as *mut _, - ptr::null_mut(), - ) - }; - - if !winerror::SUCCEEDED(hr) { - // if there is no 11.1 runtime installed, requesting - // `D3D_FEATURE_LEVEL_11_1` will return E_INVALIDARG so we just retry - // without that - if hr == winerror::E_INVALIDARG { - let hr = unsafe { - func( - adapter, - d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, - ptr::null_mut(), - 0, - requested_feature_levels[1 ..].as_ptr(), - (requested_feature_levels.len() - 1) as _, - d3d11::D3D11_SDK_VERSION, - ptr::null_mut(), - &mut feature_level as *mut _, - ptr::null_mut(), - ) - }; - - if !winerror::SUCCEEDED(hr) { - // TODO: device might not support any feature levels? 
- unimplemented!(); - } - } - } - - feature_level -} - -// TODO: PhysicalDevice -impl adapter::PhysicalDevice for PhysicalDevice { - unsafe fn open( - &self, - families: &[(&QueueFamily, &[queue::QueuePriority])], - requested_features: hal::Features, - ) -> Result, hal::device::CreationError> { - let func: libloading::Symbol = - self.library_d3d11.get(b"D3D11CreateDevice").unwrap(); - - let (device, cxt) = { - if !self.features().contains(requested_features) { - return Err(hal::device::CreationError::MissingFeature); - } - - let feature_level = get_feature_level(&func, self.adapter.as_raw()); - let mut returned_level = d3dcommon::D3D_FEATURE_LEVEL_9_1; - - #[cfg(debug_assertions)] - let create_flags = d3d11::D3D11_CREATE_DEVICE_DEBUG; - #[cfg(not(debug_assertions))] - let create_flags = 0; - - // TODO: request debug device only on debug config? - let mut device = ptr::null_mut(); - let mut cxt = ptr::null_mut(); - let hr = func( - self.adapter.as_raw() as *mut _, - d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, - ptr::null_mut(), - create_flags, - [feature_level].as_ptr(), - 1, - d3d11::D3D11_SDK_VERSION, - &mut device as *mut *mut _ as *mut *mut _, - &mut returned_level as *mut _, - &mut cxt as *mut *mut _ as *mut *mut _, - ); - - // NOTE: returns error if adapter argument is non-null and driver - // type is not unknown; or if debug device is requested but not - // present - if !winerror::SUCCEEDED(hr) { - return Err(hal::device::CreationError::InitializationFailed); - } - - info!("feature level={:x}", feature_level); - - (ComPtr::from_raw(device), ComPtr::from_raw(cxt)) - }; - - let device = device::Device::new(device, cxt, self.memory_properties.clone()); - - // TODO: deferred context => 1 cxt/queue? - let queue_groups = families - .into_iter() - .map(|&(_family, prio)| { - assert_eq!(prio.len(), 1); - let mut group = queue::QueueGroup::new(queue::QueueFamilyId(0)); - - // TODO: multiple queues? 
- let queue = CommandQueue { - context: device.context.clone(), - }; - group.add_queue(queue); - group - }) - .collect(); - - Ok(adapter::Gpu { - device, - queue_groups, - }) - } - - fn format_properties(&self, fmt: Option) -> format::Properties { - let idx = fmt.map(|fmt| fmt as usize).unwrap_or(0); - self.format_properties[idx] - } - - fn image_format_properties( - &self, - format: format::Format, - dimensions: u8, - tiling: image::Tiling, - usage: image::Usage, - view_caps: image::ViewCapabilities, - ) -> Option { - conv::map_format(format)?; //filter out unknown formats - - let supported_usage = { - use hal::image::Usage as U; - let format_props = &self.format_properties[format as usize]; - let props = match tiling { - image::Tiling::Optimal => format_props.optimal_tiling, - image::Tiling::Linear => format_props.linear_tiling, - }; - let mut flags = U::empty(); - // Note: these checks would have been nicer if we had explicit BLIT usage - if props.contains(format::ImageFeature::BLIT_SRC) { - flags |= U::TRANSFER_SRC; - } - if props.contains(format::ImageFeature::BLIT_DST) { - flags |= U::TRANSFER_DST; - } - if props.contains(format::ImageFeature::SAMPLED) { - flags |= U::SAMPLED; - } - if props.contains(format::ImageFeature::STORAGE) { - flags |= U::STORAGE; - } - if props.contains(format::ImageFeature::COLOR_ATTACHMENT) { - flags |= U::COLOR_ATTACHMENT; - } - if props.contains(format::ImageFeature::DEPTH_STENCIL_ATTACHMENT) { - flags |= U::DEPTH_STENCIL_ATTACHMENT; - } - flags - }; - if !supported_usage.contains(usage) { - return None; - } - - let max_resource_size = - (d3d11::D3D11_REQ_RESOURCE_SIZE_IN_MEGABYTES_EXPRESSION_A_TERM as usize) << 20; - Some(match tiling { - image::Tiling::Optimal => image::FormatProperties { - max_extent: match dimensions { - 1 => image::Extent { - width: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION, - height: 1, - depth: 1, - }, - 2 => image::Extent { - width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, - height: 
d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, - depth: 1, - }, - 3 => image::Extent { - width: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, - height: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, - depth: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, - }, - _ => return None, - }, - max_levels: d3d11::D3D11_REQ_MIP_LEVELS as _, - max_layers: match dimensions { - 1 => d3d11::D3D11_REQ_TEXTURE1D_ARRAY_AXIS_DIMENSION as _, - 2 => d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _, - _ => return None, - }, - sample_count_mask: if dimensions == 2 - && !view_caps.contains(image::ViewCapabilities::KIND_CUBE) - && (usage.contains(image::Usage::COLOR_ATTACHMENT) - | usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT)) - { - 0x3F //TODO: use D3D12_FEATURE_DATA_FORMAT_SUPPORT - } else { - 0x1 - }, - max_resource_size, - }, - image::Tiling::Linear => image::FormatProperties { - max_extent: match dimensions { - 2 => image::Extent { - width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, - height: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, - depth: 1, - }, - _ => return None, - }, - max_levels: 1, - max_layers: 1, - sample_count_mask: 0x1, - max_resource_size, - }, - }) - } - - fn memory_properties(&self) -> adapter::MemoryProperties { - self.memory_properties.clone() - } - - fn features(&self) -> hal::Features { - self.features - } - - fn limits(&self) -> Limits { - self.limits - } -} - -struct Presentation { - swapchain: ComPtr, - view: ComPtr, - format: format::Format, - size: window::Extent2D, -} - -pub struct Surface { - pub(crate) factory: ComPtr, - wnd_handle: HWND, - presentation: Option, -} - - -impl fmt::Debug for Surface { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Surface") - } -} - -unsafe impl Send for Surface {} -unsafe impl Sync for Surface {} - -impl window::Surface for Surface { - fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool { - true - } - - fn capabilities(&self, _physical_device: &PhysicalDevice) 
-> window::SurfaceCapabilities { - let current_extent = unsafe { - let mut rect: RECT = mem::zeroed(); - assert_ne!( - 0, - GetClientRect(self.wnd_handle as *mut _, &mut rect as *mut RECT) - ); - Some(window::Extent2D { - width: (rect.right - rect.left) as u32, - height: (rect.bottom - rect.top) as u32, - }) - }; - - // TODO: flip swap effects require dx11.1/windows8 - // NOTE: some swap effects affect msaa capabilities.. - // TODO: _DISCARD swap effects can only have one image? - window::SurfaceCapabilities { - present_modes: window::PresentMode::FIFO, //TODO - composite_alpha_modes: window::CompositeAlphaMode::OPAQUE, //TODO - image_count: 1 ..= 16, // TODO: - current_extent, - extents: window::Extent2D { - width: 16, - height: 16, - } ..= window::Extent2D { - width: 4096, - height: 4096, - }, - max_image_layers: 1, - usage: image::Usage::COLOR_ATTACHMENT | image::Usage::TRANSFER_SRC, - } - } - - fn supported_formats(&self, _physical_device: &PhysicalDevice) -> Option> { - Some(vec![ - format::Format::Bgra8Srgb, - format::Format::Bgra8Unorm, - format::Format::Rgba8Srgb, - format::Format::Rgba8Unorm, - format::Format::A2b10g10r10Unorm, - format::Format::Rgba16Sfloat, - ]) - } -} - -impl window::PresentationSurface for Surface { - type SwapchainImage = ImageView; - - unsafe fn configure_swapchain( - &mut self, - device: &device::Device, - config: window::SwapchainConfig, - ) -> Result<(), window::CreationError> { - assert!(image::Usage::COLOR_ATTACHMENT.contains(config.image_usage)); - - let swapchain = match self.presentation.take() { - Some(present) => { - if present.format == config.format && present.size == config.extent { - self.presentation = Some(present); - return Ok(()); - } - let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap(); - drop(present.view); - let result = present.swapchain.ResizeBuffers( - config.image_count, - config.extent.width, - config.extent.height, - non_srgb_format, - 0, - ); - if result != winerror::S_OK { - 
error!("ResizeBuffers failed with 0x{:x}", result as u32); - return Err(window::CreationError::WindowInUse(hal::device::WindowInUse)); - } - present.swapchain - } - None => { - let (swapchain, _) = - device.create_swapchain_impl(&config, self.wnd_handle, self.factory.clone())?; - swapchain - } - }; - - let mut resource: *mut d3d11::ID3D11Resource = ptr::null_mut(); - assert_eq!( - winerror::S_OK, - swapchain.GetBuffer( - 0 as _, - &d3d11::ID3D11Resource::uuidof(), - &mut resource as *mut *mut _ as *mut *mut _, - ) - ); - - let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1); - let format = conv::map_format(config.format).unwrap(); - let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(format); - - let view_info = ViewInfo { - resource, - kind, - caps: image::ViewCapabilities::empty(), - view_kind: image::ViewKind::D2, - format: decomposed.rtv.unwrap(), - range: image::SubresourceRange { - aspects: format::Aspects::COLOR, - levels: 0 .. 1, - layers: 0 .. 1, - }, - }; - let view = device.view_image_as_render_target(&view_info).unwrap(); - - (*resource).Release(); - - self.presentation = Some(Presentation { - swapchain, - view, - format: config.format, - size: config.extent, - }); - Ok(()) - } - - unsafe fn unconfigure_swapchain(&mut self, _device: &device::Device) { - self.presentation = None; - } - - unsafe fn acquire_image( - &mut self, - _timeout_ns: u64, //TODO: use the timeout - ) -> Result<(ImageView, Option), window::AcquireError> { - let present = self.presentation.as_ref().unwrap(); - let image_view = ImageView { - format: present.format, - rtv_handle: Some(present.view.clone()), - dsv_handle: None, - srv_handle: None, - uav_handle: None, - }; - Ok((image_view, None)) - } -} - - -pub struct Swapchain { - dxgi_swapchain: ComPtr, -} - - -impl fmt::Debug for Swapchain { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Swapchain") - } -} - -unsafe impl Send for Swapchain {} -unsafe impl Sync for 
Swapchain {} - -impl window::Swapchain for Swapchain { - unsafe fn acquire_image( - &mut self, - _timeout_ns: u64, - _semaphore: Option<&Semaphore>, - _fence: Option<&Fence>, - ) -> Result<(window::SwapImageIndex, Option), window::AcquireError> { - // TODO: non-`_DISCARD` swap effects have more than one buffer, `FLIP` - // effects are dxgi 1.3 (w10+?) in which case there is - // `GetCurrentBackBufferIndex()` on the swapchain - Ok((0, None)) - } -} - -#[derive(Debug, Clone, Copy)] -pub struct QueueFamily; - -impl queue::QueueFamily for QueueFamily { - fn queue_type(&self) -> queue::QueueType { - queue::QueueType::General - } - fn max_queues(&self) -> usize { - 1 - } - fn id(&self) -> queue::QueueFamilyId { - queue::QueueFamilyId(0) - } -} - -#[derive(Clone)] -pub struct CommandQueue { - context: ComPtr, -} - -impl fmt::Debug for CommandQueue { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("CommandQueue") - } -} - -unsafe impl Send for CommandQueue {} -unsafe impl Sync for CommandQueue {} - -impl queue::CommandQueue for CommandQueue { - unsafe fn submit<'a, T, Ic, S, Iw, Is>( - &mut self, - submission: queue::Submission, - fence: Option<&Fence>, - ) where - T: 'a + Borrow, - Ic: IntoIterator, - S: 'a + Borrow, - Iw: IntoIterator, - Is: IntoIterator, - { - let _scope = debug_scope!(&self.context, "Submit(fence={:?})", fence); - for cmd_buf in submission.command_buffers { - let cmd_buf = cmd_buf.borrow(); - - let _scope = debug_scope!( - &self.context, - "CommandBuffer ({}/{})", - cmd_buf.flush_coherent_memory.len(), - cmd_buf.invalidate_coherent_memory.len() - ); - - { - let _scope = debug_scope!(&self.context, "Pre-Exec: Flush"); - for sync in &cmd_buf.flush_coherent_memory { - sync.do_flush(&self.context); - } - } - self.context - .ExecuteCommandList(cmd_buf.as_raw_list().as_raw(), FALSE); - { - let _scope = debug_scope!(&self.context, "Post-Exec: Invalidate"); - for sync in &cmd_buf.invalidate_coherent_memory { - 
sync.do_invalidate(&self.context); - } - } - } - - if let Some(fence) = fence { - *fence.mutex.lock() = true; - fence.condvar.notify_all(); - } - } - - unsafe fn present<'a, W, Is, S, Iw>( - &mut self, - swapchains: Is, - _wait_semaphores: Iw, - ) -> Result, window::PresentError> - where - W: 'a + Borrow, - Is: IntoIterator, - S: 'a + Borrow, - Iw: IntoIterator, - { - for (swapchain, _idx) in swapchains { - swapchain.borrow().dxgi_swapchain.Present(1, 0); - } - - Ok(None) - } - - unsafe fn present_surface( - &mut self, - surface: &mut Surface, - _image: ImageView, - _wait_semaphore: Option<&Semaphore>, - ) -> Result, window::PresentError> { - surface - .presentation - .as_ref() - .unwrap() - .swapchain - .Present(1, 0); - Ok(None) - } - - fn wait_idle(&self) -> Result<(), hal::device::OutOfMemory> { - // unimplemented!() - Ok(()) - } -} - -#[derive(Debug)] -pub struct AttachmentClear { - subpass_id: Option, - attachment_id: usize, - raw: command::AttachmentClear, -} - -#[derive(Debug)] -pub struct RenderPassCache { - pub render_pass: RenderPass, - pub framebuffer: Framebuffer, - pub attachment_clear_values: Vec, - pub target_rect: pso::Rect, - pub current_subpass: usize, -} - -impl RenderPassCache { - pub fn start_subpass( - &mut self, - internal: &mut internal::Internal, - context: &ComPtr, - cache: &mut CommandBufferState, - ) { - let attachments = self - .attachment_clear_values - .iter() - .filter(|clear| clear.subpass_id == Some(self.current_subpass)) - .map(|clear| clear.raw); - - cache - .dirty_flag - .insert(DirtyStateFlag::GRAPHICS_PIPELINE | DirtyStateFlag::VIEWPORTS); - internal.clear_attachments( - context, - attachments, - &[pso::ClearRect { - rect: self.target_rect, - layers: 0 .. 
1, - }], - &self, - ); - - let subpass = &self.render_pass.subpasses[self.current_subpass]; - let color_views = subpass - .color_attachments - .iter() - .map(|&(id, _)| { - self.framebuffer.attachments[id] - .rtv_handle - .clone() - .unwrap() - .as_raw() - }) - .collect::>(); - let ds_view = match subpass.depth_stencil_attachment { - Some((id, _)) => Some( - self.framebuffer.attachments[id] - .dsv_handle - .clone() - .unwrap() - .as_raw(), - ), - None => None, - }; - - cache.set_render_targets(&color_views, ds_view); - cache.bind(context); - } - - pub fn next_subpass(&mut self) { - self.current_subpass += 1; - } -} - -bitflags! { - struct DirtyStateFlag : u32 { - const RENDER_TARGETS = (1 << 1); - const VERTEX_BUFFERS = (1 << 2); - const GRAPHICS_PIPELINE = (1 << 3); - const VIEWPORTS = (1 << 4); - const BLEND_STATE = (1 << 5); - } -} - -pub struct CommandBufferState { - dirty_flag: DirtyStateFlag, - - render_target_len: u32, - render_targets: [*mut d3d11::ID3D11RenderTargetView; 8], - depth_target: Option<*mut d3d11::ID3D11DepthStencilView>, - graphics_pipeline: Option, - - // a bitmask that keeps track of what vertex buffer bindings have been "bound" into - // our vec - bound_bindings: u32, - // a bitmask that hold the required binding slots to be bound for the currently - // bound pipeline - required_bindings: Option, - // the highest binding number in currently bound pipeline - max_bindings: Option, - viewports: Vec, - vertex_buffers: Vec<*mut d3d11::ID3D11Buffer>, - vertex_offsets: Vec, - vertex_strides: Vec, - blend_factor: Option<[f32; 4]>, - // we can only support one face (rather, both faces must have the same value) - stencil_ref: Option, - stencil_read_mask: Option, - stencil_write_mask: Option, - current_blend: Option<*mut d3d11::ID3D11BlendState>, -} - - -impl fmt::Debug for CommandBufferState { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("CommandBufferState") - } -} - -impl CommandBufferState { - fn new() -> Self { - 
CommandBufferState { - dirty_flag: DirtyStateFlag::empty(), - render_target_len: 0, - render_targets: [ptr::null_mut(); 8], - depth_target: None, - graphics_pipeline: None, - bound_bindings: 0, - required_bindings: None, - max_bindings: None, - viewports: Vec::new(), - vertex_buffers: Vec::new(), - vertex_offsets: Vec::new(), - vertex_strides: Vec::new(), - blend_factor: None, - stencil_ref: None, - stencil_read_mask: None, - stencil_write_mask: None, - current_blend: None, - } - } - - fn clear(&mut self) { - self.render_target_len = 0; - self.depth_target = None; - self.graphics_pipeline = None; - self.bound_bindings = 0; - self.required_bindings = None; - self.max_bindings = None; - self.viewports.clear(); - self.vertex_buffers.clear(); - self.vertex_offsets.clear(); - self.vertex_strides.clear(); - self.blend_factor = None; - self.stencil_ref = None; - self.stencil_read_mask = None; - self.stencil_write_mask = None; - self.current_blend = None; - } - - pub fn set_vertex_buffer( - &mut self, - index: usize, - offset: u32, - buffer: *mut d3d11::ID3D11Buffer, - ) { - self.bound_bindings |= 1 << index as u32; - - if index >= self.vertex_buffers.len() { - self.vertex_buffers.push(buffer); - self.vertex_offsets.push(offset); - } else { - self.vertex_buffers[index] = buffer; - self.vertex_offsets[index] = offset; - } - - self.dirty_flag.insert(DirtyStateFlag::VERTEX_BUFFERS); - } - - pub fn bind_vertex_buffers(&mut self, context: &ComPtr) { - if let Some(binding_count) = self.max_bindings { - if self.vertex_buffers.len() >= binding_count as usize - && self.vertex_strides.len() >= binding_count as usize - { - unsafe { - context.IASetVertexBuffers( - 0, - binding_count, - self.vertex_buffers.as_ptr(), - self.vertex_strides.as_ptr(), - self.vertex_offsets.as_ptr(), - ); - } - - self.dirty_flag.remove(DirtyStateFlag::VERTEX_BUFFERS); - } - } - } - - pub fn set_viewports(&mut self, viewports: &[d3d11::D3D11_VIEWPORT]) { - self.viewports.clear(); - 
self.viewports.extend(viewports); - - self.dirty_flag.insert(DirtyStateFlag::VIEWPORTS); - } - - pub fn bind_viewports(&mut self, context: &ComPtr) { - if let Some(ref pipeline) = self.graphics_pipeline { - if let Some(ref viewport) = pipeline.baked_states.viewport { - unsafe { - context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr()); - } - } else { - unsafe { - context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr()); - } - } - } else { - unsafe { - context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr()); - } - } - - self.dirty_flag.remove(DirtyStateFlag::VIEWPORTS); - } - - pub fn set_render_targets( - &mut self, - render_targets: &[*mut d3d11::ID3D11RenderTargetView], - depth_target: Option<*mut d3d11::ID3D11DepthStencilView>, - ) { - for (idx, &rt) in render_targets.iter().enumerate() { - self.render_targets[idx] = rt; - } - - self.render_target_len = render_targets.len() as u32; - self.depth_target = depth_target; - - self.dirty_flag.insert(DirtyStateFlag::RENDER_TARGETS); - } - - pub fn bind_render_targets(&mut self, context: &ComPtr) { - unsafe { - context.OMSetRenderTargets( - self.render_target_len, - self.render_targets.as_ptr(), - if let Some(dsv) = self.depth_target { - dsv - } else { - ptr::null_mut() - }, - ); - } - - self.dirty_flag.remove(DirtyStateFlag::RENDER_TARGETS); - } - - pub fn set_blend_factor(&mut self, factor: [f32; 4]) { - self.blend_factor = Some(factor); - - self.dirty_flag.insert(DirtyStateFlag::BLEND_STATE); - } - - pub fn bind_blend_state(&mut self, context: &ComPtr) { - if let Some(blend) = self.current_blend { - let blend_color = if let Some(ref pipeline) = self.graphics_pipeline { - pipeline - .baked_states - .blend_color - .or(self.blend_factor) - .unwrap_or([0f32; 4]) - } else { - self.blend_factor.unwrap_or([0f32; 4]) - }; - - // TODO: MSAA - unsafe { - context.OMSetBlendState(blend, &blend_color, !0); - } - - self.dirty_flag.remove(DirtyStateFlag::BLEND_STATE); - } - } - - 
pub fn set_graphics_pipeline(&mut self, pipeline: GraphicsPipeline) { - self.graphics_pipeline = Some(pipeline); - - self.dirty_flag.insert(DirtyStateFlag::GRAPHICS_PIPELINE); - } - - pub fn bind_graphics_pipeline(&mut self, context: &ComPtr) { - if let Some(ref pipeline) = self.graphics_pipeline { - self.vertex_strides.clear(); - self.vertex_strides.extend(&pipeline.strides); - - self.required_bindings = Some(pipeline.required_bindings); - self.max_bindings = Some(pipeline.max_vertex_bindings); - }; - - self.bind_vertex_buffers(context); - - if let Some(ref pipeline) = self.graphics_pipeline { - unsafe { - context.IASetPrimitiveTopology(pipeline.topology); - context.IASetInputLayout(pipeline.input_layout.as_raw()); - - context.VSSetShader(pipeline.vs.as_raw(), ptr::null_mut(), 0); - if let Some(ref ps) = pipeline.ps { - context.PSSetShader(ps.as_raw(), ptr::null_mut(), 0); - } - if let Some(ref gs) = pipeline.gs { - context.GSSetShader(gs.as_raw(), ptr::null_mut(), 0); - } - if let Some(ref hs) = pipeline.hs { - context.HSSetShader(hs.as_raw(), ptr::null_mut(), 0); - } - if let Some(ref ds) = pipeline.ds { - context.DSSetShader(ds.as_raw(), ptr::null_mut(), 0); - } - - context.RSSetState(pipeline.rasterizer_state.as_raw()); - if let Some(ref viewport) = pipeline.baked_states.viewport { - context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr()); - } - if let Some(ref scissor) = pipeline.baked_states.scissor { - context.RSSetScissorRects(1, [conv::map_rect(&scissor)].as_ptr()); - } - - if let Some((ref state, reference)) = pipeline.depth_stencil_state { - let stencil_ref = if let pso::State::Static(reference) = reference { - reference - } else { - self.stencil_ref.unwrap_or(0) - }; - - context.OMSetDepthStencilState(state.as_raw(), stencil_ref); - } - self.current_blend = Some(pipeline.blend_state.as_raw()); - } - }; - - self.bind_blend_state(context); - - self.dirty_flag.remove(DirtyStateFlag::GRAPHICS_PIPELINE); - } - - pub fn bind(&mut self, context: 
&ComPtr) { - if self.dirty_flag.contains(DirtyStateFlag::RENDER_TARGETS) { - self.bind_render_targets(context); - } - - if self.dirty_flag.contains(DirtyStateFlag::GRAPHICS_PIPELINE) { - self.bind_graphics_pipeline(context); - } - - if self.dirty_flag.contains(DirtyStateFlag::VERTEX_BUFFERS) { - self.bind_vertex_buffers(context); - } - - if self.dirty_flag.contains(DirtyStateFlag::VIEWPORTS) { - self.bind_viewports(context); - } - } -} - -pub struct CommandBuffer { - // TODO: better way of sharing - internal: internal::Internal, - context: ComPtr, - list: RefCell>>, - - // since coherent memory needs to be synchronized at submission, we need to gather up all - // coherent resources that are used in the command buffer and flush/invalidate them accordingly - // before executing. - flush_coherent_memory: Vec, - invalidate_coherent_memory: Vec, - - // holds information about the active render pass - render_pass_cache: Option, - - cache: CommandBufferState, - - one_time_submit: bool, -} - -impl fmt::Debug for CommandBuffer { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("CommandBuffer") - } -} - -unsafe impl Send for CommandBuffer {} -unsafe impl Sync for CommandBuffer {} - -impl CommandBuffer { - fn create_deferred(device: ComPtr, internal: internal::Internal) -> Self { - let mut context: *mut d3d11::ID3D11DeviceContext = ptr::null_mut(); - let hr = - unsafe { device.CreateDeferredContext(0, &mut context as *mut *mut _ as *mut *mut _) }; - assert_eq!(hr, winerror::S_OK); - - CommandBuffer { - internal, - context: unsafe { ComPtr::from_raw(context) }, - list: RefCell::new(None), - flush_coherent_memory: Vec::new(), - invalidate_coherent_memory: Vec::new(), - render_pass_cache: None, - cache: CommandBufferState::new(), - one_time_submit: false, - } - } - - fn as_raw_list(&self) -> ComPtr { - if self.one_time_submit { - self.list.replace(None).unwrap() - } else { - self.list.borrow().clone().unwrap() - } - } - - fn defer_coherent_flush(&mut 
self, buffer: &Buffer) { - if !self - .flush_coherent_memory - .iter() - .any(|m| m.buffer == buffer.internal.raw) - { - self.flush_coherent_memory.push(MemoryFlush { - host_memory: buffer.host_ptr, - sync_range: SyncRange::Whole, - buffer: buffer.internal.raw, - }); - } - } - - fn defer_coherent_invalidate(&mut self, buffer: &Buffer) { - if !self - .invalidate_coherent_memory - .iter() - .any(|m| m.buffer == buffer.internal.raw) - { - self.invalidate_coherent_memory.push(MemoryInvalidate { - working_buffer: Some(self.internal.working_buffer.clone()), - working_buffer_size: self.internal.working_buffer_size, - host_memory: buffer.host_ptr, - sync_range: buffer.bound_range.clone(), - buffer: buffer.internal.raw, - }); - } - } - - fn reset(&mut self) { - self.flush_coherent_memory.clear(); - self.invalidate_coherent_memory.clear(); - self.render_pass_cache = None; - self.cache.clear(); - } -} - -impl command::CommandBuffer for CommandBuffer { - unsafe fn begin( - &mut self, - flags: command::CommandBufferFlags, - _info: command::CommandBufferInheritanceInfo, - ) { - self.one_time_submit = flags.contains(command::CommandBufferFlags::ONE_TIME_SUBMIT); - self.reset(); - } - - unsafe fn finish(&mut self) { - let mut list = ptr::null_mut(); - let hr = self - .context - .FinishCommandList(FALSE, &mut list as *mut *mut _ as *mut *mut _); - assert_eq!(hr, winerror::S_OK); - - self.list.replace(Some(ComPtr::from_raw(list))); - } - - unsafe fn reset(&mut self, _release_resources: bool) { - self.reset(); - } - - unsafe fn begin_render_pass( - &mut self, - render_pass: &RenderPass, - framebuffer: &Framebuffer, - target_rect: pso::Rect, - clear_values: T, - _first_subpass: command::SubpassContents, - ) where - T: IntoIterator, - T::Item: Borrow, - { - use pass::AttachmentLoadOp as Alo; - - let mut clear_iter = clear_values.into_iter(); - let mut attachment_clears = Vec::new(); - - for (idx, attachment) in render_pass.attachments.iter().enumerate() { - //let attachment = 
render_pass.attachments[attachment_ref]; - let format = attachment.format.unwrap(); - - let subpass_id = render_pass.subpasses.iter().position(|sp| sp.is_using(idx)); - - if attachment.has_clears() { - let value = *clear_iter.next().unwrap().borrow(); - - match (attachment.ops.load, attachment.stencil_ops.load) { - (Alo::Clear, Alo::Clear) if format.is_depth() => { - attachment_clears.push(AttachmentClear { - subpass_id, - attachment_id: idx, - raw: command::AttachmentClear::DepthStencil { - depth: Some(value.depth_stencil.depth), - stencil: Some(value.depth_stencil.stencil), - }, - }); - } - (Alo::Clear, Alo::Clear) => { - attachment_clears.push(AttachmentClear { - subpass_id, - attachment_id: idx, - raw: command::AttachmentClear::Color { - index: idx, - value: value.color, - }, - }); - - attachment_clears.push(AttachmentClear { - subpass_id, - attachment_id: idx, - raw: command::AttachmentClear::DepthStencil { - depth: None, - stencil: Some(value.depth_stencil.stencil), - }, - }); - } - (Alo::Clear, _) if format.is_depth() => { - attachment_clears.push(AttachmentClear { - subpass_id, - attachment_id: idx, - raw: command::AttachmentClear::DepthStencil { - depth: Some(value.depth_stencil.depth), - stencil: None, - }, - }); - } - (Alo::Clear, _) => { - attachment_clears.push(AttachmentClear { - subpass_id, - attachment_id: idx, - raw: command::AttachmentClear::Color { - index: idx, - value: value.color, - }, - }); - } - (_, Alo::Clear) => { - attachment_clears.push(AttachmentClear { - subpass_id, - attachment_id: idx, - raw: command::AttachmentClear::DepthStencil { - depth: None, - stencil: Some(value.depth_stencil.stencil), - }, - }); - } - _ => {} - } - } - } - - self.render_pass_cache = Some(RenderPassCache { - render_pass: render_pass.clone(), - framebuffer: framebuffer.clone(), - attachment_clear_values: attachment_clears, - target_rect, - current_subpass: 0, - }); - - if let Some(ref mut current_render_pass) = self.render_pass_cache { - 
current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache); - } - } - - unsafe fn next_subpass(&mut self, _contents: command::SubpassContents) { - if let Some(ref mut current_render_pass) = self.render_pass_cache { - // TODO: resolve msaa - current_render_pass.next_subpass(); - current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache); - } - } - - unsafe fn end_render_pass(&mut self) { - self.context - .OMSetRenderTargets(8, [ptr::null_mut(); 8].as_ptr(), ptr::null_mut()); - - self.render_pass_cache = None; - } - - unsafe fn pipeline_barrier<'a, T>( - &mut self, - _stages: Range, - _dependencies: memory::Dependencies, - _barriers: T, - ) where - T: IntoIterator, - T::Item: Borrow>, - { - // TODO: should we track and assert on resource states? - // unimplemented!() - } - - unsafe fn clear_image( - &mut self, - image: &Image, - _: image::Layout, - value: command::ClearValue, - subresource_ranges: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - for range in subresource_ranges { - let range = range.borrow(); - - // TODO: clear Int/Uint depending on format - if range.aspects.contains(format::Aspects::COLOR) { - for layer in range.layers.clone() { - for level in range.levels.clone() { - self.context.ClearRenderTargetView( - image.get_rtv(level, layer).unwrap().as_raw(), - &value.color.float32, - ); - } - } - } - - let mut depth_stencil_flags = 0; - if range.aspects.contains(format::Aspects::DEPTH) { - depth_stencil_flags |= d3d11::D3D11_CLEAR_DEPTH; - } - - if range.aspects.contains(format::Aspects::STENCIL) { - depth_stencil_flags |= d3d11::D3D11_CLEAR_STENCIL; - } - - if depth_stencil_flags != 0 { - for layer in range.layers.clone() { - for level in range.levels.clone() { - self.context.ClearDepthStencilView( - image.get_dsv(level, layer).unwrap().as_raw(), - depth_stencil_flags, - value.depth_stencil.depth, - value.depth_stencil.stencil as _, - ); - } - } - } - } - } - - unsafe fn clear_attachments(&mut 
self, clears: T, rects: U) - where - T: IntoIterator, - T::Item: Borrow, - U: IntoIterator, - U::Item: Borrow, - { - if let Some(ref pass) = self.render_pass_cache { - self.cache.dirty_flag.insert( - DirtyStateFlag::GRAPHICS_PIPELINE - | DirtyStateFlag::VIEWPORTS - | DirtyStateFlag::RENDER_TARGETS, - ); - self.internal - .clear_attachments(&self.context, clears, rects, pass); - self.cache.bind(&self.context); - } else { - panic!("`clear_attachments` can only be called inside a renderpass") - } - } - - unsafe fn resolve_image( - &mut self, - _src: &Image, - _src_layout: image::Layout, - _dst: &Image, - _dst_layout: image::Layout, - _regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - unimplemented!() - } - - unsafe fn blit_image( - &mut self, - src: &Image, - _src_layout: image::Layout, - dst: &Image, - _dst_layout: image::Layout, - filter: image::Filter, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - self.cache - .dirty_flag - .insert(DirtyStateFlag::GRAPHICS_PIPELINE); - - self.internal - .blit_2d_image(&self.context, src, dst, filter, regions); - - self.cache.bind(&self.context); - } - - unsafe fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView) { - self.context.IASetIndexBuffer( - ibv.buffer.internal.raw, - conv::map_index_type(ibv.index_type), - ibv.offset as u32, - ); - } - - unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) - where - I: IntoIterator, - T: Borrow, - { - for (i, (buf, offset)) in buffers.into_iter().enumerate() { - let idx = i + first_binding as usize; - let buf = buf.borrow(); - - if buf.properties.contains(memory::Properties::COHERENT) { - self.defer_coherent_flush(buf); - } - - self.cache - .set_vertex_buffer(idx, offset as u32, buf.internal.raw); - } - - self.cache.bind_vertex_buffers(&self.context); - } - - unsafe fn set_viewports(&mut self, _first_viewport: u32, viewports: T) - where - T: IntoIterator, - T::Item: Borrow, - { - let viewports = viewports - 
.into_iter() - .map(|v| { - let v = v.borrow(); - conv::map_viewport(v) - }) - .collect::>(); - - // TODO: DX only lets us set all VPs at once, so cache in slice? - self.cache.set_viewports(&viewports); - self.cache.bind_viewports(&self.context); - } - - unsafe fn set_scissors(&mut self, _first_scissor: u32, scissors: T) - where - T: IntoIterator, - T::Item: Borrow, - { - let scissors = scissors - .into_iter() - .map(|s| { - let s = s.borrow(); - conv::map_rect(s) - }) - .collect::>(); - - // TODO: same as for viewports - self.context - .RSSetScissorRects(scissors.len() as _, scissors.as_ptr()); - } - - unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) { - self.cache.set_blend_factor(color); - self.cache.bind_blend_state(&self.context); - } - - unsafe fn set_stencil_reference(&mut self, _faces: pso::Face, value: pso::StencilValue) { - self.cache.stencil_ref = Some(value); - } - - unsafe fn set_stencil_read_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) { - self.cache.stencil_read_mask = Some(value); - } - - unsafe fn set_stencil_write_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) { - self.cache.stencil_write_mask = Some(value); - } - - unsafe fn set_depth_bounds(&mut self, _bounds: Range) { - unimplemented!() - } - - unsafe fn set_line_width(&mut self, width: f32) { - validate_line_width(width); - } - - unsafe fn set_depth_bias(&mut self, _depth_bias: pso::DepthBias) { - // TODO: - // unimplemented!() - } - - unsafe fn bind_graphics_pipeline(&mut self, pipeline: &GraphicsPipeline) { - self.cache.set_graphics_pipeline(pipeline.clone()); - self.cache.bind_graphics_pipeline(&self.context); - } - - unsafe fn bind_graphics_descriptor_sets<'a, I, J>( - &mut self, - layout: &PipelineLayout, - first_set: usize, - sets: I, - _offsets: J, - ) where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow, - { - let _scope = debug_scope!(&self.context, "BindGraphicsDescriptorSets"); - - // TODO: find a better 
solution to invalidating old bindings.. - self.context.CSSetUnorderedAccessViews( - 0, - 16, - [ptr::null_mut(); 16].as_ptr(), - ptr::null_mut(), - ); - - //let offsets: Vec = offsets.into_iter().map(|o| *o.borrow()).collect(); - - for (set, info) in sets - .into_iter() - .zip(&layout.sets[first_set ..]) - { - let set = set.borrow(); - - { - let coherent_buffers = set.coherent_buffers.lock(); - for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() { - // TODO: merge sync range if a flush already exists - if !self - .flush_coherent_memory - .iter() - .any(|m| m.buffer == sync.device_buffer) - { - self.flush_coherent_memory.push(MemoryFlush { - host_memory: sync.host_ptr, - sync_range: sync.range.clone(), - buffer: sync.device_buffer, - }); - } - } - - for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() { - if !self - .invalidate_coherent_memory - .iter() - .any(|m| m.buffer == sync.device_buffer) - { - self.invalidate_coherent_memory.push(MemoryInvalidate { - working_buffer: Some(self.internal.working_buffer.clone()), - working_buffer_size: self.internal.working_buffer_size, - host_memory: sync.host_ptr, - sync_range: sync.range.clone(), - buffer: sync.device_buffer, - }); - } - } - } - - // TODO: offsets - - if let Some(rd) = info.registers.vs.c.as_some() { - self.context.VSSetConstantBuffers( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ); - } - if let Some(rd) = info.registers.vs.t.as_some() { - self.context.VSSetShaderResources( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ); - } - if let Some(rd) = info.registers.vs.s.as_some() { - self.context.VSSetSamplers( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ); - } - - if let Some(rd) = info.registers.ps.c.as_some() { - 
self.context.PSSetConstantBuffers( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ); - } - if let Some(rd) = info.registers.ps.t.as_some() { - self.context.PSSetShaderResources( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ); - } - if let Some(rd) = info.registers.ps.s.as_some() { - self.context.PSSetSamplers( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ); - } - } - } - - unsafe fn bind_compute_pipeline(&mut self, pipeline: &ComputePipeline) { - self.context - .CSSetShader(pipeline.cs.as_raw(), ptr::null_mut(), 0); - } - - unsafe fn bind_compute_descriptor_sets( - &mut self, - layout: &PipelineLayout, - first_set: usize, - sets: I, - _offsets: J, - ) where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow, - { - let _scope = debug_scope!(&self.context, "BindComputeDescriptorSets"); - - self.context.CSSetUnorderedAccessViews( - 0, - 16, - [ptr::null_mut(); 16].as_ptr(), - ptr::null_mut(), - ); - for (set, info) in sets - .into_iter() - .zip(&layout.sets[first_set ..]) - { - let set = set.borrow(); - - { - let coherent_buffers = set.coherent_buffers.lock(); - for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() { - if !self - .flush_coherent_memory - .iter() - .any(|m| m.buffer == sync.device_buffer) - { - self.flush_coherent_memory.push(MemoryFlush { - host_memory: sync.host_ptr, - sync_range: sync.range.clone(), - buffer: sync.device_buffer, - }); - } - } - - for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() { - if !self - .invalidate_coherent_memory - .iter() - .any(|m| m.buffer == sync.device_buffer) - { - self.invalidate_coherent_memory.push(MemoryInvalidate { - working_buffer: Some(self.internal.working_buffer.clone()), - working_buffer_size: 
self.internal.working_buffer_size, - host_memory: sync.host_ptr, - sync_range: sync.range.clone(), - buffer: sync.device_buffer, - }); - } - } - } - - // TODO: offsets - - if let Some(rd) = info.registers.cs.c.as_some() { - self.context.CSSetConstantBuffers( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ); - } - if let Some(rd) = info.registers.cs.t.as_some() { - self.context.CSSetShaderResources( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ); - } - if let Some(rd) = info.registers.cs.u.as_some() { - self.context.CSSetUnorderedAccessViews( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ptr::null_mut(), - ); - } - if let Some(rd) = info.registers.cs.s.as_some() { - self.context.CSSetSamplers( - rd.res_index as u32, - rd.count as u32, - set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, - ); - } - } - } - - unsafe fn dispatch(&mut self, count: WorkGroupCount) { - self.context.Dispatch(count[0], count[1], count[2]); - } - - unsafe fn dispatch_indirect(&mut self, _buffer: &Buffer, _offset: buffer::Offset) { - unimplemented!() - } - - unsafe fn fill_buffer(&mut self, _buffer: &Buffer, _range: R, _data: u32) - where - R: RangeArg, - { - unimplemented!() - } - - unsafe fn update_buffer(&mut self, _buffer: &Buffer, _offset: buffer::Offset, _data: &[u8]) { - unimplemented!() - } - - unsafe fn copy_buffer(&mut self, src: &Buffer, dst: &Buffer, regions: T) - where - T: IntoIterator, - T::Item: Borrow, - { - if src.properties.contains(memory::Properties::COHERENT) { - self.defer_coherent_flush(src); - } - - for region in regions.into_iter() { - let info = region.borrow(); - let dst_box = d3d11::D3D11_BOX { - left: info.src as _, - top: 0, - front: 0, - right: (info.src + info.size) as _, - bottom: 1, - back: 1, - }; 
- - self.context.CopySubresourceRegion( - dst.internal.raw as _, - 0, - info.dst as _, - 0, - 0, - src.internal.raw as _, - 0, - &dst_box, - ); - - if let Some(disjoint_cb) = dst.internal.disjoint_cb { - self.context.CopySubresourceRegion( - disjoint_cb as _, - 0, - info.dst as _, - 0, - 0, - src.internal.raw as _, - 0, - &dst_box, - ); - } - } - } - - unsafe fn copy_image( - &mut self, - src: &Image, - _: image::Layout, - dst: &Image, - _: image::Layout, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - self.internal - .copy_image_2d(&self.context, src, dst, regions); - } - - unsafe fn copy_buffer_to_image( - &mut self, - buffer: &Buffer, - image: &Image, - _: image::Layout, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - if buffer.properties.contains(memory::Properties::COHERENT) { - self.defer_coherent_flush(buffer); - } - - self.internal - .copy_buffer_to_image_2d(&self.context, buffer, image, regions); - } - - unsafe fn copy_image_to_buffer( - &mut self, - image: &Image, - _: image::Layout, - buffer: &Buffer, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - if buffer.properties.contains(memory::Properties::COHERENT) { - self.defer_coherent_invalidate(buffer); - } - - self.internal - .copy_image_2d_to_buffer(&self.context, image, buffer, regions); - } - - unsafe fn draw(&mut self, vertices: Range, instances: Range) { - self.context.DrawInstanced( - vertices.end - vertices.start, - instances.end - instances.start, - vertices.start, - instances.start, - ); - } - - unsafe fn draw_indexed( - &mut self, - indices: Range, - base_vertex: VertexOffset, - instances: Range, - ) { - self.context.DrawIndexedInstanced( - indices.end - indices.start, - instances.end - instances.start, - indices.start, - base_vertex, - instances.start, - ); - } - - unsafe fn draw_indirect( - &mut self, - _buffer: &Buffer, - _offset: buffer::Offset, - _draw_count: DrawCount, - _stride: u32, - ) { - unimplemented!() - } - - unsafe fn 
draw_indexed_indirect( - &mut self, - _buffer: &Buffer, - _offset: buffer::Offset, - _draw_count: DrawCount, - _stride: u32, - ) { - unimplemented!() - } - - unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) { - unimplemented!() - } - - unsafe fn reset_event(&mut self, _: &(), _: pso::PipelineStage) { - unimplemented!() - } - - unsafe fn wait_events<'a, I, J>(&mut self, _: I, _: Range, _: J) - where - I: IntoIterator, - I::Item: Borrow<()>, - J: IntoIterator, - J::Item: Borrow>, - { - unimplemented!() - } - - unsafe fn begin_query(&mut self, _query: query::Query, _flags: query::ControlFlags) { - unimplemented!() - } - - unsafe fn end_query(&mut self, _query: query::Query) { - unimplemented!() - } - - unsafe fn reset_query_pool(&mut self, _pool: &QueryPool, _queries: Range) { - unimplemented!() - } - - unsafe fn copy_query_pool_results( - &mut self, - _pool: &QueryPool, - _queries: Range, - _buffer: &Buffer, - _offset: buffer::Offset, - _stride: buffer::Offset, - _flags: query::ResultFlags, - ) { - unimplemented!() - } - - unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, _query: query::Query) { - unimplemented!() - } - - unsafe fn push_graphics_constants( - &mut self, - _layout: &PipelineLayout, - _stages: pso::ShaderStageFlags, - _offset: u32, - _constants: &[u32], - ) { - // unimplemented!() - } - - unsafe fn push_compute_constants( - &mut self, - _layout: &PipelineLayout, - _offset: u32, - _constants: &[u32], - ) { - unimplemented!() - } - - unsafe fn execute_commands<'a, T, I>(&mut self, _buffers: I) - where - T: 'a + Borrow, - I: IntoIterator, - { - unimplemented!() - } -} - -#[derive(Clone, Debug)] -enum SyncRange { - Whole, - Partial(Range), -} - -#[derive(Debug)] -pub struct MemoryFlush { - host_memory: *mut u8, - sync_range: SyncRange, - buffer: *mut d3d11::ID3D11Buffer, -} - -pub struct MemoryInvalidate { - working_buffer: Option>, - working_buffer_size: u64, - host_memory: *mut u8, - sync_range: Range, - buffer: *mut 
d3d11::ID3D11Buffer, -} - -impl fmt::Debug for MemoryInvalidate { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("MemoryInvalidate") - } -} - -fn intersection(a: &Range, b: &Range) -> Option> { - let min = if a.start < b.start { a } else { b }; - let max = if min == a { b } else { a }; - - if min.end < max.start { - None - } else { - let end = if min.end < max.end { min.end } else { max.end }; - Some(max.start .. end) - } -} - -impl MemoryFlush { - fn do_flush(&self, context: &ComPtr) { - let src = self.host_memory; - - debug_marker!(context, "Flush({:?})", self.sync_range); - let region = match self.sync_range { - SyncRange::Partial(ref range) if range.start < range.end => Some(d3d11::D3D11_BOX { - left: range.start as u32, - top: 0, - front: 0, - right: range.end as u32, - bottom: 1, - back: 1, - }), - _ => None, - }; - - unsafe { - context.UpdateSubresource( - self.buffer as _, - 0, - if let Some(region) = region { - ®ion - } else { - ptr::null_mut() - }, - src as _, - 0, - 0, - ); - } - } -} - -impl MemoryInvalidate { - fn download( - &self, - context: &ComPtr, - buffer: *mut d3d11::ID3D11Buffer, - range: Range, - ) { - unsafe { - context.CopySubresourceRegion( - self.working_buffer.clone().unwrap().as_raw() as _, - 0, - 0, - 0, - 0, - buffer as _, - 0, - &d3d11::D3D11_BOX { - left: range.start as _, - top: 0, - front: 0, - right: range.end as _, - bottom: 1, - back: 1, - }, - ); - - // copy over to our vec - let dst = self.host_memory.offset(range.start as isize); - let src = self.map(&context); - ptr::copy(src, dst, (range.end - range.start) as usize); - self.unmap(&context); - } - } - - fn do_invalidate(&self, context: &ComPtr) { - let stride = self.working_buffer_size; - let range = &self.sync_range; - let len = range.end - range.start; - let chunks = len / stride; - let remainder = len % stride; - - // we split up the copies into chunks the size of our working buffer - for i in 0 .. 
chunks { - let offset = range.start + i * stride; - let range = offset .. (offset + stride); - - self.download(context, self.buffer, range); - } - - if remainder != 0 { - self.download(context, self.buffer, (chunks * stride) .. range.end); - } - } - - fn map(&self, context: &ComPtr) -> *mut u8 { - assert_eq!(self.working_buffer.is_some(), true); - - unsafe { - let mut map = mem::zeroed(); - let hr = context.Map( - self.working_buffer.clone().unwrap().as_raw() as _, - 0, - d3d11::D3D11_MAP_READ, - 0, - &mut map, - ); - - assert_eq!(hr, winerror::S_OK); - - map.pData as _ - } - } - - fn unmap(&self, context: &ComPtr) { - unsafe { - context.Unmap(self.working_buffer.clone().unwrap().as_raw() as _, 0); - } - } -} - -// Since we dont have any heaps to work with directly, everytime we bind a -// buffer/image to memory we allocate a dx11 resource and assign it a range. -// -// `HOST_VISIBLE` memory gets a `Vec` which covers the entire memory -// range. This forces us to only expose non-coherent memory, as this -// abstraction acts as a "cache" since the "staging buffer" vec is disjoint -// from all the dx11 resources we store in the struct. -pub struct Memory { - properties: memory::Properties, - size: u64, - - mapped_ptr: *mut u8, - - // staging buffer covering the whole memory region, if it's HOST_VISIBLE - host_visible: Option>>, - - // list of all buffers bound to this memory - local_buffers: RefCell, InternalBuffer)>>, - - // list of all images bound to this memory - _local_images: RefCell, InternalImage)>>, -} - -impl fmt::Debug for Memory { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Memory") - } -} - -unsafe impl Send for Memory {} -unsafe impl Sync for Memory {} - -impl Memory { - pub fn resolve>(&self, range: &R) -> Range { - *range.start().unwrap_or(&0) .. 
*range.end().unwrap_or(&self.size) - } - - pub fn bind_buffer(&self, range: Range, buffer: InternalBuffer) { - self.local_buffers.borrow_mut().push((range, buffer)); - } - - pub fn flush(&self, context: &ComPtr, range: Range) { - use buffer::Usage; - - for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() { - if let Some(range) = intersection(&range, &buffer_range) { - let ptr = self.mapped_ptr; - - // we need to handle 3 cases for updating buffers: - // - // 1. if our buffer was created as a `UNIFORM` buffer *and* other usage flags, we - // also have a disjoint buffer which only has `D3D11_BIND_CONSTANT_BUFFER` due - // to DX11 limitation. we then need to update both the original buffer and the - // disjoint one with the *whole* range (TODO: allow for partial updates) - // - // 2. if our buffer was created with *only* `UNIFORM` usage we need to upload - // the whole range (TODO: allow for partial updates) - // - // 3. the general case, without any `UNIFORM` usage has no restrictions on - // partial updates, so we upload the specified range - // - if buffer.usage.contains(Usage::UNIFORM) && buffer.usage != Usage::UNIFORM { - MemoryFlush { - host_memory: unsafe { ptr.offset(buffer_range.start as _) }, - sync_range: SyncRange::Whole, - buffer: buffer.raw, - } - .do_flush(&context); - - if let Some(disjoint) = buffer.disjoint_cb { - MemoryFlush { - host_memory: unsafe { ptr.offset(buffer_range.start as _) }, - sync_range: SyncRange::Whole, - buffer: disjoint, - } - .do_flush(&context); - } - } else if buffer.usage == Usage::UNIFORM { - MemoryFlush { - host_memory: unsafe { ptr.offset(buffer_range.start as _) }, - sync_range: SyncRange::Whole, - buffer: buffer.raw, - } - .do_flush(&context); - } else { - let local_start = range.start - buffer_range.start; - let local_len = range.end - range.start; - - MemoryFlush { - host_memory: unsafe { ptr.offset(range.start as _) }, - sync_range: SyncRange::Partial(local_start .. 
(local_start + local_len)), - buffer: buffer.raw, - } - .do_flush(&context); - } - } - } - } - - pub fn invalidate( - &self, - context: &ComPtr, - range: Range, - working_buffer: ComPtr, - working_buffer_size: u64, - ) { - for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() { - if let Some(range) = intersection(&range, &buffer_range) { - MemoryInvalidate { - working_buffer: Some(working_buffer.clone()), - working_buffer_size, - host_memory: self.mapped_ptr, - sync_range: range.clone(), - buffer: buffer.raw, - } - .do_invalidate(&context); - } - } - } -} - -#[derive(Debug)] -pub struct CommandPool { - device: ComPtr, - internal: internal::Internal, -} - -unsafe impl Send for CommandPool {} -unsafe impl Sync for CommandPool {} - -impl hal::pool::CommandPool for CommandPool { - unsafe fn reset(&mut self, _release_resources: bool) { - //unimplemented!() - } - - unsafe fn allocate_one(&mut self, _level: command::Level) -> CommandBuffer { - CommandBuffer::create_deferred(self.device.clone(), self.internal.clone()) - } - - unsafe fn free(&mut self, _cbufs: I) - where - I: IntoIterator, - { - // TODO: - // unimplemented!() - } -} - -/// Similarily to dx12 backend, we can handle either precompiled dxbc or spirv -pub enum ShaderModule { - Dxbc(Vec), - Spirv(Vec), -} - -// TODO: temporary -impl ::fmt::Debug for ShaderModule { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "{}", "ShaderModule { ... 
}") - } -} - -unsafe impl Send for ShaderModule {} -unsafe impl Sync for ShaderModule {} - -#[derive(Clone, Debug)] -pub struct SubpassDesc { - pub color_attachments: Vec, - pub depth_stencil_attachment: Option, - pub input_attachments: Vec, - pub resolve_attachments: Vec, -} - -impl SubpassDesc { - pub(crate) fn is_using(&self, at_id: pass::AttachmentId) -> bool { - self.color_attachments - .iter() - .chain(self.depth_stencil_attachment.iter()) - .chain(self.input_attachments.iter()) - .chain(self.resolve_attachments.iter()) - .any(|&(id, _)| id == at_id) - } -} - -#[derive(Clone, Debug)] -pub struct RenderPass { - pub attachments: Vec, - pub subpasses: Vec, -} - -#[derive(Clone, Debug)] -pub struct Framebuffer { - attachments: Vec, - layers: image::Layer, -} - -#[derive(Clone, Debug)] -pub struct InternalBuffer { - raw: *mut d3d11::ID3D11Buffer, - // TODO: need to sync between `raw` and `disjoint_cb`, same way as we do with - // `MemoryFlush/Invalidate` - disjoint_cb: Option<*mut d3d11::ID3D11Buffer>, // if unbound this buffer might be null. 
- srv: Option<*mut d3d11::ID3D11ShaderResourceView>, - uav: Option<*mut d3d11::ID3D11UnorderedAccessView>, - usage: buffer::Usage, -} - -pub struct Buffer { - internal: InternalBuffer, - properties: memory::Properties, // empty if unbound - host_ptr: *mut u8, // null if unbound - bound_range: Range, // 0 if unbound - requirements: memory::Requirements, - bind: d3d11::D3D11_BIND_FLAG, -} - -impl fmt::Debug for Buffer { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Buffer") - } -} - -unsafe impl Send for Buffer {} -unsafe impl Sync for Buffer {} - -#[derive(Debug)] -pub struct BufferView; - -pub struct Image { - kind: image::Kind, - usage: image::Usage, - format: format::Format, - view_caps: image::ViewCapabilities, - decomposed_format: conv::DecomposedDxgiFormat, - mip_levels: image::Level, - internal: InternalImage, - bind: d3d11::D3D11_BIND_FLAG, - requirements: memory::Requirements, -} - -impl fmt::Debug for Image { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Image") - } -} - -pub struct InternalImage { - raw: *mut d3d11::ID3D11Resource, - copy_srv: Option>, - srv: Option>, - - /// Contains UAVs for all subresources - unordered_access_views: Vec>, - - /// Contains DSVs for all subresources - depth_stencil_views: Vec>, - - /// Contains RTVs for all subresources - render_target_views: Vec>, -} - -impl fmt::Debug for InternalImage { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("InternalImage") - } -} - -unsafe impl Send for Image {} -unsafe impl Sync for Image {} - -impl Image { - pub fn calc_subresource(&self, mip_level: UINT, layer: UINT) -> UINT { - mip_level + (layer * self.mip_levels as UINT) - } - - pub fn get_uav( - &self, - mip_level: image::Level, - _layer: image::Layer, - ) -> Option<&ComPtr> { - self.internal - .unordered_access_views - .get(self.calc_subresource(mip_level as _, 0) as usize) - } - - pub fn get_dsv( - &self, - mip_level: image::Level, - layer: 
image::Layer, - ) -> Option<&ComPtr> { - self.internal - .depth_stencil_views - .get(self.calc_subresource(mip_level as _, layer as _) as usize) - } - - pub fn get_rtv( - &self, - mip_level: image::Level, - layer: image::Layer, - ) -> Option<&ComPtr> { - self.internal - .render_target_views - .get(self.calc_subresource(mip_level as _, layer as _) as usize) - } -} - -#[derive(Clone)] -pub struct ImageView { - format: format::Format, - rtv_handle: Option>, - srv_handle: Option>, - dsv_handle: Option>, - uav_handle: Option>, -} - -impl fmt::Debug for ImageView { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("ImageView") - } -} - -unsafe impl Send for ImageView {} -unsafe impl Sync for ImageView {} - -pub struct Sampler { - sampler_handle: ComPtr, -} - -impl fmt::Debug for Sampler { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Sampler") - } -} - -unsafe impl Send for Sampler {} -unsafe impl Sync for Sampler {} - -pub struct ComputePipeline { - cs: ComPtr, -} - -impl fmt::Debug for ComputePipeline { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("ComputePipeline") - } -} - -unsafe impl Send for ComputePipeline {} -unsafe impl Sync for ComputePipeline {} - -/// NOTE: some objects are hashed internally and reused when created with the -/// same params[0], need to investigate which interfaces this applies -/// to. 
-/// -/// [0]: https://msdn.microsoft.com/en-us/library/windows/desktop/ff476500(v=vs.85).aspx -#[derive(Clone)] -pub struct GraphicsPipeline { - vs: ComPtr, - gs: Option>, - hs: Option>, - ds: Option>, - ps: Option>, - topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY, - input_layout: ComPtr, - rasterizer_state: ComPtr, - blend_state: ComPtr, - depth_stencil_state: Option<( - ComPtr, - pso::State, - )>, - baked_states: pso::BakedStates, - required_bindings: u32, - max_vertex_bindings: u32, - strides: Vec, -} - -impl fmt::Debug for GraphicsPipeline { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("GraphicsPipeline") - } -} - -unsafe impl Send for GraphicsPipeline {} -unsafe impl Sync for GraphicsPipeline {} - -type ResourceIndex = u8; -type DescriptorIndex = u16; - -#[derive(Clone, Debug, Default)] -struct RegisterData { - // CBV - c: T, - // SRV - t: T, - // UAV - u: T, - // Sampler - s: T, -} - -impl RegisterData { - fn map U>( - &self, - fun: F, - ) -> RegisterData { - RegisterData { - c: fun(&self.c), - t: fun(&self.t), - u: fun(&self.u), - s: fun(&self.s), - } - } -} - -impl RegisterData { - fn add_content_many(&mut self, content: DescriptorContent, many: DescriptorIndex) { - if content.contains(DescriptorContent::CBV) { - self.c += many; - } - if content.contains(DescriptorContent::SRV) { - self.t += many; - } - if content.contains(DescriptorContent::UAV) { - self.u += many; - } - if content.contains(DescriptorContent::SAMPLER) { - self.s += many; - } - } - - fn add_content(&mut self, content: DescriptorContent) { - self.add_content_many(content, 1) - } - - fn sum(&self) -> DescriptorIndex { - self.c + self.t + self.u + self.s - } -} - -#[derive(Clone, Debug, Default)] -struct MultiStageData { - vs: T, - ps: T, - cs: T, -} - -impl MultiStageData { - fn select(self, stage: pso::Stage) -> T { - match stage { - pso::Stage::Vertex => self.vs, - pso::Stage::Fragment => self.ps, - pso::Stage::Compute => self.cs, - _ => panic!("Unsupported stage 
{:?}", stage) - } - } -} - -impl MultiStageData> { - fn map_register U>( - &self, - fun: F, - ) -> MultiStageData> { - MultiStageData { - vs: self.vs.map(&fun), - ps: self.ps.map(&fun), - cs: self.cs.map(&fun), - } - } - - fn map_other) -> U>( - &self, - fun: F, - ) -> MultiStageData { - MultiStageData { - vs: fun(&self.vs), - ps: fun(&self.ps), - cs: fun(&self.cs), - } - } -} - -impl MultiStageData> { - fn add_content(&mut self, content: DescriptorContent, stages: pso::ShaderStageFlags) { - if stages.contains(pso::ShaderStageFlags::VERTEX) { - self.vs.add_content(content); - } - if stages.contains(pso::ShaderStageFlags::FRAGMENT) { - self.ps.add_content(content); - } - if stages.contains(pso::ShaderStageFlags::COMPUTE) { - self.cs.add_content(content); - } - } - - fn sum(&self) -> DescriptorIndex { - self.vs.sum() + self.ps.sum() + self.cs.sum() - } -} - -#[derive(Clone, Debug, Default)] -struct RegisterPoolMapping { - offset: DescriptorIndex, - count: ResourceIndex, -} - -#[derive(Clone, Debug, Default)] -struct RegisterInfo { - res_index: ResourceIndex, - pool_offset: DescriptorIndex, - count: ResourceIndex, -} - -impl RegisterInfo { - fn as_some(&self) -> Option<&Self> { - if self.count == 0 { - None - } else { - Some(self) - } - } -} - -#[derive(Clone, Debug, Default)] -struct RegisterAccumulator { - res_index: ResourceIndex, -} - -impl RegisterAccumulator { - fn to_mapping( - &self, - cur_offset: &mut DescriptorIndex, - ) -> RegisterPoolMapping { - let offset = *cur_offset; - *cur_offset += self.res_index as DescriptorIndex; - - RegisterPoolMapping { - offset, - count: self.res_index, - } - } - - fn advance( - &mut self, - mapping: &RegisterPoolMapping, - ) -> RegisterInfo { - let res_index = self.res_index; - self.res_index += mapping.count; - RegisterInfo { - res_index, - pool_offset: mapping.offset, - count: mapping.count, - } - } -} - -impl RegisterData { - fn to_mapping( - &self, - pool_offset: &mut DescriptorIndex, - ) -> RegisterData { - RegisterData { 
- c: self.c.to_mapping(pool_offset), - t: self.t.to_mapping(pool_offset), - u: self.u.to_mapping(pool_offset), - s: self.s.to_mapping(pool_offset), - } - } - - fn advance( - &mut self, - mapping: &RegisterData, - ) -> RegisterData { - RegisterData { - c: self.c.advance(&mapping.c), - t: self.t.advance(&mapping.t), - u: self.u.advance(&mapping.u), - s: self.s.advance(&mapping.s), - } - } -} - -impl MultiStageData> { - fn to_mapping(&self) -> MultiStageData> { - let mut pool_offset = 0; - MultiStageData { - vs: self.vs.to_mapping(&mut pool_offset), - ps: self.ps.to_mapping(&mut pool_offset), - cs: self.cs.to_mapping(&mut pool_offset), - } - } - - fn advance( - &mut self, - mapping: &MultiStageData>, - ) -> MultiStageData> { - MultiStageData { - vs: self.vs.advance(&mapping.vs), - ps: self.ps.advance(&mapping.ps), - cs: self.cs.advance(&mapping.cs), - } - } -} - -#[derive(Clone, Debug)] -struct DescriptorSetInfo { - bindings: Arc>, - registers: MultiStageData>, -} - -impl DescriptorSetInfo { - fn find_register( - &self, - stage: pso::Stage, - binding_index: pso::DescriptorBinding, - ) -> (DescriptorContent, RegisterData) { - let mut res_offsets = self.registers - .map_register(|info| info.res_index as DescriptorIndex) - .select(stage); - for binding in self.bindings.iter() { - let content = DescriptorContent::from(binding.ty); - if binding.binding == binding_index { - return (content, res_offsets.map(|offset| *offset as ResourceIndex)) - } - res_offsets.add_content(content); - } - panic!("Unable to find binding {:?}", binding_index); - } -} - -/// The pipeline layout holds optimized (less api calls) ranges of objects for all descriptor sets -/// belonging to the pipeline object. -#[derive(Debug)] -pub struct PipelineLayout { - sets: Vec, -} - -/// The descriptor set layout contains mappings from a given binding to the offset in our -/// descriptor pool storage and what type of descriptor it is (combined image sampler takes up two -/// handles). 
-#[derive(Debug)] -pub struct DescriptorSetLayout { - bindings: Arc>, - pool_mapping: MultiStageData>, -} - -#[derive(Debug)] -struct CoherentBufferFlushRange { - device_buffer: *mut d3d11::ID3D11Buffer, - host_ptr: *mut u8, - range: SyncRange, -} - -#[derive(Debug)] -struct CoherentBufferInvalidateRange { - device_buffer: *mut d3d11::ID3D11Buffer, - host_ptr: *mut u8, - range: Range, -} - -#[derive(Debug)] -struct CoherentBuffers { - // descriptor set writes containing coherent resources go into these vecs and are added to the - // command buffers own Vec on binding the set. - flush_coherent_buffers: RefCell>, - invalidate_coherent_buffers: RefCell>, -} - -impl CoherentBuffers { - fn _add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { - let new = buffer.internal.raw; - - if old != new { - let mut buffers = self.flush_coherent_buffers.borrow_mut(); - - let pos = buffers.iter().position(|sync| old == sync.device_buffer); - - let sync_range = CoherentBufferFlushRange { - device_buffer: new, - host_ptr: buffer.host_ptr, - range: SyncRange::Whole, - }; - - if let Some(pos) = pos { - buffers[pos] = sync_range; - } else { - buffers.push(sync_range); - } - - if let Some(disjoint) = buffer.internal.disjoint_cb { - let pos = buffers - .iter() - .position(|sync| disjoint == sync.device_buffer); - - let sync_range = CoherentBufferFlushRange { - device_buffer: disjoint, - host_ptr: buffer.host_ptr, - range: SyncRange::Whole, - }; - - if let Some(pos) = pos { - buffers[pos] = sync_range; - } else { - buffers.push(sync_range); - } - } - } - } - - fn _add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { - let new = buffer.internal.raw; - - if old != new { - let mut buffers = self.invalidate_coherent_buffers.borrow_mut(); - - let pos = buffers.iter().position(|sync| old == sync.device_buffer); - - let sync_range = CoherentBufferInvalidateRange { - device_buffer: new, - host_ptr: buffer.host_ptr, - range: buffer.bound_range.clone(), - }; - - if 
let Some(pos) = pos { - buffers[pos] = sync_range; - } else { - buffers.push(sync_range); - } - } - } -} - -/// Newtype around a common interface that all bindable resources inherit from. -#[derive(Debug, Copy, Clone)] -#[repr(C)] -struct Descriptor(*mut d3d11::ID3D11DeviceChild); - -bitflags! { - /// A set of D3D11 descriptor types that need to be associated - /// with a single gfx-hal `DescriptorType`. - #[derive(Default)] - pub struct DescriptorContent: u8 { - const CBV = 0x1; - const SRV = 0x2; - const UAV = 0x4; - const SAMPLER = 0x8; - /// Indicates if the descriptor is a dynamic uniform/storage buffer. - /// Important as dynamic buffers are implemented as root descriptors. - const DYNAMIC = 0x10; - } -} - -impl From for DescriptorContent { - fn from(ty: pso::DescriptorType) -> Self { - use hal::pso::DescriptorType as Dt; - match ty { - Dt::Sampler => DescriptorContent::SAMPLER, - Dt::CombinedImageSampler => DescriptorContent::SRV | DescriptorContent::SAMPLER, - Dt::SampledImage | Dt::InputAttachment | Dt::UniformTexelBuffer => { - DescriptorContent::SRV - } - Dt::StorageImage | Dt::StorageBuffer | Dt::StorageTexelBuffer => { - DescriptorContent::SRV | DescriptorContent::UAV - } - Dt::StorageBufferDynamic => { - DescriptorContent::SRV | DescriptorContent::UAV | DescriptorContent::DYNAMIC - } - Dt::UniformBuffer => DescriptorContent::CBV, - Dt::UniformBufferDynamic => DescriptorContent::CBV | DescriptorContent::DYNAMIC, - } - } -} - -pub struct DescriptorSet { - offset: DescriptorIndex, - len: DescriptorIndex, - handles: *mut Descriptor, - coherent_buffers: Mutex, - layout: DescriptorSetLayout, -} - -impl fmt::Debug for DescriptorSet { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("DescriptorSet") - } -} - -unsafe impl Send for DescriptorSet {} -unsafe impl Sync for DescriptorSet {} - -impl DescriptorSet { - fn _add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { - let new = buffer.internal.raw; - - if old != new { - 
self.coherent_buffers.lock()._add_flush(old, buffer); - } - } - - fn _add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { - let new = buffer.internal.raw; - - if old != new { - self.coherent_buffers.lock()._add_invalidate(old, buffer); - } - } - - unsafe fn assign(&self, offset: DescriptorIndex, value: *mut d3d11::ID3D11DeviceChild) { - *self.handles.offset(offset as isize) = Descriptor(value); - } - - unsafe fn assign_stages( - &self, - offsets: &MultiStageData, - stages: pso::ShaderStageFlags, - value: *mut d3d11::ID3D11DeviceChild, - ) { - if stages.contains(pso::ShaderStageFlags::VERTEX) { - self.assign(offsets.vs, value); - } - if stages.contains(pso::ShaderStageFlags::FRAGMENT) { - self.assign(offsets.ps, value); - } - if stages.contains(pso::ShaderStageFlags::COMPUTE) { - self.assign(offsets.cs, value); - } - } -} - -#[derive(Debug)] -pub struct DescriptorPool { - handles: Vec, - allocator: RangeAllocator, -} - -unsafe impl Send for DescriptorPool {} -unsafe impl Sync for DescriptorPool {} - -impl DescriptorPool { - fn with_capacity(size: DescriptorIndex) -> Self { - DescriptorPool { - handles: vec![Descriptor(ptr::null_mut()); size as usize], - allocator: RangeAllocator::new(0 .. size), - } - } -} - -impl pso::DescriptorPool for DescriptorPool { - unsafe fn allocate_set( - &mut self, - layout: &DescriptorSetLayout, - ) -> Result { - let len = layout.pool_mapping - .map_register(|mapping| mapping.count as DescriptorIndex) - .sum() - .max(1); - - self.allocator - .allocate_range(len) - .map(|range| { - for handle in &mut self.handles[range.start as usize .. 
range.end as usize] { - *handle = Descriptor(ptr::null_mut()); - } - - DescriptorSet { - offset: range.start, - len, - handles: self.handles.as_mut_ptr().offset(range.start as _), - coherent_buffers: Mutex::new(CoherentBuffers { - flush_coherent_buffers: RefCell::new(Vec::new()), - invalidate_coherent_buffers: RefCell::new(Vec::new()), - }), - layout: DescriptorSetLayout { - bindings: Arc::clone(&layout.bindings), - pool_mapping: layout.pool_mapping.clone(), - }, - } - }) - .map_err(|_| pso::AllocationError::OutOfPoolMemory) - } - - unsafe fn free_sets(&mut self, descriptor_sets: I) - where - I: IntoIterator, - { - for set in descriptor_sets { - self.allocator - .free_range(set.offset .. (set.offset + set.len)) - } - } - - unsafe fn reset(&mut self) { - self.allocator.reset(); - } -} - -#[derive(Debug)] -pub struct RawFence { - mutex: Mutex, - condvar: Condvar, -} - -pub type Fence = Arc; - -#[derive(Debug)] -pub struct Semaphore; -#[derive(Debug)] -pub struct QueryPool; - -#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] -pub enum Backend {} -impl hal::Backend for Backend { - type Instance = Instance; - type PhysicalDevice = PhysicalDevice; - type Device = device::Device; - - type Surface = Surface; - type Swapchain = Swapchain; - - type QueueFamily = QueueFamily; - type CommandQueue = CommandQueue; - type CommandBuffer = CommandBuffer; - - type Memory = Memory; - type CommandPool = CommandPool; - - type ShaderModule = ShaderModule; - type RenderPass = RenderPass; - type Framebuffer = Framebuffer; - - type Buffer = Buffer; - type BufferView = BufferView; - type Image = Image; - - type ImageView = ImageView; - type Sampler = Sampler; - - type ComputePipeline = ComputePipeline; - type GraphicsPipeline = GraphicsPipeline; - type PipelineLayout = PipelineLayout; - type PipelineCache = (); - type DescriptorSetLayout = DescriptorSetLayout; - type DescriptorPool = DescriptorPool; - type DescriptorSet = DescriptorSet; - - type Fence = Fence; - type Semaphore = 
Semaphore; - type Event = (); - type QueryPool = QueryPool; -} - -fn validate_line_width(width: f32) { - // Note from the Vulkan spec: - // > If the wide lines feature is not enabled, lineWidth must be 1.0 - // Simply assert and no-op because DX11 never exposes `Features::LINE_WIDTH` - assert_eq!(width, 1.0); -} +/*! +# DX11 backend internals. + +## Pipeline Layout + +In D3D11 there are tables of CBVs, SRVs, UAVs, and samplers. + +Each descriptor type can take 1 or two of those entry points. + +The descriptor pool is just and array of handles, belonging to descriptor set 1, descriptor set 2, etc. +Each range of descriptors in a descriptor set area of the pool is split into shader stages, +which in turn is split into CBS/SRV/UAV/Sampler parts. That allows binding a descriptor set as a list +of continuous descriptor ranges (per type, per shader stage). + +!*/ + +//#[deny(missing_docs)] + +#[macro_use] +extern crate bitflags; +#[macro_use] +extern crate log; +#[macro_use] +extern crate winapi; + +use hal::{ + adapter, + buffer, + command, + format, + image, + memory, + pass, + pso, + query, + queue, + window, + DrawCount, + IndexCount, + InstanceCount, + Limits, + VertexCount, + VertexOffset, + WorkGroupCount, +}; + +use range_alloc::RangeAllocator; + +use winapi::{ + shared::{ + dxgi::{IDXGIAdapter, IDXGIFactory, IDXGISwapChain}, + dxgiformat, + minwindef::{FALSE, HMODULE, UINT}, + windef::{HWND, RECT}, + winerror, + }, + um::{d3d11, d3dcommon, winuser::GetClientRect}, + Interface as _, +}; + +use wio::com::ComPtr; + +use parking_lot::{Condvar, Mutex}; + +use std::{borrow::Borrow, cell::RefCell, fmt, mem, ops::Range, os::raw::c_void, ptr, sync::Arc}; + +macro_rules! debug_scope { + ($context:expr, $($arg:tt)+) => ({ + #[cfg(debug_assertions)] + { + $crate::debug::DebugScope::with_name( + $context, + format_args!($($arg)+), + ) + } + #[cfg(not(debug_assertions))] + { + () + } + }); +} + +macro_rules! 
debug_marker { + ($context:expr, $($arg:tt)+) => ({ + #[cfg(debug_assertions)] + { + $crate::debug::debug_marker( + $context, + format_args!($($arg)+), + ); + } + }); +} + +mod conv; +#[cfg(debug_assertions)] +mod debug; +mod device; +mod dxgi; +mod internal; +mod shader; + +type CreateFun = unsafe extern "system" fn( + *mut IDXGIAdapter, + UINT, + HMODULE, + UINT, + *const UINT, + UINT, + UINT, + *mut *mut d3d11::ID3D11Device, + *mut UINT, + *mut *mut d3d11::ID3D11DeviceContext, +) -> winerror::HRESULT; + +#[derive(Clone)] +pub(crate) struct ViewInfo { + resource: *mut d3d11::ID3D11Resource, + kind: image::Kind, + caps: image::ViewCapabilities, + view_kind: image::ViewKind, + format: dxgiformat::DXGI_FORMAT, + range: image::SubresourceRange, +} + +impl fmt::Debug for ViewInfo { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("ViewInfo") + } +} + +#[derive(Debug)] +pub struct Instance { + pub(crate) factory: ComPtr, + pub(crate) dxgi_version: dxgi::DxgiVersion, + library_d3d11: Arc, + library_dxgi: libloading::Library, +} + +unsafe impl Send for Instance {} +unsafe impl Sync for Instance {} + +impl Instance { + pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface { + Surface { + factory: self.factory.clone(), + wnd_handle: hwnd as *mut _, + presentation: None, + } + } +} + +fn get_features( + _device: ComPtr, + _feature_level: d3dcommon::D3D_FEATURE_LEVEL, +) -> hal::Features { + hal::Features::empty() + | hal::Features::ROBUST_BUFFER_ACCESS + | hal::Features::FULL_DRAW_INDEX_U32 + | hal::Features::FORMAT_BC + | hal::Features::INSTANCE_RATE + | hal::Features::SAMPLER_MIP_LOD_BIAS + | hal::Features::SAMPLER_MIRROR_CLAMP_EDGE + | hal::Features::NDC_Y_UP +} + +fn get_format_properties( + device: ComPtr, +) -> [format::Properties; format::NUM_FORMATS] { + let mut format_properties = [format::Properties::default(); format::NUM_FORMATS]; + for (i, props) in &mut format_properties.iter_mut().enumerate().skip(1) { + let format: 
format::Format = unsafe { mem::transmute(i as u32) }; + + let dxgi_format = match conv::map_format(format) { + Some(format) => format, + None => continue, + }; + + let mut support = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT { + InFormat: dxgi_format, + OutFormatSupport: 0, + }; + let mut support_2 = d3d11::D3D11_FEATURE_DATA_FORMAT_SUPPORT2 { + InFormat: dxgi_format, + OutFormatSupport2: 0, + }; + + let hr = unsafe { + device.CheckFeatureSupport( + d3d11::D3D11_FEATURE_FORMAT_SUPPORT, + &mut support as *mut _ as *mut _, + mem::size_of::() as UINT, + ) + }; + + if hr == winerror::S_OK { + let can_buffer = 0 != support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BUFFER; + let can_image = 0 + != support.OutFormatSupport + & (d3d11::D3D11_FORMAT_SUPPORT_TEXTURE1D + | d3d11::D3D11_FORMAT_SUPPORT_TEXTURE2D + | d3d11::D3D11_FORMAT_SUPPORT_TEXTURE3D + | d3d11::D3D11_FORMAT_SUPPORT_TEXTURECUBE); + let can_linear = can_image && !format.surface_desc().is_compressed(); + if can_image { + props.optimal_tiling |= + format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC; + } + if can_linear { + props.linear_tiling |= + format::ImageFeature::SAMPLED | format::ImageFeature::BLIT_SRC; + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER != 0 { + props.buffer_features |= format::BufferFeature::VERTEX; + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_SAMPLE != 0 { + props.optimal_tiling |= format::ImageFeature::SAMPLED_LINEAR; + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_RENDER_TARGET != 0 { + props.optimal_tiling |= + format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST; + if can_linear { + props.linear_tiling |= + format::ImageFeature::COLOR_ATTACHMENT | format::ImageFeature::BLIT_DST; + } + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_BLENDABLE != 0 { + props.optimal_tiling |= format::ImageFeature::COLOR_ATTACHMENT_BLEND; + } + if support.OutFormatSupport & 
d3d11::D3D11_FORMAT_SUPPORT_DEPTH_STENCIL != 0 { + props.optimal_tiling |= format::ImageFeature::DEPTH_STENCIL_ATTACHMENT; + } + if support.OutFormatSupport & d3d11::D3D11_FORMAT_SUPPORT_SHADER_LOAD != 0 { + //TODO: check d3d12::D3D12_FORMAT_SUPPORT2_UAV_TYPED_LOAD ? + if can_buffer { + props.buffer_features |= format::BufferFeature::UNIFORM_TEXEL; + } + } + + let hr = unsafe { + device.CheckFeatureSupport( + d3d11::D3D11_FEATURE_FORMAT_SUPPORT2, + &mut support_2 as *mut _ as *mut _, + mem::size_of::() as UINT, + ) + }; + if hr == winerror::S_OK { + if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD != 0 { + //TODO: other atomic flags? + if can_buffer { + props.buffer_features |= format::BufferFeature::STORAGE_TEXEL_ATOMIC; + } + if can_image { + props.optimal_tiling |= format::ImageFeature::STORAGE_ATOMIC; + } + } + if support_2.OutFormatSupport2 & d3d11::D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE != 0 { + if can_buffer { + props.buffer_features |= format::BufferFeature::STORAGE_TEXEL; + } + if can_image { + props.optimal_tiling |= format::ImageFeature::STORAGE; + } + } + } + } + + //TODO: blits, linear tiling + } + + format_properties +} + +impl hal::Instance for Instance { + fn create(_: &str, _: u32) -> Result { + // TODO: get the latest factory we can find + + match dxgi::get_dxgi_factory() { + Ok((library_dxgi, factory, dxgi_version)) => { + info!("DXGI version: {:?}", dxgi_version); + let library_d3d11 = Arc::new( + libloading::Library::new("d3d11.dll").map_err(|_| hal::UnsupportedBackend)?, + ); + Ok(Instance { + factory, + dxgi_version, + library_d3d11, + library_dxgi, + }) + } + Err(hr) => { + info!("Failed on factory creation: {:?}", hr); + Err(hal::UnsupportedBackend) + } + } + } + + fn enumerate_adapters(&self) -> Vec> { + let mut adapters = Vec::new(); + let mut idx = 0; + + let func: libloading::Symbol = + match unsafe { self.library_d3d11.get(b"D3D11CreateDevice") } { + Ok(func) => func, + Err(e) => { + error!("Unable to get 
device creation function: {:?}", e); + return Vec::new(); + } + }; + + while let Ok((adapter, info)) = + dxgi::get_adapter(idx, self.factory.as_raw(), self.dxgi_version) + { + idx += 1; + + use hal::memory::Properties; + + // TODO: move into function? + let (device, feature_level) = { + let feature_level = get_feature_level(&func, adapter.as_raw()); + + let mut device = ptr::null_mut(); + let hr = unsafe { + func( + adapter.as_raw() as *mut _, + d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, + ptr::null_mut(), + 0, + [feature_level].as_ptr(), + 1, + d3d11::D3D11_SDK_VERSION, + &mut device as *mut *mut _ as *mut *mut _, + ptr::null_mut(), + ptr::null_mut(), + ) + }; + + if !winerror::SUCCEEDED(hr) { + continue; + } + + ( + unsafe { ComPtr::::from_raw(device) }, + feature_level, + ) + }; + + let memory_properties = adapter::MemoryProperties { + memory_types: vec![ + adapter::MemoryType { + properties: Properties::DEVICE_LOCAL, + heap_index: 0, + }, + adapter::MemoryType { + properties: Properties::CPU_VISIBLE + | Properties::COHERENT + | Properties::CPU_CACHED, + heap_index: 1, + }, + adapter::MemoryType { + properties: Properties::CPU_VISIBLE | Properties::CPU_CACHED, + heap_index: 1, + }, + ], + // TODO: would using *VideoMemory and *SystemMemory from + // DXGI_ADAPTER_DESC be too optimistic? 
:) + memory_heaps: vec![!0, !0], + }; + + let limits = hal::Limits { + max_image_1d_size: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION as _, + max_image_2d_size: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, + max_image_3d_size: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION as _, + max_image_cube_size: d3d11::D3D11_REQ_TEXTURECUBE_DIMENSION as _, + max_image_array_layers: d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _, + max_texel_elements: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, //TODO + max_patch_size: 0, // TODO + max_viewports: d3d11::D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE as _, + max_viewport_dimensions: [d3d11::D3D11_VIEWPORT_BOUNDS_MAX; 2], + max_framebuffer_extent: hal::image::Extent { + //TODO + width: 4096, + height: 4096, + depth: 1, + }, + max_compute_work_group_count: [ + d3d11::D3D11_CS_THREAD_GROUP_MAX_X, + d3d11::D3D11_CS_THREAD_GROUP_MAX_Y, + d3d11::D3D11_CS_THREAD_GROUP_MAX_Z, + ], + max_compute_work_group_size: [ + d3d11::D3D11_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP, + 1, + 1, + ], // TODO + max_vertex_input_attribute_offset: 255, // TODO + max_vertex_input_attributes: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _, + max_vertex_input_binding_stride: + d3d11::D3D11_REQ_MULTI_ELEMENT_STRUCTURE_SIZE_IN_BYTES as _, + max_vertex_input_bindings: d3d11::D3D11_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT as _, // TODO: verify same as attributes + max_vertex_output_components: d3d11::D3D11_VS_OUTPUT_REGISTER_COUNT as _, // TODO + min_texel_buffer_offset_alignment: 1, // TODO + min_uniform_buffer_offset_alignment: 16, // TODO: verify + min_storage_buffer_offset_alignment: 1, // TODO + framebuffer_color_sample_counts: 1, // TODO + framebuffer_depth_sample_counts: 1, // TODO + framebuffer_stencil_sample_counts: 1, // TODO + max_color_attachments: d3d11::D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT as _, + buffer_image_granularity: 1, + non_coherent_atom_size: 1, // TODO + max_sampler_anisotropy: 16., + 
optimal_buffer_copy_offset_alignment: 1, // TODO + optimal_buffer_copy_pitch_alignment: 1, // TODO + min_vertex_input_binding_stride_alignment: 1, + ..hal::Limits::default() //TODO + }; + + let features = get_features(device.clone(), feature_level); + let format_properties = get_format_properties(device.clone()); + let hints = hal::Hints::BASE_VERTEX_INSTANCE_DRAWING; + + let physical_device = PhysicalDevice { + adapter, + library_d3d11: Arc::clone(&self.library_d3d11), + features, + hints, + limits, + memory_properties, + format_properties, + }; + + info!("{:#?}", info); + + adapters.push(adapter::Adapter { + info, + physical_device, + queue_families: vec![QueueFamily], + }); + } + + adapters + } + + unsafe fn create_surface( + &self, + has_handle: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result { + match has_handle.raw_window_handle() { + raw_window_handle::RawWindowHandle::Windows(handle) => { + Ok(self.create_surface_from_hwnd(handle.hwnd)) + } + _ => Err(hal::window::InitError::UnsupportedWindowHandle), + } + } + + unsafe fn destroy_surface(&self, _surface: Surface) { + // TODO: Implement Surface cleanup + } +} + +pub struct PhysicalDevice { + adapter: ComPtr, + library_d3d11: Arc, + features: hal::Features, + hints: hal::Hints, + limits: hal::Limits, + memory_properties: adapter::MemoryProperties, + format_properties: [format::Properties; format::NUM_FORMATS], +} + +impl fmt::Debug for PhysicalDevice { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("PhysicalDevice") + } +} + +unsafe impl Send for PhysicalDevice {} +unsafe impl Sync for PhysicalDevice {} + +// TODO: does the adapter we get earlier matter for feature level? 
+fn get_feature_level(func: &CreateFun, adapter: *mut IDXGIAdapter) -> d3dcommon::D3D_FEATURE_LEVEL { + let requested_feature_levels = [ + d3dcommon::D3D_FEATURE_LEVEL_11_1, + d3dcommon::D3D_FEATURE_LEVEL_11_0, + d3dcommon::D3D_FEATURE_LEVEL_10_1, + d3dcommon::D3D_FEATURE_LEVEL_10_0, + d3dcommon::D3D_FEATURE_LEVEL_9_3, + d3dcommon::D3D_FEATURE_LEVEL_9_2, + d3dcommon::D3D_FEATURE_LEVEL_9_1, + ]; + + let mut feature_level = d3dcommon::D3D_FEATURE_LEVEL_9_1; + let hr = unsafe { + func( + adapter, + d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, + ptr::null_mut(), + 0, + requested_feature_levels[..].as_ptr(), + requested_feature_levels.len() as _, + d3d11::D3D11_SDK_VERSION, + ptr::null_mut(), + &mut feature_level as *mut _, + ptr::null_mut(), + ) + }; + + if !winerror::SUCCEEDED(hr) { + // if there is no 11.1 runtime installed, requesting + // `D3D_FEATURE_LEVEL_11_1` will return E_INVALIDARG so we just retry + // without that + if hr == winerror::E_INVALIDARG { + let hr = unsafe { + func( + adapter, + d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, + ptr::null_mut(), + 0, + requested_feature_levels[1 ..].as_ptr(), + (requested_feature_levels.len() - 1) as _, + d3d11::D3D11_SDK_VERSION, + ptr::null_mut(), + &mut feature_level as *mut _, + ptr::null_mut(), + ) + }; + + if !winerror::SUCCEEDED(hr) { + // TODO: device might not support any feature levels? 
+ unimplemented!(); + } + } + } + + feature_level +} + +// TODO: PhysicalDevice +impl adapter::PhysicalDevice for PhysicalDevice { + unsafe fn open( + &self, + families: &[(&QueueFamily, &[queue::QueuePriority])], + requested_features: hal::Features, + ) -> Result, hal::device::CreationError> { + let func: libloading::Symbol = + self.library_d3d11.get(b"D3D11CreateDevice").unwrap(); + + let (device, cxt) = { + if !self.features().contains(requested_features) { + return Err(hal::device::CreationError::MissingFeature); + } + + let feature_level = get_feature_level(&func, self.adapter.as_raw()); + let mut returned_level = d3dcommon::D3D_FEATURE_LEVEL_9_1; + + #[cfg(debug_assertions)] + let create_flags = d3d11::D3D11_CREATE_DEVICE_DEBUG; + #[cfg(not(debug_assertions))] + let create_flags = 0; + + // TODO: request debug device only on debug config? + let mut device = ptr::null_mut(); + let mut cxt = ptr::null_mut(); + let hr = func( + self.adapter.as_raw() as *mut _, + d3dcommon::D3D_DRIVER_TYPE_UNKNOWN, + ptr::null_mut(), + create_flags, + [feature_level].as_ptr(), + 1, + d3d11::D3D11_SDK_VERSION, + &mut device as *mut *mut _ as *mut *mut _, + &mut returned_level as *mut _, + &mut cxt as *mut *mut _ as *mut *mut _, + ); + + // NOTE: returns error if adapter argument is non-null and driver + // type is not unknown; or if debug device is requested but not + // present + if !winerror::SUCCEEDED(hr) { + return Err(hal::device::CreationError::InitializationFailed); + } + + info!("feature level={:x}", feature_level); + + (ComPtr::from_raw(device), ComPtr::from_raw(cxt)) + }; + + let device = device::Device::new( + device, + cxt, + requested_features, + self.memory_properties.clone(), + ); + + // TODO: deferred context => 1 cxt/queue? + let queue_groups = families + .into_iter() + .map(|&(_family, prio)| { + assert_eq!(prio.len(), 1); + let mut group = queue::QueueGroup::new(queue::QueueFamilyId(0)); + + // TODO: multiple queues? 
+ let queue = CommandQueue { + context: device.context.clone(), + }; + group.add_queue(queue); + group + }) + .collect(); + + Ok(adapter::Gpu { + device, + queue_groups, + }) + } + + fn format_properties(&self, fmt: Option) -> format::Properties { + let idx = fmt.map(|fmt| fmt as usize).unwrap_or(0); + self.format_properties[idx] + } + + fn image_format_properties( + &self, + format: format::Format, + dimensions: u8, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Option { + conv::map_format(format)?; //filter out unknown formats + + let supported_usage = { + use hal::image::Usage as U; + let format_props = &self.format_properties[format as usize]; + let props = match tiling { + image::Tiling::Optimal => format_props.optimal_tiling, + image::Tiling::Linear => format_props.linear_tiling, + }; + let mut flags = U::empty(); + // Note: these checks would have been nicer if we had explicit BLIT usage + if props.contains(format::ImageFeature::BLIT_SRC) { + flags |= U::TRANSFER_SRC; + } + if props.contains(format::ImageFeature::BLIT_DST) { + flags |= U::TRANSFER_DST; + } + if props.contains(format::ImageFeature::SAMPLED) { + flags |= U::SAMPLED; + } + if props.contains(format::ImageFeature::STORAGE) { + flags |= U::STORAGE; + } + if props.contains(format::ImageFeature::COLOR_ATTACHMENT) { + flags |= U::COLOR_ATTACHMENT; + } + if props.contains(format::ImageFeature::DEPTH_STENCIL_ATTACHMENT) { + flags |= U::DEPTH_STENCIL_ATTACHMENT; + } + flags + }; + if !supported_usage.contains(usage) { + return None; + } + + let max_resource_size = + (d3d11::D3D11_REQ_RESOURCE_SIZE_IN_MEGABYTES_EXPRESSION_A_TERM as usize) << 20; + Some(match tiling { + image::Tiling::Optimal => image::FormatProperties { + max_extent: match dimensions { + 1 => image::Extent { + width: d3d11::D3D11_REQ_TEXTURE1D_U_DIMENSION, + height: 1, + depth: 1, + }, + 2 => image::Extent { + width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, + height: 
d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, + depth: 1, + }, + 3 => image::Extent { + width: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, + height: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, + depth: d3d11::D3D11_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, + }, + _ => return None, + }, + max_levels: d3d11::D3D11_REQ_MIP_LEVELS as _, + max_layers: match dimensions { + 1 => d3d11::D3D11_REQ_TEXTURE1D_ARRAY_AXIS_DIMENSION as _, + 2 => d3d11::D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION as _, + _ => return None, + }, + sample_count_mask: if dimensions == 2 + && !view_caps.contains(image::ViewCapabilities::KIND_CUBE) + && (usage.contains(image::Usage::COLOR_ATTACHMENT) + | usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT)) + { + 0x3F //TODO: use D3D12_FEATURE_DATA_FORMAT_SUPPORT + } else { + 0x1 + }, + max_resource_size, + }, + image::Tiling::Linear => image::FormatProperties { + max_extent: match dimensions { + 2 => image::Extent { + width: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, + height: d3d11::D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION, + depth: 1, + }, + _ => return None, + }, + max_levels: 1, + max_layers: 1, + sample_count_mask: 0x1, + max_resource_size, + }, + }) + } + + fn memory_properties(&self) -> adapter::MemoryProperties { + self.memory_properties.clone() + } + + fn features(&self) -> hal::Features { + self.features + } + + fn hints(&self) -> hal::Hints { + self.hints + } + + fn limits(&self) -> Limits { + self.limits + } +} + +struct Presentation { + swapchain: ComPtr, + view: ComPtr, + format: format::Format, + size: window::Extent2D, +} + +pub struct Surface { + pub(crate) factory: ComPtr, + wnd_handle: HWND, + presentation: Option, +} + +impl fmt::Debug for Surface { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Surface") + } +} + +unsafe impl Send for Surface {} +unsafe impl Sync for Surface {} + +impl window::Surface for Surface { + fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool { + true + } + + fn 
capabilities(&self, _physical_device: &PhysicalDevice) -> window::SurfaceCapabilities { + let current_extent = unsafe { + let mut rect: RECT = mem::zeroed(); + assert_ne!( + 0, + GetClientRect(self.wnd_handle as *mut _, &mut rect as *mut RECT) + ); + Some(window::Extent2D { + width: (rect.right - rect.left) as u32, + height: (rect.bottom - rect.top) as u32, + }) + }; + + // TODO: flip swap effects require dx11.1/windows8 + // NOTE: some swap effects affect msaa capabilities.. + // TODO: _DISCARD swap effects can only have one image? + window::SurfaceCapabilities { + present_modes: window::PresentMode::FIFO, //TODO + composite_alpha_modes: window::CompositeAlphaMode::OPAQUE, //TODO + image_count: 1 ..= 16, // TODO: + current_extent, + extents: window::Extent2D { + width: 16, + height: 16, + } ..= window::Extent2D { + width: 4096, + height: 4096, + }, + max_image_layers: 1, + usage: image::Usage::COLOR_ATTACHMENT | image::Usage::TRANSFER_SRC, + } + } + + fn supported_formats(&self, _physical_device: &PhysicalDevice) -> Option> { + Some(vec![ + format::Format::Bgra8Srgb, + format::Format::Bgra8Unorm, + format::Format::Rgba8Srgb, + format::Format::Rgba8Unorm, + format::Format::A2b10g10r10Unorm, + format::Format::Rgba16Sfloat, + ]) + } +} + +impl window::PresentationSurface for Surface { + type SwapchainImage = ImageView; + + unsafe fn configure_swapchain( + &mut self, + device: &device::Device, + config: window::SwapchainConfig, + ) -> Result<(), window::CreationError> { + assert!(image::Usage::COLOR_ATTACHMENT.contains(config.image_usage)); + + let swapchain = match self.presentation.take() { + Some(present) => { + if present.format == config.format && present.size == config.extent { + self.presentation = Some(present); + return Ok(()); + } + let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap(); + drop(present.view); + let result = present.swapchain.ResizeBuffers( + config.image_count, + config.extent.width, + config.extent.height, + non_srgb_format, 
+ 0, + ); + if result != winerror::S_OK { + error!("ResizeBuffers failed with 0x{:x}", result as u32); + return Err(window::CreationError::WindowInUse(hal::device::WindowInUse)); + } + present.swapchain + } + None => { + let (swapchain, _) = + device.create_swapchain_impl(&config, self.wnd_handle, self.factory.clone())?; + swapchain + } + }; + + let mut resource: *mut d3d11::ID3D11Resource = ptr::null_mut(); + assert_eq!( + winerror::S_OK, + swapchain.GetBuffer( + 0 as _, + &d3d11::ID3D11Resource::uuidof(), + &mut resource as *mut *mut _ as *mut *mut _, + ) + ); + + let kind = image::Kind::D2(config.extent.width, config.extent.height, 1, 1); + let format = conv::map_format(config.format).unwrap(); + let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(format); + + let view_info = ViewInfo { + resource, + kind, + caps: image::ViewCapabilities::empty(), + view_kind: image::ViewKind::D2, + format: decomposed.rtv.unwrap(), + range: image::SubresourceRange { + aspects: format::Aspects::COLOR, + levels: 0 .. 1, + layers: 0 .. 
1, + }, + }; + let view = device.view_image_as_render_target(&view_info).unwrap(); + + (*resource).Release(); + + self.presentation = Some(Presentation { + swapchain, + view, + format: config.format, + size: config.extent, + }); + Ok(()) + } + + unsafe fn unconfigure_swapchain(&mut self, _device: &device::Device) { + self.presentation = None; + } + + unsafe fn acquire_image( + &mut self, + _timeout_ns: u64, //TODO: use the timeout + ) -> Result<(ImageView, Option), window::AcquireError> { + let present = self.presentation.as_ref().unwrap(); + let image_view = ImageView { + format: present.format, + rtv_handle: Some(present.view.clone()), + dsv_handle: None, + srv_handle: None, + uav_handle: None, + }; + Ok((image_view, None)) + } +} + +pub struct Swapchain { + dxgi_swapchain: ComPtr, +} + +impl fmt::Debug for Swapchain { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Swapchain") + } +} + +unsafe impl Send for Swapchain {} +unsafe impl Sync for Swapchain {} + +impl window::Swapchain for Swapchain { + unsafe fn acquire_image( + &mut self, + _timeout_ns: u64, + _semaphore: Option<&Semaphore>, + _fence: Option<&Fence>, + ) -> Result<(window::SwapImageIndex, Option), window::AcquireError> { + // TODO: non-`_DISCARD` swap effects have more than one buffer, `FLIP` + // effects are dxgi 1.3 (w10+?) 
in which case there is + // `GetCurrentBackBufferIndex()` on the swapchain + Ok((0, None)) + } +} + +#[derive(Debug, Clone, Copy)] +pub struct QueueFamily; + +impl queue::QueueFamily for QueueFamily { + fn queue_type(&self) -> queue::QueueType { + queue::QueueType::General + } + fn max_queues(&self) -> usize { + 1 + } + fn id(&self) -> queue::QueueFamilyId { + queue::QueueFamilyId(0) + } +} + +#[derive(Clone)] +pub struct CommandQueue { + context: ComPtr, +} + +impl fmt::Debug for CommandQueue { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandQueue") + } +} + +unsafe impl Send for CommandQueue {} +unsafe impl Sync for CommandQueue {} + +impl queue::CommandQueue for CommandQueue { + unsafe fn submit<'a, T, Ic, S, Iw, Is>( + &mut self, + submission: queue::Submission, + fence: Option<&Fence>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + Is: IntoIterator, + { + let _scope = debug_scope!(&self.context, "Submit(fence={:?})", fence); + for cmd_buf in submission.command_buffers { + let cmd_buf = cmd_buf.borrow(); + + let _scope = debug_scope!( + &self.context, + "CommandBuffer ({}/{})", + cmd_buf.flush_coherent_memory.len(), + cmd_buf.invalidate_coherent_memory.len() + ); + + { + let _scope = debug_scope!(&self.context, "Pre-Exec: Flush"); + for sync in &cmd_buf.flush_coherent_memory { + sync.do_flush(&self.context); + } + } + self.context + .ExecuteCommandList(cmd_buf.as_raw_list().as_raw(), FALSE); + { + let _scope = debug_scope!(&self.context, "Post-Exec: Invalidate"); + for sync in &cmd_buf.invalidate_coherent_memory { + sync.do_invalidate(&self.context); + } + } + } + + if let Some(fence) = fence { + *fence.mutex.lock() = true; + fence.condvar.notify_all(); + } + } + + unsafe fn present<'a, W, Is, S, Iw>( + &mut self, + swapchains: Is, + _wait_semaphores: Iw, + ) -> Result, window::PresentError> + where + W: 'a + Borrow, + Is: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + { + for 
(swapchain, _idx) in swapchains { + swapchain.borrow().dxgi_swapchain.Present(1, 0); + } + + Ok(None) + } + + unsafe fn present_surface( + &mut self, + surface: &mut Surface, + _image: ImageView, + _wait_semaphore: Option<&Semaphore>, + ) -> Result, window::PresentError> { + surface + .presentation + .as_ref() + .unwrap() + .swapchain + .Present(1, 0); + Ok(None) + } + + fn wait_idle(&self) -> Result<(), hal::device::OutOfMemory> { + // unimplemented!() + Ok(()) + } +} + +#[derive(Debug)] +pub struct AttachmentClear { + subpass_id: Option, + attachment_id: usize, + raw: command::AttachmentClear, +} + +#[derive(Debug)] +pub struct RenderPassCache { + pub render_pass: RenderPass, + pub framebuffer: Framebuffer, + pub attachment_clear_values: Vec, + pub target_rect: pso::Rect, + pub current_subpass: pass::SubpassId, +} + +impl RenderPassCache { + pub fn start_subpass( + &mut self, + internal: &mut internal::Internal, + context: &ComPtr, + cache: &mut CommandBufferState, + ) { + let attachments = self + .attachment_clear_values + .iter() + .filter(|clear| clear.subpass_id == Some(self.current_subpass)) + .map(|clear| clear.raw); + + cache + .dirty_flag + .insert(DirtyStateFlag::GRAPHICS_PIPELINE | DirtyStateFlag::VIEWPORTS); + internal.clear_attachments( + context, + attachments, + &[pso::ClearRect { + rect: self.target_rect, + layers: 0 .. 
1, + }], + &self, + ); + + let subpass = &self.render_pass.subpasses[self.current_subpass as usize]; + let color_views = subpass + .color_attachments + .iter() + .map(|&(id, _)| { + self.framebuffer.attachments[id] + .rtv_handle + .clone() + .unwrap() + .as_raw() + }) + .collect::>(); + let ds_view = match subpass.depth_stencil_attachment { + Some((id, _)) => Some( + self.framebuffer.attachments[id] + .dsv_handle + .clone() + .unwrap() + .as_raw(), + ), + None => None, + }; + + cache.set_render_targets(&color_views, ds_view); + cache.bind(context); + } + + pub fn next_subpass(&mut self) { + self.current_subpass += 1; + } +} + +bitflags! { + struct DirtyStateFlag : u32 { + const RENDER_TARGETS = (1 << 1); + const VERTEX_BUFFERS = (1 << 2); + const GRAPHICS_PIPELINE = (1 << 3); + const VIEWPORTS = (1 << 4); + const BLEND_STATE = (1 << 5); + } +} + +pub struct CommandBufferState { + dirty_flag: DirtyStateFlag, + + render_target_len: u32, + render_targets: [*mut d3d11::ID3D11RenderTargetView; 8], + depth_target: Option<*mut d3d11::ID3D11DepthStencilView>, + graphics_pipeline: Option, + + // a bitmask that keeps track of what vertex buffer bindings have been "bound" into + // our vec + bound_bindings: u32, + // a bitmask that hold the required binding slots to be bound for the currently + // bound pipeline + required_bindings: Option, + // the highest binding number in currently bound pipeline + max_bindings: Option, + viewports: Vec, + vertex_buffers: Vec<*mut d3d11::ID3D11Buffer>, + vertex_offsets: Vec, + vertex_strides: Vec, + blend_factor: Option<[f32; 4]>, + // we can only support one face (rather, both faces must have the same value) + stencil_ref: Option, + stencil_read_mask: Option, + stencil_write_mask: Option, + current_blend: Option<*mut d3d11::ID3D11BlendState>, +} + +impl fmt::Debug for CommandBufferState { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandBufferState") + } +} + +impl CommandBufferState { + fn new() -> Self 
{ + CommandBufferState { + dirty_flag: DirtyStateFlag::empty(), + render_target_len: 0, + render_targets: [ptr::null_mut(); 8], + depth_target: None, + graphics_pipeline: None, + bound_bindings: 0, + required_bindings: None, + max_bindings: None, + viewports: Vec::new(), + vertex_buffers: Vec::new(), + vertex_offsets: Vec::new(), + vertex_strides: Vec::new(), + blend_factor: None, + stencil_ref: None, + stencil_read_mask: None, + stencil_write_mask: None, + current_blend: None, + } + } + + fn clear(&mut self) { + self.render_target_len = 0; + self.depth_target = None; + self.graphics_pipeline = None; + self.bound_bindings = 0; + self.required_bindings = None; + self.max_bindings = None; + self.viewports.clear(); + self.vertex_buffers.clear(); + self.vertex_offsets.clear(); + self.vertex_strides.clear(); + self.blend_factor = None; + self.stencil_ref = None; + self.stencil_read_mask = None; + self.stencil_write_mask = None; + self.current_blend = None; + } + + pub fn set_vertex_buffer( + &mut self, + index: usize, + offset: u32, + buffer: *mut d3d11::ID3D11Buffer, + ) { + self.bound_bindings |= 1 << index as u32; + + if index >= self.vertex_buffers.len() { + self.vertex_buffers.push(buffer); + self.vertex_offsets.push(offset); + } else { + self.vertex_buffers[index] = buffer; + self.vertex_offsets[index] = offset; + } + + self.dirty_flag.insert(DirtyStateFlag::VERTEX_BUFFERS); + } + + pub fn bind_vertex_buffers(&mut self, context: &ComPtr) { + if let Some(binding_count) = self.max_bindings { + if self.vertex_buffers.len() >= binding_count as usize + && self.vertex_strides.len() >= binding_count as usize + { + unsafe { + context.IASetVertexBuffers( + 0, + binding_count, + self.vertex_buffers.as_ptr(), + self.vertex_strides.as_ptr(), + self.vertex_offsets.as_ptr(), + ); + } + + self.dirty_flag.remove(DirtyStateFlag::VERTEX_BUFFERS); + } + } + } + + pub fn set_viewports(&mut self, viewports: &[d3d11::D3D11_VIEWPORT]) { + self.viewports.clear(); + 
self.viewports.extend(viewports); + + self.dirty_flag.insert(DirtyStateFlag::VIEWPORTS); + } + + pub fn bind_viewports(&mut self, context: &ComPtr) { + if let Some(ref pipeline) = self.graphics_pipeline { + if let Some(ref viewport) = pipeline.baked_states.viewport { + unsafe { + context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr()); + } + } else { + unsafe { + context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr()); + } + } + } else { + unsafe { + context.RSSetViewports(self.viewports.len() as u32, self.viewports.as_ptr()); + } + } + + self.dirty_flag.remove(DirtyStateFlag::VIEWPORTS); + } + + pub fn set_render_targets( + &mut self, + render_targets: &[*mut d3d11::ID3D11RenderTargetView], + depth_target: Option<*mut d3d11::ID3D11DepthStencilView>, + ) { + for (idx, &rt) in render_targets.iter().enumerate() { + self.render_targets[idx] = rt; + } + + self.render_target_len = render_targets.len() as u32; + self.depth_target = depth_target; + + self.dirty_flag.insert(DirtyStateFlag::RENDER_TARGETS); + } + + pub fn bind_render_targets(&mut self, context: &ComPtr) { + unsafe { + context.OMSetRenderTargets( + self.render_target_len, + self.render_targets.as_ptr(), + if let Some(dsv) = self.depth_target { + dsv + } else { + ptr::null_mut() + }, + ); + } + + self.dirty_flag.remove(DirtyStateFlag::RENDER_TARGETS); + } + + pub fn set_blend_factor(&mut self, factor: [f32; 4]) { + self.blend_factor = Some(factor); + + self.dirty_flag.insert(DirtyStateFlag::BLEND_STATE); + } + + pub fn bind_blend_state(&mut self, context: &ComPtr) { + if let Some(blend) = self.current_blend { + let blend_color = if let Some(ref pipeline) = self.graphics_pipeline { + pipeline + .baked_states + .blend_color + .or(self.blend_factor) + .unwrap_or([0f32; 4]) + } else { + self.blend_factor.unwrap_or([0f32; 4]) + }; + + // TODO: MSAA + unsafe { + context.OMSetBlendState(blend, &blend_color, !0); + } + + self.dirty_flag.remove(DirtyStateFlag::BLEND_STATE); + } + } + + 
pub fn set_graphics_pipeline(&mut self, pipeline: GraphicsPipeline) { + self.graphics_pipeline = Some(pipeline); + + self.dirty_flag.insert(DirtyStateFlag::GRAPHICS_PIPELINE); + } + + pub fn bind_graphics_pipeline(&mut self, context: &ComPtr) { + if let Some(ref pipeline) = self.graphics_pipeline { + self.vertex_strides.clear(); + self.vertex_strides.extend(&pipeline.strides); + + self.required_bindings = Some(pipeline.required_bindings); + self.max_bindings = Some(pipeline.max_vertex_bindings); + }; + + self.bind_vertex_buffers(context); + + if let Some(ref pipeline) = self.graphics_pipeline { + unsafe { + context.IASetPrimitiveTopology(pipeline.topology); + context.IASetInputLayout(pipeline.input_layout.as_raw()); + + context.VSSetShader(pipeline.vs.as_raw(), ptr::null_mut(), 0); + if let Some(ref ps) = pipeline.ps { + context.PSSetShader(ps.as_raw(), ptr::null_mut(), 0); + } + if let Some(ref gs) = pipeline.gs { + context.GSSetShader(gs.as_raw(), ptr::null_mut(), 0); + } + if let Some(ref hs) = pipeline.hs { + context.HSSetShader(hs.as_raw(), ptr::null_mut(), 0); + } + if let Some(ref ds) = pipeline.ds { + context.DSSetShader(ds.as_raw(), ptr::null_mut(), 0); + } + + context.RSSetState(pipeline.rasterizer_state.as_raw()); + if let Some(ref viewport) = pipeline.baked_states.viewport { + context.RSSetViewports(1, [conv::map_viewport(&viewport)].as_ptr()); + } + if let Some(ref scissor) = pipeline.baked_states.scissor { + context.RSSetScissorRects(1, [conv::map_rect(&scissor)].as_ptr()); + } + + if let Some((ref state, reference)) = pipeline.depth_stencil_state { + let stencil_ref = if let pso::State::Static(reference) = reference { + reference + } else { + self.stencil_ref.unwrap_or(0) + }; + + context.OMSetDepthStencilState(state.as_raw(), stencil_ref); + } + self.current_blend = Some(pipeline.blend_state.as_raw()); + } + }; + + self.bind_blend_state(context); + + self.dirty_flag.remove(DirtyStateFlag::GRAPHICS_PIPELINE); + } + + pub fn bind(&mut self, context: 
&ComPtr) { + if self.dirty_flag.contains(DirtyStateFlag::RENDER_TARGETS) { + self.bind_render_targets(context); + } + + if self.dirty_flag.contains(DirtyStateFlag::GRAPHICS_PIPELINE) { + self.bind_graphics_pipeline(context); + } + + if self.dirty_flag.contains(DirtyStateFlag::VERTEX_BUFFERS) { + self.bind_vertex_buffers(context); + } + + if self.dirty_flag.contains(DirtyStateFlag::VIEWPORTS) { + self.bind_viewports(context); + } + } +} + +pub struct CommandBuffer { + // TODO: better way of sharing + internal: internal::Internal, + context: ComPtr, + list: RefCell>>, + + // since coherent memory needs to be synchronized at submission, we need to gather up all + // coherent resources that are used in the command buffer and flush/invalidate them accordingly + // before executing. + flush_coherent_memory: Vec, + invalidate_coherent_memory: Vec, + + // holds information about the active render pass + render_pass_cache: Option, + + cache: CommandBufferState, + + one_time_submit: bool, +} + +impl fmt::Debug for CommandBuffer { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandBuffer") + } +} + +unsafe impl Send for CommandBuffer {} +unsafe impl Sync for CommandBuffer {} + +impl CommandBuffer { + fn create_deferred(device: ComPtr, internal: internal::Internal) -> Self { + let mut context: *mut d3d11::ID3D11DeviceContext = ptr::null_mut(); + let hr = + unsafe { device.CreateDeferredContext(0, &mut context as *mut *mut _ as *mut *mut _) }; + assert_eq!(hr, winerror::S_OK); + + CommandBuffer { + internal, + context: unsafe { ComPtr::from_raw(context) }, + list: RefCell::new(None), + flush_coherent_memory: Vec::new(), + invalidate_coherent_memory: Vec::new(), + render_pass_cache: None, + cache: CommandBufferState::new(), + one_time_submit: false, + } + } + + fn as_raw_list(&self) -> ComPtr { + if self.one_time_submit { + self.list.replace(None).unwrap() + } else { + self.list.borrow().clone().unwrap() + } + } + + fn defer_coherent_flush(&mut 
self, buffer: &Buffer) { + if !self + .flush_coherent_memory + .iter() + .any(|m| m.buffer == buffer.internal.raw) + { + self.flush_coherent_memory.push(MemoryFlush { + host_memory: buffer.host_ptr, + sync_range: SyncRange::Whole, + buffer: buffer.internal.raw, + }); + } + } + + fn defer_coherent_invalidate(&mut self, buffer: &Buffer) { + if !self + .invalidate_coherent_memory + .iter() + .any(|m| m.buffer == buffer.internal.raw) + { + self.invalidate_coherent_memory.push(MemoryInvalidate { + working_buffer: Some(self.internal.working_buffer.clone()), + working_buffer_size: self.internal.working_buffer_size, + host_memory: buffer.host_ptr, + sync_range: buffer.bound_range.clone(), + buffer: buffer.internal.raw, + }); + } + } + + fn reset(&mut self) { + self.flush_coherent_memory.clear(); + self.invalidate_coherent_memory.clear(); + self.render_pass_cache = None; + self.cache.clear(); + } +} + +impl command::CommandBuffer for CommandBuffer { + unsafe fn begin( + &mut self, + flags: command::CommandBufferFlags, + _info: command::CommandBufferInheritanceInfo, + ) { + self.one_time_submit = flags.contains(command::CommandBufferFlags::ONE_TIME_SUBMIT); + self.reset(); + } + + unsafe fn finish(&mut self) { + let mut list = ptr::null_mut(); + let hr = self + .context + .FinishCommandList(FALSE, &mut list as *mut *mut _ as *mut *mut _); + assert_eq!(hr, winerror::S_OK); + + self.list.replace(Some(ComPtr::from_raw(list))); + } + + unsafe fn reset(&mut self, _release_resources: bool) { + self.reset(); + } + + unsafe fn begin_render_pass( + &mut self, + render_pass: &RenderPass, + framebuffer: &Framebuffer, + target_rect: pso::Rect, + clear_values: T, + _first_subpass: command::SubpassContents, + ) where + T: IntoIterator, + T::Item: Borrow, + { + use pass::AttachmentLoadOp as Alo; + + let mut clear_iter = clear_values.into_iter(); + let mut attachment_clears = Vec::new(); + + for (idx, attachment) in render_pass.attachments.iter().enumerate() { + //let attachment = 
render_pass.attachments[attachment_ref]; + let format = attachment.format.unwrap(); + + let subpass_id = render_pass + .subpasses + .iter() + .position(|sp| sp.is_using(idx)) + .map(|i| i as pass::SubpassId); + + if attachment.has_clears() { + let value = *clear_iter.next().unwrap().borrow(); + + match (attachment.ops.load, attachment.stencil_ops.load) { + (Alo::Clear, Alo::Clear) if format.is_depth() => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::DepthStencil { + depth: Some(value.depth_stencil.depth), + stencil: Some(value.depth_stencil.stencil), + }, + }); + } + (Alo::Clear, Alo::Clear) => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::Color { + index: idx, + value: value.color, + }, + }); + + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::DepthStencil { + depth: None, + stencil: Some(value.depth_stencil.stencil), + }, + }); + } + (Alo::Clear, _) if format.is_depth() => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::DepthStencil { + depth: Some(value.depth_stencil.depth), + stencil: None, + }, + }); + } + (Alo::Clear, _) => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::Color { + index: idx, + value: value.color, + }, + }); + } + (_, Alo::Clear) => { + attachment_clears.push(AttachmentClear { + subpass_id, + attachment_id: idx, + raw: command::AttachmentClear::DepthStencil { + depth: None, + stencil: Some(value.depth_stencil.stencil), + }, + }); + } + _ => {} + } + } + } + + self.render_pass_cache = Some(RenderPassCache { + render_pass: render_pass.clone(), + framebuffer: framebuffer.clone(), + attachment_clear_values: attachment_clears, + target_rect, + current_subpass: 0, + }); + + if let Some(ref mut current_render_pass) = 
self.render_pass_cache { + current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache); + } + } + + unsafe fn next_subpass(&mut self, _contents: command::SubpassContents) { + if let Some(ref mut current_render_pass) = self.render_pass_cache { + // TODO: resolve msaa + current_render_pass.next_subpass(); + current_render_pass.start_subpass(&mut self.internal, &self.context, &mut self.cache); + } + } + + unsafe fn end_render_pass(&mut self) { + self.context + .OMSetRenderTargets(8, [ptr::null_mut(); 8].as_ptr(), ptr::null_mut()); + + self.render_pass_cache = None; + } + + unsafe fn pipeline_barrier<'a, T>( + &mut self, + _stages: Range, + _dependencies: memory::Dependencies, + _barriers: T, + ) where + T: IntoIterator, + T::Item: Borrow>, + { + // TODO: should we track and assert on resource states? + // unimplemented!() + } + + unsafe fn clear_image( + &mut self, + image: &Image, + _: image::Layout, + value: command::ClearValue, + subresource_ranges: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + for range in subresource_ranges { + let range = range.borrow(); + + // TODO: clear Int/Uint depending on format + if range.aspects.contains(format::Aspects::COLOR) { + for layer in range.layers.clone() { + for level in range.levels.clone() { + self.context.ClearRenderTargetView( + image.get_rtv(level, layer).unwrap().as_raw(), + &value.color.float32, + ); + } + } + } + + let mut depth_stencil_flags = 0; + if range.aspects.contains(format::Aspects::DEPTH) { + depth_stencil_flags |= d3d11::D3D11_CLEAR_DEPTH; + } + + if range.aspects.contains(format::Aspects::STENCIL) { + depth_stencil_flags |= d3d11::D3D11_CLEAR_STENCIL; + } + + if depth_stencil_flags != 0 { + for layer in range.layers.clone() { + for level in range.levels.clone() { + self.context.ClearDepthStencilView( + image.get_dsv(level, layer).unwrap().as_raw(), + depth_stencil_flags, + value.depth_stencil.depth, + value.depth_stencil.stencil as _, + ); + } + } + } + } + } + + unsafe 
fn clear_attachments(&mut self, clears: T, rects: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + if let Some(ref pass) = self.render_pass_cache { + self.cache.dirty_flag.insert( + DirtyStateFlag::GRAPHICS_PIPELINE + | DirtyStateFlag::VIEWPORTS + | DirtyStateFlag::RENDER_TARGETS, + ); + self.internal + .clear_attachments(&self.context, clears, rects, pass); + self.cache.bind(&self.context); + } else { + panic!("`clear_attachments` can only be called inside a renderpass") + } + } + + unsafe fn resolve_image( + &mut self, + _src: &Image, + _src_layout: image::Layout, + _dst: &Image, + _dst_layout: image::Layout, + _regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + unimplemented!() + } + + unsafe fn blit_image( + &mut self, + src: &Image, + _src_layout: image::Layout, + dst: &Image, + _dst_layout: image::Layout, + filter: image::Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + self.cache + .dirty_flag + .insert(DirtyStateFlag::GRAPHICS_PIPELINE); + + self.internal + .blit_2d_image(&self.context, src, dst, filter, regions); + + self.cache.bind(&self.context); + } + + unsafe fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView) { + self.context.IASetIndexBuffer( + ibv.buffer.internal.raw, + conv::map_index_type(ibv.index_type), + ibv.range.offset as u32, + ); + } + + unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) + where + I: IntoIterator, + T: Borrow, + { + for (i, (buf, sub)) in buffers.into_iter().enumerate() { + let idx = i + first_binding as usize; + let buf = buf.borrow(); + + if buf.properties.contains(memory::Properties::COHERENT) { + self.defer_coherent_flush(buf); + } + + self.cache + .set_vertex_buffer(idx, sub.offset as u32, buf.internal.raw); + } + + self.cache.bind_vertex_buffers(&self.context); + } + + unsafe fn set_viewports(&mut self, _first_viewport: u32, viewports: T) + where + T: IntoIterator, + T::Item: Borrow, + { + 
let viewports = viewports + .into_iter() + .map(|v| { + let v = v.borrow(); + conv::map_viewport(v) + }) + .collect::>(); + + // TODO: DX only lets us set all VPs at once, so cache in slice? + self.cache.set_viewports(&viewports); + self.cache.bind_viewports(&self.context); + } + + unsafe fn set_scissors(&mut self, _first_scissor: u32, scissors: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let scissors = scissors + .into_iter() + .map(|s| { + let s = s.borrow(); + conv::map_rect(s) + }) + .collect::>(); + + // TODO: same as for viewports + self.context + .RSSetScissorRects(scissors.len() as _, scissors.as_ptr()); + } + + unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) { + self.cache.set_blend_factor(color); + self.cache.bind_blend_state(&self.context); + } + + unsafe fn set_stencil_reference(&mut self, _faces: pso::Face, value: pso::StencilValue) { + self.cache.stencil_ref = Some(value); + } + + unsafe fn set_stencil_read_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) { + self.cache.stencil_read_mask = Some(value); + } + + unsafe fn set_stencil_write_mask(&mut self, _faces: pso::Face, value: pso::StencilValue) { + self.cache.stencil_write_mask = Some(value); + } + + unsafe fn set_depth_bounds(&mut self, _bounds: Range) { + unimplemented!() + } + + unsafe fn set_line_width(&mut self, width: f32) { + validate_line_width(width); + } + + unsafe fn set_depth_bias(&mut self, _depth_bias: pso::DepthBias) { + // TODO: + // unimplemented!() + } + + unsafe fn bind_graphics_pipeline(&mut self, pipeline: &GraphicsPipeline) { + self.cache.set_graphics_pipeline(pipeline.clone()); + self.cache.bind_graphics_pipeline(&self.context); + } + + unsafe fn bind_graphics_descriptor_sets<'a, I, J>( + &mut self, + layout: &PipelineLayout, + first_set: usize, + sets: I, + _offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let _scope = debug_scope!(&self.context, "BindGraphicsDescriptorSets"); + + 
// TODO: find a better solution to invalidating old bindings.. + self.context.CSSetUnorderedAccessViews( + 0, + 16, + [ptr::null_mut(); 16].as_ptr(), + ptr::null_mut(), + ); + + //let offsets: Vec = offsets.into_iter().map(|o| *o.borrow()).collect(); + + for (set, info) in sets.into_iter().zip(&layout.sets[first_set ..]) { + let set = set.borrow(); + + { + let coherent_buffers = set.coherent_buffers.lock(); + for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() { + // TODO: merge sync range if a flush already exists + if !self + .flush_coherent_memory + .iter() + .any(|m| m.buffer == sync.device_buffer) + { + self.flush_coherent_memory.push(MemoryFlush { + host_memory: sync.host_ptr, + sync_range: sync.range.clone(), + buffer: sync.device_buffer, + }); + } + } + + for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() { + if !self + .invalidate_coherent_memory + .iter() + .any(|m| m.buffer == sync.device_buffer) + { + self.invalidate_coherent_memory.push(MemoryInvalidate { + working_buffer: Some(self.internal.working_buffer.clone()), + working_buffer_size: self.internal.working_buffer_size, + host_memory: sync.host_ptr, + sync_range: sync.range.clone(), + buffer: sync.device_buffer, + }); + } + } + } + + // TODO: offsets + + if let Some(rd) = info.registers.vs.c.as_some() { + self.context.VSSetConstantBuffers( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ); + } + if let Some(rd) = info.registers.vs.t.as_some() { + self.context.VSSetShaderResources( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ); + } + if let Some(rd) = info.registers.vs.s.as_some() { + self.context.VSSetSamplers( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ); + } + + if let Some(rd) = info.registers.ps.c.as_some() { + 
self.context.PSSetConstantBuffers( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ); + } + if let Some(rd) = info.registers.ps.t.as_some() { + self.context.PSSetShaderResources( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ); + } + if let Some(rd) = info.registers.ps.s.as_some() { + self.context.PSSetSamplers( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ); + } + } + } + + unsafe fn bind_compute_pipeline(&mut self, pipeline: &ComputePipeline) { + self.context + .CSSetShader(pipeline.cs.as_raw(), ptr::null_mut(), 0); + } + + unsafe fn bind_compute_descriptor_sets( + &mut self, + layout: &PipelineLayout, + first_set: usize, + sets: I, + _offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let _scope = debug_scope!(&self.context, "BindComputeDescriptorSets"); + + self.context.CSSetUnorderedAccessViews( + 0, + 16, + [ptr::null_mut(); 16].as_ptr(), + ptr::null_mut(), + ); + for (set, info) in sets.into_iter().zip(&layout.sets[first_set ..]) { + let set = set.borrow(); + + { + let coherent_buffers = set.coherent_buffers.lock(); + for sync in coherent_buffers.flush_coherent_buffers.borrow().iter() { + if !self + .flush_coherent_memory + .iter() + .any(|m| m.buffer == sync.device_buffer) + { + self.flush_coherent_memory.push(MemoryFlush { + host_memory: sync.host_ptr, + sync_range: sync.range.clone(), + buffer: sync.device_buffer, + }); + } + } + + for sync in coherent_buffers.invalidate_coherent_buffers.borrow().iter() { + if !self + .invalidate_coherent_memory + .iter() + .any(|m| m.buffer == sync.device_buffer) + { + self.invalidate_coherent_memory.push(MemoryInvalidate { + working_buffer: Some(self.internal.working_buffer.clone()), + working_buffer_size: 
self.internal.working_buffer_size, + host_memory: sync.host_ptr, + sync_range: sync.range.clone(), + buffer: sync.device_buffer, + }); + } + } + } + + // TODO: offsets + + if let Some(rd) = info.registers.cs.c.as_some() { + self.context.CSSetConstantBuffers( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ); + } + if let Some(rd) = info.registers.cs.t.as_some() { + self.context.CSSetShaderResources( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ); + } + if let Some(rd) = info.registers.cs.u.as_some() { + self.context.CSSetUnorderedAccessViews( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ptr::null_mut(), + ); + } + if let Some(rd) = info.registers.cs.s.as_some() { + self.context.CSSetSamplers( + rd.res_index as u32, + rd.count as u32, + set.handles.offset(rd.pool_offset as isize) as *const *mut _ as *const *mut _, + ); + } + } + } + + unsafe fn dispatch(&mut self, count: WorkGroupCount) { + self.context.Dispatch(count[0], count[1], count[2]); + } + + unsafe fn dispatch_indirect(&mut self, _buffer: &Buffer, _offset: buffer::Offset) { + unimplemented!() + } + + unsafe fn fill_buffer(&mut self, _buffer: &Buffer, _sub: buffer::SubRange, _data: u32) { + unimplemented!() + } + + unsafe fn update_buffer(&mut self, _buffer: &Buffer, _offset: buffer::Offset, _data: &[u8]) { + unimplemented!() + } + + unsafe fn copy_buffer(&mut self, src: &Buffer, dst: &Buffer, regions: T) + where + T: IntoIterator, + T::Item: Borrow, + { + if src.properties.contains(memory::Properties::COHERENT) { + self.defer_coherent_flush(src); + } + + for region in regions.into_iter() { + let info = region.borrow(); + let dst_box = d3d11::D3D11_BOX { + left: info.src as _, + top: 0, + front: 0, + right: (info.src + info.size) as _, + bottom: 1, + back: 1, + }; + + 
self.context.CopySubresourceRegion( + dst.internal.raw as _, + 0, + info.dst as _, + 0, + 0, + src.internal.raw as _, + 0, + &dst_box, + ); + + if let Some(disjoint_cb) = dst.internal.disjoint_cb { + self.context.CopySubresourceRegion( + disjoint_cb as _, + 0, + info.dst as _, + 0, + 0, + src.internal.raw as _, + 0, + &dst_box, + ); + } + } + } + + unsafe fn copy_image( + &mut self, + src: &Image, + _: image::Layout, + dst: &Image, + _: image::Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + self.internal + .copy_image_2d(&self.context, src, dst, regions); + } + + unsafe fn copy_buffer_to_image( + &mut self, + buffer: &Buffer, + image: &Image, + _: image::Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + if buffer.properties.contains(memory::Properties::COHERENT) { + self.defer_coherent_flush(buffer); + } + + self.internal + .copy_buffer_to_image_2d(&self.context, buffer, image, regions); + } + + unsafe fn copy_image_to_buffer( + &mut self, + image: &Image, + _: image::Layout, + buffer: &Buffer, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + if buffer.properties.contains(memory::Properties::COHERENT) { + self.defer_coherent_invalidate(buffer); + } + + self.internal + .copy_image_2d_to_buffer(&self.context, image, buffer, regions); + } + + unsafe fn draw(&mut self, vertices: Range, instances: Range) { + self.context.DrawInstanced( + vertices.end - vertices.start, + instances.end - instances.start, + vertices.start, + instances.start, + ); + } + + unsafe fn draw_indexed( + &mut self, + indices: Range, + base_vertex: VertexOffset, + instances: Range, + ) { + self.context.DrawIndexedInstanced( + indices.end - indices.start, + instances.end - instances.start, + indices.start, + base_vertex, + instances.start, + ); + } + + unsafe fn draw_indirect( + &mut self, + _buffer: &Buffer, + _offset: buffer::Offset, + _draw_count: DrawCount, + _stride: u32, + ) { + unimplemented!() + } + + unsafe fn 
draw_indexed_indirect( + &mut self, + _buffer: &Buffer, + _offset: buffer::Offset, + _draw_count: DrawCount, + _stride: u32, + ) { + unimplemented!() + } + + unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) { + unimplemented!() + } + + unsafe fn reset_event(&mut self, _: &(), _: pso::PipelineStage) { + unimplemented!() + } + + unsafe fn wait_events<'a, I, J>(&mut self, _: I, _: Range, _: J) + where + I: IntoIterator, + I::Item: Borrow<()>, + J: IntoIterator, + J::Item: Borrow>, + { + unimplemented!() + } + + unsafe fn begin_query(&mut self, _query: query::Query, _flags: query::ControlFlags) { + unimplemented!() + } + + unsafe fn end_query(&mut self, _query: query::Query) { + unimplemented!() + } + + unsafe fn reset_query_pool(&mut self, _pool: &QueryPool, _queries: Range) { + unimplemented!() + } + + unsafe fn copy_query_pool_results( + &mut self, + _pool: &QueryPool, + _queries: Range, + _buffer: &Buffer, + _offset: buffer::Offset, + _stride: buffer::Offset, + _flags: query::ResultFlags, + ) { + unimplemented!() + } + + unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, _query: query::Query) { + unimplemented!() + } + + unsafe fn push_graphics_constants( + &mut self, + _layout: &PipelineLayout, + _stages: pso::ShaderStageFlags, + _offset: u32, + _constants: &[u32], + ) { + // unimplemented!() + } + + unsafe fn push_compute_constants( + &mut self, + _layout: &PipelineLayout, + _offset: u32, + _constants: &[u32], + ) { + unimplemented!() + } + + unsafe fn execute_commands<'a, T, I>(&mut self, _buffers: I) + where + T: 'a + Borrow, + I: IntoIterator, + { + unimplemented!() + } + + unsafe fn insert_debug_marker(&mut self, _name: &str, _color: u32) { + //TODO + } + unsafe fn begin_debug_marker(&mut self, _name: &str, _color: u32) { + //TODO + } + unsafe fn end_debug_marker(&mut self) { + //TODO + } +} + +#[derive(Clone, Debug)] +enum SyncRange { + Whole, + Partial(Range), +} + +#[derive(Debug)] +pub struct MemoryFlush { + host_memory: *mut u8, + 
sync_range: SyncRange, + buffer: *mut d3d11::ID3D11Buffer, +} + +pub struct MemoryInvalidate { + working_buffer: Option>, + working_buffer_size: u64, + host_memory: *mut u8, + sync_range: Range, + buffer: *mut d3d11::ID3D11Buffer, +} + +impl fmt::Debug for MemoryInvalidate { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("MemoryInvalidate") + } +} + +fn intersection(a: &Range, b: &Range) -> Option> { + let min = if a.start < b.start { a } else { b }; + let max = if min == a { b } else { a }; + + if min.end < max.start { + None + } else { + let end = if min.end < max.end { min.end } else { max.end }; + Some(max.start .. end) + } +} + +impl MemoryFlush { + fn do_flush(&self, context: &ComPtr) { + let src = self.host_memory; + + debug_marker!(context, "Flush({:?})", self.sync_range); + let region = match self.sync_range { + SyncRange::Partial(ref range) if range.start < range.end => Some(d3d11::D3D11_BOX { + left: range.start as u32, + top: 0, + front: 0, + right: range.end as u32, + bottom: 1, + back: 1, + }), + _ => None, + }; + + unsafe { + context.UpdateSubresource( + self.buffer as _, + 0, + if let Some(region) = region { + ®ion + } else { + ptr::null_mut() + }, + src as _, + 0, + 0, + ); + } + } +} + +impl MemoryInvalidate { + fn download( + &self, + context: &ComPtr, + buffer: *mut d3d11::ID3D11Buffer, + range: Range, + ) { + unsafe { + context.CopySubresourceRegion( + self.working_buffer.clone().unwrap().as_raw() as _, + 0, + 0, + 0, + 0, + buffer as _, + 0, + &d3d11::D3D11_BOX { + left: range.start as _, + top: 0, + front: 0, + right: range.end as _, + bottom: 1, + back: 1, + }, + ); + + // copy over to our vec + let dst = self.host_memory.offset(range.start as isize); + let src = self.map(&context); + ptr::copy(src, dst, (range.end - range.start) as usize); + self.unmap(&context); + } + } + + fn do_invalidate(&self, context: &ComPtr) { + let stride = self.working_buffer_size; + let range = &self.sync_range; + let len = range.end - 
range.start; + let chunks = len / stride; + let remainder = len % stride; + + // we split up the copies into chunks the size of our working buffer + for i in 0 .. chunks { + let offset = range.start + i * stride; + let range = offset .. (offset + stride); + + self.download(context, self.buffer, range); + } + + if remainder != 0 { + self.download(context, self.buffer, (chunks * stride) .. range.end); + } + } + + fn map(&self, context: &ComPtr) -> *mut u8 { + assert_eq!(self.working_buffer.is_some(), true); + + unsafe { + let mut map = mem::zeroed(); + let hr = context.Map( + self.working_buffer.clone().unwrap().as_raw() as _, + 0, + d3d11::D3D11_MAP_READ, + 0, + &mut map, + ); + + assert_eq!(hr, winerror::S_OK); + + map.pData as _ + } + } + + fn unmap(&self, context: &ComPtr) { + unsafe { + context.Unmap(self.working_buffer.clone().unwrap().as_raw() as _, 0); + } + } +} + +// Since we dont have any heaps to work with directly, everytime we bind a +// buffer/image to memory we allocate a dx11 resource and assign it a range. +// +// `HOST_VISIBLE` memory gets a `Vec` which covers the entire memory +// range. This forces us to only expose non-coherent memory, as this +// abstraction acts as a "cache" since the "staging buffer" vec is disjoint +// from all the dx11 resources we store in the struct. +pub struct Memory { + properties: memory::Properties, + size: u64, + + mapped_ptr: *mut u8, + + // staging buffer covering the whole memory region, if it's HOST_VISIBLE + host_visible: Option>>, + + // list of all buffers bound to this memory + local_buffers: RefCell, InternalBuffer)>>, + + // list of all images bound to this memory + _local_images: RefCell, InternalImage)>>, +} + +impl fmt::Debug for Memory { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Memory") + } +} + +unsafe impl Send for Memory {} +unsafe impl Sync for Memory {} + +impl Memory { + pub fn resolve(&self, segment: &memory::Segment) -> Range { + segment.offset .. 
segment.size.map_or(self.size, |s| segment.offset + s) + } + + pub fn bind_buffer(&self, range: Range, buffer: InternalBuffer) { + self.local_buffers.borrow_mut().push((range, buffer)); + } + + pub fn flush(&self, context: &ComPtr, range: Range) { + use buffer::Usage; + + for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() { + if let Some(range) = intersection(&range, &buffer_range) { + let ptr = self.mapped_ptr; + + // we need to handle 3 cases for updating buffers: + // + // 1. if our buffer was created as a `UNIFORM` buffer *and* other usage flags, we + // also have a disjoint buffer which only has `D3D11_BIND_CONSTANT_BUFFER` due + // to DX11 limitation. we then need to update both the original buffer and the + // disjoint one with the *whole* range (TODO: allow for partial updates) + // + // 2. if our buffer was created with *only* `UNIFORM` usage we need to upload + // the whole range (TODO: allow for partial updates) + // + // 3. the general case, without any `UNIFORM` usage has no restrictions on + // partial updates, so we upload the specified range + // + if buffer.usage.contains(Usage::UNIFORM) && buffer.usage != Usage::UNIFORM { + MemoryFlush { + host_memory: unsafe { ptr.offset(buffer_range.start as _) }, + sync_range: SyncRange::Whole, + buffer: buffer.raw, + } + .do_flush(&context); + + if let Some(disjoint) = buffer.disjoint_cb { + MemoryFlush { + host_memory: unsafe { ptr.offset(buffer_range.start as _) }, + sync_range: SyncRange::Whole, + buffer: disjoint, + } + .do_flush(&context); + } + } else if buffer.usage == Usage::UNIFORM { + MemoryFlush { + host_memory: unsafe { ptr.offset(buffer_range.start as _) }, + sync_range: SyncRange::Whole, + buffer: buffer.raw, + } + .do_flush(&context); + } else { + let local_start = range.start - buffer_range.start; + let local_len = range.end - range.start; + + MemoryFlush { + host_memory: unsafe { ptr.offset(range.start as _) }, + sync_range: SyncRange::Partial(local_start .. 
(local_start + local_len)), + buffer: buffer.raw, + } + .do_flush(&context); + } + } + } + } + + pub fn invalidate( + &self, + context: &ComPtr, + range: Range, + working_buffer: ComPtr, + working_buffer_size: u64, + ) { + for &(ref buffer_range, ref buffer) in self.local_buffers.borrow().iter() { + if let Some(range) = intersection(&range, &buffer_range) { + MemoryInvalidate { + working_buffer: Some(working_buffer.clone()), + working_buffer_size, + host_memory: self.mapped_ptr, + sync_range: range.clone(), + buffer: buffer.raw, + } + .do_invalidate(&context); + } + } + } +} + +#[derive(Debug)] +pub struct CommandPool { + device: ComPtr, + internal: internal::Internal, +} + +unsafe impl Send for CommandPool {} +unsafe impl Sync for CommandPool {} + +impl hal::pool::CommandPool for CommandPool { + unsafe fn reset(&mut self, _release_resources: bool) { + //unimplemented!() + } + + unsafe fn allocate_one(&mut self, _level: command::Level) -> CommandBuffer { + CommandBuffer::create_deferred(self.device.clone(), self.internal.clone()) + } + + unsafe fn free(&mut self, _cbufs: I) + where + I: IntoIterator, + { + // TODO: + // unimplemented!() + } +} + +/// Similarily to dx12 backend, we can handle either precompiled dxbc or spirv +pub enum ShaderModule { + Dxbc(Vec), + Spirv(Vec), +} + +// TODO: temporary +impl fmt::Debug for ShaderModule { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "{}", "ShaderModule { ... 
}") + } +} + +unsafe impl Send for ShaderModule {} +unsafe impl Sync for ShaderModule {} + +#[derive(Clone, Debug)] +pub struct SubpassDesc { + pub color_attachments: Vec, + pub depth_stencil_attachment: Option, + pub input_attachments: Vec, + pub resolve_attachments: Vec, +} + +impl SubpassDesc { + pub(crate) fn is_using(&self, at_id: pass::AttachmentId) -> bool { + self.color_attachments + .iter() + .chain(self.depth_stencil_attachment.iter()) + .chain(self.input_attachments.iter()) + .chain(self.resolve_attachments.iter()) + .any(|&(id, _)| id == at_id) + } +} + +#[derive(Clone, Debug)] +pub struct RenderPass { + pub attachments: Vec, + pub subpasses: Vec, +} + +#[derive(Clone, Debug)] +pub struct Framebuffer { + attachments: Vec, + layers: image::Layer, +} + +#[derive(Clone, Debug)] +pub struct InternalBuffer { + raw: *mut d3d11::ID3D11Buffer, + // TODO: need to sync between `raw` and `disjoint_cb`, same way as we do with + // `MemoryFlush/Invalidate` + disjoint_cb: Option<*mut d3d11::ID3D11Buffer>, // if unbound this buffer might be null. 
+ srv: Option<*mut d3d11::ID3D11ShaderResourceView>, + uav: Option<*mut d3d11::ID3D11UnorderedAccessView>, + usage: buffer::Usage, +} + +pub struct Buffer { + internal: InternalBuffer, + properties: memory::Properties, // empty if unbound + host_ptr: *mut u8, // null if unbound + bound_range: Range, // 0 if unbound + requirements: memory::Requirements, + bind: d3d11::D3D11_BIND_FLAG, +} + +impl fmt::Debug for Buffer { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Buffer") + } +} + +unsafe impl Send for Buffer {} +unsafe impl Sync for Buffer {} + +#[derive(Debug)] +pub struct BufferView; + +pub struct Image { + kind: image::Kind, + usage: image::Usage, + format: format::Format, + view_caps: image::ViewCapabilities, + decomposed_format: conv::DecomposedDxgiFormat, + mip_levels: image::Level, + internal: InternalImage, + bind: d3d11::D3D11_BIND_FLAG, + requirements: memory::Requirements, +} + +impl fmt::Debug for Image { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Image") + } +} + +pub struct InternalImage { + raw: *mut d3d11::ID3D11Resource, + copy_srv: Option>, + srv: Option>, + + /// Contains UAVs for all subresources + unordered_access_views: Vec>, + + /// Contains DSVs for all subresources + depth_stencil_views: Vec>, + + /// Contains RTVs for all subresources + render_target_views: Vec>, +} + +impl fmt::Debug for InternalImage { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("InternalImage") + } +} + +unsafe impl Send for Image {} +unsafe impl Sync for Image {} + +impl Image { + pub fn calc_subresource(&self, mip_level: UINT, layer: UINT) -> UINT { + mip_level + (layer * self.mip_levels as UINT) + } + + pub fn get_uav( + &self, + mip_level: image::Level, + _layer: image::Layer, + ) -> Option<&ComPtr> { + self.internal + .unordered_access_views + .get(self.calc_subresource(mip_level as _, 0) as usize) + } + + pub fn get_dsv( + &self, + mip_level: image::Level, + layer: 
image::Layer, + ) -> Option<&ComPtr> { + self.internal + .depth_stencil_views + .get(self.calc_subresource(mip_level as _, layer as _) as usize) + } + + pub fn get_rtv( + &self, + mip_level: image::Level, + layer: image::Layer, + ) -> Option<&ComPtr> { + self.internal + .render_target_views + .get(self.calc_subresource(mip_level as _, layer as _) as usize) + } +} + +#[derive(Clone)] +pub struct ImageView { + format: format::Format, + rtv_handle: Option>, + srv_handle: Option>, + dsv_handle: Option>, + uav_handle: Option>, +} + +impl fmt::Debug for ImageView { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("ImageView") + } +} + +unsafe impl Send for ImageView {} +unsafe impl Sync for ImageView {} + +pub struct Sampler { + sampler_handle: ComPtr, +} + +impl fmt::Debug for Sampler { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Sampler") + } +} + +unsafe impl Send for Sampler {} +unsafe impl Sync for Sampler {} + +pub struct ComputePipeline { + cs: ComPtr, +} + +impl fmt::Debug for ComputePipeline { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("ComputePipeline") + } +} + +unsafe impl Send for ComputePipeline {} +unsafe impl Sync for ComputePipeline {} + +/// NOTE: some objects are hashed internally and reused when created with the +/// same params[0], need to investigate which interfaces this applies +/// to. 
+/// +/// [0]: https://msdn.microsoft.com/en-us/library/windows/desktop/ff476500(v=vs.85).aspx +#[derive(Clone)] +pub struct GraphicsPipeline { + vs: ComPtr, + gs: Option>, + hs: Option>, + ds: Option>, + ps: Option>, + topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY, + input_layout: ComPtr, + rasterizer_state: ComPtr, + blend_state: ComPtr, + depth_stencil_state: Option<( + ComPtr, + pso::State, + )>, + baked_states: pso::BakedStates, + required_bindings: u32, + max_vertex_bindings: u32, + strides: Vec, +} + +impl fmt::Debug for GraphicsPipeline { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("GraphicsPipeline") + } +} + +unsafe impl Send for GraphicsPipeline {} +unsafe impl Sync for GraphicsPipeline {} + +type ResourceIndex = u8; +type DescriptorIndex = u16; + +#[derive(Clone, Debug, Default)] +struct RegisterData { + // CBV + c: T, + // SRV + t: T, + // UAV + u: T, + // Sampler + s: T, +} + +impl RegisterData { + fn map U>(&self, fun: F) -> RegisterData { + RegisterData { + c: fun(&self.c), + t: fun(&self.t), + u: fun(&self.u), + s: fun(&self.s), + } + } +} + +impl RegisterData { + fn add_content_many(&mut self, content: DescriptorContent, many: DescriptorIndex) { + if content.contains(DescriptorContent::CBV) { + self.c += many; + } + if content.contains(DescriptorContent::SRV) { + self.t += many; + } + if content.contains(DescriptorContent::UAV) { + self.u += many; + } + if content.contains(DescriptorContent::SAMPLER) { + self.s += many; + } + } + + fn add_content(&mut self, content: DescriptorContent) { + self.add_content_many(content, 1) + } + + fn sum(&self) -> DescriptorIndex { + self.c + self.t + self.u + self.s + } +} + +#[derive(Clone, Debug, Default)] +struct MultiStageData { + vs: T, + ps: T, + cs: T, +} + +impl MultiStageData { + fn select(self, stage: pso::Stage) -> T { + match stage { + pso::Stage::Vertex => self.vs, + pso::Stage::Fragment => self.ps, + pso::Stage::Compute => self.cs, + _ => panic!("Unsupported stage {:?}", 
stage), + } + } +} + +impl MultiStageData> { + fn map_register U>(&self, fun: F) -> MultiStageData> { + MultiStageData { + vs: self.vs.map(&fun), + ps: self.ps.map(&fun), + cs: self.cs.map(&fun), + } + } + + fn map_other) -> U>(&self, fun: F) -> MultiStageData { + MultiStageData { + vs: fun(&self.vs), + ps: fun(&self.ps), + cs: fun(&self.cs), + } + } +} + +impl MultiStageData> { + fn add_content(&mut self, content: DescriptorContent, stages: pso::ShaderStageFlags) { + if stages.contains(pso::ShaderStageFlags::VERTEX) { + self.vs.add_content(content); + } + if stages.contains(pso::ShaderStageFlags::FRAGMENT) { + self.ps.add_content(content); + } + if stages.contains(pso::ShaderStageFlags::COMPUTE) { + self.cs.add_content(content); + } + } + + fn sum(&self) -> DescriptorIndex { + self.vs.sum() + self.ps.sum() + self.cs.sum() + } +} + +#[derive(Clone, Debug, Default)] +struct RegisterPoolMapping { + offset: DescriptorIndex, + count: ResourceIndex, +} + +#[derive(Clone, Debug, Default)] +struct RegisterInfo { + res_index: ResourceIndex, + pool_offset: DescriptorIndex, + count: ResourceIndex, +} + +impl RegisterInfo { + fn as_some(&self) -> Option<&Self> { + if self.count == 0 { + None + } else { + Some(self) + } + } +} + +#[derive(Clone, Debug, Default)] +struct RegisterAccumulator { + res_index: ResourceIndex, +} + +impl RegisterAccumulator { + fn to_mapping(&self, cur_offset: &mut DescriptorIndex) -> RegisterPoolMapping { + let offset = *cur_offset; + *cur_offset += self.res_index as DescriptorIndex; + + RegisterPoolMapping { + offset, + count: self.res_index, + } + } + + fn advance(&mut self, mapping: &RegisterPoolMapping) -> RegisterInfo { + let res_index = self.res_index; + self.res_index += mapping.count; + RegisterInfo { + res_index, + pool_offset: mapping.offset, + count: mapping.count, + } + } +} + +impl RegisterData { + fn to_mapping(&self, pool_offset: &mut DescriptorIndex) -> RegisterData { + RegisterData { + c: self.c.to_mapping(pool_offset), + t: 
self.t.to_mapping(pool_offset), + u: self.u.to_mapping(pool_offset), + s: self.s.to_mapping(pool_offset), + } + } + + fn advance( + &mut self, + mapping: &RegisterData, + ) -> RegisterData { + RegisterData { + c: self.c.advance(&mapping.c), + t: self.t.advance(&mapping.t), + u: self.u.advance(&mapping.u), + s: self.s.advance(&mapping.s), + } + } +} + +impl MultiStageData> { + fn to_mapping(&self) -> MultiStageData> { + let mut pool_offset = 0; + MultiStageData { + vs: self.vs.to_mapping(&mut pool_offset), + ps: self.ps.to_mapping(&mut pool_offset), + cs: self.cs.to_mapping(&mut pool_offset), + } + } + + fn advance( + &mut self, + mapping: &MultiStageData>, + ) -> MultiStageData> { + MultiStageData { + vs: self.vs.advance(&mapping.vs), + ps: self.ps.advance(&mapping.ps), + cs: self.cs.advance(&mapping.cs), + } + } +} + +#[derive(Clone, Debug)] +struct DescriptorSetInfo { + bindings: Arc>, + registers: MultiStageData>, +} + +impl DescriptorSetInfo { + fn find_register( + &self, + stage: pso::Stage, + binding_index: pso::DescriptorBinding, + ) -> (DescriptorContent, RegisterData) { + let mut res_offsets = self + .registers + .map_register(|info| info.res_index as DescriptorIndex) + .select(stage); + for binding in self.bindings.iter() { + let content = DescriptorContent::from(binding.ty); + if binding.binding == binding_index { + return (content, res_offsets.map(|offset| *offset as ResourceIndex)); + } + res_offsets.add_content(content); + } + panic!("Unable to find binding {:?}", binding_index); + } +} + +/// The pipeline layout holds optimized (less api calls) ranges of objects for all descriptor sets +/// belonging to the pipeline object. +#[derive(Debug)] +pub struct PipelineLayout { + sets: Vec, +} + +/// The descriptor set layout contains mappings from a given binding to the offset in our +/// descriptor pool storage and what type of descriptor it is (combined image sampler takes up two +/// handles). 
+#[derive(Debug)] +pub struct DescriptorSetLayout { + bindings: Arc>, + pool_mapping: MultiStageData>, +} + +#[derive(Debug)] +struct CoherentBufferFlushRange { + device_buffer: *mut d3d11::ID3D11Buffer, + host_ptr: *mut u8, + range: SyncRange, +} + +#[derive(Debug)] +struct CoherentBufferInvalidateRange { + device_buffer: *mut d3d11::ID3D11Buffer, + host_ptr: *mut u8, + range: Range, +} + +#[derive(Debug)] +struct CoherentBuffers { + // descriptor set writes containing coherent resources go into these vecs and are added to the + // command buffers own Vec on binding the set. + flush_coherent_buffers: RefCell>, + invalidate_coherent_buffers: RefCell>, +} + +impl CoherentBuffers { + fn _add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { + let new = buffer.internal.raw; + + if old != new { + let mut buffers = self.flush_coherent_buffers.borrow_mut(); + + let pos = buffers.iter().position(|sync| old == sync.device_buffer); + + let sync_range = CoherentBufferFlushRange { + device_buffer: new, + host_ptr: buffer.host_ptr, + range: SyncRange::Whole, + }; + + if let Some(pos) = pos { + buffers[pos] = sync_range; + } else { + buffers.push(sync_range); + } + + if let Some(disjoint) = buffer.internal.disjoint_cb { + let pos = buffers + .iter() + .position(|sync| disjoint == sync.device_buffer); + + let sync_range = CoherentBufferFlushRange { + device_buffer: disjoint, + host_ptr: buffer.host_ptr, + range: SyncRange::Whole, + }; + + if let Some(pos) = pos { + buffers[pos] = sync_range; + } else { + buffers.push(sync_range); + } + } + } + } + + fn _add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { + let new = buffer.internal.raw; + + if old != new { + let mut buffers = self.invalidate_coherent_buffers.borrow_mut(); + + let pos = buffers.iter().position(|sync| old == sync.device_buffer); + + let sync_range = CoherentBufferInvalidateRange { + device_buffer: new, + host_ptr: buffer.host_ptr, + range: buffer.bound_range.clone(), + }; + + if 
let Some(pos) = pos { + buffers[pos] = sync_range; + } else { + buffers.push(sync_range); + } + } + } +} + +/// Newtype around a common interface that all bindable resources inherit from. +#[derive(Debug, Copy, Clone)] +#[repr(C)] +struct Descriptor(*mut d3d11::ID3D11DeviceChild); + +bitflags! { + /// A set of D3D11 descriptor types that need to be associated + /// with a single gfx-hal `DescriptorType`. + #[derive(Default)] + pub struct DescriptorContent: u8 { + const CBV = 0x1; + const SRV = 0x2; + const UAV = 0x4; + const SAMPLER = 0x8; + /// Indicates if the descriptor is a dynamic uniform/storage buffer. + /// Important as dynamic buffers are implemented as root descriptors. + const DYNAMIC = 0x10; + } +} + +impl From for DescriptorContent { + fn from(ty: pso::DescriptorType) -> Self { + use hal::pso::{ + BufferDescriptorFormat as Bdf, + BufferDescriptorType as Bdt, + DescriptorType as Dt, + ImageDescriptorType as Idt, + }; + match ty { + Dt::Sampler => DescriptorContent::SAMPLER, + Dt::Image { + ty: Idt::Sampled { with_sampler: true }, + } => DescriptorContent::SRV | DescriptorContent::SAMPLER, + Dt::Image { + ty: Idt::Sampled { + with_sampler: false, + }, + } + | Dt::Image { + ty: Idt::Storage { read_only: true }, + } + | Dt::InputAttachment => DescriptorContent::SRV, + Dt::Image { + ty: Idt::Storage { read_only: false }, + } => DescriptorContent::SRV | DescriptorContent::UAV, + Dt::Buffer { + ty: Bdt::Uniform, + format: + Bdf::Structured { + dynamic_offset: true, + }, + } => DescriptorContent::CBV | DescriptorContent::DYNAMIC, + Dt::Buffer { + ty: Bdt::Uniform, .. 
+ } => DescriptorContent::CBV, + Dt::Buffer { + ty: Bdt::Storage { read_only: true }, + format: + Bdf::Structured { + dynamic_offset: true, + }, + } => DescriptorContent::SRV | DescriptorContent::DYNAMIC, + Dt::Buffer { + ty: Bdt::Storage { read_only: false }, + format: + Bdf::Structured { + dynamic_offset: true, + }, + } => DescriptorContent::SRV | DescriptorContent::UAV | DescriptorContent::DYNAMIC, + Dt::Buffer { + ty: Bdt::Storage { read_only: true }, + .. + } => DescriptorContent::SRV, + Dt::Buffer { + ty: Bdt::Storage { read_only: false }, + .. + } => DescriptorContent::SRV | DescriptorContent::UAV, + } + } +} + +pub struct DescriptorSet { + offset: DescriptorIndex, + len: DescriptorIndex, + handles: *mut Descriptor, + coherent_buffers: Mutex, + layout: DescriptorSetLayout, +} + +impl fmt::Debug for DescriptorSet { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("DescriptorSet") + } +} + +unsafe impl Send for DescriptorSet {} +unsafe impl Sync for DescriptorSet {} + +impl DescriptorSet { + fn _add_flush(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { + let new = buffer.internal.raw; + + if old != new { + self.coherent_buffers.lock()._add_flush(old, buffer); + } + } + + fn _add_invalidate(&self, old: *mut d3d11::ID3D11Buffer, buffer: &Buffer) { + let new = buffer.internal.raw; + + if old != new { + self.coherent_buffers.lock()._add_invalidate(old, buffer); + } + } + + unsafe fn assign(&self, offset: DescriptorIndex, value: *mut d3d11::ID3D11DeviceChild) { + *self.handles.offset(offset as isize) = Descriptor(value); + } + + unsafe fn assign_stages( + &self, + offsets: &MultiStageData, + stages: pso::ShaderStageFlags, + value: *mut d3d11::ID3D11DeviceChild, + ) { + if stages.contains(pso::ShaderStageFlags::VERTEX) { + self.assign(offsets.vs, value); + } + if stages.contains(pso::ShaderStageFlags::FRAGMENT) { + self.assign(offsets.ps, value); + } + if stages.contains(pso::ShaderStageFlags::COMPUTE) { + self.assign(offsets.cs, 
value); + } + } +} + +#[derive(Debug)] +pub struct DescriptorPool { + handles: Vec, + allocator: RangeAllocator, +} + +unsafe impl Send for DescriptorPool {} +unsafe impl Sync for DescriptorPool {} + +impl DescriptorPool { + fn with_capacity(size: DescriptorIndex) -> Self { + DescriptorPool { + handles: vec![Descriptor(ptr::null_mut()); size as usize], + allocator: RangeAllocator::new(0 .. size), + } + } +} + +impl pso::DescriptorPool for DescriptorPool { + unsafe fn allocate_set( + &mut self, + layout: &DescriptorSetLayout, + ) -> Result { + let len = layout + .pool_mapping + .map_register(|mapping| mapping.count as DescriptorIndex) + .sum() + .max(1); + + self.allocator + .allocate_range(len) + .map(|range| { + for handle in &mut self.handles[range.start as usize .. range.end as usize] { + *handle = Descriptor(ptr::null_mut()); + } + + DescriptorSet { + offset: range.start, + len, + handles: self.handles.as_mut_ptr().offset(range.start as _), + coherent_buffers: Mutex::new(CoherentBuffers { + flush_coherent_buffers: RefCell::new(Vec::new()), + invalidate_coherent_buffers: RefCell::new(Vec::new()), + }), + layout: DescriptorSetLayout { + bindings: Arc::clone(&layout.bindings), + pool_mapping: layout.pool_mapping.clone(), + }, + } + }) + .map_err(|_| pso::AllocationError::OutOfPoolMemory) + } + + unsafe fn free_sets(&mut self, descriptor_sets: I) + where + I: IntoIterator, + { + for set in descriptor_sets { + self.allocator + .free_range(set.offset .. 
(set.offset + set.len)) + } + } + + unsafe fn reset(&mut self) { + self.allocator.reset(); + } +} + +#[derive(Debug)] +pub struct RawFence { + mutex: Mutex, + condvar: Condvar, +} + +pub type Fence = Arc; + +#[derive(Debug)] +pub struct Semaphore; +#[derive(Debug)] +pub struct QueryPool; + +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] +pub enum Backend {} +impl hal::Backend for Backend { + type Instance = Instance; + type PhysicalDevice = PhysicalDevice; + type Device = device::Device; + + type Surface = Surface; + type Swapchain = Swapchain; + + type QueueFamily = QueueFamily; + type CommandQueue = CommandQueue; + type CommandBuffer = CommandBuffer; + + type Memory = Memory; + type CommandPool = CommandPool; + + type ShaderModule = ShaderModule; + type RenderPass = RenderPass; + type Framebuffer = Framebuffer; + + type Buffer = Buffer; + type BufferView = BufferView; + type Image = Image; + + type ImageView = ImageView; + type Sampler = Sampler; + + type ComputePipeline = ComputePipeline; + type GraphicsPipeline = GraphicsPipeline; + type PipelineLayout = PipelineLayout; + type PipelineCache = (); + type DescriptorSetLayout = DescriptorSetLayout; + type DescriptorPool = DescriptorPool; + type DescriptorSet = DescriptorSet; + + type Fence = Fence; + type Semaphore = Semaphore; + type Event = (); + type QueryPool = QueryPool; +} + +fn validate_line_width(width: f32) { + // Note from the Vulkan spec: + // > If the wide lines feature is not enabled, lineWidth must be 1.0 + // Simply assert and no-op because DX11 never exposes `Features::LINE_WIDTH` + assert_eq!(width, 1.0); +} diff --git a/third_party/rust/gfx-backend-dx11/src/shader.rs b/third_party/rust/gfx-backend-dx11/src/shader.rs index 91f39763f435..b08efc18697d 100644 --- a/third_party/rust/gfx-backend-dx11/src/shader.rs +++ b/third_party/rust/gfx-backend-dx11/src/shader.rs @@ -1,299 +1,289 @@ -use std::{ffi, ptr, slice}; - -use spirv_cross::{hlsl, spirv, ErrorCode as SpirvErrorCode}; - -use 
winapi::shared::winerror; -use winapi::um::{d3dcommon, d3dcompiler}; -use wio::com::ComPtr; - -use auxil::spirv_cross_specialize_ast; -use hal::{device, pso}; - -use {conv, Backend, PipelineLayout}; - -/// Emit error during shader module creation. Used if we don't expect an error -/// but might panic due to an exception in SPIRV-Cross. -fn gen_unexpected_error(err: SpirvErrorCode) -> device::ShaderError { - let msg = match err { - SpirvErrorCode::CompilationError(msg) => msg, - SpirvErrorCode::Unhandled => "Unexpected error".into(), - }; - device::ShaderError::CompilationFailed(msg) -} - -/// Emit error during shader module creation. Used if we execute an query command. -fn gen_query_error(err: SpirvErrorCode) -> device::ShaderError { - let msg = match err { - SpirvErrorCode::CompilationError(msg) => msg, - SpirvErrorCode::Unhandled => "Unknown query error".into(), - }; - device::ShaderError::CompilationFailed(msg) -} - -pub(crate) fn compile_spirv_entrypoint( - raw_data: &[u32], - stage: pso::Stage, - source: &pso::EntryPoint, - layout: &PipelineLayout, -) -> Result>, device::ShaderError> { - let mut ast = parse_spirv(raw_data)?; - spirv_cross_specialize_ast(&mut ast, &source.specialization)?; - - patch_spirv_resources(&mut ast, stage, layout)?; - let shader_model = hlsl::ShaderModel::V5_0; - let shader_code = translate_spirv(&mut ast, shader_model, layout, stage)?; - log::debug!("Generated {:?} shader:\n{:?}", stage, shader_code.replace("\n", "\r\n")); - - let real_name = ast - .get_cleansed_entry_point_name(source.entry, conv::map_stage(stage)) - .map_err(gen_query_error)?; - - // TODO: opt: don't query *all* entry points. 
- let entry_points = ast.get_entry_points().map_err(gen_query_error)?; - entry_points - .iter() - .find(|entry_point| entry_point.name == real_name) - .ok_or(device::ShaderError::MissingEntryPoint(source.entry.into())) - .and_then(|entry_point| { - let stage = conv::map_execution_model(entry_point.execution_model); - let shader = compile_hlsl_shader( - stage, - shader_model, - &entry_point.name, - shader_code.as_bytes(), - )?; - Ok(Some(unsafe { ComPtr::from_raw(shader) })) - }) -} - -pub(crate) fn compile_hlsl_shader( - stage: pso::Stage, - shader_model: hlsl::ShaderModel, - entry: &str, - code: &[u8], -) -> Result<*mut d3dcommon::ID3DBlob, device::ShaderError> { - let stage_str = { - let stage = match stage { - pso::Stage::Vertex => "vs", - pso::Stage::Fragment => "ps", - pso::Stage::Compute => "cs", - _ => unimplemented!(), - }; - - let model = match shader_model { - hlsl::ShaderModel::V5_0 => "5_0", - // TODO: >= 11.3 - hlsl::ShaderModel::V5_1 => "5_1", - // TODO: >= 12?, no mention of 11 on msdn - hlsl::ShaderModel::V6_0 => "6_0", - _ => unimplemented!(), - }; - - format!("{}_{}\0", stage, model) - }; - - let mut blob = ptr::null_mut(); - let mut error = ptr::null_mut(); - let entry = ffi::CString::new(entry).unwrap(); - let hr = unsafe { - d3dcompiler::D3DCompile( - code.as_ptr() as *const _, - code.len(), - ptr::null(), - ptr::null(), - ptr::null_mut(), - entry.as_ptr() as *const _, - stage_str.as_ptr() as *const i8, - 1, - 0, - &mut blob as *mut *mut _, - &mut error as *mut *mut _, - ) - }; - - if !winerror::SUCCEEDED(hr) { - let error = unsafe { ComPtr::::from_raw(error) }; - let message = unsafe { - let pointer = error.GetBufferPointer(); - let size = error.GetBufferSize(); - let slice = slice::from_raw_parts(pointer as *const u8, size as usize); - String::from_utf8_lossy(slice).into_owned() - }; - - Err(device::ShaderError::CompilationFailed(message)) - } else { - Ok(blob) - } -} - -fn parse_spirv(raw_data: &[u32]) -> Result, device::ShaderError> { - let 
module = spirv::Module::from_words(raw_data); - - spirv::Ast::parse(&module).map_err(|err| { - let msg = match err { - SpirvErrorCode::CompilationError(msg) => msg, - SpirvErrorCode::Unhandled => "Unknown parsing error".into(), - }; - device::ShaderError::CompilationFailed(msg) - }) -} - -fn patch_spirv_resources( - ast: &mut spirv::Ast, - stage: pso::Stage, - layout: &PipelineLayout, -) -> Result<(), device::ShaderError> { - // we remap all `layout(binding = n, set = n)` to a flat space which we get from our - // `PipelineLayout` which knows of all descriptor set layouts - - let shader_resources = ast.get_shader_resources().map_err(gen_query_error)?; - for image in &shader_resources.separate_images { - let set = ast - .get_decoration(image.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)? as usize; - let binding = ast - .get_decoration(image.id, spirv::Decoration::Binding) - .map_err(gen_query_error)?; - let (_content, res_index) = layout.sets[set] - .find_register(stage, binding); - - ast.set_decoration( - image.id, - spirv::Decoration::Binding, - res_index.t as u32, - ) - .map_err(gen_unexpected_error)?; - } - - for uniform_buffer in &shader_resources.uniform_buffers { - let set = ast - .get_decoration(uniform_buffer.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)? as usize; - let binding = ast - .get_decoration(uniform_buffer.id, spirv::Decoration::Binding) - .map_err(gen_query_error)?; - let (_content, res_index) = layout.sets[set] - .find_register(stage, binding); - - ast.set_decoration( - uniform_buffer.id, - spirv::Decoration::Binding, - res_index.c as u32, - ) - .map_err(gen_unexpected_error)?; - } - - for storage_buffer in &shader_resources.storage_buffers { - let set = ast - .get_decoration(storage_buffer.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)? 
as usize; - let binding = ast - .get_decoration(storage_buffer.id, spirv::Decoration::Binding) - .map_err(gen_query_error)?; - let (_content, res_index) = layout.sets[set] - .find_register(stage, binding); - - ast.set_decoration( - storage_buffer.id, - spirv::Decoration::Binding, - res_index.u as u32, //TODO: also decorate `res_index.t` - ) - .map_err(gen_unexpected_error)?; - } - - for image in &shader_resources.storage_images { - let set = ast - .get_decoration(image.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)? as usize; - let binding = ast - .get_decoration(image.id, spirv::Decoration::Binding) - .map_err(gen_query_error)?; - let (_content, res_index) = layout.sets[set] - .find_register(stage, binding); - - ast.set_decoration( - image.id, - spirv::Decoration::Binding, - res_index.u as u32, //TODO: also decorate `res_index.t` - ) - .map_err(gen_unexpected_error)?; - } - - for sampler in &shader_resources.separate_samplers { - let set = ast - .get_decoration(sampler.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)? as usize; - let binding = ast - .get_decoration(sampler.id, spirv::Decoration::Binding) - .map_err(gen_query_error)?; - let (_content, res_index) = layout.sets[set] - .find_register(stage, binding); - - ast.set_decoration( - sampler.id, - spirv::Decoration::Binding, - res_index.s as u32, - ) - .map_err(gen_unexpected_error)?; - } - - for image in &shader_resources.sampled_images { - let set = ast - .get_decoration(image.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)? 
as usize; - let binding = ast - .get_decoration(image.id, spirv::Decoration::Binding) - .map_err(gen_query_error)?; - let (_content, res_index) = layout.sets[set] - .find_register(stage, binding); - - ast.set_decoration( - image.id, - spirv::Decoration::Binding, - res_index.t as u32, - ) - .map_err(gen_unexpected_error)?; - } - - Ok(()) -} - -fn translate_spirv( - ast: &mut spirv::Ast, - shader_model: hlsl::ShaderModel, - _layout: &PipelineLayout, - _stage: pso::Stage, -) -> Result { - let mut compile_options = hlsl::CompilerOptions::default(); - compile_options.shader_model = shader_model; - compile_options.vertex.invert_y = true; - - //let stage_flag = stage.into(); - - // TODO: - /*let root_constant_layout = layout - .root_constants - .iter() - .filter_map(|constant| if constant.stages.contains(stage_flag) { - Some(hlsl::RootConstant { - start: constant.range.start * 4, - end: constant.range.end * 4, - binding: constant.range.start, - space: 0, - }) - } else { - None - }) - .collect();*/ - ast.set_compiler_options(&compile_options) - .map_err(gen_unexpected_error)?; - //ast.set_root_constant_layout(root_constant_layout) - // .map_err(gen_unexpected_error)?; - ast.compile().map_err(|err| { - let msg = match err { - SpirvErrorCode::CompilationError(msg) => msg, - SpirvErrorCode::Unhandled => "Unknown compile error".into(), - }; - device::ShaderError::CompilationFailed(msg) - }) -} +use std::{ffi, ptr, slice}; + +use spirv_cross::{hlsl, spirv, ErrorCode as SpirvErrorCode}; + +use winapi::{ + shared::winerror, + um::{d3dcommon, d3dcompiler}, +}; +use wio::com::ComPtr; + +use auxil::spirv_cross_specialize_ast; +use hal::{device, pso}; + +use crate::{conv, Backend, PipelineLayout}; + +/// Emit error during shader module creation. Used if we don't expect an error +/// but might panic due to an exception in SPIRV-Cross. 
+fn gen_unexpected_error(err: SpirvErrorCode) -> device::ShaderError { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unexpected error".into(), + }; + device::ShaderError::CompilationFailed(msg) +} + +/// Emit error during shader module creation. Used if we execute an query command. +fn gen_query_error(err: SpirvErrorCode) -> device::ShaderError { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown query error".into(), + }; + device::ShaderError::CompilationFailed(msg) +} + +pub(crate) fn compile_spirv_entrypoint( + raw_data: &[u32], + stage: pso::Stage, + source: &pso::EntryPoint, + layout: &PipelineLayout, + features: &hal::Features, +) -> Result>, device::ShaderError> { + let mut ast = parse_spirv(raw_data)?; + spirv_cross_specialize_ast(&mut ast, &source.specialization)?; + + patch_spirv_resources(&mut ast, stage, layout)?; + let shader_model = hlsl::ShaderModel::V5_0; + let shader_code = translate_spirv(&mut ast, shader_model, layout, stage, features)?; + log::debug!( + "Generated {:?} shader:\n{:?}", + stage, + shader_code.replace("\n", "\r\n") + ); + + let real_name = ast + .get_cleansed_entry_point_name(source.entry, conv::map_stage(stage)) + .map_err(gen_query_error)?; + + // TODO: opt: don't query *all* entry points. 
+ let entry_points = ast.get_entry_points().map_err(gen_query_error)?; + entry_points + .iter() + .find(|entry_point| entry_point.name == real_name) + .ok_or(device::ShaderError::MissingEntryPoint(source.entry.into())) + .and_then(|entry_point| { + let stage = conv::map_execution_model(entry_point.execution_model); + let shader = compile_hlsl_shader( + stage, + shader_model, + &entry_point.name, + shader_code.as_bytes(), + )?; + Ok(Some(unsafe { ComPtr::from_raw(shader) })) + }) +} + +pub(crate) fn compile_hlsl_shader( + stage: pso::Stage, + shader_model: hlsl::ShaderModel, + entry: &str, + code: &[u8], +) -> Result<*mut d3dcommon::ID3DBlob, device::ShaderError> { + let stage_str = { + let stage = match stage { + pso::Stage::Vertex => "vs", + pso::Stage::Fragment => "ps", + pso::Stage::Compute => "cs", + _ => unimplemented!(), + }; + + let model = match shader_model { + hlsl::ShaderModel::V5_0 => "5_0", + // TODO: >= 11.3 + hlsl::ShaderModel::V5_1 => "5_1", + // TODO: >= 12?, no mention of 11 on msdn + hlsl::ShaderModel::V6_0 => "6_0", + _ => unimplemented!(), + }; + + format!("{}_{}\0", stage, model) + }; + + let mut blob = ptr::null_mut(); + let mut error = ptr::null_mut(); + let entry = ffi::CString::new(entry).unwrap(); + let hr = unsafe { + d3dcompiler::D3DCompile( + code.as_ptr() as *const _, + code.len(), + ptr::null(), + ptr::null(), + ptr::null_mut(), + entry.as_ptr() as *const _, + stage_str.as_ptr() as *const i8, + 1, + 0, + &mut blob as *mut *mut _, + &mut error as *mut *mut _, + ) + }; + + if !winerror::SUCCEEDED(hr) { + let error = unsafe { ComPtr::::from_raw(error) }; + let message = unsafe { + let pointer = error.GetBufferPointer(); + let size = error.GetBufferSize(); + let slice = slice::from_raw_parts(pointer as *const u8, size as usize); + String::from_utf8_lossy(slice).into_owned() + }; + + Err(device::ShaderError::CompilationFailed(message)) + } else { + Ok(blob) + } +} + +fn parse_spirv(raw_data: &[u32]) -> Result, device::ShaderError> { + let 
module = spirv::Module::from_words(raw_data); + + spirv::Ast::parse(&module).map_err(|err| { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown parsing error".into(), + }; + device::ShaderError::CompilationFailed(msg) + }) +} + +fn patch_spirv_resources( + ast: &mut spirv::Ast, + stage: pso::Stage, + layout: &PipelineLayout, +) -> Result<(), device::ShaderError> { + // we remap all `layout(binding = n, set = n)` to a flat space which we get from our + // `PipelineLayout` which knows of all descriptor set layouts + + let shader_resources = ast.get_shader_resources().map_err(gen_query_error)?; + for image in &shader_resources.separate_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? as usize; + let binding = ast + .get_decoration(image.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let (_content, res_index) = layout.sets[set].find_register(stage, binding); + + ast.set_decoration(image.id, spirv::Decoration::Binding, res_index.t as u32) + .map_err(gen_unexpected_error)?; + } + + for uniform_buffer in &shader_resources.uniform_buffers { + let set = ast + .get_decoration(uniform_buffer.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? as usize; + let binding = ast + .get_decoration(uniform_buffer.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let (_content, res_index) = layout.sets[set].find_register(stage, binding); + + ast.set_decoration( + uniform_buffer.id, + spirv::Decoration::Binding, + res_index.c as u32, + ) + .map_err(gen_unexpected_error)?; + } + + for storage_buffer in &shader_resources.storage_buffers { + let set = ast + .get_decoration(storage_buffer.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? 
as usize; + let binding = ast + .get_decoration(storage_buffer.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let (_content, res_index) = layout.sets[set].find_register(stage, binding); + + ast.set_decoration( + storage_buffer.id, + spirv::Decoration::Binding, + res_index.u as u32, //TODO: also decorate `res_index.t` + ) + .map_err(gen_unexpected_error)?; + } + + for image in &shader_resources.storage_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? as usize; + let binding = ast + .get_decoration(image.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let (_content, res_index) = layout.sets[set].find_register(stage, binding); + + ast.set_decoration( + image.id, + spirv::Decoration::Binding, + res_index.u as u32, //TODO: also decorate `res_index.t` + ) + .map_err(gen_unexpected_error)?; + } + + for sampler in &shader_resources.separate_samplers { + let set = ast + .get_decoration(sampler.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? as usize; + let binding = ast + .get_decoration(sampler.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let (_content, res_index) = layout.sets[set].find_register(stage, binding); + + ast.set_decoration(sampler.id, spirv::Decoration::Binding, res_index.s as u32) + .map_err(gen_unexpected_error)?; + } + + for image in &shader_resources.sampled_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)? 
as usize; + let binding = ast + .get_decoration(image.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + let (_content, res_index) = layout.sets[set].find_register(stage, binding); + + ast.set_decoration(image.id, spirv::Decoration::Binding, res_index.t as u32) + .map_err(gen_unexpected_error)?; + } + + Ok(()) +} + +fn translate_spirv( + ast: &mut spirv::Ast, + shader_model: hlsl::ShaderModel, + _layout: &PipelineLayout, + _stage: pso::Stage, + features: &hal::Features, +) -> Result { + let mut compile_options = hlsl::CompilerOptions::default(); + compile_options.shader_model = shader_model; + compile_options.vertex.invert_y = !features.contains(hal::Features::NDC_Y_UP); + + //let stage_flag = stage.into(); + + // TODO: + /*let root_constant_layout = layout + .root_constants + .iter() + .filter_map(|constant| if constant.stages.contains(stage_flag) { + Some(hlsl::RootConstant { + start: constant.range.start * 4, + end: constant.range.end * 4, + binding: constant.range.start, + space: 0, + }) + } else { + None + }) + .collect();*/ + ast.set_compiler_options(&compile_options) + .map_err(gen_unexpected_error)?; + //ast.set_root_constant_layout(root_constant_layout) + // .map_err(gen_unexpected_error)?; + ast.compile().map_err(|err| { + let msg = match err { + SpirvErrorCode::CompilationError(msg) => msg, + SpirvErrorCode::Unhandled => "Unknown compile error".into(), + }; + device::ShaderError::CompilationFailed(msg) + }) +} diff --git a/third_party/rust/gfx-backend-dx12/.cargo-checksum.json b/third_party/rust/gfx-backend-dx12/.cargo-checksum.json index f990b2fc2214..eec3406aae16 100644 --- a/third_party/rust/gfx-backend-dx12/.cargo-checksum.json +++ b/third_party/rust/gfx-backend-dx12/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"96e407f8222b9949caa9a7aa16b8b5b16b3f6cebecc43110b7848ec535645bb5","README.md":"53ad6efa9975b59f9ab830c26c940e18c3e41efd107cd34d670771a8ba53ae1a","shaders/blit.hlsl":"1f8819f3a91acf71a69bfd14ccd245180a92a9efb0ab76bf6b2e89aae10f3952","src/command.rs":"486abca81c49e037946e02a44f0c0b6ce02b1d5ecd76c04769c39ff1efa03e6b","src/conv.rs":"c9bd69d537d3c1d196d3e45bee02aad83dc3b847dd440da354c210bc65c49901","src/descriptors_cpu.rs":"ba881e6f9b90ad90aaebba5bc8d5cf1903560a762ba977ab63f278abba2e2d70","src/device.rs":"b78557efe443c69e2637a1721ae4290c1f006c468e9112c4f5fa906b2992c2d5","src/internal.rs":"da049335e8514d44686ec34f74001a155766fb7ae3cc6d9197479865eb2e3f82","src/lib.rs":"0d48df7adee4833d090dadd9d804f65ee9e6b4bbb4569b20fb0c1eb7605349b6","src/pool.rs":"d76526023087026752acf8a15a7d5e585dbb0486d0511bcae57b0e8f905970eb","src/resource.rs":"3e3d93a8793ebda2977162b5bb7681efce1a1598edd9a8d0e0b40266d67ea38d","src/root_constants.rs":"b6bb4d5ee8dd9686fb7172bc951c4c04801966f8e5cf9843fca52faa45cf7943","src/window.rs":"10003034e9512a69422520238823f342e79b32073351dec654a5360a0280b48d"},"package":"b6e913cc800fb12eaba2c420091a02aca9aafbefd672600dfc5b52654343d341"} \ No newline at end of file 
+{"files":{"Cargo.toml":"c1095e8de14600328b90f6ece976bdcda84e91e5fedfe67a01c21d56c877d857","README.md":"53ad6efa9975b59f9ab830c26c940e18c3e41efd107cd34d670771a8ba53ae1a","shaders/blit.hlsl":"1f8819f3a91acf71a69bfd14ccd245180a92a9efb0ab76bf6b2e89aae10f3952","src/command.rs":"d00f66380601409595e85c5d2ad5e610a1c28067b5b1308b994d1bd7f9f08ad3","src/conv.rs":"157753744baa9fc74e2d185d0595bb23fbfbff702b8e04caaec4dfd45ec58d34","src/descriptors_cpu.rs":"2d8434fa23b71db6e51f4b6e68a63c4ce46159d74f027da2a309c4c0323956a7","src/device.rs":"dd84307a98095c375381e9876a4a1538d72488505b8dee9ddf5d145bee6a845c","src/internal.rs":"374bf4f7fa58e687a29e3e3a5d78aa8965e39b8a89b49d591b827894f735b386","src/lib.rs":"11f2f0b161729ecc73b60f5ec8329e0b9af387192d619fac13cf0ddc1aedafd1","src/pool.rs":"cc370b53a6843bcdbc507af013650505a60ab8b617747a3cb38c773e4ec1c6d7","src/resource.rs":"043b12a1cebdd87707815c4e6a1d42877ea5cbfc2f7d2e0458bf8e20970a86b4","src/root_constants.rs":"fce7f096f6a06c6796c937be0e1e9ae12c8183177a69be3387b33e33cf1d1b67","src/window.rs":"7cfd4408f64e9cab0bf3d121738a9ca8f9a5e44ba0b52c571ebdd75df33e266b"},"package":"a0e526746379e974501551b08958947e67a81b5ea8cdc717a000cdd72577da05"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-dx12/Cargo.toml b/third_party/rust/gfx-backend-dx12/Cargo.toml index 985f0397f058..fb5e21525add 100644 --- a/third_party/rust/gfx-backend-dx12/Cargo.toml +++ b/third_party/rust/gfx-backend-dx12/Cargo.toml @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "gfx-backend-dx12" -version = "0.4.1" +version = "0.5.0" authors = ["The Gfx-rs Developers"] description = "DirectX-12 API backend for gfx-rs" homepage = "https://github.com/gfx-rs/gfx" @@ -27,22 +28,25 @@ default-target = "x86_64-pc-windows-msvc" [lib] name = "gfx_backend_dx12" [dependencies.auxil] -version = "0.1" +version = "0.3" +features = ["spirv_cross"] package = "gfx-auxil" [dependencies.bitflags] version = "1" 
-[dependencies.d3d12] -version = "0.3" -features = ["libloading"] - -[dependencies.gfx-hal] -version = "0.4" +[dependencies.hal] +version = "0.5" +package = "gfx-hal" [dependencies.log] version = "0.4" +[dependencies.native] +version = "0.3" +features = ["libloading"] +package = "d3d12" + [dependencies.range-alloc] version = "0.1" @@ -50,10 +54,10 @@ version = "0.1" version = "0.3" [dependencies.smallvec] -version = "0.6" +version = "1.0" [dependencies.spirv_cross] -version = "0.16" +version = "0.18" features = ["hlsl"] [dependencies.winapi] diff --git a/third_party/rust/gfx-backend-dx12/src/command.rs b/third_party/rust/gfx-backend-dx12/src/command.rs index 026c31cba000..7fdb63c50f2f 100644 --- a/third_party/rust/gfx-backend-dx12/src/command.rs +++ b/third_party/rust/gfx-backend-dx12/src/command.rs @@ -1,8 +1,15 @@ use auxil::FastHashMap; -use hal::format::Aspects; -use hal::range::RangeArg; -use hal::{buffer, command as com, format, image, memory, pass, pool, pso, query}; use hal::{ + buffer, + command as com, + format, + format::Aspects, + image, + memory, + pass, + pool, + pso, + query, DrawCount, IndexCount, IndexType, @@ -12,26 +19,27 @@ use hal::{ WorkGroupCount, }; -use std::borrow::Borrow; -use std::ops::Range; -use std::sync::Arc; -use std::{cmp, fmt, iter, mem, ptr}; +use std::{borrow::Borrow, cmp, fmt, iter, mem, ops::Range, ptr, sync::Arc}; -use winapi::shared::minwindef::{FALSE, TRUE, UINT}; -use winapi::shared::{dxgiformat, winerror}; -use winapi::um::{d3d12, d3dcommon}; -use winapi::Interface; +use winapi::{ + shared::{ + dxgiformat, + minwindef::{FALSE, TRUE, UINT}, + winerror, + }, + um::{d3d12, d3dcommon}, + Interface, +}; -use native; - -use root_constants::RootConstant; use smallvec::SmallVec; -use { + +use crate::{ conv, descriptors_cpu, device, internal, resource as r, + root_constants::RootConstant, validate_line_width, Backend, Device, @@ -92,7 +100,6 @@ enum OcclusionQuery { Precise(UINT), } - /// Strongly-typed root signature element /// 
/// Could be removed for an unsafer variant to occupy less memory @@ -293,8 +300,10 @@ impl PipelineCache { let dynamic_descriptors = unsafe { &*binding.dynamic_descriptors.get() }; for descriptor in dynamic_descriptors { let root_offset = element.descriptors[descriptor_id].offset; - self.user_data - .set_descriptor_cbv(root_offset, descriptor.gpu_buffer_location + offsets.next().unwrap()); + self.user_data.set_descriptor_cbv( + root_offset, + descriptor.gpu_buffer_location + offsets.next().unwrap(), + ); descriptor_id += 1; } } @@ -331,7 +340,7 @@ pub struct CommandBuffer { // Cache renderpasses for graphics operations pass_cache: Option, - cur_subpass: usize, + cur_subpass: pass::SubpassId, // Cache current graphics root signature and pipeline to minimize rebinding and support two // bindpoints. @@ -500,7 +509,7 @@ impl CommandBuffer { fn insert_subpass_barriers(&self, insertion: BarrierPoint) { let state = self.pass_cache.as_ref().unwrap(); - let proto_barriers = match state.render_pass.subpasses.get(self.cur_subpass) { + let proto_barriers = match state.render_pass.subpasses.get(self.cur_subpass as usize) { Some(subpass) => match insertion { BarrierPoint::Pre => &subpass.pre_barriers, BarrierPoint::Post => &subpass.post_barriers, @@ -542,7 +551,7 @@ impl CommandBuffer { fn bind_targets(&mut self) { let state = self.pass_cache.as_ref().unwrap(); - let subpass = &state.render_pass.subpasses[self.cur_subpass]; + let subpass = &state.render_pass.subpasses[self.cur_subpass as usize]; // collect render targets let color_views = subpass @@ -596,7 +605,7 @@ impl CommandBuffer { fn resolve_attachments(&self) { let state = self.pass_cache.as_ref().unwrap(); let framebuffer = &state.framebuffer; - let subpass = &state.render_pass.subpasses[self.cur_subpass]; + let subpass = &state.render_pass.subpasses[self.cur_subpass as usize]; for (&(src_attachment, _), &(dst_attachment, _)) in subpass .color_attachments @@ -814,18 +823,25 @@ impl CommandBuffer { if 
user_data.is_index_dirty(table_index) { match user_data.data[table_index] { RootElement::TableSrvCbvUav(offset) => { - let gpu = d3d12::D3D12_GPU_DESCRIPTOR_HANDLE { ptr: pipeline.srv_cbv_uav_start + offset as u64 }; + let gpu = d3d12::D3D12_GPU_DESCRIPTOR_HANDLE { + ptr: pipeline.srv_cbv_uav_start + offset as u64, + }; table_update(i as _, gpu); user_data.clear_dirty(table_index); } RootElement::TableSampler(offset) => { - let gpu = d3d12::D3D12_GPU_DESCRIPTOR_HANDLE { ptr: pipeline.sampler_start + offset as u64 }; + let gpu = d3d12::D3D12_GPU_DESCRIPTOR_HANDLE { + ptr: pipeline.sampler_start + offset as u64, + }; table_update(i as _, gpu); user_data.clear_dirty(table_index); } RootElement::DescriptorCbv { buffer } => { debug_assert!(user_data.is_index_dirty(table_index + 1)); - debug_assert_eq!(user_data.data[table_index + 1], RootElement::DescriptorPlaceholder); + debug_assert_eq!( + user_data.data[table_index + 1], + RootElement::DescriptorPlaceholder + ); descriptor_cbv_update(i as _, buffer); @@ -1108,13 +1124,12 @@ impl CommandBuffer { range: &image::SubresourceRange, list: &mut impl Extend, ) { - let mut bar = - Self::transition_barrier(d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: target.resource.as_mut_ptr(), - Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, - StateBefore: states.start, - StateAfter: states.end, - }); + let mut bar = Self::transition_barrier(d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: target.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: states.start, + StateAfter: states.end, + }); if *range == target.to_subresource_range(range.aspects) { // Only one barrier if it affects the whole image. 
@@ -1204,7 +1219,11 @@ impl com::CommandBuffer for CommandBuffer { }; AttachmentClear { - subpass_id: render_pass.subpasses.iter().position(|sp| sp.is_using(i)), + subpass_id: render_pass + .subpasses + .iter() + .position(|sp| sp.is_using(i)) + .map(|i| i as pass::SubpassId), value: if attachment.ops.load == pass::AttachmentLoadOp::Clear { assert!(cv.is_some()); cv @@ -1326,7 +1345,12 @@ impl com::CommandBuffer for CommandBuffer { } let target = target.expect_bound(); - Self::fill_texture_barries(target, state_src .. state_dst, range, &mut raw_barriers); + Self::fill_texture_barries( + target, + state_src .. state_dst, + range, + &mut raw_barriers, + ); } } } @@ -1439,7 +1463,7 @@ impl com::CommandBuffer for CommandBuffer { Some(ref cache) => cache, None => panic!("`clear_attachments` can only be called inside a renderpass"), }; - let sub_pass = &pass_cache.render_pass.subpasses[self.cur_subpass]; + let sub_pass = &pass_cache.render_pass.subpasses[self.cur_subpass as usize]; let clear_rects: SmallVec<[pso::ClearRect; 4]> = rects .into_iter() @@ -1836,27 +1860,27 @@ impl com::CommandBuffer for CommandBuffer { }; let location = buffer.resource.gpu_virtual_address(); self.raw.set_index_buffer( - location + ibv.offset, - (buffer.requirements.size - ibv.offset) as u32, + location + ibv.range.offset, + ibv.range.size_to(buffer.requirements.size) as u32, format, ); } unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) where - I: IntoIterator, + I: IntoIterator, T: Borrow, { assert!(first_binding as usize <= MAX_VERTEX_BUFFERS); - for (view, (buffer, offset)) in self.vertex_buffer_views[first_binding as _ ..] + for (view, (buffer, sub)) in self.vertex_buffer_views[first_binding as _ ..] 
.iter_mut() .zip(buffers) { let b = buffer.borrow().expect_bound(); let base = (*b.resource).GetGPUVirtualAddress(); - view.BufferLocation = base + offset; - view.SizeInBytes = (b.requirements.size - offset) as u32; + view.BufferLocation = base + sub.offset; + view.SizeInBytes = sub.size_to(b.requirements.size) as u32; } self.set_vertex_buffers(); } @@ -2068,18 +2092,17 @@ impl com::CommandBuffer for CommandBuffer { ); } - unsafe fn fill_buffer(&mut self, buffer: &r::Buffer, range: R, _data: u32) - where - R: RangeArg, - { + unsafe fn fill_buffer(&mut self, buffer: &r::Buffer, range: buffer::SubRange, _data: u32) { let buffer = buffer.expect_bound(); assert!( buffer.clear_uav.is_some(), "Buffer needs to be created with usage `TRANSFER_DST`" ); let bytes_per_unit = 4; - let start = *range.start().unwrap_or(&0) as i32; - let end = *range.end().unwrap_or(&(buffer.requirements.size as u64)) as i32; + let start = range.offset as i32; + let end = range + .size + .map_or(buffer.requirements.size, |s| range.offset + s) as i32; if start % 4 != 0 || end % 4 != 0 { warn!("Fill buffer bounds have to be multiples of 4"); } @@ -2540,9 +2563,7 @@ impl com::CommandBuffer for CommandBuffer { self.occlusion_query = None; d3d12::D3D12_QUERY_TYPE_BINARY_OCCLUSION } - native::QueryHeapType::PipelineStatistics - if self.pipeline_stats_query == Some(id) => - { + native::QueryHeapType::PipelineStatistics if self.pipeline_stats_query == Some(id) => { self.pipeline_stats_query = None; d3d12::D3D12_QUERY_TYPE_PIPELINE_STATISTICS } @@ -2615,4 +2636,14 @@ impl com::CommandBuffer for CommandBuffer { error!("TODO: execute_commands"); } } + + unsafe fn insert_debug_marker(&mut self, _name: &str, _color: u32) { + //TODO + } + unsafe fn begin_debug_marker(&mut self, _name: &str, _color: u32) { + //TODO + } + unsafe fn end_debug_marker(&mut self) { + //TODO + } } diff --git a/third_party/rust/gfx-backend-dx12/src/conv.rs b/third_party/rust/gfx-backend-dx12/src/conv.rs index 
923bc2abd5bd..05aaafdc856e 100644 --- a/third_party/rust/gfx-backend-dx12/src/conv.rs +++ b/third_party/rust/gfx-backend-dx12/src/conv.rs @@ -1,16 +1,23 @@ -use validate_line_width; +use crate::validate_line_width; use spirv_cross::spirv; use std::mem; -use winapi::shared::basetsd::UINT8; -use winapi::shared::dxgiformat::*; -use winapi::shared::minwindef::{FALSE, INT, TRUE, UINT}; -use winapi::um::d3d12::*; -use winapi::um::d3dcommon::*; +use winapi::{ + shared::{ + basetsd::UINT8, + dxgiformat::*, + minwindef::{FALSE, INT, TRUE, UINT}, + }, + um::{d3d12::*, d3dcommon::*}, +}; -use hal::format::{Format, ImageFeature, SurfaceType, Swizzle}; -use hal::{buffer, image, pso}; +use hal::{ + buffer, + format::{Format, ImageFeature, SurfaceType, Swizzle}, + image, + pso, +}; use native::ShaderVisibility; @@ -179,12 +186,8 @@ pub fn map_topology_type(primitive: pso::Primitive) -> D3D12_PRIMITIVE_TOPOLOGY_ use hal::pso::Primitive::*; match primitive { PointList => D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT, - LineList | LineStrip => { - D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE - } - TriangleList | TriangleStrip => { - D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE - } + LineList | LineStrip => D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE, + TriangleList | TriangleStrip => D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE, PatchList(_) => D3D12_PRIMITIVE_TOPOLOGY_TYPE_PATCH, } } @@ -220,18 +223,17 @@ pub fn map_rasterizer(rasterizer: &pso::Rasterizer) -> D3D12_RASTERIZER_DESC { Some(_) | None => pso::DepthBias::default(), }; + if let pso::State::Static(w) = rasterizer.line_width { + validate_line_width(w); + } + D3D12_RASTERIZER_DESC { FillMode: match rasterizer.polygon_mode { Point => { error!("Point rasterization is not supported"); D3D12_FILL_MODE_WIREFRAME } - Line(width) => { - if let pso::State::Static(w) = width { - validate_line_width(w); - } - D3D12_FILL_MODE_WIREFRAME - } + Line => D3D12_FILL_MODE_WIREFRAME, Fill => D3D12_FILL_MODE_SOLID, }, CullMode: match rasterizer.cull_face { @@ -420,6 +422,7 @@ pub fn 
map_wrap(wrap: image::WrapMode) -> D3D12_TEXTURE_ADDRESS_MODE { Mirror => D3D12_TEXTURE_ADDRESS_MODE_MIRROR, Clamp => D3D12_TEXTURE_ADDRESS_MODE_CLAMP, Border => D3D12_TEXTURE_ADDRESS_MODE_BORDER, + MirrorClamp => D3D12_TEXTURE_ADDRESS_MODE_MIRROR_ONCE, } } @@ -430,19 +433,12 @@ fn map_filter_type(filter: image::Filter) -> D3D12_FILTER_TYPE { } } -fn map_anisotropic(anisotropic: image::Anisotropic) -> D3D12_FILTER { - match anisotropic { - image::Anisotropic::On(_) => D3D12_FILTER_ANISOTROPIC, - image::Anisotropic::Off => 0, - } -} - pub fn map_filter( mag_filter: image::Filter, min_filter: image::Filter, mip_filter: image::Filter, reduction: D3D12_FILTER_REDUCTION_TYPE, - anisotropic: image::Anisotropic, + anisotropy_clamp: Option, ) -> D3D12_FILTER { let mag = map_filter_type(mag_filter); let min = map_filter_type(min_filter); @@ -452,7 +448,9 @@ pub fn map_filter( | (mag & D3D12_FILTER_TYPE_MASK) << D3D12_MAG_FILTER_SHIFT | (mip & D3D12_FILTER_TYPE_MASK) << D3D12_MIP_FILTER_SHIFT | (reduction & D3D12_FILTER_REDUCTION_TYPE_MASK) << D3D12_FILTER_REDUCTION_TYPE_SHIFT - | map_anisotropic(anisotropic) + | anisotropy_clamp + .map(|_| D3D12_FILTER_ANISOTROPIC) + .unwrap_or(0) } pub fn map_buffer_resource_state(access: buffer::Access) -> D3D12_RESOURCE_STATES { @@ -475,8 +473,7 @@ pub fn map_buffer_resource_state(access: buffer::Access) -> D3D12_RESOURCE_STATE if access.contains(Access::INDEX_BUFFER_READ) { state |= D3D12_RESOURCE_STATE_INDEX_BUFFER; } - if access.contains(Access::VERTEX_BUFFER_READ) || access.contains(Access::UNIFORM_READ) - { + if access.contains(Access::VERTEX_BUFFER_READ) || access.contains(Access::UNIFORM_READ) { state |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER; } if access.contains(Access::INDIRECT_COMMAND_READ) { @@ -513,10 +510,22 @@ fn derive_image_state(access: image::Access) -> D3D12_RESOURCE_STATES { } const MUTABLE_IMAGE_ACCESS: &[(image::Access, D3D12_RESOURCE_STATES)] = &[ - (image::Access::SHADER_WRITE, 
D3D12_RESOURCE_STATE_UNORDERED_ACCESS), - (image::Access::COLOR_ATTACHMENT_WRITE, D3D12_RESOURCE_STATE_RENDER_TARGET), - (image::Access::DEPTH_STENCIL_ATTACHMENT_WRITE, D3D12_RESOURCE_STATE_DEPTH_WRITE), - (image::Access::TRANSFER_WRITE, D3D12_RESOURCE_STATE_COPY_DEST), + ( + image::Access::SHADER_WRITE, + D3D12_RESOURCE_STATE_UNORDERED_ACCESS, + ), + ( + image::Access::COLOR_ATTACHMENT_WRITE, + D3D12_RESOURCE_STATE_RENDER_TARGET, + ), + ( + image::Access::DEPTH_STENCIL_ATTACHMENT_WRITE, + D3D12_RESOURCE_STATE_DEPTH_WRITE, + ), + ( + image::Access::TRANSFER_WRITE, + D3D12_RESOURCE_STATE_COPY_DEST, + ), ]; pub fn map_image_resource_state( @@ -536,7 +545,10 @@ pub fn map_image_resource_state( image::Layout::TransferDstOptimal => D3D12_RESOURCE_STATE_COPY_DEST, image::Layout::TransferSrcOptimal => D3D12_RESOURCE_STATE_COPY_SOURCE, image::Layout::General => { - match MUTABLE_IMAGE_ACCESS.iter().find(|&(bit, _)| access.contains(*bit)) { + match MUTABLE_IMAGE_ACCESS + .iter() + .find(|&(bit, _)| access.contains(*bit)) + { Some(&(bit, state)) => { if !(access & !bit).is_empty() { warn!("Required access contains multiple writable states with `General` layout: {:?}", access); @@ -546,10 +558,10 @@ pub fn map_image_resource_state( None => derive_image_state(access), } } - image::Layout::ShaderReadOnlyOptimal | - image::Layout::DepthStencilReadOnlyOptimal => derive_image_state(access), - image::Layout::Undefined | - image::Layout::Preinitialized => D3D12_RESOURCE_STATE_COMMON, + image::Layout::ShaderReadOnlyOptimal | image::Layout::DepthStencilReadOnlyOptimal => { + derive_image_state(access) + } + image::Layout::Undefined | image::Layout::Preinitialized => D3D12_RESOURCE_STATE_COMMON, } } diff --git a/third_party/rust/gfx-backend-dx12/src/descriptors_cpu.rs b/third_party/rust/gfx-backend-dx12/src/descriptors_cpu.rs index 1cc6f25bdcc9..854e59c23858 100644 --- a/third_party/rust/gfx-backend-dx12/src/descriptors_cpu.rs +++ 
b/third_party/rust/gfx-backend-dx12/src/descriptors_cpu.rs @@ -18,7 +18,8 @@ impl fmt::Debug for HeapLinear { impl HeapLinear { pub fn new(device: native::Device, ty: DescriptorHeapType, size: usize) -> Self { - let (heap, _hr) = device.create_descriptor_heap(size as _, ty, DescriptorHeapFlags::empty(), 0); + let (heap, _hr) = + device.create_descriptor_heap(size as _, ty, DescriptorHeapFlags::empty(), 0); HeapLinear { handle_size: device.get_descriptor_increment_size(ty) as _, @@ -75,8 +76,12 @@ impl fmt::Debug for Heap { impl Heap { pub fn new(device: native::Device, ty: DescriptorHeapType) -> Self { - let (heap, _hr) = - device.create_descriptor_heap(HEAP_SIZE_FIXED as _, ty, DescriptorHeapFlags::empty(), 0); + let (heap, _hr) = device.create_descriptor_heap( + HEAP_SIZE_FIXED as _, + ty, + DescriptorHeapFlags::empty(), + 0, + ); Heap { handle_size: device.get_descriptor_increment_size(ty) as _, diff --git a/third_party/rust/gfx-backend-dx12/src/device.rs b/third_party/rust/gfx-backend-dx12/src/device.rs index 8052f3e04774..649da4785499 100644 --- a/third_party/rust/gfx-backend-dx12/src/device.rs +++ b/third_party/rust/gfx-backend-dx12/src/device.rs @@ -1,45 +1,59 @@ -use std::borrow::Borrow; -use std::collections::{BTreeMap, VecDeque}; -use std::ops::Range; -use std::{ffi, mem, ptr, slice}; +use std::{ + borrow::Borrow, + collections::{BTreeMap, VecDeque}, + ffi, + mem, + ops::Range, + ptr, + slice, +}; -use spirv_cross::{hlsl, spirv, ErrorCode as SpirvErrorCode}; +use range_alloc::RangeAllocator; use smallvec::SmallVec; +use spirv_cross::{hlsl, spirv, ErrorCode as SpirvErrorCode}; -use winapi::shared::minwindef::{FALSE, TRUE, UINT}; -use winapi::shared::{dxgi, dxgi1_2, dxgi1_4, dxgiformat, dxgitype, windef, winerror}; -use winapi::um::{d3d12, d3dcompiler, synchapi, winbase, winnt}; -use winapi::Interface; +use winapi::{ + shared::{ + dxgi, + dxgi1_2, + dxgi1_4, + dxgiformat, + dxgitype, + minwindef::{FALSE, TRUE, UINT}, + windef, + winerror, + }, + um::{d3d12, 
d3dcompiler, synchapi, winbase, winnt}, + Interface, +}; use auxil::spirv_cross_specialize_ast; -use hal::format::Aspects; -use hal::memory::Requirements; -use hal::pool::CommandPoolCreateFlags; -use hal::pso::VertexInputRate; -use hal::queue::{CommandQueue as _, QueueFamilyId}; -use hal::range::RangeArg; use hal::{ self, buffer, device as d, format, + format::Aspects, image, memory, + memory::Requirements, pass, + pool::CommandPoolCreateFlags, pso, + pso::VertexInputRate, query, + queue::{CommandQueue as _, QueueFamilyId}, window as w, }; -use pool::{CommandPool, CommandPoolAllocator}; -use range_alloc::RangeAllocator; -use root_constants::RootConstant; -use { - conv, +use crate::{ command as cmd, + conv, descriptors_cpu, + pool::{CommandPool, CommandPoolAllocator}, resource as r, root_constants, + root_constants::RootConstant, window::{Surface, Swapchain}, Backend as B, Device, @@ -48,10 +62,7 @@ use { NUM_HEAP_PROPERTIES, QUEUE_FAMILIES, }; -use native::{ - PipelineStateSubobject, - Subobject, -}; +use native::{PipelineStateSubobject, Subobject}; // Register space used for root constants. const ROOT_CONSTANT_SPACE: u32 = 0; @@ -268,100 +279,131 @@ impl Device { fn patch_spirv_resources( ast: &mut spirv::Ast, - layout: Option<&r::PipelineLayout>, + layout: &r::PipelineLayout, ) -> Result<(), d::ShaderError> { // Move the descriptor sets away to yield for the root constants at "space0". 
- let space_offset = match layout { - Some(layout) if !layout.constants.is_empty() => 1, - _ => return Ok(()), - }; - + let space_offset = if layout.constants.is_empty() { 0 } else { 1 }; let shader_resources = ast.get_shader_resources().map_err(gen_query_error)?; - for image in &shader_resources.separate_images { - let set = ast - .get_decoration(image.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)?; - ast.set_decoration( - image.id, - spirv::Decoration::DescriptorSet, - space_offset + set, - ) - .map_err(gen_unexpected_error)?; + + if space_offset != 0 { + for image in &shader_resources.separate_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + image.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } } - for uniform_buffer in &shader_resources.uniform_buffers { - let set = ast - .get_decoration(uniform_buffer.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)?; - ast.set_decoration( - uniform_buffer.id, - spirv::Decoration::DescriptorSet, - space_offset + set, - ) - .map_err(gen_unexpected_error)?; + if space_offset != 0 { + for uniform_buffer in &shader_resources.uniform_buffers { + let set = ast + .get_decoration(uniform_buffer.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + uniform_buffer.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } } for storage_buffer in &shader_resources.storage_buffers { let set = ast .get_decoration(storage_buffer.id, spirv::Decoration::DescriptorSet) .map_err(gen_query_error)?; - ast.set_decoration( - storage_buffer.id, - spirv::Decoration::DescriptorSet, - space_offset + set, - ) - .map_err(gen_unexpected_error)?; + let binding = ast + .get_decoration(storage_buffer.id, spirv::Decoration::Binding) + .map_err(gen_query_error)?; + if space_offset != 0 { + 
ast.set_decoration( + storage_buffer.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + if !layout.elements[set as usize].mutable_bindings.contains(&binding) { + ast.set_decoration( + storage_buffer.id, + spirv::Decoration::NonWritable, + 0, + ) + .map_err(gen_unexpected_error)? + } } for image in &shader_resources.storage_images { let set = ast .get_decoration(image.id, spirv::Decoration::DescriptorSet) .map_err(gen_query_error)?; - ast.set_decoration( - image.id, - spirv::Decoration::DescriptorSet, - space_offset + set, - ) - .map_err(gen_unexpected_error)?; - } - - for sampler in &shader_resources.separate_samplers { - let set = ast - .get_decoration(sampler.id, spirv::Decoration::DescriptorSet) + let binding = ast + .get_decoration(image.id, spirv::Decoration::Binding) .map_err(gen_query_error)?; - ast.set_decoration( - sampler.id, - spirv::Decoration::DescriptorSet, - space_offset + set, - ) - .map_err(gen_unexpected_error)?; + if space_offset != 0 { + ast.set_decoration( + image.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + if !layout.elements[set as usize].mutable_bindings.contains(&binding) { + ast.set_decoration( + image.id, + spirv::Decoration::NonWritable, + 0, + ) + .map_err(gen_unexpected_error)? 
+ } } - for image in &shader_resources.sampled_images { - let set = ast - .get_decoration(image.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)?; - ast.set_decoration( - image.id, - spirv::Decoration::DescriptorSet, - space_offset + set, - ) - .map_err(gen_unexpected_error)?; + if space_offset != 0 { + for sampler in &shader_resources.separate_samplers { + let set = ast + .get_decoration(sampler.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + sampler.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } } - for input in &shader_resources.subpass_inputs { - let set = ast - .get_decoration(input.id, spirv::Decoration::DescriptorSet) - .map_err(gen_query_error)?; - ast.set_decoration( - input.id, - spirv::Decoration::DescriptorSet, - space_offset + set, - ) - .map_err(gen_unexpected_error)?; + if space_offset != 0 { + for image in &shader_resources.sampled_images { + let set = ast + .get_decoration(image.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + image.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } } - // TODO: other resources + if space_offset != 0 { + for input in &shader_resources.subpass_inputs { + let set = ast + .get_decoration(input.id, spirv::Decoration::DescriptorSet) + .map_err(gen_query_error)?; + ast.set_decoration( + input.id, + spirv::Decoration::DescriptorSet, + space_offset + set, + ) + .map_err(gen_unexpected_error)?; + } + } Ok(()) } @@ -371,10 +413,11 @@ impl Device { shader_model: hlsl::ShaderModel, layout: &r::PipelineLayout, stage: pso::Stage, + features: &hal::Features, ) -> Result { let mut compile_options = hlsl::CompilerOptions::default(); compile_options.shader_model = shader_model; - compile_options.vertex.invert_y = true; + compile_options.vertex.invert_y = !features.contains(hal::Features::NDC_Y_UP); let stage_flag = 
stage.into(); let root_constant_layout = layout @@ -413,6 +456,7 @@ impl Device { stage: pso::Stage, source: &pso::EntryPoint, layout: &r::PipelineLayout, + features: &hal::Features, ) -> Result<(native::Blob, bool), d::ShaderError> { match *source.module { r::ShaderModule::Compiled(ref shaders) => { @@ -426,10 +470,11 @@ impl Device { r::ShaderModule::Spirv(ref raw_data) => { let mut ast = Self::parse_spirv(raw_data)?; spirv_cross_specialize_ast(&mut ast, &source.specialization)?; - Self::patch_spirv_resources(&mut ast, Some(layout))?; + Self::patch_spirv_resources(&mut ast, layout)?; let shader_model = hlsl::ShaderModel::V5_1; - let shader_code = Self::translate_spirv(&mut ast, shader_model, layout, stage)?; + let shader_code = + Self::translate_spirv(&mut ast, shader_model, layout, stage, features)?; debug!("SPIRV-Cross generated shader:\n{}", shader_code); let real_name = ast @@ -530,7 +575,7 @@ impl Device { device: native::Device, handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, info: ViewInfo, - ) -> Result<(), image::ViewError> { + ) -> Result<(), image::ViewCreationError> { #![allow(non_snake_case)] let mut desc = d3d12::D3D12_RENDER_TARGET_VIEW_DESC { @@ -544,12 +589,12 @@ impl Device { let ArraySize = (info.range.layers.end - info.range.layers.start) as _; let is_msaa = info.kind.num_samples() > 1; if info.range.levels.start + 1 != info.range.levels.end { - return Err(image::ViewError::Level(info.range.levels.start)); + return Err(image::ViewCreationError::Level(info.range.levels.start)); } if info.range.layers.end > info.kind.num_layers() { - return Err(image::ViewError::Layer(image::LayerError::OutOfBounds( - info.range.layers, - ))); + return Err(image::ViewCreationError::Layer( + image::LayerError::OutOfBounds(info.range.layers), + )); } match info.view_kind { @@ -628,7 +673,7 @@ impl Device { fn view_image_as_render_target( &self, info: ViewInfo, - ) -> Result { + ) -> Result { let handle = self.rtv_pool.lock().unwrap().alloc_handle(); 
Self::view_image_as_render_target_impl(self.raw, handle, info).map(|_| handle) } @@ -637,7 +682,7 @@ impl Device { device: native::Device, handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, info: ViewInfo, - ) -> Result<(), image::ViewError> { + ) -> Result<(), image::ViewCreationError> { #![allow(non_snake_case)] let mut desc = d3d12::D3D12_DEPTH_STENCIL_VIEW_DESC { @@ -652,12 +697,12 @@ impl Device { let ArraySize = (info.range.layers.end - info.range.layers.start) as _; let is_msaa = info.kind.num_samples() > 1; if info.range.levels.start + 1 != info.range.levels.end { - return Err(image::ViewError::Level(info.range.levels.start)); + return Err(image::ViewCreationError::Level(info.range.levels.start)); } if info.range.layers.end > info.kind.num_layers() { - return Err(image::ViewError::Layer(image::LayerError::OutOfBounds( - info.range.layers, - ))); + return Err(image::ViewCreationError::Layer( + image::LayerError::OutOfBounds(info.range.layers), + )); } match info.view_kind { @@ -701,7 +746,9 @@ impl Device { ArraySize, } } - image::ViewKind::D3 | image::ViewKind::Cube | image::ViewKind::CubeArray => unimplemented!(), + image::ViewKind::D3 | image::ViewKind::Cube | image::ViewKind::CubeArray => { + unimplemented!() + } }; unsafe { @@ -714,14 +761,14 @@ impl Device { fn view_image_as_depth_stencil( &self, info: ViewInfo, - ) -> Result { + ) -> Result { let handle = self.dsv_pool.lock().unwrap().alloc_handle(); Self::view_image_as_depth_stencil_impl(self.raw, handle, info).map(|_| handle) } pub(crate) fn build_image_as_shader_resource_desc( info: &ViewInfo, - ) -> Result { + ) -> Result { #![allow(non_snake_case)] let mut desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC { @@ -737,9 +784,9 @@ impl Device { let ArraySize = (info.range.layers.end - info.range.layers.start) as _; if info.range.layers.end > info.kind.num_layers() { - return Err(image::ViewError::Layer(image::LayerError::OutOfBounds( - info.range.layers.clone(), - ))); + return 
Err(image::ViewCreationError::Layer( + image::LayerError::OutOfBounds(info.range.layers.clone()), + )); } let is_msaa = info.kind.num_samples() > 1; let is_cube = info.caps.contains(image::ViewCapabilities::KIND_CUBE); @@ -832,7 +879,7 @@ impl Device { "Cube views are not supported for the image, kind: {:?}", info.kind ); - return Err(image::ViewError::BadKind(info.view_kind)); + return Err(image::ViewCreationError::BadKind(info.view_kind)); } } @@ -842,7 +889,7 @@ impl Device { fn view_image_as_shader_resource( &self, mut info: ViewInfo, - ) -> Result { + ) -> Result { #![allow(non_snake_case)] // Depth-stencil formats can't be used for SRVs. @@ -865,7 +912,7 @@ impl Device { fn view_image_as_storage( &self, info: ViewInfo, - ) -> Result { + ) -> Result { #![allow(non_snake_case)] assert_eq!(info.range.levels.start + 1, info.range.levels.end); @@ -880,13 +927,13 @@ impl Device { let ArraySize = (info.range.layers.end - info.range.layers.start) as _; if info.range.layers.end > info.kind.num_layers() { - return Err(image::ViewError::Layer(image::LayerError::OutOfBounds( - info.range.layers, - ))); + return Err(image::ViewCreationError::Layer( + image::LayerError::OutOfBounds(info.range.layers), + )); } if info.kind.num_samples() > 1 { error!("MSAA images can't be viewed as UAV"); - return Err(image::ViewError::Unsupported); + return Err(image::ViewCreationError::Unsupported); } match info.view_kind { @@ -931,7 +978,7 @@ impl Device { } image::ViewKind::Cube | image::ViewKind::CubeArray => { error!("Cubic images can't be viewed as UAV"); - return Err(image::ViewError::Unsupported); + return Err(image::ViewCreationError::Unsupported); } } @@ -1233,12 +1280,10 @@ impl d::Device for Device { .collect::>(); let mut sub_infos = subpasses .into_iter() - .map(|desc| { - SubInfo { - desc: desc.borrow().clone(), - external_dependencies: image::Access::empty() .. 
image::Access::empty(), - unresolved_dependencies: 0, - } + .map(|desc| SubInfo { + desc: desc.borrow().clone(), + external_dependencies: image::Access::empty() .. image::Access::empty(), + unresolved_dependencies: 0, }) .collect::>(); let dependencies = dependencies.into_iter().collect::>(); @@ -1252,22 +1297,33 @@ impl d::Device for Device { .collect::>(); for dep in &dependencies { - use hal::pass::SubpassRef as Sr; let dep = dep.borrow(); match dep.passes { - Range { start: Sr::External, end: Sr::External } => { + Range { + start: None, + end: None, + } => { error!("Unexpected external-external dependency!"); } - Range { start: Sr::External, end: Sr::Pass(sid) } => { - sub_infos[sid].external_dependencies.start |= dep.accesses.start; + Range { + start: None, + end: Some(sid), + } => { + sub_infos[sid as usize].external_dependencies.start |= dep.accesses.start; } - Range { start: Sr::Pass(sid), end: Sr::External } => { - sub_infos[sid].external_dependencies.end |= dep.accesses.end; + Range { + start: Some(sid), + end: None, + } => { + sub_infos[sid as usize].external_dependencies.end |= dep.accesses.end; } - Range { start: Sr::Pass(from_sid), end: Sr::Pass(sid) } => { + Range { + start: Some(from_sid), + end: Some(sid), + } => { //Note: self-dependencies are ignored if from_sid != sid { - sub_infos[sid].unresolved_dependencies += 1; + sub_infos[sid as usize].unresolved_dependencies += 1; } } } @@ -1287,8 +1343,12 @@ impl d::Device for Device { } for &(id, layout) in sub.depth_stencil { let state = SubState::New(match layout { - image::Layout::DepthStencilAttachmentOptimal => d3d12::D3D12_RESOURCE_STATE_DEPTH_WRITE, - image::Layout::DepthStencilReadOnlyOptimal => d3d12::D3D12_RESOURCE_STATE_DEPTH_READ, + image::Layout::DepthStencilAttachmentOptimal => { + d3d12::D3D12_RESOURCE_STATE_DEPTH_WRITE + } + image::Layout::DepthStencilReadOnlyOptimal => { + d3d12::D3D12_RESOURCE_STATE_DEPTH_READ + } image::Layout::General => d3d12::D3D12_RESOURCE_STATE_DEPTH_WRITE, _ => 
{ error!("Unexpected depth/stencil layout: {:?}", layout); @@ -1320,14 +1380,17 @@ impl d::Device for Device { post_barriers: Vec::new(), }; - while let Some(sid) = sub_infos.iter().position(|si| si.unresolved_dependencies == 0) { + while let Some(sid) = sub_infos + .iter() + .position(|si| si.unresolved_dependencies == 0) + { for dep in &dependencies { let dep = dep.borrow(); if dep.passes.start != dep.passes.end - && dep.passes.start == pass::SubpassRef::Pass(sid) + && dep.passes.start == Some(sid as pass::SubpassId) { - if let pass::SubpassRef::Pass(other) = dep.passes.end { - sub_infos[other].unresolved_dependencies -= 1; + if let Some(other) = dep.passes.end { + sub_infos[other as usize].unresolved_dependencies -= 1; } } } @@ -1389,8 +1452,7 @@ impl d::Device for Device { ai.last_state = resolve_state; ai.barrier_start_index = rp.subpasses.len() + 1; } - SubState::Undefined | - SubState::New(_) => {} + SubState::Undefined | SubState::New(_) => {} }; } @@ -1411,7 +1473,7 @@ impl d::Device for Device { for (att_id, (ai, att)) in att_infos.iter().zip(attachments.iter()).enumerate() { let state_dst = if ai.barrier_start_index == 0 { // attachment wasn't used in any sub-pass? 
- continue + continue; } else { let si = &sub_infos[ai.barrier_start_index - 1]; conv::map_image_resource_state(si.external_dependencies.end, att.layouts.end) @@ -1530,105 +1592,113 @@ impl d::Device for Device { .sum(); let mut ranges = Vec::with_capacity(total); - let elements = sets.iter().enumerate().map(|(i, set)| { - let set = set.borrow(); - let space = (root_space_offset + i) as u32; - let mut table_type = r::SetTableTypes::empty(); - let root_table_offset = root_offset; + let elements = sets + .iter() + .enumerate() + .map(|(i, set)| { + let set = set.borrow(); + let space = (root_space_offset + i) as u32; + let mut table_type = r::SetTableTypes::empty(); + let root_table_offset = root_offset; - //TODO: split between sampler and non-sampler tables - let visibility = conv::map_shader_visibility( - set.bindings - .iter() - .fold(pso::ShaderStageFlags::empty(), |u, bind| { - u | bind.stage_flags - }), - ); + //TODO: split between sampler and non-sampler tables + let visibility = conv::map_shader_visibility( + set.bindings + .iter() + .fold(pso::ShaderStageFlags::empty(), |u, bind| { + u | bind.stage_flags + }), + ); - for bind in set.bindings.iter() { - debug!("\tRange {:?} at space={}", bind, space); - } + for bind in set.bindings.iter() { + debug!("\tRange {:?} at space={}", bind, space); + } - let describe = |bind: &pso::DescriptorSetLayoutBinding, ty| { - native::DescriptorRange::new( - ty, - bind.count as _, - native::Binding { - register: bind.binding as _, - space, - }, - d3d12::D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND, - ) - }; + let describe = |bind: &pso::DescriptorSetLayoutBinding, ty| { + native::DescriptorRange::new( + ty, + bind.count as _, + native::Binding { + register: bind.binding as _, + space, + }, + d3d12::D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND, + ) + }; - let mut descriptors = Vec::new(); - let mut range_base = ranges.len(); - for bind in set.bindings.iter() { - let content = r::DescriptorContent::from(bind.ty); + let mut descriptors = 
Vec::new(); + let mut mutable_bindings = auxil::FastHashSet::default(); + let mut range_base = ranges.len(); + for bind in set.bindings.iter() { + let content = r::DescriptorContent::from(bind.ty); - if content.is_dynamic() { - // Root Descriptor - let binding = native::Binding { - register: bind.binding as _, - space, - }; + if content.is_dynamic() { + // Root Descriptor + let binding = native::Binding { + register: bind.binding as _, + space, + }; - if content.contains(r::DescriptorContent::CBV) { - descriptors.push(r::RootDescriptor { - offset: root_offset, - }); - parameters.push(native::RootParameter::cbv_descriptor(visibility, binding)); - root_offset += 2; + if content.contains(r::DescriptorContent::CBV) { + descriptors.push(r::RootDescriptor { + offset: root_offset, + }); + parameters + .push(native::RootParameter::cbv_descriptor(visibility, binding)); + root_offset += 2; + } else { + // SRV and UAV not implemented so far + unimplemented!() + } } else { - // SRV and UAV not implemented so far - unimplemented!() - } - } else { - // Descriptor table ranges - if content.contains(r::DescriptorContent::CBV) { - ranges.push(describe(bind, native::DescriptorRangeType::CBV)); - } - if content.contains(r::DescriptorContent::SRV) { - ranges.push(describe(bind, native::DescriptorRangeType::SRV)); - } - if content.contains(r::DescriptorContent::UAV) { - ranges.push(describe(bind, native::DescriptorRangeType::UAV)); + // Descriptor table ranges + if content.contains(r::DescriptorContent::CBV) { + ranges.push(describe(bind, native::DescriptorRangeType::CBV)); + } + if content.contains(r::DescriptorContent::SRV) { + ranges.push(describe(bind, native::DescriptorRangeType::SRV)); + } + if content.contains(r::DescriptorContent::UAV) { + ranges.push(describe(bind, native::DescriptorRangeType::UAV)); + mutable_bindings.insert(bind.binding); + } } } - } - if ranges.len() > range_base { - parameters.push(native::RootParameter::descriptor_table( - visibility, - &ranges[range_base 
..], - )); - table_type |= r::SRV_CBV_UAV; - root_offset += 1; - } - - range_base = ranges.len(); - for bind in set.bindings.iter() { - let content = r::DescriptorContent::from(bind.ty); - if content.contains(r::DescriptorContent::SAMPLER) { - ranges.push(describe(bind, native::DescriptorRangeType::Sampler)); + if ranges.len() > range_base { + parameters.push(native::RootParameter::descriptor_table( + visibility, + &ranges[range_base ..], + )); + table_type |= r::SRV_CBV_UAV; + root_offset += 1; } - } - if ranges.len() > range_base { - parameters.push(native::RootParameter::descriptor_table( - visibility, - &ranges[range_base ..], - )); - table_type |= r::SAMPLERS; - root_offset += 1; - } - r::RootElement { - table: r::RootTable { - ty: table_type, - offset: root_table_offset as _, - }, - descriptors, - } - }).collect(); + range_base = ranges.len(); + for bind in set.bindings.iter() { + let content = r::DescriptorContent::from(bind.ty); + if content.contains(r::DescriptorContent::SAMPLER) { + ranges.push(describe(bind, native::DescriptorRangeType::Sampler)); + } + } + if ranges.len() > range_base { + parameters.push(native::RootParameter::descriptor_table( + visibility, + &ranges[range_base ..], + )); + table_type |= r::SAMPLERS; + root_offset += 1; + } + + r::RootElement { + table: r::RootTable { + ty: table_type, + offset: root_table_offset as _, + }, + descriptors, + mutable_bindings, + } + }) + .collect(); // Ensure that we didn't reallocate! 
debug_assert_eq!(ranges.len(), total); @@ -1697,6 +1767,7 @@ impl d::Device for Device { Borrowed(native::Blob), None, } + let features = &self.features; impl ShaderBc { pub fn shader(&self) -> native::Shader { match *self { @@ -1714,7 +1785,7 @@ impl d::Device for Device { None => return Ok(ShaderBc::None), }; - match Self::extract_entry_point(stage, source, desc.layout) { + match Self::extract_entry_point(stage, source, desc.layout, features) { Ok((shader, true)) => Ok(ShaderBc::Owned(shader)), Ok((shader, false)) => Ok(ShaderBc::Borrowed(shader)), Err(err) => Err(pso::CreationError::Shader(err)), @@ -1820,7 +1891,7 @@ impl d::Device for Device { // Get associated subpass information let pass = { let subpass = &desc.subpass; - match subpass.main_pass.subpasses.get(subpass.index) { + match subpass.main_pass.subpasses.get(subpass.index as usize) { Some(subpass) => subpass, None => return Err(pso::CreationError::InvalidSubpass(subpass.index)), } @@ -1973,9 +2044,13 @@ impl d::Device for Device { desc: &pso::ComputePipelineDesc<'a, B>, _cache: Option<&()>, ) -> Result { - let (cs, cs_destroy) = - Self::extract_entry_point(pso::Stage::Compute, &desc.shader, desc.layout) - .map_err(|err| pso::CreationError::Shader(err))?; + let (cs, cs_destroy) = Self::extract_entry_point( + pso::Stage::Compute, + &desc.shader, + desc.layout, + &self.features, + ) + .map_err(|err| pso::CreationError::Shader(err))?; let (pipeline, hr) = self.raw.create_compute_pipeline_state( desc.layout.raw, @@ -2149,11 +2224,11 @@ impl d::Device for Device { Ok(()) } - unsafe fn create_buffer_view>( + unsafe fn create_buffer_view( &self, buffer: &r::Buffer, format: Option, - range: R, + sub: buffer::SubRange, ) -> Result { let buffer = buffer.expect_bound(); let buffer_features = { @@ -2162,17 +2237,17 @@ impl d::Device for Device { }; let (format, format_desc) = match format.and_then(conv::map_format) { Some(fmt) => (fmt, format.unwrap().surface_desc()), - None => return 
Err(buffer::ViewCreationError::UnsupportedFormat { format }), + None => return Err(buffer::ViewCreationError::UnsupportedFormat(format)), }; - let start = *range.start().unwrap_or(&0); - let end = *range.end().unwrap_or(&(buffer.requirements.size as _)); + let start = sub.offset; + let size = sub.size.unwrap_or(buffer.requirements.size - start); let bytes_per_texel = (format_desc.bits / 8) as u64; // Check if it adheres to the texel buffer offset limit assert_eq!(start % bytes_per_texel, 0); let first_element = start / bytes_per_texel; - let num_elements = (end - start) / bytes_per_texel; // rounds down to next smaller size + let num_elements = size / bytes_per_texel; // rounds down to next smaller size let handle_srv = if buffer_features.contains(format::BufferFeature::UNIFORM_TEXEL) { let mut desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC { @@ -2528,7 +2603,7 @@ impl d::Device for Device { format: format::Format, swizzle: format::Swizzle, range: image::SubresourceRange, - ) -> Result { + ) -> Result { let image = image.expect_bound(); let is_array = image.kind.num_layers() > 1; let mip_levels = (range.levels.start, range.levels.end); @@ -2546,7 +2621,7 @@ impl d::Device for Device { } else { view_kind }, - format: conv::map_format(format).ok_or(image::ViewError::BadFormat(format))?, + format: conv::map_format(format).ok_or(image::ViewCreationError::BadFormat(format))?, component_mapping: conv::map_swizzle(swizzle), range, }; @@ -2613,7 +2688,7 @@ impl d::Device for Device { info.min_filter, info.mip_filter, op, - info.anisotropic, + info.anisotropy_clamp, ), [ conv::map_wrap(info.wrap_mode.0), @@ -2621,10 +2696,7 @@ impl d::Device for Device { conv::map_wrap(info.wrap_mode.2), ], info.lod_bias.0, - match info.anisotropic { - image::Anisotropic::On(max) => max as _, // TODO: check support here? 
- image::Anisotropic::Off => 0, - }, + info.anisotropy_clamp.map_or(0, |aniso| aniso as u32), conv::map_comparison(info.comparison.unwrap_or(pso::Comparison::Always)), info.border.into(), info.lod_range.start.0 .. info.lod_range.end.0, @@ -2781,17 +2853,16 @@ impl d::Device for Device { let mut src_sampler = None; match *descriptor.borrow() { - pso::Descriptor::Buffer(buffer, ref range) => { + pso::Descriptor::Buffer(buffer, ref sub) => { let buffer = buffer.expect_bound(); if bind_info.content.is_dynamic() { // Root Descriptor - let buffer_offset = range.start.unwrap_or(0); let buffer_address = (*buffer.resource).GetGPUVirtualAddress(); - // Descriptor sets need to be externally synchronized according to specification let dynamic_descriptors = &mut *bind_info.dynamic_descriptors.get(); - dynamic_descriptors[offset as usize].gpu_buffer_location = buffer_address + buffer_offset; + dynamic_descriptors[offset as usize].gpu_buffer_location = + buffer_address + sub.offset; } else { // Descriptor table if update_pool_index == descriptor_update_pools.len() { @@ -2802,9 +2873,8 @@ impl d::Device for Device { max_size as _, )); } - let mut heap = descriptor_update_pools.last_mut().unwrap(); - let start = range.start.unwrap_or(0); - let end = range.end.unwrap_or(buffer.requirements.size as _); + let mut heap = descriptor_update_pools.pop().unwrap(); + let size = sub.size_to(buffer.requirements.size); if bind_info.content.contains(r::DescriptorContent::CBV) { // Making the size field of buffer requirements for uniform @@ -2812,17 +2882,17 @@ impl d::Device for Device { // alignment to 256 allows us to patch the size here. // We can always enforce the size to be aligned to 256 for // CBVs without going out-of-bounds. 
- let size = ((end - start) + 255) & !255; let desc = d3d12::D3D12_CONSTANT_BUFFER_VIEW_DESC { - BufferLocation: (*buffer.resource).GetGPUVirtualAddress() + start, - SizeInBytes: size as _, + BufferLocation: (*buffer.resource).GetGPUVirtualAddress() + + sub.offset, + SizeInBytes: ((size + 0xFF) & !0xFF) as _, }; let handle = heap.alloc_handle(); self.raw.CreateConstantBufferView(&desc, handle); src_cbv = Some(handle); } if bind_info.content.contains(r::DescriptorContent::SRV) { - assert_eq!((end - start) % 4, 0); + assert_eq!(size % 4, 0); let mut desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC { Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS, Shader4ComponentMapping: IDENTITY_MAPPING, @@ -2830,8 +2900,8 @@ impl d::Device for Device { u: mem::zeroed(), }; *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_SRV { - FirstElement: start as _, - NumElements: ((end - start) / 4) as _, + FirstElement: sub.offset as _, + NumElements: (size / 4) as _, StructureByteStride: 0, Flags: d3d12::D3D12_BUFFER_SRV_FLAG_RAW, }; @@ -2844,15 +2914,15 @@ impl d::Device for Device { src_srv = Some(handle); } if bind_info.content.contains(r::DescriptorContent::UAV) { - assert_eq!((end - start) % 4, 0); + assert_eq!(size % 4, 0); let mut desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS, ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER, u: mem::zeroed(), }; *desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV { - FirstElement: start as _, - NumElements: ((end - start) / 4) as _, + FirstElement: sub.offset as _, + NumElements: (size / 4) as _, StructureByteStride: 0, CounterOffsetInBytes: 0, Flags: d3d12::D3D12_BUFFER_UAV_FLAG_RAW, @@ -2861,12 +2931,15 @@ impl d::Device for Device { // pool is full, move to the next one update_pool_index += 1; let max_size = 1u64 << 12; //arbitrary - descriptor_update_pools.push(descriptors_cpu::HeapLinear::new( - self.raw, - native::DescriptorHeapType::CbvSrvUav, - max_size as _, - )); - heap = 
descriptor_update_pools.last_mut().unwrap(); + let full_heap = mem::replace( + &mut heap, + descriptors_cpu::HeapLinear::new( + self.raw, + native::DescriptorHeapType::CbvSrvUav, + max_size as _, + ), + ); + descriptor_update_pools.push(full_heap); } let handle = heap.alloc_handle(); self.raw.CreateUnorderedAccessView( @@ -2883,6 +2956,7 @@ impl d::Device for Device { // pool is full, move to the next one update_pool_index += 1; } + descriptor_update_pools.push(heap); } } pso::Descriptor::Image(image, _layout) => { @@ -2900,14 +2974,7 @@ impl d::Device for Device { pso::Descriptor::Sampler(sampler) => { src_sampler = Some(sampler.handle); } - pso::Descriptor::UniformTexelBuffer(buffer_view) => { - let handle = buffer_view.handle_srv; - src_srv = Some(handle); - if handle.ptr == 0 { - error!("SRV handle of the uniform texel buffer is zero (not supported by specified format)."); - } - } - pso::Descriptor::StorageTexelBuffer(buffer_view) => { + pso::Descriptor::TexelBuffer(buffer_view) => { if bind_info.content.contains(r::DescriptorContent::SRV) { let handle = buffer_view.handle_srv; src_srv = Some(handle); @@ -3065,25 +3132,21 @@ impl d::Device for Device { } } - unsafe fn map_memory(&self, memory: &r::Memory, range: R) -> Result<*mut u8, d::MapError> - where - R: RangeArg, - { - if let Some(mem) = memory.resource { - let start = range.start().unwrap_or(&0); - let end = range.end().unwrap_or(&memory.size); - assert!(start <= end); - - let mut ptr = ptr::null_mut(); - assert_eq!( - winerror::S_OK, - (*mem).Map(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }, &mut ptr) - ); - ptr = ptr.offset(*start as _); - Ok(ptr as *mut _) - } else { - panic!("Memory not created with a memory type exposing `CPU_VISIBLE`.") - } + unsafe fn map_memory( + &self, + memory: &r::Memory, + segment: memory::Segment, + ) -> Result<*mut u8, d::MapError> { + let mem = memory + .resource + .expect("Memory not created with a memory type exposing `CPU_VISIBLE`"); + let mut ptr = ptr::null_mut(); + 
assert_eq!( + winerror::S_OK, + (*mem).Map(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }, &mut ptr) + ); + ptr = ptr.offset(segment.offset as isize); + Ok(ptr as *mut _) } unsafe fn unmap_memory(&self, memory: &r::Memory) { @@ -3092,14 +3155,13 @@ impl d::Device for Device { } } - unsafe fn flush_mapped_memory_ranges<'a, I, R>(&self, ranges: I) -> Result<(), d::OutOfMemory> + unsafe fn flush_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), d::OutOfMemory> where I: IntoIterator, - I::Item: Borrow<(&'a r::Memory, R)>, - R: RangeArg, + I::Item: Borrow<(&'a r::Memory, memory::Segment)>, { for range in ranges { - let &(ref memory, ref range) = range.borrow(); + let &(ref memory, ref segment) = range.borrow(); if let Some(mem) = memory.resource { // map and immediately unmap, hoping that dx12 drivers internally cache // currently mapped buffers. @@ -3108,8 +3170,8 @@ impl d::Device for Device { (*mem).Map(0, &d3d12::D3D12_RANGE { Begin: 0, End: 0 }, ptr::null_mut()) ); - let start = *range.start().unwrap_or(&0); - let end = *range.end().unwrap_or(&memory.size); // TODO: only need to be end of current mapping + let start = segment.offset; + let end = segment.size.map_or(memory.size, |s| start + s); // TODO: only need to be end of current mapping (*mem).Unmap( 0, @@ -3124,20 +3186,16 @@ impl d::Device for Device { Ok(()) } - unsafe fn invalidate_mapped_memory_ranges<'a, I, R>( - &self, - ranges: I, - ) -> Result<(), d::OutOfMemory> + unsafe fn invalidate_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), d::OutOfMemory> where I: IntoIterator, - I::Item: Borrow<(&'a r::Memory, R)>, - R: RangeArg, + I::Item: Borrow<(&'a r::Memory, memory::Segment)>, { for range in ranges { - let &(ref memory, ref range) = range.borrow(); + let &(ref memory, ref segment) = range.borrow(); if let Some(mem) = memory.resource { - let start = *range.start().unwrap_or(&0); - let end = *range.end().unwrap_or(&memory.size); // TODO: only need to be end of current mapping + let 
start = segment.offset; + let end = segment.size.map_or(memory.size, |s| start + s); // TODO: only need to be end of current mapping // map and immediately unmap, hoping that dx12 drivers internally cache // currently mapped buffers. @@ -3467,7 +3525,7 @@ impl d::Device for Device { unsafe fn set_command_buffer_name( &self, _command_buffer: &mut cmd::CommandBuffer, - _name: &str + _name: &str, ) { // TODO } diff --git a/third_party/rust/gfx-backend-dx12/src/internal.rs b/third_party/rust/gfx-backend-dx12/src/internal.rs index 5890a2d24b6b..90d7e2f08236 100644 --- a/third_party/rust/gfx-backend-dx12/src/internal.rs +++ b/third_party/rust/gfx-backend-dx12/src/internal.rs @@ -1,13 +1,21 @@ use auxil::FastHashMap; -use std::ffi::CStr; -use std::sync::{Arc, Mutex}; -use std::{mem, ptr}; +use std::{ + ffi::CStr, + mem, + ptr, + sync::{Arc, Mutex}, +}; -use d3d12; -use winapi::shared::minwindef::{FALSE, TRUE}; -use winapi::shared::{dxgiformat, dxgitype, winerror}; -use winapi::um::d3d12::*; -use winapi::Interface; +use winapi::{ + shared::{ + dxgiformat, + dxgitype, + minwindef::{FALSE, TRUE}, + winerror, + }, + um::d3d12::{self, *}, + Interface, +}; use native; @@ -132,7 +140,9 @@ impl ServicePipes { } let (signature, _hr) = self.device.create_root_signature(signature_raw, 0); - unsafe { signature_raw.destroy(); } + unsafe { + signature_raw.destroy(); + } let shader_src = include_bytes!("../shaders/blit.hlsl"); // TODO: check results diff --git a/third_party/rust/gfx-backend-dx12/src/lib.rs b/third_party/rust/gfx-backend-dx12/src/lib.rs index 6d064f822215..5f77cbdab10f 100644 --- a/third_party/rust/gfx-backend-dx12/src/lib.rs +++ b/third_party/rust/gfx-backend-dx12/src/lib.rs @@ -1,14 +1,7 @@ -extern crate gfx_hal as hal; -extern crate auxil; -extern crate range_alloc; #[macro_use] extern crate bitflags; -extern crate d3d12 as native; #[macro_use] extern crate log; -extern crate smallvec; -extern crate spirv_cross; -extern crate winapi; mod command; mod conv; @@ -20,21 
+13,34 @@ mod resource; mod root_constants; mod window; -use hal::pso::PipelineStage; -use hal::{adapter, format as f, image, memory, queue as q, Features, Limits}; +use hal::{ + adapter, + format as f, + image, + memory, + pso::PipelineStage, + queue as q, + Features, + Hints, + Limits, +}; -use winapi::shared::minwindef::TRUE; -use winapi::shared::{dxgi, dxgi1_2, dxgi1_4, dxgi1_6, winerror}; -use winapi::um::{d3d12, d3d12sdklayers, handleapi, synchapi, winbase}; -use winapi::Interface; +use winapi::{ + shared::{dxgi, dxgi1_2, dxgi1_4, dxgi1_6, minwindef::TRUE, winerror}, + um::{d3d12, d3d12sdklayers, handleapi, synchapi, winbase}, + Interface, +}; -use std::borrow::Borrow; -use std::ffi::OsString; -use std::os::windows::ffi::OsStringExt; -use std::sync::{Arc, Mutex}; -use std::{fmt, mem}; +use std::{ + borrow::Borrow, + ffi::OsString, + fmt, + mem, + os::windows::ffi::OsStringExt, + sync::{Arc, Mutex}, +}; -use descriptors_cpu::DescriptorCpuPool; +use self::descriptors_cpu::DescriptorCpuPool; #[derive(Debug)] pub(crate) struct HeapProperties { @@ -177,6 +183,7 @@ pub struct PhysicalDevice { library: Arc, adapter: native::WeakPtr, features: Features, + hints: Hints, limits: Limits, format_properties: Arc, private_caps: Capabilities, @@ -212,10 +219,10 @@ impl adapter::PhysicalDevice for PhysicalDevice { return Err(hal::device::CreationError::MissingFeature); } - let device_raw = match self.library.create_device( - self.adapter, - native::FeatureLevel::L11_0, - ) { + let device_raw = match self + .library + .create_device(self.adapter, native::FeatureLevel::L11_0) + { Ok((device, hr)) if winerror::SUCCEEDED(hr) => device, Ok((_, hr)) => { error!("error on device creation: {:x}", hr); @@ -236,6 +243,7 @@ impl adapter::PhysicalDevice for PhysicalDevice { } let mut device = Device::new(device_raw, &self, present_queue); + device.features = requested_features; let queue_groups = families .into_iter() @@ -405,6 +413,11 @@ impl adapter::PhysicalDevice for PhysicalDevice 
{ fn features(&self) -> Features { self.features } + + fn hints(&self) -> Hints { + self.hints + } + fn limits(&self) -> Limits { self.limits } @@ -555,6 +568,7 @@ pub struct Device { raw: native::Device, library: Arc, private_caps: Capabilities, + features: Features, format_properties: Arc, heap_properties: &'static [HeapProperties], // CPU only pools @@ -606,8 +620,12 @@ impl Device { 1_000_000, // maximum number of CBV/SRV/UAV descriptors in heap for Tier 1 ); - let heap_sampler = - Self::create_descriptor_heap_impl(device, native::DescriptorHeapType::Sampler, true, 2_048); + let heap_sampler = Self::create_descriptor_heap_impl( + device, + native::DescriptorHeapType::Sampler, + true, + 2_048, + ); let draw_signature = Self::create_command_signature(device, device::CommandSignature::Draw); let draw_indexed_signature = @@ -620,10 +638,8 @@ impl Device { draw_indexed: draw_indexed_signature, dispatch: dispatch_signature, }; - let service_pipes = internal::ServicePipes::new( - device, - Arc::clone(&physical_device.library), - ); + let service_pipes = + internal::ServicePipes::new(device, Arc::clone(&physical_device.library)); let shared = Shared { signatures, service_pipes, @@ -633,6 +649,7 @@ impl Device { raw: device, library: Arc::clone(&physical_device.library), private_caps: physical_device.private_caps, + features: Features::empty(), format_properties: physical_device.format_properties.clone(), heap_properties: physical_device.heap_properties, rtv_pool: Mutex::new(rtv_pool), @@ -699,6 +716,7 @@ impl Drop for Device { pub struct Instance { pub(crate) factory: native::Factory4, library: Arc, + lib_dxgi: native::DxgiLib, } impl Drop for Instance { @@ -725,7 +743,9 @@ impl hal::Instance for Instance { match lib_main.get_debug_interface() { Ok((debug_controller, hr)) if winerror::SUCCEEDED(hr) => { debug_controller.enable_layer(); - unsafe { debug_controller.Release() }; + unsafe { + debug_controller.Release(); + } } _ => { warn!("Unable to get D3D12 debug 
interface"); @@ -751,7 +771,7 @@ impl hal::Instance for Instance { Ok((factory, hr)) if winerror::SUCCEEDED(hr) => factory, Ok((_, hr)) => { info!("Failed on dxgi factory creation: {:?}", hr); - return Err(hal::UnsupportedBackend) + return Err(hal::UnsupportedBackend); } Err(_) => return Err(hal::UnsupportedBackend), }; @@ -759,6 +779,7 @@ impl hal::Instance for Instance { Ok(Instance { factory, library: Arc::new(lib_main), + lib_dxgi, }) } @@ -825,7 +846,10 @@ impl hal::Instance for Instance { // Check for D3D12 support // Create temporary device to get physical device information - let device = match self.library.create_device(adapter, native::FeatureLevel::L11_0) { + let device = match self + .library + .create_device(adapter, native::FeatureLevel::L11_0) + { Ok((device, hr)) if winerror::SUCCEEDED(hr) => device, _ => continue, }; @@ -1024,10 +1048,10 @@ impl hal::Instance for Instance { }; let query_memory = |segment: dxgi1_4::DXGI_MEMORY_SEGMENT_GROUP| unsafe { - let mut mem_info: dxgi1_4::DXGI_QUERY_VIDEO_MEMORY_INFO = mem::uninitialized(); + let mut mem_info: dxgi1_4::DXGI_QUERY_VIDEO_MEMORY_INFO = mem::zeroed(); assert_eq!( winerror::S_OK, - adapter.QueryVideoMemoryInfo(0, segment, &mut mem_info,) + adapter.QueryVideoMemoryInfo(0, segment, &mut mem_info) ); mem_info.Budget }; @@ -1061,7 +1085,11 @@ impl hal::Instance for Instance { Features::FORMAT_BC | Features::INSTANCE_RATE | Features::SAMPLER_MIP_LOD_BIAS | - Features::SAMPLER_ANISOTROPY, + Features::SAMPLER_ANISOTROPY | + Features::SAMPLER_MIRROR_CLAMP_EDGE | + Features::NDC_Y_UP, + hints: + Hints::BASE_VERTEX_INSTANCE_DRAWING, limits: Limits { // TODO max_image_1d_size: d3d12::D3D12_REQ_TEXTURE1D_U_DIMENSION as _, max_image_2d_size: d3d12::D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION as _, diff --git a/third_party/rust/gfx-backend-dx12/src/pool.rs b/third_party/rust/gfx-backend-dx12/src/pool.rs index 8549a6024bee..7a6b53e239ad 100644 --- a/third_party/rust/gfx-backend-dx12/src/pool.rs +++ 
b/third_party/rust/gfx-backend-dx12/src/pool.rs @@ -1,12 +1,9 @@ -use std::sync::Arc; -use std::fmt; +use std::{fmt, sync::Arc}; use winapi::shared::winerror::SUCCEEDED; -use command::CommandBuffer; +use crate::{command::CommandBuffer, Backend, Shared}; use hal::{command, pool}; -use native; -use {Backend, Shared}; #[derive(Debug)] pub enum CommandPoolAllocator { diff --git a/third_party/rust/gfx-backend-dx12/src/resource.rs b/third_party/rust/gfx-backend-dx12/src/resource.rs index 7c69b4e73aba..e9f350799860 100644 --- a/third_party/rust/gfx-backend-dx12/src/resource.rs +++ b/third_party/rust/gfx-backend-dx12/src/resource.rs @@ -1,16 +1,14 @@ -use winapi::shared::dxgiformat::DXGI_FORMAT; -use winapi::shared::minwindef::UINT; -use winapi::um::d3d12; +use winapi::{ + shared::{dxgiformat::DXGI_FORMAT, minwindef::UINT}, + um::d3d12, +}; use hal::{buffer, format, image, memory, pass, pso}; use range_alloc::RangeAllocator; use crate::{root_constants::RootConstant, Backend, MAX_VERTEX_BUFFERS}; -use std::collections::BTreeMap; -use std::fmt; -use std::ops::Range; -use std::cell::UnsafeCell; +use std::{cell::UnsafeCell, collections::BTreeMap, fmt, ops::Range}; // ShaderModule is either a precompiled if the source comes from HLSL or // the SPIR-V module doesn't contain specialization constants or push constants @@ -143,13 +141,14 @@ pub struct RootDescriptor { pub offset: RootSignatureOffset, } -#[derive(Debug, Hash)] +#[derive(Debug)] pub struct RootElement { pub table: RootTable, pub descriptors: Vec, + pub mutable_bindings: auxil::FastHashSet, } -#[derive(Debug, Hash)] +#[derive(Debug)] pub struct PipelineLayout { pub(crate) raw: native::RootSignature, // Disjunct, sorted vector of root constant ranges. 
@@ -458,21 +457,55 @@ impl DescriptorContent { impl From for DescriptorContent { fn from(ty: pso::DescriptorType) -> Self { - use hal::pso::DescriptorType as Dt; + use hal::pso::{ + BufferDescriptorFormat as Bdf, + BufferDescriptorType as Bdt, + DescriptorType as Dt, + ImageDescriptorType as Idt, + }; + + use DescriptorContent as Dc; + match ty { - Dt::Sampler => DescriptorContent::SAMPLER, - Dt::CombinedImageSampler => DescriptorContent::SRV | DescriptorContent::SAMPLER, - Dt::SampledImage | Dt::InputAttachment | Dt::UniformTexelBuffer => { - DescriptorContent::SRV - } - Dt::StorageImage | Dt::StorageBuffer | Dt::StorageTexelBuffer => { - DescriptorContent::SRV | DescriptorContent::UAV - } - Dt::StorageBufferDynamic => { - DescriptorContent::SRV | DescriptorContent::UAV | DescriptorContent::DYNAMIC - } - Dt::UniformBuffer => DescriptorContent::CBV, - Dt::UniformBufferDynamic => DescriptorContent::CBV | DescriptorContent::DYNAMIC, + Dt::Sampler => Dc::SAMPLER, + Dt::Image { ty } => match ty { + Idt::Storage { read_only: true } => Dc::SRV, + Idt::Storage { read_only: false } => Dc::SRV | Dc::UAV, + Idt::Sampled { with_sampler } => match with_sampler { + true => Dc::SRV | Dc::SAMPLER, + false => Dc::SRV, + }, + }, + Dt::Buffer { ty, format } => match ty { + Bdt::Storage { read_only: true } => match format { + Bdf::Structured { + dynamic_offset: true, + } => Dc::SRV | Dc::DYNAMIC, + Bdf::Structured { + dynamic_offset: false, + } + | Bdf::Texel => Dc::SRV, + }, + Bdt::Storage { read_only: false } => match format { + Bdf::Structured { + dynamic_offset: true, + } => Dc::SRV | Dc::UAV | Dc::DYNAMIC, + Bdf::Structured { + dynamic_offset: false, + } + | Bdf::Texel => Dc::SRV | Dc::UAV, + }, + Bdt::Uniform => match format { + Bdf::Structured { + dynamic_offset: true, + } => Dc::CBV | Dc::DYNAMIC, + Bdf::Structured { + dynamic_offset: false, + } => Dc::CBV, + Bdf::Texel => Dc::SRV, + }, + }, + Dt::InputAttachment => Dc::SRV, } } } @@ -682,25 +715,26 @@ impl 
pso::DescriptorPool for DescriptorPool { None }; - let sampler_range = if content.intersects(DescriptorContent::SAMPLER) && !content.is_dynamic() { - let count = binding.count as u64; - debug!("\tsampler handles: {}", count); - let handle = self - .heap_sampler - .alloc_handles(count) - .ok_or(pso::AllocationError::OutOfPoolMemory)?; - if first_gpu_sampler.is_none() { - first_gpu_sampler = Some(handle.gpu); - } - Some(DescriptorRange { - handle, - ty: binding.ty, - count, - handle_size: self.heap_sampler.handle_size, - }) - } else { - None - }; + let sampler_range = + if content.intersects(DescriptorContent::SAMPLER) && !content.is_dynamic() { + let count = binding.count as u64; + debug!("\tsampler handles: {}", count); + let handle = self + .heap_sampler + .alloc_handles(count) + .ok_or(pso::AllocationError::OutOfPoolMemory)?; + if first_gpu_sampler.is_none() { + first_gpu_sampler = Some(handle.gpu); + } + Some(DescriptorRange { + handle, + ty: binding.ty, + count, + handle_size: self.heap_sampler.handle_size, + }) + } else { + None + }; (view_range, sampler_range, Vec::new()) }; diff --git a/third_party/rust/gfx-backend-dx12/src/root_constants.rs b/third_party/rust/gfx-backend-dx12/src/root_constants.rs index 1e4c7ab35db4..6a8f1fb48806 100644 --- a/third_party/rust/gfx-backend-dx12/src/root_constants.rs +++ b/third_party/rust/gfx-backend-dx12/src/root_constants.rs @@ -6,9 +6,7 @@ //! ranges. The disjunct ranges can be then converted into root signature entries. 
use hal::pso; -use std::borrow::Borrow; -use std::cmp::Ordering; -use std::ops::Range; +use std::{borrow::Borrow, cmp::Ordering, ops::Range}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct RootConstant { diff --git a/third_party/rust/gfx-backend-dx12/src/window.rs b/third_party/rust/gfx-backend-dx12/src/window.rs index 3a894c73ae75..9e64d9c4c9a3 100644 --- a/third_party/rust/gfx-backend-dx12/src/window.rs +++ b/third_party/rust/gfx-backend-dx12/src/window.rs @@ -1,17 +1,16 @@ -use std::collections::VecDeque; -use std::{fmt, mem}; +use std::{collections::VecDeque, fmt, mem, os::raw::c_void}; -use winapi::shared::{ - dxgi1_4, - windef::{HWND, RECT}, - winerror, +use winapi::{ + shared::{ + dxgi1_4, + windef::{HWND, RECT}, + winerror, + }, + um::winuser::GetClientRect, }; -use winapi::um::winuser::GetClientRect; +use crate::{conv, resource as r, Backend, Device, Instance, PhysicalDevice, QueueFamily}; use hal::{self, device::Device as _, format as f, image as i, window as w}; -use {conv, native, resource as r, Backend, Device, Instance, PhysicalDevice, QueueFamily}; - -use std::os::raw::c_void; impl Instance { pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface { @@ -77,7 +76,7 @@ impl w::Surface for Surface { }; w::SurfaceCapabilities { - present_modes: w::PresentMode::FIFO, //TODO + present_modes: w::PresentMode::FIFO, //TODO composite_alpha_modes: w::CompositeAlphaMode::OPAQUE, //TODO image_count: 2 ..= 16, // we currently use a flip effect which supports 2..=16 buffers current_extent, @@ -94,7 +93,7 @@ impl w::Surface for Surface { } fn supported_formats(&self, _physical_device: &PhysicalDevice) -> Option> { - Some(vec![ + Some(vec![ f::Format::Bgra8Srgb, f::Format::Bgra8Unorm, f::Format::Rgba8Srgb, diff --git a/third_party/rust/gfx-backend-empty/.cargo-checksum.json b/third_party/rust/gfx-backend-empty/.cargo-checksum.json index 5e856666b5b3..d38dbf0eb83e 100644 --- a/third_party/rust/gfx-backend-empty/.cargo-checksum.json +++ 
b/third_party/rust/gfx-backend-empty/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"b92fafa311fcd3d2553f556adce6b0a44f74ffc65eed9746de32754cd4625d8f","src/lib.rs":"1c84f39df58771de5f35dca2e4ef6fd00e61eea82fbd7f53b417b2c5778ec9a4"},"package":"3d383e6bc48867cb37d298a20139fd1eec298f8f6d594690cd1c50ef25470cc7"} \ No newline at end of file +{"files":{"Cargo.toml":"d9f2268e462b7f65a2523a9c938c0088832ee64779bb6f062a767e88cd0a9c3e","src/lib.rs":"152fcf079601a6078fef0025dab3969346ce0f2112ef274c37fe9a3d856cc7c8"},"package":"b67bd2d7bc022b257ddbdabc5fa3b10c29c292372c3409f2b6a6e3f4e11cdb85"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-empty/Cargo.toml b/third_party/rust/gfx-backend-empty/Cargo.toml index 8bab6ab59883..d88070861959 100644 --- a/third_party/rust/gfx-backend-empty/Cargo.toml +++ b/third_party/rust/gfx-backend-empty/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "gfx-backend-empty" -version = "0.4.0" +version = "0.5.0" authors = ["The Gfx-rs Developers"] description = "Empty backend for gfx-rs" documentation = "https://docs.rs/gfx-backend-empty" @@ -22,7 +22,7 @@ license = "MIT OR Apache-2.0" [lib] name = "gfx_backend_empty" [dependencies.gfx-hal] -version = "0.4" +version = "0.5" [dependencies.raw-window-handle] version = "0.3" diff --git a/third_party/rust/gfx-backend-empty/src/lib.rs b/third_party/rust/gfx-backend-empty/src/lib.rs old mode 100755 new mode 100644 index 5f2d841e3ebb..f631723aba01 --- a/third_party/rust/gfx-backend-empty/src/lib.rs +++ b/third_party/rust/gfx-backend-empty/src/lib.rs @@ -1,1021 +1,1022 @@ -//! Dummy backend implementation to test the code for compile errors -//! outside of the graphics development environment. 
- -extern crate gfx_hal as hal; - -use hal::range::RangeArg; -use hal::{ - adapter, - buffer, - command, - device, - format, - image, - memory, - pass, - pool, - pso, - query, - queue, - window, -}; -use std::borrow::Borrow; -use std::ops::Range; - -const DO_NOT_USE_MESSAGE: &str = "You need to enable a native API feature (vulkan/metal/dx11/dx12/gl/wgl) in order to use gfx-rs"; - -/// Dummy backend. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum Backend {} -impl hal::Backend for Backend { - type Instance = Instance; - type PhysicalDevice = PhysicalDevice; - type Device = Device; - - type Surface = Surface; - type Swapchain = Swapchain; - - type QueueFamily = QueueFamily; - type CommandQueue = CommandQueue; - type CommandBuffer = CommandBuffer; - - type Memory = (); - type CommandPool = CommandPool; - - type ShaderModule = (); - type RenderPass = (); - type Framebuffer = (); - - type Buffer = (); - type BufferView = (); - type Image = (); - type ImageView = (); - type Sampler = (); - - type ComputePipeline = (); - type GraphicsPipeline = (); - type PipelineCache = (); - type PipelineLayout = (); - type DescriptorSetLayout = (); - type DescriptorPool = DescriptorPool; - type DescriptorSet = (); - - type Fence = (); - type Semaphore = (); - type Event = (); - type QueryPool = (); -} - -/// Dummy physical device. 
-#[derive(Debug)] -pub struct PhysicalDevice; -impl adapter::PhysicalDevice for PhysicalDevice { - unsafe fn open( - &self, - _: &[(&QueueFamily, &[queue::QueuePriority])], - _: hal::Features, - ) -> Result, device::CreationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - fn format_properties(&self, _: Option) -> format::Properties { - panic!(DO_NOT_USE_MESSAGE) - } - - fn image_format_properties( - &self, - _: format::Format, - _dim: u8, - _: image::Tiling, - _: image::Usage, - _: image::ViewCapabilities, - ) -> Option { - panic!(DO_NOT_USE_MESSAGE) - } - - fn memory_properties(&self) -> adapter::MemoryProperties { - panic!(DO_NOT_USE_MESSAGE) - } - - fn features(&self) -> hal::Features { - panic!(DO_NOT_USE_MESSAGE) - } - - fn limits(&self) -> hal::Limits { - panic!(DO_NOT_USE_MESSAGE) - } -} - -/// Dummy command queue doing nothing. -#[derive(Debug)] -pub struct CommandQueue; -impl queue::CommandQueue for CommandQueue { - unsafe fn submit<'a, T, Ic, S, Iw, Is>( - &mut self, - _: queue::Submission, - _: Option<&()>, - ) where - T: 'a + Borrow, - Ic: IntoIterator, - S: 'a + Borrow<()>, - Iw: IntoIterator, - Is: IntoIterator, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn present<'a, W, Is, S, Iw>( - &mut self, - _: Is, - _: Iw, - ) -> Result, window::PresentError> - where - W: 'a + Borrow, - Is: IntoIterator, - S: 'a + Borrow<()>, - Iw: IntoIterator, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn present_surface( - &mut self, - _surface: &mut Surface, - _image: (), - _wait_semaphore: Option<&()>, - ) -> Result, window::PresentError> { - panic!(DO_NOT_USE_MESSAGE) - } - - fn wait_idle(&self) -> Result<(), device::OutOfMemory> { - panic!(DO_NOT_USE_MESSAGE) - } -} - -/// Dummy device doing nothing. 
-#[derive(Debug)] -pub struct Device; -impl device::Device for Device { - unsafe fn create_command_pool( - &self, - _: queue::QueueFamilyId, - _: pool::CommandPoolCreateFlags, - ) -> Result { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_command_pool(&self, _: CommandPool) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn allocate_memory( - &self, - _: hal::MemoryTypeId, - _: u64, - ) -> Result<(), device::AllocationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_render_pass<'a, IA, IS, ID>( - &self, - _: IA, - _: IS, - _: ID, - ) -> Result<(), device::OutOfMemory> - where - IA: IntoIterator, - IA::Item: Borrow, - IS: IntoIterator, - IS::Item: Borrow>, - ID: IntoIterator, - ID::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_pipeline_layout(&self, _: IS, _: IR) -> Result<(), device::OutOfMemory> - where - IS: IntoIterator, - IS::Item: Borrow<()>, - IR: IntoIterator, - IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_pipeline_cache( - &self, - _data: Option<&[u8]>, - ) -> Result<(), device::OutOfMemory> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result, device::OutOfMemory> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_pipeline_cache(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_graphics_pipeline<'a>( - &self, - _: &pso::GraphicsPipelineDesc<'a, Backend>, - _: Option<&()>, - ) -> Result<(), pso::CreationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_compute_pipeline<'a>( - &self, - _: &pso::ComputePipelineDesc<'a, Backend>, - _: Option<&()>, - ) -> Result<(), pso::CreationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn merge_pipeline_caches(&self, _: &(), _: I) -> Result<(), device::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<()>, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_framebuffer( - &self, - _: &(), - _: I, - _: 
image::Extent, - ) -> Result<(), device::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<()>, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_shader_module(&self, _: &[u32]) -> Result<(), device::ShaderError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_sampler(&self, _: &image::SamplerDesc) -> Result<(), device::AllocationError> { - panic!(DO_NOT_USE_MESSAGE) - } - unsafe fn create_buffer(&self, _: u64, _: buffer::Usage) -> Result<(), buffer::CreationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn get_buffer_requirements(&self, _: &()) -> memory::Requirements { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn bind_buffer_memory( - &self, - _: &(), - _: u64, - _: &mut (), - ) -> Result<(), device::BindError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_buffer_view>( - &self, - _: &(), - _: Option, - _: R, - ) -> Result<(), buffer::ViewCreationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_image( - &self, - _: image::Kind, - _: image::Level, - _: format::Format, - _: image::Tiling, - _: image::Usage, - _: image::ViewCapabilities, - ) -> Result<(), image::CreationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn get_image_requirements(&self, _: &()) -> memory::Requirements { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn get_image_subresource_footprint( - &self, - _: &(), - _: image::Subresource, - ) -> image::SubresourceFootprint { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn bind_image_memory( - &self, - _: &(), - _: u64, - _: &mut (), - ) -> Result<(), device::BindError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_image_view( - &self, - _: &(), - _: image::ViewKind, - _: format::Format, - _: format::Swizzle, - _: image::SubresourceRange, - ) -> Result<(), image::ViewError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_descriptor_pool( - &self, - _: usize, - _: I, - _: pso::DescriptorPoolCreateFlags, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow, - { - 
panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_descriptor_set_layout( - &self, - _: I, - _: J, - ) -> Result<(), device::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow<()>, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn write_descriptor_sets<'a, I, J>(&self, _: I) - where - I: IntoIterator>, - J: IntoIterator, - J::Item: Borrow>, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn copy_descriptor_sets<'a, I>(&self, _: I) - where - I: IntoIterator, - I::Item: Borrow>, - { - panic!(DO_NOT_USE_MESSAGE) - } - - fn create_semaphore(&self) -> Result<(), device::OutOfMemory> { - panic!(DO_NOT_USE_MESSAGE) - } - - fn create_fence(&self, _: bool) -> Result<(), device::OutOfMemory> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn get_fence_status(&self, _: &()) -> Result { - panic!(DO_NOT_USE_MESSAGE) - } - - fn create_event(&self) -> Result<(), device::OutOfMemory> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn get_event_status(&self, _: &()) -> Result { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_event(&self, _: &()) -> Result<(), device::OutOfMemory> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn reset_event(&self, _: &()) -> Result<(), device::OutOfMemory> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_query_pool(&self, _: query::Type, _: u32) -> Result<(), query::CreationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_query_pool(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn get_query_pool_results( - &self, - _: &(), - _: Range, - _: &mut [u8], - _: buffer::Offset, - _: query::ResultFlags, - ) -> Result { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn map_memory>( - &self, - _: &(), - _: R, - ) -> Result<*mut u8, device::MapError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn unmap_memory(&self, _: &()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn flush_mapped_memory_ranges<'a, I, R>(&self, _: I) -> Result<(), device::OutOfMemory> - where - I: 
IntoIterator, - I::Item: Borrow<(&'a (), R)>, - R: RangeArg, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn invalidate_mapped_memory_ranges<'a, I, R>( - &self, - _: I, - ) -> Result<(), device::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<(&'a (), R)>, - R: RangeArg, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn free_memory(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_shader_module(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_render_pass(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_pipeline_layout(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - unsafe fn destroy_graphics_pipeline(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - unsafe fn destroy_compute_pipeline(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - unsafe fn destroy_framebuffer(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_buffer(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - unsafe fn destroy_buffer_view(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - unsafe fn destroy_image(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - unsafe fn destroy_image_view(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - unsafe fn destroy_sampler(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_descriptor_pool(&self, _: DescriptorPool) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_descriptor_set_layout(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_fence(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_semaphore(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_event(&self, _: ()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn create_swapchain( - &self, - _: &mut Surface, - _: window::SwapchainConfig, - _: Option, - ) -> Result<(Swapchain, Vec<()>), hal::window::CreationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_swapchain(&self, _: Swapchain) { - 
panic!(DO_NOT_USE_MESSAGE) - } - - fn wait_idle(&self) -> Result<(), device::OutOfMemory> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_image_name(&self, _: &mut (), _: &str) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_buffer_name(&self, _: &mut (), _: &str) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_command_buffer_name(&self, _: &mut CommandBuffer, _: &str) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_semaphore_name(&self, _: &mut (), _: &str) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_fence_name(&self, _: &mut (), _: &str) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_framebuffer_name(&self, _: &mut (), _: &str) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_render_pass_name(&self, _: &mut (), _: &str) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_descriptor_set_name(&self, _: &mut (), _: &str) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_descriptor_set_layout_name(&self, _: &mut (), _: &str) { - panic!(DO_NOT_USE_MESSAGE) - } -} - -#[derive(Debug)] -pub struct QueueFamily; -impl queue::QueueFamily for QueueFamily { - fn queue_type(&self) -> queue::QueueType { - panic!(DO_NOT_USE_MESSAGE) - } - fn max_queues(&self) -> usize { - panic!(DO_NOT_USE_MESSAGE) - } - fn id(&self) -> queue::QueueFamilyId { - panic!(DO_NOT_USE_MESSAGE) - } -} - -/// Dummy raw command pool. -#[derive(Debug)] -pub struct CommandPool; -impl pool::CommandPool for CommandPool { - unsafe fn reset(&mut self, _: bool) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn free(&mut self, _: I) - where - I: IntoIterator, - { - panic!(DO_NOT_USE_MESSAGE) - } -} - -/// Dummy command buffer, which ignores all the calls. 
-#[derive(Debug)] -pub struct CommandBuffer; -impl command::CommandBuffer for CommandBuffer { - unsafe fn begin( - &mut self, - _: command::CommandBufferFlags, - _: command::CommandBufferInheritanceInfo, - ) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn finish(&mut self) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn reset(&mut self, _: bool) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn pipeline_barrier<'a, T>( - &mut self, - _: Range, - _: memory::Dependencies, - _: T, - ) where - T: IntoIterator, - T::Item: Borrow>, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn fill_buffer(&mut self, _: &(), _: R, _: u32) - where - R: RangeArg, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn update_buffer(&mut self, _: &(), _: buffer::Offset, _: &[u8]) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn clear_image(&mut self, _: &(), _: image::Layout, _: command::ClearValue, _: T) - where - T: IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn clear_attachments(&mut self, _: T, _: U) - where - T: IntoIterator, - T::Item: Borrow, - U: IntoIterator, - U::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn resolve_image(&mut self, _: &(), _: image::Layout, _: &(), _: image::Layout, _: T) - where - T: IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn blit_image( - &mut self, - _: &(), - _: image::Layout, - _: &(), - _: image::Layout, - _: image::Filter, - _: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn bind_index_buffer(&mut self, _: buffer::IndexBufferView) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn bind_vertex_buffers(&mut self, _: u32, _: I) - where - I: IntoIterator, - T: Borrow<()>, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_viewports(&mut self, _: u32, _: T) - where - T: IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_scissors(&mut self, _: u32, _: T) - where - T: 
IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_stencil_reference(&mut self, _: pso::Face, _: pso::StencilValue) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_stencil_read_mask(&mut self, _: pso::Face, _: pso::StencilValue) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_stencil_write_mask(&mut self, _: pso::Face, _: pso::StencilValue) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_blend_constants(&mut self, _: pso::ColorValue) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_depth_bounds(&mut self, _: Range) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_line_width(&mut self, _: f32) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_depth_bias(&mut self, _: pso::DepthBias) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn begin_render_pass( - &mut self, - _: &(), - _: &(), - _: pso::Rect, - _: T, - _: command::SubpassContents, - ) where - T: IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn next_subpass(&mut self, _: command::SubpassContents) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn end_render_pass(&mut self) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn bind_graphics_pipeline(&mut self, _: &()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn bind_graphics_descriptor_sets(&mut self, _: &(), _: usize, _: I, _: J) - where - I: IntoIterator, - I::Item: Borrow<()>, - J: IntoIterator, - J::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn bind_compute_pipeline(&mut self, _: &()) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn bind_compute_descriptor_sets(&mut self, _: &(), _: usize, _: I, _: J) - where - I: IntoIterator, - I::Item: Borrow<()>, - J: IntoIterator, - J::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn dispatch(&mut self, _: hal::WorkGroupCount) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn dispatch_indirect(&mut self, _: &(), _: buffer::Offset) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn 
copy_buffer(&mut self, _: &(), _: &(), _: T) - where - T: IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn copy_image(&mut self, _: &(), _: image::Layout, _: &(), _: image::Layout, _: T) - where - T: IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn copy_buffer_to_image(&mut self, _: &(), _: &(), _: image::Layout, _: T) - where - T: IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn copy_image_to_buffer(&mut self, _: &(), _: image::Layout, _: &(), _: T) - where - T: IntoIterator, - T::Item: Borrow, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn draw(&mut self, _: Range, _: Range) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn draw_indexed( - &mut self, - _: Range, - _: hal::VertexOffset, - _: Range, - ) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn draw_indirect(&mut self, _: &(), _: buffer::Offset, _: hal::DrawCount, _: u32) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn draw_indexed_indirect( - &mut self, - _: &(), - _: buffer::Offset, - _: hal::DrawCount, - _: u32, - ) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn reset_event(&mut self, _: &(), _: pso::PipelineStage) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn wait_events<'a, I, J>(&mut self, _: I, _: Range, _: J) - where - I: IntoIterator, - I::Item: Borrow<()>, - J: IntoIterator, - J::Item: Borrow>, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn begin_query(&mut self, _: query::Query, _: query::ControlFlags) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn end_query(&mut self, _: query::Query) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn reset_query_pool(&mut self, _: &(), _: Range) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn copy_query_pool_results( - &mut self, - _: &(), - _: Range, - _: &(), - _: buffer::Offset, - _: buffer::Offset, - _: query::ResultFlags, - ) { - 
panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, _: query::Query) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn push_graphics_constants( - &mut self, - _: &(), - _: pso::ShaderStageFlags, - _: u32, - _: &[u32], - ) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn push_compute_constants(&mut self, _: &(), _: u32, _: &[u32]) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn execute_commands<'a, T, I>(&mut self, _: I) - where - T: 'a + Borrow, - I: IntoIterator, - { - panic!(DO_NOT_USE_MESSAGE) - } -} - -// Dummy descriptor pool. -#[derive(Debug)] -pub struct DescriptorPool; -impl pso::DescriptorPool for DescriptorPool { - unsafe fn free_sets(&mut self, _descriptor_sets: I) - where - I: IntoIterator, - { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn reset(&mut self) { - panic!(DO_NOT_USE_MESSAGE) - } -} - -/// Dummy surface. -#[derive(Debug)] -pub struct Surface; -impl window::Surface for Surface { - fn supports_queue_family(&self, _: &QueueFamily) -> bool { - panic!(DO_NOT_USE_MESSAGE) - } - - fn capabilities(&self, _: &PhysicalDevice) -> window::SurfaceCapabilities { - panic!(DO_NOT_USE_MESSAGE) - } - - fn supported_formats(&self, _: &PhysicalDevice) -> Option> { - panic!(DO_NOT_USE_MESSAGE) - } -} -impl window::PresentationSurface for Surface { - type SwapchainImage = (); - - unsafe fn configure_swapchain( - &mut self, - _: &Device, - _: window::SwapchainConfig, - ) -> Result<(), window::CreationError> { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn unconfigure_swapchain(&mut self, _: &Device) { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn acquire_image( - &mut self, - _: u64, - ) -> Result<((), Option), window::AcquireError> { - panic!(DO_NOT_USE_MESSAGE) - } -} - -/// Dummy swapchain. 
-#[derive(Debug)] -pub struct Swapchain; -impl window::Swapchain for Swapchain { - unsafe fn acquire_image( - &mut self, - _: u64, - _: Option<&()>, - _: Option<&()>, - ) -> Result<(window::SwapImageIndex, Option), window::AcquireError> { - panic!(DO_NOT_USE_MESSAGE) - } -} - -#[derive(Debug)] -pub struct Instance; - -impl hal::Instance for Instance { - fn create(_name: &str, _version: u32) -> Result { - Ok(Instance) - } - - fn enumerate_adapters(&self) -> Vec> { - vec![] - } - - unsafe fn create_surface( - &self, - _: &impl raw_window_handle::HasRawWindowHandle, - ) -> Result { - panic!(DO_NOT_USE_MESSAGE) - } - - unsafe fn destroy_surface(&self, _surface: Surface) { - panic!(DO_NOT_USE_MESSAGE) - } -} +//! Dummy backend implementation to test the code for compile errors +//! outside of the graphics development environment. + +extern crate gfx_hal as hal; + +use hal::{ + adapter, + buffer, + command, + device, + format, + image, + memory, + pass, + pool, + pso, + query, + queue, + window, +}; +use std::borrow::Borrow; +use std::ops::Range; + +const DO_NOT_USE_MESSAGE: &str = "You need to enable a native API feature (vulkan/metal/dx11/dx12/gl/wgl) in order to use gfx-rs"; + +/// Dummy backend. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum Backend {} +impl hal::Backend for Backend { + type Instance = Instance; + type PhysicalDevice = PhysicalDevice; + type Device = Device; + + type Surface = Surface; + type Swapchain = Swapchain; + + type QueueFamily = QueueFamily; + type CommandQueue = CommandQueue; + type CommandBuffer = CommandBuffer; + + type Memory = (); + type CommandPool = CommandPool; + + type ShaderModule = (); + type RenderPass = (); + type Framebuffer = (); + + type Buffer = (); + type BufferView = (); + type Image = (); + type ImageView = (); + type Sampler = (); + + type ComputePipeline = (); + type GraphicsPipeline = (); + type PipelineCache = (); + type PipelineLayout = (); + type DescriptorSetLayout = (); + type DescriptorPool = DescriptorPool; + type DescriptorSet = (); + + type Fence = (); + type Semaphore = (); + type Event = (); + type QueryPool = (); +} + +/// Dummy physical device. +#[derive(Debug)] +pub struct PhysicalDevice; +impl adapter::PhysicalDevice for PhysicalDevice { + unsafe fn open( + &self, + _: &[(&QueueFamily, &[queue::QueuePriority])], + _: hal::Features, + ) -> Result, device::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + fn format_properties(&self, _: Option) -> format::Properties { + panic!(DO_NOT_USE_MESSAGE) + } + + fn image_format_properties( + &self, + _: format::Format, + _dim: u8, + _: image::Tiling, + _: image::Usage, + _: image::ViewCapabilities, + ) -> Option { + panic!(DO_NOT_USE_MESSAGE) + } + + fn memory_properties(&self) -> adapter::MemoryProperties { + panic!(DO_NOT_USE_MESSAGE) + } + + fn features(&self) -> hal::Features { + panic!(DO_NOT_USE_MESSAGE) + } + + fn hints(&self) -> hal::Hints { + panic!(DO_NOT_USE_MESSAGE) + } + + fn limits(&self) -> hal::Limits { + panic!(DO_NOT_USE_MESSAGE) + } +} + +/// Dummy command queue doing nothing. 
+#[derive(Debug)] +pub struct CommandQueue; +impl queue::CommandQueue for CommandQueue { + unsafe fn submit<'a, T, Ic, S, Iw, Is>( + &mut self, + _: queue::Submission, + _: Option<&()>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + S: 'a + Borrow<()>, + Iw: IntoIterator, + Is: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn present<'a, W, Is, S, Iw>( + &mut self, + _: Is, + _: Iw, + ) -> Result, window::PresentError> + where + W: 'a + Borrow, + Is: IntoIterator, + S: 'a + Borrow<()>, + Iw: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn present_surface( + &mut self, + _surface: &mut Surface, + _image: (), + _wait_semaphore: Option<&()>, + ) -> Result, window::PresentError> { + panic!(DO_NOT_USE_MESSAGE) + } + + fn wait_idle(&self) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } +} + +/// Dummy device doing nothing. +#[derive(Debug)] +pub struct Device; +impl device::Device for Device { + unsafe fn create_command_pool( + &self, + _: queue::QueueFamilyId, + _: pool::CommandPoolCreateFlags, + ) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_command_pool(&self, _: CommandPool) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn allocate_memory( + &self, + _: hal::MemoryTypeId, + _: u64, + ) -> Result<(), device::AllocationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_render_pass<'a, IA, IS, ID>( + &self, + _: IA, + _: IS, + _: ID, + ) -> Result<(), device::OutOfMemory> + where + IA: IntoIterator, + IA::Item: Borrow, + IS: IntoIterator, + IS::Item: Borrow>, + ID: IntoIterator, + ID::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_pipeline_layout(&self, _: IS, _: IR) -> Result<(), device::OutOfMemory> + where + IS: IntoIterator, + IS::Item: Borrow<()>, + IR: IntoIterator, + IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_pipeline_cache( + &self, + _data: Option<&[u8]>, + ) -> Result<(), 
device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result, device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_pipeline_cache(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_graphics_pipeline<'a>( + &self, + _: &pso::GraphicsPipelineDesc<'a, Backend>, + _: Option<&()>, + ) -> Result<(), pso::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_compute_pipeline<'a>( + &self, + _: &pso::ComputePipelineDesc<'a, Backend>, + _: Option<&()>, + ) -> Result<(), pso::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn merge_pipeline_caches(&self, _: &(), _: I) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<()>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_framebuffer( + &self, + _: &(), + _: I, + _: image::Extent, + ) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<()>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_shader_module(&self, _: &[u32]) -> Result<(), device::ShaderError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_sampler(&self, _: &image::SamplerDesc) -> Result<(), device::AllocationError> { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn create_buffer(&self, _: u64, _: buffer::Usage) -> Result<(), buffer::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_buffer_requirements(&self, _: &()) -> memory::Requirements { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_buffer_memory( + &self, + _: &(), + _: u64, + _: &mut (), + ) -> Result<(), device::BindError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_buffer_view( + &self, + _: &(), + _: Option, + _: buffer::SubRange, + ) -> Result<(), buffer::ViewCreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_image( + &self, + _: image::Kind, + _: image::Level, + _: format::Format, + _: image::Tiling, + _: image::Usage, + _: 
image::ViewCapabilities, + ) -> Result<(), image::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_image_requirements(&self, _: &()) -> memory::Requirements { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_image_subresource_footprint( + &self, + _: &(), + _: image::Subresource, + ) -> image::SubresourceFootprint { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_image_memory( + &self, + _: &(), + _: u64, + _: &mut (), + ) -> Result<(), device::BindError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_image_view( + &self, + _: &(), + _: image::ViewKind, + _: format::Format, + _: format::Swizzle, + _: image::SubresourceRange, + ) -> Result<(), image::ViewCreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_descriptor_pool( + &self, + _: usize, + _: I, + _: pso::DescriptorPoolCreateFlags, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_descriptor_set_layout( + &self, + _: I, + _: J, + ) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow<()>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn write_descriptor_sets<'a, I, J>(&self, _: I) + where + I: IntoIterator>, + J: IntoIterator, + J::Item: Borrow>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_descriptor_sets<'a, I>(&self, _: I) + where + I: IntoIterator, + I::Item: Borrow>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + fn create_semaphore(&self) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + fn create_fence(&self, _: bool) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_fence_status(&self, _: &()) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + fn create_event(&self) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_event_status(&self, _: &()) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_event(&self, 
_: &()) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset_event(&self, _: &()) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_query_pool(&self, _: query::Type, _: u32) -> Result<(), query::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_query_pool(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn get_query_pool_results( + &self, + _: &(), + _: Range, + _: &mut [u8], + _: buffer::Offset, + _: query::ResultFlags, + ) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn map_memory(&self, _: &(), _: memory::Segment) -> Result<*mut u8, device::MapError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn unmap_memory(&self, _: &()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn flush_mapped_memory_ranges<'a, I>(&self, _: I) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a (), memory::Segment)>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn invalidate_mapped_memory_ranges<'a, I>(&self, _: I) -> Result<(), device::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a (), memory::Segment)>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn free_memory(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_shader_module(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_render_pass(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_pipeline_layout(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_graphics_pipeline(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_compute_pipeline(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_framebuffer(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_buffer(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_buffer_view(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_image(&self, _: ()) { + 
panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_image_view(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn destroy_sampler(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_descriptor_pool(&self, _: DescriptorPool) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_descriptor_set_layout(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_fence(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_semaphore(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_event(&self, _: ()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn create_swapchain( + &self, + _: &mut Surface, + _: window::SwapchainConfig, + _: Option, + ) -> Result<(Swapchain, Vec<()>), hal::window::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_swapchain(&self, _: Swapchain) { + panic!(DO_NOT_USE_MESSAGE) + } + + fn wait_idle(&self) -> Result<(), device::OutOfMemory> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_image_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_buffer_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_command_buffer_name(&self, _: &mut CommandBuffer, _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_semaphore_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_fence_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_framebuffer_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_render_pass_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_descriptor_set_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_descriptor_set_layout_name(&self, _: &mut (), _: &str) { + panic!(DO_NOT_USE_MESSAGE) + } +} + +#[derive(Debug)] +pub struct QueueFamily; +impl queue::QueueFamily for QueueFamily { + fn 
queue_type(&self) -> queue::QueueType { + panic!(DO_NOT_USE_MESSAGE) + } + fn max_queues(&self) -> usize { + panic!(DO_NOT_USE_MESSAGE) + } + fn id(&self) -> queue::QueueFamilyId { + panic!(DO_NOT_USE_MESSAGE) + } +} + +/// Dummy raw command pool. +#[derive(Debug)] +pub struct CommandPool; +impl pool::CommandPool for CommandPool { + unsafe fn reset(&mut self, _: bool) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn free(&mut self, _: I) + where + I: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } +} + +/// Dummy command buffer, which ignores all the calls. +#[derive(Debug)] +pub struct CommandBuffer; +impl command::CommandBuffer for CommandBuffer { + unsafe fn begin( + &mut self, + _: command::CommandBufferFlags, + _: command::CommandBufferInheritanceInfo, + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn finish(&mut self) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset(&mut self, _: bool) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn pipeline_barrier<'a, T>( + &mut self, + _: Range, + _: memory::Dependencies, + _: T, + ) where + T: IntoIterator, + T::Item: Borrow>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn fill_buffer(&mut self, _: &(), _: buffer::SubRange, _: u32) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn update_buffer(&mut self, _: &(), _: buffer::Offset, _: &[u8]) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn clear_image(&mut self, _: &(), _: image::Layout, _: command::ClearValue, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn clear_attachments(&mut self, _: T, _: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn resolve_image(&mut self, _: &(), _: image::Layout, _: &(), _: image::Layout, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn blit_image( + &mut self, + _: &(), + _: image::Layout, + _: &(), + _: image::Layout, + _: 
image::Filter, + _: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_index_buffer(&mut self, _: buffer::IndexBufferView) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_vertex_buffers(&mut self, _: u32, _: I) + where + I: IntoIterator, + T: Borrow<()>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_viewports(&mut self, _: u32, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_scissors(&mut self, _: u32, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_stencil_reference(&mut self, _: pso::Face, _: pso::StencilValue) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_stencil_read_mask(&mut self, _: pso::Face, _: pso::StencilValue) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_stencil_write_mask(&mut self, _: pso::Face, _: pso::StencilValue) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_blend_constants(&mut self, _: pso::ColorValue) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_depth_bounds(&mut self, _: Range) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_line_width(&mut self, _: f32) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_depth_bias(&mut self, _: pso::DepthBias) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn begin_render_pass( + &mut self, + _: &(), + _: &(), + _: pso::Rect, + _: T, + _: command::SubpassContents, + ) where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn next_subpass(&mut self, _: command::SubpassContents) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn end_render_pass(&mut self) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_graphics_pipeline(&mut self, _: &()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_graphics_descriptor_sets(&mut self, _: &(), _: usize, _: I, _: J) + where + I: IntoIterator, + I::Item: Borrow<()>, + J: IntoIterator, + J::Item: Borrow, + { + 
panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_compute_pipeline(&mut self, _: &()) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn bind_compute_descriptor_sets(&mut self, _: &(), _: usize, _: I, _: J) + where + I: IntoIterator, + I::Item: Borrow<()>, + J: IntoIterator, + J::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn dispatch(&mut self, _: hal::WorkGroupCount) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn dispatch_indirect(&mut self, _: &(), _: buffer::Offset) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_buffer(&mut self, _: &(), _: &(), _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_image(&mut self, _: &(), _: image::Layout, _: &(), _: image::Layout, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_buffer_to_image(&mut self, _: &(), _: &(), _: image::Layout, _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_image_to_buffer(&mut self, _: &(), _: image::Layout, _: &(), _: T) + where + T: IntoIterator, + T::Item: Borrow, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn draw(&mut self, _: Range, _: Range) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn draw_indexed( + &mut self, + _: Range, + _: hal::VertexOffset, + _: Range, + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn draw_indirect(&mut self, _: &(), _: buffer::Offset, _: hal::DrawCount, _: u32) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn draw_indexed_indirect( + &mut self, + _: &(), + _: buffer::Offset, + _: hal::DrawCount, + _: u32, + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset_event(&mut self, _: &(), _: pso::PipelineStage) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn wait_events<'a, I, J>(&mut self, _: I, _: Range, _: J) + where + I: IntoIterator, + I::Item: Borrow<()>, 
+ J: IntoIterator, + J::Item: Borrow>, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn begin_query(&mut self, _: query::Query, _: query::ControlFlags) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn end_query(&mut self, _: query::Query) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset_query_pool(&mut self, _: &(), _: Range) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn copy_query_pool_results( + &mut self, + _: &(), + _: Range, + _: &(), + _: buffer::Offset, + _: buffer::Offset, + _: query::ResultFlags, + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn write_timestamp(&mut self, _: pso::PipelineStage, _: query::Query) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn push_graphics_constants( + &mut self, + _: &(), + _: pso::ShaderStageFlags, + _: u32, + _: &[u32], + ) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn push_compute_constants(&mut self, _: &(), _: u32, _: &[u32]) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn execute_commands<'a, T, I>(&mut self, _: I) + where + T: 'a + Borrow, + I: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn insert_debug_marker(&mut self, _: &str, _: u32) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn begin_debug_marker(&mut self, _: &str, _: u32) { + panic!(DO_NOT_USE_MESSAGE) + } + unsafe fn end_debug_marker(&mut self) { + panic!(DO_NOT_USE_MESSAGE) + } +} + +// Dummy descriptor pool. +#[derive(Debug)] +pub struct DescriptorPool; +impl pso::DescriptorPool for DescriptorPool { + unsafe fn free_sets(&mut self, _descriptor_sets: I) + where + I: IntoIterator, + { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn reset(&mut self) { + panic!(DO_NOT_USE_MESSAGE) + } +} + +/// Dummy surface. 
+#[derive(Debug)] +pub struct Surface; +impl window::Surface for Surface { + fn supports_queue_family(&self, _: &QueueFamily) -> bool { + panic!(DO_NOT_USE_MESSAGE) + } + + fn capabilities(&self, _: &PhysicalDevice) -> window::SurfaceCapabilities { + panic!(DO_NOT_USE_MESSAGE) + } + + fn supported_formats(&self, _: &PhysicalDevice) -> Option> { + panic!(DO_NOT_USE_MESSAGE) + } +} +impl window::PresentationSurface for Surface { + type SwapchainImage = (); + + unsafe fn configure_swapchain( + &mut self, + _: &Device, + _: window::SwapchainConfig, + ) -> Result<(), window::CreationError> { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn unconfigure_swapchain(&mut self, _: &Device) { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn acquire_image( + &mut self, + _: u64, + ) -> Result<((), Option), window::AcquireError> { + panic!(DO_NOT_USE_MESSAGE) + } +} + +/// Dummy swapchain. +#[derive(Debug)] +pub struct Swapchain; +impl window::Swapchain for Swapchain { + unsafe fn acquire_image( + &mut self, + _: u64, + _: Option<&()>, + _: Option<&()>, + ) -> Result<(window::SwapImageIndex, Option), window::AcquireError> { + panic!(DO_NOT_USE_MESSAGE) + } +} + +#[derive(Debug)] +pub struct Instance; + +impl hal::Instance for Instance { + fn create(_name: &str, _version: u32) -> Result { + Ok(Instance) + } + + fn enumerate_adapters(&self) -> Vec> { + vec![] + } + + unsafe fn create_surface( + &self, + _: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result { + panic!(DO_NOT_USE_MESSAGE) + } + + unsafe fn destroy_surface(&self, _surface: Surface) { + panic!(DO_NOT_USE_MESSAGE) + } +} diff --git a/third_party/rust/gfx-backend-metal/.cargo-checksum.json b/third_party/rust/gfx-backend-metal/.cargo-checksum.json index 34da982ae151..8557bfed96da 100644 --- a/third_party/rust/gfx-backend-metal/.cargo-checksum.json +++ b/third_party/rust/gfx-backend-metal/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"f180d9eace35977c44464b71590db85cc2b47e9ffa53687afc20d44dd7eb39ee","README.md":"0b5008f38b9cf1bda9de72f8ca467c399404df0e75daf3b1e5796f4d1fd7568f","shaders/blit.metal":"b243873ac0d7ded37b199d17d1a7b53d5332b4a57bfa22f99dcf60273730be45","shaders/clear.metal":"796a612c1cb48e46fc94b7227feaab993d7ddeed293b69e9f09b2dd88e6a1189","shaders/fill.metal":"2642b5df62f8eb2246a442137d083010d2a3132110d9be4eb25b479123098d25","shaders/gfx-shaders-ios.metallib":"b93c70027cf196548eac31a3cf5f37947ee2b13655445bc03c68c8224dad9613","shaders/gfx-shaders-macos.metallib":"cc7e8a6ad0a0d99197bdd9c65939e3a4d9960fa8aa181467363aa3578d68af54","shaders/macros.h":"a4550ac7c180935c2edb57aa7a5f8442b53f1f3dc65df8cc800d0afb8289cdeb","src/command.rs":"06578d5f260ea9c440e81b7239f14a4a0216664a1afbc786a3df5f6687609484","src/conversions.rs":"ab9daf8e97b7d28bea3b8e6773afc287b3441d148a1cc12822c646cdbba2a37f","src/device.rs":"1d475ace1313b3c82a5f1e122ab7c818ca746f45d82ecd6e24932021cb743167","src/internal.rs":"93039ce3266f771c40d186f887f434bbef403e3deef02bc606b7a1a2f6031db1","src/lib.rs":"43bc34e00352819340e92b48516fa101fb1d20b2e6f8d275df9d0a0a31c95a49","src/native.rs":"516229d72433df23296f11b1490278f080d5a90646e7961f0e928da036f7f28d","src/soft.rs":"795767c3756a95b5a1e3bf28d2d4ce3eb85fb358ef098a4fbe0af893509e3941","src/window.rs":"cebbe53f2fb45dbdfcf03ba18ca181fa966997665cec65ae1a1d77d0c193f20b"},"package":"05b6130b9a72129ebb5c91d3d75a142a7fa54dcc112603231582e3fdc0b84247"} \ No newline at end of file 
+{"files":{"Cargo.toml":"126d7c0db193cdab96124382b4e801e093d0228ff896212c6df83886b9eed2ba","README.md":"0b5008f38b9cf1bda9de72f8ca467c399404df0e75daf3b1e5796f4d1fd7568f","shaders/blit.metal":"b243873ac0d7ded37b199d17d1a7b53d5332b4a57bfa22f99dcf60273730be45","shaders/clear.metal":"796a612c1cb48e46fc94b7227feaab993d7ddeed293b69e9f09b2dd88e6a1189","shaders/fill.metal":"2642b5df62f8eb2246a442137d083010d2a3132110d9be4eb25b479123098d25","shaders/gfx-shaders-ios.metallib":"b93c70027cf196548eac31a3cf5f37947ee2b13655445bc03c68c8224dad9613","shaders/gfx-shaders-macos.metallib":"cc7e8a6ad0a0d99197bdd9c65939e3a4d9960fa8aa181467363aa3578d68af54","shaders/macros.h":"a4550ac7c180935c2edb57aa7a5f8442b53f1f3dc65df8cc800d0afb8289cdeb","src/command.rs":"58be3498f916beca1cb5811ce589f30d31d72d2d35b1b494811edff0f3de9c3e","src/conversions.rs":"68fc4f31ad6c207b0b300c93fc711432ad2597b07dc865b537226ee4689b3846","src/device.rs":"f005f44c41548cc6637e5bae715913b7ab5c22554f8444c6a1e3e79dde794683","src/internal.rs":"5654ee0fc5acd5dab8def2724045dcba656c75c7ce5160765eb17da85f32020a","src/lib.rs":"9a2bb79c6a62e93c360f5f8dc8940793f7fb59827afce3c218357e1a0563f3c5","src/native.rs":"aed343d9a74b7f4fd81141c21d9daf578d3e91710e320a58690e65bb97180e51","src/soft.rs":"e7cc07ed5ebfcccaec8bf4529d6914647aa505dbc1f19da16493c69a64d1660d","src/window.rs":"b8ab41db35cd91cba8e24c532d0f3e643ccea78629ae3a5fdad8e36f8a200b24"},"package":"cfe128c29675b5afc8acdda1dfe096d6abd5e3528059ab0b98bda8215d8beed9"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-metal/Cargo.toml b/third_party/rust/gfx-backend-metal/Cargo.toml index 5d791a9a81b4..ee0217189cda 100644 --- a/third_party/rust/gfx-backend-metal/Cargo.toml +++ b/third_party/rust/gfx-backend-metal/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "gfx-backend-metal" -version = "0.4.5" +version = "0.5.1" authors = ["The Gfx-rs Developers"] description = "Metal API backend for gfx-rs" homepage = "https://github.com/gfx-rs/gfx" @@ -31,7 
+31,8 @@ name = "gfx_backend_metal" version = "0.5" [dependencies.auxil] -version = "0.1" +version = "0.3" +features = ["spirv_cross"] package = "gfx-auxil" [dependencies.bitflags] @@ -50,14 +51,14 @@ version = "0.1.4" version = "0.19" [dependencies.dispatch] -version = "0.1" +version = "0.2" optional = true [dependencies.foreign-types] version = "0.3" [dependencies.hal] -version = "0.4" +version = "0.5" package = "gfx-hal" [dependencies.lazy_static] @@ -83,10 +84,10 @@ version = "0.1" version = "0.3" [dependencies.smallvec] -version = "0.6" +version = "1" [dependencies.spirv_cross] -version = "0.16" +version = "0.18" features = ["msl"] [dependencies.storage-map] diff --git a/third_party/rust/gfx-backend-metal/src/command.rs b/third_party/rust/gfx-backend-metal/src/command.rs index e88b9aeebab2..d6ee49281efd 100644 --- a/third_party/rust/gfx-backend-metal/src/command.rs +++ b/third_party/rust/gfx-backend-metal/src/command.rs @@ -27,7 +27,6 @@ use hal::{ pass::AttachmentLoadOp, pso, query, - range::RangeArg, window::{PresentError, Suboptimal, SwapImageIndex}, DrawCount, IndexCount, @@ -68,7 +67,6 @@ use std::{ time, }; - const WORD_SIZE: usize = 4; const WORD_ALIGNMENT: u64 = WORD_SIZE as _; /// Number of frames to average when reporting the performance counters. @@ -198,7 +196,6 @@ impl QueueBlocker { } } - #[derive(Debug)] struct PoolShared { online_recording: OnlineRecording, @@ -379,10 +376,10 @@ impl State { } fn make_viewport_and_scissor_commands( - &self + &self, ) -> ( Option>, - Option>, + Option>, ) { let com_vp = self .viewport @@ -505,7 +502,6 @@ impl State { .map(|&(resource, usage)| soft::ComputeCommand::UseResource { resource, usage }) }); - com_pso .into_iter() .chain(iter::once(com_buffers)) @@ -631,7 +627,7 @@ impl State { disabilities: PrivateDisabilities, ) -> soft::RenderCommand<&'a soft::Ref> { let depth = vp.depth.start .. 
if disabilities.broken_viewport_near_depth { - (vp.depth.end - vp.depth.start) + vp.depth.end - vp.depth.start } else { vp.depth.end }; @@ -798,7 +794,7 @@ unsafe impl Send for SharedCommandBuffer {} impl EncodePass { fn schedule(self, queue: &dispatch::Queue, cmd_buffer_arc: &Arc>) { let cmd_buffer = SharedCommandBuffer(Arc::clone(cmd_buffer_arc)); - queue.r#async(move || match self { + queue.exec_async(move || match self { EncodePass::Render(list, resources, desc, label) => { let encoder = cmd_buffer .0 @@ -832,8 +828,12 @@ impl EncodePass { fn update(&self, capacity: &mut Capacity) { match &self { - EncodePass::Render(ref list, _, _, _) => capacity.render = capacity.render.max(list.len()), - EncodePass::Compute(ref list, _, _) => capacity.compute = capacity.compute.max(list.len()), + EncodePass::Render(ref list, _, _, _) => { + capacity.render = capacity.render.max(list.len()) + } + EncodePass::Compute(ref list, _, _) => { + capacity.compute = capacity.compute.max(list.len()) + } EncodePass::Blit(ref list, _) => capacity.blit = capacity.blit.max(list.len()), } } @@ -921,9 +921,11 @@ impl Journal { soft::Pass::Compute => self.compute_commands.len(), soft::Pass::Blit => self.blit_commands.len(), }; - self.passes - .alloc() - .init((pass.clone(), range.start + offset .. range.end + offset, label.clone())); + self.passes.alloc().init(( + pass.clone(), + range.start + offset .. range.end + offset, + label.clone(), + )); } } @@ -1063,7 +1065,9 @@ impl<'a> PreCompute<'a> { impl CommandSink { fn label(&mut self, label: &str) -> &Self { match self { - CommandSink::Immediate { label: l, .. } | CommandSink::Deferred { label: l, .. } => *l = label.to_string(), + CommandSink::Immediate { label: l, .. } | CommandSink::Deferred { label: l, .. } => { + *l = label.to_string() + } #[cfg(feature = "dispatch")] CommandSink::Remote { label: l, .. } => *l = label.to_string(), } @@ -1172,7 +1176,12 @@ impl CommandSink { .. 
} => { let list = Vec::with_capacity(capacity.render); - *pass = Some(EncodePass::Render(list, soft::Own::default(), descriptor, label.clone())); + *pass = Some(EncodePass::Render( + list, + soft::Own::default(), + descriptor, + label.clone(), + )); match *pass { Some(EncodePass::Render(ref mut list, ref mut resources, _, _)) => { PreRender::Deferred(resources, list) @@ -1241,10 +1250,11 @@ impl CommandSink { if let Some(&(soft::Pass::Blit, _, _)) = journal.passes.last() { } else { journal.stop(); - journal - .passes - .alloc() - .init((soft::Pass::Blit, journal.blit_commands.len() .. 0, label.clone())); + journal.passes.alloc().init(( + soft::Pass::Blit, + journal.blit_commands.len() .. 0, + label.clone(), + )); } PreBlit::Deferred(&mut journal.blit_commands) } @@ -1348,10 +1358,11 @@ impl CommandSink { false } else { journal.stop(); - journal - .passes - .alloc() - .init((soft::Pass::Compute, journal.compute_commands.len() .. 0, label.clone())); + journal.passes.alloc().init(( + soft::Pass::Compute, + journal.compute_commands.len() .. 
0, + label.clone(), + )); true }; ( @@ -1378,7 +1389,11 @@ impl CommandSink { pass.schedule(queue, cmd_buffer); } let list = Vec::with_capacity(capacity.compute); - *pass = Some(EncodePass::Compute(list, soft::Own::default(), label.clone())); + *pass = Some(EncodePass::Compute( + list, + soft::Own::default(), + label.clone(), + )); match *pass { Some(EncodePass::Compute(ref mut list, ref mut resources, _)) => { (PreCompute::Deferred(resources, list), true) @@ -1983,7 +1998,6 @@ where } } - #[derive(Default, Debug)] struct PerformanceCounters { immediate_command_buffers: usize, @@ -2181,7 +2195,7 @@ impl hal::queue::CommandQueue for CommandQueue { cmd_buffer.lock().enqueue(); let shared_cb = SharedCommandBuffer(Arc::clone(cmd_buffer)); //TODO: make this compatible with events - queue.sync(move || { + queue.exec_sync(move || { shared_cb.0.lock().commit(); }); } @@ -2544,7 +2558,7 @@ impl com::CommandBuffer for CommandBuffer { self.state.target_extent = framebuffer.extent; } if let Some(sp) = info.subpass { - let subpass = &sp.main_pass.subpasses[sp.index]; + let subpass = &sp.main_pass.subpasses[sp.index as usize]; self.state.target_formats.copy_from(&subpass.target_formats); self.state.target_aspects = Aspects::empty(); @@ -2566,10 +2580,11 @@ impl com::CommandBuffer for CommandBuffer { }) => { *is_encoding = true; let pass_desc = metal::RenderPassDescriptor::new().to_owned(); - journal - .passes - .alloc() - .init((soft::Pass::Render(pass_desc), 0 .. 0, label.clone())); + journal.passes.alloc().init(( + soft::Pass::Render(pass_desc), + 0 .. 
0, + label.clone(), + )); } _ => { warn!("Unexpected inheritance info on a primary command buffer"); @@ -2600,23 +2615,17 @@ impl com::CommandBuffer for CommandBuffer { { } - unsafe fn fill_buffer(&mut self, buffer: &native::Buffer, range: R, data: u32) - where - R: RangeArg, - { + unsafe fn fill_buffer(&mut self, buffer: &native::Buffer, sub: buffer::SubRange, data: u32) { let (raw, base_range) = buffer.as_bound(); let mut inner = self.inner.borrow_mut(); - let start = base_range.start + *range.start().unwrap_or(&0); + let start = base_range.start + sub.offset; assert_eq!(start % WORD_ALIGNMENT, 0); - let end = match range.end() { - Some(&e) => { - assert_eq!(e % WORD_ALIGNMENT, 0); - base_range.start + e - } - None => base_range.end, - }; + let end = sub.size.map_or(base_range.end, |s| { + assert_eq!(s % WORD_ALIGNMENT, 0); + base_range.start + s + }); if (data & 0xFF) * 0x0101_0101 == data { let command = soft::BlitCommand::FillBuffer { @@ -3291,10 +3300,10 @@ impl com::CommandBuffer for CommandBuffer { unsafe fn bind_index_buffer(&mut self, view: buffer::IndexBufferView) { let (raw, range) = view.buffer.as_bound(); - assert!(range.start + view.offset < range.end); // conservative + assert!(range.start + view.range.offset + view.range.size.unwrap_or(0) <= range.end); // conservative self.state.index_buffer = Some(IndexBuffer { buffer: AsNative::from(raw), - offset: (range.start + view.offset) as _, + offset: (range.start + view.range.offset) as _, stride: match view.index_type { IndexType::U16 => 2, IndexType::U32 => 4, @@ -3304,7 +3313,7 @@ impl com::CommandBuffer for CommandBuffer { unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) where - I: IntoIterator, + I: IntoIterator, T: Borrow, { if self.state.vertex_buffers.len() <= first_binding as usize { @@ -3312,7 +3321,7 @@ impl com::CommandBuffer for CommandBuffer { .vertex_buffers .resize(first_binding as usize + 1, None); } - for (i, (buffer, offset)) in 
buffers.into_iter().enumerate() { + for (i, (buffer, sub)) in buffers.into_iter().enumerate() { let b = buffer.borrow(); let (raw, range) = b.as_bound(); let buffer_ptr = AsNative::from(raw); @@ -3320,7 +3329,7 @@ impl com::CommandBuffer for CommandBuffer { self.state .vertex_buffers .entry(index) - .set(Some((buffer_ptr, range.start + offset))); + .set(Some((buffer_ptr, range.start + sub.offset))); } if let Some(command) = self @@ -4749,4 +4758,14 @@ impl com::CommandBuffer for CommandBuffer { } } } + + unsafe fn insert_debug_marker(&mut self, _name: &str, _color: u32) { + //TODO + } + unsafe fn begin_debug_marker(&mut self, _name: &str, _color: u32) { + //TODO + } + unsafe fn end_debug_marker(&mut self) { + //TODO + } } diff --git a/third_party/rust/gfx-backend-metal/src/conversions.rs b/third_party/rust/gfx-backend-metal/src/conversions.rs index 8eb2cf5ec8cd..408e867d55a6 100644 --- a/third_party/rust/gfx-backend-metal/src/conversions.rs +++ b/third_party/rust/gfx-backend-metal/src/conversions.rs @@ -1163,6 +1163,7 @@ pub fn map_wrap_mode(wrap: image::WrapMode) -> MTLSamplerAddressMode { image::WrapMode::Mirror => MTLSamplerAddressMode::MirrorRepeat, image::WrapMode::Clamp => MTLSamplerAddressMode::ClampToEdge, image::WrapMode::Border => MTLSamplerAddressMode::ClampToBorderColor, + image::WrapMode::MirrorClamp => MTLSamplerAddressMode::MirrorClampToEdge, } } @@ -1208,15 +1209,7 @@ pub fn map_polygon_mode(mode: pso::PolygonMode) -> MTLTriangleFillMode { warn!("Unable to fill with points"); MTLTriangleFillMode::Lines } - pso::PolygonMode::Line(width) => { - match width { - pso::State::Static(w) if w != 1.0 => { - warn!("Unsupported line width: {:?}", w); - } - _ => {} - } - MTLTriangleFillMode::Lines - } + pso::PolygonMode::Line => MTLTriangleFillMode::Lines, pso::PolygonMode::Fill => MTLTriangleFillMode::Fill, } } diff --git a/third_party/rust/gfx-backend-metal/src/device.rs b/third_party/rust/gfx-backend-metal/src/device.rs index 26fef57d7eb9..b6a122f37c1c 
100644 --- a/third_party/rust/gfx-backend-metal/src/device.rs +++ b/third_party/rust/gfx-backend-metal/src/device.rs @@ -17,10 +17,7 @@ use crate::{ }; use arrayvec::ArrayVec; -use auxil::{ - FastHashMap, - spirv_cross_specialize_ast, -}; +use auxil::{spirv_cross_specialize_ast, FastHashMap}; use cocoa::foundation::{NSRange, NSUInteger}; use copyless::VecHelper; use foreign_types::{ForeignType, ForeignTypeRef}; @@ -47,7 +44,6 @@ use hal::{ pso::VertexInputRate, query, queue::{QueueFamilyId, QueueGroup, QueuePriority}, - range::RangeArg, window, }; use metal::{ @@ -81,7 +77,6 @@ use std::sync::{ }; use std::{cmp, iter, mem, ptr, thread, time}; - const PUSH_CONSTANTS_DESC_SET: u32 = !0; const PUSH_CONSTANTS_DESC_BINDING: u32 = 0; const STRIDE_GRANULARITY: pso::ElemStride = 4; //TODO: work around? @@ -118,10 +113,9 @@ fn get_final_function( })?; if !function_specialization { - assert!( - specialization.data.is_empty() && specialization.constants.is_empty(), - "platform does not support specialization", - ); + if !specialization.data.is_empty() || !specialization.constants.is_empty() { + error!("platform does not support specialization"); + } return Ok(mtl_function); } @@ -179,9 +173,10 @@ impl VisibilityShared { } } -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct Device { pub(crate) shared: Arc, + invalidation_queue: command::QueueInner, memory_types: Vec, features: hal::Features, pub online_recording: OnlineRecording, @@ -327,6 +322,7 @@ impl adapter::PhysicalDevice for PhysicalDevice { let device = Device { shared: self.shared.clone(), + invalidation_queue: command::QueueInner::new(&*device, Some(1)), memory_types: self.memory_types.clone(), features: requested_features, online_recording: OnlineRecording::default(), @@ -423,7 +419,8 @@ impl adapter::PhysicalDevice for PhysicalDevice { } fn features(&self) -> hal::Features { - hal::Features::ROBUST_BUFFER_ACCESS + hal::Features::empty() + | hal::Features::ROBUST_BUFFER_ACCESS | 
hal::Features::DRAW_INDIRECT_FIRST_INSTANCE | hal::Features::DEPTH_CLAMP | hal::Features::SAMPLER_ANISOTROPY @@ -445,6 +442,16 @@ impl adapter::PhysicalDevice for PhysicalDevice { hal::Features::empty() } | hal::Features::SHADER_CLIP_DISTANCE + //| hal::Features::SAMPLER_MIRROR_CLAMP_EDGE + | hal::Features::NDC_Y_UP + } + + fn hints(&self) -> hal::Hints { + if self.shared.private_caps.base_vertex_instance_drawing { + hal::Hints::BASE_VERTEX_INSTANCE_DRAWING + } else { + hal::Hints::empty() + } } fn limits(&self) -> hal::Limits { @@ -463,13 +470,18 @@ impl adapter::PhysicalDevice for PhysicalDevice { max_sampler_allocation_count: !0, max_bound_descriptor_sets: MAX_BOUND_DESCRIPTOR_SETS as _, max_descriptor_set_samplers: pc.max_samplers_per_stage as usize * SHADER_STAGE_COUNT, - max_descriptor_set_uniform_buffers: pc.max_buffers_per_stage as usize * SHADER_STAGE_COUNT, - max_descriptor_set_storage_buffers: pc.max_buffers_per_stage as usize * SHADER_STAGE_COUNT, - max_descriptor_set_sampled_images: pc.max_textures_per_stage as usize * SHADER_STAGE_COUNT, - max_descriptor_set_storage_images: pc.max_textures_per_stage as usize * SHADER_STAGE_COUNT, - max_descriptor_set_input_attachments: pc.max_textures_per_stage as usize * SHADER_STAGE_COUNT, + max_descriptor_set_uniform_buffers: pc.max_buffers_per_stage as usize + * SHADER_STAGE_COUNT, + max_descriptor_set_storage_buffers: pc.max_buffers_per_stage as usize + * SHADER_STAGE_COUNT, + max_descriptor_set_sampled_images: pc.max_textures_per_stage as usize + * SHADER_STAGE_COUNT, + max_descriptor_set_storage_images: pc.max_textures_per_stage as usize + * SHADER_STAGE_COUNT, + max_descriptor_set_input_attachments: pc.max_textures_per_stage as usize + * SHADER_STAGE_COUNT, max_fragment_input_components: pc.max_fragment_input_components as usize, - max_framebuffer_layers: 2048, // TODO: Determine is this is the correct value + max_framebuffer_layers: 2048, // TODO: Determine is this is the correct value 
max_memory_allocation_count: 4096, // TODO: Determine is this is the correct value max_per_stage_descriptor_samplers: pc.max_samplers_per_stage as usize, @@ -619,7 +631,8 @@ impl Device { spirv_cross_specialize_ast(&mut ast, specialization)?; - ast.set_compiler_options(compiler_options).map_err(gen_unexpected_error)?; + ast.set_compiler_options(compiler_options) + .map_err(gen_unexpected_error)?; let entry_points = ast.get_entry_points().map_err(|err| { ShaderError::CompilationFailed(match err { @@ -786,7 +799,7 @@ impl Device { image::Filter::Linear => MTLSamplerMipFilter::Linear, }); - if let image::Anisotropic::On(aniso) = info.anisotropic { + if let Some(aniso) = info.anisotropy_clamp { descriptor.set_max_anisotropy(aniso as _); } @@ -847,6 +860,9 @@ impl Device { image::WrapMode::Mirror => msl::SamplerAddress::MirroredRepeat, image::WrapMode::Clamp => msl::SamplerAddress::ClampToEdge, image::WrapMode::Border => msl::SamplerAddress::ClampToBorder, + image::WrapMode::MirrorClamp => { + unimplemented!("https://github.com/grovesNL/spirv_cross/issues/138") + } } } @@ -866,9 +882,7 @@ impl Device { image::Filter::Linear => msl::SamplerFilter::Linear, }, mip_filter: match info.min_filter { - image::Filter::Nearest if info.lod_range.end.0 < 0.5 => { - msl::SamplerMipFilter::None - } + image::Filter::Nearest if info.lod_range.end.0 < 0.5 => msl::SamplerMipFilter::None, image::Filter::Nearest => msl::SamplerMipFilter::Nearest, image::Filter::Linear => msl::SamplerMipFilter::Linear, }, @@ -890,10 +904,22 @@ impl Device { }, lod_clamp_min: lods.start.into(), lod_clamp_max: lods.end.into(), - max_anisotropy: match info.anisotropic { - image::Anisotropic::On(aniso) => aniso as i32, - image::Anisotropic::Off => 0, - }, + max_anisotropy: info.anisotropy_clamp.map_or(0, |aniso| aniso as i32), + planes: 0, + resolution: msl::FormatResolution::_444, + chroma_filter: msl::SamplerFilter::Nearest, + x_chroma_offset: msl::ChromaLocation::CositedEven, + y_chroma_offset: 
msl::ChromaLocation::CositedEven, + swizzle: [ + msl::ComponentSwizzle::Identity, + msl::ComponentSwizzle::Identity, + msl::ComponentSwizzle::Identity, + msl::ComponentSwizzle::Identity, + ], + ycbcr_conversion_enable: false, + ycbcr_model: msl::SamplerYCbCrModelConversion::RgbIdentity, + ycbcr_range: msl::SamplerYCbCrRange::ItuFull, + bpc: 8, } } } @@ -1012,8 +1038,8 @@ impl hal::device::Device for Device { } } if let Some((id, ref mut ops)) = sub.depth_stencil { - if use_mask & 1 << id == 0 { - *ops |= n::SubpassOps::LOAD; + if use_mask & 1 << id != 0 { + *ops |= n::SubpassOps::STORE; use_mask ^= 1 << id; } } @@ -1250,7 +1276,7 @@ impl hal::device::Device for Device { MTLLanguageVersion::V2_2 => msl::Version::V2_2, }; shader_compiler_options.enable_point_size_builtin = false; - shader_compiler_options.vertex.invert_y = true; + shader_compiler_options.vertex.invert_y = !self.features.contains(hal::Features::NDC_Y_UP); shader_compiler_options.resource_binding_overrides = res_overrides; shader_compiler_options.const_samplers = const_samplers; shader_compiler_options.enable_argument_buffers = self.shared.private_caps.argument_buffers; @@ -1351,7 +1377,7 @@ impl hal::device::Device for Device { let pipeline_layout = &pipeline_desc.layout; let (rp_attachments, subpass) = { let pass::Subpass { main_pass, index } = pipeline_desc.subpass; - (&main_pass.attachments, &main_pass.subpasses[index]) + (&main_pass.attachments, &main_pass.subpasses[index as usize]) }; let (primitive_class, primitive_type) = match pipeline_desc.input_assembler.primitive { @@ -1442,7 +1468,7 @@ impl hal::device::Device for Device { { let desc = pipeline .color_attachments() - .object_at(i as NSUInteger) + .object_at(i as u64) .expect("too many color attachments"); desc.set_pixel_format(mtl_format); @@ -1523,7 +1549,7 @@ impl hal::device::Device for Device { // pass the refined data to Metal let mtl_attribute_desc = vertex_descriptor .attributes() - .object_at(location as NSUInteger) + 
.object_at(location as u64) .expect("too many vertex attributes"); let mtl_vertex_format = conv::map_vertex_format(element.format).expect("unsupported vertex format"); @@ -1535,7 +1561,7 @@ impl hal::device::Device for Device { for (i, (vb, _)) in vertex_buffers.iter().enumerate() { let mtl_buffer_desc = vertex_descriptor .layouts() - .object_at(self.shared.private_caps.max_buffers_per_stage as NSUInteger - 1 - i as NSUInteger) + .object_at(self.shared.private_caps.max_buffers_per_stage as u64 - 1 - i as u64) .expect("too many vertex descriptor layouts"); if vb.stride % STRIDE_GRANULARITY != 0 { error!( @@ -1565,6 +1591,12 @@ impl hal::device::Device for Device { pipeline.set_vertex_descriptor(Some(&vertex_descriptor)); } + if let pso::State::Static(w) = pipeline_desc.rasterizer.line_width { + if w != 1.0 { + warn!("Unsupported line width: {:?}", w); + } + } + let rasterizer_state = Some(n::RasterizerState { front_winding: conv::map_winding(pipeline_desc.rasterizer.front_face), fill_mode: conv::map_polygon_mode(pipeline_desc.rasterizer.polygon_mode), @@ -1687,6 +1719,7 @@ impl hal::device::Device for Device { ) -> Result { //TODO: we can probably at least parse here and save the `Ast` let depends_on_pipeline_layout = true; //TODO: !self.private_caps.argument_buffers + // TODO: also depends on pipeline layout if there are specialization constants that // SPIRV-Cross generates macros for, which occurs when MSL version is older than 1.2 or the // constant is used as an array size (see @@ -1696,7 +1729,7 @@ impl hal::device::Device for Device { } else { let mut options = msl::CompilerOptions::default(); options.enable_point_size_builtin = false; - options.vertex.invert_y = true; + options.vertex.invert_y = !self.features.contains(hal::Features::NDC_Y_UP); let info = Self::compile_shader_library( &self.shared.device, raw_data, @@ -1725,12 +1758,12 @@ impl hal::device::Device for Device { unsafe fn destroy_sampler(&self, _sampler: n::Sampler) {} - unsafe fn map_memory>( 
+ unsafe fn map_memory( &self, memory: &n::Memory, - generic_range: R, + segment: memory::Segment, ) -> Result<*mut u8, MapError> { - let range = memory.resolve(&generic_range); + let range = memory.resolve(&segment); debug!("map_memory of size {} at {:?}", memory.size, range); let base_ptr = match memory.heap { @@ -1744,16 +1777,15 @@ impl hal::device::Device for Device { debug!("unmap_memory of size {}", memory.size); } - unsafe fn flush_mapped_memory_ranges<'a, I, R>(&self, iter: I) -> Result<(), OutOfMemory> + unsafe fn flush_mapped_memory_ranges<'a, I>(&self, iter: I) -> Result<(), OutOfMemory> where I: IntoIterator, - I::Item: Borrow<(&'a n::Memory, R)>, - R: RangeArg, + I::Item: Borrow<(&'a n::Memory, memory::Segment)>, { debug!("flush_mapped_memory_ranges"); for item in iter { - let (memory, ref generic_range) = *item.borrow(); - let range = memory.resolve(generic_range); + let (memory, ref segment) = *item.borrow(); + let range = memory.resolve(segment); debug!("\trange {:?}", range); match memory.heap { @@ -1774,25 +1806,24 @@ impl hal::device::Device for Device { Ok(()) } - unsafe fn invalidate_mapped_memory_ranges<'a, I, R>(&self, iter: I) -> Result<(), OutOfMemory> + unsafe fn invalidate_mapped_memory_ranges<'a, I>(&self, iter: I) -> Result<(), OutOfMemory> where I: IntoIterator, - I::Item: Borrow<(&'a n::Memory, R)>, - R: RangeArg, + I::Item: Borrow<(&'a n::Memory, memory::Segment)>, { let mut num_syncs = 0; debug!("invalidate_mapped_memory_ranges"); // temporary command buffer to copy the contents from // the given buffers into the allocated CPU-visible buffers - let cmd_queue = self.shared.queue.lock(); - let cmd_buffer = cmd_queue.spawn_temp(); + // Note: using a separate internal queue in order to avoid a stall + let cmd_buffer = self.invalidation_queue.spawn_temp(); autoreleasepool(|| { let encoder = cmd_buffer.new_blit_command_encoder(); for item in iter { - let (memory, ref generic_range) = *item.borrow(); - let range = 
memory.resolve(generic_range); + let (memory, ref segment) = *item.borrow(); + let range = memory.resolve(segment); debug!("\trange {:?}", range); match memory.heap { @@ -1907,13 +1938,24 @@ impl hal::device::Device for Device { //TODO: have the API providing the dimensions and MSAA flag // for textures in an argument buffer match desc.ty { - pso::DescriptorType::UniformBufferDynamic - | pso::DescriptorType::StorageBufferDynamic => { + pso::DescriptorType::Buffer { + format: + pso::BufferDescriptorFormat::Structured { + dynamic_offset: true, + }, + .. + } => { //TODO: apply the offsets somehow at the binding time error!("Dynamic offsets are not yet supported in argument buffers!"); } - pso::DescriptorType::StorageImage | pso::DescriptorType::StorageTexelBuffer => { - //TODO: bind storage images separately + pso::DescriptorType::Image { + ty: pso::ImageDescriptorType::Storage { .. }, + } + | pso::DescriptorType::Buffer { + ty: pso::BufferDescriptorType::Storage { .. }, + format: pso::BufferDescriptorFormat::Texel, + } => { + //TODO: bind storage buffers and images separately error!("Storage images are not yet supported in argument buffers!"); } _ => {} @@ -2077,20 +2119,18 @@ impl hal::device::Device for Device { data.textures[counters.textures as usize] = Some((AsNative::from(view.texture.as_ref()), il)); } - pso::Descriptor::UniformTexelBuffer(view) - | pso::Descriptor::StorageTexelBuffer(view) => { + pso::Descriptor::TexelBuffer(view) => { data.textures[counters.textures as usize] = Some(( AsNative::from(view.raw.as_ref()), image::Layout::General, )); } - pso::Descriptor::Buffer(buf, ref desc_range) => { + pso::Descriptor::Buffer(buf, ref sub) => { let (raw, range) = buf.as_bound(); - if let Some(end) = desc_range.end { - debug_assert!(range.start + end <= range.end); - } - let start = range.start + desc_range.start.unwrap_or(0); - let pair = (AsNative::from(raw), start); + debug_assert!( + range.start + sub.offset + sub.size.unwrap_or(0) <= range.end + ); + let 
pair = (AsNative::from(raw), range.start + sub.offset); data.buffers[counters.buffers as usize] = Some(pair); } } @@ -2157,18 +2197,17 @@ impl hal::device::Device for Device { encoder.set_texture(arg_index, tex_ref); data.ptr = (&**tex_ref).as_ptr(); } - pso::Descriptor::UniformTexelBuffer(view) - | pso::Descriptor::StorageTexelBuffer(view) => { + pso::Descriptor::TexelBuffer(view) => { encoder.set_texture(arg_index, &view.raw); data.ptr = (&**view.raw).as_ptr(); arg_index += 1; } - pso::Descriptor::Buffer(buffer, ref desc_range) => { + pso::Descriptor::Buffer(buffer, ref sub) => { let (buf_raw, buf_range) = buffer.as_bound(); encoder.set_buffer( arg_index, buf_raw, - buf_range.start + desc_range.start.unwrap_or(0), + buf_range.start + sub.offset, ); data.ptr = (&**buf_raw).as_ptr(); arg_index += 1; @@ -2254,7 +2293,11 @@ impl hal::device::Device for Device { usage: buffer::Usage, ) -> Result { debug!("create_buffer of size {} and usage {:?}", size, usage); - Ok(n::Buffer::Unbound { usage, size, name: String::new() }) + Ok(n::Buffer::Unbound { + usage, + size, + name: String::new(), + }) } unsafe fn get_buffer_requirements(&self, buffer: &n::Buffer) -> memory::Requirements { @@ -2335,8 +2378,14 @@ impl hal::device::Device for Device { let options = conv::resource_options_from_storage_and_cache(storage, cache); if offset == 0x0 && size == cpu_buffer.length() { cpu_buffer.set_label(name); - } else { - cpu_buffer.add_debug_marker(name, NSRange { location: offset, length: size }); + } else if self.shared.private_caps.supports_debug_markers { + cpu_buffer.add_debug_marker( + name, + NSRange { + location: offset, + length: size, + }, + ); } n::Buffer::Bound { raw: cpu_buffer.clone(), @@ -2371,11 +2420,11 @@ impl hal::device::Device for Device { } } - unsafe fn create_buffer_view>( + unsafe fn create_buffer_view( &self, buffer: &n::Buffer, format_maybe: Option, - range: R, + sub: buffer::SubRange, ) -> Result { let (raw, base_range, options) = match *buffer { 
n::Buffer::Bound { @@ -2385,37 +2434,30 @@ impl hal::device::Device for Device { } => (raw, range, options), n::Buffer::Unbound { .. } => panic!("Unexpected Buffer::Unbound"), }; - let start = base_range.start + *range.start().unwrap_or(&0); - let end_rough = match range.end() { - Some(end) => base_range.start + end, - None => base_range.end, - }; + let start = base_range.start + sub.offset; + let size_rough = sub.size.unwrap_or(base_range.end - start); let format = match format_maybe { Some(fmt) => fmt, None => { - return Err(buffer::ViewCreationError::UnsupportedFormat { - format: format_maybe, - }); + return Err(buffer::ViewCreationError::UnsupportedFormat(format_maybe)); } }; let format_desc = format.surface_desc(); if format_desc.aspects != format::Aspects::COLOR || format_desc.is_compressed() { // Vadlidator says "Linear texture: cannot create compressed, depth, or stencil textures" - return Err(buffer::ViewCreationError::UnsupportedFormat { - format: format_maybe, - }); + return Err(buffer::ViewCreationError::UnsupportedFormat(format_maybe)); } //Note: we rely on SPIRV-Cross to use the proper 2D texel indexing here - let texel_count = (end_rough - start) * 8 / format_desc.bits as u64; + let texel_count = size_rough * 8 / format_desc.bits as u64; let col_count = cmp::min(texel_count, self.shared.private_caps.max_texture_size); let row_count = (texel_count + self.shared.private_caps.max_texture_size - 1) / self.shared.private_caps.max_texture_size; - let mtl_format = self.shared.private_caps.map_format(format).ok_or( - buffer::ViewCreationError::UnsupportedFormat { - format: format_maybe, - }, - )?; + let mtl_format = self + .shared + .private_caps + .map_format(format) + .ok_or(buffer::ViewCreationError::UnsupportedFormat(format_maybe))?; let descriptor = metal::TextureDescriptor::new(); descriptor.set_texture_type(MTLTextureType::D2); @@ -2663,8 +2705,14 @@ impl hal::device::Device for Device { assert_eq!(mip_sizes.len(), 1); if offset == 0x0 && 
cpu_buffer.length() == mip_sizes[0] { cpu_buffer.set_label(name); - } else { - cpu_buffer.add_debug_marker(name, NSRange { location: offset, length: mip_sizes[0] }); + } else if self.shared.private_caps.supports_debug_markers { + cpu_buffer.add_debug_marker( + name, + NSRange { + location: offset, + length: mip_sizes[0], + }, + ); } n::ImageLike::Buffer(n::Buffer::Bound { raw: cpu_buffer.clone(), @@ -2695,7 +2743,7 @@ impl hal::device::Device for Device { format: format::Format, swizzle: format::Swizzle, range: image::SubresourceRange, - ) -> Result { + ) -> Result { let mtl_format = match self .shared .private_caps @@ -2704,7 +2752,7 @@ impl hal::device::Device for Device { Some(f) => f, None => { error!("failed to swizzle format {:?} with {:?}", format, swizzle); - return Err(image::ViewError::BadFormat(format)); + return Err(image::ViewCreationError::BadFormat(format)); } }; let raw = image.like.as_texture(); @@ -2979,9 +3027,22 @@ impl hal::device::Device for Device { unsafe fn set_image_name(&self, image: &mut n::Image, name: &str) { match image { - n::Image { like: n::ImageLike::Buffer(ref mut buf), .. } => self.set_buffer_name(buf, name), - n::Image { like: n::ImageLike::Texture(ref tex), .. } => tex.set_label(name), - n::Image { like: n::ImageLike::Unbound { name: ref mut unbound_name, .. }, .. } => { + n::Image { + like: n::ImageLike::Buffer(ref mut buf), + .. + } => self.set_buffer_name(buf, name), + n::Image { + like: n::ImageLike::Texture(ref tex), + .. + } => tex.set_label(name), + n::Image { + like: + n::ImageLike::Unbound { + name: ref mut unbound_name, + .. + }, + .. + } => { *unbound_name = name.to_string(); } }; @@ -2989,14 +3050,24 @@ impl hal::device::Device for Device { unsafe fn set_buffer_name(&self, buffer: &mut n::Buffer, name: &str) { match buffer { - n::Buffer::Unbound { name: ref mut unbound_name, .. } => { + n::Buffer::Unbound { + name: ref mut unbound_name, + .. 
+ } => { *unbound_name = name.to_string(); - }, - n::Buffer::Bound { ref raw, ref range, .. } => { - raw.add_debug_marker( - name, - NSRange { location: range.start, length: range.end - range.start } - ); + } + n::Buffer::Bound { + ref raw, ref range, .. + } => { + if self.shared.private_caps.supports_debug_markers { + raw.add_debug_marker( + name, + NSRange { + location: range.start, + length: range.end - range.start, + }, + ); + } } } } @@ -3009,14 +3080,11 @@ impl hal::device::Device for Device { command_buffer.name = name.to_string(); } - unsafe fn set_semaphore_name(&self, _semaphore: &mut n::Semaphore, _name: &str) { - } + unsafe fn set_semaphore_name(&self, _semaphore: &mut n::Semaphore, _name: &str) {} - unsafe fn set_fence_name(&self, _fence: &mut n::Fence, _name: &str) { - } + unsafe fn set_fence_name(&self, _fence: &mut n::Fence, _name: &str) {} - unsafe fn set_framebuffer_name(&self, _framebuffer: &mut n::Framebuffer, _name: &str) { - } + unsafe fn set_framebuffer_name(&self, _framebuffer: &mut n::Framebuffer, _name: &str) {} unsafe fn set_render_pass_name(&self, render_pass: &mut n::RenderPass, name: &str) { render_pass.name = name.to_string(); diff --git a/third_party/rust/gfx-backend-metal/src/internal.rs b/third_party/rust/gfx-backend-metal/src/internal.rs index 6dec4f28b6a8..c65e6b28a68c 100644 --- a/third_party/rust/gfx-backend-metal/src/internal.rs +++ b/third_party/rust/gfx-backend-metal/src/internal.rs @@ -14,7 +14,6 @@ use storage_map::{StorageMap, StorageMapGuard}; use std::mem; - pub type FastStorageMap = StorageMap>; pub type FastStorageGuard<'a, V> = StorageMapGuard<'a, RawRwLock, V>; @@ -330,10 +329,10 @@ impl ImageClearPipes { for i in 0 .. 
1 { let mtl_attribute_desc = vertex_descriptor .attributes() - .object_at(i as u64) + .object_at(i) .expect("too many vertex attributes"); mtl_attribute_desc.set_buffer_index(0); - mtl_attribute_desc.set_offset((i * mem::size_of::<[f32; 4]>()) as _); + mtl_attribute_desc.set_offset(i * mem::size_of::<[f32; 4]>() as u64); mtl_attribute_desc.set_format(metal::MTLVertexFormat::Float4); } pipeline.set_vertex_descriptor(Some(&vertex_descriptor)); @@ -426,10 +425,10 @@ impl ImageBlitPipes { for i in 0 .. 2 { let mtl_attribute_desc = vertex_descriptor .attributes() - .object_at(i as u64) + .object_at(i) .expect("too many vertex attributes"); mtl_attribute_desc.set_buffer_index(0); - mtl_attribute_desc.set_offset((i * mem::size_of::<[f32; 4]>()) as _); + mtl_attribute_desc.set_offset(i * mem::size_of::<[f32; 4]>() as u64); mtl_attribute_desc.set_format(metal::MTLVertexFormat::Float4); } pipeline.set_vertex_descriptor(Some(&vertex_descriptor)); diff --git a/third_party/rust/gfx-backend-metal/src/lib.rs b/third_party/rust/gfx-backend-metal/src/lib.rs index 868767c302e9..29c9732f2229 100644 --- a/third_party/rust/gfx-backend-metal/src/lib.rs +++ b/third_party/rust/gfx-backend-metal/src/lib.rs @@ -70,14 +70,14 @@ use core_graphics::geometry::CGRect; #[cfg(feature = "dispatch")] use dispatch; use foreign_types::ForeignTypeRef; +use lazy_static::lazy_static; use metal::MTLFeatureSet; use metal::MTLLanguageVersion; use objc::{ declare::ClassDecl, - runtime::{Object, BOOL, YES, Sel, Class} + runtime::{Class, Object, Sel, BOOL, YES}, }; use parking_lot::{Condvar, Mutex}; -use lazy_static::lazy_static; use std::mem; use std::os::raw::c_void; @@ -98,7 +98,6 @@ pub use crate::window::{AcquireMode, CAMetalLayer, Surface, Swapchain}; pub type GraphicsCommandPool = CommandPool; - //TODO: investigate why exactly using `u8` here is slower (~5% total). /// A type representing Metal binding's resource index. 
type ResourceIndex = u32; @@ -216,7 +215,7 @@ impl hal::Instance for Instance { fn create(_: &str, _: u32) -> Result { Ok(Instance { experiments: Experiments::default(), - gfx_managed_metal_layer_delegate: GfxManagedMetalLayerDelegate::new() + gfx_managed_metal_layer_delegate: GfxManagedMetalLayerDelegate::new(), }) } @@ -292,7 +291,7 @@ extern "C" fn layer_should_inherit_contents_scale_from_window( _: Sel, _layer: *mut Object, _new_scale: CGFloat, - _from_window: *mut Object + _from_window: *mut Object, ) -> BOOL { return YES; } @@ -303,7 +302,8 @@ struct GfxManagedMetalLayerDelegate(*mut Object); impl GfxManagedMetalLayerDelegate { pub fn new() -> Self { unsafe { - let mut delegate: *mut Object = msg_send![*GFX_MANAGED_METAL_LAYER_DELEGATE_CLASS, alloc]; + let mut delegate: *mut Object = + msg_send![*GFX_MANAGED_METAL_LAYER_DELEGATE_CLASS, alloc]; delegate = msg_send![delegate, init]; Self(delegate) } @@ -651,6 +651,7 @@ struct PrivateCapabilities { shared_textures: bool, mutable_comparison_samplers: bool, base_instance: bool, + base_vertex_instance_drawing: bool, dual_source_blending: bool, low_power: bool, headless: bool, @@ -713,6 +714,7 @@ struct PrivateCapabilities { max_texture_layers: u64, max_fragment_input_components: u64, sample_count_mask: u8, + supports_debug_markers: bool, } impl PrivateCapabilities { @@ -794,6 +796,17 @@ impl PrivateCapabilities { MUTABLE_COMPARISON_SAMPLER_SUPPORT, ), base_instance: Self::supports_any(&device, BASE_INSTANCE_SUPPORT), + base_vertex_instance_drawing: Self::supports_any( + &device, + &[ + MTLFeatureSet::iOS_GPUFamily3_v1, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::iOS_GPUFamily5_v1, + MTLFeatureSet::tvOS_GPUFamily2_v1, + MTLFeatureSet::macOS_GPUFamily1_v1, + MTLFeatureSet::macOS_GPUFamily2_v1, + ], + ), dual_source_blending: Self::supports_any(&device, DUAL_SOURCE_BLEND_SUPPORT), low_power: !os_is_mac || device.is_low_power(), headless: os_is_mac && device.is_headless(), @@ -958,6 +971,20 @@ impl 
PrivateCapabilities { max_texture_layers: 2048, max_fragment_input_components: if os_is_mac { 128 } else { 60 }, sample_count_mask, + supports_debug_markers: Self::supports_any( + &device, + &[ + MTLFeatureSet::macOS_GPUFamily1_v2, + MTLFeatureSet::macOS_GPUFamily2_v1, + MTLFeatureSet::iOS_GPUFamily1_v3, + MTLFeatureSet::iOS_GPUFamily2_v3, + MTLFeatureSet::iOS_GPUFamily3_v2, + MTLFeatureSet::iOS_GPUFamily4_v1, + MTLFeatureSet::iOS_GPUFamily5_v1, + MTLFeatureSet::tvOS_GPUFamily1_v2, + MTLFeatureSet::tvOS_GPUFamily2_v1, + ], + ), } } diff --git a/third_party/rust/gfx-backend-metal/src/native.rs b/third_party/rust/gfx-backend-metal/src/native.rs index 0f1729540770..35b0949dd977 100644 --- a/third_party/rust/gfx-backend-metal/src/native.rs +++ b/third_party/rust/gfx-backend-metal/src/native.rs @@ -14,9 +14,9 @@ use hal::{ buffer, format::FormatDesc, image, + memory::Segment, pass::{Attachment, AttachmentId}, pso, - range::RangeArg, MemoryTypeId, }; use range_alloc::RangeAllocator; @@ -36,7 +36,6 @@ use std::{ sync::{atomic::AtomicBool, Arc}, }; - pub type EntryPointMap = FastHashMap; /// An index of a resource within descriptor pool. 
pub type PoolResourceIndex = u32; @@ -108,7 +107,6 @@ pub struct Framebuffer { unsafe impl Send for Framebuffer {} unsafe impl Sync for Framebuffer {} - #[derive(Clone, Debug)] pub struct ResourceData { pub buffers: T, @@ -814,21 +812,22 @@ impl From for DescriptorContent { fn from(ty: pso::DescriptorType) -> Self { match ty { pso::DescriptorType::Sampler => DescriptorContent::SAMPLER, - pso::DescriptorType::CombinedImageSampler => { - DescriptorContent::TEXTURE | DescriptorContent::SAMPLER - } - pso::DescriptorType::SampledImage - | pso::DescriptorType::StorageImage - | pso::DescriptorType::UniformTexelBuffer - | pso::DescriptorType::StorageTexelBuffer - | pso::DescriptorType::InputAttachment => DescriptorContent::TEXTURE, - pso::DescriptorType::UniformBuffer | pso::DescriptorType::StorageBuffer => { - DescriptorContent::BUFFER - } - pso::DescriptorType::UniformBufferDynamic - | pso::DescriptorType::StorageBufferDynamic => { - DescriptorContent::BUFFER | DescriptorContent::DYNAMIC_BUFFER - } + pso::DescriptorType::Image { ty } => match ty { + pso::ImageDescriptorType::Sampled { with_sampler: true } => { + DescriptorContent::TEXTURE | DescriptorContent::SAMPLER + } + _ => DescriptorContent::TEXTURE, + }, + pso::DescriptorType::Buffer { format, .. } => match format { + pso::BufferDescriptorFormat::Structured { dynamic_offset } => { + match dynamic_offset { + true => DescriptorContent::BUFFER | DescriptorContent::DYNAMIC_BUFFER, + false => DescriptorContent::BUFFER, + } + } + pso::BufferDescriptorFormat::Texel => DescriptorContent::TEXTURE, + }, + pso::DescriptorType::InputAttachment => DescriptorContent::TEXTURE, } } } @@ -904,8 +903,8 @@ impl Memory { Memory { heap, size } } - pub(crate) fn resolve>(&self, range: &R) -> Range { - *range.start().unwrap_or(&0) .. *range.end().unwrap_or(&self.size) + pub(crate) fn resolve(&self, range: &Segment) -> Range { + range.offset .. 
range.size.map_or(self.size, |s| range.offset + s) } } @@ -932,15 +931,20 @@ impl ArgumentArray { match ty { Dt::Sampler => MTLResourceUsage::empty(), - Dt::CombinedImageSampler | Dt::SampledImage | Dt::InputAttachment => { - MTLResourceUsage::Sample - } - Dt::UniformTexelBuffer => MTLResourceUsage::Sample, - Dt::UniformBuffer | Dt::UniformBufferDynamic => MTLResourceUsage::Read, - Dt::StorageImage - | Dt::StorageBuffer - | Dt::StorageBufferDynamic - | Dt::StorageTexelBuffer => MTLResourceUsage::Write, + Dt::Image { ty } => match ty { + pso::ImageDescriptorType::Sampled { .. } => MTLResourceUsage::Sample, + pso::ImageDescriptorType::Storage { read_only: true } => MTLResourceUsage::Read, + pso::ImageDescriptorType::Storage { .. } => MTLResourceUsage::Write, + }, + Dt::Buffer { ty, format } => match ty { + pso::BufferDescriptorType::Storage { read_only: true } => MTLResourceUsage::Read, + pso::BufferDescriptorType::Storage { .. } => MTLResourceUsage::Write, + pso::BufferDescriptorType::Uniform => match format { + pso::BufferDescriptorFormat::Structured { .. 
} => MTLResourceUsage::Read, + pso::BufferDescriptorFormat::Texel => MTLResourceUsage::Sample, + }, + }, + Dt::InputAttachment => MTLResourceUsage::Sample, } } diff --git a/third_party/rust/gfx-backend-metal/src/soft.rs b/third_party/rust/gfx-backend-metal/src/soft.rs index c0c50c502194..9a4ad9bb0b76 100644 --- a/third_party/rust/gfx-backend-metal/src/soft.rs +++ b/third_party/rust/gfx-backend-metal/src/soft.rs @@ -13,7 +13,6 @@ use metal; use std::{fmt::Debug, ops::Range}; - pub type CacheResourceIndex = u32; pub trait Resources: Debug { diff --git a/third_party/rust/gfx-backend-metal/src/window.rs b/third_party/rust/gfx-backend-metal/src/window.rs index a16423cd1cd9..a298283ab09a 100644 --- a/third_party/rust/gfx-backend-metal/src/window.rs +++ b/third_party/rust/gfx-backend-metal/src/window.rs @@ -22,7 +22,6 @@ use std::ptr::NonNull; use std::sync::Arc; use std::thread; - //TODO: make it a weak pointer, so that we know which // frames can be replaced if we receive an unknown // texture pointer by an acquired drawable. 
@@ -247,7 +246,6 @@ impl Drop for Frame { } } - #[derive(Clone, Debug, PartialEq)] pub enum AcquireMode { Wait, @@ -414,7 +412,7 @@ impl w::Surface for Surface { let can_set_display_sync = device_caps.os_is_mac && device_caps.has_version_at_least(10, 13); - w::SurfaceCapabilities { + w::SurfaceCapabilities { present_modes: if can_set_display_sync { w::PresentMode::FIFO | w::PresentMode::IMMEDIATE } else { @@ -446,7 +444,7 @@ impl w::Surface for Surface { } fn supported_formats(&self, _physical_device: &PhysicalDevice) -> Option> { - Some(vec![ + Some(vec![ format::Format::Bgra8Unorm, format::Format::Bgra8Srgb, format::Format::Rgba16Sfloat, @@ -532,8 +530,10 @@ impl Device { match old.resize_fill { ResizeFill::Empty => {} ResizeFill::Clear(value) => { - let descriptor = metal::RenderPassDescriptor::new().to_owned(); - let attachment = descriptor.color_attachments().object_at(0).unwrap(); + let descriptor = + metal::RenderPassDescriptor::new().to_owned(); + let attachment = + descriptor.color_attachments().object_at(0).unwrap(); attachment.set_texture(Some(texture)); attachment.set_store_action(metal::MTLStoreAction::Store); attachment.set_load_action(metal::MTLLoadAction::Clear); @@ -543,7 +543,8 @@ impl Device { value[2] as _, value[3] as _, )); - let encoder = cmd_buffer.new_render_command_encoder(&descriptor); + let encoder = + cmd_buffer.new_render_command_encoder(&descriptor); encoder.end_encoding(); } ResizeFill::Blit => { diff --git a/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json b/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json index 00ae898dc8e9..c4fc5f4275ba 100644 --- a/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json +++ b/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"a9c1718fe065e2e4e9bb5f9d35472f1d515b948ce313520f207b3d6c006c6a66","README.md":"8cc42e022567870c58a53ff1cb6f94e961482e789fe5e22f9960408a43cf8405","src/command.rs":"4fb6d6ffb4b3e312f0e84818e80adaeef6206fe87827c447cf1b25d3effb74a5","src/conv.rs":"075f844081adb4fb7a76ab3950b017a44648a5eb9bf3721647a4f5bce617105b","src/device.rs":"f53127dc3f66899a8feb736425a50d498a493cdd3794782856bdbd6dfadc0059","src/info.rs":"4a21b54f85ff73c538ca2f57f4d371eb862b5a28f126cd0ecafd37fc6dfd1318","src/lib.rs":"fb6db733b117d87511af0217e8157e5313bb1d4bd99e1671203db9bf1111709e","src/native.rs":"fc8c7d40054f59eeb36db5c4c439e2173cd9e967c4d69797f223e1c58748f71d","src/pool.rs":"8bfd5f750baef41a7edc539433f7e417e367dc60debfcb002188e12b0f9bd933","src/window.rs":"9f6f7593ed74e013c475504cc0d40b9d0989c7a7a2138172861aa86874d3bc2f"},"package":"f1b8d901941d1734d307dacd8e5f00c89ee8fb8e78b4dab3edd91248150b26b4"} \ No newline at end of file +{"files":{"Cargo.toml":"b0bb273d2dc664f6e8cf5aa24775ebfd56d27b51b169517ef9e9d9f38e7717a2","README.md":"8ac155de510fa20cfceb7a9c9a9891abb28b11a0bd930470c3fd145dbaea2d2b","src/command.rs":"0d51171269e626d5a198f6d04e7e27435edc9ee3fd06e79bbc41c1b9abe07b95","src/conv.rs":"0548ce0fec00e91d214fe0b04691173235b04bb0040d6e60338f4ceb869047e3","src/device.rs":"d3bab36762a929cfcf7c9c6d7b78f70ed8402389cd8e80863359cbedec63e58a","src/info.rs":"5f278828dc418fc052e6bf9d2baa6396270646d78148a62e0787082a30787899","src/lib.rs":"8b51447dc0a12836ca75f201db588f3e8b5e7b7179a35ecdfd1405b72eca8ceb","src/native.rs":"d7eb53631db7a1a59a3880602e67c27bb39a4385f5bffcf659fc39499156effe","src/pool.rs":"b964b974063e64113233645199daa96fa453851bb7fe566ec7bc9b254b2728d2","src/window.rs":"bfad91e51eecae9d4d534b362dafa244e28c864d3cef5ce53d3798e39cb5bffc"},"package":"ebd1dee09bd8d8f1ba52c5ba22d1f70c7ffa990c5eb245eb3ef2d0206f631673"} \ No newline at end of file diff --git a/third_party/rust/gfx-backend-vulkan/Cargo.toml b/third_party/rust/gfx-backend-vulkan/Cargo.toml index 
5f7a7880abc0..95013aa468a8 100644 --- a/third_party/rust/gfx-backend-vulkan/Cargo.toml +++ b/third_party/rust/gfx-backend-vulkan/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "gfx-backend-vulkan" -version = "0.4.3" +version = "0.5.2" authors = ["The Gfx-rs Developers"] description = "Vulkan API backend for gfx-rs" homepage = "https://github.com/gfx-rs/gfx" @@ -29,13 +29,13 @@ name = "gfx_backend_vulkan" version = "0.5" [dependencies.ash] -version = "0.29.0" +version = "0.30" [dependencies.byteorder] version = "1" [dependencies.hal] -version = "0.4" +version = "0.5" package = "gfx-hal" [dependencies.lazy_static] @@ -52,7 +52,7 @@ version = "0.1.9" optional = true [dependencies.smallvec] -version = "0.6" +version = "1.0" [features] default = [] @@ -63,7 +63,7 @@ features = ["xlib"] optional = true [target."cfg(all(unix, not(target_os = \"macos\"), not(target_os = \"ios\"), not(target_os = \"android\")))".dependencies.xcb] -version = "0.8" +version = "0.9" optional = true [target."cfg(target_os = \"macos\")".dependencies.core-graphics] version = "0.19" diff --git a/third_party/rust/gfx-backend-vulkan/README.md b/third_party/rust/gfx-backend-vulkan/README.md index 0e8420ecb809..66e30fcece1d 100644 --- a/third_party/rust/gfx-backend-vulkan/README.md +++ b/third_party/rust/gfx-backend-vulkan/README.md @@ -1,13 +1,13 @@ -# gfx-backend-vulkan - -[Vulkan](https://www.khronos.org/vulkan/) backend for gfx-rs. - -## Normalized Coordinates - -Render | Depth | Texture --------|-------|-------- -![render_coordinates](../../../info/vk_render_coordinates.png) | ![depth_coordinates](../../../info/dx_depth_coordinates.png) | ![texture_coordinates](../../../info/dx_texture_coordinates.png) - -## Mirroring - -HAL is modelled after Vulkan, so everything should be 1:1. +# gfx-backend-vulkan + +[Vulkan](https://www.khronos.org/vulkan/) backend for gfx-rs. 
+ +## Normalized Coordinates + +Render | Depth | Texture +-------|-------|-------- +![render_coordinates](../../../info/vk_render_coordinates.png) | ![depth_coordinates](../../../info/dx_depth_coordinates.png) | ![texture_coordinates](../../../info/dx_texture_coordinates.png) + +## Mirroring + +HAL is modelled after Vulkan, so everything should be 1:1. diff --git a/third_party/rust/gfx-backend-vulkan/src/command.rs b/third_party/rust/gfx-backend-vulkan/src/command.rs index 85564cbdd47b..4e5451256e68 100644 --- a/third_party/rust/gfx-backend-vulkan/src/command.rs +++ b/third_party/rust/gfx-backend-vulkan/src/command.rs @@ -1,971 +1,1003 @@ -use ash::version::DeviceV1_0; -use ash::vk; -use smallvec::SmallVec; -use std::borrow::Borrow; -use std::ops::Range; -use std::sync::Arc; -use std::{mem, ptr, slice}; - -use crate::{conv, native as n, Backend, RawDevice}; -use hal::{ - buffer, - command as com, - format::Aspects, - image::{Filter, Layout, SubresourceRange}, - memory, - pso, - query, - range::RangeArg, - DrawCount, - IndexCount, - InstanceCount, - VertexCount, - VertexOffset, - WorkGroupCount, -}; - -#[derive(Debug)] -pub struct CommandBuffer { - pub raw: vk::CommandBuffer, - pub device: Arc, -} - -fn map_subpass_contents(contents: com::SubpassContents) -> vk::SubpassContents { - match contents { - com::SubpassContents::Inline => vk::SubpassContents::INLINE, - com::SubpassContents::SecondaryBuffers => vk::SubpassContents::SECONDARY_COMMAND_BUFFERS, - } -} - -fn map_buffer_image_regions(_image: &n::Image, regions: T) -> SmallVec<[vk::BufferImageCopy; 16]> -where - T: IntoIterator, - T::Item: Borrow, -{ - regions - .into_iter() - .map(|region| { - let r = region.borrow(); - let image_subresource = conv::map_subresource_layers(&r.image_layers); - vk::BufferImageCopy { - buffer_offset: r.buffer_offset, - buffer_row_length: r.buffer_width, - buffer_image_height: r.buffer_height, - image_subresource, - image_offset: conv::map_offset(r.image_offset), - image_extent: 
conv::map_extent(r.image_extent), - } - }) - .collect() -} - -struct BarrierSet { - global: SmallVec<[vk::MemoryBarrier; 4]>, - buffer: SmallVec<[vk::BufferMemoryBarrier; 4]>, - image: SmallVec<[vk::ImageMemoryBarrier; 4]>, -} - -fn destructure_barriers<'a, T>(barriers: T) -> BarrierSet -where - T: IntoIterator, - T::Item: Borrow>, -{ - let mut global: SmallVec<[vk::MemoryBarrier; 4]> = SmallVec::new(); - let mut buffer: SmallVec<[vk::BufferMemoryBarrier; 4]> = SmallVec::new(); - let mut image: SmallVec<[vk::ImageMemoryBarrier; 4]> = SmallVec::new(); - - for barrier in barriers { - match *barrier.borrow() { - memory::Barrier::AllBuffers(ref access) => { - global.push(vk::MemoryBarrier { - s_type: vk::StructureType::MEMORY_BARRIER, - p_next: ptr::null(), - src_access_mask: conv::map_buffer_access(access.start), - dst_access_mask: conv::map_buffer_access(access.end), - }); - } - memory::Barrier::AllImages(ref access) => { - global.push(vk::MemoryBarrier { - s_type: vk::StructureType::MEMORY_BARRIER, - p_next: ptr::null(), - src_access_mask: conv::map_image_access(access.start), - dst_access_mask: conv::map_image_access(access.end), - }); - } - memory::Barrier::Buffer { - ref states, - target, - ref range, - ref families, - } => { - let families = match families { - Some(f) => f.start.0 as u32 .. f.end.0 as u32, - None => vk::QUEUE_FAMILY_IGNORED .. 
vk::QUEUE_FAMILY_IGNORED, - }; - buffer.push(vk::BufferMemoryBarrier { - s_type: vk::StructureType::BUFFER_MEMORY_BARRIER, - p_next: ptr::null(), - src_access_mask: conv::map_buffer_access(states.start), - dst_access_mask: conv::map_buffer_access(states.end), - src_queue_family_index: families.start, - dst_queue_family_index: families.end, - buffer: target.raw, - offset: range.start.unwrap_or(0), - size: range - .end - .map_or(vk::WHOLE_SIZE, |end| end - range.start.unwrap_or(0)), - }); - } - memory::Barrier::Image { - ref states, - target, - ref range, - ref families, - } => { - let subresource_range = conv::map_subresource_range(range); - let families = match families { - Some(f) => f.start.0 as u32 .. f.end.0 as u32, - None => vk::QUEUE_FAMILY_IGNORED .. vk::QUEUE_FAMILY_IGNORED, - }; - image.push(vk::ImageMemoryBarrier { - s_type: vk::StructureType::IMAGE_MEMORY_BARRIER, - p_next: ptr::null(), - src_access_mask: conv::map_image_access(states.start.0), - dst_access_mask: conv::map_image_access(states.end.0), - old_layout: conv::map_image_layout(states.start.1), - new_layout: conv::map_image_layout(states.end.1), - src_queue_family_index: families.start, - dst_queue_family_index: families.end, - image: target.raw, - subresource_range, - }); - } - } - } - - BarrierSet { - global, - buffer, - image, - } -} - -impl CommandBuffer { - fn bind_descriptor_sets( - &mut self, - bind_point: vk::PipelineBindPoint, - layout: &n::PipelineLayout, - first_set: usize, - sets: I, - offsets: J, - ) where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow, - { - let sets: SmallVec<[_; 16]> = sets.into_iter().map(|set| set.borrow().raw).collect(); - let dynamic_offsets: SmallVec<[_; 16]> = - offsets.into_iter().map(|offset| *offset.borrow()).collect(); - - unsafe { - self.device.0.cmd_bind_descriptor_sets( - self.raw, - bind_point, - layout.raw, - first_set as u32, - &sets, - &dynamic_offsets, - ); - } - } -} - -impl com::CommandBuffer for CommandBuffer { - 
unsafe fn begin( - &mut self, - flags: com::CommandBufferFlags, - info: com::CommandBufferInheritanceInfo, - ) { - let inheritance_info = vk::CommandBufferInheritanceInfo { - s_type: vk::StructureType::COMMAND_BUFFER_INHERITANCE_INFO, - p_next: ptr::null(), - render_pass: info - .subpass - .map_or(vk::RenderPass::null(), |subpass| subpass.main_pass.raw), - subpass: info.subpass.map_or(0, |subpass| subpass.index as u32), - framebuffer: info - .framebuffer - .map_or(vk::Framebuffer::null(), |buffer| buffer.raw), - occlusion_query_enable: if info.occlusion_query_enable { - vk::TRUE - } else { - vk::FALSE - }, - query_flags: conv::map_query_control_flags(info.occlusion_query_flags), - pipeline_statistics: conv::map_pipeline_statistics(info.pipeline_statistics), - }; - - let info = vk::CommandBufferBeginInfo { - s_type: vk::StructureType::COMMAND_BUFFER_BEGIN_INFO, - p_next: ptr::null(), - flags: conv::map_command_buffer_flags(flags), - p_inheritance_info: &inheritance_info, - }; - - assert_eq!(Ok(()), self.device.0.begin_command_buffer(self.raw, &info)); - } - - unsafe fn finish(&mut self) { - assert_eq!(Ok(()), self.device.0.end_command_buffer(self.raw)); - } - - unsafe fn reset(&mut self, release_resources: bool) { - let flags = if release_resources { - vk::CommandBufferResetFlags::RELEASE_RESOURCES - } else { - vk::CommandBufferResetFlags::empty() - }; - - assert_eq!(Ok(()), self.device.0.reset_command_buffer(self.raw, flags)); - } - - unsafe fn begin_render_pass( - &mut self, - render_pass: &n::RenderPass, - frame_buffer: &n::Framebuffer, - render_area: pso::Rect, - clear_values: T, - first_subpass: com::SubpassContents, - ) where - T: IntoIterator, - T::Item: Borrow, - { - let render_area = conv::map_rect(&render_area); - - // Vulkan wants one clear value per attachment (even those that don't need clears), - // but can receive less clear values than total attachments. 
- let clear_value_count = 64 - render_pass.clear_attachments_mask.leading_zeros() as u32; - let mut clear_value_iter = clear_values.into_iter(); - let raw_clear_values = (0 .. clear_value_count) - .map(|i| { - if render_pass.clear_attachments_mask & (1 << i) != 0 { - // Vulkan and HAL share same memory layout - let next = clear_value_iter.next().unwrap(); - mem::transmute(*next.borrow()) - } else { - mem::zeroed() - } - }) - .collect::>(); - - let info = vk::RenderPassBeginInfo { - s_type: vk::StructureType::RENDER_PASS_BEGIN_INFO, - p_next: ptr::null(), - render_pass: render_pass.raw, - framebuffer: frame_buffer.raw, - render_area, - clear_value_count, - p_clear_values: raw_clear_values.as_ptr(), - }; - - let contents = map_subpass_contents(first_subpass); - self.device - .0 - .cmd_begin_render_pass(self.raw, &info, contents); - } - - unsafe fn next_subpass(&mut self, contents: com::SubpassContents) { - let contents = map_subpass_contents(contents); - self.device.0.cmd_next_subpass(self.raw, contents); - } - - unsafe fn end_render_pass(&mut self) { - self.device.0.cmd_end_render_pass(self.raw); - } - - unsafe fn pipeline_barrier<'a, T>( - &mut self, - stages: Range, - dependencies: memory::Dependencies, - barriers: T, - ) where - T: IntoIterator, - T::Item: Borrow>, - { - let BarrierSet { - global, - buffer, - image, - } = destructure_barriers(barriers); - - self.device.0.cmd_pipeline_barrier( - self.raw, // commandBuffer - conv::map_pipeline_stage(stages.start), - conv::map_pipeline_stage(stages.end), - mem::transmute(dependencies), - &global, - &buffer, - &image, - ); - } - - unsafe fn fill_buffer(&mut self, buffer: &n::Buffer, range: R, data: u32) - where - R: RangeArg, - { - let (offset, size) = conv::map_range_arg(&range); - self.device - .0 - .cmd_fill_buffer(self.raw, buffer.raw, offset, size, data); - } - - unsafe fn update_buffer(&mut self, buffer: &n::Buffer, offset: buffer::Offset, data: &[u8]) { - self.device - .0 - .cmd_update_buffer(self.raw, 
buffer.raw, offset, data); - } - - unsafe fn clear_image( - &mut self, - image: &n::Image, - layout: Layout, - value: com::ClearValue, - subresource_ranges: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - let mut color_ranges = Vec::new(); - let mut ds_ranges = Vec::new(); - - for subresource_range in subresource_ranges { - let sub = subresource_range.borrow(); - let aspect_ds = sub.aspects & (Aspects::DEPTH | Aspects::STENCIL); - let vk_range = conv::map_subresource_range(sub); - if sub.aspects.contains(Aspects::COLOR) { - color_ranges.push(vk::ImageSubresourceRange { - aspect_mask: conv::map_image_aspects(Aspects::COLOR), - ..vk_range - }); - } - if !aspect_ds.is_empty() { - ds_ranges.push(vk::ImageSubresourceRange { - aspect_mask: conv::map_image_aspects(aspect_ds), - ..vk_range - }); - } - } - - // Vulkan and HAL share same memory layout - let color_value = mem::transmute(value.color); - let depth_stencil_value = vk::ClearDepthStencilValue { - depth: value.depth_stencil.depth, - stencil: value.depth_stencil.stencil, - }; - - if !color_ranges.is_empty() { - self.device.0.cmd_clear_color_image( - self.raw, - image.raw, - conv::map_image_layout(layout), - &color_value, - &color_ranges, - ) - } - if !ds_ranges.is_empty() { - self.device.0.cmd_clear_depth_stencil_image( - self.raw, - image.raw, - conv::map_image_layout(layout), - &depth_stencil_value, - &ds_ranges, - ) - } - } - - unsafe fn clear_attachments(&mut self, clears: T, rects: U) - where - T: IntoIterator, - T::Item: Borrow, - U: IntoIterator, - U::Item: Borrow, - { - let clears: SmallVec<[vk::ClearAttachment; 16]> = clears - .into_iter() - .map(|clear| match *clear.borrow() { - com::AttachmentClear::Color { index, value } => vk::ClearAttachment { - aspect_mask: vk::ImageAspectFlags::COLOR, - color_attachment: index as _, - clear_value: vk::ClearValue { - color: mem::transmute(value), - }, - }, - com::AttachmentClear::DepthStencil { depth, stencil } => vk::ClearAttachment { - aspect_mask: if 
depth.is_some() { - vk::ImageAspectFlags::DEPTH - } else { - vk::ImageAspectFlags::empty() - } | if stencil.is_some() { - vk::ImageAspectFlags::STENCIL - } else { - vk::ImageAspectFlags::empty() - }, - color_attachment: 0, - clear_value: vk::ClearValue { - depth_stencil: vk::ClearDepthStencilValue { - depth: depth.unwrap_or_default(), - stencil: stencil.unwrap_or_default(), - }, - }, - }, - }) - .collect(); - - let rects: SmallVec<[vk::ClearRect; 16]> = rects - .into_iter() - .map(|rect| conv::map_clear_rect(rect.borrow())) - .collect(); - - self.device - .0 - .cmd_clear_attachments(self.raw, &clears, &rects) - } - - unsafe fn resolve_image( - &mut self, - src: &n::Image, - src_layout: Layout, - dst: &n::Image, - dst_layout: Layout, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - let regions = regions - .into_iter() - .map(|region| { - let r = region.borrow(); - vk::ImageResolve { - src_subresource: conv::map_subresource_layers(&r.src_subresource), - src_offset: conv::map_offset(r.src_offset), - dst_subresource: conv::map_subresource_layers(&r.dst_subresource), - dst_offset: conv::map_offset(r.dst_offset), - extent: conv::map_extent(r.extent), - } - }) - .collect::>(); - - self.device.0.cmd_resolve_image( - self.raw, - src.raw, - conv::map_image_layout(src_layout), - dst.raw, - conv::map_image_layout(dst_layout), - ®ions, - ); - } - - unsafe fn blit_image( - &mut self, - src: &n::Image, - src_layout: Layout, - dst: &n::Image, - dst_layout: Layout, - filter: Filter, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - let regions = regions - .into_iter() - .map(|region| { - let r = region.borrow(); - vk::ImageBlit { - src_subresource: conv::map_subresource_layers(&r.src_subresource), - src_offsets: [ - conv::map_offset(r.src_bounds.start), - conv::map_offset(r.src_bounds.end), - ], - dst_subresource: conv::map_subresource_layers(&r.dst_subresource), - dst_offsets: [ - conv::map_offset(r.dst_bounds.start), - 
conv::map_offset(r.dst_bounds.end), - ], - } - }) - .collect::>(); - - self.device.0.cmd_blit_image( - self.raw, - src.raw, - conv::map_image_layout(src_layout), - dst.raw, - conv::map_image_layout(dst_layout), - ®ions, - conv::map_filter(filter), - ); - } - - unsafe fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView) { - self.device.0.cmd_bind_index_buffer( - self.raw, - ibv.buffer.raw, - ibv.offset, - conv::map_index_type(ibv.index_type), - ); - } - - unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) - where - I: IntoIterator, - T: Borrow, - { - let (buffers, offsets): (SmallVec<[vk::Buffer; 16]>, SmallVec<[vk::DeviceSize; 16]>) = - buffers - .into_iter() - .map(|(buffer, offset)| (buffer.borrow().raw, offset)) - .unzip(); - - self.device - .0 - .cmd_bind_vertex_buffers(self.raw, first_binding, &buffers, &offsets); - } - - unsafe fn set_viewports(&mut self, first_viewport: u32, viewports: T) - where - T: IntoIterator, - T::Item: Borrow, - { - let viewports: SmallVec<[vk::Viewport; 16]> = viewports - .into_iter() - .map(|viewport| conv::map_viewport(viewport.borrow())) - .collect(); - - self.device - .0 - .cmd_set_viewport(self.raw, first_viewport, &viewports); - } - - unsafe fn set_scissors(&mut self, first_scissor: u32, scissors: T) - where - T: IntoIterator, - T::Item: Borrow, - { - let scissors: SmallVec<[vk::Rect2D; 16]> = scissors - .into_iter() - .map(|scissor| conv::map_rect(scissor.borrow())) - .collect(); - - self.device - .0 - .cmd_set_scissor(self.raw, first_scissor, &scissors); - } - - unsafe fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue) { - // Vulkan and HAL share same faces bit flags - self.device - .0 - .cmd_set_stencil_reference(self.raw, mem::transmute(faces), value); - } - - unsafe fn set_stencil_read_mask(&mut self, faces: pso::Face, value: pso::StencilValue) { - // Vulkan and HAL share same faces bit flags - self.device - .0 - 
.cmd_set_stencil_compare_mask(self.raw, mem::transmute(faces), value); - } - - unsafe fn set_stencil_write_mask(&mut self, faces: pso::Face, value: pso::StencilValue) { - // Vulkan and HAL share same faces bit flags - self.device - .0 - .cmd_set_stencil_write_mask(self.raw, mem::transmute(faces), value); - } - - unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) { - self.device.0.cmd_set_blend_constants(self.raw, &color); - } - - unsafe fn set_depth_bounds(&mut self, bounds: Range) { - self.device - .0 - .cmd_set_depth_bounds(self.raw, bounds.start, bounds.end); - } - - unsafe fn set_line_width(&mut self, width: f32) { - self.device.0.cmd_set_line_width(self.raw, width); - } - - unsafe fn set_depth_bias(&mut self, depth_bias: pso::DepthBias) { - self.device.0.cmd_set_depth_bias( - self.raw, - depth_bias.const_factor, - depth_bias.clamp, - depth_bias.slope_factor, - ); - } - - unsafe fn bind_graphics_pipeline(&mut self, pipeline: &n::GraphicsPipeline) { - self.device - .0 - .cmd_bind_pipeline(self.raw, vk::PipelineBindPoint::GRAPHICS, pipeline.0) - } - - unsafe fn bind_graphics_descriptor_sets( - &mut self, - layout: &n::PipelineLayout, - first_set: usize, - sets: I, - offsets: J, - ) where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow, - { - self.bind_descriptor_sets( - vk::PipelineBindPoint::GRAPHICS, - layout, - first_set, - sets, - offsets, - ); - } - - unsafe fn bind_compute_pipeline(&mut self, pipeline: &n::ComputePipeline) { - self.device - .0 - .cmd_bind_pipeline(self.raw, vk::PipelineBindPoint::COMPUTE, pipeline.0) - } - - unsafe fn bind_compute_descriptor_sets( - &mut self, - layout: &n::PipelineLayout, - first_set: usize, - sets: I, - offsets: J, - ) where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow, - { - self.bind_descriptor_sets( - vk::PipelineBindPoint::COMPUTE, - layout, - first_set, - sets, - offsets, - ); - } - - unsafe fn dispatch(&mut self, count: WorkGroupCount) { - 
self.device - .0 - .cmd_dispatch(self.raw, count[0], count[1], count[2]) - } - - unsafe fn dispatch_indirect(&mut self, buffer: &n::Buffer, offset: buffer::Offset) { - self.device - .0 - .cmd_dispatch_indirect(self.raw, buffer.raw, offset) - } - - unsafe fn copy_buffer(&mut self, src: &n::Buffer, dst: &n::Buffer, regions: T) - where - T: IntoIterator, - T::Item: Borrow, - { - let regions: SmallVec<[vk::BufferCopy; 16]> = regions - .into_iter() - .map(|region| { - let region = region.borrow(); - vk::BufferCopy { - src_offset: region.src, - dst_offset: region.dst, - size: region.size, - } - }) - .collect(); - - self.device - .0 - .cmd_copy_buffer(self.raw, src.raw, dst.raw, ®ions) - } - - unsafe fn copy_image( - &mut self, - src: &n::Image, - src_layout: Layout, - dst: &n::Image, - dst_layout: Layout, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - let regions: SmallVec<[vk::ImageCopy; 16]> = regions - .into_iter() - .map(|region| { - let r = region.borrow(); - vk::ImageCopy { - src_subresource: conv::map_subresource_layers(&r.src_subresource), - src_offset: conv::map_offset(r.src_offset), - dst_subresource: conv::map_subresource_layers(&r.dst_subresource), - dst_offset: conv::map_offset(r.dst_offset), - extent: conv::map_extent(r.extent), - } - }) - .collect(); - - self.device.0.cmd_copy_image( - self.raw, - src.raw, - conv::map_image_layout(src_layout), - dst.raw, - conv::map_image_layout(dst_layout), - ®ions, - ); - } - - unsafe fn copy_buffer_to_image( - &mut self, - src: &n::Buffer, - dst: &n::Image, - dst_layout: Layout, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - { - let regions = map_buffer_image_regions(dst, regions); - - self.device.0.cmd_copy_buffer_to_image( - self.raw, - src.raw, - dst.raw, - conv::map_image_layout(dst_layout), - ®ions, - ); - } - - unsafe fn copy_image_to_buffer( - &mut self, - src: &n::Image, - src_layout: Layout, - dst: &n::Buffer, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow, - 
{ - let regions = map_buffer_image_regions(src, regions); - - self.device.0.cmd_copy_image_to_buffer( - self.raw, - src.raw, - conv::map_image_layout(src_layout), - dst.raw, - ®ions, - ); - } - - unsafe fn draw(&mut self, vertices: Range, instances: Range) { - self.device.0.cmd_draw( - self.raw, - vertices.end - vertices.start, - instances.end - instances.start, - vertices.start, - instances.start, - ) - } - - unsafe fn draw_indexed( - &mut self, - indices: Range, - base_vertex: VertexOffset, - instances: Range, - ) { - self.device.0.cmd_draw_indexed( - self.raw, - indices.end - indices.start, - instances.end - instances.start, - indices.start, - base_vertex, - instances.start, - ) - } - - unsafe fn draw_indirect( - &mut self, - buffer: &n::Buffer, - offset: buffer::Offset, - draw_count: DrawCount, - stride: u32, - ) { - self.device - .0 - .cmd_draw_indirect(self.raw, buffer.raw, offset, draw_count, stride) - } - - unsafe fn draw_indexed_indirect( - &mut self, - buffer: &n::Buffer, - offset: buffer::Offset, - draw_count: DrawCount, - stride: u32, - ) { - self.device - .0 - .cmd_draw_indexed_indirect(self.raw, buffer.raw, offset, draw_count, stride) - } - - unsafe fn set_event(&mut self, event: &n::Event, stage_mask: pso::PipelineStage) { - self.device.0.cmd_set_event( - self.raw, - event.0, - vk::PipelineStageFlags::from_raw(stage_mask.bits()), - ) - } - - unsafe fn reset_event(&mut self, event: &n::Event, stage_mask: pso::PipelineStage) { - self.device.0.cmd_reset_event( - self.raw, - event.0, - vk::PipelineStageFlags::from_raw(stage_mask.bits()), - ) - } - - unsafe fn wait_events<'a, I, J>( - &mut self, - events: I, - stages: Range, - barriers: J, - ) where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow>, - { - let events = events.into_iter().map(|e| e.borrow().0).collect::>(); - - let BarrierSet { - global, - buffer, - image, - } = destructure_barriers(barriers); - - self.device.0.cmd_wait_events( - self.raw, - &events, - 
vk::PipelineStageFlags::from_raw(stages.start.bits()), - vk::PipelineStageFlags::from_raw(stages.end.bits()), - &global, - &buffer, - &image, - ) - } - - unsafe fn begin_query(&mut self, query: query::Query, flags: query::ControlFlags) { - self.device.0.cmd_begin_query( - self.raw, - query.pool.0, - query.id, - conv::map_query_control_flags(flags), - ) - } - - unsafe fn end_query(&mut self, query: query::Query) { - self.device - .0 - .cmd_end_query(self.raw, query.pool.0, query.id) - } - - unsafe fn reset_query_pool(&mut self, pool: &n::QueryPool, queries: Range) { - self.device.0.cmd_reset_query_pool( - self.raw, - pool.0, - queries.start, - queries.end - queries.start, - ) - } - - unsafe fn copy_query_pool_results( - &mut self, - pool: &n::QueryPool, - queries: Range, - buffer: &n::Buffer, - offset: buffer::Offset, - stride: buffer::Offset, - flags: query::ResultFlags, - ) { - //TODO: use safer wrapper - self.device.0.fp_v1_0().cmd_copy_query_pool_results( - self.raw, - pool.0, - queries.start, - queries.end - queries.start, - buffer.raw, - offset, - stride, - conv::map_query_result_flags(flags), - ); - } - - unsafe fn write_timestamp(&mut self, stage: pso::PipelineStage, query: query::Query) { - self.device.0.cmd_write_timestamp( - self.raw, - conv::map_pipeline_stage(stage), - query.pool.0, - query.id, - ) - } - - unsafe fn push_compute_constants( - &mut self, - layout: &n::PipelineLayout, - offset: u32, - constants: &[u32], - ) { - self.device.0.cmd_push_constants( - self.raw, - layout.raw, - vk::ShaderStageFlags::COMPUTE, - offset, - slice::from_raw_parts(constants.as_ptr() as _, constants.len() * 4), - ); - } - - unsafe fn push_graphics_constants( - &mut self, - layout: &n::PipelineLayout, - stages: pso::ShaderStageFlags, - offset: u32, - constants: &[u32], - ) { - self.device.0.cmd_push_constants( - self.raw, - layout.raw, - conv::map_stage_flags(stages), - offset, - slice::from_raw_parts(constants.as_ptr() as _, constants.len() * 4), - ); - } - - unsafe fn 
execute_commands<'a, T, I>(&mut self, buffers: I) - where - T: 'a + Borrow, - I: IntoIterator, - { - let command_buffers = buffers - .into_iter() - .map(|b| b.borrow().raw) - .collect::>(); - self.device - .0 - .cmd_execute_commands(self.raw, &command_buffers); - } -} +use ash::version::DeviceV1_0; +use ash::vk; +use smallvec::SmallVec; +use std::borrow::Borrow; +use std::ffi::CString; +use std::ops::Range; +use std::sync::Arc; +use std::{mem, ptr, slice}; + +use crate::{conv, native as n, Backend, DebugMessenger, RawDevice}; +use hal::{ + buffer, + command as com, + format::Aspects, + image::{Filter, Layout, SubresourceRange}, + memory, + pso, + query, + DrawCount, + IndexCount, + InstanceCount, + VertexCount, + VertexOffset, + WorkGroupCount, +}; + +#[derive(Debug)] +pub struct CommandBuffer { + pub raw: vk::CommandBuffer, + pub device: Arc, +} + +fn debug_color(color: u32) -> [f32; 4] { + let mut result = [0.0; 4]; + for (i, c) in result.iter_mut().enumerate() { + *c = ((color >> (24 - i * 8)) & 0xFF) as f32 / 255.0; + } + result +} + +fn map_subpass_contents(contents: com::SubpassContents) -> vk::SubpassContents { + match contents { + com::SubpassContents::Inline => vk::SubpassContents::INLINE, + com::SubpassContents::SecondaryBuffers => vk::SubpassContents::SECONDARY_COMMAND_BUFFERS, + } +} + +fn map_buffer_image_regions(_image: &n::Image, regions: T) -> SmallVec<[vk::BufferImageCopy; 16]> +where + T: IntoIterator, + T::Item: Borrow, +{ + regions + .into_iter() + .map(|region| { + let r = region.borrow(); + let image_subresource = conv::map_subresource_layers(&r.image_layers); + vk::BufferImageCopy { + buffer_offset: r.buffer_offset, + buffer_row_length: r.buffer_width, + buffer_image_height: r.buffer_height, + image_subresource, + image_offset: conv::map_offset(r.image_offset), + image_extent: conv::map_extent(r.image_extent), + } + }) + .collect() +} + +struct BarrierSet { + global: SmallVec<[vk::MemoryBarrier; 4]>, + buffer: 
SmallVec<[vk::BufferMemoryBarrier; 4]>, + image: SmallVec<[vk::ImageMemoryBarrier; 4]>, +} + +fn destructure_barriers<'a, T>(barriers: T) -> BarrierSet +where + T: IntoIterator, + T::Item: Borrow>, +{ + let mut global: SmallVec<[vk::MemoryBarrier; 4]> = SmallVec::new(); + let mut buffer: SmallVec<[vk::BufferMemoryBarrier; 4]> = SmallVec::new(); + let mut image: SmallVec<[vk::ImageMemoryBarrier; 4]> = SmallVec::new(); + + for barrier in barriers { + match *barrier.borrow() { + memory::Barrier::AllBuffers(ref access) => { + global.push(vk::MemoryBarrier { + s_type: vk::StructureType::MEMORY_BARRIER, + p_next: ptr::null(), + src_access_mask: conv::map_buffer_access(access.start), + dst_access_mask: conv::map_buffer_access(access.end), + }); + } + memory::Barrier::AllImages(ref access) => { + global.push(vk::MemoryBarrier { + s_type: vk::StructureType::MEMORY_BARRIER, + p_next: ptr::null(), + src_access_mask: conv::map_image_access(access.start), + dst_access_mask: conv::map_image_access(access.end), + }); + } + memory::Barrier::Buffer { + ref states, + target, + ref range, + ref families, + } => { + let families = match families { + Some(f) => f.start.0 as u32 .. f.end.0 as u32, + None => vk::QUEUE_FAMILY_IGNORED .. vk::QUEUE_FAMILY_IGNORED, + }; + buffer.push(vk::BufferMemoryBarrier { + s_type: vk::StructureType::BUFFER_MEMORY_BARRIER, + p_next: ptr::null(), + src_access_mask: conv::map_buffer_access(states.start), + dst_access_mask: conv::map_buffer_access(states.end), + src_queue_family_index: families.start, + dst_queue_family_index: families.end, + buffer: target.raw, + offset: range.offset, + size: range.size.unwrap_or(vk::WHOLE_SIZE), + }); + } + memory::Barrier::Image { + ref states, + target, + ref range, + ref families, + } => { + let subresource_range = conv::map_subresource_range(range); + let families = match families { + Some(f) => f.start.0 as u32 .. f.end.0 as u32, + None => vk::QUEUE_FAMILY_IGNORED .. 
vk::QUEUE_FAMILY_IGNORED, + }; + image.push(vk::ImageMemoryBarrier { + s_type: vk::StructureType::IMAGE_MEMORY_BARRIER, + p_next: ptr::null(), + src_access_mask: conv::map_image_access(states.start.0), + dst_access_mask: conv::map_image_access(states.end.0), + old_layout: conv::map_image_layout(states.start.1), + new_layout: conv::map_image_layout(states.end.1), + src_queue_family_index: families.start, + dst_queue_family_index: families.end, + image: target.raw, + subresource_range, + }); + } + } + } + + BarrierSet { + global, + buffer, + image, + } +} + +impl CommandBuffer { + fn bind_descriptor_sets( + &mut self, + bind_point: vk::PipelineBindPoint, + layout: &n::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let sets: SmallVec<[_; 16]> = sets.into_iter().map(|set| set.borrow().raw).collect(); + let dynamic_offsets: SmallVec<[_; 16]> = + offsets.into_iter().map(|offset| *offset.borrow()).collect(); + + unsafe { + self.device.raw.cmd_bind_descriptor_sets( + self.raw, + bind_point, + layout.raw, + first_set as u32, + &sets, + &dynamic_offsets, + ); + } + } +} + +impl com::CommandBuffer for CommandBuffer { + unsafe fn begin( + &mut self, + flags: com::CommandBufferFlags, + info: com::CommandBufferInheritanceInfo, + ) { + let inheritance_info = vk::CommandBufferInheritanceInfo { + s_type: vk::StructureType::COMMAND_BUFFER_INHERITANCE_INFO, + p_next: ptr::null(), + render_pass: info + .subpass + .map_or(vk::RenderPass::null(), |subpass| subpass.main_pass.raw), + subpass: info.subpass.map_or(0, |subpass| subpass.index as u32), + framebuffer: info + .framebuffer + .map_or(vk::Framebuffer::null(), |buffer| buffer.raw), + occlusion_query_enable: if info.occlusion_query_enable { + vk::TRUE + } else { + vk::FALSE + }, + query_flags: conv::map_query_control_flags(info.occlusion_query_flags), + pipeline_statistics: conv::map_pipeline_statistics(info.pipeline_statistics), 
+ }; + + let info = vk::CommandBufferBeginInfo { + s_type: vk::StructureType::COMMAND_BUFFER_BEGIN_INFO, + p_next: ptr::null(), + flags: conv::map_command_buffer_flags(flags), + p_inheritance_info: &inheritance_info, + }; + + assert_eq!(Ok(()), self.device.raw.begin_command_buffer(self.raw, &info)); + } + + unsafe fn finish(&mut self) { + assert_eq!(Ok(()), self.device.raw.end_command_buffer(self.raw)); + } + + unsafe fn reset(&mut self, release_resources: bool) { + let flags = if release_resources { + vk::CommandBufferResetFlags::RELEASE_RESOURCES + } else { + vk::CommandBufferResetFlags::empty() + }; + + assert_eq!(Ok(()), self.device.raw.reset_command_buffer(self.raw, flags)); + } + + unsafe fn begin_render_pass( + &mut self, + render_pass: &n::RenderPass, + frame_buffer: &n::Framebuffer, + render_area: pso::Rect, + clear_values: T, + first_subpass: com::SubpassContents, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let render_area = conv::map_rect(&render_area); + + // Vulkan wants one clear value per attachment (even those that don't need clears), + // but can receive less clear values than total attachments. + let clear_value_count = 64 - render_pass.clear_attachments_mask.leading_zeros() as u32; + let mut clear_value_iter = clear_values.into_iter(); + let raw_clear_values = (0 .. 
clear_value_count) + .map(|i| { + if render_pass.clear_attachments_mask & (1 << i) != 0 { + // Vulkan and HAL share same memory layout + let next = clear_value_iter.next().unwrap(); + mem::transmute(*next.borrow()) + } else { + mem::zeroed() + } + }) + .collect::>(); + + let info = vk::RenderPassBeginInfo { + s_type: vk::StructureType::RENDER_PASS_BEGIN_INFO, + p_next: ptr::null(), + render_pass: render_pass.raw, + framebuffer: frame_buffer.raw, + render_area, + clear_value_count, + p_clear_values: raw_clear_values.as_ptr(), + }; + + let contents = map_subpass_contents(first_subpass); + self.device + .raw + .cmd_begin_render_pass(self.raw, &info, contents); + } + + unsafe fn next_subpass(&mut self, contents: com::SubpassContents) { + let contents = map_subpass_contents(contents); + self.device.raw.cmd_next_subpass(self.raw, contents); + } + + unsafe fn end_render_pass(&mut self) { + self.device.raw.cmd_end_render_pass(self.raw); + } + + unsafe fn pipeline_barrier<'a, T>( + &mut self, + stages: Range, + dependencies: memory::Dependencies, + barriers: T, + ) where + T: IntoIterator, + T::Item: Borrow>, + { + let BarrierSet { + global, + buffer, + image, + } = destructure_barriers(barriers); + + self.device.raw.cmd_pipeline_barrier( + self.raw, // commandBuffer + conv::map_pipeline_stage(stages.start), + conv::map_pipeline_stage(stages.end), + mem::transmute(dependencies), + &global, + &buffer, + &image, + ); + } + + unsafe fn fill_buffer(&mut self, buffer: &n::Buffer, range: buffer::SubRange, data: u32) { + self.device.raw.cmd_fill_buffer( + self.raw, + buffer.raw, + range.offset, + range.size.unwrap_or(vk::WHOLE_SIZE), + data, + ); + } + + unsafe fn update_buffer(&mut self, buffer: &n::Buffer, offset: buffer::Offset, data: &[u8]) { + self.device + .raw + .cmd_update_buffer(self.raw, buffer.raw, offset, data); + } + + unsafe fn clear_image( + &mut self, + image: &n::Image, + layout: Layout, + value: com::ClearValue, + subresource_ranges: T, + ) where + T: 
IntoIterator, + T::Item: Borrow, + { + let mut color_ranges = Vec::new(); + let mut ds_ranges = Vec::new(); + + for subresource_range in subresource_ranges { + let sub = subresource_range.borrow(); + let aspect_ds = sub.aspects & (Aspects::DEPTH | Aspects::STENCIL); + let vk_range = conv::map_subresource_range(sub); + if sub.aspects.contains(Aspects::COLOR) { + color_ranges.push(vk::ImageSubresourceRange { + aspect_mask: conv::map_image_aspects(Aspects::COLOR), + ..vk_range + }); + } + if !aspect_ds.is_empty() { + ds_ranges.push(vk::ImageSubresourceRange { + aspect_mask: conv::map_image_aspects(aspect_ds), + ..vk_range + }); + } + } + + // Vulkan and HAL share same memory layout + let color_value = mem::transmute(value.color); + let depth_stencil_value = vk::ClearDepthStencilValue { + depth: value.depth_stencil.depth, + stencil: value.depth_stencil.stencil, + }; + + if !color_ranges.is_empty() { + self.device.raw.cmd_clear_color_image( + self.raw, + image.raw, + conv::map_image_layout(layout), + &color_value, + &color_ranges, + ) + } + if !ds_ranges.is_empty() { + self.device.raw.cmd_clear_depth_stencil_image( + self.raw, + image.raw, + conv::map_image_layout(layout), + &depth_stencil_value, + &ds_ranges, + ) + } + } + + unsafe fn clear_attachments(&mut self, clears: T, rects: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow, + { + let clears: SmallVec<[vk::ClearAttachment; 16]> = clears + .into_iter() + .map(|clear| match *clear.borrow() { + com::AttachmentClear::Color { index, value } => vk::ClearAttachment { + aspect_mask: vk::ImageAspectFlags::COLOR, + color_attachment: index as _, + clear_value: vk::ClearValue { + color: mem::transmute(value), + }, + }, + com::AttachmentClear::DepthStencil { depth, stencil } => vk::ClearAttachment { + aspect_mask: if depth.is_some() { + vk::ImageAspectFlags::DEPTH + } else { + vk::ImageAspectFlags::empty() + } | if stencil.is_some() { + vk::ImageAspectFlags::STENCIL + } else { + 
vk::ImageAspectFlags::empty() + }, + color_attachment: 0, + clear_value: vk::ClearValue { + depth_stencil: vk::ClearDepthStencilValue { + depth: depth.unwrap_or_default(), + stencil: stencil.unwrap_or_default(), + }, + }, + }, + }) + .collect(); + + let rects: SmallVec<[vk::ClearRect; 16]> = rects + .into_iter() + .map(|rect| conv::map_clear_rect(rect.borrow())) + .collect(); + + self.device + .raw + .cmd_clear_attachments(self.raw, &clears, &rects) + } + + unsafe fn resolve_image( + &mut self, + src: &n::Image, + src_layout: Layout, + dst: &n::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let regions = regions + .into_iter() + .map(|region| { + let r = region.borrow(); + vk::ImageResolve { + src_subresource: conv::map_subresource_layers(&r.src_subresource), + src_offset: conv::map_offset(r.src_offset), + dst_subresource: conv::map_subresource_layers(&r.dst_subresource), + dst_offset: conv::map_offset(r.dst_offset), + extent: conv::map_extent(r.extent), + } + }) + .collect::>(); + + self.device.raw.cmd_resolve_image( + self.raw, + src.raw, + conv::map_image_layout(src_layout), + dst.raw, + conv::map_image_layout(dst_layout), + ®ions, + ); + } + + unsafe fn blit_image( + &mut self, + src: &n::Image, + src_layout: Layout, + dst: &n::Image, + dst_layout: Layout, + filter: Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let regions = regions + .into_iter() + .map(|region| { + let r = region.borrow(); + vk::ImageBlit { + src_subresource: conv::map_subresource_layers(&r.src_subresource), + src_offsets: [ + conv::map_offset(r.src_bounds.start), + conv::map_offset(r.src_bounds.end), + ], + dst_subresource: conv::map_subresource_layers(&r.dst_subresource), + dst_offsets: [ + conv::map_offset(r.dst_bounds.start), + conv::map_offset(r.dst_bounds.end), + ], + } + }) + .collect::>(); + + self.device.raw.cmd_blit_image( + self.raw, + src.raw, + conv::map_image_layout(src_layout), + dst.raw, + 
conv::map_image_layout(dst_layout), + ®ions, + conv::map_filter(filter), + ); + } + + unsafe fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView) { + self.device.raw.cmd_bind_index_buffer( + self.raw, + ibv.buffer.raw, + ibv.range.offset, + conv::map_index_type(ibv.index_type), + ); + } + + unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) + where + I: IntoIterator, + T: Borrow, + { + let (buffers, offsets): (SmallVec<[vk::Buffer; 16]>, SmallVec<[vk::DeviceSize; 16]>) = + buffers + .into_iter() + .map(|(buffer, sub)| (buffer.borrow().raw, sub.offset)) + .unzip(); + + self.device + .raw + .cmd_bind_vertex_buffers(self.raw, first_binding, &buffers, &offsets); + } + + unsafe fn set_viewports(&mut self, first_viewport: u32, viewports: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let viewports: SmallVec<[vk::Viewport; 16]> = viewports + .into_iter() + .map(|viewport| self.device.map_viewport(viewport.borrow())) + .collect(); + + self.device + .raw + .cmd_set_viewport(self.raw, first_viewport, &viewports); + } + + unsafe fn set_scissors(&mut self, first_scissor: u32, scissors: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let scissors: SmallVec<[vk::Rect2D; 16]> = scissors + .into_iter() + .map(|scissor| conv::map_rect(scissor.borrow())) + .collect(); + + self.device + .raw + .cmd_set_scissor(self.raw, first_scissor, &scissors); + } + + unsafe fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue) { + // Vulkan and HAL share same faces bit flags + self.device + .raw + .cmd_set_stencil_reference(self.raw, mem::transmute(faces), value); + } + + unsafe fn set_stencil_read_mask(&mut self, faces: pso::Face, value: pso::StencilValue) { + // Vulkan and HAL share same faces bit flags + self.device + .raw + .cmd_set_stencil_compare_mask(self.raw, mem::transmute(faces), value); + } + + unsafe fn set_stencil_write_mask(&mut self, faces: pso::Face, value: pso::StencilValue) { + // Vulkan and HAL 
share same faces bit flags + self.device + .raw + .cmd_set_stencil_write_mask(self.raw, mem::transmute(faces), value); + } + + unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) { + self.device.raw.cmd_set_blend_constants(self.raw, &color); + } + + unsafe fn set_depth_bounds(&mut self, bounds: Range) { + self.device + .raw + .cmd_set_depth_bounds(self.raw, bounds.start, bounds.end); + } + + unsafe fn set_line_width(&mut self, width: f32) { + self.device.raw.cmd_set_line_width(self.raw, width); + } + + unsafe fn set_depth_bias(&mut self, depth_bias: pso::DepthBias) { + self.device.raw.cmd_set_depth_bias( + self.raw, + depth_bias.const_factor, + depth_bias.clamp, + depth_bias.slope_factor, + ); + } + + unsafe fn bind_graphics_pipeline(&mut self, pipeline: &n::GraphicsPipeline) { + self.device + .raw + .cmd_bind_pipeline(self.raw, vk::PipelineBindPoint::GRAPHICS, pipeline.0) + } + + unsafe fn bind_graphics_descriptor_sets( + &mut self, + layout: &n::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + self.bind_descriptor_sets( + vk::PipelineBindPoint::GRAPHICS, + layout, + first_set, + sets, + offsets, + ); + } + + unsafe fn bind_compute_pipeline(&mut self, pipeline: &n::ComputePipeline) { + self.device + .raw + .cmd_bind_pipeline(self.raw, vk::PipelineBindPoint::COMPUTE, pipeline.0) + } + + unsafe fn bind_compute_descriptor_sets( + &mut self, + layout: &n::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + self.bind_descriptor_sets( + vk::PipelineBindPoint::COMPUTE, + layout, + first_set, + sets, + offsets, + ); + } + + unsafe fn dispatch(&mut self, count: WorkGroupCount) { + self.device + .raw + .cmd_dispatch(self.raw, count[0], count[1], count[2]) + } + + unsafe fn dispatch_indirect(&mut self, buffer: &n::Buffer, offset: buffer::Offset) { + self.device 
+ .raw + .cmd_dispatch_indirect(self.raw, buffer.raw, offset) + } + + unsafe fn copy_buffer(&mut self, src: &n::Buffer, dst: &n::Buffer, regions: T) + where + T: IntoIterator, + T::Item: Borrow, + { + let regions: SmallVec<[vk::BufferCopy; 16]> = regions + .into_iter() + .map(|region| { + let region = region.borrow(); + vk::BufferCopy { + src_offset: region.src, + dst_offset: region.dst, + size: region.size, + } + }) + .collect(); + + self.device + .raw + .cmd_copy_buffer(self.raw, src.raw, dst.raw, ®ions) + } + + unsafe fn copy_image( + &mut self, + src: &n::Image, + src_layout: Layout, + dst: &n::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let regions: SmallVec<[vk::ImageCopy; 16]> = regions + .into_iter() + .map(|region| { + let r = region.borrow(); + vk::ImageCopy { + src_subresource: conv::map_subresource_layers(&r.src_subresource), + src_offset: conv::map_offset(r.src_offset), + dst_subresource: conv::map_subresource_layers(&r.dst_subresource), + dst_offset: conv::map_offset(r.dst_offset), + extent: conv::map_extent(r.extent), + } + }) + .collect(); + + self.device.raw.cmd_copy_image( + self.raw, + src.raw, + conv::map_image_layout(src_layout), + dst.raw, + conv::map_image_layout(dst_layout), + ®ions, + ); + } + + unsafe fn copy_buffer_to_image( + &mut self, + src: &n::Buffer, + dst: &n::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let regions = map_buffer_image_regions(dst, regions); + + self.device.raw.cmd_copy_buffer_to_image( + self.raw, + src.raw, + dst.raw, + conv::map_image_layout(dst_layout), + ®ions, + ); + } + + unsafe fn copy_image_to_buffer( + &mut self, + src: &n::Image, + src_layout: Layout, + dst: &n::Buffer, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow, + { + let regions = map_buffer_image_regions(src, regions); + + self.device.raw.cmd_copy_image_to_buffer( + self.raw, + src.raw, + conv::map_image_layout(src_layout), + 
dst.raw, + ®ions, + ); + } + + unsafe fn draw(&mut self, vertices: Range, instances: Range) { + self.device.raw.cmd_draw( + self.raw, + vertices.end - vertices.start, + instances.end - instances.start, + vertices.start, + instances.start, + ) + } + + unsafe fn draw_indexed( + &mut self, + indices: Range, + base_vertex: VertexOffset, + instances: Range, + ) { + self.device.raw.cmd_draw_indexed( + self.raw, + indices.end - indices.start, + instances.end - instances.start, + indices.start, + base_vertex, + instances.start, + ) + } + + unsafe fn draw_indirect( + &mut self, + buffer: &n::Buffer, + offset: buffer::Offset, + draw_count: DrawCount, + stride: u32, + ) { + self.device + .raw + .cmd_draw_indirect(self.raw, buffer.raw, offset, draw_count, stride) + } + + unsafe fn draw_indexed_indirect( + &mut self, + buffer: &n::Buffer, + offset: buffer::Offset, + draw_count: DrawCount, + stride: u32, + ) { + self.device + .raw + .cmd_draw_indexed_indirect(self.raw, buffer.raw, offset, draw_count, stride) + } + + unsafe fn set_event(&mut self, event: &n::Event, stage_mask: pso::PipelineStage) { + self.device.raw.cmd_set_event( + self.raw, + event.0, + vk::PipelineStageFlags::from_raw(stage_mask.bits()), + ) + } + + unsafe fn reset_event(&mut self, event: &n::Event, stage_mask: pso::PipelineStage) { + self.device.raw.cmd_reset_event( + self.raw, + event.0, + vk::PipelineStageFlags::from_raw(stage_mask.bits()), + ) + } + + unsafe fn wait_events<'a, I, J>( + &mut self, + events: I, + stages: Range, + barriers: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow>, + { + let events = events.into_iter().map(|e| e.borrow().0).collect::>(); + + let BarrierSet { + global, + buffer, + image, + } = destructure_barriers(barriers); + + self.device.raw.cmd_wait_events( + self.raw, + &events, + vk::PipelineStageFlags::from_raw(stages.start.bits()), + vk::PipelineStageFlags::from_raw(stages.end.bits()), + &global, + &buffer, + &image, + ) + } + + unsafe fn 
begin_query(&mut self, query: query::Query, flags: query::ControlFlags) { + self.device.raw.cmd_begin_query( + self.raw, + query.pool.0, + query.id, + conv::map_query_control_flags(flags), + ) + } + + unsafe fn end_query(&mut self, query: query::Query) { + self.device + .raw + .cmd_end_query(self.raw, query.pool.0, query.id) + } + + unsafe fn reset_query_pool(&mut self, pool: &n::QueryPool, queries: Range) { + self.device.raw.cmd_reset_query_pool( + self.raw, + pool.0, + queries.start, + queries.end - queries.start, + ) + } + + unsafe fn copy_query_pool_results( + &mut self, + pool: &n::QueryPool, + queries: Range, + buffer: &n::Buffer, + offset: buffer::Offset, + stride: buffer::Offset, + flags: query::ResultFlags, + ) { + //TODO: use safer wrapper + self.device.raw.fp_v1_0().cmd_copy_query_pool_results( + self.raw, + pool.0, + queries.start, + queries.end - queries.start, + buffer.raw, + offset, + stride, + conv::map_query_result_flags(flags), + ); + } + + unsafe fn write_timestamp(&mut self, stage: pso::PipelineStage, query: query::Query) { + self.device.raw.cmd_write_timestamp( + self.raw, + conv::map_pipeline_stage(stage), + query.pool.0, + query.id, + ) + } + + unsafe fn push_compute_constants( + &mut self, + layout: &n::PipelineLayout, + offset: u32, + constants: &[u32], + ) { + self.device.raw.cmd_push_constants( + self.raw, + layout.raw, + vk::ShaderStageFlags::COMPUTE, + offset, + slice::from_raw_parts(constants.as_ptr() as _, constants.len() * 4), + ); + } + + unsafe fn push_graphics_constants( + &mut self, + layout: &n::PipelineLayout, + stages: pso::ShaderStageFlags, + offset: u32, + constants: &[u32], + ) { + self.device.raw.cmd_push_constants( + self.raw, + layout.raw, + conv::map_stage_flags(stages), + offset, + slice::from_raw_parts(constants.as_ptr() as _, constants.len() * 4), + ); + } + + unsafe fn execute_commands<'a, T, I>(&mut self, buffers: I) + where + T: 'a + Borrow, + I: IntoIterator, + { + let command_buffers = buffers + .into_iter() + 
.map(|b| b.borrow().raw) + .collect::>(); + self.device + .raw + .cmd_execute_commands(self.raw, &command_buffers); + } + + unsafe fn insert_debug_marker(&mut self, name: &str, color: u32) { + if let Some(&DebugMessenger::Utils(ref ext, _)) = self.device.debug_messenger() { + let cstr = CString::new(name).unwrap(); + let label = vk::DebugUtilsLabelEXT::builder() + .label_name(&cstr) + .color(debug_color(color)) + .build(); + ext.cmd_insert_debug_utils_label(self.raw, &label); + } + } + unsafe fn begin_debug_marker(&mut self, name: &str, color: u32) { + if let Some(&DebugMessenger::Utils(ref ext, _)) = self.device.debug_messenger() { + let cstr = CString::new(name).unwrap(); + let label = vk::DebugUtilsLabelEXT::builder() + .label_name(&cstr) + .color(debug_color(color)) + .build(); + ext.cmd_begin_debug_utils_label(self.raw, &label); + } + } + unsafe fn end_debug_marker(&mut self) { + if let Some(&DebugMessenger::Utils(ref ext, _)) = self.device.debug_messenger() { + ext.cmd_end_debug_utils_label(self.raw); + } + } +} diff --git a/third_party/rust/gfx-backend-vulkan/src/conv.rs b/third_party/rust/gfx-backend-vulkan/src/conv.rs index 071f43104261..13c941c74441 100644 --- a/third_party/rust/gfx-backend-vulkan/src/conv.rs +++ b/third_party/rust/gfx-backend-vulkan/src/conv.rs @@ -1,587 +1,609 @@ -use ash::vk; - -use hal::{ - buffer, - command, - format, - image, - pass, - pso, - query, - range::RangeArg, - window::{CompositeAlphaMode, PresentMode}, - Features, - IndexType, -}; - -use crate::native as n; -use std::borrow::Borrow; -use std::mem; -use std::ptr; - -pub fn map_format(format: format::Format) -> vk::Format { - vk::Format::from_raw(format as i32) -} - -pub fn map_vk_format(vk_format: vk::Format) -> Option { - if (vk_format.as_raw() as usize) < format::NUM_FORMATS && vk_format != vk::Format::UNDEFINED { - Some(unsafe { mem::transmute(vk_format) }) - } else { - None - } -} - -pub fn map_tiling(tiling: image::Tiling) -> vk::ImageTiling { - 
vk::ImageTiling::from_raw(tiling as i32) -} - -pub fn map_component(component: format::Component) -> vk::ComponentSwizzle { - use hal::format::Component::*; - match component { - Zero => vk::ComponentSwizzle::ZERO, - One => vk::ComponentSwizzle::ONE, - R => vk::ComponentSwizzle::R, - G => vk::ComponentSwizzle::G, - B => vk::ComponentSwizzle::B, - A => vk::ComponentSwizzle::A, - } -} - -pub fn map_swizzle(swizzle: format::Swizzle) -> vk::ComponentMapping { - vk::ComponentMapping { - r: map_component(swizzle.0), - g: map_component(swizzle.1), - b: map_component(swizzle.2), - a: map_component(swizzle.3), - } -} - -pub fn map_index_type(index_type: IndexType) -> vk::IndexType { - match index_type { - IndexType::U16 => vk::IndexType::UINT16, - IndexType::U32 => vk::IndexType::UINT32, - } -} - -pub fn map_image_layout(layout: image::Layout) -> vk::ImageLayout { - use hal::image::Layout as Il; - match layout { - Il::General => vk::ImageLayout::GENERAL, - Il::ColorAttachmentOptimal => vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL, - Il::DepthStencilAttachmentOptimal => vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL, - Il::DepthStencilReadOnlyOptimal => vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL, - Il::ShaderReadOnlyOptimal => vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL, - Il::TransferSrcOptimal => vk::ImageLayout::TRANSFER_SRC_OPTIMAL, - Il::TransferDstOptimal => vk::ImageLayout::TRANSFER_DST_OPTIMAL, - Il::Undefined => vk::ImageLayout::UNDEFINED, - Il::Preinitialized => vk::ImageLayout::PREINITIALIZED, - Il::Present => vk::ImageLayout::PRESENT_SRC_KHR, - } -} - -pub fn map_image_aspects(aspects: format::Aspects) -> vk::ImageAspectFlags { - vk::ImageAspectFlags::from_raw(aspects.bits() as u32) -} - -pub fn map_offset(offset: image::Offset) -> vk::Offset3D { - vk::Offset3D { - x: offset.x, - y: offset.y, - z: offset.z, - } -} - -pub fn map_extent(offset: image::Extent) -> vk::Extent3D { - vk::Extent3D { - width: offset.width, - height: offset.height, - depth: 
offset.depth, - } -} - -pub fn map_subresource(sub: &image::Subresource) -> vk::ImageSubresource { - vk::ImageSubresource { - aspect_mask: map_image_aspects(sub.aspects), - mip_level: sub.level as _, - array_layer: sub.layer as _, - } -} - -pub fn map_subresource_layers(sub: &image::SubresourceLayers) -> vk::ImageSubresourceLayers { - vk::ImageSubresourceLayers { - aspect_mask: map_image_aspects(sub.aspects), - mip_level: sub.level as _, - base_array_layer: sub.layers.start as _, - layer_count: (sub.layers.end - sub.layers.start) as _, - } -} - -pub fn map_subresource_range(range: &image::SubresourceRange) -> vk::ImageSubresourceRange { - vk::ImageSubresourceRange { - aspect_mask: map_image_aspects(range.aspects), - base_mip_level: range.levels.start as _, - level_count: (range.levels.end - range.levels.start) as _, - base_array_layer: range.layers.start as _, - layer_count: (range.layers.end - range.layers.start) as _, - } -} - -pub fn map_attachment_load_op(op: pass::AttachmentLoadOp) -> vk::AttachmentLoadOp { - use hal::pass::AttachmentLoadOp as Alo; - match op { - Alo::Load => vk::AttachmentLoadOp::LOAD, - Alo::Clear => vk::AttachmentLoadOp::CLEAR, - Alo::DontCare => vk::AttachmentLoadOp::DONT_CARE, - } -} - -pub fn map_attachment_store_op(op: pass::AttachmentStoreOp) -> vk::AttachmentStoreOp { - use hal::pass::AttachmentStoreOp as Aso; - match op { - Aso::Store => vk::AttachmentStoreOp::STORE, - Aso::DontCare => vk::AttachmentStoreOp::DONT_CARE, - } -} - -pub fn map_buffer_access(access: buffer::Access) -> vk::AccessFlags { - vk::AccessFlags::from_raw(access.bits()) -} - -pub fn map_image_access(access: image::Access) -> vk::AccessFlags { - vk::AccessFlags::from_raw(access.bits()) -} - -pub fn map_pipeline_stage(stage: pso::PipelineStage) -> vk::PipelineStageFlags { - vk::PipelineStageFlags::from_raw(stage.bits()) -} - -pub fn map_buffer_usage(usage: buffer::Usage) -> vk::BufferUsageFlags { - vk::BufferUsageFlags::from_raw(usage.bits()) -} - -pub fn 
map_image_usage(usage: image::Usage) -> vk::ImageUsageFlags { - vk::ImageUsageFlags::from_raw(usage.bits()) -} - -pub fn map_vk_image_usage(usage: vk::ImageUsageFlags) -> image::Usage { - image::Usage::from_bits_truncate(usage.as_raw()) -} - -pub fn map_descriptor_type(ty: pso::DescriptorType) -> vk::DescriptorType { - vk::DescriptorType::from_raw(ty as i32) -} - -pub fn map_stage_flags(stages: pso::ShaderStageFlags) -> vk::ShaderStageFlags { - vk::ShaderStageFlags::from_raw(stages.bits()) -} - -pub fn map_filter(filter: image::Filter) -> vk::Filter { - vk::Filter::from_raw(filter as i32) -} - -pub fn map_mip_filter(filter: image::Filter) -> vk::SamplerMipmapMode { - vk::SamplerMipmapMode::from_raw(filter as i32) -} - -pub fn map_wrap(wrap: image::WrapMode) -> vk::SamplerAddressMode { - use hal::image::WrapMode as Wm; - match wrap { - Wm::Tile => vk::SamplerAddressMode::REPEAT, - Wm::Mirror => vk::SamplerAddressMode::MIRRORED_REPEAT, - Wm::Clamp => vk::SamplerAddressMode::CLAMP_TO_EDGE, - Wm::Border => vk::SamplerAddressMode::CLAMP_TO_BORDER, - } -} - -pub fn map_border_color(col: image::PackedColor) -> Option { - match col.0 { - 0x00000000 => Some(vk::BorderColor::FLOAT_TRANSPARENT_BLACK), - 0xFF000000 => Some(vk::BorderColor::FLOAT_OPAQUE_BLACK), - 0xFFFFFFFF => Some(vk::BorderColor::FLOAT_OPAQUE_WHITE), - _ => None, - } -} - -pub fn map_topology(ia: &pso::InputAssemblerDesc) -> vk::PrimitiveTopology { - match (ia.primitive, ia.with_adjacency) { - (pso::Primitive::PointList, false) => vk::PrimitiveTopology::POINT_LIST, - (pso::Primitive::PointList, true) => panic!("Points can't have adjacency info"), - (pso::Primitive::LineList, false) => vk::PrimitiveTopology::LINE_LIST, - (pso::Primitive::LineList, true) => vk::PrimitiveTopology::LINE_LIST_WITH_ADJACENCY, - (pso::Primitive::LineStrip, false) => vk::PrimitiveTopology::LINE_STRIP, - (pso::Primitive::LineStrip, true) => vk::PrimitiveTopology::LINE_STRIP_WITH_ADJACENCY, - (pso::Primitive::TriangleList, false) => 
vk::PrimitiveTopology::TRIANGLE_LIST, - (pso::Primitive::TriangleList, true) => vk::PrimitiveTopology::TRIANGLE_LIST_WITH_ADJACENCY, - (pso::Primitive::TriangleStrip, false) => vk::PrimitiveTopology::TRIANGLE_STRIP, - (pso::Primitive::TriangleStrip, true) => vk::PrimitiveTopology::TRIANGLE_STRIP_WITH_ADJACENCY, - (pso::Primitive::PatchList(_), false) => vk::PrimitiveTopology::PATCH_LIST, - (pso::Primitive::PatchList(_), true) => panic!("Patches can't have adjacency info"), - } -} - -pub fn map_cull_face(cf: pso::Face) -> vk::CullModeFlags { - match cf { - pso::Face::NONE => vk::CullModeFlags::NONE, - pso::Face::FRONT => vk::CullModeFlags::FRONT, - pso::Face::BACK => vk::CullModeFlags::BACK, - _ => vk::CullModeFlags::FRONT_AND_BACK, - } -} - -pub fn map_front_face(ff: pso::FrontFace) -> vk::FrontFace { - match ff { - pso::FrontFace::Clockwise => vk::FrontFace::CLOCKWISE, - pso::FrontFace::CounterClockwise => vk::FrontFace::COUNTER_CLOCKWISE, - } -} - -pub fn map_comparison(fun: pso::Comparison) -> vk::CompareOp { - use hal::pso::Comparison::*; - match fun { - Never => vk::CompareOp::NEVER, - Less => vk::CompareOp::LESS, - LessEqual => vk::CompareOp::LESS_OR_EQUAL, - Equal => vk::CompareOp::EQUAL, - GreaterEqual => vk::CompareOp::GREATER_OR_EQUAL, - Greater => vk::CompareOp::GREATER, - NotEqual => vk::CompareOp::NOT_EQUAL, - Always => vk::CompareOp::ALWAYS, - } -} - -pub fn map_stencil_op(op: pso::StencilOp) -> vk::StencilOp { - use hal::pso::StencilOp::*; - match op { - Keep => vk::StencilOp::KEEP, - Zero => vk::StencilOp::ZERO, - Replace => vk::StencilOp::REPLACE, - IncrementClamp => vk::StencilOp::INCREMENT_AND_CLAMP, - IncrementWrap => vk::StencilOp::INCREMENT_AND_WRAP, - DecrementClamp => vk::StencilOp::DECREMENT_AND_CLAMP, - DecrementWrap => vk::StencilOp::DECREMENT_AND_WRAP, - Invert => vk::StencilOp::INVERT, - } -} - -pub fn map_stencil_side(side: &pso::StencilFace) -> vk::StencilOpState { - vk::StencilOpState { - fail_op: map_stencil_op(side.op_fail), - 
pass_op: map_stencil_op(side.op_pass), - depth_fail_op: map_stencil_op(side.op_depth_fail), - compare_op: map_comparison(side.fun), - compare_mask: !0, - write_mask: !0, - reference: 0, - } -} - -pub fn map_blend_factor(factor: pso::Factor) -> vk::BlendFactor { - use hal::pso::Factor::*; - match factor { - Zero => vk::BlendFactor::ZERO, - One => vk::BlendFactor::ONE, - SrcColor => vk::BlendFactor::SRC_COLOR, - OneMinusSrcColor => vk::BlendFactor::ONE_MINUS_SRC_COLOR, - DstColor => vk::BlendFactor::DST_COLOR, - OneMinusDstColor => vk::BlendFactor::ONE_MINUS_DST_COLOR, - SrcAlpha => vk::BlendFactor::SRC_ALPHA, - OneMinusSrcAlpha => vk::BlendFactor::ONE_MINUS_SRC_ALPHA, - DstAlpha => vk::BlendFactor::DST_ALPHA, - OneMinusDstAlpha => vk::BlendFactor::ONE_MINUS_DST_ALPHA, - ConstColor => vk::BlendFactor::CONSTANT_COLOR, - OneMinusConstColor => vk::BlendFactor::ONE_MINUS_CONSTANT_COLOR, - ConstAlpha => vk::BlendFactor::CONSTANT_ALPHA, - OneMinusConstAlpha => vk::BlendFactor::ONE_MINUS_CONSTANT_ALPHA, - SrcAlphaSaturate => vk::BlendFactor::SRC_ALPHA_SATURATE, - Src1Color => vk::BlendFactor::SRC1_COLOR, - OneMinusSrc1Color => vk::BlendFactor::ONE_MINUS_SRC1_COLOR, - Src1Alpha => vk::BlendFactor::SRC1_ALPHA, - OneMinusSrc1Alpha => vk::BlendFactor::ONE_MINUS_SRC1_ALPHA, - } -} - -pub fn map_blend_op(operation: pso::BlendOp) -> (vk::BlendOp, vk::BlendFactor, vk::BlendFactor) { - use hal::pso::BlendOp::*; - match operation { - Add { src, dst } => ( - vk::BlendOp::ADD, - map_blend_factor(src), - map_blend_factor(dst), - ), - Sub { src, dst } => ( - vk::BlendOp::SUBTRACT, - map_blend_factor(src), - map_blend_factor(dst), - ), - RevSub { src, dst } => ( - vk::BlendOp::REVERSE_SUBTRACT, - map_blend_factor(src), - map_blend_factor(dst), - ), - Min => ( - vk::BlendOp::MIN, - vk::BlendFactor::ZERO, - vk::BlendFactor::ZERO, - ), - Max => ( - vk::BlendOp::MAX, - vk::BlendFactor::ZERO, - vk::BlendFactor::ZERO, - ), - } -} - -pub fn map_pipeline_statistics( - statistics: 
query::PipelineStatistic, -) -> vk::QueryPipelineStatisticFlags { - vk::QueryPipelineStatisticFlags::from_raw(statistics.bits()) -} - -pub fn map_query_control_flags(flags: query::ControlFlags) -> vk::QueryControlFlags { - // Safe due to equivalence of HAL values and Vulkan values - vk::QueryControlFlags::from_raw(flags.bits() & vk::QueryControlFlags::all().as_raw()) -} - -pub fn map_query_result_flags(flags: query::ResultFlags) -> vk::QueryResultFlags { - vk::QueryResultFlags::from_raw(flags.bits() & vk::QueryResultFlags::all().as_raw()) -} - -pub fn map_image_features(features: vk::FormatFeatureFlags) -> format::ImageFeature { - format::ImageFeature::from_bits_truncate(features.as_raw()) -} - -pub fn map_buffer_features(features: vk::FormatFeatureFlags) -> format::BufferFeature { - format::BufferFeature::from_bits_truncate(features.as_raw()) -} - -pub fn map_device_features(features: Features) -> vk::PhysicalDeviceFeatures { - // vk::PhysicalDeviceFeatures is a struct composed of Bool32's while - // Features is a bitfield so we need to map everything manually - vk::PhysicalDeviceFeatures::builder() - .robust_buffer_access(features.contains(Features::ROBUST_BUFFER_ACCESS)) - .full_draw_index_uint32(features.contains(Features::FULL_DRAW_INDEX_U32)) - .image_cube_array(features.contains(Features::IMAGE_CUBE_ARRAY)) - .independent_blend(features.contains(Features::INDEPENDENT_BLENDING)) - .geometry_shader(features.contains(Features::GEOMETRY_SHADER)) - .tessellation_shader(features.contains(Features::TESSELLATION_SHADER)) - .sample_rate_shading(features.contains(Features::SAMPLE_RATE_SHADING)) - .dual_src_blend(features.contains(Features::DUAL_SRC_BLENDING)) - .logic_op(features.contains(Features::LOGIC_OP)) - .multi_draw_indirect(features.contains(Features::MULTI_DRAW_INDIRECT)) - .draw_indirect_first_instance(features.contains(Features::DRAW_INDIRECT_FIRST_INSTANCE)) - .depth_clamp(features.contains(Features::DEPTH_CLAMP)) - 
.depth_bias_clamp(features.contains(Features::DEPTH_BIAS_CLAMP)) - .fill_mode_non_solid(features.contains(Features::NON_FILL_POLYGON_MODE)) - .depth_bounds(features.contains(Features::DEPTH_BOUNDS)) - .wide_lines(features.contains(Features::LINE_WIDTH)) - .large_points(features.contains(Features::POINT_SIZE)) - .alpha_to_one(features.contains(Features::ALPHA_TO_ONE)) - .multi_viewport(features.contains(Features::MULTI_VIEWPORTS)) - .sampler_anisotropy(features.contains(Features::SAMPLER_ANISOTROPY)) - .texture_compression_etc2(features.contains(Features::FORMAT_ETC2)) - .texture_compression_astc_ldr(features.contains(Features::FORMAT_ASTC_LDR)) - .texture_compression_bc(features.contains(Features::FORMAT_BC)) - .occlusion_query_precise(features.contains(Features::PRECISE_OCCLUSION_QUERY)) - .pipeline_statistics_query(features.contains(Features::PIPELINE_STATISTICS_QUERY)) - .vertex_pipeline_stores_and_atomics(features.contains(Features::VERTEX_STORES_AND_ATOMICS)) - .fragment_stores_and_atomics(features.contains(Features::FRAGMENT_STORES_AND_ATOMICS)) - .shader_tessellation_and_geometry_point_size( - features.contains(Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE), - ) - .shader_image_gather_extended(features.contains(Features::SHADER_IMAGE_GATHER_EXTENDED)) - .shader_storage_image_extended_formats( - features.contains(Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS), - ) - .shader_storage_image_multisample( - features.contains(Features::SHADER_STORAGE_IMAGE_MULTISAMPLE), - ) - .shader_storage_image_read_without_format( - features.contains(Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT), - ) - .shader_storage_image_write_without_format( - features.contains(Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT), - ) - .shader_uniform_buffer_array_dynamic_indexing( - features.contains(Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING), - ) - .shader_sampled_image_array_dynamic_indexing( - 
features.contains(Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING), - ) - .shader_storage_buffer_array_dynamic_indexing( - features.contains(Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING), - ) - .shader_storage_image_array_dynamic_indexing( - features.contains(Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING), - ) - .shader_clip_distance(features.contains(Features::SHADER_CLIP_DISTANCE)) - .shader_cull_distance(features.contains(Features::SHADER_CULL_DISTANCE)) - .shader_float64(features.contains(Features::SHADER_FLOAT64)) - .shader_int64(features.contains(Features::SHADER_INT64)) - .shader_int16(features.contains(Features::SHADER_INT16)) - .shader_resource_residency(features.contains(Features::SHADER_RESOURCE_RESIDENCY)) - .shader_resource_min_lod(features.contains(Features::SHADER_RESOURCE_MIN_LOD)) - .sparse_binding(features.contains(Features::SPARSE_BINDING)) - .sparse_residency_buffer(features.contains(Features::SPARSE_RESIDENCY_BUFFER)) - .sparse_residency_image2_d(features.contains(Features::SPARSE_RESIDENCY_IMAGE_2D)) - .sparse_residency_image3_d(features.contains(Features::SPARSE_RESIDENCY_IMAGE_3D)) - .sparse_residency2_samples(features.contains(Features::SPARSE_RESIDENCY_2_SAMPLES)) - .sparse_residency4_samples(features.contains(Features::SPARSE_RESIDENCY_4_SAMPLES)) - .sparse_residency8_samples(features.contains(Features::SPARSE_RESIDENCY_8_SAMPLES)) - .sparse_residency16_samples(features.contains(Features::SPARSE_RESIDENCY_16_SAMPLES)) - .sparse_residency_aliased(features.contains(Features::SPARSE_RESIDENCY_ALIASED)) - .variable_multisample_rate(features.contains(Features::VARIABLE_MULTISAMPLE_RATE)) - .inherited_queries(features.contains(Features::INHERITED_QUERIES)) - .build() -} - -pub fn map_memory_ranges<'a, I, R>(ranges: I) -> Vec -where - I: IntoIterator, - I::Item: Borrow<(&'a n::Memory, R)>, - R: RangeArg, -{ - ranges - .into_iter() - .map(|range| { - let &(ref memory, ref range) = range.borrow(); - let (offset, size) = 
map_range_arg(range); - vk::MappedMemoryRange { - s_type: vk::StructureType::MAPPED_MEMORY_RANGE, - p_next: ptr::null(), - memory: memory.raw, - offset, - size, - } - }) - .collect() -} - -/// Returns (offset, size) of the range. -/// -/// Unbound start indices will be mapped to 0. -/// Unbound end indices will be mapped to VK_WHOLE_SIZE. -pub fn map_range_arg(range: &R) -> (u64, u64) -where - R: RangeArg, -{ - let offset = *range.start().unwrap_or(&0); - let size = match range.end() { - Some(end) => end - offset, - None => vk::WHOLE_SIZE, - }; - - (offset, size) -} - -pub fn map_command_buffer_flags(flags: command::CommandBufferFlags) -> vk::CommandBufferUsageFlags { - // Safe due to equivalence of HAL values and Vulkan values - vk::CommandBufferUsageFlags::from_raw(flags.bits()) -} - -pub fn map_command_buffer_level(level: command::Level) -> vk::CommandBufferLevel { - match level { - command::Level::Primary => vk::CommandBufferLevel::PRIMARY, - command::Level::Secondary => vk::CommandBufferLevel::SECONDARY, - } -} - -pub fn map_view_kind( - kind: image::ViewKind, - ty: vk::ImageType, - is_cube: bool, -) -> Option { - use crate::image::ViewKind::*; - use crate::vk::ImageType; - - Some(match (ty, kind) { - (ImageType::TYPE_1D, D1) => vk::ImageViewType::TYPE_1D, - (ImageType::TYPE_1D, D1Array) => vk::ImageViewType::TYPE_1D_ARRAY, - (ImageType::TYPE_2D, D2) => vk::ImageViewType::TYPE_2D, - (ImageType::TYPE_2D, D2Array) => vk::ImageViewType::TYPE_2D_ARRAY, - (ImageType::TYPE_3D, D3) => vk::ImageViewType::TYPE_3D, - (ImageType::TYPE_2D, Cube) if is_cube => vk::ImageViewType::CUBE, - (ImageType::TYPE_2D, CubeArray) if is_cube => vk::ImageViewType::CUBE_ARRAY, - (ImageType::TYPE_3D, Cube) if is_cube => vk::ImageViewType::CUBE, - (ImageType::TYPE_3D, CubeArray) if is_cube => vk::ImageViewType::CUBE_ARRAY, - _ => return None, - }) -} - -pub fn map_rect(rect: &pso::Rect) -> vk::Rect2D { - vk::Rect2D { - offset: vk::Offset2D { - x: rect.x as _, - y: rect.y as _, - }, - 
extent: vk::Extent2D { - width: rect.w as _, - height: rect.h as _, - }, - } -} - -pub fn map_clear_rect(rect: &pso::ClearRect) -> vk::ClearRect { - vk::ClearRect { - base_array_layer: rect.layers.start as _, - layer_count: (rect.layers.end - rect.layers.start) as _, - rect: map_rect(&rect.rect), - } -} - -pub fn map_viewport(vp: &pso::Viewport) -> vk::Viewport { - vk::Viewport { - x: vp.rect.x as _, - y: vp.rect.y as _, - width: vp.rect.w as _, - height: vp.rect.h as _, - min_depth: vp.depth.start, - max_depth: vp.depth.end, - } -} - -pub fn map_view_capabilities(caps: image::ViewCapabilities) -> vk::ImageCreateFlags { - vk::ImageCreateFlags::from_raw(caps.bits()) -} - -pub fn map_present_mode(mode: PresentMode) -> vk::PresentModeKHR { - if mode == PresentMode::IMMEDIATE { - vk::PresentModeKHR::IMMEDIATE - } else if mode == PresentMode::MAILBOX { - vk::PresentModeKHR::MAILBOX - } else if mode == PresentMode::FIFO { - vk::PresentModeKHR::FIFO - } else if mode == PresentMode::RELAXED { - vk::PresentModeKHR::FIFO_RELAXED - } else { - panic!("Unexpected present mode {:?}", mode) - } -} - -pub fn map_vk_present_mode(mode: vk::PresentModeKHR) -> PresentMode { - if mode == vk::PresentModeKHR::IMMEDIATE { - PresentMode::IMMEDIATE - } else if mode == vk::PresentModeKHR::MAILBOX { - PresentMode::MAILBOX - } else if mode == vk::PresentModeKHR::FIFO { - PresentMode::FIFO - } else if mode == vk::PresentModeKHR::FIFO_RELAXED { - PresentMode::RELAXED - } else { - warn!("Unrecognized present mode {:?}", mode); - PresentMode::IMMEDIATE - } -} - -pub fn map_composite_alpha_mode(composite_alpha_mode: CompositeAlphaMode) -> vk::CompositeAlphaFlagsKHR { - vk::CompositeAlphaFlagsKHR::from_raw(composite_alpha_mode.bits()) -} - -pub fn map_vk_composite_alpha(composite_alpha: vk::CompositeAlphaFlagsKHR) -> CompositeAlphaMode { - CompositeAlphaMode::from_bits_truncate(composite_alpha.as_raw()) -} - -pub fn map_descriptor_pool_create_flags( - flags: pso::DescriptorPoolCreateFlags, -) -> 
vk::DescriptorPoolCreateFlags { - vk::DescriptorPoolCreateFlags::from_raw(flags.bits()) -} +use crate::native as n; + +use ash::vk; + +use hal::{ + buffer, + command, + format, + image, + memory::Segment, + pass, + pso, + query, + window::{CompositeAlphaMode, PresentMode}, + Features, + IndexType, +}; + +use smallvec::SmallVec; + +use std::{borrow::Borrow, mem, ptr}; + + +pub fn map_format(format: format::Format) -> vk::Format { + vk::Format::from_raw(format as i32) +} + +pub fn map_vk_format(vk_format: vk::Format) -> Option { + if (vk_format.as_raw() as usize) < format::NUM_FORMATS && vk_format != vk::Format::UNDEFINED { + Some(unsafe { mem::transmute(vk_format) }) + } else { + None + } +} + +pub fn map_tiling(tiling: image::Tiling) -> vk::ImageTiling { + vk::ImageTiling::from_raw(tiling as i32) +} + +pub fn map_component(component: format::Component) -> vk::ComponentSwizzle { + use hal::format::Component::*; + match component { + Zero => vk::ComponentSwizzle::ZERO, + One => vk::ComponentSwizzle::ONE, + R => vk::ComponentSwizzle::R, + G => vk::ComponentSwizzle::G, + B => vk::ComponentSwizzle::B, + A => vk::ComponentSwizzle::A, + } +} + +pub fn map_swizzle(swizzle: format::Swizzle) -> vk::ComponentMapping { + vk::ComponentMapping { + r: map_component(swizzle.0), + g: map_component(swizzle.1), + b: map_component(swizzle.2), + a: map_component(swizzle.3), + } +} + +pub fn map_index_type(index_type: IndexType) -> vk::IndexType { + match index_type { + IndexType::U16 => vk::IndexType::UINT16, + IndexType::U32 => vk::IndexType::UINT32, + } +} + +pub fn map_image_layout(layout: image::Layout) -> vk::ImageLayout { + use hal::image::Layout as Il; + match layout { + Il::General => vk::ImageLayout::GENERAL, + Il::ColorAttachmentOptimal => vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL, + Il::DepthStencilAttachmentOptimal => vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + Il::DepthStencilReadOnlyOptimal => vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL, + 
Il::ShaderReadOnlyOptimal => vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL, + Il::TransferSrcOptimal => vk::ImageLayout::TRANSFER_SRC_OPTIMAL, + Il::TransferDstOptimal => vk::ImageLayout::TRANSFER_DST_OPTIMAL, + Il::Undefined => vk::ImageLayout::UNDEFINED, + Il::Preinitialized => vk::ImageLayout::PREINITIALIZED, + Il::Present => vk::ImageLayout::PRESENT_SRC_KHR, + } +} + +pub fn map_image_aspects(aspects: format::Aspects) -> vk::ImageAspectFlags { + vk::ImageAspectFlags::from_raw(aspects.bits() as u32) +} + +pub fn map_offset(offset: image::Offset) -> vk::Offset3D { + vk::Offset3D { + x: offset.x, + y: offset.y, + z: offset.z, + } +} + +pub fn map_extent(offset: image::Extent) -> vk::Extent3D { + vk::Extent3D { + width: offset.width, + height: offset.height, + depth: offset.depth, + } +} + +pub fn map_subresource(sub: &image::Subresource) -> vk::ImageSubresource { + vk::ImageSubresource { + aspect_mask: map_image_aspects(sub.aspects), + mip_level: sub.level as _, + array_layer: sub.layer as _, + } +} + +pub fn map_subresource_layers(sub: &image::SubresourceLayers) -> vk::ImageSubresourceLayers { + vk::ImageSubresourceLayers { + aspect_mask: map_image_aspects(sub.aspects), + mip_level: sub.level as _, + base_array_layer: sub.layers.start as _, + layer_count: (sub.layers.end - sub.layers.start) as _, + } +} + +pub fn map_subresource_range(range: &image::SubresourceRange) -> vk::ImageSubresourceRange { + vk::ImageSubresourceRange { + aspect_mask: map_image_aspects(range.aspects), + base_mip_level: range.levels.start as _, + level_count: (range.levels.end - range.levels.start) as _, + base_array_layer: range.layers.start as _, + layer_count: (range.layers.end - range.layers.start) as _, + } +} + +pub fn map_attachment_load_op(op: pass::AttachmentLoadOp) -> vk::AttachmentLoadOp { + use hal::pass::AttachmentLoadOp as Alo; + match op { + Alo::Load => vk::AttachmentLoadOp::LOAD, + Alo::Clear => vk::AttachmentLoadOp::CLEAR, + Alo::DontCare => vk::AttachmentLoadOp::DONT_CARE, + 
} +} + +pub fn map_attachment_store_op(op: pass::AttachmentStoreOp) -> vk::AttachmentStoreOp { + use hal::pass::AttachmentStoreOp as Aso; + match op { + Aso::Store => vk::AttachmentStoreOp::STORE, + Aso::DontCare => vk::AttachmentStoreOp::DONT_CARE, + } +} + +pub fn map_buffer_access(access: buffer::Access) -> vk::AccessFlags { + vk::AccessFlags::from_raw(access.bits()) +} + +pub fn map_image_access(access: image::Access) -> vk::AccessFlags { + vk::AccessFlags::from_raw(access.bits()) +} + +pub fn map_pipeline_stage(stage: pso::PipelineStage) -> vk::PipelineStageFlags { + vk::PipelineStageFlags::from_raw(stage.bits()) +} + +pub fn map_buffer_usage(usage: buffer::Usage) -> vk::BufferUsageFlags { + vk::BufferUsageFlags::from_raw(usage.bits()) +} + +pub fn map_image_usage(usage: image::Usage) -> vk::ImageUsageFlags { + vk::ImageUsageFlags::from_raw(usage.bits()) +} + +pub fn map_vk_image_usage(usage: vk::ImageUsageFlags) -> image::Usage { + image::Usage::from_bits_truncate(usage.as_raw()) +} + +pub fn map_descriptor_type(ty: pso::DescriptorType) -> vk::DescriptorType { + match ty { + pso::DescriptorType::Sampler => vk::DescriptorType::SAMPLER, + pso::DescriptorType::Image { ty } => match ty { + pso::ImageDescriptorType::Sampled { with_sampler } => match with_sampler { + true => vk::DescriptorType::COMBINED_IMAGE_SAMPLER, + false => vk::DescriptorType::SAMPLED_IMAGE, + }, + pso::ImageDescriptorType::Storage { .. } => vk::DescriptorType::STORAGE_IMAGE, + }, + pso::DescriptorType::Buffer { ty, format } => match ty { + pso::BufferDescriptorType::Storage { .. 
} => match format { + pso::BufferDescriptorFormat::Structured { dynamic_offset } => { + match dynamic_offset { + true => vk::DescriptorType::STORAGE_BUFFER_DYNAMIC, + false => vk::DescriptorType::STORAGE_BUFFER, + } + } + pso::BufferDescriptorFormat::Texel => vk::DescriptorType::STORAGE_TEXEL_BUFFER, + }, + pso::BufferDescriptorType::Uniform => match format { + pso::BufferDescriptorFormat::Structured { dynamic_offset } => { + match dynamic_offset { + true => vk::DescriptorType::UNIFORM_BUFFER_DYNAMIC, + false => vk::DescriptorType::UNIFORM_BUFFER, + } + } + pso::BufferDescriptorFormat::Texel => vk::DescriptorType::UNIFORM_TEXEL_BUFFER, + }, + }, + pso::DescriptorType::InputAttachment => vk::DescriptorType::INPUT_ATTACHMENT, + } +} + +pub fn map_stage_flags(stages: pso::ShaderStageFlags) -> vk::ShaderStageFlags { + vk::ShaderStageFlags::from_raw(stages.bits()) +} + +pub fn map_filter(filter: image::Filter) -> vk::Filter { + vk::Filter::from_raw(filter as i32) +} + +pub fn map_mip_filter(filter: image::Filter) -> vk::SamplerMipmapMode { + vk::SamplerMipmapMode::from_raw(filter as i32) +} + +pub fn map_wrap(wrap: image::WrapMode) -> vk::SamplerAddressMode { + use hal::image::WrapMode as Wm; + match wrap { + Wm::Tile => vk::SamplerAddressMode::REPEAT, + Wm::Mirror => vk::SamplerAddressMode::MIRRORED_REPEAT, + Wm::Clamp => vk::SamplerAddressMode::CLAMP_TO_EDGE, + Wm::Border => vk::SamplerAddressMode::CLAMP_TO_BORDER, + Wm::MirrorClamp => vk::SamplerAddressMode::MIRROR_CLAMP_TO_EDGE, + } +} + +pub fn map_border_color(col: image::PackedColor) -> Option { + match col.0 { + 0x00000000 => Some(vk::BorderColor::FLOAT_TRANSPARENT_BLACK), + 0xFF000000 => Some(vk::BorderColor::FLOAT_OPAQUE_BLACK), + 0xFFFFFFFF => Some(vk::BorderColor::FLOAT_OPAQUE_WHITE), + _ => None, + } +} + +pub fn map_topology(ia: &pso::InputAssemblerDesc) -> vk::PrimitiveTopology { + match (ia.primitive, ia.with_adjacency) { + (pso::Primitive::PointList, false) => vk::PrimitiveTopology::POINT_LIST, + 
(pso::Primitive::PointList, true) => panic!("Points can't have adjacency info"), + (pso::Primitive::LineList, false) => vk::PrimitiveTopology::LINE_LIST, + (pso::Primitive::LineList, true) => vk::PrimitiveTopology::LINE_LIST_WITH_ADJACENCY, + (pso::Primitive::LineStrip, false) => vk::PrimitiveTopology::LINE_STRIP, + (pso::Primitive::LineStrip, true) => vk::PrimitiveTopology::LINE_STRIP_WITH_ADJACENCY, + (pso::Primitive::TriangleList, false) => vk::PrimitiveTopology::TRIANGLE_LIST, + (pso::Primitive::TriangleList, true) => vk::PrimitiveTopology::TRIANGLE_LIST_WITH_ADJACENCY, + (pso::Primitive::TriangleStrip, false) => vk::PrimitiveTopology::TRIANGLE_STRIP, + (pso::Primitive::TriangleStrip, true) => { + vk::PrimitiveTopology::TRIANGLE_STRIP_WITH_ADJACENCY + } + (pso::Primitive::PatchList(_), false) => vk::PrimitiveTopology::PATCH_LIST, + (pso::Primitive::PatchList(_), true) => panic!("Patches can't have adjacency info"), + } +} + +pub fn map_cull_face(cf: pso::Face) -> vk::CullModeFlags { + match cf { + pso::Face::NONE => vk::CullModeFlags::NONE, + pso::Face::FRONT => vk::CullModeFlags::FRONT, + pso::Face::BACK => vk::CullModeFlags::BACK, + _ => vk::CullModeFlags::FRONT_AND_BACK, + } +} + +pub fn map_front_face(ff: pso::FrontFace) -> vk::FrontFace { + match ff { + pso::FrontFace::Clockwise => vk::FrontFace::CLOCKWISE, + pso::FrontFace::CounterClockwise => vk::FrontFace::COUNTER_CLOCKWISE, + } +} + +pub fn map_comparison(fun: pso::Comparison) -> vk::CompareOp { + use hal::pso::Comparison::*; + match fun { + Never => vk::CompareOp::NEVER, + Less => vk::CompareOp::LESS, + LessEqual => vk::CompareOp::LESS_OR_EQUAL, + Equal => vk::CompareOp::EQUAL, + GreaterEqual => vk::CompareOp::GREATER_OR_EQUAL, + Greater => vk::CompareOp::GREATER, + NotEqual => vk::CompareOp::NOT_EQUAL, + Always => vk::CompareOp::ALWAYS, + } +} + +pub fn map_stencil_op(op: pso::StencilOp) -> vk::StencilOp { + use hal::pso::StencilOp::*; + match op { + Keep => vk::StencilOp::KEEP, + Zero => 
vk::StencilOp::ZERO, + Replace => vk::StencilOp::REPLACE, + IncrementClamp => vk::StencilOp::INCREMENT_AND_CLAMP, + IncrementWrap => vk::StencilOp::INCREMENT_AND_WRAP, + DecrementClamp => vk::StencilOp::DECREMENT_AND_CLAMP, + DecrementWrap => vk::StencilOp::DECREMENT_AND_WRAP, + Invert => vk::StencilOp::INVERT, + } +} + +pub fn map_stencil_side(side: &pso::StencilFace) -> vk::StencilOpState { + vk::StencilOpState { + fail_op: map_stencil_op(side.op_fail), + pass_op: map_stencil_op(side.op_pass), + depth_fail_op: map_stencil_op(side.op_depth_fail), + compare_op: map_comparison(side.fun), + compare_mask: !0, + write_mask: !0, + reference: 0, + } +} + +pub fn map_blend_factor(factor: pso::Factor) -> vk::BlendFactor { + use hal::pso::Factor::*; + match factor { + Zero => vk::BlendFactor::ZERO, + One => vk::BlendFactor::ONE, + SrcColor => vk::BlendFactor::SRC_COLOR, + OneMinusSrcColor => vk::BlendFactor::ONE_MINUS_SRC_COLOR, + DstColor => vk::BlendFactor::DST_COLOR, + OneMinusDstColor => vk::BlendFactor::ONE_MINUS_DST_COLOR, + SrcAlpha => vk::BlendFactor::SRC_ALPHA, + OneMinusSrcAlpha => vk::BlendFactor::ONE_MINUS_SRC_ALPHA, + DstAlpha => vk::BlendFactor::DST_ALPHA, + OneMinusDstAlpha => vk::BlendFactor::ONE_MINUS_DST_ALPHA, + ConstColor => vk::BlendFactor::CONSTANT_COLOR, + OneMinusConstColor => vk::BlendFactor::ONE_MINUS_CONSTANT_COLOR, + ConstAlpha => vk::BlendFactor::CONSTANT_ALPHA, + OneMinusConstAlpha => vk::BlendFactor::ONE_MINUS_CONSTANT_ALPHA, + SrcAlphaSaturate => vk::BlendFactor::SRC_ALPHA_SATURATE, + Src1Color => vk::BlendFactor::SRC1_COLOR, + OneMinusSrc1Color => vk::BlendFactor::ONE_MINUS_SRC1_COLOR, + Src1Alpha => vk::BlendFactor::SRC1_ALPHA, + OneMinusSrc1Alpha => vk::BlendFactor::ONE_MINUS_SRC1_ALPHA, + } +} + +pub fn map_blend_op(operation: pso::BlendOp) -> (vk::BlendOp, vk::BlendFactor, vk::BlendFactor) { + use hal::pso::BlendOp::*; + match operation { + Add { src, dst } => ( + vk::BlendOp::ADD, + map_blend_factor(src), + map_blend_factor(dst), + ), + 
Sub { src, dst } => ( + vk::BlendOp::SUBTRACT, + map_blend_factor(src), + map_blend_factor(dst), + ), + RevSub { src, dst } => ( + vk::BlendOp::REVERSE_SUBTRACT, + map_blend_factor(src), + map_blend_factor(dst), + ), + Min => ( + vk::BlendOp::MIN, + vk::BlendFactor::ZERO, + vk::BlendFactor::ZERO, + ), + Max => ( + vk::BlendOp::MAX, + vk::BlendFactor::ZERO, + vk::BlendFactor::ZERO, + ), + } +} + +pub fn map_pipeline_statistics( + statistics: query::PipelineStatistic, +) -> vk::QueryPipelineStatisticFlags { + vk::QueryPipelineStatisticFlags::from_raw(statistics.bits()) +} + +pub fn map_query_control_flags(flags: query::ControlFlags) -> vk::QueryControlFlags { + // Safe due to equivalence of HAL values and Vulkan values + vk::QueryControlFlags::from_raw(flags.bits() & vk::QueryControlFlags::all().as_raw()) +} + +pub fn map_query_result_flags(flags: query::ResultFlags) -> vk::QueryResultFlags { + vk::QueryResultFlags::from_raw(flags.bits() & vk::QueryResultFlags::all().as_raw()) +} + +pub fn map_image_features(features: vk::FormatFeatureFlags) -> format::ImageFeature { + format::ImageFeature::from_bits_truncate(features.as_raw()) +} + +pub fn map_buffer_features(features: vk::FormatFeatureFlags) -> format::BufferFeature { + format::BufferFeature::from_bits_truncate(features.as_raw()) +} + +pub fn map_device_features(features: Features) -> vk::PhysicalDeviceFeatures { + // vk::PhysicalDeviceFeatures is a struct composed of Bool32's while + // Features is a bitfield so we need to map everything manually + vk::PhysicalDeviceFeatures::builder() + .robust_buffer_access(features.contains(Features::ROBUST_BUFFER_ACCESS)) + .full_draw_index_uint32(features.contains(Features::FULL_DRAW_INDEX_U32)) + .image_cube_array(features.contains(Features::IMAGE_CUBE_ARRAY)) + .independent_blend(features.contains(Features::INDEPENDENT_BLENDING)) + .geometry_shader(features.contains(Features::GEOMETRY_SHADER)) + .tessellation_shader(features.contains(Features::TESSELLATION_SHADER)) + 
.sample_rate_shading(features.contains(Features::SAMPLE_RATE_SHADING)) + .dual_src_blend(features.contains(Features::DUAL_SRC_BLENDING)) + .logic_op(features.contains(Features::LOGIC_OP)) + .multi_draw_indirect(features.contains(Features::MULTI_DRAW_INDIRECT)) + .draw_indirect_first_instance(features.contains(Features::DRAW_INDIRECT_FIRST_INSTANCE)) + .depth_clamp(features.contains(Features::DEPTH_CLAMP)) + .depth_bias_clamp(features.contains(Features::DEPTH_BIAS_CLAMP)) + .fill_mode_non_solid(features.contains(Features::NON_FILL_POLYGON_MODE)) + .depth_bounds(features.contains(Features::DEPTH_BOUNDS)) + .wide_lines(features.contains(Features::LINE_WIDTH)) + .large_points(features.contains(Features::POINT_SIZE)) + .alpha_to_one(features.contains(Features::ALPHA_TO_ONE)) + .multi_viewport(features.contains(Features::MULTI_VIEWPORTS)) + .sampler_anisotropy(features.contains(Features::SAMPLER_ANISOTROPY)) + .texture_compression_etc2(features.contains(Features::FORMAT_ETC2)) + .texture_compression_astc_ldr(features.contains(Features::FORMAT_ASTC_LDR)) + .texture_compression_bc(features.contains(Features::FORMAT_BC)) + .occlusion_query_precise(features.contains(Features::PRECISE_OCCLUSION_QUERY)) + .pipeline_statistics_query(features.contains(Features::PIPELINE_STATISTICS_QUERY)) + .vertex_pipeline_stores_and_atomics(features.contains(Features::VERTEX_STORES_AND_ATOMICS)) + .fragment_stores_and_atomics(features.contains(Features::FRAGMENT_STORES_AND_ATOMICS)) + .shader_tessellation_and_geometry_point_size( + features.contains(Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE), + ) + .shader_image_gather_extended(features.contains(Features::SHADER_IMAGE_GATHER_EXTENDED)) + .shader_storage_image_extended_formats( + features.contains(Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS), + ) + .shader_storage_image_multisample( + features.contains(Features::SHADER_STORAGE_IMAGE_MULTISAMPLE), + ) + .shader_storage_image_read_without_format( + 
features.contains(Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT), + ) + .shader_storage_image_write_without_format( + features.contains(Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT), + ) + .shader_uniform_buffer_array_dynamic_indexing( + features.contains(Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING), + ) + .shader_sampled_image_array_dynamic_indexing( + features.contains(Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING), + ) + .shader_storage_buffer_array_dynamic_indexing( + features.contains(Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING), + ) + .shader_storage_image_array_dynamic_indexing( + features.contains(Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING), + ) + .shader_clip_distance(features.contains(Features::SHADER_CLIP_DISTANCE)) + .shader_cull_distance(features.contains(Features::SHADER_CULL_DISTANCE)) + .shader_float64(features.contains(Features::SHADER_FLOAT64)) + .shader_int64(features.contains(Features::SHADER_INT64)) + .shader_int16(features.contains(Features::SHADER_INT16)) + .shader_resource_residency(features.contains(Features::SHADER_RESOURCE_RESIDENCY)) + .shader_resource_min_lod(features.contains(Features::SHADER_RESOURCE_MIN_LOD)) + .sparse_binding(features.contains(Features::SPARSE_BINDING)) + .sparse_residency_buffer(features.contains(Features::SPARSE_RESIDENCY_BUFFER)) + .sparse_residency_image2_d(features.contains(Features::SPARSE_RESIDENCY_IMAGE_2D)) + .sparse_residency_image3_d(features.contains(Features::SPARSE_RESIDENCY_IMAGE_3D)) + .sparse_residency2_samples(features.contains(Features::SPARSE_RESIDENCY_2_SAMPLES)) + .sparse_residency4_samples(features.contains(Features::SPARSE_RESIDENCY_4_SAMPLES)) + .sparse_residency8_samples(features.contains(Features::SPARSE_RESIDENCY_8_SAMPLES)) + .sparse_residency16_samples(features.contains(Features::SPARSE_RESIDENCY_16_SAMPLES)) + .sparse_residency_aliased(features.contains(Features::SPARSE_RESIDENCY_ALIASED)) + 
.variable_multisample_rate(features.contains(Features::VARIABLE_MULTISAMPLE_RATE)) + .inherited_queries(features.contains(Features::INHERITED_QUERIES)) + .build() +} + +pub fn map_memory_ranges<'a, I>(ranges: I) -> SmallVec<[vk::MappedMemoryRange; 4]> +where + I: IntoIterator, + I::Item: Borrow<(&'a n::Memory, Segment)>, +{ + ranges + .into_iter() + .map(|range| { + let &(ref memory, ref segment) = range.borrow(); + vk::MappedMemoryRange { + s_type: vk::StructureType::MAPPED_MEMORY_RANGE, + p_next: ptr::null(), + memory: memory.raw, + offset: segment.offset, + size: segment.size.unwrap_or(vk::WHOLE_SIZE), + } + }) + .collect() +} + +pub fn map_command_buffer_flags(flags: command::CommandBufferFlags) -> vk::CommandBufferUsageFlags { + // Safe due to equivalence of HAL values and Vulkan values + vk::CommandBufferUsageFlags::from_raw(flags.bits()) +} + +pub fn map_command_buffer_level(level: command::Level) -> vk::CommandBufferLevel { + match level { + command::Level::Primary => vk::CommandBufferLevel::PRIMARY, + command::Level::Secondary => vk::CommandBufferLevel::SECONDARY, + } +} + +pub fn map_view_kind( + kind: image::ViewKind, + ty: vk::ImageType, + is_cube: bool, +) -> Option { + use crate::image::ViewKind::*; + use crate::vk::ImageType; + + Some(match (ty, kind) { + (ImageType::TYPE_1D, D1) => vk::ImageViewType::TYPE_1D, + (ImageType::TYPE_1D, D1Array) => vk::ImageViewType::TYPE_1D_ARRAY, + (ImageType::TYPE_2D, D2) => vk::ImageViewType::TYPE_2D, + (ImageType::TYPE_2D, D2Array) => vk::ImageViewType::TYPE_2D_ARRAY, + (ImageType::TYPE_3D, D3) => vk::ImageViewType::TYPE_3D, + (ImageType::TYPE_2D, Cube) if is_cube => vk::ImageViewType::CUBE, + (ImageType::TYPE_2D, CubeArray) if is_cube => vk::ImageViewType::CUBE_ARRAY, + (ImageType::TYPE_3D, Cube) if is_cube => vk::ImageViewType::CUBE, + (ImageType::TYPE_3D, CubeArray) if is_cube => vk::ImageViewType::CUBE_ARRAY, + _ => return None, + }) +} + +pub fn map_rect(rect: &pso::Rect) -> vk::Rect2D { + vk::Rect2D { + 
offset: vk::Offset2D { + x: rect.x as _, + y: rect.y as _, + }, + extent: vk::Extent2D { + width: rect.w as _, + height: rect.h as _, + }, + } +} + +pub fn map_clear_rect(rect: &pso::ClearRect) -> vk::ClearRect { + vk::ClearRect { + base_array_layer: rect.layers.start as _, + layer_count: (rect.layers.end - rect.layers.start) as _, + rect: map_rect(&rect.rect), + } +} + +pub fn map_viewport(vp: &pso::Viewport, flip_y: bool, shift_y: bool) -> vk::Viewport { + vk::Viewport { + x: vp.rect.x as _, + y: if shift_y { + vp.rect.y + vp.rect.h + } else { + vp.rect.y + } as _, + width: vp.rect.w as _, + height: if flip_y { -vp.rect.h } else { vp.rect.h } as _, + min_depth: vp.depth.start, + max_depth: vp.depth.end, + } +} + +pub fn map_view_capabilities(caps: image::ViewCapabilities) -> vk::ImageCreateFlags { + vk::ImageCreateFlags::from_raw(caps.bits()) +} + +pub fn map_present_mode(mode: PresentMode) -> vk::PresentModeKHR { + if mode == PresentMode::IMMEDIATE { + vk::PresentModeKHR::IMMEDIATE + } else if mode == PresentMode::MAILBOX { + vk::PresentModeKHR::MAILBOX + } else if mode == PresentMode::FIFO { + vk::PresentModeKHR::FIFO + } else if mode == PresentMode::RELAXED { + vk::PresentModeKHR::FIFO_RELAXED + } else { + panic!("Unexpected present mode {:?}", mode) + } +} + +pub fn map_vk_present_mode(mode: vk::PresentModeKHR) -> PresentMode { + if mode == vk::PresentModeKHR::IMMEDIATE { + PresentMode::IMMEDIATE + } else if mode == vk::PresentModeKHR::MAILBOX { + PresentMode::MAILBOX + } else if mode == vk::PresentModeKHR::FIFO { + PresentMode::FIFO + } else if mode == vk::PresentModeKHR::FIFO_RELAXED { + PresentMode::RELAXED + } else { + warn!("Unrecognized present mode {:?}", mode); + PresentMode::IMMEDIATE + } +} + +pub fn map_composite_alpha_mode( + composite_alpha_mode: CompositeAlphaMode, +) -> vk::CompositeAlphaFlagsKHR { + vk::CompositeAlphaFlagsKHR::from_raw(composite_alpha_mode.bits()) +} + +pub fn map_vk_composite_alpha(composite_alpha: vk::CompositeAlphaFlagsKHR) 
-> CompositeAlphaMode { + CompositeAlphaMode::from_bits_truncate(composite_alpha.as_raw()) +} + +pub fn map_descriptor_pool_create_flags( + flags: pso::DescriptorPoolCreateFlags, +) -> vk::DescriptorPoolCreateFlags { + vk::DescriptorPoolCreateFlags::from_raw(flags.bits()) +} diff --git a/third_party/rust/gfx-backend-vulkan/src/device.rs b/third_party/rust/gfx-backend-vulkan/src/device.rs index 87779c4b432d..cd984baa7132 100644 --- a/third_party/rust/gfx-backend-vulkan/src/device.rs +++ b/third_party/rust/gfx-backend-vulkan/src/device.rs @@ -1,2308 +1,2303 @@ -use arrayvec::ArrayVec; -use ash::extensions::khr; -use ash::version::DeviceV1_0; -use ash::vk; -use ash::vk::Handle; -use smallvec::SmallVec; - -use hal::{ - memory::Requirements, - pool::CommandPoolCreateFlags, - pso::VertexInputRate, - range::RangeArg, - window::SwapchainConfig, - {buffer, device as d, format, image, pass, pso, query, queue}, - {Features, MemoryTypeId}, -}; - -use std::borrow::Borrow; -use std::ffi::CString; -use std::ops::Range; -use std::pin::Pin; -use std::sync::Arc; -use std::{mem, ptr}; - -use crate::pool::RawCommandPool; -use crate::{conv, native as n, window as w, command as cmd}; -use crate::{Backend as B, DebugMessenger, Device}; - -#[derive(Debug, Default)] -struct GraphicsPipelineInfoBuf { - // 10 is the max amount of dynamic states - dynamic_states: ArrayVec<[vk::DynamicState; 10]>, - - // 5 is the amount of stages - c_strings: ArrayVec<[CString; 5]>, - stages: ArrayVec<[vk::PipelineShaderStageCreateInfo; 5]>, - specializations: ArrayVec<[vk::SpecializationInfo; 5]>, - specialization_entries: ArrayVec<[SmallVec<[vk::SpecializationMapEntry; 4]>; 5]>, - - vertex_bindings: Vec, - vertex_attributes: Vec, - blend_states: Vec, - - sample_mask: [u32; 2], - vertex_input_state: vk::PipelineVertexInputStateCreateInfo, - input_assembly_state: vk::PipelineInputAssemblyStateCreateInfo, - tessellation_state: Option, - viewport_state: vk::PipelineViewportStateCreateInfo, - rasterization_state: 
vk::PipelineRasterizationStateCreateInfo, - multisample_state: vk::PipelineMultisampleStateCreateInfo, - depth_stencil_state: vk::PipelineDepthStencilStateCreateInfo, - color_blend_state: vk::PipelineColorBlendStateCreateInfo, - pipeline_dynamic_state: vk::PipelineDynamicStateCreateInfo, - viewport: vk::Viewport, - scissor: vk::Rect2D, -} -impl GraphicsPipelineInfoBuf { - unsafe fn add_stage<'a>(&mut self, stage: vk::ShaderStageFlags, source: &pso::EntryPoint<'a, B>) { - let string = CString::new(source.entry).unwrap(); - let p_name = string.as_ptr(); - self.c_strings.push(string); - - self.specialization_entries.push( - source - .specialization - .constants - .iter() - .map(|c| vk::SpecializationMapEntry { - constant_id: c.id, - offset: c.range.start as _, - size: (c.range.end - c.range.start) as _, - }) - .collect(), - ); - let map_entries = self.specialization_entries.last().unwrap(); - - self.specializations.push(vk::SpecializationInfo { - map_entry_count: map_entries.len() as _, - p_map_entries: map_entries.as_ptr(), - data_size: source.specialization.data.len() as _, - p_data: source.specialization.data.as_ptr() as _, - }); - - self.stages.push(vk::PipelineShaderStageCreateInfo { - s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineShaderStageCreateFlags::empty(), - stage, - module: source.module.raw, - p_name, - p_specialization_info: self.specializations.last().unwrap(), - }) - } - - unsafe fn initialize<'a>( - this: &mut Pin<&mut Self>, - device: &Device, - desc: &pso::GraphicsPipelineDesc<'a, B>, - ) { - let mut this = Pin::get_mut(this.as_mut()); // use into_inner when it gets stable - - // Vertex stage - // vertex shader is required - this.add_stage(vk::ShaderStageFlags::VERTEX, &desc.shaders.vertex); - // Pixel stage - if let Some(ref entry) = desc.shaders.fragment { - this.add_stage(vk::ShaderStageFlags::FRAGMENT, entry); - } - // Geometry stage - if let Some(ref entry) = desc.shaders.geometry { - 
this.add_stage(vk::ShaderStageFlags::GEOMETRY, entry); - } - // Domain stage - if let Some(ref entry) = desc.shaders.domain { - this.add_stage(vk::ShaderStageFlags::TESSELLATION_EVALUATION, entry); - } - // Hull stage - if let Some(ref entry) = desc.shaders.hull { - this.add_stage(vk::ShaderStageFlags::TESSELLATION_CONTROL, entry); - } - - this.vertex_bindings = desc.vertex_buffers.iter().map(|vbuf| { - vk::VertexInputBindingDescription { - binding: vbuf.binding, - stride: vbuf.stride as u32, - input_rate: match vbuf.rate { - VertexInputRate::Vertex => vk::VertexInputRate::VERTEX, - VertexInputRate::Instance(divisor) => { - debug_assert_eq!(divisor, 1, "Custom vertex rate divisors not supported in Vulkan backend without extension"); - vk::VertexInputRate::INSTANCE - }, - }, - } - }).collect(); - this.vertex_attributes = desc - .attributes - .iter() - .map(|attr| vk::VertexInputAttributeDescription { - location: attr.location as u32, - binding: attr.binding as u32, - format: conv::map_format(attr.element.format), - offset: attr.element.offset as u32, - }) - .collect(); - - this.vertex_input_state = vk::PipelineVertexInputStateCreateInfo { - s_type: vk::StructureType::PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineVertexInputStateCreateFlags::empty(), - vertex_binding_description_count: this.vertex_bindings.len() as _, - p_vertex_binding_descriptions: this.vertex_bindings.as_ptr(), - vertex_attribute_description_count: this.vertex_attributes.len() as _, - p_vertex_attribute_descriptions: this.vertex_attributes.as_ptr(), - }; - - this.input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo { - s_type: vk::StructureType::PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineInputAssemblyStateCreateFlags::empty(), - topology: conv::map_topology(&desc.input_assembler), - primitive_restart_enable: match desc.input_assembler.restart_index { - Some(_) => vk::TRUE, - None => vk::FALSE, - }, - }; - 
- let depth_bias = match desc.rasterizer.depth_bias { - Some(pso::State::Static(db)) => db, - Some(pso::State::Dynamic) => { - this.dynamic_states.push(vk::DynamicState::DEPTH_BIAS); - pso::DepthBias::default() - } - None => pso::DepthBias::default(), - }; - - let (polygon_mode, line_width) = match desc.rasterizer.polygon_mode { - pso::PolygonMode::Point => (vk::PolygonMode::POINT, 1.0), - pso::PolygonMode::Line(width) => ( - vk::PolygonMode::LINE, - match width { - pso::State::Static(w) => w, - pso::State::Dynamic => { - this.dynamic_states.push(vk::DynamicState::LINE_WIDTH); - 1.0 - } - }, - ), - pso::PolygonMode::Fill => (vk::PolygonMode::FILL, 1.0), - }; - - this.rasterization_state = vk::PipelineRasterizationStateCreateInfo { - s_type: vk::StructureType::PIPELINE_RASTERIZATION_STATE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineRasterizationStateCreateFlags::empty(), - depth_clamp_enable: if desc.rasterizer.depth_clamping { - if device.raw.1.contains(Features::DEPTH_CLAMP) { - vk::TRUE - } else { - warn!("Depth clamping was requested on a device with disabled feature"); - vk::FALSE - } - } else { - vk::FALSE - }, - rasterizer_discard_enable: if desc.shaders.fragment.is_none() - && desc.depth_stencil.depth.is_none() - && desc.depth_stencil.stencil.is_none() - { - vk::TRUE - } else { - vk::FALSE - }, - polygon_mode, - cull_mode: conv::map_cull_face(desc.rasterizer.cull_face), - front_face: conv::map_front_face(desc.rasterizer.front_face), - depth_bias_enable: if desc.rasterizer.depth_bias.is_some() { - vk::TRUE - } else { - vk::FALSE - }, - depth_bias_constant_factor: depth_bias.const_factor, - depth_bias_clamp: depth_bias.clamp, - depth_bias_slope_factor: depth_bias.slope_factor, - line_width, - }; - - this.tessellation_state = { - if let pso::Primitive::PatchList(patch_control_points) = desc.input_assembler.primitive { - Some(vk::PipelineTessellationStateCreateInfo { - s_type: vk::StructureType::PIPELINE_TESSELLATION_STATE_CREATE_INFO, - p_next: 
ptr::null(), - flags: vk::PipelineTessellationStateCreateFlags::empty(), - patch_control_points: patch_control_points as _, - }) - } else { - None - } - }; - - this.viewport_state = vk::PipelineViewportStateCreateInfo { - s_type: vk::StructureType::PIPELINE_VIEWPORT_STATE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineViewportStateCreateFlags::empty(), - scissor_count: 1, // TODO - p_scissors: match desc.baked_states.scissor { - Some(ref rect) => { - this.scissor = conv::map_rect(rect); - &this.scissor - } - None => { - this.dynamic_states.push(vk::DynamicState::SCISSOR); - ptr::null() - } - }, - viewport_count: 1, // TODO - p_viewports: match desc.baked_states.viewport { - Some(ref vp) => { - this.viewport = conv::map_viewport(vp); - &this.viewport - } - None => { - this.dynamic_states.push(vk::DynamicState::VIEWPORT); - ptr::null() - } - }, - }; - - this.multisample_state = match desc.multisampling { - Some(ref ms) => { - this.sample_mask = [ - (ms.sample_mask & 0xFFFFFFFF) as u32, - ((ms.sample_mask >> 32) & 0xFFFFFFFF) as u32, - ]; - vk::PipelineMultisampleStateCreateInfo { - s_type: vk::StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineMultisampleStateCreateFlags::empty(), - rasterization_samples: vk::SampleCountFlags::from_raw( - (ms.rasterization_samples as u32) & vk::SampleCountFlags::all().as_raw(), - ), - sample_shading_enable: ms.sample_shading.is_some() as _, - min_sample_shading: ms.sample_shading.unwrap_or(0.0), - p_sample_mask: &this.sample_mask as _, - alpha_to_coverage_enable: ms.alpha_coverage as _, - alpha_to_one_enable: ms.alpha_to_one as _, - } - } - None => vk::PipelineMultisampleStateCreateInfo { - s_type: vk::StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineMultisampleStateCreateFlags::empty(), - rasterization_samples: vk::SampleCountFlags::TYPE_1, - sample_shading_enable: vk::FALSE, - min_sample_shading: 0.0, - p_sample_mask: 
ptr::null(), - alpha_to_coverage_enable: vk::FALSE, - alpha_to_one_enable: vk::FALSE, - }, - }; - - let depth_stencil = desc.depth_stencil; - let (depth_test_enable, depth_write_enable, depth_compare_op) = match depth_stencil.depth { - Some(ref depth) => (vk::TRUE, depth.write as _, conv::map_comparison(depth.fun)), - None => (vk::FALSE, vk::FALSE, vk::CompareOp::NEVER), - }; - let (stencil_test_enable, front, back) = match depth_stencil.stencil { - Some(ref stencil) => { - let mut front = conv::map_stencil_side(&stencil.faces.front); - let mut back = conv::map_stencil_side(&stencil.faces.back); - match stencil.read_masks { - pso::State::Static(ref sides) => { - front.compare_mask = sides.front; - back.compare_mask = sides.back; - } - pso::State::Dynamic => { - this.dynamic_states - .push(vk::DynamicState::STENCIL_COMPARE_MASK); - } - } - match stencil.write_masks { - pso::State::Static(ref sides) => { - front.write_mask = sides.front; - back.write_mask = sides.back; - } - pso::State::Dynamic => { - this.dynamic_states - .push(vk::DynamicState::STENCIL_WRITE_MASK); - } - } - match stencil.reference_values { - pso::State::Static(ref sides) => { - front.reference = sides.front; - back.reference = sides.back; - } - pso::State::Dynamic => { - this.dynamic_states - .push(vk::DynamicState::STENCIL_REFERENCE); - } - } - (vk::TRUE, front, back) - } - None => mem::zeroed(), - }; - let (min_depth_bounds, max_depth_bounds) = match desc.baked_states.depth_bounds { - Some(ref range) => (range.start, range.end), - None => { - this.dynamic_states.push(vk::DynamicState::DEPTH_BOUNDS); - (0.0, 1.0) - } - }; - - this.depth_stencil_state = vk::PipelineDepthStencilStateCreateInfo { - s_type: vk::StructureType::PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineDepthStencilStateCreateFlags::empty(), - depth_test_enable, - depth_write_enable, - depth_compare_op, - depth_bounds_test_enable: depth_stencil.depth_bounds as _, - stencil_test_enable, - 
front, - back, - min_depth_bounds, - max_depth_bounds, - }; - - this.blend_states = desc - .blender - .targets - .iter() - .map(|color_desc| { - let color_write_mask = - vk::ColorComponentFlags::from_raw(color_desc.mask.bits() as _); - match color_desc.blend { - Some(ref bs) => { - let (color_blend_op, src_color_blend_factor, dst_color_blend_factor) = - conv::map_blend_op(bs.color); - let (alpha_blend_op, src_alpha_blend_factor, dst_alpha_blend_factor) = - conv::map_blend_op(bs.alpha); - vk::PipelineColorBlendAttachmentState { - color_write_mask, - blend_enable: vk::TRUE, - src_color_blend_factor, - dst_color_blend_factor, - color_blend_op, - src_alpha_blend_factor, - dst_alpha_blend_factor, - alpha_blend_op, - } - } - None => vk::PipelineColorBlendAttachmentState { - color_write_mask, - ..mem::zeroed() - }, - } - }) - .collect(); - - this.color_blend_state = vk::PipelineColorBlendStateCreateInfo { - s_type: vk::StructureType::PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineColorBlendStateCreateFlags::empty(), - logic_op_enable: vk::FALSE, // TODO - logic_op: vk::LogicOp::CLEAR, - attachment_count: this.blend_states.len() as _, - p_attachments: this.blend_states.as_ptr(), // TODO: - blend_constants: match desc.baked_states.blend_color { - Some(value) => value, - None => { - this.dynamic_states.push(vk::DynamicState::BLEND_CONSTANTS); - [0.0; 4] - } - }, - }; - - this.pipeline_dynamic_state = vk::PipelineDynamicStateCreateInfo { - s_type: vk::StructureType::PIPELINE_DYNAMIC_STATE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineDynamicStateCreateFlags::empty(), - dynamic_state_count: this.dynamic_states.len() as _, - p_dynamic_states: this.dynamic_states.as_ptr(), - }; - } -} - -#[derive(Debug, Default)] -struct ComputePipelineInfoBuf { - c_string: CString, - specialization: vk::SpecializationInfo, - entries: SmallVec<[vk::SpecializationMapEntry; 4]>, -} -impl ComputePipelineInfoBuf { - unsafe fn initialize<'a>( - this: 
&mut Pin<&mut Self>, - desc: &pso::ComputePipelineDesc<'a, B>, - ) { - let mut this = Pin::get_mut(this.as_mut()); // use into_inner when it gets stable - - this.c_string = CString::new(desc.shader.entry).unwrap(); - this.entries = desc - .shader - .specialization - .constants - .iter() - .map(|c| vk::SpecializationMapEntry { - constant_id: c.id, - offset: c.range.start as _, - size: (c.range.end - c.range.start) as _, - }) - .collect(); - this.specialization = vk::SpecializationInfo { - map_entry_count: this.entries.len() as _, - p_map_entries: this.entries.as_ptr(), - data_size: desc.shader.specialization.data.len() as _, - p_data: desc.shader.specialization.data.as_ptr() as _, - }; - } -} - -impl d::Device for Device { - unsafe fn allocate_memory( - &self, - mem_type: MemoryTypeId, - size: u64, - ) -> Result { - let info = vk::MemoryAllocateInfo { - s_type: vk::StructureType::MEMORY_ALLOCATE_INFO, - p_next: ptr::null(), - allocation_size: size, - memory_type_index: mem_type.0 as _, - }; - - let result = self.raw.0.allocate_memory(&info, None); - - match result { - Ok(memory) => Ok(n::Memory { raw: memory }), - Err(vk::Result::ERROR_TOO_MANY_OBJECTS) => Err(d::AllocationError::TooManyObjects), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn create_command_pool( - &self, - family: queue::QueueFamilyId, - create_flags: CommandPoolCreateFlags, - ) -> Result { - let mut flags = vk::CommandPoolCreateFlags::empty(); - if create_flags.contains(CommandPoolCreateFlags::TRANSIENT) { - flags |= vk::CommandPoolCreateFlags::TRANSIENT; - } - if create_flags.contains(CommandPoolCreateFlags::RESET_INDIVIDUAL) { - flags |= vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER; - } - - let info = vk::CommandPoolCreateInfo { - s_type: vk::StructureType::COMMAND_POOL_CREATE_INFO, - p_next: ptr::null(), - flags, - 
queue_family_index: family.0 as _, - }; - - let result = self.raw.0.create_command_pool(&info, None); - - match result { - Ok(pool) => Ok(RawCommandPool { - raw: pool, - device: self.raw.clone(), - }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - unsafe fn destroy_command_pool(&self, pool: RawCommandPool) { - self.raw.0.destroy_command_pool(pool.raw, None); - } - - unsafe fn create_render_pass<'a, IA, IS, ID>( - &self, - attachments: IA, - subpasses: IS, - dependencies: ID, - ) -> Result - where - IA: IntoIterator, - IA::Item: Borrow, - IS: IntoIterator, - IS::Item: Borrow>, - ID: IntoIterator, - ID::Item: Borrow, - { - let map_subpass_ref = |pass: pass::SubpassRef| match pass { - pass::SubpassRef::External => vk::SUBPASS_EXTERNAL, - pass::SubpassRef::Pass(id) => id as u32, - }; - - let attachments = attachments - .into_iter() - .map(|attachment| { - let attachment = attachment.borrow(); - vk::AttachmentDescription { - flags: vk::AttachmentDescriptionFlags::empty(), // TODO: may even alias! 
- format: attachment - .format - .map_or(vk::Format::UNDEFINED, conv::map_format), - samples: vk::SampleCountFlags::from_raw( - (attachment.samples as u32) & vk::SampleCountFlags::all().as_raw(), - ), - load_op: conv::map_attachment_load_op(attachment.ops.load), - store_op: conv::map_attachment_store_op(attachment.ops.store), - stencil_load_op: conv::map_attachment_load_op(attachment.stencil_ops.load), - stencil_store_op: conv::map_attachment_store_op(attachment.stencil_ops.store), - initial_layout: conv::map_image_layout(attachment.layouts.start), - final_layout: conv::map_image_layout(attachment.layouts.end), - } - }) - .collect::>(); - - let clear_attachments_mask = attachments - .iter() - .enumerate() - .filter_map(|(i, at)| { - if at.load_op == vk::AttachmentLoadOp::CLEAR - || at.stencil_load_op == vk::AttachmentLoadOp::CLEAR - { - Some(1 << i as u64) - } else { - None - } - }) - .sum(); - - let attachment_refs = subpasses - .into_iter() - .map(|subpass| { - let subpass = subpass.borrow(); - fn make_ref(&(id, layout): &pass::AttachmentRef) -> vk::AttachmentReference { - vk::AttachmentReference { - attachment: id as _, - layout: conv::map_image_layout(layout), - } - } - let colors = subpass.colors.iter().map(make_ref).collect::>(); - let depth_stencil = subpass.depth_stencil.map(make_ref); - let inputs = subpass.inputs.iter().map(make_ref).collect::>(); - let preserves = subpass - .preserves - .iter() - .map(|&id| id as u32) - .collect::>(); - let resolves = subpass.resolves.iter().map(make_ref).collect::>(); - - (colors, depth_stencil, inputs, preserves, resolves) - }) - .collect::>(); - - let subpasses = attachment_refs - .iter() - .map( - |(colors, depth_stencil, inputs, preserves, resolves)| vk::SubpassDescription { - flags: vk::SubpassDescriptionFlags::empty(), - pipeline_bind_point: vk::PipelineBindPoint::GRAPHICS, - input_attachment_count: inputs.len() as u32, - p_input_attachments: inputs.as_ptr(), - color_attachment_count: colors.len() as u32, - 
p_color_attachments: colors.as_ptr(), - p_resolve_attachments: if resolves.is_empty() { - ptr::null() - } else { - resolves.as_ptr() - }, - p_depth_stencil_attachment: match depth_stencil { - Some(ref aref) => aref as *const _, - None => ptr::null(), - }, - preserve_attachment_count: preserves.len() as u32, - p_preserve_attachments: preserves.as_ptr(), - }, - ) - .collect::>(); - - let dependencies = dependencies - .into_iter() - .map(|subpass_dep| { - let sdep = subpass_dep.borrow(); - // TODO: checks - vk::SubpassDependency { - src_subpass: map_subpass_ref(sdep.passes.start), - dst_subpass: map_subpass_ref(sdep.passes.end), - src_stage_mask: conv::map_pipeline_stage(sdep.stages.start), - dst_stage_mask: conv::map_pipeline_stage(sdep.stages.end), - src_access_mask: conv::map_image_access(sdep.accesses.start), - dst_access_mask: conv::map_image_access(sdep.accesses.end), - dependency_flags: mem::transmute(sdep.flags), - } - }) - .collect::>(); - - let info = vk::RenderPassCreateInfo { - s_type: vk::StructureType::RENDER_PASS_CREATE_INFO, - p_next: ptr::null(), - flags: vk::RenderPassCreateFlags::empty(), - attachment_count: attachments.len() as u32, - p_attachments: attachments.as_ptr(), - subpass_count: subpasses.len() as u32, - p_subpasses: subpasses.as_ptr(), - dependency_count: dependencies.len() as u32, - p_dependencies: dependencies.as_ptr(), - }; - - let result = self.raw.0.create_render_pass(&info, None); - - match result { - Ok(renderpass) => Ok(n::RenderPass { - raw: renderpass, - clear_attachments_mask, - }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - unsafe fn create_pipeline_layout( - &self, - sets: IS, - push_constant_ranges: IR, - ) -> Result - where - IS: IntoIterator, - IS::Item: Borrow, - IR: IntoIterator, - IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, - { - let set_layouts = sets - .into_iter() - 
.map(|set| set.borrow().raw) - .collect::>(); - - debug!("create_pipeline_layout {:?}", set_layouts); - - let push_constant_ranges = push_constant_ranges - .into_iter() - .map(|range| { - let &(s, ref r) = range.borrow(); - vk::PushConstantRange { - stage_flags: conv::map_stage_flags(s), - offset: r.start, - size: r.end - r.start, - } - }) - .collect::>(); - - let info = vk::PipelineLayoutCreateInfo { - s_type: vk::StructureType::PIPELINE_LAYOUT_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineLayoutCreateFlags::empty(), - set_layout_count: set_layouts.len() as u32, - p_set_layouts: set_layouts.as_ptr(), - push_constant_range_count: push_constant_ranges.len() as u32, - p_push_constant_ranges: push_constant_ranges.as_ptr(), - }; - - let result = self.raw.0.create_pipeline_layout(&info, None); - - match result { - Ok(raw) => Ok(n::PipelineLayout { raw }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - unsafe fn create_pipeline_cache( - &self, - data: Option<&[u8]>, - ) -> Result { - let (data_len, data) = if let Some(d) = data { - (d.len(), d.as_ptr()) - } else { - (0_usize, ptr::null()) - }; - - let info = vk::PipelineCacheCreateInfo { - s_type: vk::StructureType::PIPELINE_CACHE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineCacheCreateFlags::empty(), - initial_data_size: data_len, - p_initial_data: data as _, - }; - - let result = self.raw.0.create_pipeline_cache(&info, None); - - match result { - Ok(raw) => Ok(n::PipelineCache { raw }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - unsafe fn get_pipeline_cache_data( - &self, - cache: &n::PipelineCache, - ) -> Result, d::OutOfMemory> { - let result = self.raw.0.get_pipeline_cache_data(cache.raw); - - match result { - 
Ok(data) => Ok(data), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - unsafe fn destroy_pipeline_cache(&self, cache: n::PipelineCache) { - self.raw.0.destroy_pipeline_cache(cache.raw, None); - } - - unsafe fn merge_pipeline_caches( - &self, - target: &n::PipelineCache, - sources: I, - ) -> Result<(), d::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow, - { - let caches = sources - .into_iter() - .map(|s| s.borrow().raw) - .collect::>(); - let result = self.raw.0.fp_v1_0().merge_pipeline_caches( - self.raw.0.handle(), - target.raw, - caches.len() as u32, - caches.as_ptr(), - ); - - match result { - vk::Result::SUCCESS => Ok(()), - vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host), - vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - unsafe fn create_graphics_pipeline<'a>( - &self, - desc: &pso::GraphicsPipelineDesc<'a, B>, - cache: Option<&n::PipelineCache>, - ) -> Result { - debug!("create_graphics_pipeline {:?}", desc); - - let mut buf = GraphicsPipelineInfoBuf::default(); - let mut buf = Pin::new(&mut buf); - GraphicsPipelineInfoBuf::initialize(&mut buf, self, desc); - - let info = { - let (base_handle, base_index) = match desc.parent { - pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), - pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), - pso::BasePipeline::None => (vk::Pipeline::null(), -1), - }; - - let mut flags = vk::PipelineCreateFlags::empty(); - match desc.parent { - pso::BasePipeline::None => (), - _ => { - flags |= vk::PipelineCreateFlags::DERIVATIVE; - } - } - if desc - .flags - .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) - { - flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; - } - if desc - .flags - .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) - { - flags |= 
vk::PipelineCreateFlags::ALLOW_DERIVATIVES; - } - - vk::GraphicsPipelineCreateInfo { - s_type: vk::StructureType::GRAPHICS_PIPELINE_CREATE_INFO, - p_next: ptr::null(), - flags, - stage_count: buf.stages.len() as _, - p_stages: buf.stages.as_ptr(), - p_vertex_input_state: &buf.vertex_input_state, - p_input_assembly_state: &buf.input_assembly_state, - p_rasterization_state: &buf.rasterization_state, - p_tessellation_state: match buf.tessellation_state.as_ref() { - Some(t) => t as _, - None => ptr::null(), - }, - p_viewport_state: &buf.viewport_state, - p_multisample_state: &buf.multisample_state, - p_depth_stencil_state: &buf.depth_stencil_state, - p_color_blend_state: &buf.color_blend_state, - p_dynamic_state: &buf.pipeline_dynamic_state, - layout: desc.layout.raw, - render_pass: desc.subpass.main_pass.raw, - subpass: desc.subpass.index as _, - base_pipeline_handle: base_handle, - base_pipeline_index: base_index, - } - }; - - let mut pipeline = vk::Pipeline::null(); - - match self.raw.0.fp_v1_0().create_graphics_pipelines( - self.raw.0.handle(), - cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), - 1, - &info, - ptr::null(), - &mut pipeline, - ) { - vk::Result::SUCCESS => Ok(n::GraphicsPipeline(pipeline)), - vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()), - vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()), - _ => Err(pso::CreationError::Other), - } - } - - unsafe fn create_graphics_pipelines<'a, T>( - &self, - descs: T, - cache: Option<&n::PipelineCache>, - ) -> Vec> - where - T: IntoIterator, - T::Item: Borrow>, - { - debug!("create_graphics_pipelines:"); - - let mut bufs: Pin> = descs - .into_iter() - .enumerate() - .inspect(|(idx, desc)| debug!("# {} {:?}", idx, desc.borrow())) - .map(|(_, desc)| (desc, GraphicsPipelineInfoBuf::default())) - .collect::>() - .into(); - - for (desc, buf) in bufs.as_mut().get_unchecked_mut() { - let desc: &T::Item = desc; - GraphicsPipelineInfoBuf::initialize(&mut 
Pin::new_unchecked(buf), self, desc.borrow()); - } - - let infos: Vec<_> = bufs - .iter() - .map(|(desc, buf)| { - let desc = desc.borrow(); - - let (base_handle, base_index) = match desc.parent { - pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), - pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), - pso::BasePipeline::None => (vk::Pipeline::null(), -1), - }; - - let mut flags = vk::PipelineCreateFlags::empty(); - match desc.parent { - pso::BasePipeline::None => (), - _ => { - flags |= vk::PipelineCreateFlags::DERIVATIVE; - } - } - if desc - .flags - .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) - { - flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; - } - if desc - .flags - .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) - { - flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; - } - - vk::GraphicsPipelineCreateInfo { - s_type: vk::StructureType::GRAPHICS_PIPELINE_CREATE_INFO, - p_next: ptr::null(), - flags, - stage_count: buf.stages.len() as _, - p_stages: buf.stages.as_ptr(), - p_vertex_input_state: &buf.vertex_input_state, - p_input_assembly_state: &buf.input_assembly_state, - p_rasterization_state: &buf.rasterization_state, - p_tessellation_state: match buf.tessellation_state.as_ref() { - Some(t) => t as _, - None => ptr::null(), - }, - p_viewport_state: &buf.viewport_state, - p_multisample_state: &buf.multisample_state, - p_depth_stencil_state: &buf.depth_stencil_state, - p_color_blend_state: &buf.color_blend_state, - p_dynamic_state: &buf.pipeline_dynamic_state, - layout: desc.layout.raw, - render_pass: desc.subpass.main_pass.raw, - subpass: desc.subpass.index as _, - base_pipeline_handle: base_handle, - base_pipeline_index: base_index, - } - }) - .collect(); - - let (pipelines, error) = if infos.is_empty() { - (Vec::new(), None) - } else { - match self.raw.0.create_graphics_pipelines( - cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), - &infos, - None, - ) { - Ok(pipelines) => 
(pipelines, None), - Err((pipelines, error)) => (pipelines, Some(error)), - } - }; - - pipelines - .into_iter() - .map(|pso| { - if pso == vk::Pipeline::null() { - match error { - Some(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { - Err(d::OutOfMemory::Host.into()) - } - Some(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { - Err(d::OutOfMemory::Device.into()) - } - _ => unreachable!(), - } - } else { - Ok(n::GraphicsPipeline(pso)) - } - }) - .collect() - } - - unsafe fn create_compute_pipeline<'a>( - &self, - desc: &pso::ComputePipelineDesc<'a, B>, - cache: Option<&n::PipelineCache>, - ) -> Result { - let mut buf = ComputePipelineInfoBuf::default(); - let mut buf = Pin::new(&mut buf); - ComputePipelineInfoBuf::initialize(&mut buf, desc); - - let info = { - let stage = vk::PipelineShaderStageCreateInfo { - s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineShaderStageCreateFlags::empty(), - stage: vk::ShaderStageFlags::COMPUTE, - module: desc.shader.module.raw, - p_name: buf.c_string.as_ptr(), - p_specialization_info: &buf.specialization, - }; - - let (base_handle, base_index) = match desc.parent { - pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), - pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), - pso::BasePipeline::None => (vk::Pipeline::null(), -1), - }; - - let mut flags = vk::PipelineCreateFlags::empty(); - match desc.parent { - pso::BasePipeline::None => (), - _ => { - flags |= vk::PipelineCreateFlags::DERIVATIVE; - } - } - if desc - .flags - .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) - { - flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; - } - if desc - .flags - .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) - { - flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; - } - - vk::ComputePipelineCreateInfo { - s_type: vk::StructureType::COMPUTE_PIPELINE_CREATE_INFO, - p_next: ptr::null(), - flags, - stage, - layout: desc.layout.raw, - 
base_pipeline_handle: base_handle, - base_pipeline_index: base_index, - } - }; - - let mut pipeline = vk::Pipeline::null(); - - match self.raw.0.fp_v1_0().create_compute_pipelines( - self.raw.0.handle(), - cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), - 1, - &info, - ptr::null(), - &mut pipeline, - ) { - vk::Result::SUCCESS => Ok(n::ComputePipeline(pipeline)), - vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()), - vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()), - _ => Err(pso::CreationError::Other), - } - } - - unsafe fn create_compute_pipelines<'a, T>( - &self, - descs: T, - cache: Option<&n::PipelineCache>, - ) -> Vec> - where - T: IntoIterator, - T::Item: Borrow>, - { - let mut bufs: Pin> = descs - .into_iter() - .map(|desc| (desc, ComputePipelineInfoBuf::default())) - .collect::>() - .into(); - - for (desc, buf) in bufs.as_mut().get_unchecked_mut() { - let desc: &T::Item = desc; - ComputePipelineInfoBuf::initialize(&mut Pin::new_unchecked(buf), desc.borrow()); - } - - let infos: Vec<_> = bufs - .iter() - .map(|(desc, buf)| { - let desc = desc.borrow(); - - let stage = vk::PipelineShaderStageCreateInfo { - s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::PipelineShaderStageCreateFlags::empty(), - stage: vk::ShaderStageFlags::COMPUTE, - module: desc.shader.module.raw, - p_name: buf.c_string.as_ptr(), - p_specialization_info: &buf.specialization, - }; - - let (base_handle, base_index) = match desc.parent { - pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), - pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), - pso::BasePipeline::None => (vk::Pipeline::null(), -1), - }; - - let mut flags = vk::PipelineCreateFlags::empty(); - match desc.parent { - pso::BasePipeline::None => (), - _ => { - flags |= vk::PipelineCreateFlags::DERIVATIVE; - } - } - if desc - .flags - .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) - 
{ - flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; - } - if desc - .flags - .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) - { - flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; - } - - vk::ComputePipelineCreateInfo { - s_type: vk::StructureType::COMPUTE_PIPELINE_CREATE_INFO, - p_next: ptr::null(), - flags, - stage, - layout: desc.layout.raw, - base_pipeline_handle: base_handle, - base_pipeline_index: base_index, - } - }) - .collect(); - - let (pipelines, error) = if infos.is_empty() { - (Vec::new(), None) - } else { - match self.raw.0.create_compute_pipelines( - cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), - &infos, - None, - ) { - Ok(pipelines) => (pipelines, None), - Err((pipelines, error)) => (pipelines, Some(error)), - } - }; - - pipelines - .into_iter() - .map(|pso| { - if pso == vk::Pipeline::null() { - match error { - Some(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { - Err(d::OutOfMemory::Host.into()) - } - Some(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { - Err(d::OutOfMemory::Device.into()) - } - _ => unreachable!(), - } - } else { - Ok(n::ComputePipeline(pso)) - } - }) - .collect() - } - - unsafe fn create_framebuffer( - &self, - renderpass: &n::RenderPass, - attachments: T, - extent: image::Extent, - ) -> Result - where - T: IntoIterator, - T::Item: Borrow, - { - let mut framebuffers_ptr = None; - let mut raw_attachments = SmallVec::<[_; 4]>::new(); - for attachment in attachments { - let at = attachment.borrow(); - raw_attachments.push(at.view); - match at.owner { - n::ImageViewOwner::User => {} - n::ImageViewOwner::Surface(ref fbo_ptr) => { - framebuffers_ptr = Some(Arc::clone(&fbo_ptr.0)); - } - } - } - - let info = vk::FramebufferCreateInfo { - s_type: vk::StructureType::FRAMEBUFFER_CREATE_INFO, - p_next: ptr::null(), - flags: vk::FramebufferCreateFlags::empty(), - render_pass: renderpass.raw, - attachment_count: raw_attachments.len() as u32, - p_attachments: raw_attachments.as_ptr(), - width: extent.width, - 
height: extent.height, - layers: extent.depth, - }; - - let result = self.raw.0.create_framebuffer(&info, None); - - match result { - Ok(raw) => Ok(n::Framebuffer { - raw, - owned: match framebuffers_ptr { - Some(fbo_ptr) => { - fbo_ptr.lock().unwrap().framebuffers.push(raw); - false - } - None => true, - }, - }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - unsafe fn create_shader_module( - &self, - spirv_data: &[u32], - ) -> Result { - let info = vk::ShaderModuleCreateInfo { - s_type: vk::StructureType::SHADER_MODULE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::ShaderModuleCreateFlags::empty(), - code_size: spirv_data.len() * 4, - p_code: spirv_data.as_ptr(), - }; - - let module = self.raw.0.create_shader_module(&info, None); - - match module { - Ok(raw) => Ok(n::ShaderModule { raw }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - Err(_) => { - Err(d::ShaderError::CompilationFailed(String::new())) // TODO - } - } - } - - unsafe fn create_sampler( - &self, - desc: &image::SamplerDesc, - ) -> Result { - use hal::pso::Comparison; - - let (anisotropy_enable, max_anisotropy) = match desc.anisotropic { - image::Anisotropic::Off => (vk::FALSE, 1.0), - image::Anisotropic::On(aniso) => { - if self.raw.1.contains(Features::SAMPLER_ANISOTROPY) { - (vk::TRUE, aniso as f32) - } else { - warn!( - "Anisotropy({}) was requested on a device with disabled feature", - aniso - ); - (vk::FALSE, 1.0) - } - } - }; - let info = vk::SamplerCreateInfo { - s_type: vk::StructureType::SAMPLER_CREATE_INFO, - p_next: ptr::null(), - flags: vk::SamplerCreateFlags::empty(), - mag_filter: conv::map_filter(desc.mag_filter), - min_filter: conv::map_filter(desc.min_filter), - mipmap_mode: conv::map_mip_filter(desc.mip_filter), - 
address_mode_u: conv::map_wrap(desc.wrap_mode.0), - address_mode_v: conv::map_wrap(desc.wrap_mode.1), - address_mode_w: conv::map_wrap(desc.wrap_mode.2), - mip_lod_bias: desc.lod_bias.0, - anisotropy_enable, - max_anisotropy, - compare_enable: if desc.comparison.is_some() { - vk::TRUE - } else { - vk::FALSE - }, - compare_op: conv::map_comparison(desc.comparison.unwrap_or(Comparison::Never)), - min_lod: desc.lod_range.start.0, - max_lod: desc.lod_range.end.0, - border_color: match conv::map_border_color(desc.border) { - Some(bc) => bc, - None => { - error!("Unsupported border color {:x}", desc.border.0); - vk::BorderColor::FLOAT_TRANSPARENT_BLACK - } - }, - unnormalized_coordinates: if desc.normalized { - vk::FALSE - } else { - vk::TRUE - }, - }; - - let result = self.raw.0.create_sampler(&info, None); - - match result { - Ok(sampler) => Ok(n::Sampler(sampler)), - Err(vk::Result::ERROR_TOO_MANY_OBJECTS) => Err(d::AllocationError::TooManyObjects), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - /// - unsafe fn create_buffer( - &self, - size: u64, - usage: buffer::Usage, - ) -> Result { - let info = vk::BufferCreateInfo { - s_type: vk::StructureType::BUFFER_CREATE_INFO, - p_next: ptr::null(), - flags: vk::BufferCreateFlags::empty(), // TODO: - size, - usage: conv::map_buffer_usage(usage), - sharing_mode: vk::SharingMode::EXCLUSIVE, // TODO: - queue_family_index_count: 0, - p_queue_family_indices: ptr::null(), - }; - - let result = self.raw.0.create_buffer(&info, None); - - match result { - Ok(raw) => Ok(n::Buffer { raw }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn get_buffer_requirements(&self, buffer: &n::Buffer) -> Requirements { - let req = 
self.raw.0.get_buffer_memory_requirements(buffer.raw); - - Requirements { - size: req.size, - alignment: req.alignment, - type_mask: req.memory_type_bits as _, - } - } - - unsafe fn bind_buffer_memory( - &self, - memory: &n::Memory, - offset: u64, - buffer: &mut n::Buffer, - ) -> Result<(), d::BindError> { - let result = self - .raw - .0 - .bind_buffer_memory(buffer.raw, memory.raw, offset); - - match result { - Ok(()) => Ok(()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn create_buffer_view>( - &self, - buffer: &n::Buffer, - format: Option, - range: R, - ) -> Result { - let (offset, size) = conv::map_range_arg(&range); - let info = vk::BufferViewCreateInfo { - s_type: vk::StructureType::BUFFER_VIEW_CREATE_INFO, - p_next: ptr::null(), - flags: vk::BufferViewCreateFlags::empty(), - buffer: buffer.raw, - format: format.map_or(vk::Format::UNDEFINED, conv::map_format), - offset, - range: size, - }; - - let result = self.raw.0.create_buffer_view(&info, None); - - match result { - Ok(raw) => Ok(n::BufferView { raw }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn create_image( - &self, - kind: image::Kind, - mip_levels: image::Level, - format: format::Format, - tiling: image::Tiling, - usage: image::Usage, - view_caps: image::ViewCapabilities, - ) -> Result { - let flags = conv::map_view_capabilities(view_caps); - let extent = conv::map_extent(kind.extent()); - let array_layers = kind.num_layers(); - let samples = kind.num_samples() as u32; - let image_type = match kind { - image::Kind::D1(..) => vk::ImageType::TYPE_1D, - image::Kind::D2(..) => vk::ImageType::TYPE_2D, - image::Kind::D3(..) 
=> vk::ImageType::TYPE_3D, - }; - - let info = vk::ImageCreateInfo { - s_type: vk::StructureType::IMAGE_CREATE_INFO, - p_next: ptr::null(), - flags, - image_type, - format: conv::map_format(format), - extent: extent.clone(), - mip_levels: mip_levels as u32, - array_layers: array_layers as u32, - samples: vk::SampleCountFlags::from_raw(samples & vk::SampleCountFlags::all().as_raw()), - tiling: conv::map_tiling(tiling), - usage: conv::map_image_usage(usage), - sharing_mode: vk::SharingMode::EXCLUSIVE, // TODO: - queue_family_index_count: 0, - p_queue_family_indices: ptr::null(), - initial_layout: vk::ImageLayout::UNDEFINED, - }; - - let result = self.raw.0.create_image(&info, None); - - match result { - Ok(raw) => Ok(n::Image { - raw, - ty: image_type, - flags, - extent, - }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn get_image_requirements(&self, image: &n::Image) -> Requirements { - let req = self.raw.0.get_image_memory_requirements(image.raw); - - Requirements { - size: req.size, - alignment: req.alignment, - type_mask: req.memory_type_bits as _, - } - } - - unsafe fn get_image_subresource_footprint( - &self, - image: &n::Image, - subresource: image::Subresource, - ) -> image::SubresourceFootprint { - let sub = conv::map_subresource(&subresource); - let layout = self.raw.0.get_image_subresource_layout(image.raw, sub); - - image::SubresourceFootprint { - slice: layout.offset .. 
layout.offset + layout.size, - row_pitch: layout.row_pitch, - array_pitch: layout.array_pitch, - depth_pitch: layout.depth_pitch, - } - } - - unsafe fn bind_image_memory( - &self, - memory: &n::Memory, - offset: u64, - image: &mut n::Image, - ) -> Result<(), d::BindError> { - // TODO: error handling - // TODO: check required type - let result = self.raw.0.bind_image_memory(image.raw, memory.raw, offset); - - match result { - Ok(()) => Ok(()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn create_image_view( - &self, - image: &n::Image, - kind: image::ViewKind, - format: format::Format, - swizzle: format::Swizzle, - range: image::SubresourceRange, - ) -> Result { - let is_cube = image - .flags - .intersects(vk::ImageCreateFlags::CUBE_COMPATIBLE); - let info = vk::ImageViewCreateInfo { - s_type: vk::StructureType::IMAGE_VIEW_CREATE_INFO, - p_next: ptr::null(), - flags: vk::ImageViewCreateFlags::empty(), - image: image.raw, - view_type: match conv::map_view_kind(kind, image.ty, is_cube) { - Some(ty) => ty, - None => return Err(image::ViewError::BadKind(kind)), - }, - format: conv::map_format(format), - components: conv::map_swizzle(swizzle), - subresource_range: conv::map_subresource_range(&range), - }; - - let result = self.raw.0.create_image_view(&info, None); - - match result { - Ok(view) => Ok(n::ImageView { - image: image.raw, - view, - range, - owner: n::ImageViewOwner::User, - }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn create_descriptor_pool( - &self, - max_sets: usize, - descriptor_pools: T, - flags: pso::DescriptorPoolCreateFlags, - ) -> Result - where - T: IntoIterator, - T::Item: Borrow, - { - let pools = descriptor_pools - 
.into_iter() - .map(|pool| { - let pool = pool.borrow(); - vk::DescriptorPoolSize { - ty: conv::map_descriptor_type(pool.ty), - descriptor_count: pool.count as u32, - } - }) - .collect::>(); - - let info = vk::DescriptorPoolCreateInfo { - s_type: vk::StructureType::DESCRIPTOR_POOL_CREATE_INFO, - p_next: ptr::null(), - flags: conv::map_descriptor_pool_create_flags(flags), - max_sets: max_sets as u32, - pool_size_count: pools.len() as u32, - p_pool_sizes: pools.as_ptr(), - }; - - let result = self.raw.0.create_descriptor_pool(&info, None); - - match result { - Ok(pool) => Ok(n::DescriptorPool { - raw: pool, - device: self.raw.clone(), - set_free_vec: Vec::new(), - }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn create_descriptor_set_layout( - &self, - binding_iter: I, - immutable_sampler_iter: J, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow, - { - let immutable_samplers = immutable_sampler_iter - .into_iter() - .map(|is| is.borrow().0) - .collect::>(); - let mut sampler_offset = 0; - - let bindings = Arc::new( - binding_iter - .into_iter() - .map(|b| b.borrow().clone()) - .collect::>(), - ); - - let raw_bindings = bindings - .iter() - .map(|b| vk::DescriptorSetLayoutBinding { - binding: b.binding, - descriptor_type: conv::map_descriptor_type(b.ty), - descriptor_count: b.count as _, - stage_flags: conv::map_stage_flags(b.stage_flags), - p_immutable_samplers: if b.immutable_samplers { - let slice = &immutable_samplers[sampler_offset ..]; - sampler_offset += b.count; - slice.as_ptr() - } else { - ptr::null() - }, - }) - .collect::>(); - - debug!("create_descriptor_set_layout {:?}", raw_bindings); - - let info = vk::DescriptorSetLayoutCreateInfo { - s_type: vk::StructureType::DESCRIPTOR_SET_LAYOUT_CREATE_INFO, - p_next: ptr::null(), - flags: 
vk::DescriptorSetLayoutCreateFlags::empty(), - binding_count: raw_bindings.len() as _, - p_bindings: raw_bindings.as_ptr(), - }; - - let result = self.raw.0.create_descriptor_set_layout(&info, None); - - match result { - Ok(layout) => Ok(n::DescriptorSetLayout { - raw: layout, - bindings, - }), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) - where - I: IntoIterator>, - J: IntoIterator, - J::Item: Borrow>, - { - let mut raw_writes = Vec::new(); - let mut image_infos = Vec::new(); - let mut buffer_infos = Vec::new(); - let mut texel_buffer_views = Vec::new(); - - for sw in write_iter { - let layout = sw - .set - .bindings - .iter() - .find(|lb| lb.binding == sw.binding) - .expect("Descriptor set writes don't match the set layout!"); - let mut raw = vk::WriteDescriptorSet { - s_type: vk::StructureType::WRITE_DESCRIPTOR_SET, - p_next: ptr::null(), - dst_set: sw.set.raw, - dst_binding: sw.binding, - dst_array_element: sw.array_offset as _, - descriptor_count: 0, - descriptor_type: conv::map_descriptor_type(layout.ty), - p_image_info: ptr::null(), - p_buffer_info: ptr::null(), - p_texel_buffer_view: ptr::null(), - }; - - for descriptor in sw.descriptors { - raw.descriptor_count += 1; - match *descriptor.borrow() { - pso::Descriptor::Sampler(sampler) => { - image_infos.push(vk::DescriptorImageInfo { - sampler: sampler.0, - image_view: vk::ImageView::null(), - image_layout: vk::ImageLayout::GENERAL, - }); - } - pso::Descriptor::Image(view, layout) => { - image_infos.push(vk::DescriptorImageInfo { - sampler: vk::Sampler::null(), - image_view: view.view, - image_layout: conv::map_image_layout(layout), - }); - } - pso::Descriptor::CombinedImageSampler(view, layout, sampler) => { - image_infos.push(vk::DescriptorImageInfo { - sampler: sampler.0, - image_view: 
view.view, - image_layout: conv::map_image_layout(layout), - }); - } - pso::Descriptor::Buffer(buffer, ref range) => { - let offset = range.start.unwrap_or(0); - buffer_infos.push(vk::DescriptorBufferInfo { - buffer: buffer.raw, - offset, - range: match range.end { - Some(end) => end - offset, - None => vk::WHOLE_SIZE, - }, - }); - } - pso::Descriptor::UniformTexelBuffer(view) - | pso::Descriptor::StorageTexelBuffer(view) => { - texel_buffer_views.push(view.raw); - } - } - } - - raw.p_image_info = image_infos.len() as _; - raw.p_buffer_info = buffer_infos.len() as _; - raw.p_texel_buffer_view = texel_buffer_views.len() as _; - raw_writes.push(raw); - } - - // Patch the pointers now that we have all the storage allocated - for raw in &mut raw_writes { - use crate::vk::DescriptorType as Dt; - match raw.descriptor_type { - Dt::SAMPLER - | Dt::SAMPLED_IMAGE - | Dt::STORAGE_IMAGE - | Dt::COMBINED_IMAGE_SAMPLER - | Dt::INPUT_ATTACHMENT => { - raw.p_buffer_info = ptr::null(); - raw.p_texel_buffer_view = ptr::null(); - let base = raw.p_image_info as usize - raw.descriptor_count as usize; - raw.p_image_info = image_infos[base ..].as_ptr(); - } - Dt::UNIFORM_TEXEL_BUFFER | Dt::STORAGE_TEXEL_BUFFER => { - raw.p_buffer_info = ptr::null(); - raw.p_image_info = ptr::null(); - let base = raw.p_texel_buffer_view as usize - raw.descriptor_count as usize; - raw.p_texel_buffer_view = texel_buffer_views[base ..].as_ptr(); - } - Dt::UNIFORM_BUFFER - | Dt::STORAGE_BUFFER - | Dt::STORAGE_BUFFER_DYNAMIC - | Dt::UNIFORM_BUFFER_DYNAMIC => { - raw.p_image_info = ptr::null(); - raw.p_texel_buffer_view = ptr::null(); - let base = raw.p_buffer_info as usize - raw.descriptor_count as usize; - raw.p_buffer_info = buffer_infos[base ..].as_ptr(); - } - _ => panic!("unknown descriptor type"), - } - } - - self.raw.0.update_descriptor_sets(&raw_writes, &[]); - } - - unsafe fn copy_descriptor_sets<'a, I>(&self, copies: I) - where - I: IntoIterator, - I::Item: Borrow>, - { - let copies = copies - 
.into_iter() - .map(|copy| { - let c = copy.borrow(); - vk::CopyDescriptorSet { - s_type: vk::StructureType::COPY_DESCRIPTOR_SET, - p_next: ptr::null(), - src_set: c.src_set.raw, - src_binding: c.src_binding as u32, - src_array_element: c.src_array_offset as u32, - dst_set: c.dst_set.raw, - dst_binding: c.dst_binding as u32, - dst_array_element: c.dst_array_offset as u32, - descriptor_count: c.count as u32, - } - }) - .collect::>(); - - self.raw.0.update_descriptor_sets(&[], &copies); - } - - unsafe fn map_memory(&self, memory: &n::Memory, range: R) -> Result<*mut u8, d::MapError> - where - R: RangeArg, - { - let (offset, size) = conv::map_range_arg(&range); - let result = self - .raw - .0 - .map_memory(memory.raw, offset, size, vk::MemoryMapFlags::empty()); - - match result { - Ok(ptr) => Ok(ptr as *mut _), - Err(vk::Result::ERROR_MEMORY_MAP_FAILED) => Err(d::MapError::MappingFailed), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn unmap_memory(&self, memory: &n::Memory) { - self.raw.0.unmap_memory(memory.raw) - } - - unsafe fn flush_mapped_memory_ranges<'a, I, R>(&self, ranges: I) -> Result<(), d::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<(&'a n::Memory, R)>, - R: RangeArg, - { - let ranges = conv::map_memory_ranges(ranges); - let result = self.raw.0.flush_mapped_memory_ranges(&ranges); - - match result { - Ok(()) => Ok(()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - unsafe fn invalidate_mapped_memory_ranges<'a, I, R>( - &self, - ranges: I, - ) -> Result<(), d::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<(&'a n::Memory, R)>, - R: RangeArg, - { - let ranges = conv::map_memory_ranges(ranges); - let result = 
self.raw.0.invalidate_mapped_memory_ranges(&ranges); - - match result { - Ok(()) => Ok(()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - fn create_semaphore(&self) -> Result { - let info = vk::SemaphoreCreateInfo { - s_type: vk::StructureType::SEMAPHORE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::SemaphoreCreateFlags::empty(), - }; - - let result = unsafe { self.raw.0.create_semaphore(&info, None) }; - - match result { - Ok(semaphore) => Ok(n::Semaphore(semaphore)), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - fn create_fence(&self, signaled: bool) -> Result { - let info = vk::FenceCreateInfo { - s_type: vk::StructureType::FENCE_CREATE_INFO, - p_next: ptr::null(), - flags: if signaled { - vk::FenceCreateFlags::SIGNALED - } else { - vk::FenceCreateFlags::empty() - }, - }; - - let result = unsafe { self.raw.0.create_fence(&info, None) }; - - match result { - Ok(fence) => Ok(n::Fence(fence)), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn reset_fences(&self, fences: I) -> Result<(), d::OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow, - { - let fences = fences - .into_iter() - .map(|fence| fence.borrow().0) - .collect::>(); - let result = self.raw.0.reset_fences(&fences); - - match result { - Ok(()) => Ok(()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn wait_for_fences( - &self, - fences: I, - wait: d::WaitFor, - timeout_ns: u64, - ) -> 
Result - where - I: IntoIterator, - I::Item: Borrow, - { - let fences = fences - .into_iter() - .map(|fence| fence.borrow().0) - .collect::>(); - let all = match wait { - d::WaitFor::Any => false, - d::WaitFor::All => true, - }; - let result = self.raw.0.wait_for_fences(&fences, all, timeout_ns); - match result { - Ok(()) => Ok(true), - Err(vk::Result::TIMEOUT) => Ok(false), - Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost.into()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn get_fence_status(&self, fence: &n::Fence) -> Result { - let result = self.raw.0.get_fence_status(fence.0); - match result { - Ok(()) => Ok(true), - Err(vk::Result::NOT_READY) => Ok(false), - Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost), - _ => unreachable!(), - } - } - - fn create_event(&self) -> Result { - let info = vk::EventCreateInfo { - s_type: vk::StructureType::EVENT_CREATE_INFO, - p_next: ptr::null(), - flags: vk::EventCreateFlags::empty(), - }; - - let result = unsafe { self.raw.0.create_event(&info, None) }; - match result { - Ok(e) => Ok(n::Event(e)), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn get_event_status(&self, event: &n::Event) -> Result { - let result = self.raw.0.get_event_status(event.0); - match result { - Ok(b) => Ok(b), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost.into()), - _ => unreachable!(), - } - } - - unsafe fn set_event(&self, event: &n::Event) -> Result<(), d::OutOfMemory> { - let result = self.raw.0.set_event(event.0); - match result { - 
Ok(()) => Ok(()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn reset_event(&self, event: &n::Event) -> Result<(), d::OutOfMemory> { - let result = self.raw.0.reset_event(event.0); - match result { - Ok(()) => Ok(()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn free_memory(&self, memory: n::Memory) { - self.raw.0.free_memory(memory.raw, None); - } - - unsafe fn create_query_pool( - &self, - ty: query::Type, - query_count: query::Id, - ) -> Result { - let (query_type, pipeline_statistics) = match ty { - query::Type::Occlusion => ( - vk::QueryType::OCCLUSION, - vk::QueryPipelineStatisticFlags::empty(), - ), - query::Type::PipelineStatistics(statistics) => ( - vk::QueryType::PIPELINE_STATISTICS, - conv::map_pipeline_statistics(statistics), - ), - query::Type::Timestamp => ( - vk::QueryType::TIMESTAMP, - vk::QueryPipelineStatisticFlags::empty(), - ), - }; - - let info = vk::QueryPoolCreateInfo { - s_type: vk::StructureType::QUERY_POOL_CREATE_INFO, - p_next: ptr::null(), - flags: vk::QueryPoolCreateFlags::empty(), - query_type, - query_count, - pipeline_statistics, - }; - - let result = self.raw.0.create_query_pool(&info, None); - - match result { - Ok(pool) => Ok(n::QueryPool(pool)), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn get_query_pool_results( - &self, - pool: &n::QueryPool, - queries: Range, - data: &mut [u8], - stride: buffer::Offset, - flags: query::ResultFlags, - ) -> Result { - let result = self.raw.0.fp_v1_0().get_query_pool_results( - self.raw.0.handle(), - pool.0, 
- queries.start, - queries.end - queries.start, - data.len(), - data.as_mut_ptr() as *mut _, - stride, - conv::map_query_result_flags(flags), - ); - - match result { - vk::Result::SUCCESS => Ok(true), - vk::Result::NOT_READY => Ok(false), - vk::Result::ERROR_DEVICE_LOST => Err(d::DeviceLost.into()), - vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()), - vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()), - _ => unreachable!(), - } - } - - unsafe fn create_swapchain( - &self, - surface: &mut w::Surface, - config: SwapchainConfig, - provided_old_swapchain: Option, - ) -> Result<(w::Swapchain, Vec), hal::window::CreationError> { - let functor = khr::Swapchain::new(&surface.raw.instance.0, &self.raw.0); - - let old_swapchain = match provided_old_swapchain { - Some(osc) => osc.raw, - None => vk::SwapchainKHR::null(), - }; - - let info = vk::SwapchainCreateInfoKHR { - s_type: vk::StructureType::SWAPCHAIN_CREATE_INFO_KHR, - p_next: ptr::null(), - flags: vk::SwapchainCreateFlagsKHR::empty(), - surface: surface.raw.handle, - min_image_count: config.image_count, - image_format: conv::map_format(config.format), - image_color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR, - image_extent: vk::Extent2D { - width: config.extent.width, - height: config.extent.height, - }, - image_array_layers: 1, - image_usage: conv::map_image_usage(config.image_usage), - image_sharing_mode: vk::SharingMode::EXCLUSIVE, - queue_family_index_count: 0, - p_queue_family_indices: ptr::null(), - pre_transform: vk::SurfaceTransformFlagsKHR::IDENTITY, - composite_alpha: conv::map_composite_alpha_mode(config.composite_alpha_mode), - present_mode: conv::map_present_mode(config.present_mode), - clipped: 1, - old_swapchain, - }; - - let result = functor.create_swapchain(&info, None); - - if old_swapchain != vk::SwapchainKHR::null() { - functor.destroy_swapchain(old_swapchain, None) - } - - let swapchain_raw = match result { - Ok(swapchain_raw) => swapchain_raw, - 
Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { - return Err(d::OutOfMemory::Host.into()); - } - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { - return Err(d::OutOfMemory::Device.into()); - } - Err(vk::Result::ERROR_DEVICE_LOST) => return Err(d::DeviceLost.into()), - Err(vk::Result::ERROR_SURFACE_LOST_KHR) => return Err(d::SurfaceLost.into()), - Err(vk::Result::ERROR_NATIVE_WINDOW_IN_USE_KHR) => return Err(d::WindowInUse.into()), - _ => unreachable!("Unexpected result - driver bug? {:?}", result), - }; - - let result = functor.get_swapchain_images(swapchain_raw); - - let backbuffer_images = match result { - Ok(backbuffer_images) => backbuffer_images, - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { - return Err(d::OutOfMemory::Host.into()); - } - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { - return Err(d::OutOfMemory::Device.into()); - } - _ => unreachable!(), - }; - - let swapchain = w::Swapchain { - raw: swapchain_raw, - functor, - vendor_id: self.vendor_id, - }; - - let images = backbuffer_images - .into_iter() - .map(|image| n::Image { - raw: image, - ty: vk::ImageType::TYPE_2D, - flags: vk::ImageCreateFlags::empty(), - extent: vk::Extent3D { - width: config.extent.width, - height: config.extent.height, - depth: 1, - }, - }) - .collect(); - - Ok((swapchain, images)) - } - - unsafe fn destroy_swapchain(&self, swapchain: w::Swapchain) { - swapchain.functor.destroy_swapchain(swapchain.raw, None); - } - - unsafe fn destroy_query_pool(&self, pool: n::QueryPool) { - self.raw.0.destroy_query_pool(pool.0, None); - } - - unsafe fn destroy_shader_module(&self, module: n::ShaderModule) { - self.raw.0.destroy_shader_module(module.raw, None); - } - - unsafe fn destroy_render_pass(&self, rp: n::RenderPass) { - self.raw.0.destroy_render_pass(rp.raw, None); - } - - unsafe fn destroy_pipeline_layout(&self, pl: n::PipelineLayout) { - self.raw.0.destroy_pipeline_layout(pl.raw, None); - } - - unsafe fn destroy_graphics_pipeline(&self, pipeline: n::GraphicsPipeline) { - 
self.raw.0.destroy_pipeline(pipeline.0, None); - } - - unsafe fn destroy_compute_pipeline(&self, pipeline: n::ComputePipeline) { - self.raw.0.destroy_pipeline(pipeline.0, None); - } - - unsafe fn destroy_framebuffer(&self, fb: n::Framebuffer) { - if fb.owned { - self.raw.0.destroy_framebuffer(fb.raw, None); - } - } - - unsafe fn destroy_buffer(&self, buffer: n::Buffer) { - self.raw.0.destroy_buffer(buffer.raw, None); - } - - unsafe fn destroy_buffer_view(&self, view: n::BufferView) { - self.raw.0.destroy_buffer_view(view.raw, None); - } - - unsafe fn destroy_image(&self, image: n::Image) { - self.raw.0.destroy_image(image.raw, None); - } - - unsafe fn destroy_image_view(&self, view: n::ImageView) { - match view.owner { - n::ImageViewOwner::User => { - self.raw.0.destroy_image_view(view.view, None); - } - n::ImageViewOwner::Surface(_fbo_cache) => { - //TODO: mark as deleted? - } - } - } - - unsafe fn destroy_sampler(&self, sampler: n::Sampler) { - self.raw.0.destroy_sampler(sampler.0, None); - } - - unsafe fn destroy_descriptor_pool(&self, pool: n::DescriptorPool) { - self.raw.0.destroy_descriptor_pool(pool.raw, None); - } - - unsafe fn destroy_descriptor_set_layout(&self, layout: n::DescriptorSetLayout) { - self.raw.0.destroy_descriptor_set_layout(layout.raw, None); - } - - unsafe fn destroy_fence(&self, fence: n::Fence) { - self.raw.0.destroy_fence(fence.0, None); - } - - unsafe fn destroy_semaphore(&self, semaphore: n::Semaphore) { - self.raw.0.destroy_semaphore(semaphore.0, None); - } - - unsafe fn destroy_event(&self, event: n::Event) { - self.raw.0.destroy_event(event.0, None); - } - - fn wait_idle(&self) -> Result<(), d::OutOfMemory> { - match unsafe { self.raw.0.device_wait_idle() } { - Ok(()) => Ok(()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), - _ => unreachable!(), - } - } - - unsafe fn set_image_name(&self, image: &mut n::Image, name: &str) { - 
self.set_object_name(vk::ObjectType::IMAGE, image.raw.as_raw(), name) - } - - unsafe fn set_buffer_name(&self, buffer: &mut n::Buffer, name: &str) { - self.set_object_name(vk::ObjectType::BUFFER, buffer.raw.as_raw(), name) - } - - unsafe fn set_command_buffer_name( - &self, - command_buffer: &mut cmd::CommandBuffer, - name: &str - ) { - self.set_object_name(vk::ObjectType::COMMAND_BUFFER, command_buffer.raw.as_raw(), name) - } - - unsafe fn set_semaphore_name(&self, semaphore: &mut n::Semaphore, name: &str) { - self.set_object_name(vk::ObjectType::SEMAPHORE, semaphore.0.as_raw(), name) - } - - unsafe fn set_fence_name(&self, fence: &mut n::Fence, name: &str) { - self.set_object_name(vk::ObjectType::FENCE, fence.0.as_raw(), name) - } - - unsafe fn set_framebuffer_name(&self, framebuffer: &mut n::Framebuffer, name: &str) { - self.set_object_name(vk::ObjectType::FRAMEBUFFER, framebuffer.raw.as_raw(), name) - } - - unsafe fn set_render_pass_name(&self, render_pass: &mut n::RenderPass, name: &str) { - self.set_object_name(vk::ObjectType::RENDER_PASS, render_pass.raw.as_raw(), name) - } - - unsafe fn set_descriptor_set_name(&self, descriptor_set: &mut n::DescriptorSet, name: &str) { - self.set_object_name(vk::ObjectType::DESCRIPTOR_SET, descriptor_set.raw.as_raw(), name) - } - - unsafe fn set_descriptor_set_layout_name(&self, descriptor_set_layout: &mut n::DescriptorSetLayout, name: &str) { - self.set_object_name(vk::ObjectType::DESCRIPTOR_SET_LAYOUT, descriptor_set_layout.raw.as_raw(), name) - } -} - -impl Device { - unsafe fn set_object_name(&self, object_type: vk::ObjectType, object_handle: u64, name: &str) { - let instance = &self.raw.2; - if let Some(DebugMessenger::Utils(ref debug_utils_ext, _)) = instance.1 { - // Append a null terminator to the string while avoiding allocating memory - static mut NAME_BUF: [u8; 64] = [0u8; 64]; - std::ptr::copy_nonoverlapping( - name.as_ptr(), - &mut NAME_BUF[0], - name.len().min(NAME_BUF.len()) - ); - NAME_BUF[name.len()] = 0; - 
let _result = debug_utils_ext.debug_utils_set_object_name( - self.raw.0.handle(), - &vk::DebugUtilsObjectNameInfoEXT { - s_type: vk::StructureType::DEBUG_UTILS_OBJECT_NAME_INFO_EXT, - p_next: std::ptr::null_mut(), - object_type, - object_handle, - p_object_name: NAME_BUF.as_ptr() as *mut _, - } - ); - } - } -} - -#[test] -fn test_send_sync() { - fn foo() {} - foo::() -} +use arrayvec::ArrayVec; +use ash::extensions::khr; +use ash::version::DeviceV1_0; +use ash::vk; +use ash::vk::Handle; +use smallvec::SmallVec; + +use hal::{ + memory::{Requirements, Segment}, + pool::CommandPoolCreateFlags, + pso::VertexInputRate, + window::SwapchainConfig, + {buffer, device as d, format, image, pass, pso, query, queue}, + {Features, MemoryTypeId}, +}; + +use std::borrow::Borrow; +use std::ffi::CString; +use std::ops::Range; +use std::pin::Pin; +use std::sync::Arc; +use std::{mem, ptr}; + +use crate::pool::RawCommandPool; +use crate::{command as cmd, conv, native as n, window as w}; +use crate::{Backend as B, DebugMessenger, Device}; + +#[derive(Debug, Default)] +struct GraphicsPipelineInfoBuf { + // 10 is the max amount of dynamic states + dynamic_states: ArrayVec<[vk::DynamicState; 10]>, + + // 5 is the amount of stages + c_strings: ArrayVec<[CString; 5]>, + stages: ArrayVec<[vk::PipelineShaderStageCreateInfo; 5]>, + specializations: ArrayVec<[vk::SpecializationInfo; 5]>, + specialization_entries: ArrayVec<[SmallVec<[vk::SpecializationMapEntry; 4]>; 5]>, + + vertex_bindings: Vec, + vertex_attributes: Vec, + blend_states: Vec, + + sample_mask: [u32; 2], + vertex_input_state: vk::PipelineVertexInputStateCreateInfo, + input_assembly_state: vk::PipelineInputAssemblyStateCreateInfo, + tessellation_state: Option, + viewport_state: vk::PipelineViewportStateCreateInfo, + rasterization_state: vk::PipelineRasterizationStateCreateInfo, + multisample_state: vk::PipelineMultisampleStateCreateInfo, + depth_stencil_state: vk::PipelineDepthStencilStateCreateInfo, + color_blend_state: 
vk::PipelineColorBlendStateCreateInfo, + pipeline_dynamic_state: vk::PipelineDynamicStateCreateInfo, + viewport: vk::Viewport, + scissor: vk::Rect2D, +} +impl GraphicsPipelineInfoBuf { + unsafe fn add_stage<'a>( + &mut self, + stage: vk::ShaderStageFlags, + source: &pso::EntryPoint<'a, B>, + ) { + let string = CString::new(source.entry).unwrap(); + let p_name = string.as_ptr(); + self.c_strings.push(string); + + self.specialization_entries.push( + source + .specialization + .constants + .iter() + .map(|c| vk::SpecializationMapEntry { + constant_id: c.id, + offset: c.range.start as _, + size: (c.range.end - c.range.start) as _, + }) + .collect(), + ); + let map_entries = self.specialization_entries.last().unwrap(); + + self.specializations.push(vk::SpecializationInfo { + map_entry_count: map_entries.len() as _, + p_map_entries: map_entries.as_ptr(), + data_size: source.specialization.data.len() as _, + p_data: source.specialization.data.as_ptr() as _, + }); + + self.stages.push(vk::PipelineShaderStageCreateInfo { + s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineShaderStageCreateFlags::empty(), + stage, + module: source.module.raw, + p_name, + p_specialization_info: self.specializations.last().unwrap(), + }) + } + + unsafe fn initialize<'a>( + this: &mut Pin<&mut Self>, + device: &Device, + desc: &pso::GraphicsPipelineDesc<'a, B>, + ) { + let mut this = Pin::get_mut(this.as_mut()); // use into_inner when it gets stable + + // Vertex stage + // vertex shader is required + this.add_stage(vk::ShaderStageFlags::VERTEX, &desc.shaders.vertex); + // Pixel stage + if let Some(ref entry) = desc.shaders.fragment { + this.add_stage(vk::ShaderStageFlags::FRAGMENT, entry); + } + // Geometry stage + if let Some(ref entry) = desc.shaders.geometry { + this.add_stage(vk::ShaderStageFlags::GEOMETRY, entry); + } + // Domain stage + if let Some(ref entry) = desc.shaders.domain { + 
this.add_stage(vk::ShaderStageFlags::TESSELLATION_EVALUATION, entry); + } + // Hull stage + if let Some(ref entry) = desc.shaders.hull { + this.add_stage(vk::ShaderStageFlags::TESSELLATION_CONTROL, entry); + } + + this.vertex_bindings = desc.vertex_buffers.iter().map(|vbuf| { + vk::VertexInputBindingDescription { + binding: vbuf.binding, + stride: vbuf.stride as u32, + input_rate: match vbuf.rate { + VertexInputRate::Vertex => vk::VertexInputRate::VERTEX, + VertexInputRate::Instance(divisor) => { + debug_assert_eq!(divisor, 1, "Custom vertex rate divisors not supported in Vulkan backend without extension"); + vk::VertexInputRate::INSTANCE + }, + }, + } + }).collect(); + this.vertex_attributes = desc + .attributes + .iter() + .map(|attr| vk::VertexInputAttributeDescription { + location: attr.location as u32, + binding: attr.binding as u32, + format: conv::map_format(attr.element.format), + offset: attr.element.offset as u32, + }) + .collect(); + + this.vertex_input_state = vk::PipelineVertexInputStateCreateInfo { + s_type: vk::StructureType::PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineVertexInputStateCreateFlags::empty(), + vertex_binding_description_count: this.vertex_bindings.len() as _, + p_vertex_binding_descriptions: this.vertex_bindings.as_ptr(), + vertex_attribute_description_count: this.vertex_attributes.len() as _, + p_vertex_attribute_descriptions: this.vertex_attributes.as_ptr(), + }; + + this.input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo { + s_type: vk::StructureType::PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineInputAssemblyStateCreateFlags::empty(), + topology: conv::map_topology(&desc.input_assembler), + primitive_restart_enable: match desc.input_assembler.restart_index { + Some(_) => vk::TRUE, + None => vk::FALSE, + }, + }; + + let depth_bias = match desc.rasterizer.depth_bias { + Some(pso::State::Static(db)) => db, + Some(pso::State::Dynamic) => { + 
this.dynamic_states.push(vk::DynamicState::DEPTH_BIAS); + pso::DepthBias::default() + } + None => pso::DepthBias::default(), + }; + + let polygon_mode = match desc.rasterizer.polygon_mode { + pso::PolygonMode::Point => vk::PolygonMode::POINT, + pso::PolygonMode::Line => vk::PolygonMode::LINE, + pso::PolygonMode::Fill => vk::PolygonMode::FILL, + }; + + let line_width = match desc.rasterizer.line_width { + pso::State::Static(w) => w, + pso::State::Dynamic => { + this.dynamic_states.push(vk::DynamicState::LINE_WIDTH); + 1.0 + } + }; + + this.rasterization_state = vk::PipelineRasterizationStateCreateInfo { + s_type: vk::StructureType::PIPELINE_RASTERIZATION_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineRasterizationStateCreateFlags::empty(), + depth_clamp_enable: if desc.rasterizer.depth_clamping { + if device.shared.features.contains(Features::DEPTH_CLAMP) { + vk::TRUE + } else { + warn!("Depth clamping was requested on a device with disabled feature"); + vk::FALSE + } + } else { + vk::FALSE + }, + rasterizer_discard_enable: if desc.shaders.fragment.is_none() + && desc.depth_stencil.depth.is_none() + && desc.depth_stencil.stencil.is_none() + { + vk::TRUE + } else { + vk::FALSE + }, + polygon_mode, + cull_mode: conv::map_cull_face(desc.rasterizer.cull_face), + front_face: conv::map_front_face(desc.rasterizer.front_face), + depth_bias_enable: if desc.rasterizer.depth_bias.is_some() { + vk::TRUE + } else { + vk::FALSE + }, + depth_bias_constant_factor: depth_bias.const_factor, + depth_bias_clamp: depth_bias.clamp, + depth_bias_slope_factor: depth_bias.slope_factor, + line_width, + }; + + this.tessellation_state = { + if let pso::Primitive::PatchList(patch_control_points) = desc.input_assembler.primitive + { + Some(vk::PipelineTessellationStateCreateInfo { + s_type: vk::StructureType::PIPELINE_TESSELLATION_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineTessellationStateCreateFlags::empty(), + patch_control_points: patch_control_points as 
_, + }) + } else { + None + } + }; + + this.viewport_state = vk::PipelineViewportStateCreateInfo { + s_type: vk::StructureType::PIPELINE_VIEWPORT_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineViewportStateCreateFlags::empty(), + scissor_count: 1, // TODO + p_scissors: match desc.baked_states.scissor { + Some(ref rect) => { + this.scissor = conv::map_rect(rect); + &this.scissor + } + None => { + this.dynamic_states.push(vk::DynamicState::SCISSOR); + ptr::null() + } + }, + viewport_count: 1, // TODO + p_viewports: match desc.baked_states.viewport { + Some(ref vp) => { + this.viewport = device.shared.map_viewport(vp); + &this.viewport + } + None => { + this.dynamic_states.push(vk::DynamicState::VIEWPORT); + ptr::null() + } + }, + }; + + this.multisample_state = match desc.multisampling { + Some(ref ms) => { + this.sample_mask = [ + (ms.sample_mask & 0xFFFFFFFF) as u32, + ((ms.sample_mask >> 32) & 0xFFFFFFFF) as u32, + ]; + vk::PipelineMultisampleStateCreateInfo { + s_type: vk::StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineMultisampleStateCreateFlags::empty(), + rasterization_samples: vk::SampleCountFlags::from_raw( + (ms.rasterization_samples as u32) & vk::SampleCountFlags::all().as_raw(), + ), + sample_shading_enable: ms.sample_shading.is_some() as _, + min_sample_shading: ms.sample_shading.unwrap_or(0.0), + p_sample_mask: &this.sample_mask as _, + alpha_to_coverage_enable: ms.alpha_coverage as _, + alpha_to_one_enable: ms.alpha_to_one as _, + } + } + None => vk::PipelineMultisampleStateCreateInfo { + s_type: vk::StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineMultisampleStateCreateFlags::empty(), + rasterization_samples: vk::SampleCountFlags::TYPE_1, + sample_shading_enable: vk::FALSE, + min_sample_shading: 0.0, + p_sample_mask: ptr::null(), + alpha_to_coverage_enable: vk::FALSE, + alpha_to_one_enable: vk::FALSE, + }, + }; + + let depth_stencil = 
desc.depth_stencil; + let (depth_test_enable, depth_write_enable, depth_compare_op) = match depth_stencil.depth { + Some(ref depth) => (vk::TRUE, depth.write as _, conv::map_comparison(depth.fun)), + None => (vk::FALSE, vk::FALSE, vk::CompareOp::NEVER), + }; + let (stencil_test_enable, front, back) = match depth_stencil.stencil { + Some(ref stencil) => { + let mut front = conv::map_stencil_side(&stencil.faces.front); + let mut back = conv::map_stencil_side(&stencil.faces.back); + match stencil.read_masks { + pso::State::Static(ref sides) => { + front.compare_mask = sides.front; + back.compare_mask = sides.back; + } + pso::State::Dynamic => { + this.dynamic_states + .push(vk::DynamicState::STENCIL_COMPARE_MASK); + } + } + match stencil.write_masks { + pso::State::Static(ref sides) => { + front.write_mask = sides.front; + back.write_mask = sides.back; + } + pso::State::Dynamic => { + this.dynamic_states + .push(vk::DynamicState::STENCIL_WRITE_MASK); + } + } + match stencil.reference_values { + pso::State::Static(ref sides) => { + front.reference = sides.front; + back.reference = sides.back; + } + pso::State::Dynamic => { + this.dynamic_states + .push(vk::DynamicState::STENCIL_REFERENCE); + } + } + (vk::TRUE, front, back) + } + None => mem::zeroed(), + }; + let (min_depth_bounds, max_depth_bounds) = match desc.baked_states.depth_bounds { + Some(ref range) => (range.start, range.end), + None => { + this.dynamic_states.push(vk::DynamicState::DEPTH_BOUNDS); + (0.0, 1.0) + } + }; + + this.depth_stencil_state = vk::PipelineDepthStencilStateCreateInfo { + s_type: vk::StructureType::PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineDepthStencilStateCreateFlags::empty(), + depth_test_enable, + depth_write_enable, + depth_compare_op, + depth_bounds_test_enable: depth_stencil.depth_bounds as _, + stencil_test_enable, + front, + back, + min_depth_bounds, + max_depth_bounds, + }; + + this.blend_states = desc + .blender + .targets + .iter() + 
.map(|color_desc| { + let color_write_mask = + vk::ColorComponentFlags::from_raw(color_desc.mask.bits() as _); + match color_desc.blend { + Some(ref bs) => { + let (color_blend_op, src_color_blend_factor, dst_color_blend_factor) = + conv::map_blend_op(bs.color); + let (alpha_blend_op, src_alpha_blend_factor, dst_alpha_blend_factor) = + conv::map_blend_op(bs.alpha); + vk::PipelineColorBlendAttachmentState { + color_write_mask, + blend_enable: vk::TRUE, + src_color_blend_factor, + dst_color_blend_factor, + color_blend_op, + src_alpha_blend_factor, + dst_alpha_blend_factor, + alpha_blend_op, + } + } + None => vk::PipelineColorBlendAttachmentState { + color_write_mask, + ..mem::zeroed() + }, + } + }) + .collect(); + + this.color_blend_state = vk::PipelineColorBlendStateCreateInfo { + s_type: vk::StructureType::PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineColorBlendStateCreateFlags::empty(), + logic_op_enable: vk::FALSE, // TODO + logic_op: vk::LogicOp::CLEAR, + attachment_count: this.blend_states.len() as _, + p_attachments: this.blend_states.as_ptr(), // TODO: + blend_constants: match desc.baked_states.blend_color { + Some(value) => value, + None => { + this.dynamic_states.push(vk::DynamicState::BLEND_CONSTANTS); + [0.0; 4] + } + }, + }; + + this.pipeline_dynamic_state = vk::PipelineDynamicStateCreateInfo { + s_type: vk::StructureType::PIPELINE_DYNAMIC_STATE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineDynamicStateCreateFlags::empty(), + dynamic_state_count: this.dynamic_states.len() as _, + p_dynamic_states: this.dynamic_states.as_ptr(), + }; + } +} + +#[derive(Debug, Default)] +struct ComputePipelineInfoBuf { + c_string: CString, + specialization: vk::SpecializationInfo, + entries: SmallVec<[vk::SpecializationMapEntry; 4]>, +} +impl ComputePipelineInfoBuf { + unsafe fn initialize<'a>(this: &mut Pin<&mut Self>, desc: &pso::ComputePipelineDesc<'a, B>) { + let mut this = Pin::get_mut(this.as_mut()); // use into_inner 
when it gets stable + + this.c_string = CString::new(desc.shader.entry).unwrap(); + this.entries = desc + .shader + .specialization + .constants + .iter() + .map(|c| vk::SpecializationMapEntry { + constant_id: c.id, + offset: c.range.start as _, + size: (c.range.end - c.range.start) as _, + }) + .collect(); + this.specialization = vk::SpecializationInfo { + map_entry_count: this.entries.len() as _, + p_map_entries: this.entries.as_ptr(), + data_size: desc.shader.specialization.data.len() as _, + p_data: desc.shader.specialization.data.as_ptr() as _, + }; + } +} + +impl d::Device for Device { + unsafe fn allocate_memory( + &self, + mem_type: MemoryTypeId, + size: u64, + ) -> Result { + let info = vk::MemoryAllocateInfo { + s_type: vk::StructureType::MEMORY_ALLOCATE_INFO, + p_next: ptr::null(), + allocation_size: size, + memory_type_index: mem_type.0 as _, + }; + + let result = self.shared.raw.allocate_memory(&info, None); + + match result { + Ok(memory) => Ok(n::Memory { raw: memory }), + Err(vk::Result::ERROR_TOO_MANY_OBJECTS) => Err(d::AllocationError::TooManyObjects), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_command_pool( + &self, + family: queue::QueueFamilyId, + create_flags: CommandPoolCreateFlags, + ) -> Result { + let mut flags = vk::CommandPoolCreateFlags::empty(); + if create_flags.contains(CommandPoolCreateFlags::TRANSIENT) { + flags |= vk::CommandPoolCreateFlags::TRANSIENT; + } + if create_flags.contains(CommandPoolCreateFlags::RESET_INDIVIDUAL) { + flags |= vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER; + } + + let info = vk::CommandPoolCreateInfo { + s_type: vk::StructureType::COMMAND_POOL_CREATE_INFO, + p_next: ptr::null(), + flags, + queue_family_index: family.0 as _, + }; + + let result = self.shared.raw.create_command_pool(&info, None); + + match result { + Ok(pool) => 
Ok(RawCommandPool { + raw: pool, + device: self.shared.clone(), + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn destroy_command_pool(&self, pool: RawCommandPool) { + self.shared.raw.destroy_command_pool(pool.raw, None); + } + + unsafe fn create_render_pass<'a, IA, IS, ID>( + &self, + attachments: IA, + subpasses: IS, + dependencies: ID, + ) -> Result + where + IA: IntoIterator, + IA::Item: Borrow, + IS: IntoIterator, + IS::Item: Borrow>, + ID: IntoIterator, + ID::Item: Borrow, + { + let attachments = attachments + .into_iter() + .map(|attachment| { + let attachment = attachment.borrow(); + vk::AttachmentDescription { + flags: vk::AttachmentDescriptionFlags::empty(), // TODO: may even alias! + format: attachment + .format + .map_or(vk::Format::UNDEFINED, conv::map_format), + samples: vk::SampleCountFlags::from_raw( + (attachment.samples as u32) & vk::SampleCountFlags::all().as_raw(), + ), + load_op: conv::map_attachment_load_op(attachment.ops.load), + store_op: conv::map_attachment_store_op(attachment.ops.store), + stencil_load_op: conv::map_attachment_load_op(attachment.stencil_ops.load), + stencil_store_op: conv::map_attachment_store_op(attachment.stencil_ops.store), + initial_layout: conv::map_image_layout(attachment.layouts.start), + final_layout: conv::map_image_layout(attachment.layouts.end), + } + }) + .collect::>(); + + let clear_attachments_mask = attachments + .iter() + .enumerate() + .filter_map(|(i, at)| { + if at.load_op == vk::AttachmentLoadOp::CLEAR + || at.stencil_load_op == vk::AttachmentLoadOp::CLEAR + { + Some(1 << i as u64) + } else { + None + } + }) + .sum(); + + let attachment_refs = subpasses + .into_iter() + .map(|subpass| { + let subpass = subpass.borrow(); + fn make_ref(&(id, layout): &pass::AttachmentRef) -> vk::AttachmentReference { + vk::AttachmentReference { + attachment: id as _, + 
layout: conv::map_image_layout(layout), + } + } + let colors = subpass.colors.iter().map(make_ref).collect::>(); + let depth_stencil = subpass.depth_stencil.map(make_ref); + let inputs = subpass.inputs.iter().map(make_ref).collect::>(); + let preserves = subpass + .preserves + .iter() + .map(|&id| id as u32) + .collect::>(); + let resolves = subpass.resolves.iter().map(make_ref).collect::>(); + + (colors, depth_stencil, inputs, preserves, resolves) + }) + .collect::>(); + + let subpasses = attachment_refs + .iter() + .map( + |(colors, depth_stencil, inputs, preserves, resolves)| vk::SubpassDescription { + flags: vk::SubpassDescriptionFlags::empty(), + pipeline_bind_point: vk::PipelineBindPoint::GRAPHICS, + input_attachment_count: inputs.len() as u32, + p_input_attachments: inputs.as_ptr(), + color_attachment_count: colors.len() as u32, + p_color_attachments: colors.as_ptr(), + p_resolve_attachments: if resolves.is_empty() { + ptr::null() + } else { + resolves.as_ptr() + }, + p_depth_stencil_attachment: match depth_stencil { + Some(ref aref) => aref as *const _, + None => ptr::null(), + }, + preserve_attachment_count: preserves.len() as u32, + p_preserve_attachments: preserves.as_ptr(), + }, + ) + .collect::>(); + + let dependencies = dependencies + .into_iter() + .map(|subpass_dep| { + let sdep = subpass_dep.borrow(); + // TODO: checks + vk::SubpassDependency { + src_subpass: sdep + .passes + .start + .map_or(vk::SUBPASS_EXTERNAL, |id| id as u32), + dst_subpass: sdep.passes.end.map_or(vk::SUBPASS_EXTERNAL, |id| id as u32), + src_stage_mask: conv::map_pipeline_stage(sdep.stages.start), + dst_stage_mask: conv::map_pipeline_stage(sdep.stages.end), + src_access_mask: conv::map_image_access(sdep.accesses.start), + dst_access_mask: conv::map_image_access(sdep.accesses.end), + dependency_flags: mem::transmute(sdep.flags), + } + }) + .collect::>(); + + let info = vk::RenderPassCreateInfo { + s_type: vk::StructureType::RENDER_PASS_CREATE_INFO, + p_next: ptr::null(), + 
flags: vk::RenderPassCreateFlags::empty(), + attachment_count: attachments.len() as u32, + p_attachments: attachments.as_ptr(), + subpass_count: subpasses.len() as u32, + p_subpasses: subpasses.as_ptr(), + dependency_count: dependencies.len() as u32, + p_dependencies: dependencies.as_ptr(), + }; + + let result = self.shared.raw.create_render_pass(&info, None); + + match result { + Ok(renderpass) => Ok(n::RenderPass { + raw: renderpass, + clear_attachments_mask, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn create_pipeline_layout( + &self, + sets: IS, + push_constant_ranges: IR, + ) -> Result + where + IS: IntoIterator, + IS::Item: Borrow, + IR: IntoIterator, + IR::Item: Borrow<(pso::ShaderStageFlags, Range)>, + { + let set_layouts = sets + .into_iter() + .map(|set| set.borrow().raw) + .collect::>(); + + debug!("create_pipeline_layout {:?}", set_layouts); + + let push_constant_ranges = push_constant_ranges + .into_iter() + .map(|range| { + let &(s, ref r) = range.borrow(); + vk::PushConstantRange { + stage_flags: conv::map_stage_flags(s), + offset: r.start, + size: r.end - r.start, + } + }) + .collect::>(); + + let info = vk::PipelineLayoutCreateInfo { + s_type: vk::StructureType::PIPELINE_LAYOUT_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineLayoutCreateFlags::empty(), + set_layout_count: set_layouts.len() as u32, + p_set_layouts: set_layouts.as_ptr(), + push_constant_range_count: push_constant_ranges.len() as u32, + p_push_constant_ranges: push_constant_ranges.as_ptr(), + }; + + let result = self.shared.raw.create_pipeline_layout(&info, None); + + match result { + Ok(raw) => Ok(n::PipelineLayout { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn 
create_pipeline_cache( + &self, + data: Option<&[u8]>, + ) -> Result { + let (data_len, data) = if let Some(d) = data { + (d.len(), d.as_ptr()) + } else { + (0_usize, ptr::null()) + }; + + let info = vk::PipelineCacheCreateInfo { + s_type: vk::StructureType::PIPELINE_CACHE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineCacheCreateFlags::empty(), + initial_data_size: data_len, + p_initial_data: data as _, + }; + + let result = self.shared.raw.create_pipeline_cache(&info, None); + + match result { + Ok(raw) => Ok(n::PipelineCache { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn get_pipeline_cache_data( + &self, + cache: &n::PipelineCache, + ) -> Result, d::OutOfMemory> { + let result = self.shared.raw.get_pipeline_cache_data(cache.raw); + + match result { + Ok(data) => Ok(data), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn destroy_pipeline_cache(&self, cache: n::PipelineCache) { + self.shared.raw.destroy_pipeline_cache(cache.raw, None); + } + + unsafe fn merge_pipeline_caches( + &self, + target: &n::PipelineCache, + sources: I, + ) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow, + { + let caches = sources + .into_iter() + .map(|s| s.borrow().raw) + .collect::>(); + let result = self.shared.raw.fp_v1_0().merge_pipeline_caches( + self.shared.raw.handle(), + target.raw, + caches.len() as u32, + caches.as_ptr(), + ); + + match result { + vk::Result::SUCCESS => Ok(()), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host), + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn create_graphics_pipeline<'a>( + &self, + desc: &pso::GraphicsPipelineDesc<'a, 
B>, + cache: Option<&n::PipelineCache>, + ) -> Result { + debug!("create_graphics_pipeline {:?}", desc); + + let mut buf = GraphicsPipelineInfoBuf::default(); + let mut buf = Pin::new(&mut buf); + GraphicsPipelineInfoBuf::initialize(&mut buf, self, desc); + + let info = { + let (base_handle, base_index) = match desc.parent { + pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), + pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), + pso::BasePipeline::None => (vk::Pipeline::null(), -1), + }; + + let mut flags = vk::PipelineCreateFlags::empty(); + match desc.parent { + pso::BasePipeline::None => (), + _ => { + flags |= vk::PipelineCreateFlags::DERIVATIVE; + } + } + if desc + .flags + .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) + { + flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; + } + if desc + .flags + .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) + { + flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; + } + + vk::GraphicsPipelineCreateInfo { + s_type: vk::StructureType::GRAPHICS_PIPELINE_CREATE_INFO, + p_next: ptr::null(), + flags, + stage_count: buf.stages.len() as _, + p_stages: buf.stages.as_ptr(), + p_vertex_input_state: &buf.vertex_input_state, + p_input_assembly_state: &buf.input_assembly_state, + p_rasterization_state: &buf.rasterization_state, + p_tessellation_state: match buf.tessellation_state.as_ref() { + Some(t) => t as _, + None => ptr::null(), + }, + p_viewport_state: &buf.viewport_state, + p_multisample_state: &buf.multisample_state, + p_depth_stencil_state: &buf.depth_stencil_state, + p_color_blend_state: &buf.color_blend_state, + p_dynamic_state: &buf.pipeline_dynamic_state, + layout: desc.layout.raw, + render_pass: desc.subpass.main_pass.raw, + subpass: desc.subpass.index as _, + base_pipeline_handle: base_handle, + base_pipeline_index: base_index, + } + }; + + let mut pipeline = vk::Pipeline::null(); + + match self.shared.raw.fp_v1_0().create_graphics_pipelines( + 
self.shared.raw.handle(), + cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), + 1, + &info, + ptr::null(), + &mut pipeline, + ) { + vk::Result::SUCCESS => Ok(n::GraphicsPipeline(pipeline)), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()), + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()), + _ => Err(pso::CreationError::Other), + } + } + + unsafe fn create_graphics_pipelines<'a, T>( + &self, + descs: T, + cache: Option<&n::PipelineCache>, + ) -> Vec> + where + T: IntoIterator, + T::Item: Borrow>, + { + debug!("create_graphics_pipelines:"); + + let mut bufs: Pin> = descs + .into_iter() + .enumerate() + .inspect(|(idx, desc)| debug!("# {} {:?}", idx, desc.borrow())) + .map(|(_, desc)| (desc, GraphicsPipelineInfoBuf::default())) + .collect::>() + .into(); + + for (desc, buf) in bufs.as_mut().get_unchecked_mut() { + let desc: &T::Item = desc; + GraphicsPipelineInfoBuf::initialize(&mut Pin::new_unchecked(buf), self, desc.borrow()); + } + + let infos: Vec<_> = bufs + .iter() + .map(|(desc, buf)| { + let desc = desc.borrow(); + + let (base_handle, base_index) = match desc.parent { + pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), + pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), + pso::BasePipeline::None => (vk::Pipeline::null(), -1), + }; + + let mut flags = vk::PipelineCreateFlags::empty(); + match desc.parent { + pso::BasePipeline::None => (), + _ => { + flags |= vk::PipelineCreateFlags::DERIVATIVE; + } + } + if desc + .flags + .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) + { + flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; + } + if desc + .flags + .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) + { + flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; + } + + vk::GraphicsPipelineCreateInfo { + s_type: vk::StructureType::GRAPHICS_PIPELINE_CREATE_INFO, + p_next: ptr::null(), + flags, + stage_count: buf.stages.len() as _, + p_stages: 
buf.stages.as_ptr(), + p_vertex_input_state: &buf.vertex_input_state, + p_input_assembly_state: &buf.input_assembly_state, + p_rasterization_state: &buf.rasterization_state, + p_tessellation_state: match buf.tessellation_state.as_ref() { + Some(t) => t as _, + None => ptr::null(), + }, + p_viewport_state: &buf.viewport_state, + p_multisample_state: &buf.multisample_state, + p_depth_stencil_state: &buf.depth_stencil_state, + p_color_blend_state: &buf.color_blend_state, + p_dynamic_state: &buf.pipeline_dynamic_state, + layout: desc.layout.raw, + render_pass: desc.subpass.main_pass.raw, + subpass: desc.subpass.index as _, + base_pipeline_handle: base_handle, + base_pipeline_index: base_index, + } + }) + .collect(); + + let (pipelines, error) = if infos.is_empty() { + (Vec::new(), None) + } else { + match self.shared.raw.create_graphics_pipelines( + cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), + &infos, + None, + ) { + Ok(pipelines) => (pipelines, None), + Err((pipelines, error)) => (pipelines, Some(error)), + } + }; + + pipelines + .into_iter() + .map(|pso| { + if pso == vk::Pipeline::null() { + match error { + Some(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + Err(d::OutOfMemory::Host.into()) + } + Some(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { + Err(d::OutOfMemory::Device.into()) + } + _ => unreachable!(), + } + } else { + Ok(n::GraphicsPipeline(pso)) + } + }) + .collect() + } + + unsafe fn create_compute_pipeline<'a>( + &self, + desc: &pso::ComputePipelineDesc<'a, B>, + cache: Option<&n::PipelineCache>, + ) -> Result { + let mut buf = ComputePipelineInfoBuf::default(); + let mut buf = Pin::new(&mut buf); + ComputePipelineInfoBuf::initialize(&mut buf, desc); + + let info = { + let stage = vk::PipelineShaderStageCreateInfo { + s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineShaderStageCreateFlags::empty(), + stage: vk::ShaderStageFlags::COMPUTE, + module: desc.shader.module.raw, + p_name: 
buf.c_string.as_ptr(), + p_specialization_info: &buf.specialization, + }; + + let (base_handle, base_index) = match desc.parent { + pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), + pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), + pso::BasePipeline::None => (vk::Pipeline::null(), -1), + }; + + let mut flags = vk::PipelineCreateFlags::empty(); + match desc.parent { + pso::BasePipeline::None => (), + _ => { + flags |= vk::PipelineCreateFlags::DERIVATIVE; + } + } + if desc + .flags + .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) + { + flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; + } + if desc + .flags + .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) + { + flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; + } + + vk::ComputePipelineCreateInfo { + s_type: vk::StructureType::COMPUTE_PIPELINE_CREATE_INFO, + p_next: ptr::null(), + flags, + stage, + layout: desc.layout.raw, + base_pipeline_handle: base_handle, + base_pipeline_index: base_index, + } + }; + + let mut pipeline = vk::Pipeline::null(); + + match self.shared.raw.fp_v1_0().create_compute_pipelines( + self.shared.raw.handle(), + cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), + 1, + &info, + ptr::null(), + &mut pipeline, + ) { + vk::Result::SUCCESS => Ok(n::ComputePipeline(pipeline)), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()), + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()), + _ => Err(pso::CreationError::Other), + } + } + + unsafe fn create_compute_pipelines<'a, T>( + &self, + descs: T, + cache: Option<&n::PipelineCache>, + ) -> Vec> + where + T: IntoIterator, + T::Item: Borrow>, + { + let mut bufs: Pin> = descs + .into_iter() + .map(|desc| (desc, ComputePipelineInfoBuf::default())) + .collect::>() + .into(); + + for (desc, buf) in bufs.as_mut().get_unchecked_mut() { + let desc: &T::Item = desc; + ComputePipelineInfoBuf::initialize(&mut Pin::new_unchecked(buf), 
desc.borrow()); + } + + let infos: Vec<_> = bufs + .iter() + .map(|(desc, buf)| { + let desc = desc.borrow(); + + let stage = vk::PipelineShaderStageCreateInfo { + s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::PipelineShaderStageCreateFlags::empty(), + stage: vk::ShaderStageFlags::COMPUTE, + module: desc.shader.module.raw, + p_name: buf.c_string.as_ptr(), + p_specialization_info: &buf.specialization, + }; + + let (base_handle, base_index) = match desc.parent { + pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1), + pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _), + pso::BasePipeline::None => (vk::Pipeline::null(), -1), + }; + + let mut flags = vk::PipelineCreateFlags::empty(); + match desc.parent { + pso::BasePipeline::None => (), + _ => { + flags |= vk::PipelineCreateFlags::DERIVATIVE; + } + } + if desc + .flags + .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION) + { + flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION; + } + if desc + .flags + .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES) + { + flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES; + } + + vk::ComputePipelineCreateInfo { + s_type: vk::StructureType::COMPUTE_PIPELINE_CREATE_INFO, + p_next: ptr::null(), + flags, + stage, + layout: desc.layout.raw, + base_pipeline_handle: base_handle, + base_pipeline_index: base_index, + } + }) + .collect(); + + let (pipelines, error) = if infos.is_empty() { + (Vec::new(), None) + } else { + match self.shared.raw.create_compute_pipelines( + cache.map_or(vk::PipelineCache::null(), |cache| cache.raw), + &infos, + None, + ) { + Ok(pipelines) => (pipelines, None), + Err((pipelines, error)) => (pipelines, Some(error)), + } + }; + + pipelines + .into_iter() + .map(|pso| { + if pso == vk::Pipeline::null() { + match error { + Some(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + Err(d::OutOfMemory::Host.into()) + } + Some(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { + 
Err(d::OutOfMemory::Device.into()) + } + _ => unreachable!(), + } + } else { + Ok(n::ComputePipeline(pso)) + } + }) + .collect() + } + + unsafe fn create_framebuffer( + &self, + renderpass: &n::RenderPass, + attachments: T, + extent: image::Extent, + ) -> Result + where + T: IntoIterator, + T::Item: Borrow, + { + let mut framebuffers_ptr = None; + let mut raw_attachments = SmallVec::<[_; 4]>::new(); + for attachment in attachments { + let at = attachment.borrow(); + raw_attachments.push(at.view); + match at.owner { + n::ImageViewOwner::User => {} + n::ImageViewOwner::Surface(ref fbo_ptr) => { + framebuffers_ptr = Some(Arc::clone(&fbo_ptr.0)); + } + } + } + + let info = vk::FramebufferCreateInfo { + s_type: vk::StructureType::FRAMEBUFFER_CREATE_INFO, + p_next: ptr::null(), + flags: vk::FramebufferCreateFlags::empty(), + render_pass: renderpass.raw, + attachment_count: raw_attachments.len() as u32, + p_attachments: raw_attachments.as_ptr(), + width: extent.width, + height: extent.height, + layers: extent.depth, + }; + + let result = self.shared.raw.create_framebuffer(&info, None); + + match result { + Ok(raw) => Ok(n::Framebuffer { + raw, + owned: match framebuffers_ptr { + Some(fbo_ptr) => { + fbo_ptr.lock().unwrap().framebuffers.push(raw); + false + } + None => true, + }, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn create_shader_module( + &self, + spirv_data: &[u32], + ) -> Result { + let info = vk::ShaderModuleCreateInfo { + s_type: vk::StructureType::SHADER_MODULE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::ShaderModuleCreateFlags::empty(), + code_size: spirv_data.len() * 4, + p_code: spirv_data.as_ptr(), + }; + + let module = self.shared.raw.create_shader_module(&info, None); + + match module { + Ok(raw) => Ok(n::ShaderModule { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => 
Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + Err(_) => { + Err(d::ShaderError::CompilationFailed(String::new())) // TODO + } + } + } + + unsafe fn create_sampler( + &self, + desc: &image::SamplerDesc, + ) -> Result { + use hal::pso::Comparison; + + let (anisotropy_enable, max_anisotropy) = + desc.anisotropy_clamp.map_or((vk::FALSE, 1.0), |aniso| { + if self.shared.features.contains(Features::SAMPLER_ANISOTROPY) { + (vk::TRUE, aniso as f32) + } else { + warn!( + "Anisotropy({}) was requested on a device with disabled feature", + aniso + ); + (vk::FALSE, 1.0) + } + }); + let info = vk::SamplerCreateInfo { + s_type: vk::StructureType::SAMPLER_CREATE_INFO, + p_next: ptr::null(), + flags: vk::SamplerCreateFlags::empty(), + mag_filter: conv::map_filter(desc.mag_filter), + min_filter: conv::map_filter(desc.min_filter), + mipmap_mode: conv::map_mip_filter(desc.mip_filter), + address_mode_u: conv::map_wrap(desc.wrap_mode.0), + address_mode_v: conv::map_wrap(desc.wrap_mode.1), + address_mode_w: conv::map_wrap(desc.wrap_mode.2), + mip_lod_bias: desc.lod_bias.0, + anisotropy_enable, + max_anisotropy, + compare_enable: if desc.comparison.is_some() { + vk::TRUE + } else { + vk::FALSE + }, + compare_op: conv::map_comparison(desc.comparison.unwrap_or(Comparison::Never)), + min_lod: desc.lod_range.start.0, + max_lod: desc.lod_range.end.0, + border_color: match conv::map_border_color(desc.border) { + Some(bc) => bc, + None => { + error!("Unsupported border color {:x}", desc.border.0); + vk::BorderColor::FLOAT_TRANSPARENT_BLACK + } + }, + unnormalized_coordinates: if desc.normalized { vk::FALSE } else { vk::TRUE }, + }; + + let result = self.shared.raw.create_sampler(&info, None); + + match result { + Ok(sampler) => Ok(n::Sampler(sampler)), + Err(vk::Result::ERROR_TOO_MANY_OBJECTS) => Err(d::AllocationError::TooManyObjects), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + 
Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + /// + unsafe fn create_buffer( + &self, + size: u64, + usage: buffer::Usage, + ) -> Result { + let info = vk::BufferCreateInfo { + s_type: vk::StructureType::BUFFER_CREATE_INFO, + p_next: ptr::null(), + flags: vk::BufferCreateFlags::empty(), // TODO: + size, + usage: conv::map_buffer_usage(usage), + sharing_mode: vk::SharingMode::EXCLUSIVE, // TODO: + queue_family_index_count: 0, + p_queue_family_indices: ptr::null(), + }; + + let result = self.shared.raw.create_buffer(&info, None); + + match result { + Ok(raw) => Ok(n::Buffer { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_buffer_requirements(&self, buffer: &n::Buffer) -> Requirements { + let req = self.shared.raw.get_buffer_memory_requirements(buffer.raw); + + Requirements { + size: req.size, + alignment: req.alignment, + type_mask: req.memory_type_bits as _, + } + } + + unsafe fn bind_buffer_memory( + &self, + memory: &n::Memory, + offset: u64, + buffer: &mut n::Buffer, + ) -> Result<(), d::BindError> { + let result = self + .shared + .raw + .bind_buffer_memory(buffer.raw, memory.raw, offset); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_buffer_view( + &self, + buffer: &n::Buffer, + format: Option, + range: buffer::SubRange, + ) -> Result { + let info = vk::BufferViewCreateInfo { + s_type: vk::StructureType::BUFFER_VIEW_CREATE_INFO, + p_next: ptr::null(), + flags: vk::BufferViewCreateFlags::empty(), + buffer: buffer.raw, + format: format.map_or(vk::Format::UNDEFINED, conv::map_format), + offset: range.offset, + 
range: range.size.unwrap_or(vk::WHOLE_SIZE), + }; + + let result = self.shared.raw.create_buffer_view(&info, None); + + match result { + Ok(raw) => Ok(n::BufferView { raw }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_image( + &self, + kind: image::Kind, + mip_levels: image::Level, + format: format::Format, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Result { + let flags = conv::map_view_capabilities(view_caps); + let extent = conv::map_extent(kind.extent()); + let array_layers = kind.num_layers(); + let samples = kind.num_samples() as u32; + let image_type = match kind { + image::Kind::D1(..) => vk::ImageType::TYPE_1D, + image::Kind::D2(..) => vk::ImageType::TYPE_2D, + image::Kind::D3(..) => vk::ImageType::TYPE_3D, + }; + + let info = vk::ImageCreateInfo { + s_type: vk::StructureType::IMAGE_CREATE_INFO, + p_next: ptr::null(), + flags, + image_type, + format: conv::map_format(format), + extent: extent.clone(), + mip_levels: mip_levels as u32, + array_layers: array_layers as u32, + samples: vk::SampleCountFlags::from_raw(samples & vk::SampleCountFlags::all().as_raw()), + tiling: conv::map_tiling(tiling), + usage: conv::map_image_usage(usage), + sharing_mode: vk::SharingMode::EXCLUSIVE, // TODO: + queue_family_index_count: 0, + p_queue_family_indices: ptr::null(), + initial_layout: vk::ImageLayout::UNDEFINED, + }; + + let result = self.shared.raw.create_image(&info, None); + + match result { + Ok(raw) => Ok(n::Image { + raw, + ty: image_type, + flags, + extent, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_image_requirements(&self, image: &n::Image) -> Requirements { + let 
req = self.shared.raw.get_image_memory_requirements(image.raw); + + Requirements { + size: req.size, + alignment: req.alignment, + type_mask: req.memory_type_bits as _, + } + } + + unsafe fn get_image_subresource_footprint( + &self, + image: &n::Image, + subresource: image::Subresource, + ) -> image::SubresourceFootprint { + let sub = conv::map_subresource(&subresource); + let layout = self.shared.raw.get_image_subresource_layout(image.raw, sub); + + image::SubresourceFootprint { + slice: layout.offset .. layout.offset + layout.size, + row_pitch: layout.row_pitch, + array_pitch: layout.array_pitch, + depth_pitch: layout.depth_pitch, + } + } + + unsafe fn bind_image_memory( + &self, + memory: &n::Memory, + offset: u64, + image: &mut n::Image, + ) -> Result<(), d::BindError> { + // TODO: error handling + // TODO: check required type + let result = self.shared.raw.bind_image_memory(image.raw, memory.raw, offset); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_image_view( + &self, + image: &n::Image, + kind: image::ViewKind, + format: format::Format, + swizzle: format::Swizzle, + range: image::SubresourceRange, + ) -> Result { + let is_cube = image + .flags + .intersects(vk::ImageCreateFlags::CUBE_COMPATIBLE); + let info = vk::ImageViewCreateInfo { + s_type: vk::StructureType::IMAGE_VIEW_CREATE_INFO, + p_next: ptr::null(), + flags: vk::ImageViewCreateFlags::empty(), + image: image.raw, + view_type: match conv::map_view_kind(kind, image.ty, is_cube) { + Some(ty) => ty, + None => return Err(image::ViewCreationError::BadKind(kind)), + }, + format: conv::map_format(format), + components: conv::map_swizzle(swizzle), + subresource_range: conv::map_subresource_range(&range), + }; + + let result = self.shared.raw.create_image_view(&info, None); + + match result { + 
Ok(view) => Ok(n::ImageView { + image: image.raw, + view, + range, + owner: n::ImageViewOwner::User, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_descriptor_pool( + &self, + max_sets: usize, + descriptor_pools: T, + flags: pso::DescriptorPoolCreateFlags, + ) -> Result + where + T: IntoIterator, + T::Item: Borrow, + { + let pools = descriptor_pools + .into_iter() + .map(|pool| { + let pool = pool.borrow(); + vk::DescriptorPoolSize { + ty: conv::map_descriptor_type(pool.ty), + descriptor_count: pool.count as u32, + } + }) + .collect::>(); + + let info = vk::DescriptorPoolCreateInfo { + s_type: vk::StructureType::DESCRIPTOR_POOL_CREATE_INFO, + p_next: ptr::null(), + flags: conv::map_descriptor_pool_create_flags(flags), + max_sets: max_sets as u32, + pool_size_count: pools.len() as u32, + p_pool_sizes: pools.as_ptr(), + }; + + let result = self.shared.raw.create_descriptor_pool(&info, None); + + match result { + Ok(pool) => Ok(n::DescriptorPool { + raw: pool, + device: self.shared.clone(), + set_free_vec: Vec::new(), + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_descriptor_set_layout( + &self, + binding_iter: I, + immutable_sampler_iter: J, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + { + let immutable_samplers = immutable_sampler_iter + .into_iter() + .map(|is| is.borrow().0) + .collect::>(); + let mut sampler_offset = 0; + + let bindings = Arc::new( + binding_iter + .into_iter() + .map(|b| b.borrow().clone()) + .collect::>(), + ); + + let raw_bindings = bindings + .iter() + .map(|b| vk::DescriptorSetLayoutBinding { + binding: b.binding, + descriptor_type: 
conv::map_descriptor_type(b.ty), + descriptor_count: b.count as _, + stage_flags: conv::map_stage_flags(b.stage_flags), + p_immutable_samplers: if b.immutable_samplers { + let slice = &immutable_samplers[sampler_offset ..]; + sampler_offset += b.count; + slice.as_ptr() + } else { + ptr::null() + }, + }) + .collect::>(); + + debug!("create_descriptor_set_layout {:?}", raw_bindings); + + let info = vk::DescriptorSetLayoutCreateInfo { + s_type: vk::StructureType::DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + p_next: ptr::null(), + flags: vk::DescriptorSetLayoutCreateFlags::empty(), + binding_count: raw_bindings.len() as _, + p_bindings: raw_bindings.as_ptr(), + }; + + let result = self.shared.raw.create_descriptor_set_layout(&info, None); + + match result { + Ok(layout) => Ok(n::DescriptorSetLayout { + raw: layout, + bindings, + }), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) + where + I: IntoIterator>, + J: IntoIterator, + J::Item: Borrow>, + { + let mut raw_writes = Vec::new(); + let mut image_infos = Vec::new(); + let mut buffer_infos = Vec::new(); + let mut texel_buffer_views = Vec::new(); + + for sw in write_iter { + let layout = sw + .set + .bindings + .iter() + .find(|lb| lb.binding == sw.binding) + .expect("Descriptor set writes don't match the set layout!"); + let mut raw = vk::WriteDescriptorSet { + s_type: vk::StructureType::WRITE_DESCRIPTOR_SET, + p_next: ptr::null(), + dst_set: sw.set.raw, + dst_binding: sw.binding, + dst_array_element: sw.array_offset as _, + descriptor_count: 0, + descriptor_type: conv::map_descriptor_type(layout.ty), + p_image_info: ptr::null(), + p_buffer_info: ptr::null(), + p_texel_buffer_view: ptr::null(), + }; + + for descriptor in sw.descriptors { + raw.descriptor_count += 1; + match *descriptor.borrow() { + 
pso::Descriptor::Sampler(sampler) => { + image_infos.push(vk::DescriptorImageInfo { + sampler: sampler.0, + image_view: vk::ImageView::null(), + image_layout: vk::ImageLayout::GENERAL, + }); + } + pso::Descriptor::Image(view, layout) => { + image_infos.push(vk::DescriptorImageInfo { + sampler: vk::Sampler::null(), + image_view: view.view, + image_layout: conv::map_image_layout(layout), + }); + } + pso::Descriptor::CombinedImageSampler(view, layout, sampler) => { + image_infos.push(vk::DescriptorImageInfo { + sampler: sampler.0, + image_view: view.view, + image_layout: conv::map_image_layout(layout), + }); + } + pso::Descriptor::Buffer(buffer, ref sub) => { + buffer_infos.push(vk::DescriptorBufferInfo { + buffer: buffer.raw, + offset: sub.offset, + range: sub.size.unwrap_or(vk::WHOLE_SIZE), + }); + } + pso::Descriptor::TexelBuffer(view) => { + texel_buffer_views.push(view.raw); + } + } + } + + raw.p_image_info = image_infos.len() as _; + raw.p_buffer_info = buffer_infos.len() as _; + raw.p_texel_buffer_view = texel_buffer_views.len() as _; + raw_writes.push(raw); + } + + // Patch the pointers now that we have all the storage allocated + for raw in &mut raw_writes { + use crate::vk::DescriptorType as Dt; + match raw.descriptor_type { + Dt::SAMPLER + | Dt::SAMPLED_IMAGE + | Dt::STORAGE_IMAGE + | Dt::COMBINED_IMAGE_SAMPLER + | Dt::INPUT_ATTACHMENT => { + raw.p_buffer_info = ptr::null(); + raw.p_texel_buffer_view = ptr::null(); + let base = raw.p_image_info as usize - raw.descriptor_count as usize; + raw.p_image_info = image_infos[base ..].as_ptr(); + } + Dt::UNIFORM_TEXEL_BUFFER | Dt::STORAGE_TEXEL_BUFFER => { + raw.p_buffer_info = ptr::null(); + raw.p_image_info = ptr::null(); + let base = raw.p_texel_buffer_view as usize - raw.descriptor_count as usize; + raw.p_texel_buffer_view = texel_buffer_views[base ..].as_ptr(); + } + Dt::UNIFORM_BUFFER + | Dt::STORAGE_BUFFER + | Dt::STORAGE_BUFFER_DYNAMIC + | Dt::UNIFORM_BUFFER_DYNAMIC => { + raw.p_image_info = ptr::null(); + 
raw.p_texel_buffer_view = ptr::null(); + let base = raw.p_buffer_info as usize - raw.descriptor_count as usize; + raw.p_buffer_info = buffer_infos[base ..].as_ptr(); + } + _ => panic!("unknown descriptor type"), + } + } + + self.shared.raw.update_descriptor_sets(&raw_writes, &[]); + } + + unsafe fn copy_descriptor_sets<'a, I>(&self, copies: I) + where + I: IntoIterator, + I::Item: Borrow>, + { + let copies = copies + .into_iter() + .map(|copy| { + let c = copy.borrow(); + vk::CopyDescriptorSet { + s_type: vk::StructureType::COPY_DESCRIPTOR_SET, + p_next: ptr::null(), + src_set: c.src_set.raw, + src_binding: c.src_binding as u32, + src_array_element: c.src_array_offset as u32, + dst_set: c.dst_set.raw, + dst_binding: c.dst_binding as u32, + dst_array_element: c.dst_array_offset as u32, + descriptor_count: c.count as u32, + } + }) + .collect::>(); + + self.shared.raw.update_descriptor_sets(&[], &copies); + } + + unsafe fn map_memory( + &self, + memory: &n::Memory, + segment: Segment, + ) -> Result<*mut u8, d::MapError> { + let result = self.shared.raw.map_memory( + memory.raw, + segment.offset, + segment.size.unwrap_or(vk::WHOLE_SIZE), + vk::MemoryMapFlags::empty(), + ); + + match result { + Ok(ptr) => Ok(ptr as *mut _), + Err(vk::Result::ERROR_MEMORY_MAP_FAILED) => Err(d::MapError::MappingFailed), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn unmap_memory(&self, memory: &n::Memory) { + self.shared.raw.unmap_memory(memory.raw) + } + + unsafe fn flush_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a n::Memory, Segment)>, + { + let ranges = conv::map_memory_ranges(ranges); + let result = self.shared.raw.flush_mapped_memory_ranges(&ranges); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => 
Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn invalidate_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a n::Memory, Segment)>, + { + let ranges = conv::map_memory_ranges(ranges); + let result = self.shared.raw.invalidate_mapped_memory_ranges(&ranges); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + fn create_semaphore(&self) -> Result { + let info = vk::SemaphoreCreateInfo { + s_type: vk::StructureType::SEMAPHORE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::SemaphoreCreateFlags::empty(), + }; + + let result = unsafe { self.shared.raw.create_semaphore(&info, None) }; + + match result { + Ok(semaphore) => Ok(n::Semaphore(semaphore)), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + fn create_fence(&self, signaled: bool) -> Result { + let info = vk::FenceCreateInfo { + s_type: vk::StructureType::FENCE_CREATE_INFO, + p_next: ptr::null(), + flags: if signaled { + vk::FenceCreateFlags::SIGNALED + } else { + vk::FenceCreateFlags::empty() + }, + }; + + let result = unsafe { self.shared.raw.create_fence(&info, None) }; + + match result { + Ok(fence) => Ok(n::Fence(fence)), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn reset_fences(&self, fences: I) -> Result<(), d::OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow, + { + let fences = fences + .into_iter() + .map(|fence| fence.borrow().0) + 
.collect::>(); + let result = self.shared.raw.reset_fences(&fences); + + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn wait_for_fences( + &self, + fences: I, + wait: d::WaitFor, + timeout_ns: u64, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + let fences = fences + .into_iter() + .map(|fence| fence.borrow().0) + .collect::>(); + let all = match wait { + d::WaitFor::Any => false, + d::WaitFor::All => true, + }; + let result = self.shared.raw.wait_for_fences(&fences, all, timeout_ns); + match result { + Ok(()) => Ok(true), + Err(vk::Result::TIMEOUT) => Ok(false), + Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost.into()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_fence_status(&self, fence: &n::Fence) -> Result { + let result = self.shared.raw.get_fence_status(fence.0); + match result { + Ok(ok) => Ok(ok), + Err(vk::Result::NOT_READY) => Ok(false), //TODO: shouldn't be needed + Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost), + _ => unreachable!(), + } + } + + fn create_event(&self) -> Result { + let info = vk::EventCreateInfo { + s_type: vk::StructureType::EVENT_CREATE_INFO, + p_next: ptr::null(), + flags: vk::EventCreateFlags::empty(), + }; + + let result = unsafe { self.shared.raw.create_event(&info, None) }; + match result { + Ok(e) => Ok(n::Event(e)), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_event_status(&self, event: &n::Event) -> Result { + let result = 
self.shared.raw.get_event_status(event.0); + match result { + Ok(b) => Ok(b), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost.into()), + _ => unreachable!(), + } + } + + unsafe fn set_event(&self, event: &n::Event) -> Result<(), d::OutOfMemory> { + let result = self.shared.raw.set_event(event.0); + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn reset_event(&self, event: &n::Event) -> Result<(), d::OutOfMemory> { + let result = self.shared.raw.reset_event(event.0); + match result { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn free_memory(&self, memory: n::Memory) { + self.shared.raw.free_memory(memory.raw, None); + } + + unsafe fn create_query_pool( + &self, + ty: query::Type, + query_count: query::Id, + ) -> Result { + let (query_type, pipeline_statistics) = match ty { + query::Type::Occlusion => ( + vk::QueryType::OCCLUSION, + vk::QueryPipelineStatisticFlags::empty(), + ), + query::Type::PipelineStatistics(statistics) => ( + vk::QueryType::PIPELINE_STATISTICS, + conv::map_pipeline_statistics(statistics), + ), + query::Type::Timestamp => ( + vk::QueryType::TIMESTAMP, + vk::QueryPipelineStatisticFlags::empty(), + ), + }; + + let info = vk::QueryPoolCreateInfo { + s_type: vk::StructureType::QUERY_POOL_CREATE_INFO, + p_next: ptr::null(), + flags: vk::QueryPoolCreateFlags::empty(), + query_type, + query_count, + pipeline_statistics, + }; + + let result = self.shared.raw.create_query_pool(&info, None); + + match result 
{ + Ok(pool) => Ok(n::QueryPool(pool)), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn get_query_pool_results( + &self, + pool: &n::QueryPool, + queries: Range, + data: &mut [u8], + stride: buffer::Offset, + flags: query::ResultFlags, + ) -> Result { + let result = self.shared.raw.fp_v1_0().get_query_pool_results( + self.shared.raw.handle(), + pool.0, + queries.start, + queries.end - queries.start, + data.len(), + data.as_mut_ptr() as *mut _, + stride, + conv::map_query_result_flags(flags), + ); + + match result { + vk::Result::SUCCESS => Ok(true), + vk::Result::NOT_READY => Ok(false), + vk::Result::ERROR_DEVICE_LOST => Err(d::DeviceLost.into()), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()), + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()), + _ => unreachable!(), + } + } + + unsafe fn create_swapchain( + &self, + surface: &mut w::Surface, + config: SwapchainConfig, + provided_old_swapchain: Option, + ) -> Result<(w::Swapchain, Vec), hal::window::CreationError> { + let functor = khr::Swapchain::new(&surface.raw.instance.0, &self.shared.raw); + + let old_swapchain = match provided_old_swapchain { + Some(osc) => osc.raw, + None => vk::SwapchainKHR::null(), + }; + + let info = vk::SwapchainCreateInfoKHR { + s_type: vk::StructureType::SWAPCHAIN_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::SwapchainCreateFlagsKHR::empty(), + surface: surface.raw.handle, + min_image_count: config.image_count, + image_format: conv::map_format(config.format), + image_color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR, + image_extent: vk::Extent2D { + width: config.extent.width, + height: config.extent.height, + }, + image_array_layers: 1, + image_usage: conv::map_image_usage(config.image_usage), + image_sharing_mode: vk::SharingMode::EXCLUSIVE, + 
queue_family_index_count: 0, + p_queue_family_indices: ptr::null(), + pre_transform: vk::SurfaceTransformFlagsKHR::IDENTITY, + composite_alpha: conv::map_composite_alpha_mode(config.composite_alpha_mode), + present_mode: conv::map_present_mode(config.present_mode), + clipped: 1, + old_swapchain, + }; + + let result = functor.create_swapchain(&info, None); + + if old_swapchain != vk::SwapchainKHR::null() { + functor.destroy_swapchain(old_swapchain, None) + } + + let swapchain_raw = match result { + Ok(swapchain_raw) => swapchain_raw, + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + return Err(d::OutOfMemory::Host.into()); + } + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { + return Err(d::OutOfMemory::Device.into()); + } + Err(vk::Result::ERROR_DEVICE_LOST) => return Err(d::DeviceLost.into()), + Err(vk::Result::ERROR_SURFACE_LOST_KHR) => return Err(d::SurfaceLost.into()), + Err(vk::Result::ERROR_NATIVE_WINDOW_IN_USE_KHR) => return Err(d::WindowInUse.into()), + _ => unreachable!("Unexpected result - driver bug? 
{:?}", result), + }; + + let result = functor.get_swapchain_images(swapchain_raw); + + let backbuffer_images = match result { + Ok(backbuffer_images) => backbuffer_images, + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + return Err(d::OutOfMemory::Host.into()); + } + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { + return Err(d::OutOfMemory::Device.into()); + } + _ => unreachable!(), + }; + + let swapchain = w::Swapchain { + raw: swapchain_raw, + functor, + vendor_id: self.vendor_id, + }; + + let images = backbuffer_images + .into_iter() + .map(|image| n::Image { + raw: image, + ty: vk::ImageType::TYPE_2D, + flags: vk::ImageCreateFlags::empty(), + extent: vk::Extent3D { + width: config.extent.width, + height: config.extent.height, + depth: 1, + }, + }) + .collect(); + + Ok((swapchain, images)) + } + + unsafe fn destroy_swapchain(&self, swapchain: w::Swapchain) { + swapchain.functor.destroy_swapchain(swapchain.raw, None); + } + + unsafe fn destroy_query_pool(&self, pool: n::QueryPool) { + self.shared.raw.destroy_query_pool(pool.0, None); + } + + unsafe fn destroy_shader_module(&self, module: n::ShaderModule) { + self.shared.raw.destroy_shader_module(module.raw, None); + } + + unsafe fn destroy_render_pass(&self, rp: n::RenderPass) { + self.shared.raw.destroy_render_pass(rp.raw, None); + } + + unsafe fn destroy_pipeline_layout(&self, pl: n::PipelineLayout) { + self.shared.raw.destroy_pipeline_layout(pl.raw, None); + } + + unsafe fn destroy_graphics_pipeline(&self, pipeline: n::GraphicsPipeline) { + self.shared.raw.destroy_pipeline(pipeline.0, None); + } + + unsafe fn destroy_compute_pipeline(&self, pipeline: n::ComputePipeline) { + self.shared.raw.destroy_pipeline(pipeline.0, None); + } + + unsafe fn destroy_framebuffer(&self, fb: n::Framebuffer) { + if fb.owned { + self.shared.raw.destroy_framebuffer(fb.raw, None); + } + } + + unsafe fn destroy_buffer(&self, buffer: n::Buffer) { + self.shared.raw.destroy_buffer(buffer.raw, None); + } + + unsafe fn 
destroy_buffer_view(&self, view: n::BufferView) { + self.shared.raw.destroy_buffer_view(view.raw, None); + } + + unsafe fn destroy_image(&self, image: n::Image) { + self.shared.raw.destroy_image(image.raw, None); + } + + unsafe fn destroy_image_view(&self, view: n::ImageView) { + match view.owner { + n::ImageViewOwner::User => { + self.shared.raw.destroy_image_view(view.view, None); + } + n::ImageViewOwner::Surface(_fbo_cache) => { + //TODO: mark as deleted? + } + } + } + + unsafe fn destroy_sampler(&self, sampler: n::Sampler) { + self.shared.raw.destroy_sampler(sampler.0, None); + } + + unsafe fn destroy_descriptor_pool(&self, pool: n::DescriptorPool) { + self.shared.raw.destroy_descriptor_pool(pool.raw, None); + } + + unsafe fn destroy_descriptor_set_layout(&self, layout: n::DescriptorSetLayout) { + self.shared.raw.destroy_descriptor_set_layout(layout.raw, None); + } + + unsafe fn destroy_fence(&self, fence: n::Fence) { + self.shared.raw.destroy_fence(fence.0, None); + } + + unsafe fn destroy_semaphore(&self, semaphore: n::Semaphore) { + self.shared.raw.destroy_semaphore(semaphore.0, None); + } + + unsafe fn destroy_event(&self, event: n::Event) { + self.shared.raw.destroy_event(event.0, None); + } + + fn wait_idle(&self) -> Result<(), d::OutOfMemory> { + match unsafe { self.shared.raw.device_wait_idle() } { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device), + _ => unreachable!(), + } + } + + unsafe fn set_image_name(&self, image: &mut n::Image, name: &str) { + self.set_object_name(vk::ObjectType::IMAGE, image.raw.as_raw(), name) + } + + unsafe fn set_buffer_name(&self, buffer: &mut n::Buffer, name: &str) { + self.set_object_name(vk::ObjectType::BUFFER, buffer.raw.as_raw(), name) + } + + unsafe fn set_command_buffer_name(&self, command_buffer: &mut cmd::CommandBuffer, name: &str) { + self.set_object_name( + vk::ObjectType::COMMAND_BUFFER, + 
command_buffer.raw.as_raw(), + name, + ) + } + + unsafe fn set_semaphore_name(&self, semaphore: &mut n::Semaphore, name: &str) { + self.set_object_name(vk::ObjectType::SEMAPHORE, semaphore.0.as_raw(), name) + } + + unsafe fn set_fence_name(&self, fence: &mut n::Fence, name: &str) { + self.set_object_name(vk::ObjectType::FENCE, fence.0.as_raw(), name) + } + + unsafe fn set_framebuffer_name(&self, framebuffer: &mut n::Framebuffer, name: &str) { + self.set_object_name(vk::ObjectType::FRAMEBUFFER, framebuffer.raw.as_raw(), name) + } + + unsafe fn set_render_pass_name(&self, render_pass: &mut n::RenderPass, name: &str) { + self.set_object_name(vk::ObjectType::RENDER_PASS, render_pass.raw.as_raw(), name) + } + + unsafe fn set_descriptor_set_name(&self, descriptor_set: &mut n::DescriptorSet, name: &str) { + self.set_object_name( + vk::ObjectType::DESCRIPTOR_SET, + descriptor_set.raw.as_raw(), + name, + ) + } + + unsafe fn set_descriptor_set_layout_name( + &self, + descriptor_set_layout: &mut n::DescriptorSetLayout, + name: &str, + ) { + self.set_object_name( + vk::ObjectType::DESCRIPTOR_SET_LAYOUT, + descriptor_set_layout.raw.as_raw(), + name, + ) + } +} + +impl Device { + unsafe fn set_object_name(&self, object_type: vk::ObjectType, object_handle: u64, name: &str) { + let instance = &self.shared.instance; + if let Some(DebugMessenger::Utils(ref debug_utils_ext, _)) = instance.1 { + // Append a null terminator to the string while avoiding allocating memory + static mut NAME_BUF: [u8; 64] = [0u8; 64]; + std::ptr::copy_nonoverlapping( + name.as_ptr(), + &mut NAME_BUF[0], + name.len().min(NAME_BUF.len()), + ); + NAME_BUF[name.len()] = 0; + let _result = debug_utils_ext.debug_utils_set_object_name( + self.shared.raw.handle(), + &vk::DebugUtilsObjectNameInfoEXT { + s_type: vk::StructureType::DEBUG_UTILS_OBJECT_NAME_INFO_EXT, + p_next: std::ptr::null_mut(), + object_type, + object_handle, + p_object_name: NAME_BUF.as_ptr() as *mut _, + }, + ); + } + } +} + +#[test] +fn 
test_send_sync() { + fn foo() {} + foo::() +} diff --git a/third_party/rust/gfx-backend-vulkan/src/info.rs b/third_party/rust/gfx-backend-vulkan/src/info.rs index 1e02a6f962ac..aa5d01f79b2b 100644 --- a/third_party/rust/gfx-backend-vulkan/src/info.rs +++ b/third_party/rust/gfx-backend-vulkan/src/info.rs @@ -1,5 +1,5 @@ -pub mod intel { - pub const VENDOR: u32 = 0x8086; - pub const DEVICE_KABY_LAKE_MASK: u32 = 0x5900; - pub const DEVICE_SKY_LAKE_MASK: u32 = 0x1900; -} +pub mod intel { + pub const VENDOR: u32 = 0x8086; + pub const DEVICE_KABY_LAKE_MASK: u32 = 0x5900; + pub const DEVICE_SKY_LAKE_MASK: u32 = 0x1900; +} diff --git a/third_party/rust/gfx-backend-vulkan/src/lib.rs b/third_party/rust/gfx-backend-vulkan/src/lib.rs index 0b29a7a7fe00..152c14add9e6 100644 --- a/third_party/rust/gfx-backend-vulkan/src/lib.rs +++ b/third_party/rust/gfx-backend-vulkan/src/lib.rs @@ -1,1435 +1,1511 @@ -#![allow(non_snake_case)] - -#[macro_use] -extern crate log; -#[macro_use] -extern crate ash; -#[macro_use] -extern crate lazy_static; - -#[cfg(target_os = "macos")] -#[macro_use] -extern crate objc; - -use ash::extensions::{ - self, - ext::{DebugReport, DebugUtils}, -}; -use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0}; -use ash::vk; -#[cfg(not(feature = "use-rtld-next"))] -use ash::{Entry, LoadingError}; - -use hal::{ - adapter, - device::{CreationError as DeviceCreationError, DeviceLost, OutOfMemory, SurfaceLost}, - format, - image, - memory, - pso::{PatchSize, PipelineStage}, - queue, - window::{PresentError, Suboptimal, SwapImageIndex}, - Features, - Limits, -}; - -use std::borrow::{Borrow, Cow}; -use std::ffi::{CStr, CString}; -use std::sync::Arc; -use std::{fmt, mem, ptr, slice}; - -#[cfg(feature = "use-rtld-next")] -use ash::{EntryCustom, LoadingError}; -#[cfg(feature = "use-rtld-next")] -use shared_library::dynamic_library::{DynamicLibrary, SpecialHandles}; - -mod command; -mod conv; -mod device; -mod info; -mod native; -mod pool; -mod window; - -// CStr's cannot be 
constant yet, until const fn lands we need to use a lazy_static -lazy_static! { - static ref LAYERS: Vec<&'static CStr> = if cfg!(all(target_os = "android", debug_assertions)) { - vec![ - CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_core_validation\0").unwrap(), - CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_object_tracker\0").unwrap(), - CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_parameter_validation\0").unwrap(), - CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_threading\0").unwrap(), - CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_unique_objects\0").unwrap(), - ] - } else if cfg!(debug_assertions) { - vec![CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_standard_validation\0").unwrap()] - } else { - vec![] - }; - static ref EXTENSIONS: Vec<&'static CStr> = if cfg!(debug_assertions) { - vec![ - DebugUtils::name(), - DebugReport::name(), - ] - } else { - vec![] - }; - static ref DEVICE_EXTENSIONS: Vec<&'static CStr> = vec![extensions::khr::Swapchain::name()]; - static ref SURFACE_EXTENSIONS: Vec<&'static CStr> = vec![ - extensions::khr::Surface::name(), - // Platform-specific WSI extensions - #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] - extensions::khr::XlibSurface::name(), - #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] - extensions::khr::XcbSurface::name(), - #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] - extensions::khr::WaylandSurface::name(), - #[cfg(target_os = "android")] - extensions::khr::AndroidSurface::name(), - #[cfg(target_os = "windows")] - extensions::khr::Win32Surface::name(), - #[cfg(target_os = "macos")] - extensions::mvk::MacOSSurface::name(), - ]; -} - -#[cfg(not(feature = "use-rtld-next"))] -lazy_static! { - // Entry function pointers - pub static ref VK_ENTRY: Result = Entry::new(); -} - -#[cfg(feature = "use-rtld-next")] -lazy_static! 
{ - // Entry function pointers - pub static ref VK_ENTRY: Result, LoadingError> - = EntryCustom::new_custom( - || Ok(()), - |_, name| unsafe { - DynamicLibrary::symbol_special(SpecialHandles::Next, &*name.to_string_lossy()) - .unwrap_or(ptr::null_mut()) - } - ); -} - -pub struct RawInstance(pub ash::Instance, Option); - -pub enum DebugMessenger { - Utils(DebugUtils, vk::DebugUtilsMessengerEXT), - Report(DebugReport, vk::DebugReportCallbackEXT), -} - -impl Drop for RawInstance { - fn drop(&mut self) { - unsafe { - #[cfg(debug_assertions)] - { - match self.1 { - Some(DebugMessenger::Utils(ref ext, callback)) => { - ext.destroy_debug_utils_messenger(callback, None) - } - Some(DebugMessenger::Report(ref ext, callback)) => { - ext.destroy_debug_report_callback(callback, None) - } - None => {} - } - } - - self.0.destroy_instance(None); - } - } -} - -pub struct Instance { - pub raw: Arc, - - /// Supported extensions of this instance. - pub extensions: Vec<&'static CStr>, -} - -impl fmt::Debug for Instance { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Instance") - } -} - -fn map_queue_type(flags: vk::QueueFlags) -> queue::QueueType { - if flags.contains(vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE) { - // TRANSFER_BIT optional - queue::QueueType::General - } else if flags.contains(vk::QueueFlags::GRAPHICS) { - // TRANSFER_BIT optional - queue::QueueType::Graphics - } else if flags.contains(vk::QueueFlags::COMPUTE) { - // TRANSFER_BIT optional - queue::QueueType::Compute - } else if flags.contains(vk::QueueFlags::TRANSFER) { - queue::QueueType::Transfer - } else { - // TODO: present only queues? 
- unimplemented!() - } -} - -unsafe fn display_debug_utils_label_ext( - label_structs: *mut vk::DebugUtilsLabelEXT, - count: usize, -) -> Option { - if count == 0 { - return None; - } - - Some( - slice::from_raw_parts::(label_structs, count) - .iter() - .flat_map(|dul_obj| { - dul_obj - .p_label_name - .as_ref() - .map(|lbl| CStr::from_ptr(lbl).to_string_lossy().into_owned()) - }) - .collect::>() - .join(", "), - ) -} - -unsafe fn display_debug_utils_object_name_info_ext( - info_structs: *mut vk::DebugUtilsObjectNameInfoEXT, - count: usize, -) -> Option { - if count == 0 { - return None; - } - - //TODO: use color field of vk::DebugUtilsLabelsExt in a meaningful way? - Some( - slice::from_raw_parts::(info_structs, count) - .iter() - .map(|obj_info| { - let object_name = obj_info - .p_object_name - .as_ref() - .map(|name| CStr::from_ptr(name).to_string_lossy().into_owned()); - - match object_name { - Some(name) => format!( - "(type: {:?}, hndl: {}, name: {})", - obj_info.object_type, - &obj_info.object_handle.to_string(), - name - ), - None => format!( - "(type: {:?}, hndl: {})", - obj_info.object_type, - &obj_info.object_handle.to_string() - ), - } - }) - .collect::>() - .join(", "), - ) -} - -unsafe extern "system" fn debug_utils_messenger_callback( - message_severity: vk::DebugUtilsMessageSeverityFlagsEXT, - message_type: vk::DebugUtilsMessageTypeFlagsEXT, - p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT, - _user_data: *mut std::os::raw::c_void, -) -> vk::Bool32 { - let callback_data = *p_callback_data; - - let message_severity = match message_severity { - vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => log::Level::Error, - vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => log::Level::Warn, - vk::DebugUtilsMessageSeverityFlagsEXT::INFO => log::Level::Info, - vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => log::Level::Trace, - _ => log::Level::Warn, - }; - let message_type = &format!("{:?}", message_type); - let message_id_number: i32 = 
callback_data.message_id_number as i32; - - let message_id_name = if callback_data.p_message_id_name.is_null() { - Cow::from("") - } else { - CStr::from_ptr(callback_data.p_message_id_name).to_string_lossy() - }; - - let message = if callback_data.p_message.is_null() { - Cow::from("") - } else { - CStr::from_ptr(callback_data.p_message).to_string_lossy() - }; - - let additional_info: [(&str, Option); 3] = [ - ( - "queue info", - display_debug_utils_label_ext( - callback_data.p_queue_labels as *mut _, - callback_data.queue_label_count as usize, - ), - ), - ( - "cmd buf info", - display_debug_utils_label_ext( - callback_data.p_cmd_buf_labels as *mut _, - callback_data.cmd_buf_label_count as usize, - ), - ), - ( - "object info", - display_debug_utils_object_name_info_ext( - callback_data.p_objects as *mut _, - callback_data.object_count as usize, - ), - ), - ]; - - log!(message_severity, "{}\n", { - let mut msg = format!( - "\n{} [{} ({})] : {}", - message_type, - message_id_name, - &message_id_number.to_string(), - message - ); - - for (info_label, info) in additional_info.iter() { - match info { - Some(data) => { - msg = format!("{}\n{}: {}", msg, info_label, data); - } - None => {} - } - } - - msg - }); - - vk::FALSE -} - -unsafe extern "system" fn debug_report_callback( - type_: vk::DebugReportFlagsEXT, - _: vk::DebugReportObjectTypeEXT, - _object: u64, - _location: usize, - _msg_code: i32, - layer_prefix: *const std::os::raw::c_char, - description: *const std::os::raw::c_char, - _user_data: *mut std::os::raw::c_void, -) -> vk::Bool32 { - let level = match type_ { - vk::DebugReportFlagsEXT::ERROR => log::Level::Error, - vk::DebugReportFlagsEXT::WARNING => log::Level::Warn, - vk::DebugReportFlagsEXT::INFORMATION => log::Level::Info, - vk::DebugReportFlagsEXT::DEBUG => log::Level::Debug, - _ => log::Level::Warn, - }; - - let layer_prefix = CStr::from_ptr(layer_prefix).to_str().unwrap(); - let description = CStr::from_ptr(description).to_str().unwrap(); - log!(level, 
"[{}] {}", layer_prefix, description); - vk::FALSE -} - -impl hal::Instance for Instance { - fn create(name: &str, version: u32) -> Result { - // TODO: return errors instead of panic - let entry = VK_ENTRY.as_ref().map_err(|e| { - info!("Missing Vulkan entry points: {:?}", e); - hal::UnsupportedBackend - })?; - - let app_name = CString::new(name).unwrap(); - let app_info = vk::ApplicationInfo { - s_type: vk::StructureType::APPLICATION_INFO, - p_next: ptr::null(), - p_application_name: app_name.as_ptr(), - application_version: version, - p_engine_name: b"gfx-rs\0".as_ptr() as *const _, - engine_version: 1, - api_version: vk_make_version!(1, 0, 0), - }; - - let instance_extensions = entry - .enumerate_instance_extension_properties() - .expect("Unable to enumerate instance extensions"); - - let instance_layers = entry - .enumerate_instance_layer_properties() - .expect("Unable to enumerate instance layers"); - - // Check our extensions against the available extensions - let extensions = SURFACE_EXTENSIONS - .iter() - .chain(EXTENSIONS.iter()) - .filter_map(|&ext| { - instance_extensions - .iter() - .find(|inst_ext| unsafe { - CStr::from_ptr(inst_ext.extension_name.as_ptr()).to_bytes() - == ext.to_bytes() - }) - .map(|_| ext) - .or_else(|| { - warn!("Unable to find extension: {}", ext.to_string_lossy()); - None - }) - }) - .collect::>(); - - // Check requested layers against the available layers - let layers = LAYERS - .iter() - .filter_map(|&layer| { - instance_layers - .iter() - .find(|inst_layer| unsafe { - CStr::from_ptr(inst_layer.layer_name.as_ptr()).to_bytes() - == layer.to_bytes() - }) - .map(|_| layer) - .or_else(|| { - warn!("Unable to find layer: {}", layer.to_string_lossy()); - None - }) - }) - .collect::>(); - - let instance = { - let cstrings = layers - .iter() - .chain(extensions.iter()) - .map(|&s| CString::from(s)) - .collect::>(); - - let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::>(); - - let create_info = vk::InstanceCreateInfo { - 
s_type: vk::StructureType::INSTANCE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::InstanceCreateFlags::empty(), - p_application_info: &app_info, - enabled_layer_count: layers.len() as _, - pp_enabled_layer_names: str_pointers.as_ptr(), - enabled_extension_count: extensions.len() as _, - pp_enabled_extension_names: str_pointers[layers.len() ..].as_ptr(), - }; - - unsafe { entry.create_instance(&create_info, None) }.map_err(|e| { - warn!("Unable to create Vulkan instance: {:?}", e); - hal::UnsupportedBackend - })? - }; - - #[cfg(debug_assertions)] - let debug_messenger = { - // make sure VK_EXT_debug_utils is available - if instance_extensions.iter().any(|props| unsafe { - CStr::from_ptr(props.extension_name.as_ptr()) == DebugUtils::name() - }) { - let ext = DebugUtils::new(entry, &instance); - let info = vk::DebugUtilsMessengerCreateInfoEXT { - s_type: vk::StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, - p_next: ptr::null(), - flags: vk::DebugUtilsMessengerCreateFlagsEXT::empty(), - message_severity: vk::DebugUtilsMessageSeverityFlagsEXT::all(), - message_type: vk::DebugUtilsMessageTypeFlagsEXT::all(), - pfn_user_callback: Some(debug_utils_messenger_callback), - p_user_data: ptr::null_mut(), - }; - let handle = unsafe { ext.create_debug_utils_messenger(&info, None) }.unwrap(); - Some(DebugMessenger::Utils(ext, handle)) - } else if instance_extensions.iter().any(|props| unsafe { - CStr::from_ptr(props.extension_name.as_ptr()) == DebugReport::name() - }) { - let ext = DebugReport::new(entry, &instance); - let info = vk::DebugReportCallbackCreateInfoEXT { - s_type: vk::StructureType::DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, - p_next: ptr::null(), - flags: vk::DebugReportFlagsEXT::all(), - pfn_callback: Some(debug_report_callback), - p_user_data: ptr::null_mut(), - }; - let handle = unsafe { ext.create_debug_report_callback(&info, None) }.unwrap(); - Some(DebugMessenger::Report(ext, handle)) - } else { - None - } - }; - #[cfg(not(debug_assertions))] - let 
debug_messenger = None; - - Ok(Instance { - raw: Arc::new(RawInstance(instance, debug_messenger)), - extensions, - }) - } - - fn enumerate_adapters(&self) -> Vec> { - let devices = match unsafe { self.raw.0.enumerate_physical_devices() } { - Ok(devices) => devices, - Err(err) => { - error!("Could not enumerate physical devices! {}", err); - vec![] - } - }; - - devices - .into_iter() - .map(|device| { - let properties = unsafe { self.raw.0.get_physical_device_properties(device) }; - let info = adapter::AdapterInfo { - name: unsafe { - CStr::from_ptr(properties.device_name.as_ptr()) - .to_str() - .unwrap_or("Unknown") - .to_owned() - }, - vendor: properties.vendor_id as usize, - device: properties.device_id as usize, - device_type: match properties.device_type { - ash::vk::PhysicalDeviceType::OTHER => adapter::DeviceType::Other, - ash::vk::PhysicalDeviceType::INTEGRATED_GPU => { - adapter::DeviceType::IntegratedGpu - } - ash::vk::PhysicalDeviceType::DISCRETE_GPU => { - adapter::DeviceType::DiscreteGpu - } - ash::vk::PhysicalDeviceType::VIRTUAL_GPU => adapter::DeviceType::VirtualGpu, - ash::vk::PhysicalDeviceType::CPU => adapter::DeviceType::Cpu, - _ => adapter::DeviceType::Other, - }, - }; - let physical_device = PhysicalDevice { - instance: self.raw.clone(), - handle: device, - properties, - }; - let queue_families = unsafe { - self.raw - .0 - .get_physical_device_queue_family_properties(device) - .into_iter() - .enumerate() - .map(|(i, properties)| QueueFamily { - properties, - device, - index: i as u32, - }) - .collect() - }; - - adapter::Adapter { - info, - physical_device, - queue_families, - } - }) - .collect() - } - - unsafe fn create_surface( - &self, - has_handle: &impl raw_window_handle::HasRawWindowHandle, - ) -> Result { - use raw_window_handle::RawWindowHandle; - - match has_handle.raw_window_handle() { - #[cfg(all( - unix, - not(target_os = "android"), - not(target_os = "macos") - ))] - RawWindowHandle::Wayland(handle) - if 
self.extensions.contains(&extensions::khr::WaylandSurface::name()) => - { - Ok(self.create_surface_from_wayland(handle.display, handle.surface)) - } - #[cfg(all( - feature = "x11", - unix, - not(target_os = "android"), - not(target_os = "macos") - ))] - RawWindowHandle::Xlib(handle) - if self.extensions.contains(&extensions::khr::XlibSurface::name()) => - { - Ok(self.create_surface_from_xlib(handle.display as *mut _, handle.window)) - } - #[cfg(all( - feature = "xcb", - unix, - not(target_os = "android"), - not(target_os = "macos"), - not(target_os = "ios") - ))] - RawWindowHandle::Xcb(handle) if self.extensions.contains(&extensions::khr::XcbSurface::name()) => { - Ok(self.create_surface_from_xcb(handle.connection as *mut _, handle.window)) - } - #[cfg(target_os = "android")] - RawWindowHandle::Android(handle) => { - Ok(self.create_surface_android(handle.a_native_window)) - } - #[cfg(windows)] - RawWindowHandle::Windows(handle) => { - use winapi::um::libloaderapi::GetModuleHandleW; - - let hinstance = GetModuleHandleW(ptr::null()); - Ok(self.create_surface_from_hwnd(hinstance as *mut _, handle.hwnd)) - } - #[cfg(target_os = "macos")] - RawWindowHandle::MacOS(handle) => { - Ok(self.create_surface_from_ns_view(handle.ns_view)) - } - _ => Err(hal::window::InitError::UnsupportedWindowHandle), - } - } - - unsafe fn destroy_surface(&self, surface: window::Surface) { - surface.raw.functor.destroy_surface(surface.raw.handle, None); - } -} - -#[derive(Debug, Clone)] -pub struct QueueFamily { - properties: vk::QueueFamilyProperties, - device: vk::PhysicalDevice, - index: u32, -} - -impl queue::QueueFamily for QueueFamily { - fn queue_type(&self) -> queue::QueueType { - map_queue_type(self.properties.queue_flags) - } - fn max_queues(&self) -> usize { - self.properties.queue_count as _ - } - fn id(&self) -> queue::QueueFamilyId { - queue::QueueFamilyId(self.index as _) - } -} - -pub struct PhysicalDevice { - instance: Arc, - handle: vk::PhysicalDevice, - properties: 
vk::PhysicalDeviceProperties, -} - -impl fmt::Debug for PhysicalDevice { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("PhysicalDevice") - } -} - -impl adapter::PhysicalDevice for PhysicalDevice { - unsafe fn open( - &self, - families: &[(&QueueFamily, &[queue::QueuePriority])], - requested_features: Features, - ) -> Result, DeviceCreationError> { - let family_infos = families - .iter() - .map(|&(family, priorities)| vk::DeviceQueueCreateInfo { - s_type: vk::StructureType::DEVICE_QUEUE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::DeviceQueueCreateFlags::empty(), - queue_family_index: family.index, - queue_count: priorities.len() as _, - p_queue_priorities: priorities.as_ptr(), - }) - .collect::>(); - - if !self.features().contains(requested_features) { - return Err(DeviceCreationError::MissingFeature); - } - - let enabled_features = conv::map_device_features(requested_features); - - // Create device - let device_raw = { - let cstrings = DEVICE_EXTENSIONS - .iter() - .map(|&s| CString::from(s)) - .collect::>(); - - let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::>(); - - let info = vk::DeviceCreateInfo { - s_type: vk::StructureType::DEVICE_CREATE_INFO, - p_next: ptr::null(), - flags: vk::DeviceCreateFlags::empty(), - queue_create_info_count: family_infos.len() as u32, - p_queue_create_infos: family_infos.as_ptr(), - enabled_layer_count: 0, - pp_enabled_layer_names: ptr::null(), - enabled_extension_count: str_pointers.len() as u32, - pp_enabled_extension_names: str_pointers.as_ptr(), - p_enabled_features: &enabled_features, - }; - - match self.instance.0.create_device(self.handle, &info, None) { - Ok(device) => device, - Err(e) => return Err(match e { - vk::Result::ERROR_OUT_OF_HOST_MEMORY => DeviceCreationError::OutOfMemory(OutOfMemory::Host), - vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => DeviceCreationError::OutOfMemory(OutOfMemory::Device), - vk::Result::ERROR_INITIALIZATION_FAILED => 
DeviceCreationError::InitializationFailed, - vk::Result::ERROR_DEVICE_LOST => DeviceCreationError::DeviceLost, - vk::Result::ERROR_TOO_MANY_OBJECTS => DeviceCreationError::TooManyObjects, - _ => unreachable!(), - }), - } - }; - - let swapchain_fn = vk::KhrSwapchainFn::load(|name| { - mem::transmute( - self.instance - .0 - .get_device_proc_addr(device_raw.handle(), name.as_ptr()), - ) - }); - - let device = Device { - raw: Arc::new(RawDevice(device_raw, requested_features, self.instance.clone())), - vendor_id: self.properties.vendor_id, - }; - - let device_arc = device.raw.clone(); - let queue_groups = families - .into_iter() - .map(|&(family, ref priorities)| { - let mut family_raw = - queue::QueueGroup::new(queue::QueueFamilyId(family.index as usize)); - for id in 0 .. priorities.len() { - let queue_raw = device_arc.0.get_device_queue(family.index, id as _); - family_raw.add_queue(CommandQueue { - raw: Arc::new(queue_raw), - device: device_arc.clone(), - swapchain_fn: swapchain_fn.clone(), - }); - } - family_raw - }) - .collect(); - - Ok(adapter::Gpu { - device, - queue_groups, - }) - } - - fn format_properties(&self, format: Option) -> format::Properties { - let properties = unsafe { - self.instance.0.get_physical_device_format_properties( - self.handle, - format.map_or(vk::Format::UNDEFINED, conv::map_format), - ) - }; - - format::Properties { - linear_tiling: conv::map_image_features(properties.linear_tiling_features), - optimal_tiling: conv::map_image_features(properties.optimal_tiling_features), - buffer_features: conv::map_buffer_features(properties.buffer_features), - } - } - - fn image_format_properties( - &self, - format: format::Format, - dimensions: u8, - tiling: image::Tiling, - usage: image::Usage, - view_caps: image::ViewCapabilities, - ) -> Option { - let format_properties = unsafe { - self.instance.0.get_physical_device_image_format_properties( - self.handle, - conv::map_format(format), - match dimensions { - 1 => vk::ImageType::TYPE_1D, - 2 => 
vk::ImageType::TYPE_2D, - 3 => vk::ImageType::TYPE_3D, - _ => panic!("Unexpected image dimensionality: {}", dimensions), - }, - conv::map_tiling(tiling), - conv::map_image_usage(usage), - conv::map_view_capabilities(view_caps), - ) - }; - - match format_properties { - Ok(props) => Some(image::FormatProperties { - max_extent: image::Extent { - width: props.max_extent.width, - height: props.max_extent.height, - depth: props.max_extent.depth, - }, - max_levels: props.max_mip_levels as _, - max_layers: props.max_array_layers as _, - sample_count_mask: props.sample_counts.as_raw() as _, - max_resource_size: props.max_resource_size as _, - }), - Err(vk::Result::ERROR_FORMAT_NOT_SUPPORTED) => None, - Err(other) => { - error!("Unexpected error in `image_format_properties`: {:?}", other); - None - } - } - } - - fn memory_properties(&self) -> adapter::MemoryProperties { - let mem_properties = unsafe { - self.instance - .0 - .get_physical_device_memory_properties(self.handle) - }; - let memory_heaps = mem_properties.memory_heaps - [.. mem_properties.memory_heap_count as usize] - .iter() - .map(|mem| mem.size) - .collect(); - let memory_types = mem_properties.memory_types - [.. 
mem_properties.memory_type_count as usize] - .iter() - .map(|mem| { - use crate::memory::Properties; - let mut type_flags = Properties::empty(); - - if mem - .property_flags - .intersects(vk::MemoryPropertyFlags::DEVICE_LOCAL) - { - type_flags |= Properties::DEVICE_LOCAL; - } - if mem - .property_flags - .intersects(vk::MemoryPropertyFlags::HOST_VISIBLE) - { - type_flags |= Properties::CPU_VISIBLE; - } - if mem - .property_flags - .intersects(vk::MemoryPropertyFlags::HOST_COHERENT) - { - type_flags |= Properties::COHERENT; - } - if mem - .property_flags - .intersects(vk::MemoryPropertyFlags::HOST_CACHED) - { - type_flags |= Properties::CPU_CACHED; - } - if mem - .property_flags - .intersects(vk::MemoryPropertyFlags::LAZILY_ALLOCATED) - { - type_flags |= Properties::LAZILY_ALLOCATED; - } - - adapter::MemoryType { - properties: type_flags, - heap_index: mem.heap_index as usize, - } - }) - .collect(); - - adapter::MemoryProperties { - memory_heaps, - memory_types, - } - } - - fn features(&self) -> Features { - // see https://github.com/gfx-rs/gfx/issues/1930 - let is_windows_intel_dual_src_bug = cfg!(windows) - && self.properties.vendor_id == info::intel::VENDOR - && (self.properties.device_id & info::intel::DEVICE_KABY_LAKE_MASK - == info::intel::DEVICE_KABY_LAKE_MASK - || self.properties.device_id & info::intel::DEVICE_SKY_LAKE_MASK - == info::intel::DEVICE_SKY_LAKE_MASK); - - let features = unsafe { self.instance.0.get_physical_device_features(self.handle) }; - let mut bits = Features::TRIANGLE_FAN - | Features::SEPARATE_STENCIL_REF_VALUES - | Features::SAMPLER_MIP_LOD_BIAS; - - if features.robust_buffer_access != 0 { - bits |= Features::ROBUST_BUFFER_ACCESS; - } - if features.full_draw_index_uint32 != 0 { - bits |= Features::FULL_DRAW_INDEX_U32; - } - if features.image_cube_array != 0 { - bits |= Features::IMAGE_CUBE_ARRAY; - } - if features.independent_blend != 0 { - bits |= Features::INDEPENDENT_BLENDING; - } - if features.geometry_shader != 0 { - bits |= 
Features::GEOMETRY_SHADER; - } - if features.tessellation_shader != 0 { - bits |= Features::TESSELLATION_SHADER; - } - if features.sample_rate_shading != 0 { - bits |= Features::SAMPLE_RATE_SHADING; - } - if features.dual_src_blend != 0 && !is_windows_intel_dual_src_bug { - bits |= Features::DUAL_SRC_BLENDING; - } - if features.logic_op != 0 { - bits |= Features::LOGIC_OP; - } - if features.multi_draw_indirect != 0 { - bits |= Features::MULTI_DRAW_INDIRECT; - } - if features.draw_indirect_first_instance != 0 { - bits |= Features::DRAW_INDIRECT_FIRST_INSTANCE; - } - if features.depth_clamp != 0 { - bits |= Features::DEPTH_CLAMP; - } - if features.depth_bias_clamp != 0 { - bits |= Features::DEPTH_BIAS_CLAMP; - } - if features.fill_mode_non_solid != 0 { - bits |= Features::NON_FILL_POLYGON_MODE; - } - if features.depth_bounds != 0 { - bits |= Features::DEPTH_BOUNDS; - } - if features.wide_lines != 0 { - bits |= Features::LINE_WIDTH; - } - if features.large_points != 0 { - bits |= Features::POINT_SIZE; - } - if features.alpha_to_one != 0 { - bits |= Features::ALPHA_TO_ONE; - } - if features.multi_viewport != 0 { - bits |= Features::MULTI_VIEWPORTS; - } - if features.sampler_anisotropy != 0 { - bits |= Features::SAMPLER_ANISOTROPY; - } - if features.texture_compression_etc2 != 0 { - bits |= Features::FORMAT_ETC2; - } - if features.texture_compression_astc_ldr != 0 { - bits |= Features::FORMAT_ASTC_LDR; - } - if features.texture_compression_bc != 0 { - bits |= Features::FORMAT_BC; - } - if features.occlusion_query_precise != 0 { - bits |= Features::PRECISE_OCCLUSION_QUERY; - } - if features.pipeline_statistics_query != 0 { - bits |= Features::PIPELINE_STATISTICS_QUERY; - } - if features.vertex_pipeline_stores_and_atomics != 0 { - bits |= Features::VERTEX_STORES_AND_ATOMICS; - } - if features.fragment_stores_and_atomics != 0 { - bits |= Features::FRAGMENT_STORES_AND_ATOMICS; - } - if features.shader_tessellation_and_geometry_point_size != 0 { - bits |= 
Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE; - } - if features.shader_image_gather_extended != 0 { - bits |= Features::SHADER_IMAGE_GATHER_EXTENDED; - } - if features.shader_storage_image_extended_formats != 0 { - bits |= Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS; - } - if features.shader_storage_image_multisample != 0 { - bits |= Features::SHADER_STORAGE_IMAGE_MULTISAMPLE; - } - if features.shader_storage_image_read_without_format != 0 { - bits |= Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT; - } - if features.shader_storage_image_write_without_format != 0 { - bits |= Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT; - } - if features.shader_uniform_buffer_array_dynamic_indexing != 0 { - bits |= Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING; - } - if features.shader_sampled_image_array_dynamic_indexing != 0 { - bits |= Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING; - } - if features.shader_storage_buffer_array_dynamic_indexing != 0 { - bits |= Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING; - } - if features.shader_storage_image_array_dynamic_indexing != 0 { - bits |= Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING; - } - if features.shader_clip_distance != 0 { - bits |= Features::SHADER_CLIP_DISTANCE; - } - if features.shader_cull_distance != 0 { - bits |= Features::SHADER_CULL_DISTANCE; - } - if features.shader_float64 != 0 { - bits |= Features::SHADER_FLOAT64; - } - if features.shader_int64 != 0 { - bits |= Features::SHADER_INT64; - } - if features.shader_int16 != 0 { - bits |= Features::SHADER_INT16; - } - if features.shader_resource_residency != 0 { - bits |= Features::SHADER_RESOURCE_RESIDENCY; - } - if features.shader_resource_min_lod != 0 { - bits |= Features::SHADER_RESOURCE_MIN_LOD; - } - if features.sparse_binding != 0 { - bits |= Features::SPARSE_BINDING; - } - if features.sparse_residency_buffer != 0 { - bits |= Features::SPARSE_RESIDENCY_BUFFER; - } - if features.sparse_residency_image2_d != 0 { 
- bits |= Features::SPARSE_RESIDENCY_IMAGE_2D; - } - if features.sparse_residency_image3_d != 0 { - bits |= Features::SPARSE_RESIDENCY_IMAGE_3D; - } - if features.sparse_residency2_samples != 0 { - bits |= Features::SPARSE_RESIDENCY_2_SAMPLES; - } - if features.sparse_residency4_samples != 0 { - bits |= Features::SPARSE_RESIDENCY_4_SAMPLES; - } - if features.sparse_residency8_samples != 0 { - bits |= Features::SPARSE_RESIDENCY_8_SAMPLES; - } - if features.sparse_residency16_samples != 0 { - bits |= Features::SPARSE_RESIDENCY_16_SAMPLES; - } - if features.sparse_residency_aliased != 0 { - bits |= Features::SPARSE_RESIDENCY_ALIASED; - } - if features.variable_multisample_rate != 0 { - bits |= Features::VARIABLE_MULTISAMPLE_RATE; - } - if features.inherited_queries != 0 { - bits |= Features::INHERITED_QUERIES; - } - - bits - } - - fn limits(&self) -> Limits { - let limits = &self.properties.limits; - let max_group_count = limits.max_compute_work_group_count; - let max_group_size = limits.max_compute_work_group_size; - - Limits { - max_image_1d_size: limits.max_image_dimension1_d, - max_image_2d_size: limits.max_image_dimension2_d, - max_image_3d_size: limits.max_image_dimension3_d, - max_image_cube_size: limits.max_image_dimension_cube, - max_image_array_layers: limits.max_image_array_layers as _, - max_texel_elements: limits.max_texel_buffer_elements as _, - max_patch_size: limits.max_tessellation_patch_size as PatchSize, - max_viewports: limits.max_viewports as _, - max_viewport_dimensions: limits.max_viewport_dimensions, - max_framebuffer_extent: image::Extent { - width: limits.max_framebuffer_width, - height: limits.max_framebuffer_height, - depth: limits.max_framebuffer_layers, - }, - max_compute_work_group_count: [ - max_group_count[0] as _, - max_group_count[1] as _, - max_group_count[2] as _, - ], - max_compute_work_group_size: [ - max_group_size[0] as _, - max_group_size[1] as _, - max_group_size[2] as _, - ], - max_vertex_input_attributes: 
limits.max_vertex_input_attributes as _, - max_vertex_input_bindings: limits.max_vertex_input_bindings as _, - max_vertex_input_attribute_offset: limits.max_vertex_input_attribute_offset as _, - max_vertex_input_binding_stride: limits.max_vertex_input_binding_stride as _, - max_vertex_output_components: limits.max_vertex_output_components as _, - optimal_buffer_copy_offset_alignment: limits.optimal_buffer_copy_offset_alignment as _, - optimal_buffer_copy_pitch_alignment: limits.optimal_buffer_copy_row_pitch_alignment - as _, - min_texel_buffer_offset_alignment: limits.min_texel_buffer_offset_alignment as _, - min_uniform_buffer_offset_alignment: limits.min_uniform_buffer_offset_alignment as _, - min_storage_buffer_offset_alignment: limits.min_storage_buffer_offset_alignment as _, - framebuffer_color_sample_counts: limits.framebuffer_color_sample_counts.as_raw() as _, - framebuffer_depth_sample_counts: limits.framebuffer_depth_sample_counts.as_raw() as _, - framebuffer_stencil_sample_counts: limits.framebuffer_stencil_sample_counts.as_raw() - as _, - max_color_attachments: limits.max_color_attachments as _, - buffer_image_granularity: limits.buffer_image_granularity, - non_coherent_atom_size: limits.non_coherent_atom_size as _, - max_sampler_anisotropy: limits.max_sampler_anisotropy, - min_vertex_input_binding_stride_alignment: 1, - max_bound_descriptor_sets: limits.max_bound_descriptor_sets as _, - max_compute_shared_memory_size: limits.max_compute_shared_memory_size as _, - max_compute_work_group_invocations: limits.max_compute_work_group_invocations as _, - max_descriptor_set_input_attachments: limits.max_descriptor_set_input_attachments as _, - max_descriptor_set_sampled_images: limits.max_descriptor_set_sampled_images as _, - max_descriptor_set_samplers: limits.max_descriptor_set_samplers as _, - max_descriptor_set_storage_buffers: limits.max_descriptor_set_storage_buffers as _, - max_descriptor_set_storage_buffers_dynamic: limits - 
.max_descriptor_set_storage_buffers_dynamic - as _, - max_descriptor_set_storage_images: limits.max_descriptor_set_storage_images as _, - max_descriptor_set_uniform_buffers: limits.max_descriptor_set_uniform_buffers as _, - max_descriptor_set_uniform_buffers_dynamic: limits - .max_descriptor_set_uniform_buffers_dynamic - as _, - max_draw_indexed_index_value: limits.max_draw_indexed_index_value, - max_draw_indirect_count: limits.max_draw_indirect_count, - max_fragment_combined_output_resources: limits.max_fragment_combined_output_resources - as _, - max_fragment_dual_source_attachments: limits.max_fragment_dual_src_attachments as _, - max_fragment_input_components: limits.max_fragment_input_components as _, - max_fragment_output_attachments: limits.max_fragment_output_attachments as _, - max_framebuffer_layers: limits.max_framebuffer_layers as _, - max_geometry_input_components: limits.max_geometry_input_components as _, - max_geometry_output_components: limits.max_geometry_output_components as _, - max_geometry_output_vertices: limits.max_geometry_output_vertices as _, - max_geometry_shader_invocations: limits.max_geometry_shader_invocations as _, - max_geometry_total_output_components: limits.max_geometry_total_output_components as _, - max_memory_allocation_count: limits.max_memory_allocation_count as _, - max_per_stage_descriptor_input_attachments: limits - .max_per_stage_descriptor_input_attachments - as _, - max_per_stage_descriptor_sampled_images: limits.max_per_stage_descriptor_sampled_images - as _, - max_per_stage_descriptor_samplers: limits.max_per_stage_descriptor_samplers as _, - max_per_stage_descriptor_storage_buffers: limits - .max_per_stage_descriptor_storage_buffers - as _, - max_per_stage_descriptor_storage_images: limits.max_per_stage_descriptor_storage_images - as _, - max_per_stage_descriptor_uniform_buffers: limits - .max_per_stage_descriptor_uniform_buffers - as _, - max_per_stage_resources: limits.max_per_stage_resources as _, - 
max_push_constants_size: limits.max_push_constants_size as _, - max_sampler_allocation_count: limits.max_sampler_allocation_count as _, - max_sampler_lod_bias: limits.max_sampler_lod_bias as _, - max_storage_buffer_range: limits.max_storage_buffer_range as _, - max_uniform_buffer_range: limits.max_uniform_buffer_range as _, - min_memory_map_alignment: limits.min_memory_map_alignment, - standard_sample_locations: limits.standard_sample_locations == ash::vk::TRUE, - } - } - - fn is_valid_cache(&self, cache: &[u8]) -> bool { - const HEADER_SIZE: usize = 16 + vk::UUID_SIZE; - - if cache.len() < HEADER_SIZE { - warn!("Bad cache data length {:?}", cache.len()); - return false; - } - - let header_len = u32::from_le_bytes([cache[0], cache[1], cache[2], cache[3]]); - let header_version = u32::from_le_bytes([cache[4], cache[5], cache[6], cache[7]]); - let vendor_id = u32::from_le_bytes([cache[8], cache[9], cache[10], cache[11]]); - let device_id = u32::from_le_bytes([cache[12], cache[13], cache[14], cache[15]]); - - // header length - if (header_len as usize) < HEADER_SIZE { - warn!("Bad header length {:?}", header_len); - return false; - } - - // cache header version - if header_version != vk::PipelineCacheHeaderVersion::ONE.as_raw() as u32 { - warn!("Unsupported cache header version: {:?}", header_version); - return false; - } - - // vendor id - if vendor_id != self.properties.vendor_id { - warn!( - "Vendor ID mismatch. Device: {:?}, cache: {:?}.", - self.properties.vendor_id, vendor_id, - ); - return false; - } - - // device id - if device_id != self.properties.device_id { - warn!( - "Device ID mismatch. Device: {:?}, cache: {:?}.", - self.properties.device_id, device_id, - ); - return false; - } - - if self.properties.pipeline_cache_uuid != cache[16 .. 16 + vk::UUID_SIZE] { - warn!( - "Pipeline cache UUID mismatch. Device: {:?}, cache: {:?}.", - self.properties.pipeline_cache_uuid, - &cache[16 .. 
16 + vk::UUID_SIZE], - ); - return false; - } - true - } -} - -#[doc(hidden)] -pub struct RawDevice( - pub ash::Device, - Features, - Arc, -); - -impl fmt::Debug for RawDevice { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "RawDevice") // TODO: Real Debug impl - } -} -impl Drop for RawDevice { - fn drop(&mut self) { - unsafe { - self.0.destroy_device(None); - } - } -} - -// Need to explicitly synchronize on submission and present. -pub type RawCommandQueue = Arc; - -pub struct CommandQueue { - raw: RawCommandQueue, - device: Arc, - swapchain_fn: vk::KhrSwapchainFn, -} - -impl fmt::Debug for CommandQueue { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("CommandQueue") - } -} - -impl queue::CommandQueue for CommandQueue { - unsafe fn submit<'a, T, Ic, S, Iw, Is>( - &mut self, - submission: queue::Submission, - fence: Option<&native::Fence>, - ) where - T: 'a + Borrow, - Ic: IntoIterator, - S: 'a + Borrow, - Iw: IntoIterator, - Is: IntoIterator, - { - //TODO: avoid heap allocations - let mut waits = Vec::new(); - let mut stages = Vec::new(); - - let buffers = submission - .command_buffers - .into_iter() - .map(|cmd| cmd.borrow().raw) - .collect::>(); - for (semaphore, stage) in submission.wait_semaphores { - waits.push(semaphore.borrow().0); - stages.push(conv::map_pipeline_stage(stage)); - } - let signals = submission - .signal_semaphores - .into_iter() - .map(|semaphore| semaphore.borrow().0) - .collect::>(); - - let info = vk::SubmitInfo { - s_type: vk::StructureType::SUBMIT_INFO, - p_next: ptr::null(), - wait_semaphore_count: waits.len() as u32, - p_wait_semaphores: waits.as_ptr(), - // If count is zero, AMD driver crashes if nullptr is not set for stage masks - p_wait_dst_stage_mask: if stages.is_empty() { - ptr::null() - } else { - stages.as_ptr() - }, - command_buffer_count: buffers.len() as u32, - p_command_buffers: buffers.as_ptr(), - signal_semaphore_count: signals.len() as u32, - p_signal_semaphores: 
signals.as_ptr(), - }; - - let fence_raw = fence.map(|fence| fence.0).unwrap_or(vk::Fence::null()); - - let result = self.device.0.queue_submit(*self.raw, &[info], fence_raw); - assert_eq!(Ok(()), result); - } - - unsafe fn present<'a, W, Is, S, Iw>( - &mut self, - swapchains: Is, - wait_semaphores: Iw, - ) -> Result, PresentError> - where - W: 'a + Borrow, - Is: IntoIterator, - S: 'a + Borrow, - Iw: IntoIterator, - { - let semaphores = wait_semaphores - .into_iter() - .map(|sem| sem.borrow().0) - .collect::>(); - - let mut frames = Vec::new(); - let mut vk_swapchains = Vec::new(); - for (swapchain, index) in swapchains { - vk_swapchains.push(swapchain.borrow().raw); - frames.push(index); - } - - let info = vk::PresentInfoKHR { - s_type: vk::StructureType::PRESENT_INFO_KHR, - p_next: ptr::null(), - wait_semaphore_count: semaphores.len() as _, - p_wait_semaphores: semaphores.as_ptr(), - swapchain_count: vk_swapchains.len() as _, - p_swapchains: vk_swapchains.as_ptr(), - p_image_indices: frames.as_ptr(), - p_results: ptr::null_mut(), - }; - - match self.swapchain_fn.queue_present_khr(*self.raw, &info) { - vk::Result::SUCCESS => Ok(None), - vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)), - vk::Result::ERROR_OUT_OF_HOST_MEMORY => { - Err(PresentError::OutOfMemory(OutOfMemory::Host)) - } - vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => { - Err(PresentError::OutOfMemory(OutOfMemory::Device)) - } - vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)), - vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate), - vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)), - _ => panic!("Failed to present frame"), - } - } - - unsafe fn present_surface( - &mut self, - surface: &mut window::Surface, - image: window::SurfaceImage, - wait_semaphore: Option<&native::Semaphore>, - ) -> Result, PresentError> { - let ssc = surface.swapchain.as_ref().unwrap(); - let p_wait_semaphores = if let Some(wait_semaphore) = wait_semaphore 
{ - &wait_semaphore.0 - } else { - let submit_info = vk::SubmitInfo { - s_type: vk::StructureType::SUBMIT_INFO, - p_next: ptr::null(), - wait_semaphore_count: 0, - p_wait_semaphores: ptr::null(), - p_wait_dst_stage_mask: &vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT, - command_buffer_count: 0, - p_command_buffers: ptr::null(), - signal_semaphore_count: 1, - p_signal_semaphores: &ssc.semaphore.0, - }; - self.device - .0 - .queue_submit(*self.raw, &[submit_info], vk::Fence::null()) - .unwrap(); - &ssc.semaphore.0 - }; - let present_info = vk::PresentInfoKHR { - s_type: vk::StructureType::PRESENT_INFO_KHR, - p_next: ptr::null(), - wait_semaphore_count: 1, - p_wait_semaphores, - swapchain_count: 1, - p_swapchains: &ssc.swapchain.raw, - p_image_indices: &image.index, - p_results: ptr::null_mut(), - }; - - match self - .swapchain_fn - .queue_present_khr(*self.raw, &present_info) - { - vk::Result::SUCCESS => Ok(None), - vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)), - vk::Result::ERROR_OUT_OF_HOST_MEMORY => { - Err(PresentError::OutOfMemory(OutOfMemory::Host)) - } - vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => { - Err(PresentError::OutOfMemory(OutOfMemory::Device)) - } - vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)), - vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate), - vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)), - _ => panic!("Failed to present frame"), - } - } - - fn wait_idle(&self) -> Result<(), OutOfMemory> { - match unsafe { self.device.0.queue_wait_idle(*self.raw) } { - Ok(()) => Ok(()), - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(OutOfMemory::Host), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(OutOfMemory::Device), - Err(_) => unreachable!(), - } - } -} - -#[derive(Debug)] -pub struct Device { - raw: Arc, - vendor_id: u32, -} - -#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] -pub enum Backend {} -impl hal::Backend for Backend { - type Instance = Instance; - 
type PhysicalDevice = PhysicalDevice; - type Device = Device; - - type Surface = window::Surface; - type Swapchain = window::Swapchain; - - type QueueFamily = QueueFamily; - type CommandQueue = CommandQueue; - type CommandBuffer = command::CommandBuffer; - - type Memory = native::Memory; - type CommandPool = pool::RawCommandPool; - - type ShaderModule = native::ShaderModule; - type RenderPass = native::RenderPass; - type Framebuffer = native::Framebuffer; - - type Buffer = native::Buffer; - type BufferView = native::BufferView; - type Image = native::Image; - type ImageView = native::ImageView; - type Sampler = native::Sampler; - - type ComputePipeline = native::ComputePipeline; - type GraphicsPipeline = native::GraphicsPipeline; - type PipelineLayout = native::PipelineLayout; - type PipelineCache = native::PipelineCache; - type DescriptorSetLayout = native::DescriptorSetLayout; - type DescriptorPool = native::DescriptorPool; - type DescriptorSet = native::DescriptorSet; - - type Fence = native::Fence; - type Semaphore = native::Semaphore; - type Event = native::Event; - type QueryPool = native::QueryPool; -} +#![allow(non_snake_case)] + +#[macro_use] +extern crate log; +#[macro_use] +extern crate lazy_static; + +#[cfg(target_os = "macos")] +#[macro_use] +extern crate objc; + +use ash::extensions::{ + self, + ext::{DebugReport, DebugUtils}, +}; +use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0}; +use ash::vk; +#[cfg(not(feature = "use-rtld-next"))] +use ash::{Entry, LoadingError}; + +use hal::{ + adapter, + device::{CreationError as DeviceCreationError, DeviceLost, OutOfMemory, SurfaceLost}, + format, + image, + memory, + pso::{PatchSize, PipelineStage}, + queue, + window::{PresentError, Suboptimal, SwapImageIndex}, + Features, + Hints, + Limits, +}; + +use std::borrow::{Borrow, Cow}; +use std::ffi::{CStr, CString}; +use std::sync::Arc; +use std::{fmt, mem, ptr, slice}; + +#[cfg(feature = "use-rtld-next")] +use ash::{EntryCustom, LoadingError}; +#[cfg(feature 
= "use-rtld-next")] +use shared_library::dynamic_library::{DynamicLibrary, SpecialHandles}; + +mod command; +mod conv; +mod device; +mod info; +mod native; +mod pool; +mod window; + +// CStr's cannot be constant yet, until const fn lands we need to use a lazy_static +lazy_static! { + static ref LAYERS: Vec<&'static CStr> = if cfg!(all(target_os = "android", debug_assertions)) { + vec![ + CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_core_validation\0").unwrap(), + CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_object_tracker\0").unwrap(), + CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_parameter_validation\0").unwrap(), + CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_threading\0").unwrap(), + CStr::from_bytes_with_nul(b"VK_LAYER_GOOGLE_unique_objects\0").unwrap(), + ] + } else if cfg!(debug_assertions) { + vec![CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_standard_validation\0").unwrap()] + } else { + vec![] + }; + static ref EXTENSIONS: Vec<&'static CStr> = if cfg!(debug_assertions) { + vec![ + DebugUtils::name(), + DebugReport::name(), + ] + } else { + vec![] + }; + static ref DEVICE_EXTENSIONS: Vec<&'static CStr> = vec![extensions::khr::Swapchain::name()]; + static ref SURFACE_EXTENSIONS: Vec<&'static CStr> = vec![ + extensions::khr::Surface::name(), + // Platform-specific WSI extensions + #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] + extensions::khr::XlibSurface::name(), + #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] + extensions::khr::XcbSurface::name(), + #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] + extensions::khr::WaylandSurface::name(), + #[cfg(target_os = "android")] + extensions::khr::AndroidSurface::name(), + #[cfg(target_os = "windows")] + extensions::khr::Win32Surface::name(), + #[cfg(target_os = "macos")] + extensions::mvk::MacOSSurface::name(), + ]; + static ref AMD_NEGATIVE_VIEWPORT_HEIGHT: &'static CStr = + 
CStr::from_bytes_with_nul(b"VK_AMD_negative_viewport_height\0").unwrap(); + static ref KHR_MAINTENANCE1: &'static CStr = + CStr::from_bytes_with_nul(b"VK_KHR_maintenance1\0").unwrap(); + static ref KHR_SAMPLER_MIRROR_MIRROR_CLAMP_TO_EDGE : &'static CStr = + CStr::from_bytes_with_nul(b"VK_KHR_sampler_mirror_clamp_to_edge\0").unwrap(); +} + +#[cfg(not(feature = "use-rtld-next"))] +lazy_static! { + // Entry function pointers + pub static ref VK_ENTRY: Result = Entry::new(); +} + +#[cfg(feature = "use-rtld-next")] +lazy_static! { + // Entry function pointers + pub static ref VK_ENTRY: Result, LoadingError> + = EntryCustom::new_custom( + || Ok(()), + |_, name| unsafe { + DynamicLibrary::symbol_special(SpecialHandles::Next, &*name.to_string_lossy()) + .unwrap_or(ptr::null_mut()) + } + ); +} + +pub struct RawInstance(ash::Instance, Option); + +pub enum DebugMessenger { + Utils(DebugUtils, vk::DebugUtilsMessengerEXT), + Report(DebugReport, vk::DebugReportCallbackEXT), +} + +impl Drop for RawInstance { + fn drop(&mut self) { + unsafe { + #[cfg(debug_assertions)] + { + match self.1 { + Some(DebugMessenger::Utils(ref ext, callback)) => { + ext.destroy_debug_utils_messenger(callback, None) + } + Some(DebugMessenger::Report(ref ext, callback)) => { + ext.destroy_debug_report_callback(callback, None) + } + None => {} + } + } + + self.0.destroy_instance(None); + } + } +} + +pub struct Instance { + pub raw: Arc, + + /// Supported extensions of this instance. 
+ pub extensions: Vec<&'static CStr>, +} + +impl fmt::Debug for Instance { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Instance") + } +} + +fn map_queue_type(flags: vk::QueueFlags) -> queue::QueueType { + if flags.contains(vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE) { + // TRANSFER_BIT optional + queue::QueueType::General + } else if flags.contains(vk::QueueFlags::GRAPHICS) { + // TRANSFER_BIT optional + queue::QueueType::Graphics + } else if flags.contains(vk::QueueFlags::COMPUTE) { + // TRANSFER_BIT optional + queue::QueueType::Compute + } else if flags.contains(vk::QueueFlags::TRANSFER) { + queue::QueueType::Transfer + } else { + // TODO: present only queues? + unimplemented!() + } +} + +unsafe fn display_debug_utils_label_ext( + label_structs: *mut vk::DebugUtilsLabelEXT, + count: usize, +) -> Option { + if count == 0 { + return None; + } + + Some( + slice::from_raw_parts::(label_structs, count) + .iter() + .flat_map(|dul_obj| { + dul_obj + .p_label_name + .as_ref() + .map(|lbl| CStr::from_ptr(lbl).to_string_lossy().into_owned()) + }) + .collect::>() + .join(", "), + ) +} + +unsafe fn display_debug_utils_object_name_info_ext( + info_structs: *mut vk::DebugUtilsObjectNameInfoEXT, + count: usize, +) -> Option { + if count == 0 { + return None; + } + + //TODO: use color field of vk::DebugUtilsLabelsExt in a meaningful way? 
+ Some( + slice::from_raw_parts::(info_structs, count) + .iter() + .map(|obj_info| { + let object_name = obj_info + .p_object_name + .as_ref() + .map(|name| CStr::from_ptr(name).to_string_lossy().into_owned()); + + match object_name { + Some(name) => format!( + "(type: {:?}, hndl: {}, name: {})", + obj_info.object_type, + &obj_info.object_handle.to_string(), + name + ), + None => format!( + "(type: {:?}, hndl: {})", + obj_info.object_type, + &obj_info.object_handle.to_string() + ), + } + }) + .collect::>() + .join(", "), + ) +} + +unsafe extern "system" fn debug_utils_messenger_callback( + message_severity: vk::DebugUtilsMessageSeverityFlagsEXT, + message_type: vk::DebugUtilsMessageTypeFlagsEXT, + p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT, + _user_data: *mut std::os::raw::c_void, +) -> vk::Bool32 { + let callback_data = *p_callback_data; + + let message_severity = match message_severity { + vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => log::Level::Error, + vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => log::Level::Warn, + vk::DebugUtilsMessageSeverityFlagsEXT::INFO => log::Level::Info, + vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => log::Level::Trace, + _ => log::Level::Warn, + }; + let message_type = &format!("{:?}", message_type); + let message_id_number: i32 = callback_data.message_id_number as i32; + + let message_id_name = if callback_data.p_message_id_name.is_null() { + Cow::from("") + } else { + CStr::from_ptr(callback_data.p_message_id_name).to_string_lossy() + }; + + let message = if callback_data.p_message.is_null() { + Cow::from("") + } else { + CStr::from_ptr(callback_data.p_message).to_string_lossy() + }; + + let additional_info: [(&str, Option); 3] = [ + ( + "queue info", + display_debug_utils_label_ext( + callback_data.p_queue_labels as *mut _, + callback_data.queue_label_count as usize, + ), + ), + ( + "cmd buf info", + display_debug_utils_label_ext( + callback_data.p_cmd_buf_labels as *mut _, + 
callback_data.cmd_buf_label_count as usize, + ), + ), + ( + "object info", + display_debug_utils_object_name_info_ext( + callback_data.p_objects as *mut _, + callback_data.object_count as usize, + ), + ), + ]; + + log!(message_severity, "{}\n", { + let mut msg = format!( + "\n{} [{} ({})] : {}", + message_type, + message_id_name, + &message_id_number.to_string(), + message + ); + + #[allow(array_into_iter)] + for (info_label, info) in additional_info.into_iter() { + match info { + Some(data) => { + msg = format!("{}\n{}: {}", msg, info_label, data); + } + None => {} + } + } + + msg + }); + + vk::FALSE +} + +unsafe extern "system" fn debug_report_callback( + type_: vk::DebugReportFlagsEXT, + _: vk::DebugReportObjectTypeEXT, + _object: u64, + _location: usize, + _msg_code: i32, + layer_prefix: *const std::os::raw::c_char, + description: *const std::os::raw::c_char, + _user_data: *mut std::os::raw::c_void, +) -> vk::Bool32 { + let level = match type_ { + vk::DebugReportFlagsEXT::ERROR => log::Level::Error, + vk::DebugReportFlagsEXT::WARNING => log::Level::Warn, + vk::DebugReportFlagsEXT::INFORMATION => log::Level::Info, + vk::DebugReportFlagsEXT::DEBUG => log::Level::Debug, + _ => log::Level::Warn, + }; + + let layer_prefix = CStr::from_ptr(layer_prefix).to_str().unwrap(); + let description = CStr::from_ptr(description).to_str().unwrap(); + log!(level, "[{}] {}", layer_prefix, description); + vk::FALSE +} + +impl hal::Instance for Instance { + fn create(name: &str, version: u32) -> Result { + // TODO: return errors instead of panic + let entry = VK_ENTRY.as_ref().map_err(|e| { + info!("Missing Vulkan entry points: {:?}", e); + hal::UnsupportedBackend + })?; + + let app_name = CString::new(name).unwrap(); + let app_info = vk::ApplicationInfo { + s_type: vk::StructureType::APPLICATION_INFO, + p_next: ptr::null(), + p_application_name: app_name.as_ptr(), + application_version: version, + p_engine_name: b"gfx-rs\0".as_ptr() as *const _, + engine_version: 1, + api_version: 
vk::make_version(1, 0, 0), + }; + + let instance_extensions = entry + .enumerate_instance_extension_properties() + .expect("Unable to enumerate instance extensions"); + + let instance_layers = entry + .enumerate_instance_layer_properties() + .expect("Unable to enumerate instance layers"); + + // Check our extensions against the available extensions + let extensions = SURFACE_EXTENSIONS + .iter() + .chain(EXTENSIONS.iter()) + .filter_map(|&ext| { + instance_extensions + .iter() + .find(|inst_ext| unsafe { + CStr::from_ptr(inst_ext.extension_name.as_ptr()) == ext + }) + .map(|_| ext) + .or_else(|| { + warn!("Unable to find extension: {}", ext.to_string_lossy()); + None + }) + }) + .collect::>(); + + // Check requested layers against the available layers + let layers = LAYERS + .iter() + .filter_map(|&layer| { + instance_layers + .iter() + .find(|inst_layer| unsafe { + CStr::from_ptr(inst_layer.layer_name.as_ptr()) == layer + }) + .map(|_| layer) + .or_else(|| { + warn!("Unable to find layer: {}", layer.to_string_lossy()); + None + }) + }) + .collect::>(); + + let instance = { + let cstrings = layers + .iter() + .chain(extensions.iter()) + .map(|&s| CString::from(s)) + .collect::>(); + + let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::>(); + + let create_info = vk::InstanceCreateInfo { + s_type: vk::StructureType::INSTANCE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::InstanceCreateFlags::empty(), + p_application_info: &app_info, + enabled_layer_count: layers.len() as _, + pp_enabled_layer_names: str_pointers.as_ptr(), + enabled_extension_count: extensions.len() as _, + pp_enabled_extension_names: str_pointers[layers.len() ..].as_ptr(), + }; + + unsafe { entry.create_instance(&create_info, None) }.map_err(|e| { + warn!("Unable to create Vulkan instance: {:?}", e); + hal::UnsupportedBackend + })? 
+ }; + + #[cfg(debug_assertions)] + let debug_messenger = { + // make sure VK_EXT_debug_utils is available + if instance_extensions.iter().any(|props| unsafe { + CStr::from_ptr(props.extension_name.as_ptr()) == DebugUtils::name() + }) { + let ext = DebugUtils::new(entry, &instance); + let info = vk::DebugUtilsMessengerCreateInfoEXT { + s_type: vk::StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + p_next: ptr::null(), + flags: vk::DebugUtilsMessengerCreateFlagsEXT::empty(), + message_severity: vk::DebugUtilsMessageSeverityFlagsEXT::all(), + message_type: vk::DebugUtilsMessageTypeFlagsEXT::all(), + pfn_user_callback: Some(debug_utils_messenger_callback), + p_user_data: ptr::null_mut(), + }; + let handle = unsafe { ext.create_debug_utils_messenger(&info, None) }.unwrap(); + Some(DebugMessenger::Utils(ext, handle)) + } else if instance_extensions.iter().any(|props| unsafe { + CStr::from_ptr(props.extension_name.as_ptr()) == DebugReport::name() + }) { + let ext = DebugReport::new(entry, &instance); + let info = vk::DebugReportCallbackCreateInfoEXT { + s_type: vk::StructureType::DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + p_next: ptr::null(), + flags: vk::DebugReportFlagsEXT::all(), + pfn_callback: Some(debug_report_callback), + p_user_data: ptr::null_mut(), + }; + let handle = unsafe { ext.create_debug_report_callback(&info, None) }.unwrap(); + Some(DebugMessenger::Report(ext, handle)) + } else { + None + } + }; + #[cfg(not(debug_assertions))] + let debug_messenger = None; + + Ok(Instance { + raw: Arc::new(RawInstance(instance, debug_messenger)), + extensions, + }) + } + + fn enumerate_adapters(&self) -> Vec> { + let devices = match unsafe { self.raw.0.enumerate_physical_devices() } { + Ok(devices) => devices, + Err(err) => { + error!("Could not enumerate physical devices! 
{}", err); + vec![] + } + }; + + devices + .into_iter() + .map(|device| { + let extensions = + unsafe { self.raw.0.enumerate_device_extension_properties(device) }.unwrap(); + let properties = unsafe { self.raw.0.get_physical_device_properties(device) }; + let info = adapter::AdapterInfo { + name: unsafe { + CStr::from_ptr(properties.device_name.as_ptr()) + .to_str() + .unwrap_or("Unknown") + .to_owned() + }, + vendor: properties.vendor_id as usize, + device: properties.device_id as usize, + device_type: match properties.device_type { + ash::vk::PhysicalDeviceType::OTHER => adapter::DeviceType::Other, + ash::vk::PhysicalDeviceType::INTEGRATED_GPU => { + adapter::DeviceType::IntegratedGpu + } + ash::vk::PhysicalDeviceType::DISCRETE_GPU => { + adapter::DeviceType::DiscreteGpu + } + ash::vk::PhysicalDeviceType::VIRTUAL_GPU => adapter::DeviceType::VirtualGpu, + ash::vk::PhysicalDeviceType::CPU => adapter::DeviceType::Cpu, + _ => adapter::DeviceType::Other, + }, + }; + let physical_device = PhysicalDevice { + instance: self.raw.clone(), + handle: device, + extensions, + properties, + }; + let queue_families = unsafe { + self.raw + .0 + .get_physical_device_queue_family_properties(device) + .into_iter() + .enumerate() + .map(|(i, properties)| QueueFamily { + properties, + device, + index: i as u32, + }) + .collect() + }; + + adapter::Adapter { + info, + physical_device, + queue_families, + } + }) + .collect() + } + + unsafe fn create_surface( + &self, + has_handle: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result { + use raw_window_handle::RawWindowHandle; + + match has_handle.raw_window_handle() { + #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] + RawWindowHandle::Wayland(handle) + if self + .extensions + .contains(&extensions::khr::WaylandSurface::name()) => + { + Ok(self.create_surface_from_wayland(handle.display, handle.surface)) + } + #[cfg(all( + feature = "x11", + unix, + not(target_os = "android"), + not(target_os = "macos") + 
))] + RawWindowHandle::Xlib(handle) + if self + .extensions + .contains(&extensions::khr::XlibSurface::name()) => + { + Ok(self.create_surface_from_xlib(handle.display as *mut _, handle.window)) + } + #[cfg(all( + feature = "xcb", + unix, + not(target_os = "android"), + not(target_os = "macos"), + not(target_os = "ios") + ))] + RawWindowHandle::Xcb(handle) + if self + .extensions + .contains(&extensions::khr::XcbSurface::name()) => + { + Ok(self.create_surface_from_xcb(handle.connection as *mut _, handle.window)) + } + #[cfg(target_os = "android")] + RawWindowHandle::Android(handle) => { + Ok(self.create_surface_android(handle.a_native_window)) + } + #[cfg(windows)] + RawWindowHandle::Windows(handle) => { + use winapi::um::libloaderapi::GetModuleHandleW; + + let hinstance = GetModuleHandleW(ptr::null()); + Ok(self.create_surface_from_hwnd(hinstance as *mut _, handle.hwnd)) + } + #[cfg(target_os = "macos")] + RawWindowHandle::MacOS(handle) => Ok(self.create_surface_from_ns_view(handle.ns_view)), + _ => Err(hal::window::InitError::UnsupportedWindowHandle), + } + } + + unsafe fn destroy_surface(&self, surface: window::Surface) { + surface + .raw + .functor + .destroy_surface(surface.raw.handle, None); + } +} + +#[derive(Debug, Clone)] +pub struct QueueFamily { + properties: vk::QueueFamilyProperties, + device: vk::PhysicalDevice, + index: u32, +} + +impl queue::QueueFamily for QueueFamily { + fn queue_type(&self) -> queue::QueueType { + map_queue_type(self.properties.queue_flags) + } + fn max_queues(&self) -> usize { + self.properties.queue_count as _ + } + fn id(&self) -> queue::QueueFamilyId { + queue::QueueFamilyId(self.index as _) + } +} + +pub struct PhysicalDevice { + instance: Arc, + handle: vk::PhysicalDevice, + extensions: Vec, + properties: vk::PhysicalDeviceProperties, +} + +impl PhysicalDevice { + fn supports_extension(&self, extension: &CStr) -> bool { + self.extensions + .iter() + .any(|ep| unsafe { CStr::from_ptr(ep.extension_name.as_ptr()) } == 
extension) + } +} + +impl fmt::Debug for PhysicalDevice { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("PhysicalDevice") + } +} + +impl adapter::PhysicalDevice for PhysicalDevice { + unsafe fn open( + &self, + families: &[(&QueueFamily, &[queue::QueuePriority])], + requested_features: Features, + ) -> Result, DeviceCreationError> { + let family_infos = families + .iter() + .map(|&(family, priorities)| vk::DeviceQueueCreateInfo { + s_type: vk::StructureType::DEVICE_QUEUE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::DeviceQueueCreateFlags::empty(), + queue_family_index: family.index, + queue_count: priorities.len() as _, + p_queue_priorities: priorities.as_ptr(), + }) + .collect::>(); + + if !self.features().contains(requested_features) { + return Err(DeviceCreationError::MissingFeature); + } + + let maintenance_level = if self.supports_extension(*KHR_MAINTENANCE1) { 1 } else { 0 }; + let enabled_features = conv::map_device_features(requested_features); + let enabled_extensions = DEVICE_EXTENSIONS + .iter() + .cloned() + .chain( + if requested_features.contains(Features::NDC_Y_UP) && maintenance_level == 0 { + Some(*AMD_NEGATIVE_VIEWPORT_HEIGHT) + } else { + None + }, + ) + .chain( + match maintenance_level { + 0 => None, + 1 => Some(*KHR_MAINTENANCE1), + _ => unreachable!(), + } + ); + + // Create device + let device_raw = { + let cstrings = enabled_extensions.map(CString::from).collect::>(); + + let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::>(); + + let info = vk::DeviceCreateInfo { + s_type: vk::StructureType::DEVICE_CREATE_INFO, + p_next: ptr::null(), + flags: vk::DeviceCreateFlags::empty(), + queue_create_info_count: family_infos.len() as u32, + p_queue_create_infos: family_infos.as_ptr(), + enabled_layer_count: 0, + pp_enabled_layer_names: ptr::null(), + enabled_extension_count: str_pointers.len() as u32, + pp_enabled_extension_names: str_pointers.as_ptr(), + p_enabled_features: &enabled_features, + }; + + 
match self.instance.0.create_device(self.handle, &info, None) { + Ok(device) => device, + Err(e) => { + return Err(match e { + vk::Result::ERROR_OUT_OF_HOST_MEMORY => { + DeviceCreationError::OutOfMemory(OutOfMemory::Host) + } + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => { + DeviceCreationError::OutOfMemory(OutOfMemory::Device) + } + vk::Result::ERROR_INITIALIZATION_FAILED => { + DeviceCreationError::InitializationFailed + } + vk::Result::ERROR_DEVICE_LOST => DeviceCreationError::DeviceLost, + vk::Result::ERROR_TOO_MANY_OBJECTS => DeviceCreationError::TooManyObjects, + _ => unreachable!(), + }) + } + } + }; + + let swapchain_fn = vk::KhrSwapchainFn::load(|name| { + mem::transmute( + self.instance + .0 + .get_device_proc_addr(device_raw.handle(), name.as_ptr()), + ) + }); + + let device = Device { + shared: Arc::new(RawDevice { + raw: device_raw, + features: requested_features, + instance: Arc::clone(&self.instance), + maintenance_level, + }), + vendor_id: self.properties.vendor_id, + }; + + let device_arc = Arc::clone(&device.shared); + let queue_groups = families + .into_iter() + .map(|&(family, ref priorities)| { + let mut family_raw = + queue::QueueGroup::new(queue::QueueFamilyId(family.index as usize)); + for id in 0 .. 
priorities.len() { + let queue_raw = device_arc.raw.get_device_queue(family.index, id as _); + family_raw.add_queue(CommandQueue { + raw: Arc::new(queue_raw), + device: device_arc.clone(), + swapchain_fn: swapchain_fn.clone(), + }); + } + family_raw + }) + .collect(); + + Ok(adapter::Gpu { + device, + queue_groups, + }) + } + + fn format_properties(&self, format: Option) -> format::Properties { + let properties = unsafe { + self.instance.0.get_physical_device_format_properties( + self.handle, + format.map_or(vk::Format::UNDEFINED, conv::map_format), + ) + }; + + format::Properties { + linear_tiling: conv::map_image_features(properties.linear_tiling_features), + optimal_tiling: conv::map_image_features(properties.optimal_tiling_features), + buffer_features: conv::map_buffer_features(properties.buffer_features), + } + } + + fn image_format_properties( + &self, + format: format::Format, + dimensions: u8, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Option { + let format_properties = unsafe { + self.instance.0.get_physical_device_image_format_properties( + self.handle, + conv::map_format(format), + match dimensions { + 1 => vk::ImageType::TYPE_1D, + 2 => vk::ImageType::TYPE_2D, + 3 => vk::ImageType::TYPE_3D, + _ => panic!("Unexpected image dimensionality: {}", dimensions), + }, + conv::map_tiling(tiling), + conv::map_image_usage(usage), + conv::map_view_capabilities(view_caps), + ) + }; + + match format_properties { + Ok(props) => Some(image::FormatProperties { + max_extent: image::Extent { + width: props.max_extent.width, + height: props.max_extent.height, + depth: props.max_extent.depth, + }, + max_levels: props.max_mip_levels as _, + max_layers: props.max_array_layers as _, + sample_count_mask: props.sample_counts.as_raw() as _, + max_resource_size: props.max_resource_size as _, + }), + Err(vk::Result::ERROR_FORMAT_NOT_SUPPORTED) => None, + Err(other) => { + error!("Unexpected error in `image_format_properties`: {:?}", 
other); + None + } + } + } + + fn memory_properties(&self) -> adapter::MemoryProperties { + let mem_properties = unsafe { + self.instance + .0 + .get_physical_device_memory_properties(self.handle) + }; + let memory_heaps = mem_properties.memory_heaps + [.. mem_properties.memory_heap_count as usize] + .iter() + .map(|mem| mem.size) + .collect(); + let memory_types = mem_properties.memory_types + [.. mem_properties.memory_type_count as usize] + .iter() + .map(|mem| { + use crate::memory::Properties; + let mut type_flags = Properties::empty(); + + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::DEVICE_LOCAL) + { + type_flags |= Properties::DEVICE_LOCAL; + } + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::HOST_VISIBLE) + { + type_flags |= Properties::CPU_VISIBLE; + } + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::HOST_COHERENT) + { + type_flags |= Properties::COHERENT; + } + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::HOST_CACHED) + { + type_flags |= Properties::CPU_CACHED; + } + if mem + .property_flags + .intersects(vk::MemoryPropertyFlags::LAZILY_ALLOCATED) + { + type_flags |= Properties::LAZILY_ALLOCATED; + } + + adapter::MemoryType { + properties: type_flags, + heap_index: mem.heap_index as usize, + } + }) + .collect(); + + adapter::MemoryProperties { + memory_heaps, + memory_types, + } + } + + fn features(&self) -> Features { + // see https://github.com/gfx-rs/gfx/issues/1930 + let is_windows_intel_dual_src_bug = cfg!(windows) + && self.properties.vendor_id == info::intel::VENDOR + && (self.properties.device_id & info::intel::DEVICE_KABY_LAKE_MASK + == info::intel::DEVICE_KABY_LAKE_MASK + || self.properties.device_id & info::intel::DEVICE_SKY_LAKE_MASK + == info::intel::DEVICE_SKY_LAKE_MASK); + + let features = unsafe { self.instance.0.get_physical_device_features(self.handle) }; + let mut bits = Features::empty() + | Features::TRIANGLE_FAN + | Features::SEPARATE_STENCIL_REF_VALUES + | 
Features::SAMPLER_MIP_LOD_BIAS; + + if self.supports_extension(*AMD_NEGATIVE_VIEWPORT_HEIGHT) + || self.supports_extension(*KHR_MAINTENANCE1) + { + bits |= Features::NDC_Y_UP; + } + if self.supports_extension(*KHR_SAMPLER_MIRROR_MIRROR_CLAMP_TO_EDGE) { + bits |= Features::SAMPLER_MIRROR_CLAMP_EDGE; + } + + if features.robust_buffer_access != 0 { + bits |= Features::ROBUST_BUFFER_ACCESS; + } + if features.full_draw_index_uint32 != 0 { + bits |= Features::FULL_DRAW_INDEX_U32; + } + if features.image_cube_array != 0 { + bits |= Features::IMAGE_CUBE_ARRAY; + } + if features.independent_blend != 0 { + bits |= Features::INDEPENDENT_BLENDING; + } + if features.geometry_shader != 0 { + bits |= Features::GEOMETRY_SHADER; + } + if features.tessellation_shader != 0 { + bits |= Features::TESSELLATION_SHADER; + } + if features.sample_rate_shading != 0 { + bits |= Features::SAMPLE_RATE_SHADING; + } + if features.dual_src_blend != 0 && !is_windows_intel_dual_src_bug { + bits |= Features::DUAL_SRC_BLENDING; + } + if features.logic_op != 0 { + bits |= Features::LOGIC_OP; + } + if features.multi_draw_indirect != 0 { + bits |= Features::MULTI_DRAW_INDIRECT; + } + if features.draw_indirect_first_instance != 0 { + bits |= Features::DRAW_INDIRECT_FIRST_INSTANCE; + } + if features.depth_clamp != 0 { + bits |= Features::DEPTH_CLAMP; + } + if features.depth_bias_clamp != 0 { + bits |= Features::DEPTH_BIAS_CLAMP; + } + if features.fill_mode_non_solid != 0 { + bits |= Features::NON_FILL_POLYGON_MODE; + } + if features.depth_bounds != 0 { + bits |= Features::DEPTH_BOUNDS; + } + if features.wide_lines != 0 { + bits |= Features::LINE_WIDTH; + } + if features.large_points != 0 { + bits |= Features::POINT_SIZE; + } + if features.alpha_to_one != 0 { + bits |= Features::ALPHA_TO_ONE; + } + if features.multi_viewport != 0 { + bits |= Features::MULTI_VIEWPORTS; + } + if features.sampler_anisotropy != 0 { + bits |= Features::SAMPLER_ANISOTROPY; + } + if features.texture_compression_etc2 != 0 { + bits 
|= Features::FORMAT_ETC2; + } + if features.texture_compression_astc_ldr != 0 { + bits |= Features::FORMAT_ASTC_LDR; + } + if features.texture_compression_bc != 0 { + bits |= Features::FORMAT_BC; + } + if features.occlusion_query_precise != 0 { + bits |= Features::PRECISE_OCCLUSION_QUERY; + } + if features.pipeline_statistics_query != 0 { + bits |= Features::PIPELINE_STATISTICS_QUERY; + } + if features.vertex_pipeline_stores_and_atomics != 0 { + bits |= Features::VERTEX_STORES_AND_ATOMICS; + } + if features.fragment_stores_and_atomics != 0 { + bits |= Features::FRAGMENT_STORES_AND_ATOMICS; + } + if features.shader_tessellation_and_geometry_point_size != 0 { + bits |= Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE; + } + if features.shader_image_gather_extended != 0 { + bits |= Features::SHADER_IMAGE_GATHER_EXTENDED; + } + if features.shader_storage_image_extended_formats != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS; + } + if features.shader_storage_image_multisample != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_MULTISAMPLE; + } + if features.shader_storage_image_read_without_format != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT; + } + if features.shader_storage_image_write_without_format != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT; + } + if features.shader_uniform_buffer_array_dynamic_indexing != 0 { + bits |= Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING; + } + if features.shader_sampled_image_array_dynamic_indexing != 0 { + bits |= Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING; + } + if features.shader_storage_buffer_array_dynamic_indexing != 0 { + bits |= Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING; + } + if features.shader_storage_image_array_dynamic_indexing != 0 { + bits |= Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING; + } + if features.shader_clip_distance != 0 { + bits |= Features::SHADER_CLIP_DISTANCE; + } + if features.shader_cull_distance != 0 
{ + bits |= Features::SHADER_CULL_DISTANCE; + } + if features.shader_float64 != 0 { + bits |= Features::SHADER_FLOAT64; + } + if features.shader_int64 != 0 { + bits |= Features::SHADER_INT64; + } + if features.shader_int16 != 0 { + bits |= Features::SHADER_INT16; + } + if features.shader_resource_residency != 0 { + bits |= Features::SHADER_RESOURCE_RESIDENCY; + } + if features.shader_resource_min_lod != 0 { + bits |= Features::SHADER_RESOURCE_MIN_LOD; + } + if features.sparse_binding != 0 { + bits |= Features::SPARSE_BINDING; + } + if features.sparse_residency_buffer != 0 { + bits |= Features::SPARSE_RESIDENCY_BUFFER; + } + if features.sparse_residency_image2_d != 0 { + bits |= Features::SPARSE_RESIDENCY_IMAGE_2D; + } + if features.sparse_residency_image3_d != 0 { + bits |= Features::SPARSE_RESIDENCY_IMAGE_3D; + } + if features.sparse_residency2_samples != 0 { + bits |= Features::SPARSE_RESIDENCY_2_SAMPLES; + } + if features.sparse_residency4_samples != 0 { + bits |= Features::SPARSE_RESIDENCY_4_SAMPLES; + } + if features.sparse_residency8_samples != 0 { + bits |= Features::SPARSE_RESIDENCY_8_SAMPLES; + } + if features.sparse_residency16_samples != 0 { + bits |= Features::SPARSE_RESIDENCY_16_SAMPLES; + } + if features.sparse_residency_aliased != 0 { + bits |= Features::SPARSE_RESIDENCY_ALIASED; + } + if features.variable_multisample_rate != 0 { + bits |= Features::VARIABLE_MULTISAMPLE_RATE; + } + if features.inherited_queries != 0 { + bits |= Features::INHERITED_QUERIES; + } + + bits + } + + fn hints(&self) -> Hints { + Hints::BASE_VERTEX_INSTANCE_DRAWING + } + + fn limits(&self) -> Limits { + let limits = &self.properties.limits; + let max_group_count = limits.max_compute_work_group_count; + let max_group_size = limits.max_compute_work_group_size; + + Limits { + max_image_1d_size: limits.max_image_dimension1_d, + max_image_2d_size: limits.max_image_dimension2_d, + max_image_3d_size: limits.max_image_dimension3_d, + max_image_cube_size: 
limits.max_image_dimension_cube, + max_image_array_layers: limits.max_image_array_layers as _, + max_texel_elements: limits.max_texel_buffer_elements as _, + max_patch_size: limits.max_tessellation_patch_size as PatchSize, + max_viewports: limits.max_viewports as _, + max_viewport_dimensions: limits.max_viewport_dimensions, + max_framebuffer_extent: image::Extent { + width: limits.max_framebuffer_width, + height: limits.max_framebuffer_height, + depth: limits.max_framebuffer_layers, + }, + max_compute_work_group_count: [ + max_group_count[0] as _, + max_group_count[1] as _, + max_group_count[2] as _, + ], + max_compute_work_group_size: [ + max_group_size[0] as _, + max_group_size[1] as _, + max_group_size[2] as _, + ], + max_vertex_input_attributes: limits.max_vertex_input_attributes as _, + max_vertex_input_bindings: limits.max_vertex_input_bindings as _, + max_vertex_input_attribute_offset: limits.max_vertex_input_attribute_offset as _, + max_vertex_input_binding_stride: limits.max_vertex_input_binding_stride as _, + max_vertex_output_components: limits.max_vertex_output_components as _, + optimal_buffer_copy_offset_alignment: limits.optimal_buffer_copy_offset_alignment as _, + optimal_buffer_copy_pitch_alignment: limits.optimal_buffer_copy_row_pitch_alignment + as _, + min_texel_buffer_offset_alignment: limits.min_texel_buffer_offset_alignment as _, + min_uniform_buffer_offset_alignment: limits.min_uniform_buffer_offset_alignment as _, + min_storage_buffer_offset_alignment: limits.min_storage_buffer_offset_alignment as _, + framebuffer_color_sample_counts: limits.framebuffer_color_sample_counts.as_raw() as _, + framebuffer_depth_sample_counts: limits.framebuffer_depth_sample_counts.as_raw() as _, + framebuffer_stencil_sample_counts: limits.framebuffer_stencil_sample_counts.as_raw() + as _, + max_color_attachments: limits.max_color_attachments as _, + buffer_image_granularity: limits.buffer_image_granularity, + non_coherent_atom_size: 
limits.non_coherent_atom_size as _, + max_sampler_anisotropy: limits.max_sampler_anisotropy, + min_vertex_input_binding_stride_alignment: 1, + max_bound_descriptor_sets: limits.max_bound_descriptor_sets as _, + max_compute_shared_memory_size: limits.max_compute_shared_memory_size as _, + max_compute_work_group_invocations: limits.max_compute_work_group_invocations as _, + max_descriptor_set_input_attachments: limits.max_descriptor_set_input_attachments as _, + max_descriptor_set_sampled_images: limits.max_descriptor_set_sampled_images as _, + max_descriptor_set_samplers: limits.max_descriptor_set_samplers as _, + max_descriptor_set_storage_buffers: limits.max_descriptor_set_storage_buffers as _, + max_descriptor_set_storage_buffers_dynamic: limits + .max_descriptor_set_storage_buffers_dynamic + as _, + max_descriptor_set_storage_images: limits.max_descriptor_set_storage_images as _, + max_descriptor_set_uniform_buffers: limits.max_descriptor_set_uniform_buffers as _, + max_descriptor_set_uniform_buffers_dynamic: limits + .max_descriptor_set_uniform_buffers_dynamic + as _, + max_draw_indexed_index_value: limits.max_draw_indexed_index_value, + max_draw_indirect_count: limits.max_draw_indirect_count, + max_fragment_combined_output_resources: limits.max_fragment_combined_output_resources + as _, + max_fragment_dual_source_attachments: limits.max_fragment_dual_src_attachments as _, + max_fragment_input_components: limits.max_fragment_input_components as _, + max_fragment_output_attachments: limits.max_fragment_output_attachments as _, + max_framebuffer_layers: limits.max_framebuffer_layers as _, + max_geometry_input_components: limits.max_geometry_input_components as _, + max_geometry_output_components: limits.max_geometry_output_components as _, + max_geometry_output_vertices: limits.max_geometry_output_vertices as _, + max_geometry_shader_invocations: limits.max_geometry_shader_invocations as _, + max_geometry_total_output_components: 
limits.max_geometry_total_output_components as _, + max_memory_allocation_count: limits.max_memory_allocation_count as _, + max_per_stage_descriptor_input_attachments: limits + .max_per_stage_descriptor_input_attachments + as _, + max_per_stage_descriptor_sampled_images: limits.max_per_stage_descriptor_sampled_images + as _, + max_per_stage_descriptor_samplers: limits.max_per_stage_descriptor_samplers as _, + max_per_stage_descriptor_storage_buffers: limits + .max_per_stage_descriptor_storage_buffers + as _, + max_per_stage_descriptor_storage_images: limits.max_per_stage_descriptor_storage_images + as _, + max_per_stage_descriptor_uniform_buffers: limits + .max_per_stage_descriptor_uniform_buffers + as _, + max_per_stage_resources: limits.max_per_stage_resources as _, + max_push_constants_size: limits.max_push_constants_size as _, + max_sampler_allocation_count: limits.max_sampler_allocation_count as _, + max_sampler_lod_bias: limits.max_sampler_lod_bias as _, + max_storage_buffer_range: limits.max_storage_buffer_range as _, + max_uniform_buffer_range: limits.max_uniform_buffer_range as _, + min_memory_map_alignment: limits.min_memory_map_alignment, + standard_sample_locations: limits.standard_sample_locations == ash::vk::TRUE, + } + } + + fn is_valid_cache(&self, cache: &[u8]) -> bool { + const HEADER_SIZE: usize = 16 + vk::UUID_SIZE; + + if cache.len() < HEADER_SIZE { + warn!("Bad cache data length {:?}", cache.len()); + return false; + } + + let header_len = u32::from_le_bytes([cache[0], cache[1], cache[2], cache[3]]); + let header_version = u32::from_le_bytes([cache[4], cache[5], cache[6], cache[7]]); + let vendor_id = u32::from_le_bytes([cache[8], cache[9], cache[10], cache[11]]); + let device_id = u32::from_le_bytes([cache[12], cache[13], cache[14], cache[15]]); + + // header length + if (header_len as usize) < HEADER_SIZE { + warn!("Bad header length {:?}", header_len); + return false; + } + + // cache header version + if header_version != 
vk::PipelineCacheHeaderVersion::ONE.as_raw() as u32 { + warn!("Unsupported cache header version: {:?}", header_version); + return false; + } + + // vendor id + if vendor_id != self.properties.vendor_id { + warn!( + "Vendor ID mismatch. Device: {:?}, cache: {:?}.", + self.properties.vendor_id, vendor_id, + ); + return false; + } + + // device id + if device_id != self.properties.device_id { + warn!( + "Device ID mismatch. Device: {:?}, cache: {:?}.", + self.properties.device_id, device_id, + ); + return false; + } + + if self.properties.pipeline_cache_uuid != cache[16 .. 16 + vk::UUID_SIZE] { + warn!( + "Pipeline cache UUID mismatch. Device: {:?}, cache: {:?}.", + self.properties.pipeline_cache_uuid, + &cache[16 .. 16 + vk::UUID_SIZE], + ); + return false; + } + true + } +} + +#[doc(hidden)] +pub struct RawDevice { + raw: ash::Device, + features: Features, + instance: Arc, + maintenance_level: u8, +} + +impl fmt::Debug for RawDevice { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "RawDevice") // TODO: Real Debug impl + } +} +impl Drop for RawDevice { + fn drop(&mut self) { + unsafe { + self.raw.destroy_device(None); + } + } +} + +impl RawDevice { + fn debug_messenger(&self) -> Option<&DebugMessenger> { + self.instance.1.as_ref() + } + + fn map_viewport(&self, rect: &hal::pso::Viewport) -> vk::Viewport { + let flip_y = self.features.contains(hal::Features::NDC_Y_UP); + let shift_y = flip_y && self.maintenance_level != 0; + conv::map_viewport(rect, flip_y, shift_y) + } +} + +// Need to explicitly synchronize on submission and present. 
+pub type RawCommandQueue = Arc; + +pub struct CommandQueue { + raw: RawCommandQueue, + device: Arc, + swapchain_fn: vk::KhrSwapchainFn, +} + +impl fmt::Debug for CommandQueue { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("CommandQueue") + } +} + +impl queue::CommandQueue for CommandQueue { + unsafe fn submit<'a, T, Ic, S, Iw, Is>( + &mut self, + submission: queue::Submission, + fence: Option<&native::Fence>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + Is: IntoIterator, + { + //TODO: avoid heap allocations + let mut waits = Vec::new(); + let mut stages = Vec::new(); + + let buffers = submission + .command_buffers + .into_iter() + .map(|cmd| cmd.borrow().raw) + .collect::>(); + for (semaphore, stage) in submission.wait_semaphores { + waits.push(semaphore.borrow().0); + stages.push(conv::map_pipeline_stage(stage)); + } + let signals = submission + .signal_semaphores + .into_iter() + .map(|semaphore| semaphore.borrow().0) + .collect::>(); + + let info = vk::SubmitInfo { + s_type: vk::StructureType::SUBMIT_INFO, + p_next: ptr::null(), + wait_semaphore_count: waits.len() as u32, + p_wait_semaphores: waits.as_ptr(), + // If count is zero, AMD driver crashes if nullptr is not set for stage masks + p_wait_dst_stage_mask: if stages.is_empty() { + ptr::null() + } else { + stages.as_ptr() + }, + command_buffer_count: buffers.len() as u32, + p_command_buffers: buffers.as_ptr(), + signal_semaphore_count: signals.len() as u32, + p_signal_semaphores: signals.as_ptr(), + }; + + let fence_raw = fence.map(|fence| fence.0).unwrap_or(vk::Fence::null()); + + let result = self.device.raw.queue_submit(*self.raw, &[info], fence_raw); + assert_eq!(Ok(()), result); + } + + unsafe fn present<'a, W, Is, S, Iw>( + &mut self, + swapchains: Is, + wait_semaphores: Iw, + ) -> Result, PresentError> + where + W: 'a + Borrow, + Is: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + { + let semaphores = wait_semaphores + 
.into_iter() + .map(|sem| sem.borrow().0) + .collect::>(); + + let mut frames = Vec::new(); + let mut vk_swapchains = Vec::new(); + for (swapchain, index) in swapchains { + vk_swapchains.push(swapchain.borrow().raw); + frames.push(index); + } + + let info = vk::PresentInfoKHR { + s_type: vk::StructureType::PRESENT_INFO_KHR, + p_next: ptr::null(), + wait_semaphore_count: semaphores.len() as _, + p_wait_semaphores: semaphores.as_ptr(), + swapchain_count: vk_swapchains.len() as _, + p_swapchains: vk_swapchains.as_ptr(), + p_image_indices: frames.as_ptr(), + p_results: ptr::null_mut(), + }; + + match self.swapchain_fn.queue_present_khr(*self.raw, &info) { + vk::Result::SUCCESS => Ok(None), + vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => { + Err(PresentError::OutOfMemory(OutOfMemory::Host)) + } + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => { + Err(PresentError::OutOfMemory(OutOfMemory::Device)) + } + vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)), + vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate), + vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)), + _ => panic!("Failed to present frame"), + } + } + + unsafe fn present_surface( + &mut self, + surface: &mut window::Surface, + image: window::SurfaceImage, + wait_semaphore: Option<&native::Semaphore>, + ) -> Result, PresentError> { + let ssc = surface.swapchain.as_ref().unwrap(); + let p_wait_semaphores = if let Some(wait_semaphore) = wait_semaphore { + &wait_semaphore.0 + } else { + let submit_info = vk::SubmitInfo { + s_type: vk::StructureType::SUBMIT_INFO, + p_next: ptr::null(), + wait_semaphore_count: 0, + p_wait_semaphores: ptr::null(), + p_wait_dst_stage_mask: &vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT, + command_buffer_count: 0, + p_command_buffers: ptr::null(), + signal_semaphore_count: 1, + p_signal_semaphores: &ssc.semaphore.0, + }; + self.device + .raw + .queue_submit(*self.raw, 
&[submit_info], vk::Fence::null()) + .unwrap(); + &ssc.semaphore.0 + }; + let present_info = vk::PresentInfoKHR { + s_type: vk::StructureType::PRESENT_INFO_KHR, + p_next: ptr::null(), + wait_semaphore_count: 1, + p_wait_semaphores, + swapchain_count: 1, + p_swapchains: &ssc.swapchain.raw, + p_image_indices: &image.index, + p_results: ptr::null_mut(), + }; + + match self + .swapchain_fn + .queue_present_khr(*self.raw, &present_info) + { + vk::Result::SUCCESS => Ok(None), + vk::Result::SUBOPTIMAL_KHR => Ok(Some(Suboptimal)), + vk::Result::ERROR_OUT_OF_HOST_MEMORY => { + Err(PresentError::OutOfMemory(OutOfMemory::Host)) + } + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => { + Err(PresentError::OutOfMemory(OutOfMemory::Device)) + } + vk::Result::ERROR_DEVICE_LOST => Err(PresentError::DeviceLost(DeviceLost)), + vk::Result::ERROR_OUT_OF_DATE_KHR => Err(PresentError::OutOfDate), + vk::Result::ERROR_SURFACE_LOST_KHR => Err(PresentError::SurfaceLost(SurfaceLost)), + _ => panic!("Failed to present frame"), + } + } + + fn wait_idle(&self) -> Result<(), OutOfMemory> { + match unsafe { self.device.raw.queue_wait_idle(*self.raw) } { + Ok(()) => Ok(()), + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(OutOfMemory::Host), + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(OutOfMemory::Device), + Err(_) => unreachable!(), + } + } +} + +#[derive(Debug)] +pub struct Device { + shared: Arc, + vendor_id: u32, +} + +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] +pub enum Backend {} +impl hal::Backend for Backend { + type Instance = Instance; + type PhysicalDevice = PhysicalDevice; + type Device = Device; + + type Surface = window::Surface; + type Swapchain = window::Swapchain; + + type QueueFamily = QueueFamily; + type CommandQueue = CommandQueue; + type CommandBuffer = command::CommandBuffer; + + type Memory = native::Memory; + type CommandPool = pool::RawCommandPool; + + type ShaderModule = native::ShaderModule; + type RenderPass = native::RenderPass; + type Framebuffer = 
native::Framebuffer; + + type Buffer = native::Buffer; + type BufferView = native::BufferView; + type Image = native::Image; + type ImageView = native::ImageView; + type Sampler = native::Sampler; + + type ComputePipeline = native::ComputePipeline; + type GraphicsPipeline = native::GraphicsPipeline; + type PipelineLayout = native::PipelineLayout; + type PipelineCache = native::PipelineCache; + type DescriptorSetLayout = native::DescriptorSetLayout; + type DescriptorPool = native::DescriptorPool; + type DescriptorSet = native::DescriptorSet; + + type Fence = native::Fence; + type Semaphore = native::Semaphore; + type Event = native::Event; + type QueryPool = native::QueryPool; +} diff --git a/third_party/rust/gfx-backend-vulkan/src/native.rs b/third_party/rust/gfx-backend-vulkan/src/native.rs index c53509f3814d..f9f84b81c1eb 100644 --- a/third_party/rust/gfx-backend-vulkan/src/native.rs +++ b/third_party/rust/gfx-backend-vulkan/src/native.rs @@ -1,180 +1,180 @@ -use crate::{window::FramebufferCachePtr, Backend, RawDevice}; -use ash::{version::DeviceV1_0, vk}; -use hal::{image::SubresourceRange, pso}; -use smallvec::SmallVec; -use std::{borrow::Borrow, sync::Arc}; - -#[derive(Debug, Hash)] -pub struct Semaphore(pub vk::Semaphore); - -#[derive(Debug, Hash, PartialEq, Eq)] -pub struct Fence(pub vk::Fence); - -#[derive(Debug, Hash)] -pub struct Event(pub vk::Event); - -#[derive(Debug, Hash)] -pub struct GraphicsPipeline(pub vk::Pipeline); - -#[derive(Debug, Hash)] -pub struct ComputePipeline(pub vk::Pipeline); - -#[derive(Debug, Hash)] -pub struct Memory { - pub(crate) raw: vk::DeviceMemory, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct Buffer { - pub(crate) raw: vk::Buffer, -} - -unsafe impl Sync for Buffer {} -unsafe impl Send for Buffer {} - -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct BufferView { - pub(crate) raw: vk::BufferView, -} - -#[derive(Debug, Hash, PartialEq, Eq)] -pub struct Image { - pub(crate) raw: vk::Image, - 
pub(crate) ty: vk::ImageType, - pub(crate) flags: vk::ImageCreateFlags, - pub(crate) extent: vk::Extent3D, -} - -#[derive(Debug, Hash, PartialEq, Eq)] -pub enum ImageViewOwner { - User, - Surface(FramebufferCachePtr), -} - -#[derive(Debug, Hash, PartialEq, Eq)] -pub struct ImageView { - pub(crate) image: vk::Image, - pub(crate) view: vk::ImageView, - pub(crate) range: SubresourceRange, - pub(crate) owner: ImageViewOwner, -} - -#[derive(Debug, Hash)] -pub struct Sampler(pub vk::Sampler); - -#[derive(Debug, Hash)] -pub struct RenderPass { - pub raw: vk::RenderPass, - pub clear_attachments_mask: u64, -} - -#[derive(Debug, Hash)] -pub struct Framebuffer { - pub(crate) raw: vk::Framebuffer, - pub(crate) owned: bool, -} - -#[derive(Debug)] -pub struct DescriptorSetLayout { - pub(crate) raw: vk::DescriptorSetLayout, - pub(crate) bindings: Arc>, -} - -#[derive(Debug)] -pub struct DescriptorSet { - pub(crate) raw: vk::DescriptorSet, - pub(crate) bindings: Arc>, -} - -#[derive(Debug, Hash)] -pub struct PipelineLayout { - pub(crate) raw: vk::PipelineLayout, -} - -#[derive(Debug)] -pub struct PipelineCache { - pub(crate) raw: vk::PipelineCache, -} - -#[derive(Debug, Eq, Hash, PartialEq)] -pub struct ShaderModule { - pub(crate) raw: vk::ShaderModule, -} - -#[derive(Debug)] -pub struct DescriptorPool { - pub(crate) raw: vk::DescriptorPool, - pub(crate) device: Arc, - /// This vec only exists to re-use allocations when `DescriptorSet`s are freed. 
- pub(crate) set_free_vec: Vec, -} - -impl pso::DescriptorPool for DescriptorPool { - unsafe fn allocate_sets( - &mut self, - layout_iter: I, - output: &mut SmallVec<[DescriptorSet; 1]>, - ) -> Result<(), pso::AllocationError> - where - I: IntoIterator, - I::Item: Borrow, - { - use std::ptr; - - let mut raw_layouts = Vec::new(); - let mut layout_bindings = Vec::new(); - for layout in layout_iter { - raw_layouts.push(layout.borrow().raw); - layout_bindings.push(layout.borrow().bindings.clone()); - } - - let info = vk::DescriptorSetAllocateInfo { - s_type: vk::StructureType::DESCRIPTOR_SET_ALLOCATE_INFO, - p_next: ptr::null(), - descriptor_pool: self.raw, - descriptor_set_count: raw_layouts.len() as u32, - p_set_layouts: raw_layouts.as_ptr(), - }; - - self.device - .0 - .allocate_descriptor_sets(&info) - .map(|sets| { - output.extend( - sets.into_iter() - .zip(layout_bindings) - .map(|(raw, bindings)| DescriptorSet { raw, bindings }), - ) - }) - .map_err(|err| match err { - vk::Result::ERROR_OUT_OF_HOST_MEMORY => pso::AllocationError::Host, - vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => pso::AllocationError::Device, - vk::Result::ERROR_OUT_OF_POOL_MEMORY => pso::AllocationError::OutOfPoolMemory, - _ => pso::AllocationError::FragmentedPool, - }) - } - - unsafe fn free_sets(&mut self, descriptor_sets: I) - where - I: IntoIterator, - { - self.set_free_vec.clear(); - self.set_free_vec - .extend(descriptor_sets.into_iter().map(|d| d.raw)); - self.device - .0 - .free_descriptor_sets(self.raw, &self.set_free_vec); - } - - unsafe fn reset(&mut self) { - assert_eq!( - Ok(()), - self.device - .0 - .reset_descriptor_pool(self.raw, vk::DescriptorPoolResetFlags::empty()) - ); - } -} - -#[derive(Debug, Hash)] -pub struct QueryPool(pub vk::QueryPool); +use crate::{window::FramebufferCachePtr, Backend, RawDevice}; +use ash::{version::DeviceV1_0, vk}; +use hal::{image::SubresourceRange, pso}; +use std::{borrow::Borrow, sync::Arc}; + +#[derive(Debug, Hash)] +pub struct Semaphore(pub 
vk::Semaphore); + +#[derive(Debug, Hash, PartialEq, Eq)] +pub struct Fence(pub vk::Fence); + +#[derive(Debug, Hash)] +pub struct Event(pub vk::Event); + +#[derive(Debug, Hash)] +pub struct GraphicsPipeline(pub vk::Pipeline); + +#[derive(Debug, Hash)] +pub struct ComputePipeline(pub vk::Pipeline); + +#[derive(Debug, Hash)] +pub struct Memory { + pub(crate) raw: vk::DeviceMemory, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct Buffer { + pub(crate) raw: vk::Buffer, +} + +unsafe impl Sync for Buffer {} +unsafe impl Send for Buffer {} + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct BufferView { + pub(crate) raw: vk::BufferView, +} + +#[derive(Debug, Hash, PartialEq, Eq)] +pub struct Image { + pub(crate) raw: vk::Image, + pub(crate) ty: vk::ImageType, + pub(crate) flags: vk::ImageCreateFlags, + pub(crate) extent: vk::Extent3D, +} + +#[derive(Debug, Hash, PartialEq, Eq)] +pub enum ImageViewOwner { + User, + Surface(FramebufferCachePtr), +} + +#[derive(Debug, Hash, PartialEq, Eq)] +pub struct ImageView { + pub(crate) image: vk::Image, + pub(crate) view: vk::ImageView, + pub(crate) range: SubresourceRange, + pub(crate) owner: ImageViewOwner, +} + +#[derive(Debug, Hash)] +pub struct Sampler(pub vk::Sampler); + +#[derive(Debug, Hash)] +pub struct RenderPass { + pub raw: vk::RenderPass, + pub clear_attachments_mask: u64, +} + +#[derive(Debug, Hash)] +pub struct Framebuffer { + pub(crate) raw: vk::Framebuffer, + pub(crate) owned: bool, +} + +#[derive(Debug)] +pub struct DescriptorSetLayout { + pub(crate) raw: vk::DescriptorSetLayout, + pub(crate) bindings: Arc>, +} + +#[derive(Debug)] +pub struct DescriptorSet { + pub(crate) raw: vk::DescriptorSet, + pub(crate) bindings: Arc>, +} + +#[derive(Debug, Hash)] +pub struct PipelineLayout { + pub(crate) raw: vk::PipelineLayout, +} + +#[derive(Debug)] +pub struct PipelineCache { + pub(crate) raw: vk::PipelineCache, +} + +#[derive(Debug, Eq, Hash, PartialEq)] +pub struct ShaderModule { + pub(crate) raw: 
vk::ShaderModule, +} + +#[derive(Debug)] +pub struct DescriptorPool { + pub(crate) raw: vk::DescriptorPool, + pub(crate) device: Arc, + /// This vec only exists to re-use allocations when `DescriptorSet`s are freed. + pub(crate) set_free_vec: Vec, +} + +impl pso::DescriptorPool for DescriptorPool { + unsafe fn allocate( + &mut self, + layout_iter: I, + list: &mut E, + ) -> Result<(), pso::AllocationError> + where + I: IntoIterator, + I::Item: Borrow, + E: Extend, + { + use std::ptr; + + let mut raw_layouts = Vec::new(); + let mut layout_bindings = Vec::new(); + for layout in layout_iter { + raw_layouts.push(layout.borrow().raw); + layout_bindings.push(layout.borrow().bindings.clone()); + } + + let info = vk::DescriptorSetAllocateInfo { + s_type: vk::StructureType::DESCRIPTOR_SET_ALLOCATE_INFO, + p_next: ptr::null(), + descriptor_pool: self.raw, + descriptor_set_count: raw_layouts.len() as u32, + p_set_layouts: raw_layouts.as_ptr(), + }; + + self.device + .raw + .allocate_descriptor_sets(&info) + .map(|sets| { + list.extend( + sets.into_iter() + .zip(layout_bindings) + .map(|(raw, bindings)| DescriptorSet { raw, bindings }), + ) + }) + .map_err(|err| match err { + vk::Result::ERROR_OUT_OF_HOST_MEMORY => pso::AllocationError::Host, + vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => pso::AllocationError::Device, + vk::Result::ERROR_OUT_OF_POOL_MEMORY => pso::AllocationError::OutOfPoolMemory, + _ => pso::AllocationError::FragmentedPool, + }) + } + + unsafe fn free_sets(&mut self, descriptor_sets: I) + where + I: IntoIterator, + { + self.set_free_vec.clear(); + self.set_free_vec + .extend(descriptor_sets.into_iter().map(|d| d.raw)); + self.device + .raw + .free_descriptor_sets(self.raw, &self.set_free_vec); + } + + unsafe fn reset(&mut self) { + assert_eq!( + Ok(()), + self.device + .raw + .reset_descriptor_pool(self.raw, vk::DescriptorPoolResetFlags::empty()) + ); + } +} + +#[derive(Debug, Hash)] +pub struct QueryPool(pub vk::QueryPool); diff --git 
a/third_party/rust/gfx-backend-vulkan/src/pool.rs b/third_party/rust/gfx-backend-vulkan/src/pool.rs index 707b749a0ae9..4d2b718919b0 100644 --- a/third_party/rust/gfx-backend-vulkan/src/pool.rs +++ b/third_party/rust/gfx-backend-vulkan/src/pool.rs @@ -1,60 +1,64 @@ -use ash::version::DeviceV1_0; -use ash::vk; -use smallvec::SmallVec; -use std::ptr; -use std::sync::Arc; - -use crate::command::CommandBuffer; -use crate::conv; -use crate::{Backend, RawDevice}; -use hal::{command, pool}; - -#[derive(Debug)] -pub struct RawCommandPool { - pub(crate) raw: vk::CommandPool, - pub(crate) device: Arc, -} - -impl pool::CommandPool for RawCommandPool { - unsafe fn reset(&mut self, release_resources: bool) { - let flags = if release_resources { - vk::CommandPoolResetFlags::RELEASE_RESOURCES - } else { - vk::CommandPoolResetFlags::empty() - }; - - assert_eq!(Ok(()), self.device.0.reset_command_pool(self.raw, flags)); - } - - unsafe fn allocate_vec(&mut self, num: usize, level: command::Level) -> SmallVec<[CommandBuffer; 1]> { - let info = vk::CommandBufferAllocateInfo { - s_type: vk::StructureType::COMMAND_BUFFER_ALLOCATE_INFO, - p_next: ptr::null(), - command_pool: self.raw, - level: conv::map_command_buffer_level(level), - command_buffer_count: num as u32, - }; - - let device = &self.device; - let cbufs_raw = device.0 - .allocate_command_buffers(&info) - .expect("Error on command buffer allocation"); - - cbufs_raw - .into_iter() - .map(|buffer| CommandBuffer { - raw: buffer, - device: device.clone(), - }) - .collect() - } - - unsafe fn free(&mut self, cbufs: I) - where - I: IntoIterator, - { - let buffers: SmallVec<[vk::CommandBuffer; 16]> = - cbufs.into_iter().map(|buffer| buffer.raw).collect(); - self.device.0.free_command_buffers(self.raw, &buffers); - } -} +use ash::version::DeviceV1_0; +use ash::vk; +use smallvec::SmallVec; +use std::ptr; +use std::sync::Arc; + +use crate::command::CommandBuffer; +use crate::conv; +use crate::{Backend, RawDevice}; +use hal::{command, 
pool}; + +#[derive(Debug)] +pub struct RawCommandPool { + pub(crate) raw: vk::CommandPool, + pub(crate) device: Arc, +} + +impl pool::CommandPool for RawCommandPool { + unsafe fn reset(&mut self, release_resources: bool) { + let flags = if release_resources { + vk::CommandPoolResetFlags::RELEASE_RESOURCES + } else { + vk::CommandPoolResetFlags::empty() + }; + + assert_eq!(Ok(()), self.device.raw.reset_command_pool(self.raw, flags)); + } + + unsafe fn allocate(&mut self, num: usize, level: command::Level, list: &mut E) + where + E: Extend, + { + let info = vk::CommandBufferAllocateInfo { + s_type: vk::StructureType::COMMAND_BUFFER_ALLOCATE_INFO, + p_next: ptr::null(), + command_pool: self.raw, + level: conv::map_command_buffer_level(level), + command_buffer_count: num as u32, + }; + + let device = &self.device; + + list.extend( + device + .raw + .allocate_command_buffers(&info) + .expect("Error on command buffer allocation") + .into_iter() + .map(|buffer| CommandBuffer { + raw: buffer, + device: Arc::clone(device), + }), + ); + } + + unsafe fn free(&mut self, cbufs: I) + where + I: IntoIterator, + { + let buffers: SmallVec<[vk::CommandBuffer; 16]> = + cbufs.into_iter().map(|buffer| buffer.raw).collect(); + self.device.raw.free_command_buffers(self.raw, &buffers); + } +} diff --git a/third_party/rust/gfx-backend-vulkan/src/window.rs b/third_party/rust/gfx-backend-vulkan/src/window.rs index 4b850f410e15..3dc26d2d2325 100644 --- a/third_party/rust/gfx-backend-vulkan/src/window.rs +++ b/third_party/rust/gfx-backend-vulkan/src/window.rs @@ -1,600 +1,609 @@ -use std::{ - borrow::Borrow, - fmt, - hash, - os::raw::c_void, - ptr, - sync::{Arc, Mutex}, - time::Instant, -}; - -use ash::{extensions::khr, version::DeviceV1_0 as _, vk}; -use hal::{format::Format, window as w}; -use smallvec::SmallVec; - -use crate::{conv, info, native}; -use crate::{ - Backend, - Device, - Instance, - PhysicalDevice, - QueueFamily, - RawDevice, - RawInstance, - VK_ENTRY, -}; - - -#[derive(Debug, 
Default)] -pub struct FramebufferCache { - // We expect exactly one framebuffer per frame, but can support more. - pub framebuffers: SmallVec<[vk::Framebuffer; 1]>, -} - -#[derive(Debug, Default)] -pub struct FramebufferCachePtr(pub Arc>); - -impl hash::Hash for FramebufferCachePtr { - fn hash(&self, hasher: &mut H) { - (self.0.as_ref() as *const Mutex).hash(hasher) - } -} -impl PartialEq for FramebufferCachePtr { - fn eq(&self, other: &Self) -> bool { - Arc::ptr_eq(&self.0, &other.0) - } -} -impl Eq for FramebufferCachePtr {} - -#[derive(Debug)] -struct SurfaceFrame { - image: vk::Image, - view: vk::ImageView, - framebuffers: FramebufferCachePtr, -} - -#[derive(Debug)] -pub struct SurfaceSwapchain { - pub(crate) swapchain: Swapchain, - device: Arc, - fence: native::Fence, - pub(crate) semaphore: native::Semaphore, - frames: Vec, -} - -impl SurfaceSwapchain { - unsafe fn release_resources(self, device: &ash::Device) -> Swapchain { - let _ = device.device_wait_idle(); - device.destroy_fence(self.fence.0, None); - device.destroy_semaphore(self.semaphore.0, None); - for frame in self.frames { - device.destroy_image_view(frame.view, None); - for framebuffer in frame.framebuffers.0.lock().unwrap().framebuffers.drain() { - device.destroy_framebuffer(framebuffer, None); - } - } - self.swapchain - } -} - -pub struct Surface { - // Vk (EXT) specs [29.2.7 Platform-Independent Information] - // For vkDestroySurfaceKHR: Host access to surface must be externally synchronized - pub(crate) raw: Arc, - pub(crate) swapchain: Option, -} - -impl fmt::Debug for Surface { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Surface") - } -} - -pub struct RawSurface { - pub(crate) handle: vk::SurfaceKHR, - pub(crate) functor: khr::Surface, - pub(crate) instance: Arc, -} - -impl Instance { - #[cfg(all( - feature = "x11", - unix, - not(target_os = "android"), - not(target_os = "macos") - ))] - pub fn create_surface_from_xlib(&self, dpy: *mut vk::Display, window: 
vk::Window) -> Surface { - let entry = VK_ENTRY - .as_ref() - .expect("Unable to load Vulkan entry points"); - - if !self.extensions.contains(&khr::XlibSurface::name()) { - panic!("Vulkan driver does not support VK_KHR_XLIB_SURFACE"); - } - - let surface = { - let xlib_loader = khr::XlibSurface::new(entry, &self.raw.0); - let info = vk::XlibSurfaceCreateInfoKHR { - s_type: vk::StructureType::XLIB_SURFACE_CREATE_INFO_KHR, - p_next: ptr::null(), - flags: vk::XlibSurfaceCreateFlagsKHR::empty(), - window, - dpy, - }; - - unsafe { xlib_loader.create_xlib_surface(&info, None) } - .expect("XlibSurface::create_xlib_surface() failed") - }; - - self.create_surface_from_vk_surface_khr(surface) - } - - #[cfg(all( - feature = "xcb", - unix, - not(target_os = "android"), - not(target_os = "macos") - ))] - pub fn create_surface_from_xcb( - &self, - connection: *mut vk::xcb_connection_t, - window: vk::xcb_window_t, - ) -> Surface { - let entry = VK_ENTRY - .as_ref() - .expect("Unable to load Vulkan entry points"); - - if !self.extensions.contains(&khr::XcbSurface::name()) { - panic!("Vulkan driver does not support VK_KHR_XCB_SURFACE"); - } - - let surface = { - let xcb_loader = khr::XcbSurface::new(entry, &self.raw.0); - let info = vk::XcbSurfaceCreateInfoKHR { - s_type: vk::StructureType::XCB_SURFACE_CREATE_INFO_KHR, - p_next: ptr::null(), - flags: vk::XcbSurfaceCreateFlagsKHR::empty(), - window, - connection, - }; - - unsafe { xcb_loader.create_xcb_surface(&info, None) } - .expect("XcbSurface::create_xcb_surface() failed") - }; - - self.create_surface_from_vk_surface_khr(surface) - } - - #[cfg(all(unix, not(target_os = "android")))] - pub fn create_surface_from_wayland( - &self, - display: *mut c_void, - surface: *mut c_void, - ) -> Surface { - let entry = VK_ENTRY - .as_ref() - .expect("Unable to load Vulkan entry points"); - - if !self.extensions.contains(&khr::WaylandSurface::name()) { - panic!("Vulkan driver does not support VK_KHR_WAYLAND_SURFACE"); - } - - let surface = { 
- let w_loader = khr::WaylandSurface::new(entry, &self.raw.0); - let info = vk::WaylandSurfaceCreateInfoKHR { - s_type: vk::StructureType::WAYLAND_SURFACE_CREATE_INFO_KHR, - p_next: ptr::null(), - flags: vk::WaylandSurfaceCreateFlagsKHR::empty(), - display: display as *mut _, - surface: surface as *mut _, - }; - - unsafe { w_loader.create_wayland_surface(&info, None) }.expect("WaylandSurface failed") - }; - - self.create_surface_from_vk_surface_khr(surface) - } - - #[cfg(target_os = "android")] - pub fn create_surface_android(&self, window: *const c_void) -> Surface { - let entry = VK_ENTRY - .as_ref() - .expect("Unable to load Vulkan entry points"); - - let surface = { - let loader = khr::AndroidSurface::new(entry, &self.raw.0); - let info = vk::AndroidSurfaceCreateInfoKHR { - s_type: vk::StructureType::ANDROID_SURFACE_CREATE_INFO_KHR, - p_next: ptr::null(), - flags: vk::AndroidSurfaceCreateFlagsKHR::empty(), - window: window as *const _ as *mut _, - }; - - unsafe { loader.create_android_surface(&info, None) }.expect("AndroidSurface failed") - }; - - self.create_surface_from_vk_surface_khr(surface) - } - - #[cfg(windows)] - pub fn create_surface_from_hwnd(&self, hinstance: *mut c_void, hwnd: *mut c_void) -> Surface { - let entry = VK_ENTRY - .as_ref() - .expect("Unable to load Vulkan entry points"); - - if !self.extensions.contains(&khr::Win32Surface::name()) { - panic!("Vulkan driver does not support VK_KHR_WIN32_SURFACE"); - } - - let surface = { - let info = vk::Win32SurfaceCreateInfoKHR { - s_type: vk::StructureType::WIN32_SURFACE_CREATE_INFO_KHR, - p_next: ptr::null(), - flags: vk::Win32SurfaceCreateFlagsKHR::empty(), - hinstance: hinstance as *mut _, - hwnd: hwnd as *mut _, - }; - let win32_loader = khr::Win32Surface::new(entry, &self.raw.0); - unsafe { - win32_loader - .create_win32_surface(&info, None) - .expect("Unable to create Win32 surface") - } - }; - - self.create_surface_from_vk_surface_khr(surface) - } - - #[cfg(target_os = "macos")] - pub fn 
create_surface_from_ns_view(&self, view: *mut c_void) -> Surface { - use ash::extensions::mvk; - use core_graphics::{base::CGFloat, geometry::CGRect}; - use objc::runtime::{Object, BOOL, YES}; - - // TODO: this logic is duplicated from gfx-backend-metal, refactor? - unsafe { - let view = view as *mut Object; - let existing: *mut Object = msg_send![view, layer]; - let class = class!(CAMetalLayer); - - let use_current = if existing.is_null() { - false - } else { - let result: BOOL = msg_send![existing, isKindOfClass: class]; - result == YES - }; - - if !use_current { - let layer: *mut Object = msg_send![class, new]; - let () = msg_send![view, setLayer: layer]; - let bounds: CGRect = msg_send![view, bounds]; - let () = msg_send![layer, setBounds: bounds]; - - let window: *mut Object = msg_send![view, window]; - if !window.is_null() { - let scale_factor: CGFloat = msg_send![window, backingScaleFactor]; - let () = msg_send![layer, setContentsScale: scale_factor]; - } - } - } - - let entry = VK_ENTRY - .as_ref() - .expect("Unable to load Vulkan entry points"); - - if !self.extensions.contains(&mvk::MacOSSurface::name()) { - panic!("Vulkan driver does not support VK_MVK_MACOS_SURFACE"); - } - - let surface = { - let mac_os_loader = mvk::MacOSSurface::new(entry, &self.raw.0); - let info = vk::MacOSSurfaceCreateInfoMVK { - s_type: vk::StructureType::MACOS_SURFACE_CREATE_INFO_M, - p_next: ptr::null(), - flags: vk::MacOSSurfaceCreateFlagsMVK::empty(), - p_view: view, - }; - - unsafe { - mac_os_loader - .create_mac_os_surface_mvk(&info, None) - .expect("Unable to create macOS surface") - } - }; - - self.create_surface_from_vk_surface_khr(surface) - } - - pub fn create_surface_from_vk_surface_khr(&self, surface: vk::SurfaceKHR) -> Surface { - let entry = VK_ENTRY - .as_ref() - .expect("Unable to load Vulkan entry points"); - - let functor = khr::Surface::new(entry, &self.raw.0); - - let raw = Arc::new(RawSurface { - handle: surface, - functor, - instance: self.raw.clone(), - 
}); - - Surface { - raw, - swapchain: None, - } - } -} - -impl w::Surface for Surface { - fn supports_queue_family(&self, queue_family: &QueueFamily) -> bool { - unsafe { - self.raw.functor.get_physical_device_surface_support( - queue_family.device, - queue_family.index, - self.raw.handle, - ) - } - } - - fn capabilities(&self, physical_device: &PhysicalDevice) -> w::SurfaceCapabilities { - // Capabilities - let caps = unsafe { - self.raw - .functor - .get_physical_device_surface_capabilities(physical_device.handle, self.raw.handle) - } - .expect("Unable to query surface capabilities"); - - // If image count is 0, the support number of images is unlimited. - let max_images = if caps.max_image_count == 0 { - !0 - } else { - caps.max_image_count - }; - - // `0xFFFFFFFF` indicates that the extent depends on the created swapchain. - let current_extent = if caps.current_extent.width != !0 && caps.current_extent.height != !0 - { - Some(w::Extent2D { - width: caps.current_extent.width, - height: caps.current_extent.height, - }) - } else { - None - }; - - let min_extent = w::Extent2D { - width: caps.min_image_extent.width, - height: caps.min_image_extent.height, - }; - - let max_extent = w::Extent2D { - width: caps.max_image_extent.width, - height: caps.max_image_extent.height, - }; - - let raw_present_modes = unsafe { - self.raw - .functor - .get_physical_device_surface_present_modes(physical_device.handle, self.raw.handle) - } - .expect("Unable to query present modes"); - - w::SurfaceCapabilities { - present_modes: raw_present_modes - .into_iter() - .fold(w::PresentMode::empty(), |u, m| { u | conv::map_vk_present_mode(m) }), - composite_alpha_modes: conv::map_vk_composite_alpha(caps.supported_composite_alpha), - image_count: caps.min_image_count ..= max_images, - current_extent, - extents: min_extent ..= max_extent, - max_image_layers: caps.max_image_array_layers as _, - usage: conv::map_vk_image_usage(caps.supported_usage_flags), - } - } - - fn supported_formats(&self, 
physical_device: &PhysicalDevice) -> Option> { - // Swapchain formats - let raw_formats = unsafe { - self.raw - .functor - .get_physical_device_surface_formats(physical_device.handle, self.raw.handle) - } - .expect("Unable to query surface formats"); - - match raw_formats[0].format { - // If pSurfaceFormats includes just one entry, whose value for format is - // VK_FORMAT_UNDEFINED, surface has no preferred format. In this case, the application - // can use any valid VkFormat value. - vk::Format::UNDEFINED => None, - _ => Some( - raw_formats - .into_iter() - .filter_map(|sf| conv::map_vk_format(sf.format)) - .collect(), - ), - } - } -} - -#[derive(Debug)] -pub struct SurfaceImage { - pub(crate) index: w::SwapImageIndex, - view: native::ImageView, -} - -impl Borrow for SurfaceImage { - fn borrow(&self) -> &native::ImageView { - &self.view - } -} - -impl w::PresentationSurface for Surface { - type SwapchainImage = SurfaceImage; - - unsafe fn configure_swapchain( - &mut self, - device: &Device, - config: w::SwapchainConfig, - ) -> Result<(), w::CreationError> { - use hal::device::Device as _; - - let format = config.format; - let old = self - .swapchain - .take() - .map(|ssc| ssc.release_resources(&device.raw.0)); - - let (swapchain, images) = device.create_swapchain(self, config, old)?; - - self.swapchain = Some(SurfaceSwapchain { - swapchain, - device: Arc::clone(&device.raw), - fence: device.create_fence(false).unwrap(), - semaphore: device.create_semaphore().unwrap(), - frames: images - .iter() - .map(|image| { - let view = device - .create_image_view( - image, - hal::image::ViewKind::D2, - format, - hal::format::Swizzle::NO, - hal::image::SubresourceRange { - aspects: hal::format::Aspects::COLOR, - layers: 0 .. 1, - levels: 0 .. 
1, - }, - ) - .unwrap(); - SurfaceFrame { - image: view.image, - view: view.view, - framebuffers: Default::default(), - } - }) - .collect(), - }); - - Ok(()) - } - - unsafe fn unconfigure_swapchain(&mut self, device: &Device) { - if let Some(ssc) = self.swapchain.take() { - let swapchain = ssc.release_resources(&device.raw.0); - swapchain.functor.destroy_swapchain(swapchain.raw, None); - } - } - - unsafe fn acquire_image( - &mut self, - mut timeout_ns: u64, - ) -> Result<(Self::SwapchainImage, Option), w::AcquireError> { - use hal::window::Swapchain as _; - - let ssc = self.swapchain.as_mut().unwrap(); - let moment = Instant::now(); - let (index, suboptimal) = - ssc.swapchain - .acquire_image(timeout_ns, None, Some(&ssc.fence))?; - timeout_ns = timeout_ns.saturating_sub(moment.elapsed().as_nanos() as u64); - let fences = &[ssc.fence.0]; - - match ssc.device.0.wait_for_fences(fences, true, timeout_ns) { - Ok(()) => { - ssc.device.0.reset_fences(fences).unwrap(); - let frame = &ssc.frames[index as usize]; - // We have just waited for the frame to be fully available on CPU. - // All the associated framebuffers are expected to be destroyed by now. - for framebuffer in frame.framebuffers.0.lock().unwrap().framebuffers.drain() { - ssc.device.0.destroy_framebuffer(framebuffer, None); - } - let image = Self::SwapchainImage { - index, - view: native::ImageView { - image: frame.image, - view: frame.view, - range: hal::image::SubresourceRange { - aspects: hal::format::Aspects::COLOR, - layers: 0 .. 1, - levels: 0 .. 
1, - }, - owner: native::ImageViewOwner::Surface(FramebufferCachePtr(Arc::clone( - &frame.framebuffers.0, - ))), - }, - }; - Ok((image, suboptimal)) - } - Err(vk::Result::NOT_READY) => Err(w::AcquireError::NotReady), - Err(vk::Result::TIMEOUT) => Err(w::AcquireError::Timeout), - Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => Err(w::AcquireError::OutOfDate), - Err(vk::Result::ERROR_SURFACE_LOST_KHR) => { - Err(w::AcquireError::SurfaceLost(hal::device::SurfaceLost)) - } - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(w::AcquireError::OutOfMemory( - hal::device::OutOfMemory::Host, - )), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(w::AcquireError::OutOfMemory( - hal::device::OutOfMemory::Device, - )), - Err(vk::Result::ERROR_DEVICE_LOST) => { - Err(w::AcquireError::DeviceLost(hal::device::DeviceLost)) - } - _ => unreachable!(), - } - } -} - -pub struct Swapchain { - pub(crate) raw: vk::SwapchainKHR, - pub(crate) functor: khr::Swapchain, - pub(crate) vendor_id: u32, -} - -impl fmt::Debug for Swapchain { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Swapchain") - } -} - -impl w::Swapchain for Swapchain { - unsafe fn acquire_image( - &mut self, - timeout_ns: u64, - semaphore: Option<&native::Semaphore>, - fence: Option<&native::Fence>, - ) -> Result<(w::SwapImageIndex, Option), w::AcquireError> { - let semaphore = semaphore.map_or(vk::Semaphore::null(), |s| s.0); - let fence = fence.map_or(vk::Fence::null(), |f| f.0); - - // will block if no image is available - let index = self - .functor - .acquire_next_image(self.raw, timeout_ns, semaphore, fence); - - match index { - // special case for Intel Vulkan returning bizzare values (ugh) - Ok((i, _)) if self.vendor_id == info::intel::VENDOR && i > 0x100 => Err(w::AcquireError::OutOfDate), - Ok((i, true)) => Ok((i, Some(w::Suboptimal))), - Ok((i, false)) => Ok((i, None)), - Err(vk::Result::NOT_READY) => Err(w::AcquireError::NotReady), - Err(vk::Result::TIMEOUT) => 
Err(w::AcquireError::Timeout), - Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => Err(w::AcquireError::OutOfDate), - Err(vk::Result::ERROR_SURFACE_LOST_KHR) => { - Err(w::AcquireError::SurfaceLost(hal::device::SurfaceLost)) - } - Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(w::AcquireError::OutOfMemory( - hal::device::OutOfMemory::Host, - )), - Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(w::AcquireError::OutOfMemory( - hal::device::OutOfMemory::Device, - )), - Err(vk::Result::ERROR_DEVICE_LOST) => { - Err(w::AcquireError::DeviceLost(hal::device::DeviceLost)) - } - _ => panic!("Failed to acquire image."), - } - } -} +use std::{ + borrow::Borrow, + fmt, + hash, + os::raw::c_void, + ptr, + sync::{Arc, Mutex}, + time::Instant, +}; + +use ash::{extensions::khr, version::DeviceV1_0 as _, vk}; +use hal::{format::Format, window as w}; +use smallvec::SmallVec; + +use crate::{conv, info, native}; +use crate::{ + Backend, + Device, + Instance, + PhysicalDevice, + QueueFamily, + RawDevice, + RawInstance, + VK_ENTRY, +}; + +#[derive(Debug, Default)] +pub struct FramebufferCache { + // We expect exactly one framebuffer per frame, but can support more. 
+ pub framebuffers: SmallVec<[vk::Framebuffer; 1]>, +} + +#[derive(Debug, Default)] +pub struct FramebufferCachePtr(pub Arc>); + +impl hash::Hash for FramebufferCachePtr { + fn hash(&self, hasher: &mut H) { + (self.0.as_ref() as *const Mutex).hash(hasher) + } +} +impl PartialEq for FramebufferCachePtr { + fn eq(&self, other: &Self) -> bool { + Arc::ptr_eq(&self.0, &other.0) + } +} +impl Eq for FramebufferCachePtr {} + +#[derive(Debug)] +struct SurfaceFrame { + image: vk::Image, + view: vk::ImageView, + framebuffers: FramebufferCachePtr, +} + +#[derive(Debug)] +pub struct SurfaceSwapchain { + pub(crate) swapchain: Swapchain, + device: Arc, + fence: native::Fence, + pub(crate) semaphore: native::Semaphore, + frames: Vec, +} + +impl SurfaceSwapchain { + unsafe fn release_resources(self, device: &ash::Device) -> Swapchain { + let _ = device.device_wait_idle(); + device.destroy_fence(self.fence.0, None); + device.destroy_semaphore(self.semaphore.0, None); + for frame in self.frames { + device.destroy_image_view(frame.view, None); + for framebuffer in frame.framebuffers.0.lock().unwrap().framebuffers.drain(..) 
{ + device.destroy_framebuffer(framebuffer, None); + } + } + self.swapchain + } +} + +pub struct Surface { + // Vk (EXT) specs [29.2.7 Platform-Independent Information] + // For vkDestroySurfaceKHR: Host access to surface must be externally synchronized + pub(crate) raw: Arc, + pub(crate) swapchain: Option, +} + +impl fmt::Debug for Surface { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Surface") + } +} + +pub struct RawSurface { + pub(crate) handle: vk::SurfaceKHR, + pub(crate) functor: khr::Surface, + pub(crate) instance: Arc, +} + +impl Instance { + #[cfg(all( + feature = "x11", + unix, + not(target_os = "android"), + not(target_os = "macos") + ))] + pub fn create_surface_from_xlib(&self, dpy: *mut vk::Display, window: vk::Window) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&khr::XlibSurface::name()) { + panic!("Vulkan driver does not support VK_KHR_XLIB_SURFACE"); + } + + let surface = { + let xlib_loader = khr::XlibSurface::new(entry, &self.raw.0); + let info = vk::XlibSurfaceCreateInfoKHR { + s_type: vk::StructureType::XLIB_SURFACE_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::XlibSurfaceCreateFlagsKHR::empty(), + window, + dpy, + }; + + unsafe { xlib_loader.create_xlib_surface(&info, None) } + .expect("XlibSurface::create_xlib_surface() failed") + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(all( + feature = "xcb", + unix, + not(target_os = "android"), + not(target_os = "macos") + ))] + pub fn create_surface_from_xcb( + &self, + connection: *mut vk::xcb_connection_t, + window: vk::xcb_window_t, + ) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&khr::XcbSurface::name()) { + panic!("Vulkan driver does not support VK_KHR_XCB_SURFACE"); + } + + let surface = { + let xcb_loader = khr::XcbSurface::new(entry, &self.raw.0); + let info = 
vk::XcbSurfaceCreateInfoKHR { + s_type: vk::StructureType::XCB_SURFACE_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::XcbSurfaceCreateFlagsKHR::empty(), + window, + connection, + }; + + unsafe { xcb_loader.create_xcb_surface(&info, None) } + .expect("XcbSurface::create_xcb_surface() failed") + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(all(unix, not(target_os = "android")))] + pub fn create_surface_from_wayland( + &self, + display: *mut c_void, + surface: *mut c_void, + ) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&khr::WaylandSurface::name()) { + panic!("Vulkan driver does not support VK_KHR_WAYLAND_SURFACE"); + } + + let surface = { + let w_loader = khr::WaylandSurface::new(entry, &self.raw.0); + let info = vk::WaylandSurfaceCreateInfoKHR { + s_type: vk::StructureType::WAYLAND_SURFACE_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::WaylandSurfaceCreateFlagsKHR::empty(), + display: display as *mut _, + surface: surface as *mut _, + }; + + unsafe { w_loader.create_wayland_surface(&info, None) }.expect("WaylandSurface failed") + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(target_os = "android")] + pub fn create_surface_android(&self, window: *const c_void) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + let surface = { + let loader = khr::AndroidSurface::new(entry, &self.raw.0); + let info = vk::AndroidSurfaceCreateInfoKHR { + s_type: vk::StructureType::ANDROID_SURFACE_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::AndroidSurfaceCreateFlagsKHR::empty(), + window: window as *const _ as *mut _, + }; + + unsafe { loader.create_android_surface(&info, None) }.expect("AndroidSurface failed") + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(windows)] + pub fn create_surface_from_hwnd(&self, hinstance: *mut c_void, hwnd: *mut c_void) -> Surface 
{ + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&khr::Win32Surface::name()) { + panic!("Vulkan driver does not support VK_KHR_WIN32_SURFACE"); + } + + let surface = { + let info = vk::Win32SurfaceCreateInfoKHR { + s_type: vk::StructureType::WIN32_SURFACE_CREATE_INFO_KHR, + p_next: ptr::null(), + flags: vk::Win32SurfaceCreateFlagsKHR::empty(), + hinstance: hinstance as *mut _, + hwnd: hwnd as *mut _, + }; + let win32_loader = khr::Win32Surface::new(entry, &self.raw.0); + unsafe { + win32_loader + .create_win32_surface(&info, None) + .expect("Unable to create Win32 surface") + } + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + #[cfg(target_os = "macos")] + pub fn create_surface_from_ns_view(&self, view: *mut c_void) -> Surface { + use ash::extensions::mvk; + use core_graphics::{base::CGFloat, geometry::CGRect}; + use objc::runtime::{Object, BOOL, YES}; + + // TODO: this logic is duplicated from gfx-backend-metal, refactor? 
+ unsafe { + let view = view as *mut Object; + let existing: *mut Object = msg_send![view, layer]; + let class = class!(CAMetalLayer); + + let use_current = if existing.is_null() { + false + } else { + let result: BOOL = msg_send![existing, isKindOfClass: class]; + result == YES + }; + + if !use_current { + let layer: *mut Object = msg_send![class, new]; + let () = msg_send![view, setLayer: layer]; + let bounds: CGRect = msg_send![view, bounds]; + let () = msg_send![layer, setBounds: bounds]; + + let window: *mut Object = msg_send![view, window]; + if !window.is_null() { + let scale_factor: CGFloat = msg_send![window, backingScaleFactor]; + let () = msg_send![layer, setContentsScale: scale_factor]; + } + } + } + + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + if !self.extensions.contains(&mvk::MacOSSurface::name()) { + panic!("Vulkan driver does not support VK_MVK_MACOS_SURFACE"); + } + + let surface = { + let mac_os_loader = mvk::MacOSSurface::new(entry, &self.raw.0); + let info = vk::MacOSSurfaceCreateInfoMVK { + s_type: vk::StructureType::MACOS_SURFACE_CREATE_INFO_M, + p_next: ptr::null(), + flags: vk::MacOSSurfaceCreateFlagsMVK::empty(), + p_view: view, + }; + + unsafe { + mac_os_loader + .create_mac_os_surface_mvk(&info, None) + .expect("Unable to create macOS surface") + } + }; + + self.create_surface_from_vk_surface_khr(surface) + } + + pub fn create_surface_from_vk_surface_khr(&self, surface: vk::SurfaceKHR) -> Surface { + let entry = VK_ENTRY + .as_ref() + .expect("Unable to load Vulkan entry points"); + + let functor = khr::Surface::new(entry, &self.raw.0); + + let raw = Arc::new(RawSurface { + handle: surface, + functor, + instance: self.raw.clone(), + }); + + Surface { + raw, + swapchain: None, + } + } +} + +impl w::Surface for Surface { + fn supports_queue_family(&self, queue_family: &QueueFamily) -> bool { + match unsafe { + self.raw.functor.get_physical_device_surface_support( + queue_family.device, + 
queue_family.index, + self.raw.handle, + ) + } { + Ok(ok) => ok, + Err(e) => { + error!("get_physical_device_surface_support error {:?}", e); + false + } + } + } + + fn capabilities(&self, physical_device: &PhysicalDevice) -> w::SurfaceCapabilities { + // Capabilities + let caps = unsafe { + self.raw + .functor + .get_physical_device_surface_capabilities(physical_device.handle, self.raw.handle) + } + .expect("Unable to query surface capabilities"); + + // If image count is 0, the support number of images is unlimited. + let max_images = if caps.max_image_count == 0 { + !0 + } else { + caps.max_image_count + }; + + // `0xFFFFFFFF` indicates that the extent depends on the created swapchain. + let current_extent = if caps.current_extent.width != !0 && caps.current_extent.height != !0 + { + Some(w::Extent2D { + width: caps.current_extent.width, + height: caps.current_extent.height, + }) + } else { + None + }; + + let min_extent = w::Extent2D { + width: caps.min_image_extent.width, + height: caps.min_image_extent.height, + }; + + let max_extent = w::Extent2D { + width: caps.max_image_extent.width, + height: caps.max_image_extent.height, + }; + + let raw_present_modes = unsafe { + self.raw + .functor + .get_physical_device_surface_present_modes(physical_device.handle, self.raw.handle) + } + .expect("Unable to query present modes"); + + w::SurfaceCapabilities { + present_modes: raw_present_modes + .into_iter() + .fold(w::PresentMode::empty(), |u, m| { + u | conv::map_vk_present_mode(m) + }), + composite_alpha_modes: conv::map_vk_composite_alpha(caps.supported_composite_alpha), + image_count: caps.min_image_count ..= max_images, + current_extent, + extents: min_extent ..= max_extent, + max_image_layers: caps.max_image_array_layers as _, + usage: conv::map_vk_image_usage(caps.supported_usage_flags), + } + } + + fn supported_formats(&self, physical_device: &PhysicalDevice) -> Option> { + // Swapchain formats + let raw_formats = unsafe { + self.raw + .functor + 
.get_physical_device_surface_formats(physical_device.handle, self.raw.handle) + } + .expect("Unable to query surface formats"); + + match raw_formats[0].format { + // If pSurfaceFormats includes just one entry, whose value for format is + // VK_FORMAT_UNDEFINED, surface has no preferred format. In this case, the application + // can use any valid VkFormat value. + vk::Format::UNDEFINED => None, + _ => Some( + raw_formats + .into_iter() + .filter_map(|sf| conv::map_vk_format(sf.format)) + .collect(), + ), + } + } +} + +#[derive(Debug)] +pub struct SurfaceImage { + pub(crate) index: w::SwapImageIndex, + view: native::ImageView, +} + +impl Borrow for SurfaceImage { + fn borrow(&self) -> &native::ImageView { + &self.view + } +} + +impl w::PresentationSurface for Surface { + type SwapchainImage = SurfaceImage; + + unsafe fn configure_swapchain( + &mut self, + device: &Device, + config: w::SwapchainConfig, + ) -> Result<(), w::CreationError> { + use hal::device::Device as _; + + let format = config.format; + let old = self + .swapchain + .take() + .map(|ssc| ssc.release_resources(&device.shared.raw)); + + let (swapchain, images) = device.create_swapchain(self, config, old)?; + + self.swapchain = Some(SurfaceSwapchain { + swapchain, + device: Arc::clone(&device.shared), + fence: device.create_fence(false).unwrap(), + semaphore: device.create_semaphore().unwrap(), + frames: images + .iter() + .map(|image| { + let view = device + .create_image_view( + image, + hal::image::ViewKind::D2, + format, + hal::format::Swizzle::NO, + hal::image::SubresourceRange { + aspects: hal::format::Aspects::COLOR, + layers: 0 .. 1, + levels: 0 .. 
1, + }, + ) + .unwrap(); + SurfaceFrame { + image: view.image, + view: view.view, + framebuffers: Default::default(), + } + }) + .collect(), + }); + + Ok(()) + } + + unsafe fn unconfigure_swapchain(&mut self, device: &Device) { + if let Some(ssc) = self.swapchain.take() { + let swapchain = ssc.release_resources(&device.shared.raw); + swapchain.functor.destroy_swapchain(swapchain.raw, None); + } + } + + unsafe fn acquire_image( + &mut self, + mut timeout_ns: u64, + ) -> Result<(Self::SwapchainImage, Option), w::AcquireError> { + use hal::window::Swapchain as _; + + let ssc = self.swapchain.as_mut().unwrap(); + let moment = Instant::now(); + let (index, suboptimal) = + ssc.swapchain + .acquire_image(timeout_ns, None, Some(&ssc.fence))?; + timeout_ns = timeout_ns.saturating_sub(moment.elapsed().as_nanos() as u64); + let fences = &[ssc.fence.0]; + + match ssc.device.raw.wait_for_fences(fences, true, timeout_ns) { + Ok(()) => { + ssc.device.raw.reset_fences(fences).unwrap(); + let frame = &ssc.frames[index as usize]; + // We have just waited for the frame to be fully available on CPU. + // All the associated framebuffers are expected to be destroyed by now. + for framebuffer in frame.framebuffers.0.lock().unwrap().framebuffers.drain(..) { + ssc.device.raw.destroy_framebuffer(framebuffer, None); + } + let image = Self::SwapchainImage { + index, + view: native::ImageView { + image: frame.image, + view: frame.view, + range: hal::image::SubresourceRange { + aspects: hal::format::Aspects::COLOR, + layers: 0 .. 1, + levels: 0 .. 
1, + }, + owner: native::ImageViewOwner::Surface(FramebufferCachePtr(Arc::clone( + &frame.framebuffers.0, + ))), + }, + }; + Ok((image, suboptimal)) + } + Err(vk::Result::NOT_READY) => Err(w::AcquireError::NotReady), + Err(vk::Result::TIMEOUT) => Err(w::AcquireError::Timeout), + Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => Err(w::AcquireError::OutOfDate), + Err(vk::Result::ERROR_SURFACE_LOST_KHR) => { + Err(w::AcquireError::SurfaceLost(hal::device::SurfaceLost)) + } + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + Err(w::AcquireError::OutOfMemory(hal::device::OutOfMemory::Host)) + } + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(w::AcquireError::OutOfMemory( + hal::device::OutOfMemory::Device, + )), + Err(vk::Result::ERROR_DEVICE_LOST) => { + Err(w::AcquireError::DeviceLost(hal::device::DeviceLost)) + } + _ => unreachable!(), + } + } +} + +pub struct Swapchain { + pub(crate) raw: vk::SwapchainKHR, + pub(crate) functor: khr::Swapchain, + pub(crate) vendor_id: u32, +} + +impl fmt::Debug for Swapchain { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Swapchain") + } +} + +impl w::Swapchain for Swapchain { + unsafe fn acquire_image( + &mut self, + timeout_ns: u64, + semaphore: Option<&native::Semaphore>, + fence: Option<&native::Fence>, + ) -> Result<(w::SwapImageIndex, Option), w::AcquireError> { + let semaphore = semaphore.map_or(vk::Semaphore::null(), |s| s.0); + let fence = fence.map_or(vk::Fence::null(), |f| f.0); + + // will block if no image is available + let index = self + .functor + .acquire_next_image(self.raw, timeout_ns, semaphore, fence); + + match index { + // special case for Intel Vulkan returning bizzare values (ugh) + Ok((i, _)) if self.vendor_id == info::intel::VENDOR && i > 0x100 => { + Err(w::AcquireError::OutOfDate) + } + Ok((i, true)) => Ok((i, Some(w::Suboptimal))), + Ok((i, false)) => Ok((i, None)), + Err(vk::Result::NOT_READY) => Err(w::AcquireError::NotReady), + Err(vk::Result::TIMEOUT) => 
Err(w::AcquireError::Timeout), + Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => Err(w::AcquireError::OutOfDate), + Err(vk::Result::ERROR_SURFACE_LOST_KHR) => { + Err(w::AcquireError::SurfaceLost(hal::device::SurfaceLost)) + } + Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { + Err(w::AcquireError::OutOfMemory(hal::device::OutOfMemory::Host)) + } + Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(w::AcquireError::OutOfMemory( + hal::device::OutOfMemory::Device, + )), + Err(vk::Result::ERROR_DEVICE_LOST) => { + Err(w::AcquireError::DeviceLost(hal::device::DeviceLost)) + } + _ => panic!("Failed to acquire image."), + } + } +} diff --git a/third_party/rust/gfx-descriptor/.cargo-checksum.json b/third_party/rust/gfx-descriptor/.cargo-checksum.json new file mode 100644 index 000000000000..fe3a3cf30e5b --- /dev/null +++ b/third_party/rust/gfx-descriptor/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"612b857de2bc0447095a4207ccd8b91fc1f36349411232cbe7548a770571d906","src/allocator.rs":"5896b40325845e4a825a229abf2451bbec605269904d5cf611dab67c3b1debfc","src/counts.rs":"56b77dcb8ac7805941bd3f489931e1f138ca46da72990e937294b906df11378c","src/lib.rs":"f99ad2232ddf296622b82213b7a85c038cd56efadfa01c02d30c571d6f27f3cb"},"package":"1bf35f5d66d1bc56e63e68d7528441453f25992bd954b84309d23c659df2c5da"} \ No newline at end of file diff --git a/third_party/rust/rendy-descriptor/Cargo.toml b/third_party/rust/gfx-descriptor/Cargo.toml similarity index 60% rename from third_party/rust/rendy-descriptor/Cargo.toml rename to third_party/rust/gfx-descriptor/Cargo.toml index 8d949b0b7934..f68266632076 100644 --- a/third_party/rust/rendy-descriptor/Cargo.toml +++ b/third_party/rust/gfx-descriptor/Cargo.toml @@ -12,24 +12,21 @@ [package] edition = "2018" -name = "rendy-descriptor" -version = "0.5.1" -authors = ["omni-viral "] -description = "Rendy's descriptor allocator" -documentation = "https://docs.rs/rendy-descriptor" -keywords = ["graphics", "gfx-hal", "rendy"] +name = "gfx-descriptor" 
+version = "0.1.0" +authors = ["omni-viral ", "The Gfx-rs Developers"] +description = "gfx-hal descriptor allocator" +documentation = "https://docs.rs/gfx-descriptor" +keywords = ["graphics", "gfx-hal"] categories = ["rendering"] license = "MIT OR Apache-2.0" -repository = "https://github.com/amethyst/rendy" -[dependencies.gfx-hal] -version = "0.4" +repository = "https://github.com/gfx-rs/gfx-extras" +[dependencies.fxhash] +version = "0.2" + +[dependencies.hal] +version = "0.5" +package = "gfx-hal" [dependencies.log] version = "0.4" - -[dependencies.relevant] -version = "0.4" -features = ["log"] - -[dependencies.smallvec] -version = "0.6" diff --git a/third_party/rust/gfx-descriptor/src/allocator.rs b/third_party/rust/gfx-descriptor/src/allocator.rs new file mode 100644 index 000000000000..8f9dc65a32b4 --- /dev/null +++ b/third_party/rust/gfx-descriptor/src/allocator.rs @@ -0,0 +1,364 @@ +use { + crate::counts::DescriptorCounts, + hal::{ + device::{Device, OutOfMemory}, + pso::{AllocationError, DescriptorPool as _, DescriptorPoolCreateFlags}, + Backend, + }, + std::{ + collections::{HashMap, VecDeque}, + hash::BuildHasherDefault, + }, +}; + +type PoolIndex = u32; + +const MIN_SETS: u32 = 64; +const MAX_SETS: u32 = 512; + +/// Descriptor set from allocator. 
+#[derive(Debug)] +pub struct DescriptorSet { + raw: B::DescriptorSet, + pool_id: PoolIndex, + counts: DescriptorCounts, +} + +impl DescriptorSet { + pub fn raw(&self) -> &B::DescriptorSet { + &self.raw + } +} + +#[derive(Debug)] +struct Allocation { + sets: Vec, + pools: Vec, +} + +impl Allocation { + unsafe fn grow( + &mut self, + pool: &mut B::DescriptorPool, + layout: &B::DescriptorSetLayout, + count: u32, + ) -> Result<(), OutOfMemory> { + let sets_were = self.sets.len(); + match pool.allocate(std::iter::repeat(layout).take(count as usize), &mut self.sets) { + Err(err) => { + pool.free_sets(self.sets.drain(sets_were..)); + Err(match err { + AllocationError::Host => OutOfMemory::Host, + AllocationError::Device => OutOfMemory::Device, + _ => { + // We check pool for free descriptors and sets before calling this function, + // so it can't be exhausted. + // And it can't be fragmented either according to spec + // + // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#VkDescriptorPoolCreateInfo + // + // """ + // Additionally, if all sets allocated from the pool since it was created or most recently reset + // use the same number of descriptors (of each type) and the requested allocation also + // uses that same number of descriptors (of each type), then fragmentation must not cause an allocation failure + // """ + panic!("Unexpected error: {:?}", err); + } + }) + } + Ok(()) => { + assert_eq!(self.sets.len(), sets_were + count as usize); + Ok(()) + } + } + } +} + +#[derive(Debug)] +struct DescriptorPool { + raw: B::DescriptorPool, + capacity: u32, + // Number of available sets. 
+ available: u32, +} + +#[derive(Debug)] +struct DescriptorBucket { + pools_offset: PoolIndex, + pools: VecDeque>, + total: u64, +} + +impl DescriptorBucket { + fn new() -> Self { + DescriptorBucket { + pools_offset: 0, + pools: VecDeque::new(), + total: 0, + } + } + + fn new_pool_size(&self, count: u32) -> u32 { + MIN_SETS // at least MIN_SETS + .max(count) // at least enough for allocation + .max(self.total.min(MAX_SETS as u64) as u32) // at least as much as was allocated so far capped to MAX_SETS + .next_power_of_two() // rounded up to nearest 2^N + } + + unsafe fn dispose(mut self, device: &B::Device) { + if self.total > 0 { + log::error!("Not all descriptor sets were deallocated"); + } + + for pool in self.pools.drain(..) { + if pool.available < pool.capacity { + log::error!( + "Descriptor pool is still in use during allocator disposal. {:?}", + pool + ); + } + device.destroy_descriptor_pool(pool.raw); + } + } + + unsafe fn allocate( + &mut self, + device: &B::Device, + layout: &B::DescriptorSetLayout, + layout_counts: &DescriptorCounts, + mut count: u32, + allocation: &mut Allocation, + ) -> Result<(), OutOfMemory> { + if count == 0 { + return Ok(()); + } + + for (index, pool) in self.pools.iter_mut().enumerate().rev() { + if pool.available == 0 { + continue; + } + + let allocate = pool.available.min(count); + log::trace!("Allocate {} from exising pool", allocate); + allocation.grow(&mut pool.raw, layout, allocate)?; + allocation.pools.extend( + std::iter::repeat(index as PoolIndex + self.pools_offset).take(allocate as usize), + ); + count -= allocate; + pool.available -= allocate; + self.total += allocate as u64; + + if count == 0 { + return Ok(()); + } + } + + while count > 0 { + let size = self.new_pool_size(count); + let pool_counts = layout_counts.multiply(size); + log::trace!( + "Create new pool with {} sets and {:?} descriptors", + size, + pool_counts, + ); + let mut raw = device.create_descriptor_pool( + size as usize, + pool_counts.iter(), + 
DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET, + )?; + + let allocate = size.min(count); + allocation.grow(&mut raw, layout, allocate)?; + + let index = self.pools.len(); + allocation.pools.extend( + std::iter::repeat(index as PoolIndex + self.pools_offset).take(allocate as usize), + ); + + count -= allocate; + self.pools.push_back(DescriptorPool { + raw, + capacity: size, + available: size - allocate, + }); + self.total += allocate as u64; + } + + Ok(()) + } + + unsafe fn free(&mut self, sets: impl IntoIterator, pool_id: PoolIndex) { + let pool = &mut self.pools[(pool_id - self.pools_offset) as usize]; + let mut count = 0; + pool.raw.free_sets(sets.into_iter().map(|set| { + count += 1; + set + })); + pool.available += count; + self.total -= count as u64; + log::trace!("Freed {} from descriptor bucket", count); + } + + unsafe fn cleanup(&mut self, device: &B::Device) { + while let Some(pool) = self.pools.pop_front() { + if pool.available < pool.capacity { + self.pools.push_front(pool); + break; + } + log::trace!("Destroying used up descriptor pool"); + device.destroy_descriptor_pool(pool.raw); + self.pools_offset += 1; + } + } +} + +/// Descriptor allocator. +/// Can be used to allocate descriptor sets for any layout. +#[derive(Debug)] +pub struct DescriptorAllocator { + buckets: HashMap, BuildHasherDefault>, + allocation: Allocation, + total: u64, + free_sets: Vec, +} + +impl Drop for DescriptorAllocator { + fn drop(&mut self) { + if !self.buckets.is_empty() { + log::error!("DescriptorAllocator is dropped"); + } + } +} + +impl DescriptorAllocator { + /// Create new allocator instance. + pub fn new() -> Self { + DescriptorAllocator { + buckets: HashMap::default(), + allocation: Allocation { + sets: Vec::new(), + pools: Vec::new(), + }, + total: 0, + free_sets: Vec::new(), + } + } + + /// Clear the allocator instance. + /// All sets allocated from this allocator become invalid. 
+ pub unsafe fn clear(&mut self, device: &B::Device) { + for (_, bucket) in self.buckets.drain() { + bucket.dispose(device); + } + } + + /// Allocate descriptor set with specified layout. + /// `DescriptorCounts` must match descriptor numbers of the layout. + /// `DescriptorCounts` can be constructed [from bindings] that were used + /// to create layout instance. + /// + /// [from bindings]: . + pub unsafe fn allocate( + &mut self, + device: &B::Device, + layout: &B::DescriptorSetLayout, + layout_counts: &DescriptorCounts, + count: u32, + extend: &mut impl Extend>, + ) -> Result<(), OutOfMemory> { + if count == 0 { + return Ok(()); + } + + log::trace!( + "Allocating {} sets with layout {:?} @ {:?}", + count, + layout, + layout_counts + ); + + let bucket = self + .buckets + .entry(layout_counts.clone()) + .or_insert_with(DescriptorBucket::new); + match bucket.allocate(device, layout, layout_counts, count, &mut self.allocation) { + Ok(()) => { + extend.extend( + self.allocation.pools + .drain(..) + .zip(self.allocation.sets.drain(..)) + .map(|(pool_id, set)| DescriptorSet { + raw: set, + counts: layout_counts.clone(), + pool_id, + }), + ); + Ok(()) + } + Err(err) => { + // Free sets allocated so far. + let mut last = None; + for (index, pool_id) in self.allocation.pools.drain(..).enumerate().rev() { + if Some(pool_id) != last { + if let Some(last_id) = last { + // Free contiguous range of sets from one pool in one go. + bucket.free(self.allocation.sets.drain(index + 1..), last_id); + } + last = Some(pool_id); + } + } + + if let Some(last_id) = last { + bucket.free(self.allocation.sets.drain(..), last_id); + } + + Err(err) + } + } + } + + /// Free descriptor sets. + /// + /// # Safety + /// + /// None of descriptor sets can be referenced in any pending command buffers. + /// All command buffers where at least one of descriptor sets referenced + /// move to invalid state. 
+ pub unsafe fn free(&mut self, all_sets: impl IntoIterator>) { + let mut free_counts = DescriptorCounts::EMPTY; + let mut free_pool_id: PoolIndex = !0; + + for set in all_sets { + if free_counts != set.counts || free_pool_id != set.pool_id { + if free_pool_id != !0 { + let bucket = self + .buckets + .get_mut(&free_counts) + .expect("Set should be allocated from this allocator"); + debug_assert!(bucket.total >= self.free_sets.len() as u64); + bucket.free(self.free_sets.drain(..), free_pool_id); + } + free_counts = set.counts; + free_pool_id = set.pool_id; + } + self.free_sets.push(set.raw); + } + + if free_pool_id != !0 { + let bucket = self + .buckets + .get_mut(&free_counts) + .expect("Set should be allocated from this allocator"); + debug_assert!(bucket.total >= self.free_sets.len() as u64); + + bucket.free(self.free_sets.drain(..), free_pool_id); + } + } + + /// Perform cleanup to allow resources reuse. + pub unsafe fn cleanup(&mut self, device: &B::Device) { + for bucket in self.buckets.values_mut() { + bucket.cleanup(device) + } + } +} diff --git a/third_party/rust/gfx-descriptor/src/counts.rs b/third_party/rust/gfx-descriptor/src/counts.rs new file mode 100644 index 000000000000..4d2a609093f6 --- /dev/null +++ b/third_party/rust/gfx-descriptor/src/counts.rs @@ -0,0 +1,241 @@ +use std::{ + cmp::Ordering, + iter::FromIterator, + ops::{AddAssign, SubAssign}, +}; + +pub use hal::pso::{ + BufferDescriptorFormat, BufferDescriptorType, DescriptorRangeDesc, DescriptorSetLayoutBinding, + DescriptorType, ImageDescriptorType, +}; + +const DESCRIPTOR_TYPES_COUNT: usize = 15; + +const DESCRIPTOR_TYPES: [DescriptorType; DESCRIPTOR_TYPES_COUNT] = [ + DescriptorType::Sampler, + DescriptorType::Image { + ty: ImageDescriptorType::Sampled { with_sampler: true }, + }, + DescriptorType::Image { + ty: ImageDescriptorType::Sampled { + with_sampler: false, + }, + }, + DescriptorType::Image { + ty: ImageDescriptorType::Storage { read_only: true }, + }, + DescriptorType::Image { + 
ty: ImageDescriptorType::Storage { read_only: false }, + }, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: true }, + format: BufferDescriptorFormat::Structured { + dynamic_offset: true, + }, + }, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: true }, + format: BufferDescriptorFormat::Structured { + dynamic_offset: false, + }, + }, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: true }, + format: BufferDescriptorFormat::Texel, + }, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: false }, + format: BufferDescriptorFormat::Structured { + dynamic_offset: true, + }, + }, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: false }, + format: BufferDescriptorFormat::Structured { + dynamic_offset: false, + }, + }, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: false }, + format: BufferDescriptorFormat::Texel, + }, + DescriptorType::Buffer { + ty: BufferDescriptorType::Uniform, + format: BufferDescriptorFormat::Structured { + dynamic_offset: true, + }, + }, + DescriptorType::Buffer { + ty: BufferDescriptorType::Uniform, + format: BufferDescriptorFormat::Structured { + dynamic_offset: false, + }, + }, + DescriptorType::Buffer { + ty: BufferDescriptorType::Uniform, + format: BufferDescriptorFormat::Texel, + }, + DescriptorType::InputAttachment, +]; + +fn descriptor_type_index(ty: &DescriptorType) -> usize { + match ty { + DescriptorType::Sampler => 0, + DescriptorType::Image { + ty: ImageDescriptorType::Sampled { with_sampler: true }, + } => 1, + DescriptorType::Image { + ty: ImageDescriptorType::Sampled { + with_sampler: false, + }, + } => 2, + DescriptorType::Image { + ty: ImageDescriptorType::Storage { read_only: true }, + } => 3, + DescriptorType::Image { + ty: ImageDescriptorType::Storage { read_only: false }, + } => 4, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: true }, + 
format: BufferDescriptorFormat::Structured { + dynamic_offset: true, + }, + } => 5, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: true }, + format: BufferDescriptorFormat::Structured { + dynamic_offset: false, + }, + } => 6, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: true }, + format: BufferDescriptorFormat::Texel, + } => 7, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: false }, + format: BufferDescriptorFormat::Structured { + dynamic_offset: true, + }, + } => 8, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: false }, + format: BufferDescriptorFormat::Structured { + dynamic_offset: false, + }, + } => 9, + DescriptorType::Buffer { + ty: BufferDescriptorType::Storage { read_only: false }, + format: BufferDescriptorFormat::Texel, + } => 10, + DescriptorType::Buffer { + ty: BufferDescriptorType::Uniform, + format: BufferDescriptorFormat::Structured { + dynamic_offset: true, + }, + } => 11, + DescriptorType::Buffer { + ty: BufferDescriptorType::Uniform, + format: BufferDescriptorFormat::Structured { + dynamic_offset: false, + }, + } => 12, + DescriptorType::Buffer { + ty: BufferDescriptorType::Uniform, + format: BufferDescriptorFormat::Texel, + } => 13, + DescriptorType::InputAttachment => 14, + } +} + +#[test] +fn test_descriptor_types() { + for (index, ty) in DESCRIPTOR_TYPES.iter().enumerate() { + assert_eq!(index, descriptor_type_index(ty)); + } +} + +/// Number of descriptors per type. +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +pub struct DescriptorCounts { + counts: [u32; DESCRIPTOR_TYPES_COUNT], +} + +impl DescriptorCounts { + /// Empty descriptor counts. + pub const EMPTY: Self = DescriptorCounts { + counts: [0; DESCRIPTOR_TYPES_COUNT], + }; + + /// Add a single layout binding. + /// Useful when created with `DescriptorCounts::EMPTY`. 
+ pub fn add_binding(&mut self, binding: DescriptorSetLayoutBinding) { + self.counts[descriptor_type_index(&binding.ty)] += binding.count as u32; + } + + /// Iterate through counts yelding descriptor types and their amount. + pub fn iter(&self) -> impl '_ + Iterator { + self.counts + .iter() + .enumerate() + .filter(|&(_, count)| *count != 0) + .map(|(index, count)| DescriptorRangeDesc { + count: *count as usize, + ty: DESCRIPTOR_TYPES[index], + }) + } + + /// Multiply all the counts by a value. + pub fn multiply(&self, value: u32) -> Self { + let mut descs = self.clone(); + for c in descs.counts.iter_mut() { + *c *= value; + } + descs + } +} + +impl FromIterator for DescriptorCounts { + fn from_iter(iter: T) -> Self where + T: IntoIterator + { + let mut descs = Self::EMPTY; + + for binding in iter { + descs.counts[descriptor_type_index(&binding.ty)] += binding.count as u32; + } + + descs + } +} + +impl PartialOrd for DescriptorCounts { + fn partial_cmp(&self, other: &Self) -> Option { + let mut ord = self.counts[0].partial_cmp(&other.counts[0])?; + for i in 1..DESCRIPTOR_TYPES_COUNT { + match (ord, self.counts[i].partial_cmp(&other.counts[i])?) 
{ + (Ordering::Less, Ordering::Greater) | (Ordering::Greater, Ordering::Less) => { + return None; + } + (Ordering::Equal, new) => ord = new, + _ => (), + } + } + Some(ord) + } +} + +impl AddAssign for DescriptorCounts { + fn add_assign(&mut self, rhs: Self) { + for i in 0..DESCRIPTOR_TYPES_COUNT { + self.counts[i] += rhs.counts[i]; + } + } +} + +impl SubAssign for DescriptorCounts { + fn sub_assign(&mut self, rhs: Self) { + for i in 0..DESCRIPTOR_TYPES_COUNT { + self.counts[i] -= rhs.counts[i]; + } + } +} diff --git a/third_party/rust/gfx-descriptor/src/lib.rs b/third_party/rust/gfx-descriptor/src/lib.rs new file mode 100644 index 000000000000..0289c0d09005 --- /dev/null +++ b/third_party/rust/gfx-descriptor/src/lib.rs @@ -0,0 +1,4 @@ +mod allocator; +mod counts; + +pub use crate::{allocator::*, counts::*}; diff --git a/third_party/rust/gfx-hal/.cargo-checksum.json b/third_party/rust/gfx-hal/.cargo-checksum.json index aad1581fb13f..3ec714e05d47 100644 --- a/third_party/rust/gfx-hal/.cargo-checksum.json +++ b/third_party/rust/gfx-hal/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"7fd90ffc01d0c91cbb8a545731350e210f681f9398c12b2826e90aeb6da08bbf","src/adapter.rs":"7076bac0db2368278d295db347e06b6d088b5a6448fc77c68ef64b3a59c024a3","src/buffer.rs":"576f59309efe8aebaf6b6826160cb7f4730b1d730b1bfefd3aa26b2f0398115b","src/command/clear.rs":"f0f08993f966a1f82091fa2a8dc226da28946f4a04bc3eca4307079bc4b14b43","src/command/mod.rs":"2c8050344f996eae3fca10724afbc89ba8df5adad5c94341af426ec77eecaade","src/command/structs.rs":"00b5850540ae21227c6578866e86cc741d074601239f8b1bbd0342a5e5f74623","src/device.rs":"766bea8943cba3934374359e5b56e8824b3ba3e709bb362c0eaa706890c4aa91","src/format.rs":"6d864c354dc5af0a31dd11f379b64f37609949d38f7e7349a546ec7c6e808307","src/image.rs":"c9c8fa1a63d616ed21cfa237a1f2f53aa0e923a63ed0ca30bdc9125306e95a5c","src/lib.rs":"d6f5404c0d6014e7602d5dc9fca41483f7a213fa5c014405688b79e2bb961616","src/memory.rs":"a8e3b745f44e54e74cce48bb0fffd6439498a9d96163a05cec4d4e6faa3fb500","src/pass.rs":"5dc3657ed879c1da91e310cc43287b4ec8feeeee1edaad0db5242e5bd8c5cf6d","src/pool.rs":"85330ac11f6a154694353615865cfddd52feec9435e20b0ff3a8ec6c7f7fe353","src/pso/compute.rs":"fb9a5748c3b9174924db13c1c59388bcd75279ff6d40d1a068fb52e70e5ccb94","src/pso/descriptor.rs":"f854d37aced6a3391d0c563fad472823a6536eabaf55f0c5e0a7babd2f146de6","src/pso/graphics.rs":"b572990d08112b2f76d044ee48359d82f50a3ea29bcdeecb62249fc15d7bbefb","src/pso/input_assembler.rs":"c6ac5a0e70b2efd0056a8f393b95a5159ace65f23eed0e5a32b1ffedd44c4e53","src/pso/mod.rs":"2dc44e5b494e1cc3823608c4faadccfcc19e7ae266590fd2db0b090c376f1ab9","src/pso/output_merger.rs":"174cceec8080a68d22f68a0a51044c16fa8a45028adc503b9a65919b68550827","src/pso/specialization.rs":"fb90dc6a34908b283514edb040293d382a3471e0c8fa0bd11ec5f98cceec5799","src/query.rs":"59a1e10bbf6efdb2837da7f20bb60218b4deadd541a1237e2470461e45d0a339","src/queue/family.rs":"80bc451a615b4643a1e0958ad8dd28c37c11801edad035fd9079fae489dfe315","src/queue/mod.rs":"19c10c5434ecbe29b35caf0bd74045e3576688c643b5020400e3a1337bc06206","src/
range.rs":"94486dad94f5d7fafaaf019c7dd9715212b25447da76ea55e867f1a91a35e606","src/window.rs":"2251d16a62a83dea8078432bf1ba04f518feaf68e2ac44c427e5f62759864a52"},"package":"7c88981665c780447bb08eb099e1ded330754a7246719bab927ee4a949c0ba7f"} \ No newline at end of file +{"files":{"Cargo.toml":"9dee80ff7b7d891038aef8058b2afba1f7fcb3f781ddf24bfa415431e411da05","src/adapter.rs":"7cf18e6aa1bc09d4c6fa03072f498fec7c9bcfe85ff1bf5ffd40d8ceb2f029fd","src/buffer.rs":"f58458cdaea2b85d7adcbed6b0e963fb6b7ada97a30ee6b8108091aa2f255b75","src/command/clear.rs":"156d52a856766bf6d05245b7939528cc64671658f80dd8cc5be92b3faa51199d","src/command/mod.rs":"e4d637cd90f6c6365e8822742caf710a4bb6230b11e0ed8d85d84b71051b9b28","src/command/structs.rs":"7bc6951ce2d497527fd07d06809cfa46238205141699fd2cfd083bc7b024ec0a","src/device.rs":"1b5bb1870398c0a5eeb387ded1f99f9da9666b2e013951b9a0f009552434ec51","src/format.rs":"21e8273a661058c589280f15a06b6c0db0e7d4f57c2ccf0cf516735f17317c7b","src/image.rs":"660865087b114c30d790f2f895241914a2272d6ffb9d7cb701945a424b574dd8","src/lib.rs":"5a484b6c853d4d81768e8367bf996cfd81673e1b8f8113fbfb5676902bdf3e93","src/memory.rs":"1b842bdc2a40484a3ed9dcf629a3e5c267c20897ec0dd794a7fb816c9e8c6ebb","src/pass.rs":"88cd1e8fdbc5b6d3bc36265d32e75b767da8d5652912ad490c1f8eedc892afe6","src/pool.rs":"5129074c99fd2fe75638eb6e3b3de4c6a370f406c2714780def462404b5c4767","src/pso/compute.rs":"4bf5abeb931a1095a94094b3c5b157f92b5268b90c9b22290d5bbd74476c0265","src/pso/descriptor.rs":"8b1a9786aa8c0fd2c54c6af20304ae05f2a60079668a0a0a7eeefc6180630f62","src/pso/graphics.rs":"1bae44ff823054d3074fcaade3e3adfc3d4aa45068810fed6524c2d6ab706b73","src/pso/input_assembler.rs":"a0c6a4cbaa0ae61f63b187aeeed45b7b213e628db21962bb042a303e122aef5f","src/pso/mod.rs":"939911418074ad1d385dee0a9e154878b22e61e4a6f37651fa1779a70f6812e0","src/pso/output_merger.rs":"432ff2be2f4a836c883186e1041c14c39768f9acb938663c0c79bb1cf3ecc60a","src/pso/specialization.rs":"3a905ac51acefc23754e9ffac9f1ffa1cddfafa6245e1db02ff86ae11de71
253","src/query.rs":"a283d1d086da7a997a2001fb3d4217699ef0fc5ef1588d15c78a7a36c8e2a1a0","src/queue/family.rs":"8424915c6e6e962c951c197836ea0a7a81e25fbba04f6aacf81d2e18beda7d5c","src/queue/mod.rs":"bb62027dcb7fb15f31e807f477003946fcb2e6312d523d3c057aeb17e2191ad3","src/window.rs":"b0c39de64dcd91bc645a3345e96648d6bb5e5ef5627553ba1c930c9ccb0364ff"},"package":"bc96180204064c9493e0fe4a9efeb721e0ac59fe8e1906d0c659142a93114fb1"} \ No newline at end of file diff --git a/third_party/rust/gfx-hal/Cargo.toml b/third_party/rust/gfx-hal/Cargo.toml index bdb375235e1e..e2422b513968 100644 --- a/third_party/rust/gfx-hal/Cargo.toml +++ b/third_party/rust/gfx-hal/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "gfx-hal" -version = "0.4.1" +version = "0.5.0" authors = ["The Gfx-rs Developers"] description = "gfx-rs hardware abstraction layer" homepage = "https://github.com/gfx-rs/gfx" @@ -40,10 +40,5 @@ version = "1" features = ["serde_derive"] optional = true -[dependencies.smallvec] -version = "0.6" -[dev-dependencies.gfx-backend-empty] -version = "0.4" - [features] unstable = [] diff --git a/third_party/rust/gfx-hal/src/adapter.rs b/third_party/rust/gfx-hal/src/adapter.rs old mode 100755 new mode 100644 index 10bb5ff1f29e..31b62eba6245 --- a/third_party/rust/gfx-hal/src/adapter.rs +++ b/third_party/rust/gfx-hal/src/adapter.rs @@ -1,153 +1,163 @@ -//! Physical devices and adapters. -//! -//! The `PhysicalDevice` trait specifies the API a backend must provide for dealing with -//! and querying a physical device, such as a particular GPU. An `Adapter` is a struct -//! containing a `PhysicalDevice` and metadata for a particular GPU, generally created -//! from an `Instance` of that backend. `adapter.open_with(...)` will return a `Device` -//! that has the properties specified. 
- -use std::{any::Any, fmt}; - -use crate::{ - queue::{QueueGroup, QueuePriority}, - device, format, image, memory, Backend, Features, Limits, -}; - -/// A description for a single chunk of memory in a heap. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct MemoryType { - /// Properties of the associated memory, such as synchronization - /// properties or whether it's on the CPU or GPU. - pub properties: memory::Properties, - /// Index to the underlying memory heap in `Gpu::memory_heaps` - pub heap_index: usize, -} - -/// Types of memory supported by this adapter and available memory. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct MemoryProperties { - /// Each memory type is associated with one heap of `memory_heaps`. - /// Multiple types can point to the same heap. - pub memory_types: Vec, - /// Memory heaps with their size in bytes. - pub memory_heaps: Vec, -} - -/// Represents a combination of a logical device and the -/// hardware queues it provides. -/// -/// This structure is typically created using an `Adapter`. -#[derive(Debug)] -pub struct Gpu { - /// Logical device for a given backend. - pub device: B::Device, - /// The command queues that the device provides. - pub queue_groups: Vec>, -} - -/// Represents a physical device (such as a GPU) capable of supporting the given backend. -pub trait PhysicalDevice: fmt::Debug + Any + Send + Sync { - /// Create a new logical device with the requested features. If `requested_features` is - /// empty (e.g. through `Features::empty()`) then only the core features are supported. - /// - /// # Errors - /// - /// - Returns `TooManyObjects` if the implementation can't create a new logical device. - /// - Returns `MissingFeature` if the implementation does not support a requested feature. 
- /// - /// # Examples - /// - /// ```no_run - /// # extern crate gfx_backend_empty as empty; - /// # extern crate gfx_hal; - /// # fn main() { - /// use gfx_hal::{adapter::PhysicalDevice, Features}; - /// - /// # let physical_device: empty::PhysicalDevice = return; - /// # let family: empty::QueueFamily = return; - /// # unsafe { - /// let gpu = physical_device.open(&[(&family, &[1.0; 1])], Features::empty()); - /// # }} - /// ``` - unsafe fn open( - &self, - families: &[(&B::QueueFamily, &[QueuePriority])], - requested_features: Features, - ) -> Result, device::CreationError>; - - /// Fetch details for a particular format. - fn format_properties(&self, format: Option) -> format::Properties; - - /// Fetch details for a particular image format. - fn image_format_properties( - &self, - format: format::Format, - dimensions: u8, - tiling: image::Tiling, - usage: image::Usage, - view_caps: image::ViewCapabilities, - ) -> Option; - - /// Fetch details for the memory regions provided by the device. - fn memory_properties(&self) -> MemoryProperties; - - /// Returns the features of this `Device`. This usually depends on the graphics API being - /// used. - fn features(&self) -> Features; - - /// Returns the resource limits of this `Device`. - fn limits(&self) -> Limits; - - /// Check cache compatibility with the `Device`. - fn is_valid_cache(&self, _cache: &[u8]) -> bool { - false - } -} - -/// Supported physical device types -#[derive(Clone, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum DeviceType { - /// Other - Other = 0, - /// Integrated - IntegratedGpu = 1, - /// Discrete - DiscreteGpu = 2, - /// Virtual / Hosted - VirtualGpu = 3, - /// Cpu / Software Rendering - Cpu = 4, -} - -/// Metadata about a backend adapter. 
-#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct AdapterInfo { - /// Adapter name - pub name: String, - /// Vendor PCI id of the adapter - pub vendor: usize, - /// PCI id of the adapter - pub device: usize, - /// Type of device - pub device_type: DeviceType, -} - -/// The list of `Adapter` instances is obtained by calling `Instance::enumerate_adapters()`. -/// -/// Given an `Adapter` a `Gpu` can be constructed by calling `PhysicalDevice::open()` on its -/// `physical_device` field. However, if only a single queue family is needed or if no -/// additional device features are required, then the `Adapter::open_with` convenience method -/// can be used instead. -#[derive(Debug)] -pub struct Adapter { - /// General information about this adapter. - pub info: AdapterInfo, - /// Actual physical device. - pub physical_device: B::PhysicalDevice, - /// Queue families supported by this adapter. - pub queue_families: Vec, -} +//! Physical devices and adapters. +//! +//! The `PhysicalDevice` trait specifies the API a backend must provide for dealing with +//! and querying a physical device, such as a particular GPU. An `Adapter` is a struct +//! containing a `PhysicalDevice` and metadata for a particular GPU, generally created +//! from an `Instance` of that backend. `adapter.open_with(...)` will return a `Device` +//! that has the properties specified. + +use std::{any::Any, fmt}; + +use crate::{ + device, + format, + image, + memory, + queue::{QueueGroup, QueuePriority}, + Backend, + Features, + Hints, + Limits, +}; + +/// A description for a single chunk of memory in a heap. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct MemoryType { + /// Properties of the associated memory, such as synchronization + /// properties or whether it's on the CPU or GPU. 
+ pub properties: memory::Properties, + /// Index to the underlying memory heap in `Gpu::memory_heaps` + pub heap_index: usize, +} + +/// Types of memory supported by this adapter and available memory. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct MemoryProperties { + /// Each memory type is associated with one heap of `memory_heaps`. + /// Multiple types can point to the same heap. + pub memory_types: Vec, + /// Memory heaps with their size in bytes. + pub memory_heaps: Vec, +} + +/// Represents a combination of a logical device and the +/// hardware queues it provides. +/// +/// This structure is typically created using an `Adapter`. +#[derive(Debug)] +pub struct Gpu { + /// Logical device for a given backend. + pub device: B::Device, + /// The command queues that the device provides. + pub queue_groups: Vec>, +} + +/// Represents a physical device (such as a GPU) capable of supporting the given backend. +pub trait PhysicalDevice: fmt::Debug + Any + Send + Sync { + /// Create a new logical device with the requested features. If `requested_features` is + /// empty (e.g. through `Features::empty()`) then only the core features are supported. + /// + /// # Errors + /// + /// - Returns `TooManyObjects` if the implementation can't create a new logical device. + /// - Returns `MissingFeature` if the implementation does not support a requested feature. 
+ /// + /// # Examples + /// + /// ```no_run + /// # extern crate gfx_backend_empty as empty; + /// # extern crate gfx_hal; + /// # fn main() { + /// use gfx_hal::{adapter::PhysicalDevice, Features}; + /// + /// # let physical_device: empty::PhysicalDevice = return; + /// # let family: empty::QueueFamily = return; + /// # unsafe { + /// let gpu = physical_device.open(&[(&family, &[1.0; 1])], Features::empty()); + /// # }} + /// ``` + unsafe fn open( + &self, + families: &[(&B::QueueFamily, &[QueuePriority])], + requested_features: Features, + ) -> Result, device::CreationError>; + + /// Fetch details for a particular format. + fn format_properties(&self, format: Option) -> format::Properties; + + /// Fetch details for a particular image format. + fn image_format_properties( + &self, + format: format::Format, + dimensions: u8, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Option; + + /// Fetch details for the memory regions provided by the device. + fn memory_properties(&self) -> MemoryProperties; + + /// Returns the features of this `PhysicalDevice`. This usually depends on the graphics API being + /// used. + fn features(&self) -> Features; + + /// Returns the performance hints of this `PhysicalDevice`. + fn hints(&self) -> Hints; + + /// Returns the resource limits of this `PhysicalDevice`. + fn limits(&self) -> Limits; + + /// Check cache compatibility with the `PhysicalDevice`. + fn is_valid_cache(&self, _cache: &[u8]) -> bool { + false + } +} + +/// Supported physical device types +#[derive(Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum DeviceType { + /// Other + Other = 0, + /// Integrated + IntegratedGpu = 1, + /// Discrete + DiscreteGpu = 2, + /// Virtual / Hosted + VirtualGpu = 3, + /// Cpu / Software Rendering + Cpu = 4, +} + +/// Metadata about a backend adapter. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct AdapterInfo { + /// Adapter name + pub name: String, + /// Vendor PCI id of the adapter + pub vendor: usize, + /// PCI id of the adapter + pub device: usize, + /// Type of device + pub device_type: DeviceType, +} + +/// The list of `Adapter` instances is obtained by calling `Instance::enumerate_adapters()`. +/// +/// Given an `Adapter` a `Gpu` can be constructed by calling `PhysicalDevice::open()` on its +/// `physical_device` field. However, if only a single queue family is needed or if no +/// additional device features are required, then the `Adapter::open_with` convenience method +/// can be used instead. +#[derive(Debug)] +pub struct Adapter { + /// General information about this adapter. + pub info: AdapterInfo, + /// Actual physical device. + pub physical_device: B::PhysicalDevice, + /// Queue families supported by this adapter. + pub queue_families: Vec, +} diff --git a/third_party/rust/gfx-hal/src/buffer.rs b/third_party/rust/gfx-hal/src/buffer.rs old mode 100755 new mode 100644 index aaf36377efc0..a8ce6a075501 --- a/third_party/rust/gfx-hal/src/buffer.rs +++ b/third_party/rust/gfx-hal/src/buffer.rs @@ -1,175 +1,207 @@ -//! Memory buffers. -//! -//! # Buffer -//! -//! Buffers interpret memory slices as linear contiguous data array. -//! They can be used as shader resources, vertex buffers, index buffers or for -//! specifying the action commands for indirect execution. - -use crate::{device, format, Backend, IndexType}; - -/// An offset inside a buffer, in bytes. -pub type Offset = u64; - -/// Buffer state. -pub type State = Access; - -/// Error creating a buffer. -#[derive(Clone, Debug, PartialEq)] -pub enum CreationError { - /// Out of either host or device memory. - OutOfMemory(device::OutOfMemory), - - /// Requested buffer usage is not supported. - /// - /// Older GL version don't support constant buffers or multiple usage flags. 
- UnsupportedUsage { - /// Unsupported usage passed on buffer creation. - usage: Usage, - }, -} - -impl From for CreationError { - fn from(error: device::OutOfMemory) -> Self { - CreationError::OutOfMemory(error) - } -} - -impl std::fmt::Display for CreationError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - CreationError::OutOfMemory(err) => write!(fmt, "Failed to create buffer: {}", err), - CreationError::UnsupportedUsage { usage } => write!(fmt, "Failed to create buffer: Unsupported usage: {:?}", usage), - } - } -} - -impl std::error::Error for CreationError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - CreationError::OutOfMemory(err) => Some(err), - _ => None, - } - } -} - -/// Error creating a buffer view. -#[derive(Clone, Debug, PartialEq)] -pub enum ViewCreationError { - /// Out of either host or device memory. - OutOfMemory(device::OutOfMemory), - - /// Buffer view format is not supported. - UnsupportedFormat { - /// Unsupported format passed on view creation. - format: Option, - }, -} - -impl From for ViewCreationError { - fn from(error: device::OutOfMemory) -> Self { - ViewCreationError::OutOfMemory(error) - } -} - -impl std::fmt::Display for ViewCreationError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ViewCreationError::OutOfMemory(err) => write!(fmt, "Failed to create buffer view: {}", err), - ViewCreationError::UnsupportedFormat { format: Some(format) } => write!(fmt, "Failed to create buffer view: Unsupported format {:?}", format), - ViewCreationError::UnsupportedFormat { format: None } => write!(fmt, "Failed to create buffer view: Unspecified format"), - } - } -} - -impl std::error::Error for ViewCreationError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - ViewCreationError::OutOfMemory(err) => Some(err), - _ => None, - } - } -} - -bitflags!( - /// Buffer usage flags. 
- #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct Usage: u32 { - /// - const TRANSFER_SRC = 0x1; - /// - const TRANSFER_DST = 0x2; - /// - const UNIFORM_TEXEL = 0x4; - /// - const STORAGE_TEXEL = 0x8; - /// - const UNIFORM = 0x10; - /// - const STORAGE = 0x20; - /// - const INDEX = 0x40; - /// - const VERTEX = 0x80; - /// - const INDIRECT = 0x100; - } -); - -impl Usage { - /// Returns if the buffer can be used in transfer operations. - pub fn can_transfer(&self) -> bool { - self.intersects(Usage::TRANSFER_SRC | Usage::TRANSFER_DST) - } -} - -bitflags!( - /// Buffer access flags. - /// - /// Access of buffers by the pipeline or shaders. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct Access: u32 { - /// Read commands instruction for indirect execution. - const INDIRECT_COMMAND_READ = 0x1; - /// Read index values for indexed draw commands. - /// - /// See [`draw_indexed`](../command/trait.RawCommandBuffer.html#tymethod.draw_indexed) - /// and [`draw_indexed_indirect`](../command/trait.RawCommandBuffer.html#tymethod.draw_indexed_indirect). - const INDEX_BUFFER_READ = 0x2; - /// Read vertices from vertex buffer for draw commands in the [`VERTEX_INPUT`]( - /// ../pso/struct.PipelineStage.html#associatedconstant.VERTEX_INPUT) stage. - const VERTEX_BUFFER_READ = 0x4; - /// - const UNIFORM_READ = 0x8; - /// - const SHADER_READ = 0x20; - /// - const SHADER_WRITE = 0x40; - /// - const TRANSFER_READ = 0x800; - /// - const TRANSFER_WRITE = 0x1000; - /// - const HOST_READ = 0x2000; - /// - const HOST_WRITE = 0x4000; - /// - const MEMORY_READ = 0x8000; - /// - const MEMORY_WRITE = 0x10000; - } -); - -/// Index buffer view for `bind_index_buffer`. -/// -/// Defines a buffer slice used for acquiring the indices on draw commands. -/// Indices are used to lookup vertex indices in the vertex buffers. -#[derive(Debug)] -pub struct IndexBufferView<'a, B: Backend> { - /// The buffer to bind. 
- pub buffer: &'a B::Buffer, - /// The offset into the buffer to start at. - pub offset: u64, - /// The type of the table elements (`u16` or `u32`). - pub index_type: IndexType, -} +//! Memory buffers. +//! +//! # Buffer +//! +//! Buffers interpret memory slices as linear contiguous data array. +//! They can be used as shader resources, vertex buffers, index buffers or for +//! specifying the action commands for indirect execution. + +use crate::{device, format, Backend, IndexType}; + +/// An offset inside a buffer, in bytes. +pub type Offset = u64; + +/// A subrange of the buffer. +#[derive(Clone, Debug, Default, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SubRange { + /// Offset to the subrange. + pub offset: Offset, + /// Size of the subrange, or None for the remaining size of the buffer. + pub size: Option, +} + +impl SubRange { + /// Whole buffer subrange. + pub const WHOLE: Self = SubRange { + offset: 0, + size: None, + }; + + /// Return the stored size, if present, or computed size based on the limit. + pub fn size_to(&self, limit: Offset) -> Offset { + self.size.unwrap_or(limit - self.offset) + } +} + +/// Buffer state. +pub type State = Access; + +/// Error creating a buffer. +#[derive(Clone, Debug, PartialEq)] +pub enum CreationError { + /// Out of either host or device memory. + OutOfMemory(device::OutOfMemory), + + /// Requested buffer usage is not supported. + /// + /// Older GL version don't support constant buffers or multiple usage flags. + UnsupportedUsage { + /// Unsupported usage passed on buffer creation. 
+ usage: Usage, + }, +} + +impl From for CreationError { + fn from(error: device::OutOfMemory) -> Self { + CreationError::OutOfMemory(error) + } +} + +impl std::fmt::Display for CreationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CreationError::OutOfMemory(err) => write!(fmt, "Failed to create buffer: {}", err), + CreationError::UnsupportedUsage { usage } => write!( + fmt, + "Failed to create buffer: Unsupported usage: {:?}", + usage + ), + } + } +} + +impl std::error::Error for CreationError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + CreationError::OutOfMemory(err) => Some(err), + _ => None, + } + } +} + +/// Error creating a buffer view. +#[derive(Clone, Debug, PartialEq)] +pub enum ViewCreationError { + /// Out of either host or device memory. + OutOfMemory(device::OutOfMemory), + + /// Buffer view format is not supported. + UnsupportedFormat(Option), +} + +impl From for ViewCreationError { + fn from(error: device::OutOfMemory) -> Self { + ViewCreationError::OutOfMemory(error) + } +} + +impl std::fmt::Display for ViewCreationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ViewCreationError::OutOfMemory(err) => { + write!(fmt, "Failed to create buffer view: {}", err) + } + ViewCreationError::UnsupportedFormat(Some(format)) => write!( + fmt, + "Failed to create buffer view: Unsupported format {:?}", + format + ), + ViewCreationError::UnsupportedFormat(None) => { + write!(fmt, "Failed to create buffer view: Unspecified format") + } + } + } +} + +impl std::error::Error for ViewCreationError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + ViewCreationError::OutOfMemory(err) => Some(err), + _ => None, + } + } +} + +bitflags!( + /// Buffer usage flags. 
+ #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Usage: u32 { + /// + const TRANSFER_SRC = 0x1; + /// + const TRANSFER_DST = 0x2; + /// + const UNIFORM_TEXEL = 0x4; + /// + const STORAGE_TEXEL = 0x8; + /// + const UNIFORM = 0x10; + /// + const STORAGE = 0x20; + /// + const INDEX = 0x40; + /// + const VERTEX = 0x80; + /// + const INDIRECT = 0x100; + } +); + +impl Usage { + /// Returns if the buffer can be used in transfer operations. + pub fn can_transfer(&self) -> bool { + self.intersects(Usage::TRANSFER_SRC | Usage::TRANSFER_DST) + } +} + +bitflags!( + /// Buffer access flags. + /// + /// Access of buffers by the pipeline or shaders. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Access: u32 { + /// Read commands instruction for indirect execution. + const INDIRECT_COMMAND_READ = 0x1; + /// Read index values for indexed draw commands. + /// + /// See [`draw_indexed`](../command/trait.RawCommandBuffer.html#tymethod.draw_indexed) + /// and [`draw_indexed_indirect`](../command/trait.RawCommandBuffer.html#tymethod.draw_indexed_indirect). + const INDEX_BUFFER_READ = 0x2; + /// Read vertices from vertex buffer for draw commands in the [`VERTEX_INPUT`]( + /// ../pso/struct.PipelineStage.html#associatedconstant.VERTEX_INPUT) stage. + const VERTEX_BUFFER_READ = 0x4; + /// + const UNIFORM_READ = 0x8; + /// + const SHADER_READ = 0x20; + /// + const SHADER_WRITE = 0x40; + /// + const TRANSFER_READ = 0x800; + /// + const TRANSFER_WRITE = 0x1000; + /// + const HOST_READ = 0x2000; + /// + const HOST_WRITE = 0x4000; + /// + const MEMORY_READ = 0x8000; + /// + const MEMORY_WRITE = 0x10000; + } +); + +/// Index buffer view for `bind_index_buffer`. +/// +/// Defines a buffer slice used for acquiring the indices on draw commands. +/// Indices are used to lookup vertex indices in the vertex buffers. +#[derive(Debug)] +pub struct IndexBufferView<'a, B: Backend> { + /// The buffer to bind. 
+ pub buffer: &'a B::Buffer, + /// The subrange of the buffer. + pub range: SubRange, + /// The type of the table elements (`u16` or `u32`). + pub index_type: IndexType, +} diff --git a/third_party/rust/gfx-hal/src/command/clear.rs b/third_party/rust/gfx-hal/src/command/clear.rs old mode 100755 new mode 100644 index 753872e1f78e..81ef0eb76435 --- a/third_party/rust/gfx-hal/src/command/clear.rs +++ b/third_party/rust/gfx-hal/src/command/clear.rs @@ -1,70 +1,70 @@ -use crate::pso; -use std::fmt; - -/// A clear color union, which can be either f32, i32, or u32. -#[repr(C)] -#[derive(Clone, Copy)] -pub union ClearColor { - /// `f32` variant - pub float32: [f32; 4], - /// `i32` variant - pub sint32: [i32; 4], - /// `u32` variant - pub uint32: [u32; 4], -} - -impl fmt::Debug for ClearColor { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - writeln![f, "ClearColor"] - } -} - -/// A combination of depth and stencil clear values. -#[repr(C)] -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct ClearDepthStencil { - /// Depth value - pub depth: f32, - /// Stencil value - pub stencil: u32, -} - -/// A set of clear values for a single attachment. -#[repr(C)] -#[derive(Clone, Copy)] -pub union ClearValue { - /// Clear color - pub color: ClearColor, - /// Clear depth and stencil - pub depth_stencil: ClearDepthStencil, - _align: [u32; 4], -} - -impl fmt::Debug for ClearValue { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("ClearValue") - .field("color", unsafe { &self.color.uint32 }) - .field("depth_stencil", unsafe { &self.depth_stencil }) - .finish() - } -} - -/// Attachment clear description for the current subpass. -#[derive(Clone, Copy, Debug)] -pub enum AttachmentClear { - /// Clear color attachment. - Color { - /// Index inside the `SubpassDesc::colors` array. - index: usize, - /// Value to clear with. - value: ClearColor, - }, - /// Clear depth-stencil attachment. 
- DepthStencil { - /// Depth value to clear with. - depth: Option, - /// Stencil value to clear with. - stencil: Option, - }, -} +use crate::pso; +use std::fmt; + +/// A clear color union, which can be either f32, i32, or u32. +#[repr(C)] +#[derive(Clone, Copy)] +pub union ClearColor { + /// `f32` variant + pub float32: [f32; 4], + /// `i32` variant + pub sint32: [i32; 4], + /// `u32` variant + pub uint32: [u32; 4], +} + +impl fmt::Debug for ClearColor { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln![f, "ClearColor"] + } +} + +/// A combination of depth and stencil clear values. +#[repr(C)] +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ClearDepthStencil { + /// Depth value + pub depth: f32, + /// Stencil value + pub stencil: u32, +} + +/// A set of clear values for a single attachment. +#[repr(C)] +#[derive(Clone, Copy)] +pub union ClearValue { + /// Clear color + pub color: ClearColor, + /// Clear depth and stencil + pub depth_stencil: ClearDepthStencil, + _align: [u32; 4], +} + +impl fmt::Debug for ClearValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("ClearValue") + .field("color", unsafe { &self.color.uint32 }) + .field("depth_stencil", unsafe { &self.depth_stencil }) + .finish() + } +} + +/// Attachment clear description for the current subpass. +#[derive(Clone, Copy, Debug)] +pub enum AttachmentClear { + /// Clear color attachment. + Color { + /// Index inside the `SubpassDesc::colors` array. + index: usize, + /// Value to clear with. + value: ClearColor, + }, + /// Clear depth-stencil attachment. + DepthStencil { + /// Depth value to clear with. + depth: Option, + /// Stencil value to clear with. 
+ stencil: Option, + }, +} diff --git a/third_party/rust/gfx-hal/src/command/mod.rs b/third_party/rust/gfx-hal/src/command/mod.rs old mode 100755 new mode 100644 index 251f1d38b5f3..bf7c4362ab75 --- a/third_party/rust/gfx-hal/src/command/mod.rs +++ b/third_party/rust/gfx-hal/src/command/mod.rs @@ -1,564 +1,568 @@ -//! Command buffers. -//! -//! A command buffer collects a list of commands to be submitted to the device. -//! Each command buffer has specific capabilities for graphics, compute or transfer operations, -//! and can be either a "primary" command buffer or a "secondary" command buffer. Operations -//! always start from a primary command buffer, but a primary command buffer can contain calls -//! to secondary command buffers that contain snippets of commands that do specific things, similar -//! to function calls. -//! -//! All the possible commands are implemented in the `RawCommandBuffer` trait, and then the `CommandBuffer` -//! and related types make a generic, strongly-typed wrapper around it that only expose the methods that -//! are valid for the capabilities it provides. - -// TODO: Document pipelines and subpasses better. - -mod clear; -mod structs; - -use std::any::Any; -use std::borrow::Borrow; -use std::fmt; -use std::ops::Range; - -use crate::image::{Filter, Layout, SubresourceRange}; -use crate::memory::{Barrier, Dependencies}; -use crate::range::RangeArg; -use crate::{buffer, pass, pso, query}; -use crate::{ - Backend, - DrawCount, - IndexCount, - InstanceCount, - VertexCount, - VertexOffset, - WorkGroupCount, -}; - -pub use self::clear::*; -pub use self::structs::*; - - -/// Offset for dynamic descriptors. -pub type DescriptorSetOffset = u32; - -bitflags! { - /// Option flags for various command buffer settings. - #[derive(Default)] - pub struct CommandBufferFlags: u32 { - // TODO: Remove once 'const fn' is stabilized: https://github.com/rust-lang/rust/issues/24111 - /// No flags. 
- const EMPTY = 0x0; - - /// Says that the command buffer will be recorded, submitted only once, and then reset and re-filled - /// for another submission. - const ONE_TIME_SUBMIT = 0x1; - - /// If set on a secondary command buffer, it says the command buffer takes place entirely inside - /// a render pass. Ignored on primary command buffer. - const RENDER_PASS_CONTINUE = 0x2; - - /// Says that a command buffer can be recorded into multiple primary command buffers, - /// and submitted to a queue while it is still pending. - const SIMULTANEOUS_USE = 0x4; - } -} - -/// An enum that indicates at runtime whether a command buffer -/// is primary or secondary, similar to what `command::Primary` -/// and `command::Secondary` do at compile-time. -#[allow(missing_docs)] -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum Level { - Primary, - Secondary, -} - -/// Specifies how commands for the following renderpasses will be recorded. -#[derive(Debug)] -pub enum SubpassContents { - /// Contents of the subpass will be inline in the command buffer, - /// NOT in secondary command buffers. - Inline, - /// Contents of the subpass will be in secondary command buffers, and - /// the primary command buffer will only contain `execute_command()` calls - /// until the subpass or render pass is complete. 
- SecondaryBuffers, -} - -#[allow(missing_docs)] -#[derive(Debug)] -pub struct CommandBufferInheritanceInfo<'a, B: Backend> { - pub subpass: Option>, - pub framebuffer: Option<&'a B::Framebuffer>, - pub occlusion_query_enable: bool, - pub occlusion_query_flags: query::ControlFlags, - pub pipeline_statistics: query::PipelineStatistic, -} - -impl<'a, B: Backend> Default for CommandBufferInheritanceInfo<'a, B> { - fn default() -> Self { - CommandBufferInheritanceInfo { - subpass: None, - framebuffer: None, - occlusion_query_enable: false, - occlusion_query_flags: query::ControlFlags::empty(), - pipeline_statistics: query::PipelineStatistic::empty(), - } - } -} - -/// A trait that describes all the operations that must be -/// provided by a `Backend`'s command buffer. -pub trait CommandBuffer: fmt::Debug + Any + Send + Sync { - /// Begins recording commands to a command buffer. - unsafe fn begin( - &mut self, - flags: CommandBufferFlags, - inheritance_info: CommandBufferInheritanceInfo, - ); - - /// Begins recording a primary command buffer - /// (that has no inheritance information). - unsafe fn begin_primary(&mut self, flags: CommandBufferFlags) { - self.begin(flags, CommandBufferInheritanceInfo::default()); - } - - /// Finish recording commands to a command buffer. - unsafe fn finish(&mut self); - - /// Empties the command buffer, optionally releasing all - /// resources from the commands that have been submitted. - unsafe fn reset(&mut self, release_resources: bool); - - // TODO: This REALLY needs to be deeper, but it's complicated. - // Should probably be a whole book chapter on synchronization and stuff really. - /// Inserts a synchronization dependency between pipeline stages - /// in the command buffer. - unsafe fn pipeline_barrier<'a, T>( - &mut self, - stages: Range, - dependencies: Dependencies, - barriers: T, - ) where - T: IntoIterator, - T::Item: Borrow>; - - /// Fill a buffer with the given `u32` value. 
- unsafe fn fill_buffer(&mut self, buffer: &B::Buffer, range: R, data: u32) - where - R: RangeArg; - - /// Copy data from the given slice into a buffer. - unsafe fn update_buffer(&mut self, buffer: &B::Buffer, offset: buffer::Offset, data: &[u8]); - - /// Clears an image to the given color/depth/stencil. - unsafe fn clear_image( - &mut self, - image: &B::Image, - layout: Layout, - value: ClearValue, - subresource_ranges: T, - ) where - T: IntoIterator, - T::Item: Borrow; - - /// Takes an iterator of attachments and an iterator of rect's, - /// and clears the given rect's for *each* attachment. - unsafe fn clear_attachments(&mut self, clears: T, rects: U) - where - T: IntoIterator, - T::Item: Borrow, - U: IntoIterator, - U::Item: Borrow; - - /// "Resolves" a multisampled image, converting it into a non-multisampled - /// image. Takes an iterator of regions to apply the resolution to. - unsafe fn resolve_image( - &mut self, - src: &B::Image, - src_layout: Layout, - dst: &B::Image, - dst_layout: Layout, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow; - - /// Copies regions from the source to destination image, - /// applying scaling, filtering and potentially format conversion. - unsafe fn blit_image( - &mut self, - src: &B::Image, - src_layout: Layout, - dst: &B::Image, - dst_layout: Layout, - filter: Filter, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow; - - /// Bind the index buffer view, making it the "current" one that draw commands - /// will operate on. - unsafe fn bind_index_buffer(&mut self, view: buffer::IndexBufferView); - - /// Bind the vertex buffer set, making it the "current" one that draw commands - /// will operate on. - /// - /// Each buffer passed corresponds to the vertex input binding with the same index, - /// starting from an offset index `first_binding`. For example an iterator with - /// two items and `first_binding` of 1 would fill vertex buffer binding numbers - /// 1 and 2. 
- /// - /// This binding number refers only to binding points for vertex buffers and is - /// completely separate from the binding numbers of `Descriptor`s in `DescriptorSet`s. - /// It needs to match with the `VertexBufferDesc` and `AttributeDesc`s to which the - /// data from each bound vertex buffer should flow. - /// - /// The `buffers` iterator should yield the `Buffer` to bind, as well as an - /// offset, in bytes, into that buffer where the vertex data that should be bound - /// starts. - unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) - where - I: IntoIterator, - T: Borrow; - - /// Set the viewport parameters for the rasterizer. - /// - /// Each viewport passed corresponds to the viewport with the same index, - /// starting from an offset index `first_viewport`. - /// - /// # Errors - /// - /// This function does not return an error. Invalid usage of this function - /// will result in undefined behavior. - /// - /// - Command buffer must be in recording state. - /// - Number of viewports must be between 1 and `max_viewports - first_viewport`. - /// - The first viewport must be less than `max_viewports`. - /// - Only queues with graphics capability support this function. - /// - The bound pipeline must not have baked viewport state. - /// - All viewports used by the pipeline must be specified before the first - /// draw call. - unsafe fn set_viewports(&mut self, first_viewport: u32, viewports: T) - where - T: IntoIterator, - T::Item: Borrow; - - /// Set the scissor rectangles for the rasterizer. - /// - /// Each scissor corresponds to the viewport with the same index, starting - /// from an offset index `first_scissor`. - /// - /// # Errors - /// - /// This function does not return an error. Invalid usage of this function - /// will result in undefined behavior. - /// - /// - Command buffer must be in recording state. - /// - Number of scissors must be between 1 and `max_viewports - first_scissor`. 
- /// - The first scissor must be less than `max_viewports`. - /// - Only queues with graphics capability support this function. - /// - The bound pipeline must not have baked scissor state. - /// - All scissors used by the pipeline must be specified before the first draw - /// call. - unsafe fn set_scissors(&mut self, first_scissor: u32, rects: T) - where - T: IntoIterator, - T::Item: Borrow; - - /// Sets the stencil reference value for comparison operations and store operations. - /// Will be used on the LHS of stencil compare ops and as store value when the - /// store op is Reference. - unsafe fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue); - - /// Sets the stencil read mask. - unsafe fn set_stencil_read_mask(&mut self, faces: pso::Face, value: pso::StencilValue); - - /// Sets the stencil write mask. - unsafe fn set_stencil_write_mask(&mut self, faces: pso::Face, value: pso::StencilValue); - - /// Set the blend constant values dynamically. - unsafe fn set_blend_constants(&mut self, color: pso::ColorValue); - - /// Set the depth bounds test values dynamically. - unsafe fn set_depth_bounds(&mut self, bounds: Range); - - /// Set the line width dynamically. - unsafe fn set_line_width(&mut self, width: f32); - - /// Set the depth bias dynamically. - unsafe fn set_depth_bias(&mut self, depth_bias: pso::DepthBias); - - /// Begins recording commands for a render pass on the given framebuffer. - /// `render_area` is the section of the framebuffer to render, - /// `clear_values` is an iterator of `ClearValueRaw`'s to use to use for - /// `clear_*` commands, one for each attachment of the render pass - /// that has a clear operation. - /// `first_subpass` specifies, for the first subpass, whether the - /// rendering commands are provided inline or whether the render - /// pass is composed of subpasses. 
- unsafe fn begin_render_pass( - &mut self, - render_pass: &B::RenderPass, - framebuffer: &B::Framebuffer, - render_area: pso::Rect, - clear_values: T, - first_subpass: SubpassContents, - ) where - T: IntoIterator, - T::Item: Borrow; - - /// Steps to the next subpass in the current render pass. - unsafe fn next_subpass(&mut self, contents: SubpassContents); - - /// Finishes recording commands for the current a render pass. - unsafe fn end_render_pass(&mut self); - - /// Bind a graphics pipeline. - /// - /// # Errors - /// - /// This function does not return an error. Invalid usage of this function - /// will result in an error on `finish`. - /// - /// - Command buffer must be in recording state. - /// - Only queues with graphics capability support this function. - unsafe fn bind_graphics_pipeline(&mut self, pipeline: &B::GraphicsPipeline); - - /// Takes an iterator of graphics `DescriptorSet`'s, and binds them to the command buffer. - /// `first_set` is the index that the first descriptor is mapped to in the command buffer. - unsafe fn bind_graphics_descriptor_sets( - &mut self, - layout: &B::PipelineLayout, - first_set: usize, - sets: I, - offsets: J, - ) where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow; - - /// Bind a compute pipeline. - /// - /// # Errors - /// - /// This function does not return an error. Invalid usage of this function - /// will result in an error on `finish`. - /// - /// - Command buffer must be in recording state. - /// - Only queues with compute capability support this function. - unsafe fn bind_compute_pipeline(&mut self, pipeline: &B::ComputePipeline); - - /// Takes an iterator of compute `DescriptorSet`'s, and binds them to the command buffer, - /// `first_set` is the index that the first descriptor is mapped to in the command buffer. 
- unsafe fn bind_compute_descriptor_sets( - &mut self, - layout: &B::PipelineLayout, - first_set: usize, - sets: I, - offsets: J, - ) where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow; - - /// Execute a workgroup in the compute pipeline. `x`, `y` and `z` are the - /// number of local workgroups to dispatch along each "axis"; a total of `x`*`y`*`z` - /// local workgroups will be created. - /// - /// # Errors - /// - /// This function does not return an error. Invalid usage of this function - /// will result in an error on `finish`. - /// - /// - Command buffer must be in recording state. - /// - A compute pipeline must be bound using `bind_compute_pipeline`. - /// - Only queues with compute capability support this function. - /// - This function must be called outside of a render pass. - /// - `count` must be less than or equal to `Limits::max_compute_work_group_count` - /// - /// TODO: - unsafe fn dispatch(&mut self, count: WorkGroupCount); - - /// Works similarly to `dispatch()` but reads parameters from the given - /// buffer during execution. - unsafe fn dispatch_indirect(&mut self, buffer: &B::Buffer, offset: buffer::Offset); - - /// Adds a command to copy regions from the source to destination buffer. - unsafe fn copy_buffer(&mut self, src: &B::Buffer, dst: &B::Buffer, regions: T) - where - T: IntoIterator, - T::Item: Borrow; - - /// Copies regions from the source to the destination images, which - /// have the given layouts. No format conversion is done; the source and destination - /// `Layout`'s **must** have the same sized image formats (such as `Rgba8Unorm` and - /// `R32`, both of which are 32 bits). - unsafe fn copy_image( - &mut self, - src: &B::Image, - src_layout: Layout, - dst: &B::Image, - dst_layout: Layout, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow; - - /// Copies regions from the source buffer to the destination image. 
- unsafe fn copy_buffer_to_image( - &mut self, - src: &B::Buffer, - dst: &B::Image, - dst_layout: Layout, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow; - - /// Copies regions from the source image to the destination buffer. - unsafe fn copy_image_to_buffer( - &mut self, - src: &B::Image, - src_layout: Layout, - dst: &B::Buffer, - regions: T, - ) where - T: IntoIterator, - T::Item: Borrow; - - // TODO: This explanation needs improvement. - /// Performs a non-indexed drawing operation, fetching vertex attributes - /// from the currently bound vertex buffers. It performs instanced - /// drawing, drawing `instances.len()` - /// times with an `instanceIndex` starting with the start of the range. - unsafe fn draw(&mut self, vertices: Range, instances: Range); - - /// Performs indexed drawing, drawing the range of indices - /// given by the current index buffer and any bound vertex buffers. - /// `base_vertex` specifies the vertex offset corresponding to index 0. - /// That is, the offset into the vertex buffer is `(current_index + base_vertex)` - /// - /// It also performs instanced drawing, identical to `draw()`. - unsafe fn draw_indexed( - &mut self, - indices: Range, - base_vertex: VertexOffset, - instances: Range, - ); - - /// Functions identically to `draw()`, except the parameters are read - /// from the given buffer, starting at `offset` and increasing `stride` - /// bytes with each successive draw. Performs `draw_count` draws total. - /// `draw_count` may be zero. - /// - /// Each draw command in the buffer is a series of 4 `u32` values specifying, - /// in order, the number of vertices to draw, the number of instances to draw, - /// the index of the first vertex to draw, and the instance ID of the first - /// instance to draw. 
- unsafe fn draw_indirect( - &mut self, - buffer: &B::Buffer, - offset: buffer::Offset, - draw_count: DrawCount, - stride: u32, - ); - - /// Like `draw_indirect()`, this does indexed drawing a la `draw_indexed()` but - /// reads the draw parameters out of the given buffer. - /// - /// Each draw command in the buffer is a series of 5 values specifying, - /// in order, the number of indices, the number of instances, the first index, - /// the vertex offset, and the first instance. All are `u32`'s except - /// the vertex offset, which is an `i32`. - unsafe fn draw_indexed_indirect( - &mut self, - buffer: &B::Buffer, - offset: buffer::Offset, - draw_count: DrawCount, - stride: u32, - ); - - /// Signals an event once all specified stages of the shader pipeline have completed. - unsafe fn set_event(&mut self, event: &B::Event, stages: pso::PipelineStage); - - /// Resets an event once all specified stages of the shader pipeline have completed. - unsafe fn reset_event(&mut self, event: &B::Event, stages: pso::PipelineStage); - - /// Waits at some shader stage(s) until all events have been signalled. - /// - /// - `src_stages` specifies the shader pipeline stages in which the events were signalled. - /// - `dst_stages` specifies the shader pipeline stages at which execution should wait. - /// - `barriers` specifies a series of memory barriers to be executed before pipeline execution - /// resumes. - unsafe fn wait_events<'a, I, J>( - &mut self, - events: I, - stages: Range, - barriers: J, - ) where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow>; - - /// Begins a query operation. Queries count operations or record timestamps - /// resulting from commands that occur between the beginning and end of the query, - /// and save the results to the query pool. - unsafe fn begin_query(&mut self, query: query::Query, flags: query::ControlFlags); - - /// End a query. 
- unsafe fn end_query(&mut self, query: query::Query); - - /// Reset/clear the values in the given range of the query pool. - unsafe fn reset_query_pool(&mut self, pool: &B::QueryPool, queries: Range); - - /// Copy query results into a buffer. - unsafe fn copy_query_pool_results( - &mut self, - pool: &B::QueryPool, - queries: Range, - buffer: &B::Buffer, - offset: buffer::Offset, - stride: buffer::Offset, - flags: query::ResultFlags, - ); - - /// Requests a timestamp to be written. - unsafe fn write_timestamp(&mut self, stage: pso::PipelineStage, query: query::Query); - - /// Modify constant data in a graphics pipeline. Push constants are intended to modify data in a - /// pipeline more quickly than a updating the values inside a descriptor set. - /// - /// Push constants must be aligned to 4 bytes, and to guarantee alignment, this function takes a - /// `&[u32]` instead of a `&[u8]`. Note that the offset is still specified in units of bytes. - unsafe fn push_graphics_constants( - &mut self, - layout: &B::PipelineLayout, - stages: pso::ShaderStageFlags, - offset: u32, - constants: &[u32], - ); - - /// Modify constant data in a compute pipeline. Push constants are intended to modify data in a - /// pipeline more quickly than a updating the values inside a descriptor set. - /// - /// Push constants must be aligned to 4 bytes, and to guarantee alignment, this function takes a - /// `&[u32]` instead of a `&[u8]`. Note that the offset is still specified in units of bytes. - unsafe fn push_compute_constants( - &mut self, - layout: &B::PipelineLayout, - offset: u32, - constants: &[u32], - ); - - /// Execute the given secondary command buffers. - unsafe fn execute_commands<'a, T, I>(&mut self, cmd_buffers: I) - where - T: 'a + Borrow, - I: IntoIterator; -} +//! Command buffers. +//! +//! A command buffer collects a list of commands to be submitted to the device. +//! Each command buffer has specific capabilities for graphics, compute or transfer operations, +//! 
and can be either a "primary" command buffer or a "secondary" command buffer. Operations +//! always start from a primary command buffer, but a primary command buffer can contain calls +//! to secondary command buffers that contain snippets of commands that do specific things, similar +//! to function calls. +//! +//! All the possible commands are implemented in the `RawCommandBuffer` trait, and then the `CommandBuffer` +//! and related types make a generic, strongly-typed wrapper around it that only expose the methods that +//! are valid for the capabilities it provides. + +// TODO: Document pipelines and subpasses better. + +mod clear; +mod structs; + +use std::any::Any; +use std::borrow::Borrow; +use std::fmt; +use std::ops::Range; + +use crate::image::{Filter, Layout, SubresourceRange}; +use crate::memory::{Barrier, Dependencies}; +use crate::{buffer, pass, pso, query}; +use crate::{ + Backend, + DrawCount, + IndexCount, + InstanceCount, + VertexCount, + VertexOffset, + WorkGroupCount, +}; + +pub use self::clear::*; +pub use self::structs::*; + +/// Offset for dynamic descriptors. +pub type DescriptorSetOffset = u32; + +bitflags! { + /// Option flags for various command buffer settings. + #[derive(Default)] + pub struct CommandBufferFlags: u32 { + // TODO: Remove once 'const fn' is stabilized: https://github.com/rust-lang/rust/issues/24111 + /// No flags. + const EMPTY = 0x0; + + /// Says that the command buffer will be recorded, submitted only once, and then reset and re-filled + /// for another submission. + const ONE_TIME_SUBMIT = 0x1; + + /// If set on a secondary command buffer, it says the command buffer takes place entirely inside + /// a render pass. Ignored on primary command buffer. + const RENDER_PASS_CONTINUE = 0x2; + + /// Says that a command buffer can be recorded into multiple primary command buffers, + /// and submitted to a queue while it is still pending. 
+ const SIMULTANEOUS_USE = 0x4; + } +} + +/// An enum that indicates at runtime whether a command buffer +/// is primary or secondary, similar to what `command::Primary` +/// and `command::Secondary` do at compile-time. +#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Level { + Primary, + Secondary, +} + +/// Specifies how commands for the following renderpasses will be recorded. +#[derive(Debug)] +pub enum SubpassContents { + /// Contents of the subpass will be inline in the command buffer, + /// NOT in secondary command buffers. + Inline, + /// Contents of the subpass will be in secondary command buffers, and + /// the primary command buffer will only contain `execute_command()` calls + /// until the subpass or render pass is complete. + SecondaryBuffers, +} + +#[allow(missing_docs)] +#[derive(Debug)] +pub struct CommandBufferInheritanceInfo<'a, B: Backend> { + pub subpass: Option>, + pub framebuffer: Option<&'a B::Framebuffer>, + pub occlusion_query_enable: bool, + pub occlusion_query_flags: query::ControlFlags, + pub pipeline_statistics: query::PipelineStatistic, +} + +impl<'a, B: Backend> Default for CommandBufferInheritanceInfo<'a, B> { + fn default() -> Self { + CommandBufferInheritanceInfo { + subpass: None, + framebuffer: None, + occlusion_query_enable: false, + occlusion_query_flags: query::ControlFlags::empty(), + pipeline_statistics: query::PipelineStatistic::empty(), + } + } +} + +/// A trait that describes all the operations that must be +/// provided by a `Backend`'s command buffer. +pub trait CommandBuffer: fmt::Debug + Any + Send + Sync { + /// Begins recording commands to a command buffer. + unsafe fn begin( + &mut self, + flags: CommandBufferFlags, + inheritance_info: CommandBufferInheritanceInfo, + ); + + /// Begins recording a primary command buffer + /// (that has no inheritance information). 
+ unsafe fn begin_primary(&mut self, flags: CommandBufferFlags) { + self.begin(flags, CommandBufferInheritanceInfo::default()); + } + + /// Finish recording commands to a command buffer. + unsafe fn finish(&mut self); + + /// Empties the command buffer, optionally releasing all + /// resources from the commands that have been submitted. + unsafe fn reset(&mut self, release_resources: bool); + + // TODO: This REALLY needs to be deeper, but it's complicated. + // Should probably be a whole book chapter on synchronization and stuff really. + /// Inserts a synchronization dependency between pipeline stages + /// in the command buffer. + unsafe fn pipeline_barrier<'a, T>( + &mut self, + stages: Range, + dependencies: Dependencies, + barriers: T, + ) where + T: IntoIterator, + T::Item: Borrow>; + + /// Fill a buffer with the given `u32` value. + unsafe fn fill_buffer(&mut self, buffer: &B::Buffer, range: buffer::SubRange, data: u32); + + /// Copy data from the given slice into a buffer. + unsafe fn update_buffer(&mut self, buffer: &B::Buffer, offset: buffer::Offset, data: &[u8]); + + /// Clears an image to the given color/depth/stencil. + unsafe fn clear_image( + &mut self, + image: &B::Image, + layout: Layout, + value: ClearValue, + subresource_ranges: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + /// Takes an iterator of attachments and an iterator of rect's, + /// and clears the given rect's for *each* attachment. + unsafe fn clear_attachments(&mut self, clears: T, rects: U) + where + T: IntoIterator, + T::Item: Borrow, + U: IntoIterator, + U::Item: Borrow; + + /// "Resolves" a multisampled image, converting it into a non-multisampled + /// image. Takes an iterator of regions to apply the resolution to. 
+ unsafe fn resolve_image( + &mut self, + src: &B::Image, + src_layout: Layout, + dst: &B::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + /// Copies regions from the source to destination image, + /// applying scaling, filtering and potentially format conversion. + unsafe fn blit_image( + &mut self, + src: &B::Image, + src_layout: Layout, + dst: &B::Image, + dst_layout: Layout, + filter: Filter, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + /// Bind the index buffer view, making it the "current" one that draw commands + /// will operate on. + unsafe fn bind_index_buffer(&mut self, view: buffer::IndexBufferView); + + /// Bind the vertex buffer set, making it the "current" one that draw commands + /// will operate on. + /// + /// Each buffer passed corresponds to the vertex input binding with the same index, + /// starting from an offset index `first_binding`. For example an iterator with + /// two items and `first_binding` of 1 would fill vertex buffer binding numbers + /// 1 and 2. + /// + /// This binding number refers only to binding points for vertex buffers and is + /// completely separate from the binding numbers of `Descriptor`s in `DescriptorSet`s. + /// It needs to match with the `VertexBufferDesc` and `AttributeDesc`s to which the + /// data from each bound vertex buffer should flow. + /// + /// The `buffers` iterator should yield the `Buffer` to bind, as well as a subrange, + /// in bytes, into that buffer where the vertex data that should be bound. + unsafe fn bind_vertex_buffers(&mut self, first_binding: pso::BufferIndex, buffers: I) + where + I: IntoIterator, + T: Borrow; + + /// Set the viewport parameters for the rasterizer. + /// + /// Each viewport passed corresponds to the viewport with the same index, + /// starting from an offset index `first_viewport`. + /// + /// # Errors + /// + /// This function does not return an error. 
Invalid usage of this function + /// will result in undefined behavior. + /// + /// - Command buffer must be in recording state. + /// - Number of viewports must be between 1 and `max_viewports - first_viewport`. + /// - The first viewport must be less than `max_viewports`. + /// - Only queues with graphics capability support this function. + /// - The bound pipeline must not have baked viewport state. + /// - All viewports used by the pipeline must be specified before the first + /// draw call. + unsafe fn set_viewports(&mut self, first_viewport: u32, viewports: T) + where + T: IntoIterator, + T::Item: Borrow; + + /// Set the scissor rectangles for the rasterizer. + /// + /// Each scissor corresponds to the viewport with the same index, starting + /// from an offset index `first_scissor`. + /// + /// # Errors + /// + /// This function does not return an error. Invalid usage of this function + /// will result in undefined behavior. + /// + /// - Command buffer must be in recording state. + /// - Number of scissors must be between 1 and `max_viewports - first_scissor`. + /// - The first scissor must be less than `max_viewports`. + /// - Only queues with graphics capability support this function. + /// - The bound pipeline must not have baked scissor state. + /// - All scissors used by the pipeline must be specified before the first draw + /// call. + unsafe fn set_scissors(&mut self, first_scissor: u32, rects: T) + where + T: IntoIterator, + T::Item: Borrow; + + /// Sets the stencil reference value for comparison operations and store operations. + /// Will be used on the LHS of stencil compare ops and as store value when the + /// store op is Reference. + unsafe fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue); + + /// Sets the stencil read mask. + unsafe fn set_stencil_read_mask(&mut self, faces: pso::Face, value: pso::StencilValue); + + /// Sets the stencil write mask. 
+ unsafe fn set_stencil_write_mask(&mut self, faces: pso::Face, value: pso::StencilValue); + + /// Set the blend constant values dynamically. + unsafe fn set_blend_constants(&mut self, color: pso::ColorValue); + + /// Set the depth bounds test values dynamically. + unsafe fn set_depth_bounds(&mut self, bounds: Range); + + /// Set the line width dynamically. + /// + /// Only valid to call if `Features::LINE_WIDTH` is enabled. + unsafe fn set_line_width(&mut self, width: f32); + + /// Set the depth bias dynamically. + unsafe fn set_depth_bias(&mut self, depth_bias: pso::DepthBias); + + /// Begins recording commands for a render pass on the given framebuffer. + /// `render_area` is the section of the framebuffer to render, + /// `clear_values` is an iterator of `ClearValueRaw`'s to use to use for + /// `clear_*` commands, one for each attachment of the render pass + /// that has a clear operation. + /// `first_subpass` specifies, for the first subpass, whether the + /// rendering commands are provided inline or whether the render + /// pass is composed of subpasses. + unsafe fn begin_render_pass( + &mut self, + render_pass: &B::RenderPass, + framebuffer: &B::Framebuffer, + render_area: pso::Rect, + clear_values: T, + first_subpass: SubpassContents, + ) where + T: IntoIterator, + T::Item: Borrow; + + /// Steps to the next subpass in the current render pass. + unsafe fn next_subpass(&mut self, contents: SubpassContents); + + /// Finishes recording commands for the current a render pass. + unsafe fn end_render_pass(&mut self); + + /// Bind a graphics pipeline. + /// + /// # Errors + /// + /// This function does not return an error. Invalid usage of this function + /// will result in an error on `finish`. + /// + /// - Command buffer must be in recording state. + /// - Only queues with graphics capability support this function. 
+ unsafe fn bind_graphics_pipeline(&mut self, pipeline: &B::GraphicsPipeline); + + /// Takes an iterator of graphics `DescriptorSet`'s, and binds them to the command buffer. + /// `first_set` is the index that the first descriptor is mapped to in the command buffer. + unsafe fn bind_graphics_descriptor_sets( + &mut self, + layout: &B::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow; + + /// Bind a compute pipeline. + /// + /// # Errors + /// + /// This function does not return an error. Invalid usage of this function + /// will result in an error on `finish`. + /// + /// - Command buffer must be in recording state. + /// - Only queues with compute capability support this function. + unsafe fn bind_compute_pipeline(&mut self, pipeline: &B::ComputePipeline); + + /// Takes an iterator of compute `DescriptorSet`'s, and binds them to the command buffer, + /// `first_set` is the index that the first descriptor is mapped to in the command buffer. + unsafe fn bind_compute_descriptor_sets( + &mut self, + layout: &B::PipelineLayout, + first_set: usize, + sets: I, + offsets: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow; + + /// Execute a workgroup in the compute pipeline. `x`, `y` and `z` are the + /// number of local workgroups to dispatch along each "axis"; a total of `x`*`y`*`z` + /// local workgroups will be created. + /// + /// # Errors + /// + /// This function does not return an error. Invalid usage of this function + /// will result in an error on `finish`. + /// + /// - Command buffer must be in recording state. + /// - A compute pipeline must be bound using `bind_compute_pipeline`. + /// - Only queues with compute capability support this function. + /// - This function must be called outside of a render pass. 
+ /// - `count` must be less than or equal to `Limits::max_compute_work_group_count` + /// + /// TODO: + unsafe fn dispatch(&mut self, count: WorkGroupCount); + + /// Works similarly to `dispatch()` but reads parameters from the given + /// buffer during execution. + unsafe fn dispatch_indirect(&mut self, buffer: &B::Buffer, offset: buffer::Offset); + + /// Adds a command to copy regions from the source to destination buffer. + unsafe fn copy_buffer(&mut self, src: &B::Buffer, dst: &B::Buffer, regions: T) + where + T: IntoIterator, + T::Item: Borrow; + + /// Copies regions from the source to the destination images, which + /// have the given layouts. No format conversion is done; the source and destination + /// `Layout`'s **must** have the same sized image formats (such as `Rgba8Unorm` and + /// `R32`, both of which are 32 bits). + unsafe fn copy_image( + &mut self, + src: &B::Image, + src_layout: Layout, + dst: &B::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + /// Copies regions from the source buffer to the destination image. + unsafe fn copy_buffer_to_image( + &mut self, + src: &B::Buffer, + dst: &B::Image, + dst_layout: Layout, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + /// Copies regions from the source image to the destination buffer. + unsafe fn copy_image_to_buffer( + &mut self, + src: &B::Image, + src_layout: Layout, + dst: &B::Buffer, + regions: T, + ) where + T: IntoIterator, + T::Item: Borrow; + + // TODO: This explanation needs improvement. + /// Performs a non-indexed drawing operation, fetching vertex attributes + /// from the currently bound vertex buffers. It performs instanced + /// drawing, drawing `instances.len()` + /// times with an `instanceIndex` starting with the start of the range. 
+ unsafe fn draw(&mut self, vertices: Range, instances: Range); + + /// Performs indexed drawing, drawing the range of indices + /// given by the current index buffer and any bound vertex buffers. + /// `base_vertex` specifies the vertex offset corresponding to index 0. + /// That is, the offset into the vertex buffer is `(current_index + base_vertex)` + /// + /// It also performs instanced drawing, identical to `draw()`. + unsafe fn draw_indexed( + &mut self, + indices: Range, + base_vertex: VertexOffset, + instances: Range, + ); + + /// Functions identically to `draw()`, except the parameters are read + /// from the given buffer, starting at `offset` and increasing `stride` + /// bytes with each successive draw. Performs `draw_count` draws total. + /// `draw_count` may be zero. + /// + /// Each draw command in the buffer is a series of 4 `u32` values specifying, + /// in order, the number of vertices to draw, the number of instances to draw, + /// the index of the first vertex to draw, and the instance ID of the first + /// instance to draw. + unsafe fn draw_indirect( + &mut self, + buffer: &B::Buffer, + offset: buffer::Offset, + draw_count: DrawCount, + stride: u32, + ); + + /// Like `draw_indirect()`, this does indexed drawing a la `draw_indexed()` but + /// reads the draw parameters out of the given buffer. + /// + /// Each draw command in the buffer is a series of 5 values specifying, + /// in order, the number of indices, the number of instances, the first index, + /// the vertex offset, and the first instance. All are `u32`'s except + /// the vertex offset, which is an `i32`. + unsafe fn draw_indexed_indirect( + &mut self, + buffer: &B::Buffer, + offset: buffer::Offset, + draw_count: DrawCount, + stride: u32, + ); + + /// Signals an event once all specified stages of the shader pipeline have completed. 
+ unsafe fn set_event(&mut self, event: &B::Event, stages: pso::PipelineStage); + + /// Resets an event once all specified stages of the shader pipeline have completed. + unsafe fn reset_event(&mut self, event: &B::Event, stages: pso::PipelineStage); + + /// Waits at some shader stage(s) until all events have been signalled. + /// + /// - `src_stages` specifies the shader pipeline stages in which the events were signalled. + /// - `dst_stages` specifies the shader pipeline stages at which execution should wait. + /// - `barriers` specifies a series of memory barriers to be executed before pipeline execution + /// resumes. + unsafe fn wait_events<'a, I, J>( + &mut self, + events: I, + stages: Range, + barriers: J, + ) where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow>; + + /// Begins a query operation. Queries count operations or record timestamps + /// resulting from commands that occur between the beginning and end of the query, + /// and save the results to the query pool. + unsafe fn begin_query(&mut self, query: query::Query, flags: query::ControlFlags); + + /// End a query. + unsafe fn end_query(&mut self, query: query::Query); + + /// Reset/clear the values in the given range of the query pool. + unsafe fn reset_query_pool(&mut self, pool: &B::QueryPool, queries: Range); + + /// Copy query results into a buffer. + unsafe fn copy_query_pool_results( + &mut self, + pool: &B::QueryPool, + queries: Range, + buffer: &B::Buffer, + offset: buffer::Offset, + stride: buffer::Offset, + flags: query::ResultFlags, + ); + + /// Requests a timestamp to be written. + unsafe fn write_timestamp(&mut self, stage: pso::PipelineStage, query: query::Query); + + /// Modify constant data in a graphics pipeline. Push constants are intended to modify data in a + /// pipeline more quickly than a updating the values inside a descriptor set. 
+ /// + /// Push constants must be aligned to 4 bytes, and to guarantee alignment, this function takes a + /// `&[u32]` instead of a `&[u8]`. Note that the offset is still specified in units of bytes. + unsafe fn push_graphics_constants( + &mut self, + layout: &B::PipelineLayout, + stages: pso::ShaderStageFlags, + offset: u32, + constants: &[u32], + ); + + /// Modify constant data in a compute pipeline. Push constants are intended to modify data in a + /// pipeline more quickly than a updating the values inside a descriptor set. + /// + /// Push constants must be aligned to 4 bytes, and to guarantee alignment, this function takes a + /// `&[u32]` instead of a `&[u8]`. Note that the offset is still specified in units of bytes. + unsafe fn push_compute_constants( + &mut self, + layout: &B::PipelineLayout, + offset: u32, + constants: &[u32], + ); + + /// Execute the given secondary command buffers. + unsafe fn execute_commands<'a, T, I>(&mut self, cmd_buffers: I) + where + T: 'a + Borrow, + I: IntoIterator; + + /// Debug mark the current spot in the command buffer. + unsafe fn insert_debug_marker(&mut self, name: &str, color: u32); + /// Start a debug marker at the current place in the command buffer. + unsafe fn begin_debug_marker(&mut self, name: &str, color: u32); + /// End the last started debug marker scope. + unsafe fn end_debug_marker(&mut self); +} diff --git a/third_party/rust/gfx-hal/src/command/structs.rs b/third_party/rust/gfx-hal/src/command/structs.rs old mode 100755 new mode 100644 index d3c412f8c442..3d42a0df2080 --- a/third_party/rust/gfx-hal/src/command/structs.rs +++ b/third_party/rust/gfx-hal/src/command/structs.rs @@ -1,86 +1,86 @@ -use crate::{buffer, image}; - -use std::ops::Range; - -/// Specifies a source region and a destination -/// region in a buffer for copying. All values -/// are in units of bytes. 
-#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct BufferCopy { - /// Buffer region source offset. - pub src: buffer::Offset, - /// Buffer region destination offset. - pub dst: buffer::Offset, - /// Region size. - pub size: buffer::Offset, -} - -/// Bundles together all the parameters needed to copy data from one `Image` -/// to another. -#[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct ImageCopy { - /// The image subresource to copy from. - pub src_subresource: image::SubresourceLayers, - /// The source offset. - pub src_offset: image::Offset, - /// The image subresource to copy to. - pub dst_subresource: image::SubresourceLayers, - /// The destination offset. - pub dst_offset: image::Offset, - /// The extent of the region to copy. - pub extent: image::Extent, -} - -/// Bundles together all the parameters needed to copy a buffer -/// to an image or vice-versa. -#[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct BufferImageCopy { - /// Buffer offset in bytes. - pub buffer_offset: buffer::Offset, - /// Width of a buffer 'row' in texels. - pub buffer_width: u32, - /// Height of a buffer 'image slice' in texels. - pub buffer_height: u32, - /// The image subresource. - pub image_layers: image::SubresourceLayers, - /// The offset of the portion of the image to copy. - pub image_offset: image::Offset, - /// Size of the portion of the image to copy. - pub image_extent: image::Extent, -} - -/// Parameters for an image resolve operation, -/// where a multi-sampled image is copied into a single-sampled -/// image. -#[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct ImageResolve { - /// Source image and layers. - pub src_subresource: image::SubresourceLayers, - /// Source image offset. - pub src_offset: image::Offset, - /// Destination image and layers. 
- pub dst_subresource: image::SubresourceLayers, - /// Destination image offset. - pub dst_offset: image::Offset, - /// Image extent. - pub extent: image::Extent, -} - -/// Parameters for an image blit operation, where a portion of one image -/// is copied into another, possibly with scaling and filtering. -#[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct ImageBlit { - /// Source image and layers. - pub src_subresource: image::SubresourceLayers, - /// Source image bounds. - pub src_bounds: Range, - /// Destination image and layers. - pub dst_subresource: image::SubresourceLayers, - /// Destination image bounds. - pub dst_bounds: Range, -} +use crate::{buffer, image}; + +use std::ops::Range; + +/// Specifies a source region and a destination +/// region in a buffer for copying. All values +/// are in units of bytes. +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct BufferCopy { + /// Buffer region source offset. + pub src: buffer::Offset, + /// Buffer region destination offset. + pub dst: buffer::Offset, + /// Region size. + pub size: buffer::Offset, +} + +/// Bundles together all the parameters needed to copy data from one `Image` +/// to another. +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ImageCopy { + /// The image subresource to copy from. + pub src_subresource: image::SubresourceLayers, + /// The source offset. + pub src_offset: image::Offset, + /// The image subresource to copy to. + pub dst_subresource: image::SubresourceLayers, + /// The destination offset. + pub dst_offset: image::Offset, + /// The extent of the region to copy. + pub extent: image::Extent, +} + +/// Bundles together all the parameters needed to copy a buffer +/// to an image or vice-versa. 
+#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct BufferImageCopy { + /// Buffer offset in bytes. + pub buffer_offset: buffer::Offset, + /// Width of a buffer 'row' in texels. + pub buffer_width: u32, + /// Height of a buffer 'image slice' in texels. + pub buffer_height: u32, + /// The image subresource. + pub image_layers: image::SubresourceLayers, + /// The offset of the portion of the image to copy. + pub image_offset: image::Offset, + /// Size of the portion of the image to copy. + pub image_extent: image::Extent, +} + +/// Parameters for an image resolve operation, +/// where a multi-sampled image is copied into a single-sampled +/// image. +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ImageResolve { + /// Source image and layers. + pub src_subresource: image::SubresourceLayers, + /// Source image offset. + pub src_offset: image::Offset, + /// Destination image and layers. + pub dst_subresource: image::SubresourceLayers, + /// Destination image offset. + pub dst_offset: image::Offset, + /// Image extent. + pub extent: image::Extent, +} + +/// Parameters for an image blit operation, where a portion of one image +/// is copied into another, possibly with scaling and filtering. +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ImageBlit { + /// Source image and layers. + pub src_subresource: image::SubresourceLayers, + /// Source image bounds. + pub src_bounds: Range, + /// Destination image and layers. + pub dst_subresource: image::SubresourceLayers, + /// Destination image bounds. + pub dst_bounds: Range, +} diff --git a/third_party/rust/gfx-hal/src/device.rs b/third_party/rust/gfx-hal/src/device.rs old mode 100755 new mode 100644 index ba57b05ed52d..5bccad563a62 --- a/third_party/rust/gfx-hal/src/device.rs +++ b/third_party/rust/gfx-hal/src/device.rs @@ -1,927 +1,973 @@ -//! Logical device -//! -//! 
# Device -//! -//! This module exposes the `Device` trait, which provides methods for creating -//! and managing graphics resources such as buffers, images and memory. -//! -//! The `Adapter` and `Device` types are very similar to the Vulkan concept of -//! "physical devices" vs. "logical devices"; an `Adapter` is single GPU -//! (or CPU) that implements a backend, a `Device` is a -//! handle to that physical device that has the requested capabilities -//! and is used to actually do things. - -use std::any::Any; -use std::borrow::Borrow; -use std::ops::Range; -use std::{fmt, iter}; - -use crate::{ - buffer, format, image, pass, pso, query, - memory::Requirements, - pool::CommandPoolCreateFlags, - pso::DescriptorPoolCreateFlags, - queue::QueueFamilyId, - range::RangeArg, - window::{self, SwapchainConfig}, - Backend, MemoryTypeId, -}; - -/// Error occurred caused device to be lost. -#[derive(Clone, Debug, PartialEq)] -pub struct DeviceLost; - -impl std::fmt::Display for DeviceLost { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - fmt.write_str("Device lost") - } -} - -impl std::error::Error for DeviceLost {} - -/// Error occurred caused surface to be lost. -#[derive(Clone, Debug, PartialEq)] -pub struct SurfaceLost; - -impl std::fmt::Display for SurfaceLost { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - fmt.write_str("Surface lost") - } -} - -impl std::error::Error for SurfaceLost {} - -/// Native window is already in use by graphics API. -#[derive(Clone, Debug, PartialEq)] -pub struct WindowInUse; - -impl std::fmt::Display for WindowInUse { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - fmt.write_str("Window is in use") - } -} - -impl std::error::Error for WindowInUse {} - -/// Error allocating memory. -#[derive(Clone, Debug, PartialEq)] -pub enum OutOfMemory { - /// Host memory exhausted. - Host, - /// Device memory exhausted. 
- Device, -} - -impl std::fmt::Display for OutOfMemory { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - OutOfMemory::Host => write!(fmt, "Out of host memory"), - OutOfMemory::Device => write!(fmt, "Out of device memory"), - } - } -} - -impl std::error::Error for OutOfMemory {} - -/// Error occurred caused device to be lost -/// or out of memory error. -#[derive(Clone, Debug, PartialEq)] -pub enum OomOrDeviceLost { - /// Out of either host or device memory. - OutOfMemory(OutOfMemory), - /// Device is lost - DeviceLost(DeviceLost), -} - -impl From for OomOrDeviceLost { - fn from(error: OutOfMemory) -> Self { - OomOrDeviceLost::OutOfMemory(error) - } -} - -impl From for OomOrDeviceLost { - fn from(error: DeviceLost) -> Self { - OomOrDeviceLost::DeviceLost(error) - } -} - -/// Possible cause of allocation failure. -#[derive(Clone, Debug, PartialEq)] -pub enum AllocationError { - /// Out of either host or device memory. - OutOfMemory(OutOfMemory), - - /// Cannot create any more objects. - TooManyObjects, -} - -impl From for AllocationError { - fn from(error: OutOfMemory) -> Self { - AllocationError::OutOfMemory(error) - } -} - -impl std::fmt::Display for AllocationError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - AllocationError::OutOfMemory(err) => write!(fmt, "Failed to allocate object: {}", err), - AllocationError::TooManyObjects => write!(fmt, "Failed to allocate object: Too many objects"), - } - } -} - -impl std::error::Error for AllocationError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - AllocationError::OutOfMemory(err) => Some(err), - _ => None, - } - } -} - -/// Device creation errors during `open`. -#[derive(Clone, Debug, PartialEq)] -pub enum CreationError { - /// Out of either host or device memory. - OutOfMemory(OutOfMemory), - /// Device initialization failed due to implementation specific errors. 
- InitializationFailed, - /// At least one of the user requested extensions if not supported by the - /// physical device. - MissingExtension, - /// At least one of the user requested features if not supported by the - /// physical device. - /// - /// Use [`features`](trait.PhysicalDevice.html#tymethod.features) - /// for checking the supported features. - MissingFeature, - /// Too many logical devices have been created from this physical device. - /// - /// The implementation may only support one logical device for each physical - /// device or lacks resources to allocate a new device. - TooManyObjects, - /// The logical or physical device are lost during the device creation - /// process. - /// - /// This may be caused by hardware failure, physical device removal, - /// power outage, etc. - DeviceLost, -} - -impl std::fmt::Display for CreationError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - CreationError::OutOfMemory(err) => write!(fmt, "Failed to create device: {}", err), - CreationError::InitializationFailed => write!(fmt, "Failed to create device: Implementation specific error occurred"), - CreationError::MissingExtension => write!(fmt, "Failed to create device: Requested extension is missing"), - CreationError::MissingFeature => write!(fmt, "Failed to create device: Requested feature is missing"), - CreationError::TooManyObjects => write!(fmt, "Failed to create device: Too many objects"), - CreationError::DeviceLost => write!(fmt, "Failed to create device: Logical or Physical device was lost during creation"), - } - } -} - -impl std::error::Error for CreationError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - CreationError::OutOfMemory(err) => Some(err), - _ => None, - } - } -} - -/// Error accessing a mapping. -#[derive(Clone, Debug, PartialEq)] -pub enum MapError { - /// Out of either host or device memory. 
- OutOfMemory(OutOfMemory), - /// The requested mapping range is outside of the resource. - OutOfBounds, - /// Failed to allocate an appropriately sized contiguous virtual address range - MappingFailed, -} - -impl From for MapError { - fn from(error: OutOfMemory) -> Self { - MapError::OutOfMemory(error) - } -} - -impl std::fmt::Display for MapError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - MapError::OutOfMemory(err) => write!(fmt, "Failed to map memory: {}", err), - MapError::OutOfBounds => write!(fmt, "Failed to map memory: Requested range is outside the resource"), - MapError::MappingFailed => write!(fmt, "Failed to map memory: Unable to allocate an appropriately sized contiguous virtual address range"), - } - } -} - -impl std::error::Error for MapError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - MapError::OutOfMemory(err) => Some(err), - _ => None, - } - } -} - -/// Error binding a resource to memory allocation. -#[derive(Clone, Debug, PartialEq)] -pub enum BindError { - /// Out of either host or device memory. - OutOfMemory(OutOfMemory), - /// Requested binding to memory that doesn't support the required operations. - WrongMemory, - /// Requested binding to an invalid memory. 
- OutOfBounds, -} - -impl From for BindError { - fn from(error: OutOfMemory) -> Self { - BindError::OutOfMemory(error) - } -} - -impl std::fmt::Display for BindError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BindError::OutOfMemory(err) => write!(fmt, "Failed to bind object to memory range: {}", err), - BindError::OutOfBounds => write!(fmt, "Failed to bind object to memory range: Requested range is outside the resource"), - BindError::WrongMemory => write!(fmt, "Failed to bind object to memory range: Wrong memory"), - } - } -} - -impl std::error::Error for BindError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - BindError::OutOfMemory(err) => Some(err), - _ => None, - } - } -} - -/// Specifies the waiting targets. -#[derive(Clone, Debug, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum WaitFor { - /// Wait for any target. - Any, - /// Wait for all targets at once. - All, -} - -/// An error from creating a shader module. -#[derive(Clone, Debug, PartialEq)] -pub enum ShaderError { - /// The shader failed to compile. - CompilationFailed(String), - /// The shader is missing an entry point. - MissingEntryPoint(String), - /// The shader has a mismatch of interface (e.g missing push constants). - InterfaceMismatch(String), - /// The shader stage is not supported. - UnsupportedStage(pso::Stage), - /// Out of either host or device memory. 
- OutOfMemory(OutOfMemory), -} - -impl From for ShaderError { - fn from(error: OutOfMemory) -> Self { - ShaderError::OutOfMemory(error) - } -} - -impl std::fmt::Display for ShaderError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ShaderError::OutOfMemory(err) => write!(fmt, "Shader error: {}", err), - ShaderError::CompilationFailed(string) => write!(fmt, "Shader error: Compilation failed: {}", string), - ShaderError::MissingEntryPoint(string) => write!(fmt, "Shader error: Missing entry point: {}", string), - ShaderError::InterfaceMismatch(string) => write!(fmt, "Shader error: Interface mismatch: {}", string), - ShaderError::UnsupportedStage(stage) => write!(fmt, "Shader error: Unsupported stage: {:?}", stage), - } - } -} - -impl std::error::Error for ShaderError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - ShaderError::OutOfMemory(err) => Some(err), - _ => None, - } - } -} - -/// # Overview -/// -/// A `Device` is responsible for creating and managing resources for the physical device -/// it was created from. -/// -/// ## Resource Construction and Handling -/// -/// This device structure can then be used to create and manage different resources, like buffers, -/// shader programs and textures. See the individual methods for more information. -/// -/// ## Mutability -/// -/// All the methods get `&self`. Any internal mutability of the `Device` is hidden from the user. -/// -/// ## Synchronization -/// -/// `Device` should be usable concurrently from multiple threads. The `Send` and `Sync` bounds -/// are not enforced at the HAL level due to OpenGL constraint (to be revised). Users can still -/// benefit from the backends that support synchronization of the `Device`. -/// -pub trait Device: fmt::Debug + Any + Send + Sync { - /// Allocates a memory segment of a specified type. - /// - /// There is only a limited amount of allocations allowed depending on the implementation! 
- /// - /// # Arguments - /// - /// * `memory_type` - Index of the memory type in the memory properties of the associated physical device. - /// * `size` - Size of the allocation. - unsafe fn allocate_memory( - &self, - memory_type: MemoryTypeId, - size: u64, - ) -> Result; - - /// Free device memory - unsafe fn free_memory(&self, memory: B::Memory); - - /// Create a new command pool for a given queue family. - /// - /// *Note*: the family has to be associated by one as the `Gpu::queue_groups`. - unsafe fn create_command_pool( - &self, - family: QueueFamilyId, - create_flags: CommandPoolCreateFlags, - ) -> Result; - - /// Destroy a command pool. - unsafe fn destroy_command_pool(&self, pool: B::CommandPool); - - /// Create a render pass with the given attachments and subpasses. - /// - /// A *render pass* represents a collection of attachments, subpasses, and dependencies between - /// the subpasses, and describes how the attachments are used over the course of the subpasses. - /// The use of a render pass in a command buffer is a *render pass* instance. - unsafe fn create_render_pass<'a, IA, IS, ID>( - &self, - attachments: IA, - subpasses: IS, - dependencies: ID, - ) -> Result - where - IA: IntoIterator, - IA::Item: Borrow, - IS: IntoIterator, - IS::Item: Borrow>, - ID: IntoIterator, - ID::Item: Borrow; - - /// Destroy a `RenderPass`. - unsafe fn destroy_render_pass(&self, rp: B::RenderPass); - - /// Create a new pipeline layout object. - /// - /// # Arguments - /// - /// * `set_layouts` - Descriptor set layouts - /// * `push_constants` - Ranges of push constants. A shader stage may only contain one push - /// constant block. The range is defined in units of bytes. - /// - /// # PipelineLayout - /// - /// Access to descriptor sets from a pipeline is accomplished through a *pipeline layout*. 
- /// Zero or more descriptor set layouts and zero or more push constant ranges are combined to - /// form a pipeline layout object which describes the complete set of resources that **can** be - /// accessed by a pipeline. The pipeline layout represents a sequence of descriptor sets with - /// each having a specific layout. This sequence of layouts is used to determine the interface - /// between shader stages and shader resources. Each pipeline is created using a pipeline layout. - unsafe fn create_pipeline_layout( - &self, - set_layouts: IS, - push_constant: IR, - ) -> Result - where - IS: IntoIterator, - IS::Item: Borrow, - IR: IntoIterator, - IR::Item: Borrow<(pso::ShaderStageFlags, Range)>; - - /// Destroy a pipeline layout object - unsafe fn destroy_pipeline_layout(&self, layout: B::PipelineLayout); - - /// Create a pipeline cache object. - unsafe fn create_pipeline_cache( - &self, - data: Option<&[u8]>, - ) -> Result; - - /// Retrieve data from pipeline cache object. - unsafe fn get_pipeline_cache_data( - &self, - cache: &B::PipelineCache, - ) -> Result, OutOfMemory>; - - /// Merge a number of source pipeline caches into the target one. - unsafe fn merge_pipeline_caches( - &self, - target: &B::PipelineCache, - sources: I, - ) -> Result<(), OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow; - - /// Destroy a pipeline cache object. - unsafe fn destroy_pipeline_cache(&self, cache: B::PipelineCache); - - /// Create a graphics pipeline. - unsafe fn create_graphics_pipeline<'a>( - &self, - desc: &pso::GraphicsPipelineDesc<'a, B>, - cache: Option<&B::PipelineCache>, - ) -> Result; - - /// Create graphics pipelines. - unsafe fn create_graphics_pipelines<'a, I>( - &self, - descs: I, - cache: Option<&B::PipelineCache>, - ) -> Vec> - where - I: IntoIterator, - I::Item: Borrow>, - { - descs - .into_iter() - .map(|desc| self.create_graphics_pipeline(desc.borrow(), cache)) - .collect() - } - - /// Destroy a graphics pipeline. 
- /// - /// The graphics pipeline shouldn't be destroyed before any submitted command buffer, - /// which references the graphics pipeline, has finished execution. - unsafe fn destroy_graphics_pipeline(&self, pipeline: B::GraphicsPipeline); - - /// Create a compute pipeline. - unsafe fn create_compute_pipeline<'a>( - &self, - desc: &pso::ComputePipelineDesc<'a, B>, - cache: Option<&B::PipelineCache>, - ) -> Result; - - /// Create compute pipelines. - unsafe fn create_compute_pipelines<'a, I>( - &self, - descs: I, - cache: Option<&B::PipelineCache>, - ) -> Vec> - where - I: IntoIterator, - I::Item: Borrow>, - { - descs - .into_iter() - .map(|desc| self.create_compute_pipeline(desc.borrow(), cache)) - .collect() - } - - /// Destroy a compute pipeline. - /// - /// The compute pipeline shouldn't be destroyed before any submitted command buffer, - /// which references the compute pipeline, has finished execution. - unsafe fn destroy_compute_pipeline(&self, pipeline: B::ComputePipeline); - - /// Create a new framebuffer object. - /// - /// # Safety - /// - `extent.width`, `extent.height` and `extent.depth` **must** be greater than `0`. - unsafe fn create_framebuffer( - &self, - pass: &B::RenderPass, - attachments: I, - extent: image::Extent, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow; - - /// Destroy a framebuffer. - /// - /// The framebuffer shouldn't be destroy before any submitted command buffer, - /// which references the framebuffer, has finished execution. - unsafe fn destroy_framebuffer(&self, buf: B::Framebuffer); - - /// Create a new shader module object through the SPIR-V binary data. - /// - /// Once a shader module has been created, any entry points it contains can be used in pipeline - /// shader stages as described in *Compute Pipelines* and *Graphics Pipelines*. 
- unsafe fn create_shader_module( - &self, - spirv_data: &[u32], - ) -> Result; - - /// Destroy a shader module module - /// - /// A shader module can be destroyed while pipelines created using its shaders are still in use. - unsafe fn destroy_shader_module(&self, shader: B::ShaderModule); - - /// Create a new buffer (unbound). - /// - /// The created buffer won't have associated memory until `bind_buffer_memory` is called. - unsafe fn create_buffer( - &self, - size: u64, - usage: buffer::Usage, - ) -> Result; - - /// Get memory requirements for the buffer - unsafe fn get_buffer_requirements(&self, buf: &B::Buffer) -> Requirements; - - /// Bind memory to a buffer. - /// - /// Be sure to check that there is enough memory available for the buffer. - /// Use `get_buffer_requirements` to acquire the memory requirements. - unsafe fn bind_buffer_memory( - &self, - memory: &B::Memory, - offset: u64, - buf: &mut B::Buffer, - ) -> Result<(), BindError>; - - /// Destroy a buffer. - /// - /// The buffer shouldn't be destroyed before any submitted command buffer, - /// which references the images, has finished execution. 
- unsafe fn destroy_buffer(&self, buffer: B::Buffer); - - /// Create a new buffer view object - unsafe fn create_buffer_view>( - &self, - buf: &B::Buffer, - fmt: Option, - range: R, - ) -> Result; - - /// Destroy a buffer view object - unsafe fn destroy_buffer_view(&self, view: B::BufferView); - - /// Create a new image object - unsafe fn create_image( - &self, - kind: image::Kind, - mip_levels: image::Level, - format: format::Format, - tiling: image::Tiling, - usage: image::Usage, - view_caps: image::ViewCapabilities, - ) -> Result; - - /// Get memory requirements for the Image - unsafe fn get_image_requirements(&self, image: &B::Image) -> Requirements; - - /// - unsafe fn get_image_subresource_footprint( - &self, - image: &B::Image, - subresource: image::Subresource, - ) -> image::SubresourceFootprint; - - /// Bind device memory to an image object - unsafe fn bind_image_memory( - &self, - memory: &B::Memory, - offset: u64, - image: &mut B::Image, - ) -> Result<(), BindError>; - - /// Destroy an image. - /// - /// The image shouldn't be destroyed before any submitted command buffer, - /// which references the images, has finished execution. - unsafe fn destroy_image(&self, image: B::Image); - - /// Create an image view from an existing image - unsafe fn create_image_view( - &self, - image: &B::Image, - view_kind: image::ViewKind, - format: format::Format, - swizzle: format::Swizzle, - range: image::SubresourceRange, - ) -> Result; - - /// Destroy an image view object - unsafe fn destroy_image_view(&self, view: B::ImageView); - - /// Create a new sampler object - unsafe fn create_sampler( - &self, - desc: &image::SamplerDesc, - ) -> Result; - - /// Destroy a sampler object - unsafe fn destroy_sampler(&self, sampler: B::Sampler); - - /// Create a descriptor pool. - /// - /// Descriptor pools allow allocation of descriptor sets. - /// The pool can't be modified directly, only through updating descriptor sets. 
- unsafe fn create_descriptor_pool( - &self, - max_sets: usize, - descriptor_ranges: I, - flags: DescriptorPoolCreateFlags, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow; - - /// Destroy a descriptor pool object - /// - /// When a pool is destroyed, all descriptor sets allocated from the pool are implicitly freed - /// and become invalid. Descriptor sets allocated from a given pool do not need to be freed - /// before destroying that descriptor pool. - unsafe fn destroy_descriptor_pool(&self, pool: B::DescriptorPool); - - /// Create a descriptor set layout. - /// - /// A descriptor set layout object is defined by an array of zero or more descriptor bindings. - /// Each individual descriptor binding is specified by a descriptor type, a count (array size) - /// of the number of descriptors in the binding, a set of shader stages that **can** access the - /// binding, and (if using immutable samplers) an array of sampler descriptors. - unsafe fn create_descriptor_set_layout( - &self, - bindings: I, - immutable_samplers: J, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow, - J: IntoIterator, - J::Item: Borrow; - - /// Destroy a descriptor set layout object - unsafe fn destroy_descriptor_set_layout(&self, layout: B::DescriptorSetLayout); - - /// Specifying the parameters of a descriptor set write operation - unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) - where - I: IntoIterator>, - J: IntoIterator, - J::Item: Borrow>; - - /// Structure specifying a copy descriptor set operation - unsafe fn copy_descriptor_sets<'a, I>(&self, copy_iter: I) - where - I: IntoIterator, - I::Item: Borrow>; - - /// Map a memory object into application address space - /// - /// Call `map_memory()` to retrieve a host virtual address pointer to a region of a mappable memory object - unsafe fn map_memory(&self, memory: &B::Memory, range: R) -> Result<*mut u8, MapError> - where - R: RangeArg; - - /// Flush mapped memory ranges - unsafe fn 
flush_mapped_memory_ranges<'a, I, R>(&self, ranges: I) -> Result<(), OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<(&'a B::Memory, R)>, - R: RangeArg; - - /// Invalidate ranges of non-coherent memory from the host caches - unsafe fn invalidate_mapped_memory_ranges<'a, I, R>( - &self, - ranges: I, - ) -> Result<(), OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow<(&'a B::Memory, R)>, - R: RangeArg; - - /// Unmap a memory object once host access to it is no longer needed by the application - unsafe fn unmap_memory(&self, memory: &B::Memory); - - /// Create a new semaphore object - fn create_semaphore(&self) -> Result; - - /// Destroy a semaphore object - unsafe fn destroy_semaphore(&self, semaphore: B::Semaphore); - - /// Create a new fence object - /// - /// Fences are a synchronization primitive that **can** be used to insert a dependency from - /// a queue to the host. Fences have two states - signaled and unsignaled. A fence **can** be - /// signaled as part of the execution of a *queue submission* command. Fences **can** be unsignaled - /// on the host with *reset_fences*. Fences **can** be waited on by the host with the - /// *wait_for_fences* command, and the current state **can** be queried with *get_fence_status*. - fn create_fence(&self, signaled: bool) -> Result; - - /// - unsafe fn reset_fence(&self, fence: &B::Fence) -> Result<(), OutOfMemory> { - self.reset_fences(iter::once(fence)) - } - - /// - unsafe fn reset_fences(&self, fences: I) -> Result<(), OutOfMemory> - where - I: IntoIterator, - I::Item: Borrow, - { - for fence in fences { - self.reset_fence(fence.borrow())?; - } - Ok(()) - } - - /// Blocks until the given fence is signaled. - /// Returns true if the fence was signaled before the timeout. - unsafe fn wait_for_fence( - &self, - fence: &B::Fence, - timeout_ns: u64, - ) -> Result { - self.wait_for_fences(iter::once(fence), WaitFor::All, timeout_ns) - } - - /// Blocks until all or one of the given fences are signaled. 
- /// Returns true if fences were signaled before the timeout. - unsafe fn wait_for_fences( - &self, - fences: I, - wait: WaitFor, - timeout_ns: u64, - ) -> Result - where - I: IntoIterator, - I::Item: Borrow, - { - use std::{thread, time}; - fn to_ns(duration: time::Duration) -> u64 { - duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64 - } - - let start = time::Instant::now(); - match wait { - WaitFor::All => { - for fence in fences { - if !self.wait_for_fence(fence.borrow(), 0)? { - let elapsed_ns = to_ns(start.elapsed()); - if elapsed_ns > timeout_ns { - return Ok(false); - } - if !self.wait_for_fence(fence.borrow(), timeout_ns - elapsed_ns)? { - return Ok(false); - } - } - } - Ok(true) - } - WaitFor::Any => { - let fences: Vec<_> = fences.into_iter().collect(); - loop { - for fence in &fences { - if self.wait_for_fence(fence.borrow(), 0)? { - return Ok(true); - } - } - if to_ns(start.elapsed()) >= timeout_ns { - return Ok(false); - } - thread::sleep(time::Duration::from_millis(1)); - } - } - } - } - - /// true for signaled, false for not ready - unsafe fn get_fence_status(&self, fence: &B::Fence) -> Result; - - /// Destroy a fence object - unsafe fn destroy_fence(&self, fence: B::Fence); - - /// Create an event object. - fn create_event(&self) -> Result; - - /// Destroy an event object. - unsafe fn destroy_event(&self, event: B::Event); - - /// Query the status of an event. - /// - /// Returns `true` if the event is set, or `false` if it is reset. - unsafe fn get_event_status(&self, event: &B::Event) -> Result; - - /// Sets an event. - unsafe fn set_event(&self, event: &B::Event) -> Result<(), OutOfMemory>; - - /// Resets an event. - unsafe fn reset_event(&self, event: &B::Event) -> Result<(), OutOfMemory>; - - /// Create a new query pool object - /// - /// Queries are managed using query pool objects. Each query pool is a collection of a specific - /// number of queries of a particular type. 
- unsafe fn create_query_pool( - &self, - ty: query::Type, - count: query::Id, - ) -> Result; - - /// Destroy a query pool object - unsafe fn destroy_query_pool(&self, pool: B::QueryPool); - - /// Get query pool results into the specified CPU memory. - /// Returns `Ok(false)` if the results are not ready yet and neither of `WAIT` or `PARTIAL` flags are set. - unsafe fn get_query_pool_results( - &self, - pool: &B::QueryPool, - queries: Range, - data: &mut [u8], - stride: buffer::Offset, - flags: query::ResultFlags, - ) -> Result; - - /// Create a new swapchain from a surface and a queue family, optionally providing the old - /// swapchain to aid in resource reuse and rendering continuity. - /// - /// *Note*: The number of exposed images in the back buffer might differ - /// from number of internally used buffers. - /// - /// # Safety - /// - /// The queue family _must_ support surface presentation. - /// This can be checked by calling [`supports_queue_family`](trait.Surface.html#tymethod.supports_queue_family) - /// on this surface. - /// - /// # Examples - /// - /// ```no_run - /// # extern crate gfx_backend_empty as empty; - /// # extern crate gfx_hal; - /// # fn main() { - /// use gfx_hal::{prelude::*, format::Format, window::SwapchainConfig}; - /// - /// # let mut surface: empty::Surface = return; - /// # let device: empty::Device = return; - /// # unsafe { - /// let swapchain_config = SwapchainConfig::new(100, 100, Format::Rgba8Srgb, 2); - /// device.create_swapchain(&mut surface, swapchain_config, None); - /// # }} - /// ``` - unsafe fn create_swapchain( - &self, - surface: &mut B::Surface, - config: SwapchainConfig, - old_swapchain: Option, - ) -> Result<(B::Swapchain, Vec), window::CreationError>; - - /// - unsafe fn destroy_swapchain(&self, swapchain: B::Swapchain); - - /// Wait for all queues associated with this device to idle. - /// - /// Host access to all queues needs to be **externally** sycnhronized! 
- fn wait_idle(&self) -> Result<(), OutOfMemory>; - - /// Associate a name with an image, for easier debugging in external tools or with validation - /// layers that can print a friendly name when referring to objects in error messages - unsafe fn set_image_name(&self, image: &mut B::Image, name: &str); - /// Associate a name with a buffer, for easier debugging in external tools or with validation - /// layers that can print a friendly name when referring to objects in error messages - unsafe fn set_buffer_name(&self, buffer: &mut B::Buffer, name: &str); - /// Associate a name with a command buffer, for easier debugging in external tools or with - /// validation layers that can print a friendly name when referring to objects in error messages - unsafe fn set_command_buffer_name(&self, command_buffer: &mut B::CommandBuffer, name: &str); - /// Associate a name with a semaphore, for easier debugging in external tools or with validation - /// layers that can print a friendly name when referring to objects in error messages - unsafe fn set_semaphore_name(&self, semaphore: &mut B::Semaphore, name: &str); - /// Associate a name with a fence, for easier debugging in external tools or with validation - /// layers that can print a friendly name when referring to objects in error messages - unsafe fn set_fence_name(&self, fence: &mut B::Fence, name: &str); - /// Associate a name with a framebuffer, for easier debugging in external tools or with - /// validation layers that can print a friendly name when referring to objects in error messages - unsafe fn set_framebuffer_name(&self, framebuffer: &mut B::Framebuffer, name: &str); - /// Associate a name with a render pass, for easier debugging in external tools or with - /// validation layers that can print a friendly name when referring to objects in error messages - unsafe fn set_render_pass_name(&self, render_pass: &mut B::RenderPass, name: &str); - /// Associate a name with a descriptor set, for easier debugging in external 
tools or with - /// validation layers that can print a friendly name when referring to objects in error messages - unsafe fn set_descriptor_set_name(&self, descriptor_set: &mut B::DescriptorSet, name: &str); - /// Associate a name with a descriptor set layout, for easier debugging in external tools or - /// with validation layers that can print a friendly name when referring to objects in error - /// messages - unsafe fn set_descriptor_set_layout_name( - &self, - descriptor_set_layout: &mut B::DescriptorSetLayout, - name: &str, - ); -} +//! Logical device +//! +//! # Device +//! +//! This module exposes the `Device` trait, which provides methods for creating +//! and managing graphics resources such as buffers, images and memory. +//! +//! The `Adapter` and `Device` types are very similar to the Vulkan concept of +//! "physical devices" vs. "logical devices"; an `Adapter` is single GPU +//! (or CPU) that implements a backend, a `Device` is a +//! handle to that physical device that has the requested capabilities +//! and is used to actually do things. + +use std::any::Any; +use std::borrow::Borrow; +use std::ops::Range; +use std::{fmt, iter}; + +use crate::{ + buffer, + format, + image, + memory::{Requirements, Segment}, + pass, + pool::CommandPoolCreateFlags, + pso, + pso::DescriptorPoolCreateFlags, + query, + queue::QueueFamilyId, + window::{self, SwapchainConfig}, + Backend, + MemoryTypeId, +}; + +/// Error occurred caused device to be lost. +#[derive(Clone, Debug, PartialEq)] +pub struct DeviceLost; + +impl std::fmt::Display for DeviceLost { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fmt.write_str("Device lost") + } +} + +impl std::error::Error for DeviceLost {} + +/// Error occurred caused surface to be lost. 
+#[derive(Clone, Debug, PartialEq)] +pub struct SurfaceLost; + +impl std::fmt::Display for SurfaceLost { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fmt.write_str("Surface lost") + } +} + +impl std::error::Error for SurfaceLost {} + +/// Native window is already in use by graphics API. +#[derive(Clone, Debug, PartialEq)] +pub struct WindowInUse; + +impl std::fmt::Display for WindowInUse { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fmt.write_str("Window is in use") + } +} + +impl std::error::Error for WindowInUse {} + +/// Error allocating memory. +#[derive(Clone, Debug, PartialEq)] +pub enum OutOfMemory { + /// Host memory exhausted. + Host, + /// Device memory exhausted. + Device, +} + +impl std::fmt::Display for OutOfMemory { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + OutOfMemory::Host => write!(fmt, "Out of host memory"), + OutOfMemory::Device => write!(fmt, "Out of device memory"), + } + } +} + +impl std::error::Error for OutOfMemory {} + +/// Error occurred caused device to be lost +/// or out of memory error. +#[derive(Clone, Debug, PartialEq)] +pub enum OomOrDeviceLost { + /// Out of either host or device memory. 
+ OutOfMemory(OutOfMemory), + /// Device is lost + DeviceLost(DeviceLost), +} + +impl From for OomOrDeviceLost { + fn from(error: OutOfMemory) -> Self { + OomOrDeviceLost::OutOfMemory(error) + } +} + +impl From for OomOrDeviceLost { + fn from(error: DeviceLost) -> Self { + OomOrDeviceLost::DeviceLost(error) + } +} + +impl std::fmt::Display for OomOrDeviceLost { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + OomOrDeviceLost::DeviceLost(err) => write!(fmt, "Failed querying device: {}", err), + OomOrDeviceLost::OutOfMemory(err) => write!(fmt, "Failed querying device: {}", err), + } + } +} + +impl std::error::Error for OomOrDeviceLost { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + OomOrDeviceLost::DeviceLost(err) => Some(err), + OomOrDeviceLost::OutOfMemory(err) => Some(err), + } + } +} + +/// Possible cause of allocation failure. +#[derive(Clone, Debug, PartialEq)] +pub enum AllocationError { + /// Out of either host or device memory. + OutOfMemory(OutOfMemory), + + /// Cannot create any more objects. + TooManyObjects, +} + +impl From for AllocationError { + fn from(error: OutOfMemory) -> Self { + AllocationError::OutOfMemory(error) + } +} + +impl std::fmt::Display for AllocationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AllocationError::OutOfMemory(err) => write!(fmt, "Failed to allocate object: {}", err), + AllocationError::TooManyObjects => { + write!(fmt, "Failed to allocate object: Too many objects") + } + } + } +} + +impl std::error::Error for AllocationError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + AllocationError::OutOfMemory(err) => Some(err), + _ => None, + } + } +} + +/// Device creation errors during `open`. +#[derive(Clone, Debug, PartialEq)] +pub enum CreationError { + /// Out of either host or device memory. 
+ OutOfMemory(OutOfMemory), + /// Device initialization failed due to implementation specific errors. + InitializationFailed, + /// At least one of the user requested extensions if not supported by the + /// physical device. + MissingExtension, + /// At least one of the user requested features if not supported by the + /// physical device. + /// + /// Use [`features`](trait.PhysicalDevice.html#tymethod.features) + /// for checking the supported features. + MissingFeature, + /// Too many logical devices have been created from this physical device. + /// + /// The implementation may only support one logical device for each physical + /// device or lacks resources to allocate a new device. + TooManyObjects, + /// The logical or physical device are lost during the device creation + /// process. + /// + /// This may be caused by hardware failure, physical device removal, + /// power outage, etc. + DeviceLost, +} + +impl std::fmt::Display for CreationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CreationError::OutOfMemory(err) => write!(fmt, "Failed to create device: {}", err), + CreationError::InitializationFailed => write!( + fmt, + "Failed to create device: Implementation specific error occurred" + ), + CreationError::MissingExtension => write!( + fmt, + "Failed to create device: Requested extension is missing" + ), + CreationError::MissingFeature => { + write!(fmt, "Failed to create device: Requested feature is missing") + } + CreationError::TooManyObjects => { + write!(fmt, "Failed to create device: Too many objects") + } + CreationError::DeviceLost => write!( + fmt, + "Failed to create device: Logical or Physical device was lost during creation" + ), + } + } +} + +impl std::error::Error for CreationError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + CreationError::OutOfMemory(err) => Some(err), + _ => None, + } + } +} + +/// Error accessing a mapping. 
+#[derive(Clone, Debug, PartialEq)] +pub enum MapError { + /// Out of either host or device memory. + OutOfMemory(OutOfMemory), + /// The requested mapping range is outside of the resource. + OutOfBounds, + /// Failed to allocate an appropriately sized contiguous virtual address range + MappingFailed, +} + +impl From for MapError { + fn from(error: OutOfMemory) -> Self { + MapError::OutOfMemory(error) + } +} + +impl std::fmt::Display for MapError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MapError::OutOfMemory(err) => write!(fmt, "Failed to map memory: {}", err), + MapError::OutOfBounds => write!(fmt, "Failed to map memory: Requested range is outside the resource"), + MapError::MappingFailed => write!(fmt, "Failed to map memory: Unable to allocate an appropriately sized contiguous virtual address range"), + } + } +} + +impl std::error::Error for MapError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + MapError::OutOfMemory(err) => Some(err), + _ => None, + } + } +} + +/// Error binding a resource to memory allocation. +#[derive(Clone, Debug, PartialEq)] +pub enum BindError { + /// Out of either host or device memory. + OutOfMemory(OutOfMemory), + /// Requested binding to memory that doesn't support the required operations. + WrongMemory, + /// Requested binding to an invalid memory. 
+ OutOfBounds, +} + +impl From for BindError { + fn from(error: OutOfMemory) -> Self { + BindError::OutOfMemory(error) + } +} + +impl std::fmt::Display for BindError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BindError::OutOfMemory(err) => { + write!(fmt, "Failed to bind object to memory range: {}", err) + } + BindError::OutOfBounds => write!( + fmt, + "Failed to bind object to memory range: Requested range is outside the resource" + ), + BindError::WrongMemory => { + write!(fmt, "Failed to bind object to memory range: Wrong memory") + } + } + } +} + +impl std::error::Error for BindError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + BindError::OutOfMemory(err) => Some(err), + _ => None, + } + } +} + +/// Specifies the waiting targets. +#[derive(Clone, Debug, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum WaitFor { + /// Wait for any target. + Any, + /// Wait for all targets at once. + All, +} + +/// An error from creating a shader module. +#[derive(Clone, Debug, PartialEq)] +pub enum ShaderError { + /// The shader failed to compile. + CompilationFailed(String), + /// The shader is missing an entry point. + MissingEntryPoint(String), + /// The shader has a mismatch of interface (e.g missing push constants). + InterfaceMismatch(String), + /// The shader stage is not supported. + UnsupportedStage(pso::Stage), + /// Out of either host or device memory. 
+ OutOfMemory(OutOfMemory), +} + +impl From for ShaderError { + fn from(error: OutOfMemory) -> Self { + ShaderError::OutOfMemory(error) + } +} + +impl std::fmt::Display for ShaderError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ShaderError::OutOfMemory(err) => write!(fmt, "Shader error: {}", err), + ShaderError::CompilationFailed(string) => { + write!(fmt, "Shader error: Compilation failed: {}", string) + } + ShaderError::MissingEntryPoint(string) => { + write!(fmt, "Shader error: Missing entry point: {}", string) + } + ShaderError::InterfaceMismatch(string) => { + write!(fmt, "Shader error: Interface mismatch: {}", string) + } + ShaderError::UnsupportedStage(stage) => { + write!(fmt, "Shader error: Unsupported stage: {:?}", stage) + } + } + } +} + +impl std::error::Error for ShaderError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + ShaderError::OutOfMemory(err) => Some(err), + _ => None, + } + } +} + +/// # Overview +/// +/// A `Device` is responsible for creating and managing resources for the physical device +/// it was created from. +/// +/// ## Resource Construction and Handling +/// +/// This device structure can then be used to create and manage different resources, like buffers, +/// shader programs and textures. See the individual methods for more information. +/// +/// ## Mutability +/// +/// All the methods get `&self`. Any internal mutability of the `Device` is hidden from the user. +/// +/// ## Synchronization +/// +/// `Device` should be usable concurrently from multiple threads. The `Send` and `Sync` bounds +/// are not enforced at the HAL level due to OpenGL constraint (to be revised). Users can still +/// benefit from the backends that support synchronization of the `Device`. +/// +pub trait Device: fmt::Debug + Any + Send + Sync { + /// Allocates a memory segment of a specified type. 
+ /// + /// There is only a limited amount of allocations allowed depending on the implementation! + /// + /// # Arguments + /// + /// * `memory_type` - Index of the memory type in the memory properties of the associated physical device. + /// * `size` - Size of the allocation. + unsafe fn allocate_memory( + &self, + memory_type: MemoryTypeId, + size: u64, + ) -> Result; + + /// Free device memory + unsafe fn free_memory(&self, memory: B::Memory); + + /// Create a new command pool for a given queue family. + /// + /// *Note*: the family has to be associated by one as the `Gpu::queue_groups`. + unsafe fn create_command_pool( + &self, + family: QueueFamilyId, + create_flags: CommandPoolCreateFlags, + ) -> Result; + + /// Destroy a command pool. + unsafe fn destroy_command_pool(&self, pool: B::CommandPool); + + /// Create a render pass with the given attachments and subpasses. + /// + /// A *render pass* represents a collection of attachments, subpasses, and dependencies between + /// the subpasses, and describes how the attachments are used over the course of the subpasses. + /// The use of a render pass in a command buffer is a *render pass* instance. + unsafe fn create_render_pass<'a, IA, IS, ID>( + &self, + attachments: IA, + subpasses: IS, + dependencies: ID, + ) -> Result + where + IA: IntoIterator, + IA::Item: Borrow, + IS: IntoIterator, + IS::Item: Borrow>, + ID: IntoIterator, + ID::Item: Borrow; + + /// Destroy a `RenderPass`. + unsafe fn destroy_render_pass(&self, rp: B::RenderPass); + + /// Create a new pipeline layout object. + /// + /// # Arguments + /// + /// * `set_layouts` - Descriptor set layouts + /// * `push_constants` - Ranges of push constants. A shader stage may only contain one push + /// constant block. The range is defined in units of bytes. + /// + /// # PipelineLayout + /// + /// Access to descriptor sets from a pipeline is accomplished through a *pipeline layout*. 
+ /// Zero or more descriptor set layouts and zero or more push constant ranges are combined to + /// form a pipeline layout object which describes the complete set of resources that **can** be + /// accessed by a pipeline. The pipeline layout represents a sequence of descriptor sets with + /// each having a specific layout. This sequence of layouts is used to determine the interface + /// between shader stages and shader resources. Each pipeline is created using a pipeline layout. + unsafe fn create_pipeline_layout( + &self, + set_layouts: IS, + push_constant: IR, + ) -> Result + where + IS: IntoIterator, + IS::Item: Borrow, + IR: IntoIterator, + IR::Item: Borrow<(pso::ShaderStageFlags, Range)>; + + /// Destroy a pipeline layout object + unsafe fn destroy_pipeline_layout(&self, layout: B::PipelineLayout); + + /// Create a pipeline cache object. + unsafe fn create_pipeline_cache( + &self, + data: Option<&[u8]>, + ) -> Result; + + /// Retrieve data from pipeline cache object. + unsafe fn get_pipeline_cache_data( + &self, + cache: &B::PipelineCache, + ) -> Result, OutOfMemory>; + + /// Merge a number of source pipeline caches into the target one. + unsafe fn merge_pipeline_caches( + &self, + target: &B::PipelineCache, + sources: I, + ) -> Result<(), OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow; + + /// Destroy a pipeline cache object. + unsafe fn destroy_pipeline_cache(&self, cache: B::PipelineCache); + + /// Create a graphics pipeline. + unsafe fn create_graphics_pipeline<'a>( + &self, + desc: &pso::GraphicsPipelineDesc<'a, B>, + cache: Option<&B::PipelineCache>, + ) -> Result; + + /// Create graphics pipelines. + unsafe fn create_graphics_pipelines<'a, I>( + &self, + descs: I, + cache: Option<&B::PipelineCache>, + ) -> Vec> + where + I: IntoIterator, + I::Item: Borrow>, + { + descs + .into_iter() + .map(|desc| self.create_graphics_pipeline(desc.borrow(), cache)) + .collect() + } + + /// Destroy a graphics pipeline. 
+ /// + /// The graphics pipeline shouldn't be destroyed before any submitted command buffer, + /// which references the graphics pipeline, has finished execution. + unsafe fn destroy_graphics_pipeline(&self, pipeline: B::GraphicsPipeline); + + /// Create a compute pipeline. + unsafe fn create_compute_pipeline<'a>( + &self, + desc: &pso::ComputePipelineDesc<'a, B>, + cache: Option<&B::PipelineCache>, + ) -> Result; + + /// Create compute pipelines. + unsafe fn create_compute_pipelines<'a, I>( + &self, + descs: I, + cache: Option<&B::PipelineCache>, + ) -> Vec> + where + I: IntoIterator, + I::Item: Borrow>, + { + descs + .into_iter() + .map(|desc| self.create_compute_pipeline(desc.borrow(), cache)) + .collect() + } + + /// Destroy a compute pipeline. + /// + /// The compute pipeline shouldn't be destroyed before any submitted command buffer, + /// which references the compute pipeline, has finished execution. + unsafe fn destroy_compute_pipeline(&self, pipeline: B::ComputePipeline); + + /// Create a new framebuffer object. + /// + /// # Safety + /// - `extent.width`, `extent.height` and `extent.depth` **must** be greater than `0`. + unsafe fn create_framebuffer( + &self, + pass: &B::RenderPass, + attachments: I, + extent: image::Extent, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow; + + /// Destroy a framebuffer. + /// + /// The framebuffer shouldn't be destroy before any submitted command buffer, + /// which references the framebuffer, has finished execution. + unsafe fn destroy_framebuffer(&self, buf: B::Framebuffer); + + /// Create a new shader module object through the SPIR-V binary data. + /// + /// Once a shader module has been created, any entry points it contains can be used in pipeline + /// shader stages as described in *Compute Pipelines* and *Graphics Pipelines*. 
+ unsafe fn create_shader_module( + &self, + spirv_data: &[u32], + ) -> Result; + + /// Destroy a shader module module + /// + /// A shader module can be destroyed while pipelines created using its shaders are still in use. + unsafe fn destroy_shader_module(&self, shader: B::ShaderModule); + + /// Create a new buffer (unbound). + /// + /// The created buffer won't have associated memory until `bind_buffer_memory` is called. + unsafe fn create_buffer( + &self, + size: u64, + usage: buffer::Usage, + ) -> Result; + + /// Get memory requirements for the buffer + unsafe fn get_buffer_requirements(&self, buf: &B::Buffer) -> Requirements; + + /// Bind memory to a buffer. + /// + /// Be sure to check that there is enough memory available for the buffer. + /// Use `get_buffer_requirements` to acquire the memory requirements. + unsafe fn bind_buffer_memory( + &self, + memory: &B::Memory, + offset: u64, + buf: &mut B::Buffer, + ) -> Result<(), BindError>; + + /// Destroy a buffer. + /// + /// The buffer shouldn't be destroyed before any submitted command buffer, + /// which references the images, has finished execution. 
+ unsafe fn destroy_buffer(&self, buffer: B::Buffer); + + /// Create a new buffer view object + unsafe fn create_buffer_view( + &self, + buf: &B::Buffer, + fmt: Option, + range: buffer::SubRange, + ) -> Result; + + /// Destroy a buffer view object + unsafe fn destroy_buffer_view(&self, view: B::BufferView); + + /// Create a new image object + unsafe fn create_image( + &self, + kind: image::Kind, + mip_levels: image::Level, + format: format::Format, + tiling: image::Tiling, + usage: image::Usage, + view_caps: image::ViewCapabilities, + ) -> Result; + + /// Get memory requirements for the Image + unsafe fn get_image_requirements(&self, image: &B::Image) -> Requirements; + + /// + unsafe fn get_image_subresource_footprint( + &self, + image: &B::Image, + subresource: image::Subresource, + ) -> image::SubresourceFootprint; + + /// Bind device memory to an image object + unsafe fn bind_image_memory( + &self, + memory: &B::Memory, + offset: u64, + image: &mut B::Image, + ) -> Result<(), BindError>; + + /// Destroy an image. + /// + /// The image shouldn't be destroyed before any submitted command buffer, + /// which references the images, has finished execution. + unsafe fn destroy_image(&self, image: B::Image); + + /// Create an image view from an existing image + unsafe fn create_image_view( + &self, + image: &B::Image, + view_kind: image::ViewKind, + format: format::Format, + swizzle: format::Swizzle, + range: image::SubresourceRange, + ) -> Result; + + /// Destroy an image view object + unsafe fn destroy_image_view(&self, view: B::ImageView); + + /// Create a new sampler object + unsafe fn create_sampler( + &self, + desc: &image::SamplerDesc, + ) -> Result; + + /// Destroy a sampler object + unsafe fn destroy_sampler(&self, sampler: B::Sampler); + + /// Create a descriptor pool. + /// + /// Descriptor pools allow allocation of descriptor sets. + /// The pool can't be modified directly, only through updating descriptor sets. 
+ unsafe fn create_descriptor_pool( + &self, + max_sets: usize, + descriptor_ranges: I, + flags: DescriptorPoolCreateFlags, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow; + + /// Destroy a descriptor pool object + /// + /// When a pool is destroyed, all descriptor sets allocated from the pool are implicitly freed + /// and become invalid. Descriptor sets allocated from a given pool do not need to be freed + /// before destroying that descriptor pool. + unsafe fn destroy_descriptor_pool(&self, pool: B::DescriptorPool); + + /// Create a descriptor set layout. + /// + /// A descriptor set layout object is defined by an array of zero or more descriptor bindings. + /// Each individual descriptor binding is specified by a descriptor type, a count (array size) + /// of the number of descriptors in the binding, a set of shader stages that **can** access the + /// binding, and (if using immutable samplers) an array of sampler descriptors. + unsafe fn create_descriptor_set_layout( + &self, + bindings: I, + immutable_samplers: J, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow; + + /// Destroy a descriptor set layout object + unsafe fn destroy_descriptor_set_layout(&self, layout: B::DescriptorSetLayout); + + /// Specifying the parameters of a descriptor set write operation + unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I) + where + I: IntoIterator>, + J: IntoIterator, + J::Item: Borrow>; + + /// Structure specifying a copy descriptor set operation + unsafe fn copy_descriptor_sets<'a, I>(&self, copy_iter: I) + where + I: IntoIterator, + I::Item: Borrow>; + + /// Map a memory object into application address space + /// + /// Call `map_memory()` to retrieve a host virtual address pointer to a region of a mappable memory object + unsafe fn map_memory(&self, memory: &B::Memory, segment: Segment) -> Result<*mut u8, MapError>; + + /// Flush mapped memory ranges + unsafe fn flush_mapped_memory_ranges<'a, 
I>(&self, ranges: I) -> Result<(), OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a B::Memory, Segment)>; + + /// Invalidate ranges of non-coherent memory from the host caches + unsafe fn invalidate_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow<(&'a B::Memory, Segment)>; + + /// Unmap a memory object once host access to it is no longer needed by the application + unsafe fn unmap_memory(&self, memory: &B::Memory); + + /// Create a new semaphore object + fn create_semaphore(&self) -> Result; + + /// Destroy a semaphore object + unsafe fn destroy_semaphore(&self, semaphore: B::Semaphore); + + /// Create a new fence object + /// + /// Fences are a synchronization primitive that **can** be used to insert a dependency from + /// a queue to the host. Fences have two states - signaled and unsignaled. A fence **can** be + /// signaled as part of the execution of a *queue submission* command. Fences **can** be unsignaled + /// on the host with *reset_fences*. Fences **can** be waited on by the host with the + /// *wait_for_fences* command, and the current state **can** be queried with *get_fence_status*. + fn create_fence(&self, signaled: bool) -> Result; + + /// + unsafe fn reset_fence(&self, fence: &B::Fence) -> Result<(), OutOfMemory> { + self.reset_fences(iter::once(fence)) + } + + /// + unsafe fn reset_fences(&self, fences: I) -> Result<(), OutOfMemory> + where + I: IntoIterator, + I::Item: Borrow, + { + for fence in fences { + self.reset_fence(fence.borrow())?; + } + Ok(()) + } + + /// Blocks until the given fence is signaled. + /// Returns true if the fence was signaled before the timeout. + unsafe fn wait_for_fence( + &self, + fence: &B::Fence, + timeout_ns: u64, + ) -> Result { + self.wait_for_fences(iter::once(fence), WaitFor::All, timeout_ns) + } + + /// Blocks until all or one of the given fences are signaled. + /// Returns true if fences were signaled before the timeout. 
+ unsafe fn wait_for_fences( + &self, + fences: I, + wait: WaitFor, + timeout_ns: u64, + ) -> Result + where + I: IntoIterator, + I::Item: Borrow, + { + use std::{thread, time}; + fn to_ns(duration: time::Duration) -> u64 { + duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64 + } + + let start = time::Instant::now(); + match wait { + WaitFor::All => { + for fence in fences { + if !self.wait_for_fence(fence.borrow(), 0)? { + let elapsed_ns = to_ns(start.elapsed()); + if elapsed_ns > timeout_ns { + return Ok(false); + } + if !self.wait_for_fence(fence.borrow(), timeout_ns - elapsed_ns)? { + return Ok(false); + } + } + } + Ok(true) + } + WaitFor::Any => { + let fences: Vec<_> = fences.into_iter().collect(); + loop { + for fence in &fences { + if self.wait_for_fence(fence.borrow(), 0)? { + return Ok(true); + } + } + if to_ns(start.elapsed()) >= timeout_ns { + return Ok(false); + } + thread::sleep(time::Duration::from_millis(1)); + } + } + } + } + + /// true for signaled, false for not ready + unsafe fn get_fence_status(&self, fence: &B::Fence) -> Result; + + /// Destroy a fence object + unsafe fn destroy_fence(&self, fence: B::Fence); + + /// Create an event object. + fn create_event(&self) -> Result; + + /// Destroy an event object. + unsafe fn destroy_event(&self, event: B::Event); + + /// Query the status of an event. + /// + /// Returns `true` if the event is set, or `false` if it is reset. + unsafe fn get_event_status(&self, event: &B::Event) -> Result; + + /// Sets an event. + unsafe fn set_event(&self, event: &B::Event) -> Result<(), OutOfMemory>; + + /// Resets an event. + unsafe fn reset_event(&self, event: &B::Event) -> Result<(), OutOfMemory>; + + /// Create a new query pool object + /// + /// Queries are managed using query pool objects. Each query pool is a collection of a specific + /// number of queries of a particular type. 
+ unsafe fn create_query_pool( + &self, + ty: query::Type, + count: query::Id, + ) -> Result; + + /// Destroy a query pool object + unsafe fn destroy_query_pool(&self, pool: B::QueryPool); + + /// Get query pool results into the specified CPU memory. + /// Returns `Ok(false)` if the results are not ready yet and neither of `WAIT` or `PARTIAL` flags are set. + unsafe fn get_query_pool_results( + &self, + pool: &B::QueryPool, + queries: Range, + data: &mut [u8], + stride: buffer::Offset, + flags: query::ResultFlags, + ) -> Result; + + /// Create a new swapchain from a surface and a queue family, optionally providing the old + /// swapchain to aid in resource reuse and rendering continuity. + /// + /// *Note*: The number of exposed images in the back buffer might differ + /// from number of internally used buffers. + /// + /// # Safety + /// + /// The queue family _must_ support surface presentation. + /// This can be checked by calling [`supports_queue_family`](trait.Surface.html#tymethod.supports_queue_family) + /// on this surface. + /// + /// # Examples + /// + /// ```no_run + /// # extern crate gfx_backend_empty as empty; + /// # extern crate gfx_hal; + /// # fn main() { + /// use gfx_hal::{prelude::*, format::Format, window::SwapchainConfig}; + /// + /// # let mut surface: empty::Surface = return; + /// # let device: empty::Device = return; + /// # unsafe { + /// let swapchain_config = SwapchainConfig::new(100, 100, Format::Rgba8Srgb, 2); + /// device.create_swapchain(&mut surface, swapchain_config, None); + /// # }} + /// ``` + unsafe fn create_swapchain( + &self, + surface: &mut B::Surface, + config: SwapchainConfig, + old_swapchain: Option, + ) -> Result<(B::Swapchain, Vec), window::CreationError>; + + /// + unsafe fn destroy_swapchain(&self, swapchain: B::Swapchain); + + /// Wait for all queues associated with this device to idle. + /// + /// Host access to all queues needs to be **externally** sycnhronized! 
+ fn wait_idle(&self) -> Result<(), OutOfMemory>; + + /// Associate a name with an image, for easier debugging in external tools or with validation + /// layers that can print a friendly name when referring to objects in error messages + unsafe fn set_image_name(&self, image: &mut B::Image, name: &str); + /// Associate a name with a buffer, for easier debugging in external tools or with validation + /// layers that can print a friendly name when referring to objects in error messages + unsafe fn set_buffer_name(&self, buffer: &mut B::Buffer, name: &str); + /// Associate a name with a command buffer, for easier debugging in external tools or with + /// validation layers that can print a friendly name when referring to objects in error messages + unsafe fn set_command_buffer_name(&self, command_buffer: &mut B::CommandBuffer, name: &str); + /// Associate a name with a semaphore, for easier debugging in external tools or with validation + /// layers that can print a friendly name when referring to objects in error messages + unsafe fn set_semaphore_name(&self, semaphore: &mut B::Semaphore, name: &str); + /// Associate a name with a fence, for easier debugging in external tools or with validation + /// layers that can print a friendly name when referring to objects in error messages + unsafe fn set_fence_name(&self, fence: &mut B::Fence, name: &str); + /// Associate a name with a framebuffer, for easier debugging in external tools or with + /// validation layers that can print a friendly name when referring to objects in error messages + unsafe fn set_framebuffer_name(&self, framebuffer: &mut B::Framebuffer, name: &str); + /// Associate a name with a render pass, for easier debugging in external tools or with + /// validation layers that can print a friendly name when referring to objects in error messages + unsafe fn set_render_pass_name(&self, render_pass: &mut B::RenderPass, name: &str); + /// Associate a name with a descriptor set, for easier debugging in external 
tools or with + /// validation layers that can print a friendly name when referring to objects in error messages + unsafe fn set_descriptor_set_name(&self, descriptor_set: &mut B::DescriptorSet, name: &str); + /// Associate a name with a descriptor set layout, for easier debugging in external tools or + /// with validation layers that can print a friendly name when referring to objects in error + /// messages + unsafe fn set_descriptor_set_layout_name( + &self, + descriptor_set_layout: &mut B::DescriptorSetLayout, + name: &str, + ); +} diff --git a/third_party/rust/gfx-hal/src/format.rs b/third_party/rust/gfx-hal/src/format.rs old mode 100755 new mode 100644 index f6478f15aa3e..4807e5ce802d --- a/third_party/rust/gfx-hal/src/format.rs +++ b/third_party/rust/gfx-hal/src/format.rs @@ -1,623 +1,624 @@ -//! Universal format specification. -//! Applicable to textures, views, and vertex buffers. -//! -//! For a more detailed description of all the specific format specifiers, -//! please see [the official Vulkan documentation](https://www.khronos.org/registry/vulkan/specs/1.0/man/html/VkFormat.html) -//! -//! `gfx-rs` splits a `Format` into two sub-components, a `SurfaceType` and -//! a `ChannelType`. The `SurfaceType` specifies how the large the channels are, -//! for instance `R32_G32_B32_A32`. The `ChannelType` specifies how the -//! components are interpreted, for instance `Sfloat` or `Sint`. - -bitflags!( - /// Bitflags which describe what properties of an image - /// a format specifies or does not specify. For example, - /// the `Rgba8Unorm` format only specifies a `COLOR` aspect, - /// while `D32SfloatS8Uint` specifies both a depth and stencil - /// aspect but no color. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct Aspects: u8 { - /// Color aspect. - const COLOR = 0x1; - /// Depth aspect. - const DEPTH = 0x2; - /// Stencil aspect. - const STENCIL = 0x4; - } -); - -/// Description of a format. 
-#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct FormatDesc { - /// Total number of bits. - /// - /// * Depth/Stencil formats are opaque formats, where the total number of bits is unknown. - /// A dummy value is used for these formats instead (sum of depth and stencil bits). - /// For copy operations, the number of bits of the corresponding aspect should be used. - /// * The total number can be larger than the sum of individual format bits - /// (`color`, `alpha`, `depth` and `stencil`) for packed formats. - /// * For compressed formats, this denotes the number of bits per block. - pub bits: u16, - /// Dimensions (width, height) of the texel blocks. - pub dim: (u8, u8), - /// The format representation depends on the endianness of the platform. - /// - /// * On little-endian systems, the actual oreder of components is reverse of what - /// a surface type specifies. - pub packed: bool, - /// Format aspects - pub aspects: Aspects, -} - -impl FormatDesc { - /// Check if the format is compressed. - pub fn is_compressed(&self) -> bool { - self.dim != (1, 1) - } -} - -/// Description of the bits distribution of a format. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct FormatBits { - /// Number of color bits (summed for R/G/B). - /// - /// For compressed formats, this value is 0. - pub color: u8, - /// Number of alpha bits. - /// - /// For compressed formats, this value is 0. - pub alpha: u8, - /// Number of depth bits - pub depth: u8, - /// Number of stencil bits - pub stencil: u8, -} - -/// Format bits configuration with no bits assigned. -pub const BITS_ZERO: FormatBits = FormatBits { - color: 0, - alpha: 0, - depth: 0, - stencil: 0, -}; - -/// Source channel in a swizzle configuration. Some may redirect onto -/// different physical channels, some may be hardcoded to 0 or 1. 
-#[repr(u8)] -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum Component { - /// Hardcoded zero - Zero, - /// Hardcoded one - One, - /// Red channel - R, - /// Green channel - G, - /// Blue channel - B, - /// Alpha channel. - A, -} - -/// Channel swizzle configuration for the resource views. -/// This specifies a "swizzle" operation which remaps the various -/// channels of a format into a different order. For example, -/// `Swizzle(Component::B, Component::G, Component::R, Component::A)` -/// will swap `RGBA` formats into `BGRA` formats and back. -/// -/// Note: It's not currently mirrored at compile-time, -/// thus providing less safety and convenience. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Swizzle(pub Component, pub Component, pub Component, pub Component); - -impl Swizzle { - /// A trivially non-swizzling configuration; performs no changes. - pub const NO: Swizzle = Swizzle(Component::R, Component::G, Component::B, Component::A); -} - -impl Default for Swizzle { - fn default() -> Self { - Self::NO - } -} - -/// Format properties of the physical device. -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Properties { - /// A bitmask of the features supported when an image with linear tiling is requested. - /// Linear tiling has a known layout in-memory so data can be copied to and from host - /// memory. - pub linear_tiling: ImageFeature, - /// A bitmask of the features supported when an image with optimal tiling is requested. - /// Optimal tiling is arranged however the GPU wants; its exact layout is undefined. - pub optimal_tiling: ImageFeature, - /// The features supported by buffers. - pub buffer_features: BufferFeature, -} - -bitflags!( - /// Image feature flags. 
- #[derive(Default)] - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct ImageFeature: u32 { - /// Image view can be sampled. - const SAMPLED = 0x1; - /// Image view can be used as storage image. - const STORAGE = 0x2; - /// Image view can be used as storage image (with atomics). - const STORAGE_ATOMIC = 0x4; - /// Image view can be used as color and input attachment. - const COLOR_ATTACHMENT = 0x80; - /// Image view can be used as color (with blending) and input attachment. - const COLOR_ATTACHMENT_BLEND = 0x100; - /// Image view can be used as depth-stencil and input attachment. - const DEPTH_STENCIL_ATTACHMENT = 0x200; - /// Image can be used as source for blit commands. - const BLIT_SRC = 0x400; - /// Image can be used as destination for blit commands. - const BLIT_DST = 0x800; - /// Image can be sampled with a (mipmap) linear sampler or as blit source - /// with linear sampling. - /// Requires `SAMPLED` or `BLIT_SRC` flag. - const SAMPLED_LINEAR = 0x1000; - } -); - -bitflags!( - /// Buffer feature flags. - #[derive(Default)] - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct BufferFeature: u32 { - /// Buffer view can be used as uniform texel buffer. - const UNIFORM_TEXEL = 0x8; - /// Buffer view can be used as storage texel buffer. - const STORAGE_TEXEL = 0x10; - /// Buffer view can be used as storage texel buffer (with atomics). - const STORAGE_TEXEL_ATOMIC = 0x20; - /// Image view can be used as vertex buffer. - const VERTEX = 0x40; - } -); - -/// Type of a surface channel. This is how we interpret the -/// storage allocated with `SurfaceType`. -#[repr(u8)] -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum ChannelType { - /// Unsigned normalized. - Unorm, - /// Signed normalized. - Snorm, - /// Unsigned integer. - Uint, - /// Signed integer. - Sint, - /// Unsigned floating-point. 
- Ufloat, - /// Signed floating-point. - Sfloat, - /// Unsigned scaled integer. - Uscaled, - /// Signed scaled integer. - Sscaled, - /// Unsigned normalized, SRGB non-linear encoded. - Srgb, -} - -macro_rules! surface_types { - { $($name:ident { $total:expr, $($aspect:ident)|*, $dim:expr $( ,$component:ident : $bits:expr )*} ,)* } => { - /// Type of the allocated texture surface. It is supposed to only - /// carry information about the number of bits per each channel. - /// The actual types are up to the views to decide and interpret. - /// The actual components are up to the swizzle to define. - #[repr(u8)] - #[allow(missing_docs, non_camel_case_types)] - #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub enum SurfaceType { - $( $name, )* - } - - impl SurfaceType { - /// Return the bits for this format. - pub fn describe_bits(&self) -> FormatBits { - match *self { - $( SurfaceType::$name => FormatBits { - $( $component: $bits, )* - .. BITS_ZERO - }, )* - } - } - - /// Return the format descriptor. - pub fn desc(&self) -> FormatDesc { - match *self { - $( SurfaceType::$name => FormatDesc { - bits: $total.min(!$total), - dim: $dim, - packed: $total > 0x1000, - aspects: $(Aspects::$aspect)|*, - }, )* - } - } - } - } -} - -// ident { num_bits, aspects, dim, (color, alpha, ..) } -// if the number of bits is given with exclamation (e.g. `!16`), the format is considered packed -surface_types! 
{ - R4_G4 { !8, COLOR, (1, 1), color: 8 }, - R4_G4_B4_A4 { !16, COLOR, (1, 1), color: 12, alpha: 4 }, - B4_G4_R4_A4 { !16, COLOR, (1, 1), color: 12, alpha: 4 }, - R5_G6_B5 { !16, COLOR, (1, 1), color: 16 }, - B5_G6_R5 { !16, COLOR, (1, 1), color: 16 }, - R5_G5_B5_A1 { !16, COLOR, (1, 1), color: 15, alpha: 1 }, - B5_G5_R5_A1 { !16, COLOR, (1, 1), color: 15, alpha: 1 }, - A1_R5_G5_B5 { !16, COLOR, (1, 1), color: 15, alpha: 1 }, - R8 { 8, COLOR, (1, 1), color: 8 }, - R8_G8 { 16, COLOR, (1, 1), color: 16 }, - R8_G8_B8 { 24, COLOR, (1, 1), color: 24 }, - B8_G8_R8 { 24, COLOR, (1, 1), color: 24 }, - R8_G8_B8_A8 { 32, COLOR, (1, 1), color: 24, alpha: 8 }, - B8_G8_R8_A8 { 32, COLOR, (1, 1), color: 24, alpha: 8 }, - A8_B8_G8_R8 { !32, COLOR, (1, 1), color: 24, alpha: 8 }, - A2_R10_G10_B10 { !32, COLOR, (1, 1), color: 30, alpha: 2 }, - A2_B10_G10_R10 { !32, COLOR, (1, 1), color: 30, alpha: 2 }, - R16 { 16, COLOR, (1, 1), color: 16 }, - R16_G16 { 32, COLOR, (1, 1), color: 32 }, - R16_G16_B16 { 48, COLOR, (1, 1), color: 48 }, - R16_G16_B16_A16 { 64, COLOR, (1, 1), color: 48, alpha: 16 }, - R32 { 32, COLOR, (1, 1), color: 32 }, - R32_G32 { 64, COLOR, (1, 1), color: 64 }, - R32_G32_B32 { 96, COLOR, (1, 1), color: 96 }, - R32_G32_B32_A32 { 128, COLOR, (1, 1), color: 96, alpha: 32 }, - R64 { 64, COLOR, (1, 1), color: 64 }, - R64_G64 { 128, COLOR, (1, 1), color: 128 }, - R64_G64_B64 { 192, COLOR, (1, 1), color: 192 }, - R64_G64_B64_A64 { 256, COLOR, (1, 1), color: 192, alpha: 64 }, - B10_G11_R11 { !32, COLOR, (1, 1), color: 32 }, - E5_B9_G9_R9 { !32, COLOR, (1, 1), color: 27 }, - D16 { 16, DEPTH, (1, 1), depth: 16 }, - X8D24 { !32, DEPTH, (1, 1), depth: 24 }, - D32 { 32, DEPTH, (1, 1), depth: 32 }, - S8 { 8, STENCIL, (1, 1), stencil: 8 }, - D16_S8 { 24, DEPTH | STENCIL, (1, 1), depth: 16, stencil: 8 }, - D24_S8 { 32, DEPTH | STENCIL, (1, 1), depth: 24, stencil: 8 }, - D32_S8 { 40, DEPTH | STENCIL, (1, 1), depth: 32, stencil: 8 }, - BC1_RGB { 64, COLOR, (4, 4) }, - BC1_RGBA { 64, 
COLOR, (4, 4) }, - BC2 { 128, COLOR, (4, 4) }, - BC3 { 128, COLOR, (4, 4) }, - BC4 { 64, COLOR, (4, 4) }, - BC5 { 128, COLOR, (4, 4) }, - BC6 { 128, COLOR, (4, 4) }, - BC7 { 128, COLOR, (4, 4) }, - ETC2_R8_G8_B8 { 64, COLOR, (4, 4) }, - ETC2_R8_G8_B8_A1 { 64, COLOR, (4, 4) }, - ETC2_R8_G8_B8_A8 { 128, COLOR, (4, 4) }, - EAC_R11 { 64, COLOR, (4, 4) }, - EAC_R11_G11 { 128, COLOR, (4, 4) }, - ASTC_4x4 { 128, COLOR, (4, 4) }, - ASTC_5x4 { 128, COLOR, (5, 4) }, - ASTC_5x5 { 128, COLOR, (5, 5) }, - ASTC_6x5 { 128, COLOR, (6, 5) }, - ASTC_6x6 { 128, COLOR, (6, 6) }, - ASTC_8x5 { 128, COLOR, (8, 5) }, - ASTC_8x6 { 128, COLOR, (8, 6) }, - ASTC_8x8 { 128, COLOR, (8, 8) }, - ASTC_10x5 { 128, COLOR, (10, 5) }, - ASTC_10x6 { 128, COLOR, (10, 6) }, - ASTC_10x8 { 128, COLOR, (10, 8) }, - ASTC_10x10 { 128, COLOR, (10, 10) }, - ASTC_12x10 { 128, COLOR, (12, 10) }, - ASTC_12x12 { 128, COLOR, (12, 12) }, -} - -/// Generic run-time base format. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct BaseFormat(pub SurfaceType, pub ChannelType); - -/// Conversion trait into `Format`; -pub trait AsFormat { - /// Associated format. - const SELF: Format; -} - -macro_rules! formats { - { - $name:ident = ($surface:ident, $channel:ident), - $($name_tail:ident = ($surface_tail:ident, $channel_tail:ident),)* - } => { - /// A format descriptor that describes the channels present in a - /// texture or view, how they are laid out, what size they are, - /// and how the elements of the channels are interpreted (integer, - /// float, etc.) - #[allow(missing_docs)] - #[repr(u32)] - #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub enum Format { - $name = 1, - $( $name_tail, )* - - // This serves as safety net for conversion from Vulkan -> HAL, - // in case Vulkan adds new formats: - // 1. 
We can check if a format is out of range - // 2. We 'ensure' that backend implementations do non-exhaustive matching - #[doc(hidden)] - __NumFormats, - } - - /// Number of formats. - pub const NUM_FORMATS: usize = Format::__NumFormats as _; - - /// Conversion table from `Format` to `BaseFormat`, excluding `Undefined`. - pub const BASE_FORMATS: [BaseFormat; NUM_FORMATS-1] = [ - BaseFormat(SurfaceType::$surface, ChannelType::$channel), - $(BaseFormat(SurfaceType::$surface_tail, ChannelType::$channel_tail), )* - ]; - - /// A struct equivalent to the matching `Format` enum member, which allows - /// an API to be strongly typed on particular formats. - #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct $name; - - impl AsFormat for $name { - const SELF: Format = Format::$name; - } - - $( - /// A struct equivalent to the matching `Format` enum member, which allows - /// an API to be strongly typed on particular formats. - #[allow(missing_docs)] - #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct $name_tail; - - impl AsFormat for $name_tail { - const SELF: Format = Format::$name_tail; - } - - )* - } -} - -// Format order has to match the order exposed by the Vulkan API. -formats! 
{ - Rg4Unorm = (R4_G4, Unorm), - Rgba4Unorm = (R4_G4_B4_A4, Unorm), - Bgra4Unorm = (B4_G4_R4_A4, Unorm), - R5g6b5Unorm = (R5_G6_B5, Unorm), - B5g6r5Unorm = (B5_G6_R5, Unorm), - R5g5b5a1Unorm = (R5_G5_B5_A1, Unorm), - B5g5r5a1Unorm = (B5_G5_R5_A1, Unorm), - A1r5g5b5Unorm = (A1_R5_G5_B5, Unorm), - R8Unorm = (R8, Unorm), - R8Snorm = (R8, Snorm), - R8Uscaled = (R8, Uscaled), - R8Sscaled = (R8, Sscaled), - R8Uint = (R8, Uint), - R8Sint = (R8, Sint), - R8Srgb = (R8, Srgb), - Rg8Unorm = (R8_G8, Unorm), - Rg8Snorm = (R8_G8, Snorm), - Rg8Uscaled = (R8_G8, Uscaled), - Rg8Sscaled = (R8_G8, Sscaled), - Rg8Uint = (R8_G8, Uint), - Rg8Sint = (R8_G8, Sint), - Rg8Srgb = (R8_G8, Srgb), - Rgb8Unorm = (R8_G8_B8, Unorm), - Rgb8Snorm = (R8_G8_B8, Snorm), - Rgb8Uscaled = (R8_G8_B8, Uscaled), - Rgb8Sscaled = (R8_G8_B8, Sscaled), - Rgb8Uint = (R8_G8_B8, Uint), - Rgb8Sint = (R8_G8_B8, Sint), - Rgb8Srgb = (R8_G8_B8, Srgb), - Bgr8Unorm = (B8_G8_R8, Unorm), - Bgr8Snorm = (B8_G8_R8, Snorm), - Bgr8Uscaled = (B8_G8_R8, Uscaled), - Bgr8Sscaled = (B8_G8_R8, Sscaled), - Bgr8Uint = (B8_G8_R8, Uint), - Bgr8Sint = (B8_G8_R8, Sint), - Bgr8Srgb = (B8_G8_R8, Srgb), - Rgba8Unorm = (R8_G8_B8_A8, Unorm), - Rgba8Snorm = (R8_G8_B8_A8, Snorm), - Rgba8Uscaled = (R8_G8_B8_A8, Uscaled), - Rgba8Sscaled = (R8_G8_B8_A8, Sscaled), - Rgba8Uint = (R8_G8_B8_A8, Uint), - Rgba8Sint = (R8_G8_B8_A8, Sint), - Rgba8Srgb = (R8_G8_B8_A8, Srgb), - Bgra8Unorm = (B8_G8_R8_A8, Unorm), - Bgra8Snorm = (B8_G8_R8_A8, Snorm), - Bgra8Uscaled = (B8_G8_R8_A8, Uscaled), - Bgra8Sscaled = (B8_G8_R8_A8, Sscaled), - Bgra8Uint = (B8_G8_R8_A8, Uint), - Bgra8Sint = (B8_G8_R8_A8, Sint), - Bgra8Srgb = (B8_G8_R8_A8, Srgb), - Abgr8Unorm = (A8_B8_G8_R8, Unorm), - Abgr8Snorm = (A8_B8_G8_R8, Snorm), - Abgr8Uscaled = (A8_B8_G8_R8, Uscaled), - Abgr8Sscaled = (A8_B8_G8_R8, Sscaled), - Abgr8Uint = (A8_B8_G8_R8, Uint), - Abgr8Sint = (A8_B8_G8_R8, Sint), - Abgr8Srgb = (A8_B8_G8_R8, Srgb), - A2r10g10b10Unorm = (A2_R10_G10_B10, Unorm), - A2r10g10b10Snorm = 
(A2_R10_G10_B10, Snorm), - A2r10g10b10Uscaled = (A2_R10_G10_B10, Uscaled), - A2r10g10b10Sscaled = (A2_R10_G10_B10, Sscaled), - A2r10g10b10Uint = (A2_R10_G10_B10, Uint), - A2r10g10b10Sint = (A2_R10_G10_B10, Sint), - A2b10g10r10Unorm = (A2_B10_G10_R10, Unorm), - A2b10g10r10Snorm = (A2_B10_G10_R10, Snorm), - A2b10g10r10Uscaled = (A2_B10_G10_R10, Uscaled), - A2b10g10r10Sscaled = (A2_B10_G10_R10, Sscaled), - A2b10g10r10Uint = (A2_B10_G10_R10, Uint), - A2b10g10r10Sint = (A2_B10_G10_R10, Sint), - R16Unorm = (R16, Unorm), - R16Snorm = (R16, Snorm), - R16Uscaled = (R16, Uscaled), - R16Sscaled = (R16, Sscaled), - R16Uint = (R16, Uint), - R16Sint = (R16, Sint), - R16Sfloat = (R16, Sfloat), - Rg16Unorm = (R16_G16, Unorm), - Rg16Snorm = (R16_G16, Snorm), - Rg16Uscaled = (R16_G16, Uscaled), - Rg16Sscaled = (R16_G16, Sscaled), - Rg16Uint = (R16_G16, Uint), - Rg16Sint = (R16_G16, Sint), - Rg16Sfloat = (R16_G16, Sfloat), - Rgb16Unorm = (R16_G16_B16, Unorm), - Rgb16Snorm = (R16_G16_B16, Snorm), - Rgb16Uscaled = (R16_G16_B16, Uscaled), - Rgb16Sscaled = (R16_G16_B16, Sscaled), - Rgb16Uint = (R16_G16_B16, Uint), - Rgb16Sint = (R16_G16_B16, Sint), - Rgb16Sfloat = (R16_G16_B16, Sfloat), - Rgba16Unorm = (R16_G16_B16_A16, Unorm), - Rgba16Snorm = (R16_G16_B16_A16, Snorm), - Rgba16Uscaled = (R16_G16_B16_A16, Uscaled), - Rgba16Sscaled = (R16_G16_B16_A16, Sscaled), - Rgba16Uint = (R16_G16_B16_A16, Uint), - Rgba16Sint = (R16_G16_B16_A16, Sint), - Rgba16Sfloat = (R16_G16_B16_A16, Sfloat), - R32Uint = (R32, Uint), - R32Sint = (R32, Sint), - R32Sfloat = (R32, Sfloat), - Rg32Uint = (R32_G32, Uint), - Rg32Sint = (R32_G32, Sint), - Rg32Sfloat = (R32_G32, Sfloat), - Rgb32Uint = (R32_G32_B32, Uint), - Rgb32Sint = (R32_G32_B32, Sint), - Rgb32Sfloat = (R32_G32_B32, Sfloat), - Rgba32Uint = (R32_G32_B32_A32, Uint), - Rgba32Sint = (R32_G32_B32_A32, Sint), - Rgba32Sfloat = (R32_G32_B32_A32, Sfloat), - R64Uint = (R64, Uint), - R64Sint = (R64, Sint), - R64Sfloat = (R64, Sfloat), - Rg64Uint = (R64_G64, Uint), - 
Rg64Sint = (R64_G64, Sint), - Rg64Sfloat = (R64_G64, Sfloat), - Rgb64Uint = (R64_G64_B64, Uint), - Rgb64Sint = (R64_G64_B64, Sint), - Rgb64Sfloat = (R64_G64_B64, Sfloat), - Rgba64Uint = (R64_G64_B64_A64, Uint), - Rgba64Sint = (R64_G64_B64_A64, Sint), - Rgba64Sfloat = (R64_G64_B64_A64, Sfloat), - B10g11r11Ufloat = (B10_G11_R11, Ufloat), - E5b9g9r9Ufloat = (E5_B9_G9_R9, Ufloat), - D16Unorm = (D16, Unorm), - X8D24Unorm = (X8D24, Unorm), - D32Sfloat = (D32, Sfloat), - S8Uint = (S8, Uint), - D16UnormS8Uint = (D16_S8, Unorm), - D24UnormS8Uint = (D24_S8, Unorm), - D32SfloatS8Uint = (D32_S8, Sfloat), - Bc1RgbUnorm = (BC1_RGB, Unorm), - Bc1RgbSrgb = (BC1_RGB, Srgb), - Bc1RgbaUnorm = (BC1_RGBA, Unorm), - Bc1RgbaSrgb = (BC1_RGBA, Srgb), - Bc2Unorm = (BC2, Unorm), - Bc2Srgb = (BC2, Srgb), - Bc3Unorm = (BC3, Unorm), - Bc3Srgb = (BC3, Srgb), - Bc4Unorm = (BC4, Unorm), - Bc4Snorm = (BC4, Snorm), - Bc5Unorm = (BC5, Unorm), - Bc5Snorm = (BC5, Snorm), - Bc6hUfloat = (BC6, Ufloat), - Bc6hSfloat = (BC6, Sfloat), - Bc7Unorm = (BC7, Unorm), - Bc7Srgb = (BC7, Srgb), - Etc2R8g8b8Unorm = (ETC2_R8_G8_B8, Unorm), - Etc2R8g8b8Srgb = (ETC2_R8_G8_B8, Srgb), - Etc2R8g8b8a1Unorm = (ETC2_R8_G8_B8_A1, Unorm), - Etc2R8g8b8a1Srgb = (ETC2_R8_G8_B8_A1, Srgb), - Etc2R8g8b8a8Unorm = (ETC2_R8_G8_B8_A8, Unorm), - Etc2R8g8b8a8Srgb = (ETC2_R8_G8_B8_A8, Srgb), - EacR11Unorm = (EAC_R11, Unorm), - EacR11Snorm = (EAC_R11, Snorm), - EacR11g11Unorm = (EAC_R11_G11, Unorm), - EacR11g11Snorm = (EAC_R11_G11, Snorm), - Astc4x4Unorm = (ASTC_4x4, Unorm), - Astc4x4Srgb = (ASTC_4x4, Srgb), - Astc5x4Unorm = (ASTC_5x4, Unorm), - Astc5x4Srgb = (ASTC_5x4, Srgb), - Astc5x5Unorm = (ASTC_5x5, Unorm), - Astc5x5Srgb = (ASTC_5x5, Srgb), - Astc6x5Unorm = (ASTC_6x5, Unorm), - Astc6x5Srgb = (ASTC_6x5, Srgb), - Astc6x6Unorm = (ASTC_6x6, Unorm), - Astc6x6Srgb = (ASTC_6x6, Srgb), - Astc8x5Unorm = (ASTC_8x5, Unorm), - Astc8x5Srgb = (ASTC_8x5, Srgb), - Astc8x6Unorm = (ASTC_8x6, Unorm), - Astc8x6Srgb = (ASTC_8x6, Srgb), - Astc8x8Unorm = 
(ASTC_8x8, Unorm), - Astc8x8Srgb = (ASTC_8x8, Srgb), - Astc10x5Unorm = (ASTC_10x5, Unorm), - Astc10x5Srgb = (ASTC_10x5, Srgb), - Astc10x6Unorm = (ASTC_10x6, Unorm), - Astc10x6Srgb = (ASTC_10x6, Srgb), - Astc10x8Unorm = (ASTC_10x8, Unorm), - Astc10x8Srgb = (ASTC_10x8, Srgb), - Astc10x10Unorm = (ASTC_10x10, Unorm), - Astc10x10Srgb = (ASTC_10x10, Srgb), - Astc12x10Unorm = (ASTC_12x10, Unorm), - Astc12x10Srgb = (ASTC_12x10, Srgb), - Astc12x12Unorm = (ASTC_12x12, Unorm), - Astc12x12Srgb = (ASTC_12x12, Srgb), -} - -impl Format { - /// Get base format. - /// - /// Returns `None` if format is `Undefined`. - pub fn base_format(self) -> BaseFormat { - assert!(self as usize != 0 && NUM_FORMATS > self as usize); - BASE_FORMATS[self as usize - 1] - } - - /// A shortcut to obtain surface format description. - pub fn surface_desc(&self) -> FormatDesc { - self.base_format().0.desc() - } - - /// Returns if the format has a color aspect. - pub fn is_color(self) -> bool { - self.surface_desc().aspects.contains(Aspects::COLOR) - } - - /// Returns if the format has a depth aspect. - pub fn is_depth(self) -> bool { - self.surface_desc().aspects.contains(Aspects::DEPTH) - } - - /// Returns if the format has a stencil aspect. - pub fn is_stencil(self) -> bool { - self.surface_desc().aspects.contains(Aspects::STENCIL) - } -} - -// Common vertex attribute formats -impl AsFormat for f32 { - const SELF: Format = Format::R32Sfloat; -} -impl AsFormat for [f32; 2] { - const SELF: Format = Format::Rg32Sfloat; -} -impl AsFormat for [f32; 3] { - const SELF: Format = Format::Rgb32Sfloat; -} -impl AsFormat for [f32; 4] { - const SELF: Format = Format::Rgba32Sfloat; -} +//! Universal format specification. +//! Applicable to textures, views, and vertex buffers. +//! +//! For a more detailed description of all the specific format specifiers, +//! please see [the official Vulkan documentation](https://www.khronos.org/registry/vulkan/specs/1.0/man/html/VkFormat.html) +//! +//! 
`gfx-rs` splits a `Format` into two sub-components, a `SurfaceType` and +//! a `ChannelType`. The `SurfaceType` specifies how the large the channels are, +//! for instance `R32_G32_B32_A32`. The `ChannelType` specifies how the +//! components are interpreted, for instance `Sfloat` or `Sint`. + +bitflags!( + /// Bitflags which describe what properties of an image + /// a format specifies or does not specify. For example, + /// the `Rgba8Unorm` format only specifies a `COLOR` aspect, + /// while `D32SfloatS8Uint` specifies both a depth and stencil + /// aspect but no color. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Aspects: u8 { + /// Color aspect. + const COLOR = 0x1; + /// Depth aspect. + const DEPTH = 0x2; + /// Stencil aspect. + const STENCIL = 0x4; + } +); + +/// Description of a format. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct FormatDesc { + /// Total number of bits. + /// + /// * Depth/Stencil formats are opaque formats, where the total number of bits is unknown. + /// A dummy value is used for these formats instead (sum of depth and stencil bits). + /// For copy operations, the number of bits of the corresponding aspect should be used. + /// * The total number can be larger than the sum of individual format bits + /// (`color`, `alpha`, `depth` and `stencil`) for packed formats. + /// * For compressed formats, this denotes the number of bits per block. + pub bits: u16, + /// Dimensions (width, height) of the texel blocks. + pub dim: (u8, u8), + /// The format representation depends on the endianness of the platform. + /// + /// * On little-endian systems, the actual oreder of components is reverse of what + /// a surface type specifies. + pub packed: bool, + /// Format aspects + pub aspects: Aspects, +} + +impl FormatDesc { + /// Check if the format is compressed. 
+ pub fn is_compressed(&self) -> bool { + self.dim != (1, 1) + } +} + +/// Description of the bits distribution of a format. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct FormatBits { + /// Number of color bits (summed for R/G/B). + /// + /// For compressed formats, this value is 0. + pub color: u8, + /// Number of alpha bits. + /// + /// For compressed formats, this value is 0. + pub alpha: u8, + /// Number of depth bits + pub depth: u8, + /// Number of stencil bits + pub stencil: u8, +} + +/// Format bits configuration with no bits assigned. +pub const BITS_ZERO: FormatBits = FormatBits { + color: 0, + alpha: 0, + depth: 0, + stencil: 0, +}; + +/// Source channel in a swizzle configuration. Some may redirect onto +/// different physical channels, some may be hardcoded to 0 or 1. +#[repr(u8)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Component { + //TODO: add `Identity = 0`? + /// Hardcoded zero + Zero = 1, + /// Hardcoded one + One = 2, + /// Red channel + R = 3, + /// Green channel + G = 4, + /// Blue channel + B = 5, + /// Alpha channel. + A = 6, +} + +/// Channel swizzle configuration for the resource views. +/// This specifies a "swizzle" operation which remaps the various +/// channels of a format into a different order. For example, +/// `Swizzle(Component::B, Component::G, Component::R, Component::A)` +/// will swap `RGBA` formats into `BGRA` formats and back. +/// +/// Note: It's not currently mirrored at compile-time, +/// thus providing less safety and convenience. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Swizzle(pub Component, pub Component, pub Component, pub Component); + +impl Swizzle { + /// A trivially non-swizzling configuration; performs no changes. 
+ pub const NO: Swizzle = Swizzle(Component::R, Component::G, Component::B, Component::A); +} + +impl Default for Swizzle { + fn default() -> Self { + Self::NO + } +} + +/// Format properties of the physical device. +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Properties { + /// A bitmask of the features supported when an image with linear tiling is requested. + /// Linear tiling has a known layout in-memory so data can be copied to and from host + /// memory. + pub linear_tiling: ImageFeature, + /// A bitmask of the features supported when an image with optimal tiling is requested. + /// Optimal tiling is arranged however the GPU wants; its exact layout is undefined. + pub optimal_tiling: ImageFeature, + /// The features supported by buffers. + pub buffer_features: BufferFeature, +} + +bitflags!( + /// Image feature flags. + #[derive(Default)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct ImageFeature: u32 { + /// Image view can be sampled. + const SAMPLED = 0x1; + /// Image view can be used as storage image. + const STORAGE = 0x2; + /// Image view can be used as storage image (with atomics). + const STORAGE_ATOMIC = 0x4; + /// Image view can be used as color and input attachment. + const COLOR_ATTACHMENT = 0x80; + /// Image view can be used as color (with blending) and input attachment. + const COLOR_ATTACHMENT_BLEND = 0x100; + /// Image view can be used as depth-stencil and input attachment. + const DEPTH_STENCIL_ATTACHMENT = 0x200; + /// Image can be used as source for blit commands. + const BLIT_SRC = 0x400; + /// Image can be used as destination for blit commands. + const BLIT_DST = 0x800; + /// Image can be sampled with a (mipmap) linear sampler or as blit source + /// with linear sampling. + /// Requires `SAMPLED` or `BLIT_SRC` flag. + const SAMPLED_LINEAR = 0x1000; + } +); + +bitflags!( + /// Buffer feature flags. 
+ #[derive(Default)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct BufferFeature: u32 { + /// Buffer view can be used as uniform texel buffer. + const UNIFORM_TEXEL = 0x8; + /// Buffer view can be used as storage texel buffer. + const STORAGE_TEXEL = 0x10; + /// Buffer view can be used as storage texel buffer (with atomics). + const STORAGE_TEXEL_ATOMIC = 0x20; + /// Image view can be used as vertex buffer. + const VERTEX = 0x40; + } +); + +/// Type of a surface channel. This is how we interpret the +/// storage allocated with `SurfaceType`. +#[repr(u8)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum ChannelType { + /// Unsigned normalized. + Unorm, + /// Signed normalized. + Snorm, + /// Unsigned integer. + Uint, + /// Signed integer. + Sint, + /// Unsigned floating-point. + Ufloat, + /// Signed floating-point. + Sfloat, + /// Unsigned scaled integer. + Uscaled, + /// Signed scaled integer. + Sscaled, + /// Unsigned normalized, SRGB non-linear encoded. + Srgb, +} + +macro_rules! surface_types { + { $($name:ident { $total:expr, $($aspect:ident)|*, $dim:expr $( ,$component:ident : $bits:expr )*} ,)* } => { + /// Type of the allocated texture surface. It is supposed to only + /// carry information about the number of bits per each channel. + /// The actual types are up to the views to decide and interpret. + /// The actual components are up to the swizzle to define. + #[repr(u8)] + #[allow(missing_docs, non_camel_case_types)] + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub enum SurfaceType { + $( $name, )* + } + + impl SurfaceType { + /// Return the bits for this format. + pub fn describe_bits(&self) -> FormatBits { + match *self { + $( SurfaceType::$name => FormatBits { + $( $component: $bits, )* + .. 
BITS_ZERO + }, )* + } + } + + /// Return the format descriptor. + pub fn desc(&self) -> FormatDesc { + match *self { + $( SurfaceType::$name => FormatDesc { + bits: $total.min(!$total), + dim: $dim, + packed: $total > 0x1000, + aspects: $(Aspects::$aspect)|*, + }, )* + } + } + } + } +} + +// ident { num_bits, aspects, dim, (color, alpha, ..) } +// if the number of bits is given with exclamation (e.g. `!16`), the format is considered packed +surface_types! { + R4_G4 { !8, COLOR, (1, 1), color: 8 }, + R4_G4_B4_A4 { !16, COLOR, (1, 1), color: 12, alpha: 4 }, + B4_G4_R4_A4 { !16, COLOR, (1, 1), color: 12, alpha: 4 }, + R5_G6_B5 { !16, COLOR, (1, 1), color: 16 }, + B5_G6_R5 { !16, COLOR, (1, 1), color: 16 }, + R5_G5_B5_A1 { !16, COLOR, (1, 1), color: 15, alpha: 1 }, + B5_G5_R5_A1 { !16, COLOR, (1, 1), color: 15, alpha: 1 }, + A1_R5_G5_B5 { !16, COLOR, (1, 1), color: 15, alpha: 1 }, + R8 { 8, COLOR, (1, 1), color: 8 }, + R8_G8 { 16, COLOR, (1, 1), color: 16 }, + R8_G8_B8 { 24, COLOR, (1, 1), color: 24 }, + B8_G8_R8 { 24, COLOR, (1, 1), color: 24 }, + R8_G8_B8_A8 { 32, COLOR, (1, 1), color: 24, alpha: 8 }, + B8_G8_R8_A8 { 32, COLOR, (1, 1), color: 24, alpha: 8 }, + A8_B8_G8_R8 { !32, COLOR, (1, 1), color: 24, alpha: 8 }, + A2_R10_G10_B10 { !32, COLOR, (1, 1), color: 30, alpha: 2 }, + A2_B10_G10_R10 { !32, COLOR, (1, 1), color: 30, alpha: 2 }, + R16 { 16, COLOR, (1, 1), color: 16 }, + R16_G16 { 32, COLOR, (1, 1), color: 32 }, + R16_G16_B16 { 48, COLOR, (1, 1), color: 48 }, + R16_G16_B16_A16 { 64, COLOR, (1, 1), color: 48, alpha: 16 }, + R32 { 32, COLOR, (1, 1), color: 32 }, + R32_G32 { 64, COLOR, (1, 1), color: 64 }, + R32_G32_B32 { 96, COLOR, (1, 1), color: 96 }, + R32_G32_B32_A32 { 128, COLOR, (1, 1), color: 96, alpha: 32 }, + R64 { 64, COLOR, (1, 1), color: 64 }, + R64_G64 { 128, COLOR, (1, 1), color: 128 }, + R64_G64_B64 { 192, COLOR, (1, 1), color: 192 }, + R64_G64_B64_A64 { 256, COLOR, (1, 1), color: 192, alpha: 64 }, + B10_G11_R11 { !32, COLOR, (1, 1), color: 32 }, 
+ E5_B9_G9_R9 { !32, COLOR, (1, 1), color: 27 }, + D16 { 16, DEPTH, (1, 1), depth: 16 }, + X8D24 { !32, DEPTH, (1, 1), depth: 24 }, + D32 { 32, DEPTH, (1, 1), depth: 32 }, + S8 { 8, STENCIL, (1, 1), stencil: 8 }, + D16_S8 { 24, DEPTH | STENCIL, (1, 1), depth: 16, stencil: 8 }, + D24_S8 { 32, DEPTH | STENCIL, (1, 1), depth: 24, stencil: 8 }, + D32_S8 { 40, DEPTH | STENCIL, (1, 1), depth: 32, stencil: 8 }, + BC1_RGB { 64, COLOR, (4, 4) }, + BC1_RGBA { 64, COLOR, (4, 4) }, + BC2 { 128, COLOR, (4, 4) }, + BC3 { 128, COLOR, (4, 4) }, + BC4 { 64, COLOR, (4, 4) }, + BC5 { 128, COLOR, (4, 4) }, + BC6 { 128, COLOR, (4, 4) }, + BC7 { 128, COLOR, (4, 4) }, + ETC2_R8_G8_B8 { 64, COLOR, (4, 4) }, + ETC2_R8_G8_B8_A1 { 64, COLOR, (4, 4) }, + ETC2_R8_G8_B8_A8 { 128, COLOR, (4, 4) }, + EAC_R11 { 64, COLOR, (4, 4) }, + EAC_R11_G11 { 128, COLOR, (4, 4) }, + ASTC_4x4 { 128, COLOR, (4, 4) }, + ASTC_5x4 { 128, COLOR, (5, 4) }, + ASTC_5x5 { 128, COLOR, (5, 5) }, + ASTC_6x5 { 128, COLOR, (6, 5) }, + ASTC_6x6 { 128, COLOR, (6, 6) }, + ASTC_8x5 { 128, COLOR, (8, 5) }, + ASTC_8x6 { 128, COLOR, (8, 6) }, + ASTC_8x8 { 128, COLOR, (8, 8) }, + ASTC_10x5 { 128, COLOR, (10, 5) }, + ASTC_10x6 { 128, COLOR, (10, 6) }, + ASTC_10x8 { 128, COLOR, (10, 8) }, + ASTC_10x10 { 128, COLOR, (10, 10) }, + ASTC_12x10 { 128, COLOR, (12, 10) }, + ASTC_12x12 { 128, COLOR, (12, 12) }, +} + +/// Generic run-time base format. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct BaseFormat(pub SurfaceType, pub ChannelType); + +/// Conversion trait into `Format`; +pub trait AsFormat { + /// Associated format. + const SELF: Format; +} + +macro_rules! 
formats { + { + $name:ident = ($surface:ident, $channel:ident), + $($name_tail:ident = ($surface_tail:ident, $channel_tail:ident),)* + } => { + /// A format descriptor that describes the channels present in a + /// texture or view, how they are laid out, what size they are, + /// and how the elements of the channels are interpreted (integer, + /// float, etc.) + #[allow(missing_docs)] + #[repr(u32)] + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub enum Format { + $name = 1, + $( $name_tail, )* + + // This serves as safety net for conversion from Vulkan -> HAL, + // in case Vulkan adds new formats: + // 1. We can check if a format is out of range + // 2. We 'ensure' that backend implementations do non-exhaustive matching + #[doc(hidden)] + __NumFormats, + } + + /// Number of formats. + pub const NUM_FORMATS: usize = Format::__NumFormats as _; + + /// Conversion table from `Format` to `BaseFormat`, excluding `Undefined`. + pub const BASE_FORMATS: [BaseFormat; NUM_FORMATS-1] = [ + BaseFormat(SurfaceType::$surface, ChannelType::$channel), + $(BaseFormat(SurfaceType::$surface_tail, ChannelType::$channel_tail), )* + ]; + + /// A struct equivalent to the matching `Format` enum member, which allows + /// an API to be strongly typed on particular formats. + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct $name; + + impl AsFormat for $name { + const SELF: Format = Format::$name; + } + + $( + /// A struct equivalent to the matching `Format` enum member, which allows + /// an API to be strongly typed on particular formats. 
+ #[allow(missing_docs)] + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct $name_tail; + + impl AsFormat for $name_tail { + const SELF: Format = Format::$name_tail; + } + + )* + } +} + +// Format order has to match the order exposed by the Vulkan API. +formats! { + Rg4Unorm = (R4_G4, Unorm), + Rgba4Unorm = (R4_G4_B4_A4, Unorm), + Bgra4Unorm = (B4_G4_R4_A4, Unorm), + R5g6b5Unorm = (R5_G6_B5, Unorm), + B5g6r5Unorm = (B5_G6_R5, Unorm), + R5g5b5a1Unorm = (R5_G5_B5_A1, Unorm), + B5g5r5a1Unorm = (B5_G5_R5_A1, Unorm), + A1r5g5b5Unorm = (A1_R5_G5_B5, Unorm), + R8Unorm = (R8, Unorm), + R8Snorm = (R8, Snorm), + R8Uscaled = (R8, Uscaled), + R8Sscaled = (R8, Sscaled), + R8Uint = (R8, Uint), + R8Sint = (R8, Sint), + R8Srgb = (R8, Srgb), + Rg8Unorm = (R8_G8, Unorm), + Rg8Snorm = (R8_G8, Snorm), + Rg8Uscaled = (R8_G8, Uscaled), + Rg8Sscaled = (R8_G8, Sscaled), + Rg8Uint = (R8_G8, Uint), + Rg8Sint = (R8_G8, Sint), + Rg8Srgb = (R8_G8, Srgb), + Rgb8Unorm = (R8_G8_B8, Unorm), + Rgb8Snorm = (R8_G8_B8, Snorm), + Rgb8Uscaled = (R8_G8_B8, Uscaled), + Rgb8Sscaled = (R8_G8_B8, Sscaled), + Rgb8Uint = (R8_G8_B8, Uint), + Rgb8Sint = (R8_G8_B8, Sint), + Rgb8Srgb = (R8_G8_B8, Srgb), + Bgr8Unorm = (B8_G8_R8, Unorm), + Bgr8Snorm = (B8_G8_R8, Snorm), + Bgr8Uscaled = (B8_G8_R8, Uscaled), + Bgr8Sscaled = (B8_G8_R8, Sscaled), + Bgr8Uint = (B8_G8_R8, Uint), + Bgr8Sint = (B8_G8_R8, Sint), + Bgr8Srgb = (B8_G8_R8, Srgb), + Rgba8Unorm = (R8_G8_B8_A8, Unorm), + Rgba8Snorm = (R8_G8_B8_A8, Snorm), + Rgba8Uscaled = (R8_G8_B8_A8, Uscaled), + Rgba8Sscaled = (R8_G8_B8_A8, Sscaled), + Rgba8Uint = (R8_G8_B8_A8, Uint), + Rgba8Sint = (R8_G8_B8_A8, Sint), + Rgba8Srgb = (R8_G8_B8_A8, Srgb), + Bgra8Unorm = (B8_G8_R8_A8, Unorm), + Bgra8Snorm = (B8_G8_R8_A8, Snorm), + Bgra8Uscaled = (B8_G8_R8_A8, Uscaled), + Bgra8Sscaled = (B8_G8_R8_A8, Sscaled), + Bgra8Uint = (B8_G8_R8_A8, Uint), + Bgra8Sint = (B8_G8_R8_A8, Sint), + 
Bgra8Srgb = (B8_G8_R8_A8, Srgb), + Abgr8Unorm = (A8_B8_G8_R8, Unorm), + Abgr8Snorm = (A8_B8_G8_R8, Snorm), + Abgr8Uscaled = (A8_B8_G8_R8, Uscaled), + Abgr8Sscaled = (A8_B8_G8_R8, Sscaled), + Abgr8Uint = (A8_B8_G8_R8, Uint), + Abgr8Sint = (A8_B8_G8_R8, Sint), + Abgr8Srgb = (A8_B8_G8_R8, Srgb), + A2r10g10b10Unorm = (A2_R10_G10_B10, Unorm), + A2r10g10b10Snorm = (A2_R10_G10_B10, Snorm), + A2r10g10b10Uscaled = (A2_R10_G10_B10, Uscaled), + A2r10g10b10Sscaled = (A2_R10_G10_B10, Sscaled), + A2r10g10b10Uint = (A2_R10_G10_B10, Uint), + A2r10g10b10Sint = (A2_R10_G10_B10, Sint), + A2b10g10r10Unorm = (A2_B10_G10_R10, Unorm), + A2b10g10r10Snorm = (A2_B10_G10_R10, Snorm), + A2b10g10r10Uscaled = (A2_B10_G10_R10, Uscaled), + A2b10g10r10Sscaled = (A2_B10_G10_R10, Sscaled), + A2b10g10r10Uint = (A2_B10_G10_R10, Uint), + A2b10g10r10Sint = (A2_B10_G10_R10, Sint), + R16Unorm = (R16, Unorm), + R16Snorm = (R16, Snorm), + R16Uscaled = (R16, Uscaled), + R16Sscaled = (R16, Sscaled), + R16Uint = (R16, Uint), + R16Sint = (R16, Sint), + R16Sfloat = (R16, Sfloat), + Rg16Unorm = (R16_G16, Unorm), + Rg16Snorm = (R16_G16, Snorm), + Rg16Uscaled = (R16_G16, Uscaled), + Rg16Sscaled = (R16_G16, Sscaled), + Rg16Uint = (R16_G16, Uint), + Rg16Sint = (R16_G16, Sint), + Rg16Sfloat = (R16_G16, Sfloat), + Rgb16Unorm = (R16_G16_B16, Unorm), + Rgb16Snorm = (R16_G16_B16, Snorm), + Rgb16Uscaled = (R16_G16_B16, Uscaled), + Rgb16Sscaled = (R16_G16_B16, Sscaled), + Rgb16Uint = (R16_G16_B16, Uint), + Rgb16Sint = (R16_G16_B16, Sint), + Rgb16Sfloat = (R16_G16_B16, Sfloat), + Rgba16Unorm = (R16_G16_B16_A16, Unorm), + Rgba16Snorm = (R16_G16_B16_A16, Snorm), + Rgba16Uscaled = (R16_G16_B16_A16, Uscaled), + Rgba16Sscaled = (R16_G16_B16_A16, Sscaled), + Rgba16Uint = (R16_G16_B16_A16, Uint), + Rgba16Sint = (R16_G16_B16_A16, Sint), + Rgba16Sfloat = (R16_G16_B16_A16, Sfloat), + R32Uint = (R32, Uint), + R32Sint = (R32, Sint), + R32Sfloat = (R32, Sfloat), + Rg32Uint = (R32_G32, Uint), + Rg32Sint = (R32_G32, Sint), + Rg32Sfloat = 
(R32_G32, Sfloat), + Rgb32Uint = (R32_G32_B32, Uint), + Rgb32Sint = (R32_G32_B32, Sint), + Rgb32Sfloat = (R32_G32_B32, Sfloat), + Rgba32Uint = (R32_G32_B32_A32, Uint), + Rgba32Sint = (R32_G32_B32_A32, Sint), + Rgba32Sfloat = (R32_G32_B32_A32, Sfloat), + R64Uint = (R64, Uint), + R64Sint = (R64, Sint), + R64Sfloat = (R64, Sfloat), + Rg64Uint = (R64_G64, Uint), + Rg64Sint = (R64_G64, Sint), + Rg64Sfloat = (R64_G64, Sfloat), + Rgb64Uint = (R64_G64_B64, Uint), + Rgb64Sint = (R64_G64_B64, Sint), + Rgb64Sfloat = (R64_G64_B64, Sfloat), + Rgba64Uint = (R64_G64_B64_A64, Uint), + Rgba64Sint = (R64_G64_B64_A64, Sint), + Rgba64Sfloat = (R64_G64_B64_A64, Sfloat), + B10g11r11Ufloat = (B10_G11_R11, Ufloat), + E5b9g9r9Ufloat = (E5_B9_G9_R9, Ufloat), + D16Unorm = (D16, Unorm), + X8D24Unorm = (X8D24, Unorm), + D32Sfloat = (D32, Sfloat), + S8Uint = (S8, Uint), + D16UnormS8Uint = (D16_S8, Unorm), + D24UnormS8Uint = (D24_S8, Unorm), + D32SfloatS8Uint = (D32_S8, Sfloat), + Bc1RgbUnorm = (BC1_RGB, Unorm), + Bc1RgbSrgb = (BC1_RGB, Srgb), + Bc1RgbaUnorm = (BC1_RGBA, Unorm), + Bc1RgbaSrgb = (BC1_RGBA, Srgb), + Bc2Unorm = (BC2, Unorm), + Bc2Srgb = (BC2, Srgb), + Bc3Unorm = (BC3, Unorm), + Bc3Srgb = (BC3, Srgb), + Bc4Unorm = (BC4, Unorm), + Bc4Snorm = (BC4, Snorm), + Bc5Unorm = (BC5, Unorm), + Bc5Snorm = (BC5, Snorm), + Bc6hUfloat = (BC6, Ufloat), + Bc6hSfloat = (BC6, Sfloat), + Bc7Unorm = (BC7, Unorm), + Bc7Srgb = (BC7, Srgb), + Etc2R8g8b8Unorm = (ETC2_R8_G8_B8, Unorm), + Etc2R8g8b8Srgb = (ETC2_R8_G8_B8, Srgb), + Etc2R8g8b8a1Unorm = (ETC2_R8_G8_B8_A1, Unorm), + Etc2R8g8b8a1Srgb = (ETC2_R8_G8_B8_A1, Srgb), + Etc2R8g8b8a8Unorm = (ETC2_R8_G8_B8_A8, Unorm), + Etc2R8g8b8a8Srgb = (ETC2_R8_G8_B8_A8, Srgb), + EacR11Unorm = (EAC_R11, Unorm), + EacR11Snorm = (EAC_R11, Snorm), + EacR11g11Unorm = (EAC_R11_G11, Unorm), + EacR11g11Snorm = (EAC_R11_G11, Snorm), + Astc4x4Unorm = (ASTC_4x4, Unorm), + Astc4x4Srgb = (ASTC_4x4, Srgb), + Astc5x4Unorm = (ASTC_5x4, Unorm), + Astc5x4Srgb = (ASTC_5x4, Srgb), + 
Astc5x5Unorm = (ASTC_5x5, Unorm), + Astc5x5Srgb = (ASTC_5x5, Srgb), + Astc6x5Unorm = (ASTC_6x5, Unorm), + Astc6x5Srgb = (ASTC_6x5, Srgb), + Astc6x6Unorm = (ASTC_6x6, Unorm), + Astc6x6Srgb = (ASTC_6x6, Srgb), + Astc8x5Unorm = (ASTC_8x5, Unorm), + Astc8x5Srgb = (ASTC_8x5, Srgb), + Astc8x6Unorm = (ASTC_8x6, Unorm), + Astc8x6Srgb = (ASTC_8x6, Srgb), + Astc8x8Unorm = (ASTC_8x8, Unorm), + Astc8x8Srgb = (ASTC_8x8, Srgb), + Astc10x5Unorm = (ASTC_10x5, Unorm), + Astc10x5Srgb = (ASTC_10x5, Srgb), + Astc10x6Unorm = (ASTC_10x6, Unorm), + Astc10x6Srgb = (ASTC_10x6, Srgb), + Astc10x8Unorm = (ASTC_10x8, Unorm), + Astc10x8Srgb = (ASTC_10x8, Srgb), + Astc10x10Unorm = (ASTC_10x10, Unorm), + Astc10x10Srgb = (ASTC_10x10, Srgb), + Astc12x10Unorm = (ASTC_12x10, Unorm), + Astc12x10Srgb = (ASTC_12x10, Srgb), + Astc12x12Unorm = (ASTC_12x12, Unorm), + Astc12x12Srgb = (ASTC_12x12, Srgb), +} + +impl Format { + /// Get base format. + /// + /// Returns `None` if format is `Undefined`. + pub fn base_format(self) -> BaseFormat { + assert!(self as usize != 0 && NUM_FORMATS > self as usize); + BASE_FORMATS[self as usize - 1] + } + + /// A shortcut to obtain surface format description. + pub fn surface_desc(&self) -> FormatDesc { + self.base_format().0.desc() + } + + /// Returns if the format has a color aspect. + pub fn is_color(self) -> bool { + self.surface_desc().aspects.contains(Aspects::COLOR) + } + + /// Returns if the format has a depth aspect. + pub fn is_depth(self) -> bool { + self.surface_desc().aspects.contains(Aspects::DEPTH) + } + + /// Returns if the format has a stencil aspect. 
+ pub fn is_stencil(self) -> bool { + self.surface_desc().aspects.contains(Aspects::STENCIL) + } +} + +// Common vertex attribute formats +impl AsFormat for f32 { + const SELF: Format = Format::R32Sfloat; +} +impl AsFormat for [f32; 2] { + const SELF: Format = Format::Rg32Sfloat; +} +impl AsFormat for [f32; 3] { + const SELF: Format = Format::Rgb32Sfloat; +} +impl AsFormat for [f32; 4] { + const SELF: Format = Format::Rgba32Sfloat; +} diff --git a/third_party/rust/gfx-hal/src/image.rs b/third_party/rust/gfx-hal/src/image.rs old mode 100755 new mode 100644 index d2d4edf20920..9792a601e75b --- a/third_party/rust/gfx-hal/src/image.rs +++ b/third_party/rust/gfx-hal/src/image.rs @@ -1,694 +1,696 @@ -//! Image related structures. -//! -//! An image is a block of GPU memory representing a grid of texels. - -use crate::{ - buffer::Offset as RawOffset, - device, - format, - pso::{Comparison, Rect}, -}; -use std::{f32, hash, ops::Range}; - -/// Dimension size. -pub type Size = u32; -/// Number of MSAA samples. -pub type NumSamples = u8; -/// Image layer. -pub type Layer = u16; -/// Image mipmap level. -pub type Level = u8; -/// Maximum accessible mipmap level of an image. -pub const MAX_LEVEL: Level = 15; -/// A texel coordinate in an image. -pub type TexelCoordinate = i32; - -/// Describes the size of an image, which may be up to three dimensional. -#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Extent { - /// Image width - pub width: Size, - /// Image height - pub height: Size, - /// Image depth. - pub depth: Size, -} - -impl Extent { - /// Return true if one of the dimensions is zero. - pub fn is_empty(&self) -> bool { - self.width == 0 || self.height == 0 || self.depth == 0 - } - /// Get the extent at a particular mipmap level. 
- pub fn at_level(&self, level: Level) -> Self { - Extent { - width: 1.max(self.width >> level), - height: 1.max(self.height >> level), - depth: 1.max(self.depth >> level), - } - } - /// Get a rectangle for the full area of extent. - pub fn rect(&self) -> Rect { - Rect { - x: 0, - y: 0, - w: self.width as i16, - h: self.height as i16, - } - } -} - -/// An offset into an `Image` used for image-to-image -/// copy operations. All offsets are in texels, and -/// specifying offsets other than 0 for dimensions -/// that do not exist is undefined behavior -- for -/// example, specifying a `z` offset of `1` in a -/// two-dimensional image. -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Offset { - /// X offset. - pub x: TexelCoordinate, - /// Y offset. - pub y: TexelCoordinate, - /// Z offset. - pub z: TexelCoordinate, -} - -impl Offset { - /// Zero offset shortcut. - pub const ZERO: Self = Offset { x: 0, y: 0, z: 0 }; - - /// Convert the offset into 2-sided bounds given the extent. - pub fn into_bounds(self, extent: &Extent) -> Range { - let end = Offset { - x: self.x + extent.width as i32, - y: self.y + extent.height as i32, - z: self.z + extent.depth as i32, - }; - self .. end - } -} - -/// Image tiling modes. -#[repr(u32)] -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum Tiling { - /// Optimal tiling for GPU memory access. Implementation-dependent. - Optimal, - /// Optimal for CPU read/write. Texels are laid out in row-major order, - /// possibly with some padding on each row. - Linear, -} - -/// Pure image object creation error. -#[derive(Clone, Debug, PartialEq)] -pub enum CreationError { - /// Out of either host or device memory. - OutOfMemory(device::OutOfMemory), - /// The format is not supported by the device. - Format(format::Format), - /// The kind doesn't support a particular operation. 
- Kind, - /// Failed to map a given multisampled kind to the device. - Samples(NumSamples), - /// Unsupported size in one of the dimensions. - Size(Size), - /// The given data has a different size than the target image slice. - Data(usize), - /// The mentioned usage mode is not supported - Usage(Usage), -} - -impl From for CreationError { - fn from(error: device::OutOfMemory) -> Self { - CreationError::OutOfMemory(error) - } -} - -impl std::fmt::Display for CreationError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - CreationError::OutOfMemory(err) => write!(fmt, "Failed to create image: {}", err), - CreationError::Format(format) => write!(fmt, "Failed to create image: Unsupported format: {:?}", format), - CreationError::Kind => write!(fmt, "Failed to create image: Specified kind doesn't support particular operation"), // Room for improvement. - CreationError::Samples(samples) => write!(fmt, "Failed to create image: Specified format doesn't support specified sampling {}", samples), - CreationError::Size(size) => write!(fmt, "Failed to create image: Unsupported size in one of the dimensions {}", size), - CreationError::Data(data) => write!(fmt, "Failed to create image: The given data has a different size {{{}}} than the target image slice", data), // Actually nothing emits this. - CreationError::Usage(usage) => write!(fmt, "Failed to create image: Unsupported usage: {:?}", usage), - } - } -} - -impl std::error::Error for CreationError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - CreationError::OutOfMemory(err) => Some(err), - _ => None, - } - } -} - -/// Error creating an `ImageView`. -#[derive(Clone, Debug, PartialEq)] -pub enum ViewError { // TODO: Rename this or `buffer::ViewCreationError` - /// The required usage flag is not present in the image. - Usage(Usage), - /// Selected mip level doesn't exist. - Level(Level), - /// Selected array layer doesn't exist. 
- Layer(LayerError), - /// An incompatible format was requested for the view. - BadFormat(format::Format), - /// An incompatible view kind was requested for the view. - BadKind(ViewKind), - /// Out of either Host or Device memory - OutOfMemory(device::OutOfMemory), - /// The backend refused for some reason. - Unsupported, -} - -impl From for ViewError { - fn from(error: device::OutOfMemory) -> Self { - ViewError::OutOfMemory(error) - } -} - -impl std::fmt::Display for ViewError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ViewError::Usage(usage) => write!(fmt, "Failed to create image view: Specified usage flags are not present in the image {:?}", usage), - ViewError::Level(level) => write!(fmt, "Failed to create image view: Selected level doesn't exist in the image {}", level), - ViewError::Layer(err) => write!(fmt, "Failed to create image view: {}", err), - ViewError::BadFormat(format) => write!(fmt, "Failed to create image view: Incompatible format {:?}", format), - ViewError::BadKind(kind) => write!(fmt, "Failed to create image view: Incompatible kind {:?}", kind), - ViewError::OutOfMemory(err) => write!(fmt, "Failed to create image view: {}", err), - ViewError::Unsupported => write!(fmt, "Failed to create image view: Implementation specific error occurred"), - } - } -} - -impl std::error::Error for ViewError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - ViewError::OutOfMemory(err) => Some(err), - _ => None, - } - } -} - -/// An error associated with selected image layer. -#[derive(Clone, Debug, PartialEq)] -pub enum LayerError { - /// The source image kind doesn't support array slices. - NotExpected(Kind), - /// Selected layers are outside of the provided range. 
- OutOfBounds(Range), -} - -impl std::fmt::Display for LayerError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - LayerError::NotExpected(kind) => write!(fmt, "Kind {{{:?}}} does not support arrays", kind), - LayerError::OutOfBounds(layers) => write!(fmt, "Out of bounds layers {} .. {}", layers.start, layers.end), - } - } -} - -/// How to [filter](https://en.wikipedia.org/wiki/Texture_filtering) the -/// image when sampling. They correspond to increasing levels of quality, -/// but also cost. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum Filter { - /// Selects a single texel from the current mip level and uses its value. - /// - /// Mip filtering selects the filtered value from one level. - Nearest, - /// Selects multiple texels and calculates the value via multivariate interpolation. - /// * 1D: Linear interpolation - /// * 2D/Cube: Bilinear interpolation - /// * 3D: Trilinear interpolation - Linear, -} - -/// Anisotropic filtering description for the sampler. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum Anisotropic { - /// Disable anisotropic filtering. - Off, - /// Enable anisotropic filtering with the anisotropy clamp value. - On(u8), -} - -/// The face of a cube image to do an operation on. -#[allow(missing_docs)] -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[repr(u8)] -pub enum CubeFace { - PosX, - NegX, - PosY, - NegY, - PosZ, - NegZ, -} - -/// A constant array of cube faces in the order they map to the hardware. 
-pub const CUBE_FACES: [CubeFace; 6] = [ - CubeFace::PosX, - CubeFace::NegX, - CubeFace::PosY, - CubeFace::NegY, - CubeFace::PosZ, - CubeFace::NegZ, -]; - -/// Specifies the dimensionality of an image to be allocated, -/// along with the number of mipmap layers and MSAA samples -/// if applicable. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum Kind { - /// A single one-dimensional row of texels. - D1(Size, Layer), - /// Two-dimensional image. - D2(Size, Size, Layer, NumSamples), - /// Volumetric image. - D3(Size, Size, Size), -} - -impl Kind { - /// Get the image extent. - pub fn extent(&self) -> Extent { - match *self { - Kind::D1(width, _) => Extent { - width, - height: 1, - depth: 1, - }, - Kind::D2(width, height, _, _) => Extent { - width, - height, - depth: 1, - }, - Kind::D3(width, height, depth) => Extent { - width, - height, - depth, - }, - } - } - - /// Get the extent of a particular mipmap level. - pub fn level_extent(&self, level: Level) -> Extent { - use std::cmp::{max, min}; - // must be at least 1 - let map = |val| max(min(val, 1), val >> min(level, MAX_LEVEL)); - match *self { - Kind::D1(w, _) => Extent { - width: map(w), - height: 1, - depth: 1, - }, - Kind::D2(w, h, _, _) => Extent { - width: map(w), - height: map(h), - depth: 1, - }, - Kind::D3(w, h, d) => Extent { - width: map(w), - height: map(h), - depth: map(d), - }, - } - } - - /// Count the number of mipmap levels. - pub fn num_levels(&self) -> Level { - use std::cmp::max; - match *self { - Kind::D2(_, _, _, s) if s > 1 => { - // anti-aliased images can't have mipmaps - 1 - } - _ => { - let extent = self.extent(); - let dominant = max(max(extent.width, extent.height), extent.depth); - (1 ..).find(|level| dominant >> level == 0).unwrap() - } - } - } - - /// Return the number of layers in an array type. - /// - /// Each cube face counts as separate layer. 
- pub fn num_layers(&self) -> Layer { - match *self { - Kind::D1(_, a) | Kind::D2(_, _, a, _) => a, - Kind::D3(..) => 1, - } - } - - /// Return the number of MSAA samples for the kind. - pub fn num_samples(&self) -> NumSamples { - match *self { - Kind::D1(..) => 1, - Kind::D2(_, _, _, s) => s, - Kind::D3(..) => 1, - } - } -} - -/// Specifies the kind/dimensionality of an image view. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum ViewKind { - /// A single one-dimensional row of texels. - D1, - /// An array of rows of texels. Equivalent to `D2` except that texels - /// in different rows are not sampled, so filtering will be constrained - /// to a single row of texels at a time. - D1Array, - /// A traditional 2D image, with rows arranged contiguously. - D2, - /// An array of 2D images. Equivalent to `D3` except that texels in - /// a different depth level are not sampled. - D2Array, - /// A volume image, with each 2D layer arranged contiguously. - D3, - /// A set of 6 2D images, one for each face of a cube. - Cube, - /// An array of Cube images. - CubeArray, -} - -bitflags!( - /// Capabilities to create views into an image. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct ViewCapabilities: u32 { - /// Support creation of views with different formats. - const MUTABLE_FORMAT = 0x0000_0008; - /// Support creation of `Cube` and `CubeArray` kinds of views. - const KIND_CUBE = 0x0000_0010; - /// Support creation of `D2Array` kind of view. - const KIND_2D_ARRAY = 0x0000_0020; - } -); - -bitflags!( - /// TODO: Find out if TRANSIENT_ATTACHMENT + INPUT_ATTACHMENT - /// are applicable on backends other than Vulkan. --AP - /// Image usage flags - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct Usage: u32 { - /// The image is used as a transfer source. - const TRANSFER_SRC = 0x1; - /// The image is used as a transfer destination. 
- const TRANSFER_DST = 0x2; - /// The image is a [sampled image](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#descriptorsets-sampledimage) - const SAMPLED = 0x4; - /// The image is a [storage image](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#descriptorsets-storageimage) - const STORAGE = 0x8; - /// The image is used as a color attachment -- that is, color input to a rendering pass. - const COLOR_ATTACHMENT = 0x10; - /// The image is used as a depth attachment. - const DEPTH_STENCIL_ATTACHMENT = 0x20; - /// - const TRANSIENT_ATTACHMENT = 0x40; - /// - const INPUT_ATTACHMENT = 0x80; - - } -); - -impl Usage { - /// Returns true if this image can be used in transfer operations. - pub fn can_transfer(&self) -> bool { - self.intersects(Usage::TRANSFER_SRC | Usage::TRANSFER_DST) - } - - /// Returns true if this image can be used as a target. - pub fn can_target(&self) -> bool { - self.intersects(Usage::COLOR_ATTACHMENT | Usage::DEPTH_STENCIL_ATTACHMENT) - } -} - -/// Specifies how image coordinates outside the range `[0, 1]` are handled. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum WrapMode { - /// Tile the image, that is, sample the coordinate modulo `1.0`, so - /// addressing the image beyond an edge will "wrap" back from the - /// other edge. - Tile, - /// Mirror the image. Like tile, but uses abs(coord) before the modulo. - Mirror, - /// Clamp the image to the value at `0.0` or `1.0` respectively. - Clamp, - /// Use border color. - Border, -} - -/// A wrapper for the LOD level of an image. Needed so that we can -/// implement Eq and Hash for it. -#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Lod(pub f32); - -impl Lod { - /// Possible LOD range. - pub const RANGE: Range = Lod(f32::MIN) .. 
Lod(f32::MAX); -} - -impl Eq for Lod {} -impl hash::Hash for Lod { - fn hash(&self, state: &mut H) { - self.0.to_bits().hash(state) - } -} - -/// A wrapper for an RGBA color with 8 bits per texel, encoded as a u32. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct PackedColor(pub u32); - -impl From<[f32; 4]> for PackedColor { - fn from(c: [f32; 4]) -> PackedColor { - PackedColor( - c.iter() - .rev() - .fold(0, |u, &c| (u << 8) + (c * 255.0) as u32), - ) - } -} - -impl Into<[f32; 4]> for PackedColor { - fn into(self) -> [f32; 4] { - let mut out = [0.0; 4]; - for (i, channel) in out.iter_mut().enumerate() { - let byte = (self.0 >> (i << 3)) & 0xFF; - *channel = byte as f32 / 255.0; - } - out - } -} - -/// Specifies how to sample from an image. These are all the parameters -/// available that alter how the GPU goes from a coordinate in an image -/// to producing an actual value from the texture, including filtering/ -/// scaling, wrap mode, etc. -// TODO: document the details of sampling. -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct SamplerDesc { - /// Minification filter method to use. - pub min_filter: Filter, - /// Magnification filter method to use. - pub mag_filter: Filter, - /// Mip filter method to use. - pub mip_filter: Filter, - /// Wrapping mode for each of the U, V, and W axis (S, T, and R in OpenGL - /// speak). - pub wrap_mode: (WrapMode, WrapMode, WrapMode), - /// This bias is added to every computed mipmap level (N + lod_bias). For - /// example, if it would select mipmap level 2 and lod_bias is 1, it will - /// use mipmap level 3. - pub lod_bias: Lod, - /// This range is used to clamp LOD level used for sampling. - pub lod_range: Range, - /// Comparison mode, used primary for a shadow map. - pub comparison: Option, - /// Border color is used when one of the wrap modes is set to border. 
- pub border: PackedColor, - /// Specifies whether the texture coordinates are normalized. - pub normalized: bool, - /// Anisotropic filtering. - pub anisotropic: Anisotropic, -} - -impl SamplerDesc { - /// Create a new sampler description with a given filter method for all filtering operations - /// and a wrapping mode, using no LOD modifications. - pub fn new(filter: Filter, wrap: WrapMode) -> Self { - SamplerDesc { - min_filter: filter, - mag_filter: filter, - mip_filter: filter, - wrap_mode: (wrap, wrap, wrap), - lod_bias: Lod(0.0), - lod_range: Lod::RANGE.clone(), - comparison: None, - border: PackedColor(0), - normalized: true, - anisotropic: Anisotropic::Off, - } - } -} - -/// Specifies options for how memory for an image is arranged. -/// These are hints to the GPU driver and may or may not have actual -/// performance effects, but describe constraints on how the data -/// may be used that a program *must* obey. They do not specify -/// how channel values or such are laid out in memory; the actual -/// image data is considered opaque. -/// -/// Details may be found in [the Vulkan spec](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#resources-image-layouts) -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum Layout { - /// General purpose, no restrictions on usage. - General, - /// Must only be used as a color attachment in a framebuffer. - ColorAttachmentOptimal, - /// Must only be used as a depth attachment in a framebuffer. - DepthStencilAttachmentOptimal, - /// Must only be used as a depth attachment in a framebuffer, - /// or as a read-only depth or stencil buffer in a shader. - DepthStencilReadOnlyOptimal, - /// Must only be used as a read-only image in a shader. - ShaderReadOnlyOptimal, - /// Must only be used as the source for a transfer command. - TransferSrcOptimal, - /// Must only be used as the destination for a transfer command. 
- TransferDstOptimal, - /// No layout, does not support device access. Only valid as a - /// source layout when transforming data to a specific destination - /// layout or initializing data. Does NOT guarentee that the contents - /// of the source buffer are preserved. - Undefined, //TODO: consider Option<> instead? - /// Like `Undefined`, but does guarentee that the contents of the source - /// buffer are preserved. - Preinitialized, - /// The layout that an image must be in to be presented to the display. - Present, -} - -bitflags!( - /// Bitflags to describe how memory in an image or buffer can be accessed. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct Access: u32 { - /// Read access to an input attachment from within a fragment shader. - const INPUT_ATTACHMENT_READ = 0x10; - /// Read-only state for SRV access, or combine with `SHADER_WRITE` to have r/w access to UAV. - const SHADER_READ = 0x20; - /// Writeable state for UAV access. - /// Combine with `SHADER_READ` to have r/w access to UAV. - const SHADER_WRITE = 0x40; - /// Read state but can only be combined with `COLOR_ATTACHMENT_WRITE`. - const COLOR_ATTACHMENT_READ = 0x80; - /// Write-only state but can be combined with `COLOR_ATTACHMENT_READ`. - const COLOR_ATTACHMENT_WRITE = 0x100; - /// Read access to a depth/stencil attachment in a depth or stencil operation. - const DEPTH_STENCIL_ATTACHMENT_READ = 0x200; - /// Write access to a depth/stencil attachment in a depth or stencil operation. - const DEPTH_STENCIL_ATTACHMENT_WRITE = 0x400; - /// Read access to the buffer in a copy operation. - const TRANSFER_READ = 0x800; - /// Write access to the buffer in a copy operation. - const TRANSFER_WRITE = 0x1000; - /// Read access for raw memory to be accessed by the host system (ie, CPU). - const HOST_READ = 0x2000; - /// Write access for raw memory to be accessed by the host system. - const HOST_WRITE = 0x4000; - /// Read access for memory to be accessed by a non-specific entity. 
This may - /// be the host system, or it may be something undefined or specified by an - /// extension. - const MEMORY_READ = 0x8000; - /// Write access for memory to be accessed by a non-specific entity. - const MEMORY_WRITE = 0x10000; - } -); - -/// Image state, combining access methods and the image's layout. -pub type State = (Access, Layout); - -/// Selector of a concrete subresource in an image. -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Subresource { - /// Included aspects: color/depth/stencil - pub aspects: format::Aspects, - /// Selected mipmap level - pub level: Level, - /// Selected array level - pub layer: Layer, -} - -/// A subset of resource layers contained within an image's level. -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct SubresourceLayers { - /// Included aspects: color/depth/stencil - pub aspects: format::Aspects, - /// Selected mipmap level - pub level: Level, - /// Included array levels - pub layers: Range, -} - -/// A subset of resources contained within an image. -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct SubresourceRange { - /// Included aspects: color/depth/stencil - pub aspects: format::Aspects, - /// Included mipmap levels - pub levels: Range, - /// Included array levels - pub layers: Range, -} - -/// Image format properties. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct FormatProperties { - /// Maximum extent. - pub max_extent: Extent, - /// Max number of mipmap levels. - pub max_levels: Level, - /// Max number of array layers. - pub max_layers: Layer, - /// Bit mask of supported sample counts. - pub sample_count_mask: NumSamples, - /// Maximum size of the resource in bytes. 
- pub max_resource_size: usize, -} - -/// Footprint of a subresource in memory. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct SubresourceFootprint { - /// Byte slice occupied by the subresource. - pub slice: Range, - /// Byte distance between rows. - pub row_pitch: RawOffset, - /// Byte distance between array layers. - pub array_pitch: RawOffset, - /// Byte distance between depth slices. - pub depth_pitch: RawOffset, -} +//! Image related structures. +//! +//! An image is a block of GPU memory representing a grid of texels. + +use crate::{ + buffer::Offset as RawOffset, + device, + format, + pso::{Comparison, Rect}, +}; +use std::{f32, hash, ops::Range}; + +/// Dimension size. +pub type Size = u32; +/// Number of MSAA samples. +pub type NumSamples = u8; +/// Image layer. +pub type Layer = u16; +/// Image mipmap level. +pub type Level = u8; +/// Maximum accessible mipmap level of an image. +pub const MAX_LEVEL: Level = 15; +/// A texel coordinate in an image. +pub type TexelCoordinate = i32; + +/// Describes the size of an image, which may be up to three dimensional. +#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Extent { + /// Image width + pub width: Size, + /// Image height + pub height: Size, + /// Image depth. + pub depth: Size, +} + +impl Extent { + /// Return true if one of the dimensions is zero. + pub fn is_empty(&self) -> bool { + self.width == 0 || self.height == 0 || self.depth == 0 + } + /// Get the extent at a particular mipmap level. + pub fn at_level(&self, level: Level) -> Self { + Extent { + width: 1.max(self.width >> level), + height: 1.max(self.height >> level), + depth: 1.max(self.depth >> level), + } + } + /// Get a rectangle for the full area of extent. 
+ pub fn rect(&self) -> Rect { + Rect { + x: 0, + y: 0, + w: self.width as i16, + h: self.height as i16, + } + } +} + +/// An offset into an `Image` used for image-to-image +/// copy operations. All offsets are in texels, and +/// specifying offsets other than 0 for dimensions +/// that do not exist is undefined behavior -- for +/// example, specifying a `z` offset of `1` in a +/// two-dimensional image. +#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Offset { + /// X offset. + pub x: TexelCoordinate, + /// Y offset. + pub y: TexelCoordinate, + /// Z offset. + pub z: TexelCoordinate, +} + +impl Offset { + /// Zero offset shortcut. + pub const ZERO: Self = Offset { x: 0, y: 0, z: 0 }; + + /// Convert the offset into 2-sided bounds given the extent. + pub fn into_bounds(self, extent: &Extent) -> Range { + let end = Offset { + x: self.x + extent.width as i32, + y: self.y + extent.height as i32, + z: self.z + extent.depth as i32, + }; + self .. end + } +} + +/// Image tiling modes. +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Tiling { + /// Optimal tiling for GPU memory access. Implementation-dependent. + Optimal = 0, + /// Optimal for CPU read/write. Texels are laid out in row-major order, + /// possibly with some padding on each row. + Linear = 1, +} + +/// Pure image object creation error. +#[derive(Clone, Debug, PartialEq)] +pub enum CreationError { + /// Out of either host or device memory. + OutOfMemory(device::OutOfMemory), + /// The format is not supported by the device. + Format(format::Format), + /// The kind doesn't support a particular operation. + Kind, + /// Failed to map a given multisampled kind to the device. + Samples(NumSamples), + /// Unsupported size in one of the dimensions. 
+ Size(Size), + /// The given data has a different size than the target image slice. + Data(usize), + /// The mentioned usage mode is not supported + Usage(Usage), +} + +impl From for CreationError { + fn from(error: device::OutOfMemory) -> Self { + CreationError::OutOfMemory(error) + } +} + +impl std::fmt::Display for CreationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CreationError::OutOfMemory(err) => write!(fmt, "Failed to create image: {}", err), + CreationError::Format(format) => write!(fmt, "Failed to create image: Unsupported format: {:?}", format), + CreationError::Kind => write!(fmt, "Failed to create image: Specified kind doesn't support particular operation"), // Room for improvement. + CreationError::Samples(samples) => write!(fmt, "Failed to create image: Specified format doesn't support specified sampling {}", samples), + CreationError::Size(size) => write!(fmt, "Failed to create image: Unsupported size in one of the dimensions {}", size), + CreationError::Data(data) => write!(fmt, "Failed to create image: The given data has a different size {{{}}} than the target image slice", data), // Actually nothing emits this. + CreationError::Usage(usage) => write!(fmt, "Failed to create image: Unsupported usage: {:?}", usage), + } + } +} + +impl std::error::Error for CreationError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + CreationError::OutOfMemory(err) => Some(err), + _ => None, + } + } +} + +/// Error creating an `ImageView`. +#[derive(Clone, Debug, PartialEq)] +pub enum ViewCreationError { + /// The required usage flag is not present in the image. + Usage(Usage), + /// Selected mip level doesn't exist. + Level(Level), + /// Selected array layer doesn't exist. + Layer(LayerError), + /// An incompatible format was requested for the view. + BadFormat(format::Format), + /// An incompatible view kind was requested for the view. 
+ BadKind(ViewKind), + /// Out of either Host or Device memory + OutOfMemory(device::OutOfMemory), + /// The backend refused for some reason. + Unsupported, +} + +impl From for ViewCreationError { + fn from(error: device::OutOfMemory) -> Self { + ViewCreationError::OutOfMemory(error) + } +} + +impl std::fmt::Display for ViewCreationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ViewCreationError::Usage(usage) => write!(fmt, "Failed to create image view: Specified usage flags are not present in the image {:?}", usage), + ViewCreationError::Level(level) => write!(fmt, "Failed to create image view: Selected level doesn't exist in the image {}", level), + ViewCreationError::Layer(err) => write!(fmt, "Failed to create image view: {}", err), + ViewCreationError::BadFormat(format) => write!(fmt, "Failed to create image view: Incompatible format {:?}", format), + ViewCreationError::BadKind(kind) => write!(fmt, "Failed to create image view: Incompatible kind {:?}", kind), + ViewCreationError::OutOfMemory(err) => write!(fmt, "Failed to create image view: {}", err), + ViewCreationError::Unsupported => write!(fmt, "Failed to create image view: Implementation specific error occurred"), + } + } +} + +impl std::error::Error for ViewCreationError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + ViewCreationError::OutOfMemory(err) => Some(err), + _ => None, + } + } +} + +/// An error associated with selected image layer. +#[derive(Clone, Debug, PartialEq)] +pub enum LayerError { + /// The source image kind doesn't support array slices. + NotExpected(Kind), + /// Selected layers are outside of the provided range. 
+ OutOfBounds(Range), +} + +impl std::fmt::Display for LayerError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LayerError::NotExpected(kind) => { + write!(fmt, "Kind {{{:?}}} does not support arrays", kind) + } + LayerError::OutOfBounds(layers) => write!( + fmt, + "Out of bounds layers {} .. {}", + layers.start, layers.end + ), + } + } +} + +/// How to [filter](https://en.wikipedia.org/wiki/Texture_filtering) the +/// image when sampling. They correspond to increasing levels of quality, +/// but also cost. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Filter { + /// Selects a single texel from the current mip level and uses its value. + /// + /// Mip filtering selects the filtered value from one level. + Nearest, + /// Selects multiple texels and calculates the value via multivariate interpolation. + /// * 1D: Linear interpolation + /// * 2D/Cube: Bilinear interpolation + /// * 3D: Trilinear interpolation + Linear, +} + +/// The face of a cube image to do an operation on. +#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[repr(u8)] +pub enum CubeFace { + PosX, + NegX, + PosY, + NegY, + PosZ, + NegZ, +} + +/// A constant array of cube faces in the order they map to the hardware. +pub const CUBE_FACES: [CubeFace; 6] = [ + CubeFace::PosX, + CubeFace::NegX, + CubeFace::PosY, + CubeFace::NegY, + CubeFace::PosZ, + CubeFace::NegZ, +]; + +/// Specifies the dimensionality of an image to be allocated, +/// along with the number of mipmap layers and MSAA samples +/// if applicable. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Kind { + /// A single one-dimensional row of texels. + D1(Size, Layer), + /// Two-dimensional image. 
+ D2(Size, Size, Layer, NumSamples), + /// Volumetric image. + D3(Size, Size, Size), +} + +impl Kind { + /// Get the image extent. + pub fn extent(&self) -> Extent { + match *self { + Kind::D1(width, _) => Extent { + width, + height: 1, + depth: 1, + }, + Kind::D2(width, height, _, _) => Extent { + width, + height, + depth: 1, + }, + Kind::D3(width, height, depth) => Extent { + width, + height, + depth, + }, + } + } + + /// Get the extent of a particular mipmap level. + pub fn level_extent(&self, level: Level) -> Extent { + use std::cmp::{max, min}; + // must be at least 1 + let map = |val| max(min(val, 1), val >> min(level, MAX_LEVEL)); + match *self { + Kind::D1(w, _) => Extent { + width: map(w), + height: 1, + depth: 1, + }, + Kind::D2(w, h, _, _) => Extent { + width: map(w), + height: map(h), + depth: 1, + }, + Kind::D3(w, h, d) => Extent { + width: map(w), + height: map(h), + depth: map(d), + }, + } + } + + /// Count the number of mipmap levels. + pub fn num_levels(&self) -> Level { + use std::cmp::max; + match *self { + Kind::D2(_, _, _, s) if s > 1 => { + // anti-aliased images can't have mipmaps + 1 + } + _ => { + let extent = self.extent(); + let dominant = max(max(extent.width, extent.height), extent.depth); + (1 ..).find(|level| dominant >> level == 0).unwrap() + } + } + } + + /// Return the number of layers in an array type. + /// + /// Each cube face counts as separate layer. + pub fn num_layers(&self) -> Layer { + match *self { + Kind::D1(_, a) | Kind::D2(_, _, a, _) => a, + Kind::D3(..) => 1, + } + } + + /// Return the number of MSAA samples for the kind. + pub fn num_samples(&self) -> NumSamples { + match *self { + Kind::D1(..) => 1, + Kind::D2(_, _, _, s) => s, + Kind::D3(..) => 1, + } + } +} + +/// Specifies the kind/dimensionality of an image view. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum ViewKind { + /// A single one-dimensional row of texels. 
+ D1, + /// An array of rows of texels. Equivalent to `D2` except that texels + /// in different rows are not sampled, so filtering will be constrained + /// to a single row of texels at a time. + D1Array, + /// A traditional 2D image, with rows arranged contiguously. + D2, + /// An array of 2D images. Equivalent to `D3` except that texels in + /// a different depth level are not sampled. + D2Array, + /// A volume image, with each 2D layer arranged contiguously. + D3, + /// A set of 6 2D images, one for each face of a cube. + Cube, + /// An array of Cube images. + CubeArray, +} + +bitflags!( + /// Capabilities to create views into an image. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct ViewCapabilities: u32 { + /// Support creation of views with different formats. + const MUTABLE_FORMAT = 0x0000_0008; + /// Support creation of `Cube` and `CubeArray` kinds of views. + const KIND_CUBE = 0x0000_0010; + /// Support creation of `D2Array` kind of view. + const KIND_2D_ARRAY = 0x0000_0020; + } +); + +bitflags!( + /// TODO: Find out if TRANSIENT_ATTACHMENT + INPUT_ATTACHMENT + /// are applicable on backends other than Vulkan. --AP + /// Image usage flags + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Usage: u32 { + /// The image is used as a transfer source. + const TRANSFER_SRC = 0x1; + /// The image is used as a transfer destination. + const TRANSFER_DST = 0x2; + /// The image is a [sampled image](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#descriptorsets-sampledimage) + const SAMPLED = 0x4; + /// The image is a [storage image](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#descriptorsets-storageimage) + const STORAGE = 0x8; + /// The image is used as a color attachment -- that is, color input to a rendering pass. + const COLOR_ATTACHMENT = 0x10; + /// The image is used as a depth attachment. 
+ const DEPTH_STENCIL_ATTACHMENT = 0x20; + /// + const TRANSIENT_ATTACHMENT = 0x40; + /// + const INPUT_ATTACHMENT = 0x80; + + } +); + +impl Usage { + /// Returns true if this image can be used in transfer operations. + pub fn can_transfer(&self) -> bool { + self.intersects(Usage::TRANSFER_SRC | Usage::TRANSFER_DST) + } + + /// Returns true if this image can be used as a target. + pub fn can_target(&self) -> bool { + self.intersects(Usage::COLOR_ATTACHMENT | Usage::DEPTH_STENCIL_ATTACHMENT) + } +} + +/// Specifies how image coordinates outside the range `[0, 1]` are handled. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum WrapMode { + /// Tile the image, that is, sample the coordinate modulo `1.0`, so + /// addressing the image beyond an edge will "wrap" back from the + /// other edge. + Tile, + /// Mirror the image. Like tile, but uses abs(coord) before the modulo. + Mirror, + /// Clamp the image to the value at `0.0` or `1.0` respectively. + Clamp, + /// Use border color. + Border, + /// Mirror once and clamp to edge otherwise. + /// + /// Only valid if `Features::SAMPLER_MIRROR_CLAMP_EDGE` is enabled. + MirrorClamp, +} + +/// A wrapper for the LOD level of an image. Needed so that we can +/// implement Eq and Hash for it. +#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Lod(pub f32); + +impl Lod { + /// Possible LOD range. + pub const RANGE: Range = Lod(f32::MIN) .. Lod(f32::MAX); +} + +impl Eq for Lod {} +impl hash::Hash for Lod { + fn hash(&self, state: &mut H) { + self.0.to_bits().hash(state) + } +} + +/// A wrapper for an RGBA color with 8 bits per texel, encoded as a u32. 
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct PackedColor(pub u32);
+
+impl From<[f32; 4]> for PackedColor {
+ fn from(c: [f32; 4]) -> PackedColor {
+ PackedColor(
+ c.iter()
+ .rev()
+ .fold(0, |u, &c| (u << 8) + (c * 255.0) as u32),
+ )
+ }
+}
+
+impl Into<[f32; 4]> for PackedColor {
+ fn into(self) -> [f32; 4] {
+ let mut out = [0.0; 4];
+ for (i, channel) in out.iter_mut().enumerate() {
+ let byte = (self.0 >> (i << 3)) & 0xFF;
+ *channel = byte as f32 / 255.0;
+ }
+ out
+ }
+}
+
+/// Specifies how to sample from an image. These are all the parameters
+/// available that alter how the GPU goes from a coordinate in an image
+/// to producing an actual value from the texture, including filtering/
+/// scaling, wrap mode, etc.
+// TODO: document the details of sampling.
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct SamplerDesc {
+ /// Minification filter method to use.
+ pub min_filter: Filter,
+ /// Magnification filter method to use.
+ pub mag_filter: Filter,
+ /// Mip filter method to use.
+ pub mip_filter: Filter,
+ /// Wrapping mode for each of the U, V, and W axis (S, T, and R in OpenGL
+ /// speak).
+ pub wrap_mode: (WrapMode, WrapMode, WrapMode),
+ /// This bias is added to every computed mipmap level (N + lod_bias). For
+ /// example, if it would select mipmap level 2 and lod_bias is 1, it will
+ /// use mipmap level 3.
+ pub lod_bias: Lod,
+ /// This range is used to clamp LOD level used for sampling.
+ pub lod_range: Range,
+ /// Comparison mode, used primarily for a shadow map.
+ pub comparison: Option,
+ /// Border color is used when one of the wrap modes is set to border.
+ pub border: PackedColor,
+ /// Specifies whether the texture coordinates are normalized.
+ pub normalized: bool,
+ /// Anisotropic filtering. 
+ /// + /// Can be `Some(_)` only if `Features::SAMPLER_ANISOTROPY` is enabled. + pub anisotropy_clamp: Option, +} + +impl SamplerDesc { + /// Create a new sampler description with a given filter method for all filtering operations + /// and a wrapping mode, using no LOD modifications. + pub fn new(filter: Filter, wrap: WrapMode) -> Self { + SamplerDesc { + min_filter: filter, + mag_filter: filter, + mip_filter: filter, + wrap_mode: (wrap, wrap, wrap), + lod_bias: Lod(0.0), + lod_range: Lod::RANGE.clone(), + comparison: None, + border: PackedColor(0), + normalized: true, + anisotropy_clamp: None, + } + } +} + +/// Specifies options for how memory for an image is arranged. +/// These are hints to the GPU driver and may or may not have actual +/// performance effects, but describe constraints on how the data +/// may be used that a program *must* obey. They do not specify +/// how channel values or such are laid out in memory; the actual +/// image data is considered opaque. +/// +/// Details may be found in [the Vulkan spec](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#resources-image-layouts) +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Layout { + /// General purpose, no restrictions on usage. + General, + /// Must only be used as a color attachment in a framebuffer. + ColorAttachmentOptimal, + /// Must only be used as a depth attachment in a framebuffer. + DepthStencilAttachmentOptimal, + /// Must only be used as a depth attachment in a framebuffer, + /// or as a read-only depth or stencil buffer in a shader. + DepthStencilReadOnlyOptimal, + /// Must only be used as a read-only image in a shader. + ShaderReadOnlyOptimal, + /// Must only be used as the source for a transfer command. + TransferSrcOptimal, + /// Must only be used as the destination for a transfer command. + TransferDstOptimal, + /// No layout, does not support device access. 
Only valid as a
+ /// source layout when transforming data to a specific destination
+ /// layout or initializing data. Does NOT guarantee that the contents
+ /// of the source buffer are preserved.
+ Undefined,
+ /// Like `Undefined`, but does guarantee that the contents of the source
+ /// buffer are preserved.
+ Preinitialized,
+ /// The layout that an image must be in to be presented to the display.
+ Present,
+}
+
+bitflags!(
+ /// Bitflags to describe how memory in an image or buffer can be accessed.
+ #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+ pub struct Access: u32 {
+ /// Read access to an input attachment from within a fragment shader.
+ const INPUT_ATTACHMENT_READ = 0x10;
+ /// Read-only state for SRV access, or combine with `SHADER_WRITE` to have r/w access to UAV.
+ const SHADER_READ = 0x20;
+ /// Writeable state for UAV access.
+ /// Combine with `SHADER_READ` to have r/w access to UAV.
+ const SHADER_WRITE = 0x40;
+ /// Read state but can only be combined with `COLOR_ATTACHMENT_WRITE`.
+ const COLOR_ATTACHMENT_READ = 0x80;
+ /// Write-only state but can be combined with `COLOR_ATTACHMENT_READ`.
+ const COLOR_ATTACHMENT_WRITE = 0x100;
+ /// Read access to a depth/stencil attachment in a depth or stencil operation.
+ const DEPTH_STENCIL_ATTACHMENT_READ = 0x200;
+ /// Write access to a depth/stencil attachment in a depth or stencil operation.
+ const DEPTH_STENCIL_ATTACHMENT_WRITE = 0x400;
+ /// Read access to the buffer in a copy operation.
+ const TRANSFER_READ = 0x800;
+ /// Write access to the buffer in a copy operation.
+ const TRANSFER_WRITE = 0x1000;
+ /// Read access for raw memory to be accessed by the host system (ie, CPU).
+ const HOST_READ = 0x2000;
+ /// Write access for raw memory to be accessed by the host system.
+ const HOST_WRITE = 0x4000;
+ /// Read access for memory to be accessed by a non-specific entity. This may
+ /// be the host system, or it may be something undefined or specified by an
+ /// extension. 
+ const MEMORY_READ = 0x8000; + /// Write access for memory to be accessed by a non-specific entity. + const MEMORY_WRITE = 0x10000; + } +); + +/// Image state, combining access methods and the image's layout. +pub type State = (Access, Layout); + +/// Selector of a concrete subresource in an image. +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Subresource { + /// Included aspects: color/depth/stencil + pub aspects: format::Aspects, + /// Selected mipmap level + pub level: Level, + /// Selected array level + pub layer: Layer, +} + +/// A subset of resource layers contained within an image's level. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SubresourceLayers { + /// Included aspects: color/depth/stencil + pub aspects: format::Aspects, + /// Selected mipmap level + pub level: Level, + /// Included array levels + pub layers: Range, +} + +/// A subset of resources contained within an image. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SubresourceRange { + /// Included aspects: color/depth/stencil + pub aspects: format::Aspects, + /// Included mipmap levels + pub levels: Range, + /// Included array levels + pub layers: Range, +} + +/// Image format properties. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct FormatProperties { + /// Maximum extent. + pub max_extent: Extent, + /// Max number of mipmap levels. + pub max_levels: Level, + /// Max number of array layers. + pub max_layers: Layer, + /// Bit mask of supported sample counts. + pub sample_count_mask: NumSamples, + /// Maximum size of the resource in bytes. + pub max_resource_size: usize, +} + +/// Footprint of a subresource in memory. 
+#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SubresourceFootprint { + /// Byte slice occupied by the subresource. + pub slice: Range, + /// Byte distance between rows. + pub row_pitch: RawOffset, + /// Byte distance between array layers. + pub array_pitch: RawOffset, + /// Byte distance between depth slices. + pub depth_pitch: RawOffset, +} diff --git a/third_party/rust/gfx-hal/src/lib.rs b/third_party/rust/gfx-hal/src/lib.rs old mode 100755 new mode 100644 index 8a01dd055e2d..50856a66b68e --- a/third_party/rust/gfx-hal/src/lib.rs +++ b/third_party/rust/gfx-hal/src/lib.rs @@ -1,459 +1,484 @@ -#![deny(missing_debug_implementations, missing_docs, unused)] - -//! Low-level graphics abstraction for Rust. Mostly operates on data, not types. -//! Designed for use by libraries and higher-level abstractions only. - -#[macro_use] -extern crate bitflags; - -#[cfg(feature = "serde")] -#[macro_use] -extern crate serde; - -use std::any::Any; -use std::fmt; -use std::hash::Hash; - -pub mod adapter; -pub mod buffer; -pub mod command; -pub mod device; -pub mod format; -pub mod image; -pub mod memory; -pub mod pass; -pub mod pool; -pub mod pso; -pub mod query; -pub mod queue; -pub mod range; -pub mod window; - -/// Prelude module re-exports all the traits necessary to use gfx-hal. -pub mod prelude { - pub use crate::{ - adapter::PhysicalDevice as _, - command::CommandBuffer as _, - device::Device as _, - pool::CommandPool as _, - pso::DescriptorPool as _, - queue::{CommandQueue as _, QueueFamily as _}, - window::{PresentationSurface as _, Surface as _, Swapchain as _}, - Instance as _, - }; -} - -/// Draw vertex count. -pub type VertexCount = u32; -/// Draw vertex base offset. -pub type VertexOffset = i32; -/// Draw number of indices. -pub type IndexCount = u32; -/// Draw number of instances. -pub type InstanceCount = u32; -/// Indirect draw calls count. 
-pub type DrawCount = u32; -/// Number of work groups. -pub type WorkGroupCount = [u32; 3]; - -bitflags! { - //TODO: add a feature for non-normalized samplers - //TODO: add a feature for mutable comparison samplers - /// Features that the device supports. - /// These only include features of the core interface and not API extensions. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct Features: u64 { - /// Bit mask of Vulkan Core features. - const CORE_MASK = 0x0FFF_FFFF_FFFF_FFFF; - /// Bit mask of Vulkan Portability features. - const PORTABILITY_MASK = 0xF000_0000_0000_0000; - - /// Support for robust buffer access. - /// Buffer access by SPIR-V shaders is checked against the buffer/image boundaries. - const ROBUST_BUFFER_ACCESS = 0x000_0000_0000_0001; - /// Support the full 32-bit range of indexed for draw calls. - /// If not supported, the maximum index value is determined by `Limits::max_draw_index_value`. - const FULL_DRAW_INDEX_U32 = 0x000_0000_0000_0002; - /// Support cube array image views. - const IMAGE_CUBE_ARRAY = 0x000_0000_0000_0004; - /// Support different color blending settings per attachments on graphics pipeline creation. - const INDEPENDENT_BLENDING = 0x000_0000_0000_0008; - /// Support geometry shader. - const GEOMETRY_SHADER = 0x000_0000_0000_0010; - /// Support tessellation shaders. - const TESSELLATION_SHADER = 0x000_0000_0000_0020; - /// Support per-sample shading and multisample interpolation. - const SAMPLE_RATE_SHADING = 0x000_0000_0000_0040; - /// Support dual source blending. - const DUAL_SRC_BLENDING = 0x000_0000_0000_0080; - /// Support logic operations. - const LOGIC_OP = 0x000_0000_0000_0100; - /// Support multiple draws per indirect call. - const MULTI_DRAW_INDIRECT = 0x000_0000_0000_0200; - /// Support indirect drawing with first instance value. - /// If not supported the first instance value **must** be 0. - const DRAW_INDIRECT_FIRST_INSTANCE = 0x00_0000_0000_0400; - /// Support depth clamping. 
- const DEPTH_CLAMP = 0x000_0000_0000_0800; - /// Support depth bias clamping. - const DEPTH_BIAS_CLAMP = 0x000_0000_0000_1000; - /// Support non-fill polygon modes. - const NON_FILL_POLYGON_MODE = 0x000_0000_0000_2000; - /// Support depth bounds test. - const DEPTH_BOUNDS = 0x000_0000_0000_4000; - /// Support lines with width other than 1.0. - const LINE_WIDTH = 0x000_0000_0000_8000; - /// Support points with size greater than 1.0. - const POINT_SIZE = 0x000_0000_0001_0000; - /// Support replacing alpha values with 1.0. - const ALPHA_TO_ONE = 0x000_0000_0002_0000; - /// Support multiple viewports and scissors. - const MULTI_VIEWPORTS = 0x000_0000_0004_0000; - /// Support anisotropic filtering. - const SAMPLER_ANISOTROPY = 0x000_0000_0008_0000; - /// Support ETC2 texture compression formats. - const FORMAT_ETC2 = 0x000_0000_0010_0000; - /// Support ASTC (LDR) texture compression formats. - const FORMAT_ASTC_LDR = 0x000_0000_0020_0000; - /// Support BC texture compression formats. - const FORMAT_BC = 0x000_0000_0040_0000; - /// Support precise occlusion queries, returning the actual number of samples. - /// If not supported, queries return a non-zero value when at least **one** sample passes. - const PRECISE_OCCLUSION_QUERY = 0x000_0000_0080_0000; - /// Support query of pipeline statistics. - const PIPELINE_STATISTICS_QUERY = 0x000_0000_0100_0000; - /// Support unordered access stores and atomic ops in the vertex, geometry - /// and tessellation shader stage. - /// If not supported, the shader resources **must** be annotated as read-only. - const VERTEX_STORES_AND_ATOMICS = 0x000_0000_0200_0000; - /// Support unordered access stores and atomic ops in the fragment shader stage - /// If not supported, the shader resources **must** be annotated as read-only. 
- const FRAGMENT_STORES_AND_ATOMICS = 0x000_0000_0400_0000; - /// - const SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE = 0x000_0000_0800_0000; - /// - const SHADER_IMAGE_GATHER_EXTENDED = 0x000_0000_1000_0000; - /// - const SHADER_STORAGE_IMAGE_EXTENDED_FORMATS = 0x000_0000_2000_0000; - /// - const SHADER_STORAGE_IMAGE_MULTISAMPLE = 0x000_0000_4000_0000; - /// - const SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT = 0x000_0000_8000_0000; - /// - const SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT = 0x000_0001_0000_0000; - /// - const SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING = 0x000_0002_0000_0000; - /// - const SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING = 0x000_0004_0000_0000; - /// - const SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING = 0x000_0008_0000_0000; - /// - const SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING = 0x000_0010_0000_0000; - /// - const SHADER_CLIP_DISTANCE = 0x000_0020_0000_0000; - /// - const SHADER_CULL_DISTANCE = 0x000_0040_0000_0000; - /// - const SHADER_FLOAT64 = 0x000_0080_0000_0000; - /// - const SHADER_INT64 = 0x000_0100_0000_0000; - /// - const SHADER_INT16 = 0x000_0200_0000_0000; - /// - const SHADER_RESOURCE_RESIDENCY = 0x000_0400_0000_0000; - /// - const SHADER_RESOURCE_MIN_LOD = 0x000_0800_0000_0000; - /// - const SPARSE_BINDING = 0x000_1000_0000_0000; - /// - const SPARSE_RESIDENCY_BUFFER = 0x000_2000_0000_0000; - /// - const SPARSE_RESIDENCY_IMAGE_2D = 0x000_4000_0000_0000; - /// - const SPARSE_RESIDENCY_IMAGE_3D = 0x000_8000_0000_0000; - /// - const SPARSE_RESIDENCY_2_SAMPLES = 0x001_0000_0000_0000; - /// - const SPARSE_RESIDENCY_4_SAMPLES = 0x002_0000_0000_0000; - /// - const SPARSE_RESIDENCY_8_SAMPLES = 0x004_0000_0000_0000; - /// - const SPARSE_RESIDENCY_16_SAMPLES = 0x008_0000_0000_0000; - /// - const SPARSE_RESIDENCY_ALIASED = 0x010_0000_0000_0000; - /// - const VARIABLE_MULTISAMPLE_RATE = 0x020_0000_0000_0000; - /// - const INHERITED_QUERIES = 0x040_0000_0000_0000; - - /// Support triangle fan primitive topology. 
- const TRIANGLE_FAN = 0x1000_0000_0000_0000; - /// Support separate stencil reference values for front and back sides. - const SEPARATE_STENCIL_REF_VALUES = 0x2000_0000_0000_0000; - /// Support manually specified vertex attribute rates (divisors). - const INSTANCE_RATE = 0x4000_0000_0000_0000; - /// Support non-zero mipmap bias on samplers. - const SAMPLER_MIP_LOD_BIAS = 0x8000_0000_0000_0000; - } -} - -/// Resource limits of a particular graphics device. -#[derive(Clone, Copy, Debug, Default, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Limits { - /// Maximum supported image 1D size. - pub max_image_1d_size: image::Size, - /// Maximum supported image 2D size. - pub max_image_2d_size: image::Size, - /// Maximum supported image 3D size. - pub max_image_3d_size: image::Size, - /// Maximum supported image cube size. - pub max_image_cube_size: image::Size, - /// Maximum supporter image array size. - pub max_image_array_layers: image::Layer, - /// Maximum number of elements for the BufferView to see. 
- pub max_texel_elements: usize, - /// - pub max_uniform_buffer_range: buffer::Offset, - /// - pub max_storage_buffer_range: buffer::Offset, - /// - pub max_push_constants_size: usize, - /// - pub max_memory_allocation_count: usize, - /// - pub max_sampler_allocation_count: usize, - /// - pub max_bound_descriptor_sets: pso::DescriptorSetIndex, - /// - pub max_framebuffer_layers: usize, - /// - pub max_per_stage_descriptor_samplers: usize, - /// - pub max_per_stage_descriptor_uniform_buffers: usize, - /// - pub max_per_stage_descriptor_storage_buffers: usize, - /// - pub max_per_stage_descriptor_sampled_images: usize, - /// - pub max_per_stage_descriptor_storage_images: usize, - /// - pub max_per_stage_descriptor_input_attachments: usize, - /// - pub max_per_stage_resources: usize, - - /// - pub max_descriptor_set_samplers: usize, - /// - pub max_descriptor_set_uniform_buffers: usize, - /// - pub max_descriptor_set_uniform_buffers_dynamic: usize, - /// - pub max_descriptor_set_storage_buffers: usize, - /// - pub max_descriptor_set_storage_buffers_dynamic: usize, - /// - pub max_descriptor_set_sampled_images: usize, - /// - pub max_descriptor_set_storage_images: usize, - /// - pub max_descriptor_set_input_attachments: usize, - - /// Maximum number of vertex input attributes that can be specified for a graphics pipeline. - pub max_vertex_input_attributes: usize, - /// Maximum number of vertex buffers that can be specified for providing vertex attributes to a graphics pipeline. - pub max_vertex_input_bindings: usize, - /// Maximum vertex input attribute offset that can be added to the vertex input binding stride. - pub max_vertex_input_attribute_offset: usize, - /// Maximum vertex input binding stride that can be specified in a vertex input binding. - pub max_vertex_input_binding_stride: usize, - /// Maximum number of components of output variables which can be output by a vertex shader. 
- pub max_vertex_output_components: usize, - - /// Maximum number of vertices for each patch. - pub max_patch_size: pso::PatchSize, - /// - pub max_geometry_shader_invocations: usize, - /// - pub max_geometry_input_components: usize, - /// - pub max_geometry_output_components: usize, - /// - pub max_geometry_output_vertices: usize, - /// - pub max_geometry_total_output_components: usize, - /// - pub max_fragment_input_components: usize, - /// - pub max_fragment_output_attachments: usize, - /// - pub max_fragment_dual_source_attachments: usize, - /// - pub max_fragment_combined_output_resources: usize, - - /// - pub max_compute_shared_memory_size: usize, - /// - pub max_compute_work_group_count: WorkGroupCount, - /// - pub max_compute_work_group_invocations: usize, - /// - pub max_compute_work_group_size: [u32; 3], - - /// - pub max_draw_indexed_index_value: IndexCount, - /// - pub max_draw_indirect_count: InstanceCount, - - /// - pub max_sampler_lod_bias: f32, - /// Maximum degree of sampler anisotropy. - pub max_sampler_anisotropy: f32, - - /// Maximum number of viewports. - pub max_viewports: usize, - /// - pub max_viewport_dimensions: [image::Size; 2], - /// - pub max_framebuffer_extent: image::Extent, - - /// - pub min_memory_map_alignment: usize, - /// - pub buffer_image_granularity: buffer::Offset, - /// The alignment of the start of buffer used as a texel buffer, in bytes, non-zero. - pub min_texel_buffer_offset_alignment: buffer::Offset, - /// The alignment of the start of buffer used for uniform buffer updates, in bytes, non-zero. - pub min_uniform_buffer_offset_alignment: buffer::Offset, - /// The alignment of the start of buffer used as a storage buffer, in bytes, non-zero. - pub min_storage_buffer_offset_alignment: buffer::Offset, - /// Number of samples supported for color attachments of framebuffers (floating/fixed point). - pub framebuffer_color_sample_counts: image::NumSamples, - /// Number of samples supported for depth attachments of framebuffers. 
- pub framebuffer_depth_sample_counts: image::NumSamples, - /// Number of samples supported for stencil attachments of framebuffers. - pub framebuffer_stencil_sample_counts: image::NumSamples, - /// Maximum number of color attachments that can be used by a subpass in a render pass. - pub max_color_attachments: usize, - /// - pub standard_sample_locations: bool, - /// The alignment of the start of the buffer used as a GPU copy source, in bytes, non-zero. - pub optimal_buffer_copy_offset_alignment: buffer::Offset, - /// The alignment of the row pitch of the texture data stored in a buffer that is - /// used in a GPU copy operation, in bytes, non-zero. - pub optimal_buffer_copy_pitch_alignment: buffer::Offset, - /// Size and alignment in bytes that bounds concurrent access to host-mapped device memory. - pub non_coherent_atom_size: usize, - - /// The alignment of the vertex buffer stride. - pub min_vertex_input_binding_stride_alignment: buffer::Offset, -} - -/// An enum describing the type of an index value in a slice's index buffer -#[allow(missing_docs)] -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[repr(u8)] -pub enum IndexType { - U16, - U32, -} - -/// Error creating an instance of a backend on the platform that -/// doesn't support this backend. -#[derive(Clone, Debug, PartialEq)] -pub struct UnsupportedBackend; - -/// An instantiated backend. -/// -/// Any startup the backend needs to perform will be done when creating the type that implements -/// `Instance`. -/// -/// # Examples -/// -/// ```rust -/// # extern crate gfx_backend_empty; -/// # extern crate gfx_hal; -/// use gfx_backend_empty as backend; -/// use gfx_hal as hal; -/// -/// // Create a concrete instance of our backend (this is backend-dependent and may be more -/// // complicated for some backends). 
-/// let instance = backend::Instance; -/// // We can get a list of the available adapters, which are either physical graphics -/// // devices, or virtual adapters. Because we are using the dummy `empty` backend, -/// // there will be nothing in this list. -/// for (idx, adapter) in hal::Instance::enumerate_adapters(&instance).iter().enumerate() { -/// println!("Adapter {}: {:?}", idx, adapter.info); -/// } -/// ``` -pub trait Instance: Any + Send + Sync + Sized { - /// Create a new instance. - fn create(name: &str, version: u32) -> Result; - /// Return all available adapters. - fn enumerate_adapters(&self) -> Vec>; - /// Create a new surface. - unsafe fn create_surface( - &self, - _: &impl raw_window_handle::HasRawWindowHandle, - ) -> Result; - /// Destroy a surface. - /// - /// The surface shouldn't be destroyed before the attached - /// swapchain is destroyed. - unsafe fn destroy_surface(&self, surface: B::Surface); -} - -/// A strongly-typed index to a particular `MemoryType`. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct MemoryTypeId(pub usize); - -impl From for MemoryTypeId { - fn from(id: usize) -> Self { - MemoryTypeId(id) - } -} - -/// The `Backend` trait wraps together all the types needed -/// for a graphics backend. Each backend module, such as OpenGL -/// or Metal, will implement this trait with its own concrete types. 
-#[allow(missing_docs)] -pub trait Backend: 'static + Sized + Eq + Clone + Hash + fmt::Debug + Any + Send + Sync { - type Instance: Instance; - type PhysicalDevice: adapter::PhysicalDevice; - type Device: device::Device; - - type Surface: window::PresentationSurface; - type Swapchain: window::Swapchain; - - type QueueFamily: queue::QueueFamily; - type CommandQueue: queue::CommandQueue; - type CommandBuffer: command::CommandBuffer; - - type ShaderModule: fmt::Debug + Any + Send + Sync; - type RenderPass: fmt::Debug + Any + Send + Sync; - type Framebuffer: fmt::Debug + Any + Send + Sync; - - type Memory: fmt::Debug + Any + Send + Sync; - type CommandPool: pool::CommandPool; - - type Buffer: fmt::Debug + Any + Send + Sync; - type BufferView: fmt::Debug + Any + Send + Sync; - type Image: fmt::Debug + Any + Send + Sync; - type ImageView: fmt::Debug + Any + Send + Sync; - type Sampler: fmt::Debug + Any + Send + Sync; - - type ComputePipeline: fmt::Debug + Any + Send + Sync; - type GraphicsPipeline: fmt::Debug + Any + Send + Sync; - type PipelineCache: fmt::Debug + Any + Send + Sync; - type PipelineLayout: fmt::Debug + Any + Send + Sync; - type DescriptorPool: pso::DescriptorPool; - type DescriptorSet: fmt::Debug + Any + Send + Sync; - type DescriptorSetLayout: fmt::Debug + Any + Send + Sync; - - type Fence: fmt::Debug + Any + Send + Sync; - type Semaphore: fmt::Debug + Any + Send + Sync; - type Event: fmt::Debug + Any + Send + Sync; - type QueryPool: fmt::Debug + Any + Send + Sync; -} +#![deny(missing_debug_implementations, missing_docs, unused)] + +//! Low-level graphics abstraction for Rust. Mostly operates on data, not types. +//! Designed for use by libraries and higher-level abstractions only. 
+ +#[macro_use] +extern crate bitflags; + +#[cfg(feature = "serde")] +#[macro_use] +extern crate serde; + +use std::any::Any; +use std::fmt; +use std::hash::Hash; + +pub mod adapter; +pub mod buffer; +pub mod command; +pub mod device; +pub mod format; +pub mod image; +pub mod memory; +pub mod pass; +pub mod pool; +pub mod pso; +pub mod query; +pub mod queue; +pub mod window; + +/// Prelude module re-exports all the traits necessary to use gfx-hal. +pub mod prelude { + pub use crate::{ + adapter::PhysicalDevice as _, + command::CommandBuffer as _, + device::Device as _, + pool::CommandPool as _, + pso::DescriptorPool as _, + queue::{CommandQueue as _, QueueFamily as _}, + window::{PresentationSurface as _, Surface as _, Swapchain as _}, + Instance as _, + }; +} + +/// Draw vertex count. +pub type VertexCount = u32; +/// Draw vertex base offset. +pub type VertexOffset = i32; +/// Draw number of indices. +pub type IndexCount = u32; +/// Draw number of instances. +pub type InstanceCount = u32; +/// Indirect draw calls count. +pub type DrawCount = u32; +/// Number of work groups. +pub type WorkGroupCount = [u32; 3]; + +bitflags! { + //TODO: add a feature for non-normalized samplers + //TODO: add a feature for mutable comparison samplers + /// Features that the device supports. + /// These only include features of the core interface and not API extensions. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Features: u128 { + /// Bit mask of Vulkan Core features. + const CORE_MASK = 0xFFFF_FFFF_FFFF_FFFF; + /// Bit mask of Vulkan Portability features. + const PORTABILITY_MASK = 0x0000_FFFF_0000_0000_0000_0000; + /// Bit mask for extra WebGPU features. + const WEBGPU_MASK = 0xFFFF_0000_0000_0000_0000_0000; + + /// Support for robust buffer access. + /// Buffer access by SPIR-V shaders is checked against the buffer/image boundaries. + const ROBUST_BUFFER_ACCESS = 0x0000_0000_0000_0001; + /// Support the full 32-bit range of indexed for draw calls. 
+ /// If not supported, the maximum index value is determined by `Limits::max_draw_index_value`. + const FULL_DRAW_INDEX_U32 = 0x0000_0000_0000_0002; + /// Support cube array image views. + const IMAGE_CUBE_ARRAY = 0x0000_0000_0000_0004; + /// Support different color blending settings per attachments on graphics pipeline creation. + const INDEPENDENT_BLENDING = 0x0000_0000_0000_0008; + /// Support geometry shader. + const GEOMETRY_SHADER = 0x0000_0000_0000_0010; + /// Support tessellation shaders. + const TESSELLATION_SHADER = 0x0000_0000_0000_0020; + /// Support per-sample shading and multisample interpolation. + const SAMPLE_RATE_SHADING = 0x0000_0000_0000_0040; + /// Support dual source blending. + const DUAL_SRC_BLENDING = 0x0000_0000_0000_0080; + /// Support logic operations. + const LOGIC_OP = 0x0000_0000_0000_0100; + /// Support multiple draws per indirect call. + const MULTI_DRAW_INDIRECT = 0x0000_0000_0000_0200; + /// Support indirect drawing with first instance value. + /// If not supported the first instance value **must** be 0. + const DRAW_INDIRECT_FIRST_INSTANCE = 0x0000_0000_0000_0400; + /// Support depth clamping. + const DEPTH_CLAMP = 0x0000_0000_0000_0800; + /// Support depth bias clamping. + const DEPTH_BIAS_CLAMP = 0x0000_0000_0000_1000; + /// Support non-fill polygon modes. + const NON_FILL_POLYGON_MODE = 0x0000_0000_0000_2000; + /// Support depth bounds test. + const DEPTH_BOUNDS = 0x0000_0000_0000_4000; + /// Support lines with width other than 1.0. + const LINE_WIDTH = 0x0000_0000_0000_8000; + /// Support points with size greater than 1.0. + const POINT_SIZE = 0x0000_0000_0001_0000; + /// Support replacing alpha values with 1.0. + const ALPHA_TO_ONE = 0x0000_0000_0002_0000; + /// Support multiple viewports and scissors. + const MULTI_VIEWPORTS = 0x0000_0000_0004_0000; + /// Support anisotropic filtering. + const SAMPLER_ANISOTROPY = 0x0000_0000_0008_0000; + /// Support ETC2 texture compression formats. 
+ const FORMAT_ETC2 = 0x0000_0000_0010_0000; + /// Support ASTC (LDR) texture compression formats. + const FORMAT_ASTC_LDR = 0x0000_0000_0020_0000; + /// Support BC texture compression formats. + const FORMAT_BC = 0x0000_0000_0040_0000; + /// Support precise occlusion queries, returning the actual number of samples. + /// If not supported, queries return a non-zero value when at least **one** sample passes. + const PRECISE_OCCLUSION_QUERY = 0x0000_0000_0080_0000; + /// Support query of pipeline statistics. + const PIPELINE_STATISTICS_QUERY = 0x0000_0000_0100_0000; + /// Support unordered access stores and atomic ops in the vertex, geometry + /// and tessellation shader stage. + /// If not supported, the shader resources **must** be annotated as read-only. + const VERTEX_STORES_AND_ATOMICS = 0x0000_0000_0200_0000; + /// Support unordered access stores and atomic ops in the fragment shader stage + /// If not supported, the shader resources **must** be annotated as read-only. + const FRAGMENT_STORES_AND_ATOMICS = 0x0000_0000_0400_0000; + /// + const SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE = 0x0000_0000_0800_0000; + /// + const SHADER_IMAGE_GATHER_EXTENDED = 0x0000_0000_1000_0000; + /// + const SHADER_STORAGE_IMAGE_EXTENDED_FORMATS = 0x0000_0000_2000_0000; + /// + const SHADER_STORAGE_IMAGE_MULTISAMPLE = 0x0000_0000_4000_0000; + /// + const SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT = 0x0000_0000_8000_0000; + /// + const SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT = 0x0000_0001_0000_0000; + /// + const SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING = 0x0000_0002_0000_0000; + /// + const SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING = 0x0000_0004_0000_0000; + /// + const SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING = 0x0000_0008_0000_0000; + /// + const SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING = 0x0000_0010_0000_0000; + /// + const SHADER_CLIP_DISTANCE = 0x0000_0020_0000_0000; + /// + const SHADER_CULL_DISTANCE = 0x0000_0040_0000_0000; + /// + const SHADER_FLOAT64 = 
0x0000_0080_0000_0000; + /// + const SHADER_INT64 = 0x0000_0100_0000_0000; + /// + const SHADER_INT16 = 0x0000_0200_0000_0000; + /// + const SHADER_RESOURCE_RESIDENCY = 0x0000_0400_0000_0000; + /// + const SHADER_RESOURCE_MIN_LOD = 0x0000_0800_0000_0000; + /// + const SPARSE_BINDING = 0x0000_1000_0000_0000; + /// + const SPARSE_RESIDENCY_BUFFER = 0x0000_2000_0000_0000; + /// + const SPARSE_RESIDENCY_IMAGE_2D = 0x0000_4000_0000_0000; + /// + const SPARSE_RESIDENCY_IMAGE_3D = 0x0000_8000_0000_0000; + /// + const SPARSE_RESIDENCY_2_SAMPLES = 0x0001_0000_0000_0000; + /// + const SPARSE_RESIDENCY_4_SAMPLES = 0x0002_0000_0000_0000; + /// + const SPARSE_RESIDENCY_8_SAMPLES = 0x0004_0000_0000_0000; + /// + const SPARSE_RESIDENCY_16_SAMPLES = 0x0008_0000_0000_0000; + /// + const SPARSE_RESIDENCY_ALIASED = 0x0010_0000_0000_0000; + /// + const VARIABLE_MULTISAMPLE_RATE = 0x0020_0000_0000_0000; + /// + const INHERITED_QUERIES = 0x0040_0000_0000_0000; + /// Support for + const SAMPLER_MIRROR_CLAMP_EDGE = 0x0100_0000_0000_0000; + + /// Support triangle fan primitive topology. + const TRIANGLE_FAN = 0x0001 << 64; + /// Support separate stencil reference values for front and back sides. + const SEPARATE_STENCIL_REF_VALUES = 0x0002 << 64; + /// Support manually specified vertex attribute rates (divisors). + const INSTANCE_RATE = 0x0004 << 64; + /// Support non-zero mipmap bias on samplers. + const SAMPLER_MIP_LOD_BIAS = 0x0008 << 64; + + /// Make the NDC coordinate system pointing Y up, to match D3D and Metal. + const NDC_Y_UP = 0x01 << 80; + } +} + +bitflags! { + /// Features that the device supports natively, but is able to emulate. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Hints: u32 { + /// Support indexed, instanced drawing with base vertex and instance. + const BASE_VERTEX_INSTANCE_DRAWING = 0x0001; + } +} + +/// Resource limits of a particular graphics device. 
+#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Limits { + /// Maximum supported image 1D size. + pub max_image_1d_size: image::Size, + /// Maximum supported image 2D size. + pub max_image_2d_size: image::Size, + /// Maximum supported image 3D size. + pub max_image_3d_size: image::Size, + /// Maximum supported image cube size. + pub max_image_cube_size: image::Size, + /// Maximum supporter image array size. + pub max_image_array_layers: image::Layer, + /// Maximum number of elements for the BufferView to see. + pub max_texel_elements: usize, + /// + pub max_uniform_buffer_range: buffer::Offset, + /// + pub max_storage_buffer_range: buffer::Offset, + /// + pub max_push_constants_size: usize, + /// + pub max_memory_allocation_count: usize, + /// + pub max_sampler_allocation_count: usize, + /// + pub max_bound_descriptor_sets: pso::DescriptorSetIndex, + /// + pub max_framebuffer_layers: usize, + /// + pub max_per_stage_descriptor_samplers: usize, + /// + pub max_per_stage_descriptor_uniform_buffers: usize, + /// + pub max_per_stage_descriptor_storage_buffers: usize, + /// + pub max_per_stage_descriptor_sampled_images: usize, + /// + pub max_per_stage_descriptor_storage_images: usize, + /// + pub max_per_stage_descriptor_input_attachments: usize, + /// + pub max_per_stage_resources: usize, + + /// + pub max_descriptor_set_samplers: usize, + /// + pub max_descriptor_set_uniform_buffers: usize, + /// + pub max_descriptor_set_uniform_buffers_dynamic: usize, + /// + pub max_descriptor_set_storage_buffers: usize, + /// + pub max_descriptor_set_storage_buffers_dynamic: usize, + /// + pub max_descriptor_set_sampled_images: usize, + /// + pub max_descriptor_set_storage_images: usize, + /// + pub max_descriptor_set_input_attachments: usize, + + /// Maximum number of vertex input attributes that can be specified for a graphics pipeline. 
+ pub max_vertex_input_attributes: usize, + /// Maximum number of vertex buffers that can be specified for providing vertex attributes to a graphics pipeline. + pub max_vertex_input_bindings: usize, + /// Maximum vertex input attribute offset that can be added to the vertex input binding stride. + pub max_vertex_input_attribute_offset: usize, + /// Maximum vertex input binding stride that can be specified in a vertex input binding. + pub max_vertex_input_binding_stride: usize, + /// Maximum number of components of output variables which can be output by a vertex shader. + pub max_vertex_output_components: usize, + + /// Maximum number of vertices for each patch. + pub max_patch_size: pso::PatchSize, + /// + pub max_geometry_shader_invocations: usize, + /// + pub max_geometry_input_components: usize, + /// + pub max_geometry_output_components: usize, + /// + pub max_geometry_output_vertices: usize, + /// + pub max_geometry_total_output_components: usize, + /// + pub max_fragment_input_components: usize, + /// + pub max_fragment_output_attachments: usize, + /// + pub max_fragment_dual_source_attachments: usize, + /// + pub max_fragment_combined_output_resources: usize, + + /// + pub max_compute_shared_memory_size: usize, + /// + pub max_compute_work_group_count: WorkGroupCount, + /// + pub max_compute_work_group_invocations: usize, + /// + pub max_compute_work_group_size: [u32; 3], + + /// + pub max_draw_indexed_index_value: IndexCount, + /// + pub max_draw_indirect_count: InstanceCount, + + /// + pub max_sampler_lod_bias: f32, + /// Maximum degree of sampler anisotropy. + pub max_sampler_anisotropy: f32, + + /// Maximum number of viewports. + pub max_viewports: usize, + /// + pub max_viewport_dimensions: [image::Size; 2], + /// + pub max_framebuffer_extent: image::Extent, + + /// + pub min_memory_map_alignment: usize, + /// + pub buffer_image_granularity: buffer::Offset, + /// The alignment of the start of buffer used as a texel buffer, in bytes, non-zero. 
+ pub min_texel_buffer_offset_alignment: buffer::Offset, + /// The alignment of the start of buffer used for uniform buffer updates, in bytes, non-zero. + pub min_uniform_buffer_offset_alignment: buffer::Offset, + /// The alignment of the start of buffer used as a storage buffer, in bytes, non-zero. + pub min_storage_buffer_offset_alignment: buffer::Offset, + /// Number of samples supported for color attachments of framebuffers (floating/fixed point). + pub framebuffer_color_sample_counts: image::NumSamples, + /// Number of samples supported for depth attachments of framebuffers. + pub framebuffer_depth_sample_counts: image::NumSamples, + /// Number of samples supported for stencil attachments of framebuffers. + pub framebuffer_stencil_sample_counts: image::NumSamples, + /// Maximum number of color attachments that can be used by a subpass in a render pass. + pub max_color_attachments: usize, + /// + pub standard_sample_locations: bool, + /// The alignment of the start of the buffer used as a GPU copy source, in bytes, non-zero. + pub optimal_buffer_copy_offset_alignment: buffer::Offset, + /// The alignment of the row pitch of the texture data stored in a buffer that is + /// used in a GPU copy operation, in bytes, non-zero. + pub optimal_buffer_copy_pitch_alignment: buffer::Offset, + /// Size and alignment in bytes that bounds concurrent access to host-mapped device memory. + pub non_coherent_atom_size: usize, + + /// The alignment of the vertex buffer stride. + pub min_vertex_input_binding_stride_alignment: buffer::Offset, +} + +/// An enum describing the type of an index value in a slice's index buffer +#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[repr(u8)] +pub enum IndexType { + U16, + U32, +} + +/// Error creating an instance of a backend on the platform that +/// doesn't support this backend. 
+#[derive(Clone, Debug, PartialEq)] +pub struct UnsupportedBackend; + +/// An instantiated backend. +/// +/// Any startup the backend needs to perform will be done when creating the type that implements +/// `Instance`. +/// +/// # Examples +/// +/// ```rust +/// # extern crate gfx_backend_empty; +/// # extern crate gfx_hal; +/// use gfx_backend_empty as backend; +/// use gfx_hal as hal; +/// +/// // Create a concrete instance of our backend (this is backend-dependent and may be more +/// // complicated for some backends). +/// let instance = backend::Instance; +/// // We can get a list of the available adapters, which are either physical graphics +/// // devices, or virtual adapters. Because we are using the dummy `empty` backend, +/// // there will be nothing in this list. +/// for (idx, adapter) in hal::Instance::enumerate_adapters(&instance).iter().enumerate() { +/// println!("Adapter {}: {:?}", idx, adapter.info); +/// } +/// ``` +pub trait Instance: Any + Send + Sync + Sized { + /// Create a new instance. + fn create(name: &str, version: u32) -> Result; + /// Return all available adapters. + fn enumerate_adapters(&self) -> Vec>; + /// Create a new surface. + unsafe fn create_surface( + &self, + _: &impl raw_window_handle::HasRawWindowHandle, + ) -> Result; + /// Destroy a surface. + /// + /// The surface shouldn't be destroyed before the attached + /// swapchain is destroyed. + unsafe fn destroy_surface(&self, surface: B::Surface); +} + +/// A strongly-typed index to a particular `MemoryType`. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct MemoryTypeId(pub usize); + +impl From for MemoryTypeId { + fn from(id: usize) -> Self { + MemoryTypeId(id) + } +} + +struct PseudoVec(Option); + +impl std::iter::Extend for PseudoVec { + fn extend>(&mut self, iter: I) { + let mut iter = iter.into_iter(); + self.0 = iter.next(); + assert!(iter.next().is_none()); + } +} + +/// The `Backend` trait wraps together all the types needed +/// for a graphics backend. Each backend module, such as OpenGL +/// or Metal, will implement this trait with its own concrete types. +#[allow(missing_docs)] +pub trait Backend: 'static + Sized + Eq + Clone + Hash + fmt::Debug + Any + Send + Sync { + type Instance: Instance; + type PhysicalDevice: adapter::PhysicalDevice; + type Device: device::Device; + + type Surface: window::PresentationSurface; + type Swapchain: window::Swapchain; + + type QueueFamily: queue::QueueFamily; + type CommandQueue: queue::CommandQueue; + type CommandBuffer: command::CommandBuffer; + + type ShaderModule: fmt::Debug + Any + Send + Sync; + type RenderPass: fmt::Debug + Any + Send + Sync; + type Framebuffer: fmt::Debug + Any + Send + Sync; + + type Memory: fmt::Debug + Any + Send + Sync; + type CommandPool: pool::CommandPool; + + type Buffer: fmt::Debug + Any + Send + Sync; + type BufferView: fmt::Debug + Any + Send + Sync; + type Image: fmt::Debug + Any + Send + Sync; + type ImageView: fmt::Debug + Any + Send + Sync; + type Sampler: fmt::Debug + Any + Send + Sync; + + type ComputePipeline: fmt::Debug + Any + Send + Sync; + type GraphicsPipeline: fmt::Debug + Any + Send + Sync; + type PipelineCache: fmt::Debug + Any + Send + Sync; + type PipelineLayout: fmt::Debug + Any + Send + Sync; + type DescriptorPool: pso::DescriptorPool; + type DescriptorSet: fmt::Debug + Any + Send + Sync; + type DescriptorSetLayout: fmt::Debug + Any + Send + Sync; + + type Fence: fmt::Debug + 
Any + Send + Sync; + type Semaphore: fmt::Debug + Any + Send + Sync; + type Event: fmt::Debug + Any + Send + Sync; + type QueryPool: fmt::Debug + Any + Send + Sync; +} diff --git a/third_party/rust/gfx-hal/src/memory.rs b/third_party/rust/gfx-hal/src/memory.rs old mode 100755 new mode 100644 index 0036d54089f4..0c5388a64720 --- a/third_party/rust/gfx-hal/src/memory.rs +++ b/third_party/rust/gfx-hal/src/memory.rs @@ -1,101 +1,121 @@ -//! Types to describe the properties of memory allocated for gfx resources. - -use crate::{buffer, image, queue, Backend}; -use std::ops::Range; - -bitflags!( - /// Memory property flags. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct Properties: u16 { - /// Device local memory on the GPU. - const DEVICE_LOCAL = 0x1; - - /// Host visible memory can be accessed by the CPU. - /// - /// Backends must provide at least one cpu visible memory. - const CPU_VISIBLE = 0x2; - - /// CPU-GPU coherent. - /// - /// Non-coherent memory requires explicit flushing. - const COHERENT = 0x4; - - /// Cached memory by the CPU - const CPU_CACHED = 0x8; - - /// Memory that may be lazily allocated as needed on the GPU - /// and *must not* be visible to the CPU. - const LAZILY_ALLOCATED = 0x10; - } -); - -bitflags!( - /// Barrier dependency flags. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct Dependencies: u32 { - /// Specifies the memory dependency to be framebuffer-local. - const BY_REGION = 0x1; - //const VIEW_LOCAL = 0x2; - //const DEVICE_GROUP = 0x4; - } -); - -// DOC TODO: Could be better, but I don't know how to do this without -// trying to explain the whole synchronization model. -/// A [memory barrier](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#synchronization-memory-barriers) -/// type for either buffers or images. -#[allow(missing_docs)] -#[derive(Clone, Debug)] -pub enum Barrier<'a, B: Backend> { - /// Applies the given access flags to all buffers in the range. 
- AllBuffers(Range), - /// Applies the given access flags to all images in the range. - AllImages(Range), - /// A memory barrier that defines access to a buffer. - Buffer { - /// The access flags controlling the buffer. - states: Range, - /// The buffer the barrier controls. - target: &'a B::Buffer, - /// The source and destination Queue family IDs, for a [queue family ownership transfer](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#synchronization-queue-transfers) - /// Can be `None` to indicate no ownership transfer. - families: Option>, - /// Range of the buffer the barrier applies to. - range: Range>, - }, - /// A memory barrier that defines access to (a subset of) an image. - Image { - /// The access flags controlling the image. - states: Range, - /// The image the barrier controls. - target: &'a B::Image, - /// The source and destination Queue family IDs, for a [queue family ownership transfer](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#synchronization-queue-transfers) - /// Can be `None` to indicate no ownership transfer. - families: Option>, - /// A `SubresourceRange` that defines which section of an image the barrier applies to. - range: image::SubresourceRange, - }, -} - -impl<'a, B: Backend> Barrier<'a, B> { - /// Create a barrier for the whole buffer between the given states. - pub fn whole_buffer(target: &'a B::Buffer, states: Range) -> Self { - Barrier::Buffer { - states, - target, - families: None, - range: None .. None, - } - } -} - -/// Memory requirements for a certain resource (buffer/image). -#[derive(Clone, Copy, Debug)] -pub struct Requirements { - /// Size in the memory. - pub size: u64, - /// Memory alignment. - pub alignment: u64, - /// Supported memory types. - pub type_mask: u64, -} +//! Types to describe the properties of memory allocated for gfx resources. + +use crate::{buffer, image, queue, Backend}; +use std::ops::Range; + +bitflags!( + /// Memory property flags. 
+ #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Properties: u16 { + /// Device local memory on the GPU. + const DEVICE_LOCAL = 0x1; + + /// Host visible memory can be accessed by the CPU. + /// + /// Backends must provide at least one cpu visible memory. + const CPU_VISIBLE = 0x2; + + /// CPU-GPU coherent. + /// + /// Non-coherent memory requires explicit flushing. + const COHERENT = 0x4; + + /// Cached memory by the CPU + const CPU_CACHED = 0x8; + + /// Memory that may be lazily allocated as needed on the GPU + /// and *must not* be visible to the CPU. + const LAZILY_ALLOCATED = 0x10; + } +); + +bitflags!( + /// Barrier dependency flags. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Dependencies: u32 { + /// Specifies the memory dependency to be framebuffer-local. + const BY_REGION = 0x1; + /// + const VIEW_LOCAL = 0x2; + /// + const DEVICE_GROUP = 0x4; + } +); + +// DOC TODO: Could be better, but I don't know how to do this without +// trying to explain the whole synchronization model. +/// A [memory barrier](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#synchronization-memory-barriers) +/// type for either buffers or images. +#[allow(missing_docs)] +#[derive(Clone, Debug)] +pub enum Barrier<'a, B: Backend> { + /// Applies the given access flags to all buffers in the range. + AllBuffers(Range), + /// Applies the given access flags to all images in the range. + AllImages(Range), + /// A memory barrier that defines access to a buffer. + Buffer { + /// The access flags controlling the buffer. + states: Range, + /// The buffer the barrier controls. + target: &'a B::Buffer, + /// Subrange of the buffer the barrier applies to. 
+ range: buffer::SubRange, + /// The source and destination Queue family IDs, for a [queue family ownership transfer](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#synchronization-queue-transfers) + /// Can be `None` to indicate no ownership transfer. + families: Option>, + }, + /// A memory barrier that defines access to (a subset of) an image. + Image { + /// The access flags controlling the image. + states: Range, + /// The image the barrier controls. + target: &'a B::Image, + /// A `SubresourceRange` that defines which section of an image the barrier applies to. + range: image::SubresourceRange, + /// The source and destination Queue family IDs, for a [queue family ownership transfer](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#synchronization-queue-transfers) + /// Can be `None` to indicate no ownership transfer. + families: Option>, + }, +} + +impl<'a, B: Backend> Barrier<'a, B> { + /// Create a barrier for the whole buffer between the given states. + pub fn whole_buffer(target: &'a B::Buffer, states: Range) -> Self { + Barrier::Buffer { + states, + target, + families: None, + range: buffer::SubRange::WHOLE, + } + } +} + +/// Memory requirements for a certain resource (buffer/image). +#[derive(Clone, Copy, Debug)] +pub struct Requirements { + /// Size in the memory. + pub size: u64, + /// Memory alignment. + pub alignment: u64, + /// Supported memory types. + pub type_mask: u64, +} + +/// A linear segment within a memory block. +#[derive(Clone, Debug, Default, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Segment { + /// Offset to the segment. + pub offset: u64, + /// Size of the segment, or None if unbound. + pub size: Option, +} + +impl Segment { + /// All the memory available. 
+ pub const ALL: Self = Segment { + offset: 0, + size: None, + }; +} diff --git a/third_party/rust/gfx-hal/src/pass.rs b/third_party/rust/gfx-hal/src/pass.rs old mode 100755 new mode 100644 index 8fb1c2a798da..64f8c303717c --- a/third_party/rust/gfx-hal/src/pass.rs +++ b/third_party/rust/gfx-hal/src/pass.rs @@ -1,185 +1,180 @@ -//! RenderPass handling. - -use crate::{format::Format, image, memory::Dependencies, pso::PipelineStage, Backend}; -use std::ops::Range; - -/// Specifies the operation which will be applied at the beginning of a subpass. -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum AttachmentLoadOp { - /// Preserve existing content in the attachment. - Load, - /// Clear the attachment. - Clear, - /// Attachment content will be undefined. - DontCare, -} - -/// -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum AttachmentStoreOp { - /// Content written to the attachment will be preserved. - Store, - /// Attachment content will be undefined. - DontCare, -} - -/// Image layout of an attachment. -pub type AttachmentLayout = image::Layout; - -/// Attachment operations. -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct AttachmentOps { - /// Indicates how the data of the attachment will be loaded at first usage at - /// the beginning of the subpass. - pub load: AttachmentLoadOp, - /// Whether or not data from the store operation will be preserved after the subpass. - pub store: AttachmentStoreOp, -} - -impl AttachmentOps { - /// Specifies `DontCare` for both load and store op. - pub const DONT_CARE: Self = AttachmentOps { - load: AttachmentLoadOp::DontCare, - store: AttachmentStoreOp::DontCare, - }; - - /// Specifies `Load` for load op and `Store` for store op. 
- pub const PRESERVE: Self = AttachmentOps { - load: AttachmentLoadOp::Load, - store: AttachmentStoreOp::Store, - }; - - /// Convenience function to create a new `AttachmentOps`. - pub fn new(load: AttachmentLoadOp, store: AttachmentStoreOp) -> Self { - AttachmentOps { load, store } - } - - /// A method to provide `AttachmentOps::DONT_CARE` to things that expect - /// a default function rather than a value. - #[cfg(feature = "serde")] - fn whatever() -> Self { - Self::DONT_CARE - } -} - -/// An `Attachment` is a description of a resource provided to a render subpass. -/// It includes things such as render targets, images that were produced from -/// previous subpasses, etc. -#[derive(Clone, Debug, Hash, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Attachment { - /// Attachment format - /// - /// In the most cases `format` is not `None`. It should be only used for - /// creating dummy renderpasses, which are used as placeholder for compatible - /// renderpasses. - pub format: Option, - /// Number of samples. - pub samples: image::NumSamples, - /// Load and store operations of the attachment - pub ops: AttachmentOps, - /// Load and store operations of the stencil aspect, if any - #[cfg_attr(feature = "serde", serde(default = "AttachmentOps::whatever"))] - pub stencil_ops: AttachmentOps, - /// Initial and final image layouts of the renderpass. - pub layouts: Range, -} - -impl Attachment { - /// Returns true if this attachment has some clear operations. This is useful - /// when starting a render pass, since there has to be a clear value provided. - pub fn has_clears(&self) -> bool { - self.ops.load == AttachmentLoadOp::Clear || self.stencil_ops.load == AttachmentLoadOp::Clear - } -} - -/// Index of an attachment within a framebuffer/renderpass, -pub type AttachmentId = usize; -/// Reference to an attachment by index and expected image layout. 
-pub type AttachmentRef = (AttachmentId, AttachmentLayout); -/// An AttachmentId that can be used instead of providing an attachment. -pub const ATTACHMENT_UNUSED: AttachmentId = !0; - -/// Which other subpasses a particular subpass depends on. -#[derive(Copy, Clone, Debug, Hash, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum SubpassRef { - /// The subpass depends on something that was submitted to the - /// queue before or after the render pass began. - External, - /// The subpass depends on another subpass with the given index, - /// which must be less than or equal to the index of the current - /// subpass. The index here refers to the corresponding - /// `SubpassId` of a `Subpass`. - Pass(usize), -} - -/// Expresses a dependency between multiple subpasses. This is used -/// both to describe a source or destination subpass; data either -/// explicitly passes from this subpass to the next or from another -/// subpass into this one. -#[derive(Clone, Debug, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct SubpassDependency { - /// Other subpasses this one depends on. - pub passes: Range, - /// Other pipeline stages this subpass depends on. - pub stages: Range, - /// Resource accesses this subpass depends on. - pub accesses: Range, - /// Dependency flags. - pub flags: Dependencies, -} - -/// Description of a subpass for renderpass creation. -#[derive(Clone, Debug)] -pub struct SubpassDesc<'a> { - /// Which attachments will be used as color buffers. - pub colors: &'a [AttachmentRef], - /// Which attachments will be used as depth/stencil buffers. - pub depth_stencil: Option<&'a AttachmentRef>, - /// Which attachments will be used as input attachments. - pub inputs: &'a [AttachmentRef], - /// Which attachments will be used as resolve destinations. - /// - /// The number of resolve attachments may be zero or equal to the number of color attachments. 
- /// At the end of a subpass the color attachment will be resolved to the corresponding - /// resolve attachment. The resolve attachment must not be multisampled. - pub resolves: &'a [AttachmentRef], - /// Attachments that are not used by the subpass but must be preserved to be - /// passed on to subsequent passes. - pub preserves: &'a [AttachmentId], -} - -/// Index of a subpass. -pub type SubpassId = usize; - -/// A sub-pass borrow of a pass. -#[derive(Debug)] -pub struct Subpass<'a, B: Backend> { - /// Index of the subpass - pub index: SubpassId, - /// Main pass borrow. - pub main_pass: &'a B::RenderPass, -} - -impl<'a, B: Backend> Clone for Subpass<'a, B> { - fn clone(&self) -> Self { - Subpass { - index: self.index, - main_pass: self.main_pass, - } - } -} - -impl<'a, B: Backend> PartialEq for Subpass<'a, B> { - fn eq(&self, other: &Self) -> bool { - self.index == other.index && self.main_pass as *const _ == other.main_pass as *const _ - } -} - -impl<'a, B: Backend> Copy for Subpass<'a, B> {} -impl<'a, B: Backend> Eq for Subpass<'a, B> {} +//! RenderPass handling. + +use crate::{format::Format, image, memory::Dependencies, pso::PipelineStage, Backend}; +use std::ops::Range; + +/// Specifies the operation which will be applied at the beginning of a subpass. +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum AttachmentLoadOp { + /// Preserve existing content in the attachment. + Load, + /// Clear the attachment. + Clear, + /// Attachment content will be undefined. + DontCare, +} + +/// +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum AttachmentStoreOp { + /// Content written to the attachment will be preserved. + Store, + /// Attachment content will be undefined. + DontCare, +} + +/// Image layout of an attachment. +pub type AttachmentLayout = image::Layout; + +/// Attachment operations. 
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct AttachmentOps { + /// Indicates how the data of the attachment will be loaded at first usage at + /// the beginning of the subpass. + pub load: AttachmentLoadOp, + /// Whether or not data from the store operation will be preserved after the subpass. + pub store: AttachmentStoreOp, +} + +impl AttachmentOps { + /// Specifies `DontCare` for both load and store op. + pub const DONT_CARE: Self = AttachmentOps { + load: AttachmentLoadOp::DontCare, + store: AttachmentStoreOp::DontCare, + }; + + /// Specifies `Clear` for load op and `Store` for store op. + pub const INIT: Self = AttachmentOps { + load: AttachmentLoadOp::Clear, + store: AttachmentStoreOp::Store, + }; + + /// Specifies `Load` for load op and `Store` for store op. + pub const PRESERVE: Self = AttachmentOps { + load: AttachmentLoadOp::Load, + store: AttachmentStoreOp::Store, + }; + + /// Convenience function to create a new `AttachmentOps`. + pub fn new(load: AttachmentLoadOp, store: AttachmentStoreOp) -> Self { + AttachmentOps { load, store } + } + + /// A method to provide `AttachmentOps::DONT_CARE` to things that expect + /// a default function rather than a value. + #[cfg(feature = "serde")] + fn whatever() -> Self { + Self::DONT_CARE + } +} + +/// An `Attachment` is a description of a resource provided to a render subpass. +/// It includes things such as render targets, images that were produced from +/// previous subpasses, etc. +#[derive(Clone, Debug, Hash, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Attachment { + /// Attachment format + /// + /// In the most cases `format` is not `None`. It should be only used for + /// creating dummy renderpasses, which are used as placeholder for compatible + /// renderpasses. + pub format: Option, + /// Number of samples. 
+ pub samples: image::NumSamples, + /// Load and store operations of the attachment + pub ops: AttachmentOps, + /// Load and store operations of the stencil aspect, if any + #[cfg_attr(feature = "serde", serde(default = "AttachmentOps::whatever"))] + pub stencil_ops: AttachmentOps, + /// Initial and final image layouts of the renderpass. + pub layouts: Range, +} + +impl Attachment { + /// Returns true if this attachment has some clear operations. This is useful + /// when starting a render pass, since there has to be a clear value provided. + pub fn has_clears(&self) -> bool { + self.ops.load == AttachmentLoadOp::Clear || self.stencil_ops.load == AttachmentLoadOp::Clear + } +} + +/// Index of an attachment within a framebuffer/renderpass, +pub type AttachmentId = usize; +/// Reference to an attachment by index and expected image layout. +pub type AttachmentRef = (AttachmentId, AttachmentLayout); +/// An AttachmentId that can be used instead of providing an attachment. +pub const ATTACHMENT_UNUSED: AttachmentId = !0; + +/// Index of a subpass. +pub type SubpassId = u8; + +/// Expresses a dependency between multiple subpasses. This is used +/// both to describe a source or destination subpass; data either +/// explicitly passes from this subpass to the next or from another +/// subpass into this one. +#[derive(Clone, Debug, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SubpassDependency { + /// Other subpasses this one depends on. + /// + /// If one of the range sides is `None`, it refers to the external + /// scope either before or after the whole render pass. + pub passes: Range>, + /// Other pipeline stages this subpass depends on. + pub stages: Range, + /// Resource accesses this subpass depends on. + pub accesses: Range, + /// Dependency flags. + pub flags: Dependencies, +} + +/// Description of a subpass for renderpass creation. 
+#[derive(Clone, Debug)] +pub struct SubpassDesc<'a> { + /// Which attachments will be used as color buffers. + pub colors: &'a [AttachmentRef], + /// Which attachments will be used as depth/stencil buffers. + pub depth_stencil: Option<&'a AttachmentRef>, + /// Which attachments will be used as input attachments. + pub inputs: &'a [AttachmentRef], + /// Which attachments will be used as resolve destinations. + /// + /// The number of resolve attachments may be zero or equal to the number of color attachments. + /// At the end of a subpass the color attachment will be resolved to the corresponding + /// resolve attachment. The resolve attachment must not be multisampled. + pub resolves: &'a [AttachmentRef], + /// Attachments that are not used by the subpass but must be preserved to be + /// passed on to subsequent passes. + pub preserves: &'a [AttachmentId], +} + +/// A sub-pass borrow of a pass. +#[derive(Debug)] +pub struct Subpass<'a, B: Backend> { + /// Index of the subpass + pub index: SubpassId, + /// Main pass borrow. + pub main_pass: &'a B::RenderPass, +} + +impl<'a, B: Backend> Clone for Subpass<'a, B> { + fn clone(&self) -> Self { + Subpass { + index: self.index, + main_pass: self.main_pass, + } + } +} + +impl<'a, B: Backend> PartialEq for Subpass<'a, B> { + fn eq(&self, other: &Self) -> bool { + self.index == other.index && self.main_pass as *const _ == other.main_pass as *const _ + } +} + +impl<'a, B: Backend> Copy for Subpass<'a, B> {} +impl<'a, B: Backend> Eq for Subpass<'a, B> {} diff --git a/third_party/rust/gfx-hal/src/pool.rs b/third_party/rust/gfx-hal/src/pool.rs old mode 100755 new mode 100644 index 8f45b9b784ed..190f23854f2c --- a/third_party/rust/gfx-hal/src/pool.rs +++ b/third_party/rust/gfx-hal/src/pool.rs @@ -1,43 +1,47 @@ -//! Command pools - -use crate::command::Level; -use crate::Backend; - -use smallvec::SmallVec; -use std::any::Any; -use std::fmt; - -bitflags!( - /// Command pool creation flags. 
- #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct CommandPoolCreateFlags: u8 { - /// Indicates short-lived command buffers. - /// Memory optimization hint for implementations. - const TRANSIENT = 0x1; - /// Allow command buffers to be reset individually. - const RESET_INDIVIDUAL = 0x2; - } -); - -/// The allocated command buffers are associated with the creating command queue. -pub trait CommandPool: fmt::Debug + Any + Send + Sync { - /// Reset the command pool and the corresponding command buffers. - /// - /// # Synchronization: You may _not_ free the pool if a command buffer is still in use (pool memory still in use) - unsafe fn reset(&mut self, release_resources: bool); - - /// Allocate a single command buffers from the pool. - unsafe fn allocate_one(&mut self, level: Level) -> B::CommandBuffer { - self.allocate_vec(1, level).pop().unwrap() - } - - /// Allocate new command buffers from the pool. - unsafe fn allocate_vec(&mut self, num: usize, level: Level) -> SmallVec<[B::CommandBuffer; 1]> { - (0 .. num).map(|_| self.allocate_one(level)).collect() - } - - /// Free command buffers which are allocated from this pool. - unsafe fn free(&mut self, buffers: I) - where - I: IntoIterator; -} +//! Command pools + +use crate::command::Level; +use crate::{Backend, PseudoVec}; + +use std::any::Any; +use std::fmt; + +bitflags!( + /// Command pool creation flags. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct CommandPoolCreateFlags: u8 { + /// Indicates short-lived command buffers. + /// Memory optimization hint for implementations. + const TRANSIENT = 0x1; + /// Allow command buffers to be reset individually. + const RESET_INDIVIDUAL = 0x2; + } +); + +/// The allocated command buffers are associated with the creating command queue. +pub trait CommandPool: fmt::Debug + Any + Send + Sync { + /// Reset the command pool and the corresponding command buffers. 
+ /// + /// # Synchronization: You may _not_ free the pool if a command buffer is still in use (pool memory still in use) + unsafe fn reset(&mut self, release_resources: bool); + + /// Allocate a single command buffers from the pool. + unsafe fn allocate_one(&mut self, level: Level) -> B::CommandBuffer { + let mut result = PseudoVec(None); + self.allocate(1, level, &mut result); + result.0.unwrap() + } + + /// Allocate new command buffers from the pool. + unsafe fn allocate(&mut self, num: usize, level: Level, list: &mut E) + where + E: Extend, + { + list.extend((0 .. num).map(|_| self.allocate_one(level))); + } + + /// Free command buffers which are allocated from this pool. + unsafe fn free(&mut self, buffers: I) + where + I: IntoIterator; +} diff --git a/third_party/rust/gfx-hal/src/pso/compute.rs b/third_party/rust/gfx-hal/src/pso/compute.rs old mode 100755 new mode 100644 index 8757e9a4e15e..07097ee92750 --- a/third_party/rust/gfx-hal/src/pso/compute.rs +++ b/third_party/rust/gfx-hal/src/pso/compute.rs @@ -1,31 +1,31 @@ -//! Compute pipeline descriptor. - -use crate::{ - pso::{BasePipeline, EntryPoint, PipelineCreationFlags}, - Backend, -}; - -/// A description of the data needed to construct a compute pipeline. -#[derive(Debug)] -pub struct ComputePipelineDesc<'a, B: Backend> { - /// The shader entry point that performs the computation. - pub shader: EntryPoint<'a, B>, - /// Pipeline layout. - pub layout: &'a B::PipelineLayout, - /// Any flags necessary for the pipeline creation. - pub flags: PipelineCreationFlags, - /// The parent pipeline to this one, if any. - pub parent: BasePipeline<'a, B::ComputePipeline>, -} - -impl<'a, B: Backend> ComputePipelineDesc<'a, B> { - /// Create a new empty PSO descriptor. - pub fn new(shader: EntryPoint<'a, B>, layout: &'a B::PipelineLayout) -> Self { - ComputePipelineDesc { - shader, - layout, - flags: PipelineCreationFlags::empty(), - parent: BasePipeline::None, - } - } -} +//! Compute pipeline descriptor. 
+ +use crate::{ + pso::{BasePipeline, EntryPoint, PipelineCreationFlags}, + Backend, +}; + +/// A description of the data needed to construct a compute pipeline. +#[derive(Debug)] +pub struct ComputePipelineDesc<'a, B: Backend> { + /// The shader entry point that performs the computation. + pub shader: EntryPoint<'a, B>, + /// Pipeline layout. + pub layout: &'a B::PipelineLayout, + /// Any flags necessary for the pipeline creation. + pub flags: PipelineCreationFlags, + /// The parent pipeline to this one, if any. + pub parent: BasePipeline<'a, B::ComputePipeline>, +} + +impl<'a, B: Backend> ComputePipelineDesc<'a, B> { + /// Create a new empty PSO descriptor. + pub fn new(shader: EntryPoint<'a, B>, layout: &'a B::PipelineLayout) -> Self { + ComputePipelineDesc { + shader, + layout, + flags: PipelineCreationFlags::empty(), + parent: BasePipeline::None, + } + } +} diff --git a/third_party/rust/gfx-hal/src/pso/descriptor.rs b/third_party/rust/gfx-hal/src/pso/descriptor.rs old mode 100755 new mode 100644 index a81261e62834..67d973205bf5 --- a/third_party/rust/gfx-hal/src/pso/descriptor.rs +++ b/third_party/rust/gfx-hal/src/pso/descriptor.rs @@ -1,267 +1,294 @@ -//! Descriptor sets and layouts. -//! -//! A [`Descriptor`] is an object that describes the connection between a resource, such as -//! an `Image` or `Buffer`, and a variable in a shader. Descriptors are organized into -//! `DescriptorSet`s, each of which contains multiple descriptors that are bound and unbound to -//! shaders as a single unit. The contents of each descriptor in a set is defined by a -//! `DescriptorSetLayout` which is in turn built of [`DescriptorSetLayoutBinding`]s. A `DescriptorSet` -//! is then allocated from a [`DescriptorPool`] using the `DescriptorSetLayout`, and specific [`Descriptor`]s are -//! then bound to each binding point in the set using a [`DescriptorSetWrite`] and/or [`DescriptorSetCopy`]. -//! 
Each descriptor set may contain descriptors to multiple different sorts of resources, and a shader may -//! use multiple descriptor sets at a time. -//! -//! [`Descriptor`]: enum.Descriptor.html -//! [`DescriptorSetLayoutBinding`]: struct.DescriptorSetLayoutBinding.html -//! [`DescriptorPool`]: trait.DescriptorPool.html -//! [`DescriptorSetWrite`]: struct.DescriptorSetWrite.html -//! [`DescriptorSetCopy`]: struct.DescriptorSetWrite.html - -use smallvec::SmallVec; -use std::{borrow::Borrow, fmt, iter, ops::Range}; - -use crate::{ - buffer::Offset, - image::Layout, - pso::ShaderStageFlags, - Backend, -}; - -/// -pub type DescriptorSetIndex = u16; -/// -pub type DescriptorBinding = u32; -/// -pub type DescriptorArrayIndex = usize; - -/// DOC TODO: Grasping and remembering the differences between these -/// types is a tough task. We might be able to come up with better names? -/// Or even use tuples to describe functionality instead of coming up with fancy names. -#[repr(C)] -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum DescriptorType { - /// Controls filtering parameters for sampling from images. - Sampler = 0, - /// - CombinedImageSampler = 1, - /// Allows sampling (filtered loading) from associated image memory. - /// Usually combined with a `Sampler`. - SampledImage = 2, - /// Allows atomic operations, (non-filtered) loads and stores on image memory. - StorageImage = 3, - /// Read-only, formatted buffer. - UniformTexelBuffer = 4, - /// Read-Write, formatted buffer. - StorageTexelBuffer = 5, - /// Read-only, structured buffer. - UniformBuffer = 6, - /// Read-Write, structured buffer. - StorageBuffer = 7, - /// A uniform buffer that can be bound with an offset into its memory with minimal performance impact, - /// usually used to store pieces of "uniform" data that change per draw call rather than - /// per render pass. 
- UniformBufferDynamic = 8, - /// - StorageBufferDynamic = 9, - /// Allows unfiltered loads of pixel local data in the fragment shader. - InputAttachment = 10, -} - -/// Information about the contents of and in which stages descriptors may be bound to a descriptor -/// set at a certain binding point. Multiple `DescriptorSetLayoutBinding`s are assembled into -/// a `DescriptorSetLayout`, which is then allocated into a `DescriptorSet` using a -/// [`DescriptorPool`]. -/// -/// A descriptor set consists of multiple binding points. -/// Each binding point contains one or multiple descriptors of a certain type. -/// The binding point is only valid for the pipelines stages specified. -/// -/// The binding _must_ match with the corresponding shader interface. -/// -/// [`DescriptorPool`]: trait.DescriptorPool.html -#[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct DescriptorSetLayoutBinding { - /// Descriptor bindings range. - pub binding: DescriptorBinding, - /// Type of the bound descriptors. - pub ty: DescriptorType, - /// Number of descriptors in the array. - /// - /// *Note*: If count is zero, the binding point is reserved - /// and can't be accessed from any shader stages. - pub count: DescriptorArrayIndex, - /// Valid shader stages. - pub stage_flags: ShaderStageFlags, - /// Use the associated list of immutable samplers. - pub immutable_samplers: bool, -} - -/// Set of descriptors of a specific type. -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct DescriptorRangeDesc { - /// Type of the stored descriptors. - pub ty: DescriptorType, - /// Amount of space. - pub count: usize, -} - -/// An error allocating descriptor sets from a pool. -#[derive(Clone, Debug, PartialEq)] -pub enum AllocationError { - /// Memory allocation on the host side failed. - /// This could be caused by a lack of memory or pool fragmentation. 
- Host, - /// Memory allocation on the host side failed. - /// This could be caused by a lack of memory or pool fragmentation. - Device, - /// Memory allocation failed as there is not enough in the pool. - /// This could be caused by too many descriptor sets being created. - OutOfPoolMemory, - /// Memory allocation failed due to pool fragmentation. - FragmentedPool, - /// Descriptor set allocation failed as the layout is incompatible with the pool. - IncompatibleLayout, -} - -impl std::fmt::Display for AllocationError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - AllocationError::Host => write!(fmt, "Failed to allocate descriptor set: Out of host memory"), - AllocationError::Device => write!(fmt, "Failed to allocate descriptor set: Out of device memory"), - AllocationError::OutOfPoolMemory => write!(fmt, "Failed to allocate descriptor set: Out of pool memory"), - AllocationError::FragmentedPool => write!(fmt, "Failed to allocate descriptor set: Pool is fragmented"), - AllocationError::IncompatibleLayout => write!(fmt, "Failed to allocate descriptor set: Incompatible layout"), - } - } -} - -impl std::error::Error for AllocationError {} - -/// A descriptor pool is a collection of memory from which descriptor sets are allocated. -pub trait DescriptorPool: Send + Sync + fmt::Debug { - /// Allocate a descriptor set from the pool. - /// - /// The descriptor set will be allocated from the pool according to the corresponding set layout. However, - /// specific descriptors must still be written to the set before use using a [`DescriptorSetWrite`] or - /// [`DescriptorSetCopy`]. - /// - /// Descriptors will become invalid once the pool is reset. Usage of invalidated descriptor sets results - /// in undefined behavior. 
- /// - /// [`DescriptorSetWrite`]: struct.DescriptorSetWrite.html - /// [`DescriptorSetCopy`]: struct.DescriptorSetCopy.html - unsafe fn allocate_set( - &mut self, - layout: &B::DescriptorSetLayout, - ) -> Result { - let mut sets = SmallVec::new(); - self.allocate_sets(iter::once(layout), &mut sets) - .map(|_| sets.remove(0)) - } - - /// Allocate one or multiple descriptor sets from the pool. - /// - /// The descriptor set will be allocated from the pool according to the corresponding set layout. However, - /// specific descriptors must still be written to the set before use using a [`DescriptorSetWrite`] or - /// [`DescriptorSetCopy`]. - /// - /// Each descriptor set will be allocated from the pool according to the corresponding set layout. - /// Descriptors will become invalid once the pool is reset. Usage of invalidated descriptor sets results - /// in undefined behavior. - /// - /// [`DescriptorSetWrite`]: struct.DescriptorSetWrite.html - /// [`DescriptorSetCopy`]: struct.DescriptorSetCopy.html - unsafe fn allocate_sets( - &mut self, - layouts: I, - sets: &mut SmallVec<[B::DescriptorSet; 1]>, - ) -> Result<(), AllocationError> - where - I: IntoIterator, - I::Item: Borrow, - { - let base = sets.len(); - for layout in layouts { - match self.allocate_set(layout.borrow()) { - Ok(set) => sets.push(set), - Err(e) => { - while sets.len() != base { - self.free_sets(sets.pop()); - } - return Err(e); - } - } - } - Ok(()) - } - - /// Free the given descriptor sets provided as an iterator. - unsafe fn free_sets(&mut self, descriptor_sets: I) - where - I: IntoIterator; - - /// Resets a descriptor pool, releasing all resources from all the descriptor sets - /// allocated from it and freeing the descriptor sets. Invalidates all descriptor - /// sets allocated from the pool; trying to use one after the pool has been reset - /// is undefined behavior. - unsafe fn reset(&mut self); -} - -/// Writes the actual descriptors to be bound into a descriptor set. 
Should be provided -/// to the `write_descriptor_sets` method of a `Device`. -#[allow(missing_docs)] -#[derive(Debug)] -pub struct DescriptorSetWrite<'a, B: Backend, WI> -where - WI: IntoIterator, - WI::Item: Borrow>, -{ - pub set: &'a B::DescriptorSet, - /// *Note*: when there is more descriptors provided than - /// array elements left in the specified binding starting - /// at specified, offset, the updates are spilled onto - /// the next binding (starting with offset 0), and so on. - pub binding: DescriptorBinding, - pub array_offset: DescriptorArrayIndex, - pub descriptors: WI, -} - -/// A handle to a specific shader resource that can be bound for use in a `DescriptorSet`. -/// Usually provided in a [`DescriptorSetWrite`] -/// -/// [`DescriptorSetWrite`]: struct.DescriptorSetWrite.html -#[allow(missing_docs)] -#[derive(Clone, Debug)] -pub enum Descriptor<'a, B: Backend> { - Sampler(&'a B::Sampler), - Image(&'a B::ImageView, Layout), - CombinedImageSampler(&'a B::ImageView, Layout, &'a B::Sampler), - Buffer(&'a B::Buffer, Range>), - UniformTexelBuffer(&'a B::BufferView), - StorageTexelBuffer(&'a B::BufferView), -} - -/// Copies a range of descriptors to be bound from one descriptor set to another Should be -/// provided to the `copy_descriptor_sets` method of a `Device`. -#[allow(missing_docs)] -#[derive(Clone, Copy, Debug)] -pub struct DescriptorSetCopy<'a, B: Backend> { - pub src_set: &'a B::DescriptorSet, - pub src_binding: DescriptorBinding, - pub src_array_offset: DescriptorArrayIndex, - pub dst_set: &'a B::DescriptorSet, - pub dst_binding: DescriptorBinding, - pub dst_array_offset: DescriptorArrayIndex, - pub count: usize, -} - -bitflags! { - /// Descriptor pool creation flags. - pub struct DescriptorPoolCreateFlags: u32 { - /// Specifies that descriptor sets are allowed to be freed from the pool - /// individually. - const FREE_DESCRIPTOR_SET = 0x1; - } -} +//! Descriptor sets and layouts. +//! +//! 
A [`Descriptor`] is an object that describes the connection between a resource, such as +//! an `Image` or `Buffer`, and a variable in a shader. Descriptors are organized into +//! `DescriptorSet`s, each of which contains multiple descriptors that are bound and unbound to +//! shaders as a single unit. The contents of each descriptor in a set is defined by a +//! `DescriptorSetLayout` which is in turn built of [`DescriptorSetLayoutBinding`]s. A `DescriptorSet` +//! is then allocated from a [`DescriptorPool`] using the `DescriptorSetLayout`, and specific [`Descriptor`]s are +//! then bound to each binding point in the set using a [`DescriptorSetWrite`] and/or [`DescriptorSetCopy`]. +//! Each descriptor set may contain descriptors to multiple different sorts of resources, and a shader may +//! use multiple descriptor sets at a time. +//! +//! [`Descriptor`]: enum.Descriptor.html +//! [`DescriptorSetLayoutBinding`]: struct.DescriptorSetLayoutBinding.html +//! [`DescriptorPool`]: trait.DescriptorPool.html +//! [`DescriptorSetWrite`]: struct.DescriptorSetWrite.html +//! [`DescriptorSetCopy`]: struct.DescriptorSetWrite.html + +use std::{borrow::Borrow, fmt, iter}; + +use crate::{buffer::SubRange, image::Layout, pso::ShaderStageFlags, Backend, PseudoVec}; + +/// +pub type DescriptorSetIndex = u16; +/// +pub type DescriptorBinding = u32; +/// +pub type DescriptorArrayIndex = usize; + +/// Specific type of a buffer. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum BufferDescriptorType { + /// Storage buffers allow load, store, and atomic operations. + Storage { + /// If true, store operations are not permitted on this buffer. + read_only: bool, + }, + /// Uniform buffers provide constant data to be accessed in a shader. + Uniform, +} + +/// Format of a buffer. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum BufferDescriptorFormat { + /// The buffer is interpreted as a structure defined in a shader. + Structured { + /// If true, the buffer is accessed by an additional offset specified in + /// the `offsets` parameter of `CommandBuffer::bind_*_descriptor_sets`. + dynamic_offset: bool, + }, + /// The buffer is interpreted as a 1-D array of texels, which undergo format + /// conversion when loaded in a shader. + Texel, +} + +/// Specific type of an image descriptor. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum ImageDescriptorType { + /// A sampled image allows sampling operations. + Sampled { + /// If true, this descriptor corresponds to both a sampled image and a + /// sampler to be used with that image. + with_sampler: bool, + }, + /// A storage image allows load, store and atomic operations. + Storage { + /// If true, store operations are not permitted on this image. + read_only: bool, + }, +} + +/// The type of a descriptor. +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum DescriptorType { + /// A descriptor associated with sampler. + Sampler, + /// A descriptor associated with an image. + Image { + /// The specific type of this image descriptor. + ty: ImageDescriptorType, + }, + /// A descriptor associated with a buffer. + Buffer { + /// The type of this buffer descriptor. + ty: BufferDescriptorType, + /// The format of this buffer descriptor. + format: BufferDescriptorFormat, + }, + /// A descriptor associated with an input attachment. + InputAttachment, +} + +/// Information about the contents of and in which stages descriptors may be bound to a descriptor +/// set at a certain binding point. 
Multiple `DescriptorSetLayoutBinding`s are assembled into +/// a `DescriptorSetLayout`, which is then allocated into a `DescriptorSet` using a +/// [`DescriptorPool`]. +/// +/// A descriptor set consists of multiple binding points. +/// Each binding point contains one or multiple descriptors of a certain type. +/// The binding point is only valid for the pipelines stages specified. +/// +/// The binding _must_ match with the corresponding shader interface. +/// +/// [`DescriptorPool`]: trait.DescriptorPool.html +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct DescriptorSetLayoutBinding { + /// Descriptor bindings range. + pub binding: DescriptorBinding, + /// Type of the bound descriptors. + pub ty: DescriptorType, + /// Number of descriptors in the array. + /// + /// *Note*: If count is zero, the binding point is reserved + /// and can't be accessed from any shader stages. + pub count: DescriptorArrayIndex, + /// Valid shader stages. + pub stage_flags: ShaderStageFlags, + /// Use the associated list of immutable samplers. + pub immutable_samplers: bool, +} + +/// Set of descriptors of a specific type. +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct DescriptorRangeDesc { + /// Type of the stored descriptors. + pub ty: DescriptorType, + /// Amount of space. + pub count: usize, +} + +/// An error allocating descriptor sets from a pool. +#[derive(Clone, Debug, PartialEq)] +pub enum AllocationError { + /// Memory allocation on the host side failed. + /// This could be caused by a lack of memory or pool fragmentation. + Host, + /// Memory allocation on the host side failed. + /// This could be caused by a lack of memory or pool fragmentation. + Device, + /// Memory allocation failed as there is not enough in the pool. + /// This could be caused by too many descriptor sets being created. 
+ OutOfPoolMemory, + /// Memory allocation failed due to pool fragmentation. + FragmentedPool, + /// Descriptor set allocation failed as the layout is incompatible with the pool. + IncompatibleLayout, +} + +impl std::fmt::Display for AllocationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AllocationError::Host => { + write!(fmt, "Failed to allocate descriptor set: Out of host memory") + } + AllocationError::Device => write!( + fmt, + "Failed to allocate descriptor set: Out of device memory" + ), + AllocationError::OutOfPoolMemory => { + write!(fmt, "Failed to allocate descriptor set: Out of pool memory") + } + AllocationError::FragmentedPool => { + write!(fmt, "Failed to allocate descriptor set: Pool is fragmented") + } + AllocationError::IncompatibleLayout => write!( + fmt, + "Failed to allocate descriptor set: Incompatible layout" + ), + } + } +} + +impl std::error::Error for AllocationError {} + +/// A descriptor pool is a collection of memory from which descriptor sets are allocated. +pub trait DescriptorPool: Send + Sync + fmt::Debug { + /// Allocate a descriptor set from the pool. + /// + /// The descriptor set will be allocated from the pool according to the corresponding set layout. However, + /// specific descriptors must still be written to the set before use using a [`DescriptorSetWrite`] or + /// [`DescriptorSetCopy`]. + /// + /// Descriptors will become invalid once the pool is reset. Usage of invalidated descriptor sets results + /// in undefined behavior. + /// + /// [`DescriptorSetWrite`]: struct.DescriptorSetWrite.html + /// [`DescriptorSetCopy`]: struct.DescriptorSetCopy.html + unsafe fn allocate_set( + &mut self, + layout: &B::DescriptorSetLayout, + ) -> Result { + let mut result = PseudoVec(None); + self.allocate(iter::once(layout), &mut result)?; + Ok(result.0.unwrap()) + } + + /// Allocate multiple descriptor sets from the pool. 
+ /// + /// The descriptor set will be allocated from the pool according to the corresponding set layout. However, + /// specific descriptors must still be written to the set before use using a [`DescriptorSetWrite`] or + /// [`DescriptorSetCopy`]. + /// + /// Each descriptor set will be allocated from the pool according to the corresponding set layout. + /// Descriptors will become invalid once the pool is reset. Usage of invalidated descriptor sets results + /// in undefined behavior. + /// + /// [`DescriptorSetWrite`]: struct.DescriptorSetWrite.html + /// [`DescriptorSetCopy`]: struct.DescriptorSetCopy.html + unsafe fn allocate(&mut self, layouts: I, list: &mut E) -> Result<(), AllocationError> + where + I: IntoIterator, + I::Item: Borrow, + E: Extend, + { + for layout in layouts { + let set = self.allocate_set(layout.borrow())?; + list.extend(iter::once(set)); + } + Ok(()) + } + + /// Free the given descriptor sets provided as an iterator. + unsafe fn free_sets(&mut self, descriptor_sets: I) + where + I: IntoIterator; + + /// Resets a descriptor pool, releasing all resources from all the descriptor sets + /// allocated from it and freeing the descriptor sets. Invalidates all descriptor + /// sets allocated from the pool; trying to use one after the pool has been reset + /// is undefined behavior. + unsafe fn reset(&mut self); +} + +/// Writes the actual descriptors to be bound into a descriptor set. Should be provided +/// to the `write_descriptor_sets` method of a `Device`. +#[allow(missing_docs)] +#[derive(Debug)] +pub struct DescriptorSetWrite<'a, B: Backend, WI> +where + WI: IntoIterator, + WI::Item: Borrow>, +{ + pub set: &'a B::DescriptorSet, + /// *Note*: when there is more descriptors provided than + /// array elements left in the specified binding starting + /// at specified, offset, the updates are spilled onto + /// the next binding (starting with offset 0), and so on. 
+ pub binding: DescriptorBinding, + pub array_offset: DescriptorArrayIndex, + pub descriptors: WI, +} + +/// A handle to a specific shader resource that can be bound for use in a `DescriptorSet`. +/// Usually provided in a [`DescriptorSetWrite`] +/// +/// [`DescriptorSetWrite`]: struct.DescriptorSetWrite.html +#[allow(missing_docs)] +#[derive(Clone, Debug)] +pub enum Descriptor<'a, B: Backend> { + Sampler(&'a B::Sampler), + Image(&'a B::ImageView, Layout), + CombinedImageSampler(&'a B::ImageView, Layout, &'a B::Sampler), + Buffer(&'a B::Buffer, SubRange), + TexelBuffer(&'a B::BufferView), +} + +/// Copies a range of descriptors to be bound from one descriptor set to another Should be +/// provided to the `copy_descriptor_sets` method of a `Device`. +#[allow(missing_docs)] +#[derive(Clone, Copy, Debug)] +pub struct DescriptorSetCopy<'a, B: Backend> { + pub src_set: &'a B::DescriptorSet, + pub src_binding: DescriptorBinding, + pub src_array_offset: DescriptorArrayIndex, + pub dst_set: &'a B::DescriptorSet, + pub dst_binding: DescriptorBinding, + pub dst_array_offset: DescriptorArrayIndex, + pub count: usize, +} + +bitflags! { + /// Descriptor pool creation flags. + pub struct DescriptorPoolCreateFlags: u32 { + /// Specifies that descriptor sets are allowed to be freed from the pool + /// individually. + const FREE_DESCRIPTOR_SET = 0x1; + } +} diff --git a/third_party/rust/gfx-hal/src/pso/graphics.rs b/third_party/rust/gfx-hal/src/pso/graphics.rs old mode 100755 new mode 100644 index de45ec9ffe1b..34aac49b4e40 --- a/third_party/rust/gfx-hal/src/pso/graphics.rs +++ b/third_party/rust/gfx-hal/src/pso/graphics.rs @@ -1,289 +1,295 @@ -//! Graphics pipeline descriptor. 
- -use crate::{ - image, - pass, - pso::{ - input_assembler::{AttributeDesc, InputAssemblerDesc, Primitive, VertexBufferDesc}, - output_merger::{ColorBlendDesc, DepthStencilDesc, Face}, - BasePipeline, EntryPoint, PipelineCreationFlags, State, - }, - Backend, -}; - -use std::ops::Range; - -/// A simple struct describing a rect with integer coordinates. -#[derive(Clone, Copy, Debug, Hash, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Rect { - /// X position. - pub x: i16, - /// Y position. - pub y: i16, - /// Width. - pub w: i16, - /// Height. - pub h: i16, -} - -/// A simple struct describing a rect with integer coordinates. -#[derive(Clone, Debug, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct ClearRect { - /// 2D region. - pub rect: Rect, - /// Layer range. - pub layers: Range, -} - -/// A viewport, generally equating to a window on a display. -#[derive(Clone, Debug, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Viewport { - /// The viewport boundaries. - pub rect: Rect, - /// The viewport depth limits. - pub depth: Range, -} - -/// A single RGBA float color. -pub type ColorValue = [f32; 4]; -/// A single depth value from a depth buffer. -pub type DepthValue = f32; -/// A single value from a stencil buffer. -pub type StencilValue = u32; - -/// A complete set of shaders to build a graphics pipeline. -/// -/// All except the vertex shader are optional; omitting them -/// passes through the inputs without change. -/// -/// If a fragment shader is omitted, the results of fragment -/// processing are undefined. Specifically, any fragment color -/// outputs are considered to have undefined values, and the -/// fragment depth is considered to be unmodified. This can -/// be useful for depth-only rendering. -#[derive(Clone, Debug)] -pub struct GraphicsShaderSet<'a, B: Backend> { - /// A shader that outputs a vertex in a model. 
- pub vertex: EntryPoint<'a, B>, - /// A hull shader takes in an input patch (values representing - /// a small portion of a shape, which may be actual geometry or may - /// be parameters for creating geometry) and produces one or more - /// output patches. - pub hull: Option>, - /// A shader that takes in domains produced from a hull shader's output - /// patches and computes actual vertex positions. - pub domain: Option>, - /// A shader that takes given input vertexes and outputs zero - /// or more output vertexes. - pub geometry: Option>, - /// A shader that outputs a value for a fragment. - /// Usually this value is a color that is then displayed as a - /// pixel on a screen. - pub fragment: Option>, -} - -/// Baked-in pipeline states. -#[derive(Clone, Debug, Default, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct BakedStates { - /// Static viewport. TODO: multiple viewports - pub viewport: Option, - /// Static scissor. TODO: multiple scissors - pub scissor: Option, - /// Static blend constant color. - pub blend_color: Option, - /// Static depth bounds. - pub depth_bounds: Option>, -} - -/// A description of all the settings that can be altered -/// when creating a graphics pipeline. -#[derive(Debug)] -pub struct GraphicsPipelineDesc<'a, B: Backend> { - /// A set of graphics shaders to use for the pipeline. - pub shaders: GraphicsShaderSet<'a, B>, - /// Rasterizer setup - pub rasterizer: Rasterizer, - /// Vertex buffers (IA) - pub vertex_buffers: Vec, - /// Vertex attributes (IA) - pub attributes: Vec, - /// Input assembler attributes, describes how - /// vertices are assembled into primitives (such as triangles). - pub input_assembler: InputAssemblerDesc, - /// Description of how blend operations should be performed. - pub blender: BlendDesc, - /// Depth stencil (DSV) - pub depth_stencil: DepthStencilDesc, - /// Multisampling. - pub multisampling: Option, - /// Static pipeline states. 
- pub baked_states: BakedStates, - /// Pipeline layout. - pub layout: &'a B::PipelineLayout, - /// Subpass in which the pipeline can be executed. - pub subpass: pass::Subpass<'a, B>, - /// Options that may be set to alter pipeline properties. - pub flags: PipelineCreationFlags, - /// The parent pipeline, which may be - /// `BasePipeline::None`. - pub parent: BasePipeline<'a, B::GraphicsPipeline>, -} - -impl<'a, B: Backend> GraphicsPipelineDesc<'a, B> { - /// Create a new empty PSO descriptor. - pub fn new( - shaders: GraphicsShaderSet<'a, B>, - primitive: Primitive, - rasterizer: Rasterizer, - layout: &'a B::PipelineLayout, - subpass: pass::Subpass<'a, B>, - ) -> Self { - GraphicsPipelineDesc { - shaders, - rasterizer, - vertex_buffers: Vec::new(), - attributes: Vec::new(), - input_assembler: InputAssemblerDesc::new(primitive), - blender: BlendDesc::default(), - depth_stencil: DepthStencilDesc::default(), - multisampling: None, - baked_states: BakedStates::default(), - layout, - subpass, - flags: PipelineCreationFlags::empty(), - parent: BasePipeline::None, - } - } -} - -/// Methods for rasterizing polygons, ie, turning the mesh -/// into a raster image. -#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum PolygonMode { - /// Rasterize as a point. - Point, - /// Rasterize as a line with the given width. - Line(State), - /// Rasterize as a face. - Fill, -} - -/// The front face winding order of a set of vertices. This is -/// the order of vertexes that define which side of a face is -/// the "front". -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum FrontFace { - /// Clockwise winding order. - Clockwise, - /// Counter-clockwise winding order. - CounterClockwise, -} - -/// A depth bias allows changing the produced depth values -/// for fragments slightly but consistently. 
This permits -/// drawing of multiple polygons in the same plane without -/// Z-fighting, such as when trying to draw shadows on a wall. -/// -/// For details of the algorithm and equations, see -/// [the Vulkan spec](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#primsrast-depthbias). -#[derive(Copy, Clone, Debug, Default, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct DepthBias { - /// A constant depth value added to each fragment. - pub const_factor: f32, - /// The minimum or maximum depth bias of a fragment. - pub clamp: f32, - /// A constant bias applied to the fragment's slope. - pub slope_factor: f32, -} - -/// Rasterization state. -#[derive(Copy, Clone, Debug, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Rasterizer { - /// How to rasterize this primitive. - pub polygon_mode: PolygonMode, - /// Which face should be culled. - pub cull_face: Face, - /// Which vertex winding is considered to be the front face for culling. - pub front_face: FrontFace, - /// Whether or not to enable depth clamping; when enabled, instead of - /// fragments being omitted when they are outside the bounds of the z-plane, - /// they will be clamped to the min or max z value. - pub depth_clamping: bool, - /// What depth bias, if any, to use for the drawn primitives. - pub depth_bias: Option>, - /// Controls how triangles will be rasterized depending on their overlap with pixels. - pub conservative: bool, -} - -impl Rasterizer { - /// Simple polygon-filling rasterizer state - pub const FILL: Self = Rasterizer { - polygon_mode: PolygonMode::Fill, - cull_face: Face::NONE, - front_face: FrontFace::CounterClockwise, - depth_clamping: false, - depth_bias: None, - conservative: false, - }; -} - -/// A description of an equation for how to blend transparent, overlapping fragments. 
-#[derive(Clone, Debug, Default, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct BlendDesc { - /// The logic operation to apply to the blending equation, if any. - pub logic_op: Option, - /// Which color targets to apply the blending operation to. - pub targets: Vec, -} - -/// Logic operations used for specifying blend equations. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[allow(missing_docs)] -pub enum LogicOp { - Clear = 0, - And = 1, - AndReverse = 2, - Copy = 3, - AndInverted = 4, - NoOp = 5, - Xor = 6, - Or = 7, - Nor = 8, - Equivalent = 9, - Invert = 10, - OrReverse = 11, - CopyInverted = 12, - OrInverted = 13, - Nand = 14, - Set = 15, -} - -/// -pub type SampleMask = u64; - -/// -#[derive(Clone, Debug, PartialEq)] -pub struct Multisampling { - /// - pub rasterization_samples: image::NumSamples, - /// - pub sample_shading: Option, - /// - pub sample_mask: SampleMask, - /// Toggles alpha-to-coverage multisampling, which can produce nicer edges - /// when many partially-transparent polygons are overlapping. - /// See [here]( https://msdn.microsoft.com/en-us/library/windows/desktop/bb205072(v=vs.85).aspx#Alpha_To_Coverage) for a full description. - pub alpha_coverage: bool, - /// - pub alpha_to_one: bool, -} +//! Graphics pipeline descriptor. + +use crate::{ + image, + pass, + pso::{ + input_assembler::{AttributeDesc, InputAssemblerDesc, Primitive, VertexBufferDesc}, + output_merger::{ColorBlendDesc, DepthStencilDesc, Face}, + BasePipeline, + EntryPoint, + PipelineCreationFlags, + State, + }, + Backend, +}; + +use std::ops::Range; + +/// A simple struct describing a rect with integer coordinates. +#[derive(Clone, Copy, Debug, Hash, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Rect { + /// X position. + pub x: i16, + /// Y position. + pub y: i16, + /// Width. + pub w: i16, + /// Height. 
+ pub h: i16, +} + +/// A simple struct describing a rect with integer coordinates. +#[derive(Clone, Debug, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ClearRect { + /// 2D region. + pub rect: Rect, + /// Layer range. + pub layers: Range, +} + +/// A viewport, generally equating to a window on a display. +#[derive(Clone, Debug, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Viewport { + /// The viewport boundaries. + pub rect: Rect, + /// The viewport depth limits. + pub depth: Range, +} + +/// A single RGBA float color. +pub type ColorValue = [f32; 4]; +/// A single depth value from a depth buffer. +pub type DepthValue = f32; +/// A single value from a stencil buffer. +pub type StencilValue = u32; + +/// A complete set of shaders to build a graphics pipeline. +/// +/// All except the vertex shader are optional; omitting them +/// passes through the inputs without change. +/// +/// If a fragment shader is omitted, the results of fragment +/// processing are undefined. Specifically, any fragment color +/// outputs are considered to have undefined values, and the +/// fragment depth is considered to be unmodified. This can +/// be useful for depth-only rendering. +#[derive(Clone, Debug)] +pub struct GraphicsShaderSet<'a, B: Backend> { + /// A shader that outputs a vertex in a model. + pub vertex: EntryPoint<'a, B>, + /// A hull shader takes in an input patch (values representing + /// a small portion of a shape, which may be actual geometry or may + /// be parameters for creating geometry) and produces one or more + /// output patches. + pub hull: Option>, + /// A shader that takes in domains produced from a hull shader's output + /// patches and computes actual vertex positions. + pub domain: Option>, + /// A shader that takes given input vertexes and outputs zero + /// or more output vertexes. + pub geometry: Option>, + /// A shader that outputs a value for a fragment. 
+ /// Usually this value is a color that is then displayed as a + /// pixel on a screen. + pub fragment: Option>, +} + +/// Baked-in pipeline states. +#[derive(Clone, Debug, Default, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct BakedStates { + /// Static viewport. TODO: multiple viewports + pub viewport: Option, + /// Static scissor. TODO: multiple scissors + pub scissor: Option, + /// Static blend constant color. + pub blend_color: Option, + /// Static depth bounds. + pub depth_bounds: Option>, +} + +/// A description of all the settings that can be altered +/// when creating a graphics pipeline. +#[derive(Debug)] +pub struct GraphicsPipelineDesc<'a, B: Backend> { + /// A set of graphics shaders to use for the pipeline. + pub shaders: GraphicsShaderSet<'a, B>, + /// Rasterizer setup + pub rasterizer: Rasterizer, + /// Vertex buffers (IA) + pub vertex_buffers: Vec, + /// Vertex attributes (IA) + pub attributes: Vec, + /// Input assembler attributes, describes how + /// vertices are assembled into primitives (such as triangles). + pub input_assembler: InputAssemblerDesc, + /// Description of how blend operations should be performed. + pub blender: BlendDesc, + /// Depth stencil (DSV) + pub depth_stencil: DepthStencilDesc, + /// Multisampling. + pub multisampling: Option, + /// Static pipeline states. + pub baked_states: BakedStates, + /// Pipeline layout. + pub layout: &'a B::PipelineLayout, + /// Subpass in which the pipeline can be executed. + pub subpass: pass::Subpass<'a, B>, + /// Options that may be set to alter pipeline properties. + pub flags: PipelineCreationFlags, + /// The parent pipeline, which may be + /// `BasePipeline::None`. + pub parent: BasePipeline<'a, B::GraphicsPipeline>, +} + +impl<'a, B: Backend> GraphicsPipelineDesc<'a, B> { + /// Create a new empty PSO descriptor. 
+ pub fn new( + shaders: GraphicsShaderSet<'a, B>, + primitive: Primitive, + rasterizer: Rasterizer, + layout: &'a B::PipelineLayout, + subpass: pass::Subpass<'a, B>, + ) -> Self { + GraphicsPipelineDesc { + shaders, + rasterizer, + vertex_buffers: Vec::new(), + attributes: Vec::new(), + input_assembler: InputAssemblerDesc::new(primitive), + blender: BlendDesc::default(), + depth_stencil: DepthStencilDesc::default(), + multisampling: None, + baked_states: BakedStates::default(), + layout, + subpass, + flags: PipelineCreationFlags::empty(), + parent: BasePipeline::None, + } + } +} + +/// Methods for rasterizing polygons, ie, turning the mesh +/// into a raster image. +#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum PolygonMode { + /// Rasterize as a point. + Point, + /// Rasterize as a line with the given width. + Line, + /// Rasterize as a face. + Fill, +} + +/// The front face winding order of a set of vertices. This is +/// the order of vertexes that define which side of a face is +/// the "front". +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum FrontFace { + /// Clockwise winding order. + Clockwise, + /// Counter-clockwise winding order. + CounterClockwise, +} + +/// A depth bias allows changing the produced depth values +/// for fragments slightly but consistently. This permits +/// drawing of multiple polygons in the same plane without +/// Z-fighting, such as when trying to draw shadows on a wall. +/// +/// For details of the algorithm and equations, see +/// [the Vulkan spec](https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#primsrast-depthbias). +#[derive(Copy, Clone, Debug, Default, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct DepthBias { + /// A constant depth value added to each fragment. 
+ pub const_factor: f32, + /// The minimum or maximum depth bias of a fragment. + pub clamp: f32, + /// A constant bias applied to the fragment's slope. + pub slope_factor: f32, +} + +/// Rasterization state. +#[derive(Copy, Clone, Debug, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Rasterizer { + /// How to rasterize this primitive. + pub polygon_mode: PolygonMode, + /// Which face should be culled. + pub cull_face: Face, + /// Which vertex winding is considered to be the front face for culling. + pub front_face: FrontFace, + /// Whether or not to enable depth clamping; when enabled, instead of + /// fragments being omitted when they are outside the bounds of the z-plane, + /// they will be clamped to the min or max z value. + pub depth_clamping: bool, + /// What depth bias, if any, to use for the drawn primitives. + pub depth_bias: Option>, + /// Controls how triangles will be rasterized depending on their overlap with pixels. + pub conservative: bool, + /// Controls width of rasterized line segments. + pub line_width: State, +} + +impl Rasterizer { + /// Simple polygon-filling rasterizer state + pub const FILL: Self = Rasterizer { + polygon_mode: PolygonMode::Fill, + cull_face: Face::NONE, + front_face: FrontFace::CounterClockwise, + depth_clamping: false, + depth_bias: None, + conservative: false, + line_width: State::Static(1.0), + }; +} + +/// A description of an equation for how to blend transparent, overlapping fragments. +#[derive(Clone, Debug, Default, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct BlendDesc { + /// The logic operation to apply to the blending equation, if any. + pub logic_op: Option, + /// Which color targets to apply the blending operation to. + pub targets: Vec, +} + +/// Logic operations used for specifying blend equations. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[allow(missing_docs)] +pub enum LogicOp { + Clear = 0, + And = 1, + AndReverse = 2, + Copy = 3, + AndInverted = 4, + NoOp = 5, + Xor = 6, + Or = 7, + Nor = 8, + Equivalent = 9, + Invert = 10, + OrReverse = 11, + CopyInverted = 12, + OrInverted = 13, + Nand = 14, + Set = 15, +} + +/// +pub type SampleMask = u64; + +/// +#[derive(Clone, Debug, PartialEq)] +pub struct Multisampling { + /// + pub rasterization_samples: image::NumSamples, + /// + pub sample_shading: Option, + /// + pub sample_mask: SampleMask, + /// Toggles alpha-to-coverage multisampling, which can produce nicer edges + /// when many partially-transparent polygons are overlapping. + /// See [here]( https://msdn.microsoft.com/en-us/library/windows/desktop/bb205072(v=vs.85).aspx#Alpha_To_Coverage) for a full description. + pub alpha_coverage: bool, + /// + pub alpha_to_one: bool, +} diff --git a/third_party/rust/gfx-hal/src/pso/input_assembler.rs b/third_party/rust/gfx-hal/src/pso/input_assembler.rs old mode 100755 new mode 100644 index 2d9b7b067fa6..d810f1b862f4 --- a/third_party/rust/gfx-hal/src/pso/input_assembler.rs +++ b/third_party/rust/gfx-hal/src/pso/input_assembler.rs @@ -1,146 +1,145 @@ -//! Input Assembler (IA) stage description. -//! The input assembler collects raw vertex and index data. - -use crate::{format, IndexType}; - -/// Shader binding location. -pub type Location = u32; -/// Index of a vertex buffer. -pub type BufferIndex = u32; -/// Offset of an attribute from the start of the buffer, in bytes -pub type ElemOffset = u32; -/// Offset between attribute values, in bytes -pub type ElemStride = u32; -/// Number of instances between each advancement of the vertex buffer. 
-pub type InstanceRate = u8; -/// Number of vertices in a patch -pub type PatchSize = u8; - - -/// The rate at which to advance input data to shaders for the given buffer -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum VertexInputRate { - /// Advance the buffer after every vertex - Vertex, - /// Advance the buffer after every instance - Instance(InstanceRate), -} - -impl VertexInputRate { - /// Get the numeric representation of the rate - pub fn as_uint(&self) -> u8 { - match *self { - VertexInputRate::Vertex => 0, - VertexInputRate::Instance(divisor) => divisor, - } - } -} - -/// A struct element descriptor. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Element { - /// Element format - pub format: F, - /// Offset from the beginning of the container, in bytes - pub offset: ElemOffset, -} - -/// Vertex buffer description. Notably, completely separate from resource `Descriptor`s -/// used in `DescriptorSet`s. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct VertexBufferDesc { - /// Binding number of this vertex buffer. This binding number is - /// used only for vertex buffers, and is completely separate from - /// `Descriptor` and `DescriptorSet` bind points. - pub binding: BufferIndex, - /// Total container size, in bytes. - /// Specifies the byte distance between two consecutive elements. - pub stride: ElemStride, - /// The rate at which to advance data for the given buffer - /// - /// i.e. the rate at which data passed to shaders will get advanced by - /// `stride` bytes - pub rate: VertexInputRate, -} - -/// Vertex attribute description. Notably, completely separate from resource `Descriptor`s -/// used in `DescriptorSet`s. 
-#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct AttributeDesc { - /// Attribute binding location in the shader. Attribute locations are - /// shared between all vertex buffers in a pipeline, meaning that even if the - /// data for this attribute comes from a different vertex buffer, it still cannot - /// share the same location with another attribute. - pub location: Location, - /// Binding number of the associated vertex buffer. - pub binding: BufferIndex, - /// Attribute element description. - pub element: Element, -} - -/// Describes the type of geometric primitives, -/// created from vertex data. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[repr(u8)] -pub enum Primitive { - /// Each vertex represents a single point. - PointList, - /// Each pair of vertices represent a single line segment. For example, with `[a, b, c, d, - /// e]`, `a` and `b` form a line, `c` and `d` form a line, and `e` is discarded. - LineList, - /// Every two consecutive vertices represent a single line segment. Visually forms a "path" of - /// lines, as they are all connected. For example, with `[a, b, c]`, `a` and `b` form a line - /// line, and `b` and `c` form a line. - LineStrip, - /// Each triplet of vertices represent a single triangle. For example, with `[a, b, c, d, e]`, - /// `a`, `b`, and `c` form a triangle, `d` and `e` are discarded. - TriangleList, - /// Every three consecutive vertices represent a single triangle. For example, with `[a, b, c, - /// d]`, `a`, `b`, and `c` form a triangle, and `b`, `c`, and `d` form a triangle. - TriangleStrip, - /// Patch list, - /// used with shaders capable of producing primitives on their own (tessellation) - PatchList(PatchSize), -} - -/// All the information needed to create an input assembler. 
-#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct InputAssemblerDesc { - /// Type of the primitive - pub primitive: Primitive, - /// When adjacency information is enabled, every even-numbered vertex - /// (every other starting from the first) represents an additional - /// vertex for the primitive, while odd-numbered vertices (every other starting from the - /// second) represent adjacent vertices. - /// - /// For example, with `[a, b, c, d, e, f, g, h]`, `[a, c, - /// e, g]` form a triangle strip, and `[b, d, f, h]` are the adjacent vertices, where `b`, `d`, - /// and `f` are adjacent to the first triangle in the strip, and `d`, `f`, and `h` are adjacent - /// to the second. - pub with_adjacency: bool, - /// Describes whether or not primitive restart is supported for - /// an input assembler. Primitive restart is a feature that - /// allows a mark to be placed in an index buffer where it is - /// is "broken" into multiple pieces of geometry. - /// - /// See - /// for more detail. - pub restart_index: Option, -} - -impl InputAssemblerDesc { - /// Create a new IA descriptor without primitive restart or adjucency. - pub fn new(primitive: Primitive) -> Self { - InputAssemblerDesc { - primitive, - with_adjacency: false, - restart_index: None, - } - } -} +//! Input Assembler (IA) stage description. +//! The input assembler collects raw vertex and index data. + +use crate::{format, IndexType}; + +/// Shader binding location. +pub type Location = u32; +/// Index of a vertex buffer. +pub type BufferIndex = u32; +/// Offset of an attribute from the start of the buffer, in bytes +pub type ElemOffset = u32; +/// Offset between attribute values, in bytes +pub type ElemStride = u32; +/// Number of instances between each advancement of the vertex buffer. 
+pub type InstanceRate = u8; +/// Number of vertices in a patch +pub type PatchSize = u8; + +/// The rate at which to advance input data to shaders for the given buffer +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum VertexInputRate { + /// Advance the buffer after every vertex + Vertex, + /// Advance the buffer after every instance + Instance(InstanceRate), +} + +impl VertexInputRate { + /// Get the numeric representation of the rate + pub fn as_uint(&self) -> u8 { + match *self { + VertexInputRate::Vertex => 0, + VertexInputRate::Instance(divisor) => divisor, + } + } +} + +/// A struct element descriptor. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Element { + /// Element format + pub format: F, + /// Offset from the beginning of the container, in bytes + pub offset: ElemOffset, +} + +/// Vertex buffer description. Notably, completely separate from resource `Descriptor`s +/// used in `DescriptorSet`s. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct VertexBufferDesc { + /// Binding number of this vertex buffer. This binding number is + /// used only for vertex buffers, and is completely separate from + /// `Descriptor` and `DescriptorSet` bind points. + pub binding: BufferIndex, + /// Total container size, in bytes. + /// Specifies the byte distance between two consecutive elements. + pub stride: ElemStride, + /// The rate at which to advance data for the given buffer + /// + /// i.e. the rate at which data passed to shaders will get advanced by + /// `stride` bytes + pub rate: VertexInputRate, +} + +/// Vertex attribute description. Notably, completely separate from resource `Descriptor`s +/// used in `DescriptorSet`s. 
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct AttributeDesc { + /// Attribute binding location in the shader. Attribute locations are + /// shared between all vertex buffers in a pipeline, meaning that even if the + /// data for this attribute comes from a different vertex buffer, it still cannot + /// share the same location with another attribute. + pub location: Location, + /// Binding number of the associated vertex buffer. + pub binding: BufferIndex, + /// Attribute element description. + pub element: Element, +} + +/// Describes the type of geometric primitives, +/// created from vertex data. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[repr(u8)] +pub enum Primitive { + /// Each vertex represents a single point. + PointList, + /// Each pair of vertices represent a single line segment. For example, with `[a, b, c, d, + /// e]`, `a` and `b` form a line, `c` and `d` form a line, and `e` is discarded. + LineList, + /// Every two consecutive vertices represent a single line segment. Visually forms a "path" of + /// lines, as they are all connected. For example, with `[a, b, c]`, `a` and `b` form a line + /// line, and `b` and `c` form a line. + LineStrip, + /// Each triplet of vertices represent a single triangle. For example, with `[a, b, c, d, e]`, + /// `a`, `b`, and `c` form a triangle, `d` and `e` are discarded. + TriangleList, + /// Every three consecutive vertices represent a single triangle. For example, with `[a, b, c, + /// d]`, `a`, `b`, and `c` form a triangle, and `b`, `c`, and `d` form a triangle. + TriangleStrip, + /// Patch list, + /// used with shaders capable of producing primitives on their own (tessellation) + PatchList(PatchSize), +} + +/// All the information needed to create an input assembler. 
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct InputAssemblerDesc { + /// Type of the primitive + pub primitive: Primitive, + /// When adjacency information is enabled, every even-numbered vertex + /// (every other starting from the first) represents an additional + /// vertex for the primitive, while odd-numbered vertices (every other starting from the + /// second) represent adjacent vertices. + /// + /// For example, with `[a, b, c, d, e, f, g, h]`, `[a, c, + /// e, g]` form a triangle strip, and `[b, d, f, h]` are the adjacent vertices, where `b`, `d`, + /// and `f` are adjacent to the first triangle in the strip, and `d`, `f`, and `h` are adjacent + /// to the second. + pub with_adjacency: bool, + /// Describes whether or not primitive restart is supported for + /// an input assembler. Primitive restart is a feature that + /// allows a mark to be placed in an index buffer where it is + /// is "broken" into multiple pieces of geometry. + /// + /// See + /// for more detail. + pub restart_index: Option, +} + +impl InputAssemblerDesc { + /// Create a new IA descriptor without primitive restart or adjucency. + pub fn new(primitive: Primitive) -> Self { + InputAssemblerDesc { + primitive, + with_adjacency: false, + restart_index: None, + } + } +} diff --git a/third_party/rust/gfx-hal/src/pso/mod.rs b/third_party/rust/gfx-hal/src/pso/mod.rs old mode 100755 new mode 100644 index 882ad9194c64..7d52137be56b --- a/third_party/rust/gfx-hal/src/pso/mod.rs +++ b/third_party/rust/gfx-hal/src/pso/mod.rs @@ -1,301 +1,311 @@ -//! Raw Pipeline State Objects -//! -//! This module contains items used to create and manage Pipelines. 
- -use crate::{device, pass, Backend}; -use std::{fmt, io, slice}; - -mod compute; -mod descriptor; -mod graphics; -mod input_assembler; -mod output_merger; -mod specialization; - -pub use self::{ - compute::*, - descriptor::*, - graphics::*, - input_assembler::*, - output_merger::*, - specialization::*, -}; - -/// Error types happening upon PSO creation on the device side. -#[derive(Clone, Debug, PartialEq)] -pub enum CreationError { - /// Unknown other error. - Other, - /// Invalid subpass (not part of renderpass). - InvalidSubpass(pass::SubpassId), - /// Shader compilation error. - Shader(device::ShaderError), - /// Out of either host or device memory. - OutOfMemory(device::OutOfMemory), -} - -impl From for CreationError { - fn from(err: device::OutOfMemory) -> Self { - CreationError::OutOfMemory(err) - } -} - -impl std::fmt::Display for CreationError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - CreationError::OutOfMemory(err) => write!(fmt, "Failed to create pipeline: {}", err), - CreationError::Other => write!(fmt, "Failed to create pipeline: Unsupported usage: Implementation specific error occurred"), - CreationError::InvalidSubpass(subpass) => write!(fmt, "Failed to create pipeline: Invalid subpass: {}", subpass), - CreationError::Shader(err) => write!(fmt, "Failed to create pipeline: {}", err), - } - } -} - -bitflags!( - /// Stages of the logical pipeline. - /// - /// The pipeline is structured by the ordering of the flags. - /// Some stages are queue type dependent. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct PipelineStage: u32 { - /// Beginning of the command queue. - const TOP_OF_PIPE = 0x1; - /// Indirect data consumption. - const DRAW_INDIRECT = 0x2; - /// Vertex data consumption. - const VERTEX_INPUT = 0x4; - /// Vertex shader execution. - const VERTEX_SHADER = 0x8; - /// Hull shader execution. - const HULL_SHADER = 0x10; - /// Domain shader execution. 
- const DOMAIN_SHADER = 0x20; - /// Geometry shader execution. - const GEOMETRY_SHADER = 0x40; - /// Fragment shader execution. - const FRAGMENT_SHADER = 0x80; - /// Stage of early depth and stencil test. - const EARLY_FRAGMENT_TESTS = 0x100; - /// Stage of late depth and stencil test. - const LATE_FRAGMENT_TESTS = 0x200; - /// Stage of final color value calculation. - const COLOR_ATTACHMENT_OUTPUT = 0x400; - /// Compute shader execution, - const COMPUTE_SHADER = 0x800; - /// Copy/Transfer command execution. - const TRANSFER = 0x1000; - /// End of the command queue. - const BOTTOM_OF_PIPE = 0x2000; - /// Read/Write access from host. - /// (Not a real pipeline stage) - const HOST = 0x4000; - } -); - -bitflags!( - /// Combination of different shader pipeline stages. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct ShaderStageFlags: u32 { - /// Vertex shader stage. - const VERTEX = 0x1; - /// Hull (tessellation) shader stage. - const HULL = 0x2; - /// Domain (tessellation) shader stage. - const DOMAIN = 0x4; - /// Geometry shader stage. - const GEOMETRY = 0x8; - /// Fragment shader stage. - const FRAGMENT = 0x10; - /// Compute shader stage. - const COMPUTE = 0x20; - /// All graphics pipeline shader stages. - const GRAPHICS = Self::VERTEX.bits | Self::HULL.bits | - Self::DOMAIN.bits | Self::GEOMETRY.bits | Self::FRAGMENT.bits; - /// All shader stages (matches Vulkan). - const ALL = 0x7FFFFFFF; - } -); - -// Note: this type is only needed for backends, not used anywhere within gfx_hal. -/// Which program stage this shader represents. 
-#[allow(missing_docs)] -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[repr(u8)] -pub enum Stage { - Vertex, - Hull, - Domain, - Geometry, - Fragment, - Compute, -} - -impl From for ShaderStageFlags { - fn from(stage: Stage) -> Self { - match stage { - Stage::Vertex => ShaderStageFlags::VERTEX, - Stage::Hull => ShaderStageFlags::HULL, - Stage::Domain => ShaderStageFlags::DOMAIN, - Stage::Geometry => ShaderStageFlags::GEOMETRY, - Stage::Fragment => ShaderStageFlags::FRAGMENT, - Stage::Compute => ShaderStageFlags::COMPUTE, - } - } -} - -impl fmt::Display for Stage { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match self { - Stage::Vertex => "vertex", - Stage::Hull => "hull", - Stage::Domain => "domain", - Stage::Geometry => "geometry", - Stage::Fragment => "fragment", - Stage::Compute => "compute", - }) - } -} - -/// Shader entry point. -#[derive(Debug)] -pub struct EntryPoint<'a, B: Backend> { - /// Entry point name. - pub entry: &'a str, - /// Shader module reference. - pub module: &'a B::ShaderModule, - /// Specialization. - pub specialization: Specialization<'a>, -} - -impl<'a, B: Backend> Clone for EntryPoint<'a, B> { - fn clone(&self) -> Self { - EntryPoint { - entry: self.entry, - module: self.module, - specialization: self.specialization.clone(), - } - } -} - -bitflags!( - /// Pipeline creation flags. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct PipelineCreationFlags: u32 { - /// Disable pipeline optimizations. - /// - /// May speedup pipeline creation. - const DISABLE_OPTIMIZATION = 0x1; - /// Allow derivatives (children) of the pipeline. - /// - /// Must be set when pipelines set the pipeline as base. - const ALLOW_DERIVATIVES = 0x2; - } -); - -/// A reference to a parent pipeline. 
The assumption is that -/// a parent and derivative/child pipeline have most settings -/// in common, and one may be switched for another more quickly -/// than entirely unrelated pipelines would be. -#[derive(Debug)] -pub enum BasePipeline<'a, P: 'a> { - /// Referencing an existing pipeline as parent. - Pipeline(&'a P), - /// A pipeline in the same create pipelines call. - /// - /// The index of the parent must be lower than the index of the child. - Index(usize), - /// No parent pipeline exists. - None, -} - -/// Pipeline state which may be static or dynamic. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum State { - /// Static state that cannot be altered. - Static(T), - /// Dynamic state set through a command buffer. - Dynamic, -} - -impl State { - /// Returns the static value or a default. - pub fn static_or(self, default: T) -> T { - match self { - State::Static(v) => v, - State::Dynamic => default, - } - } - - /// Whether the state is static. - pub fn is_static(self) -> bool { - match self { - State::Static(_) => true, - State::Dynamic => false, - } - } - - /// Whether the state is dynamic. - pub fn is_dynamic(self) -> bool { - !self.is_static() - } -} - - -/// Safely read SPIR-V -/// -/// Converts to native endianness and returns correctly aligned storage without unnecessary -/// copying. Returns an `InvalidData` error if the input is trivially not SPIR-V. -/// -/// This function can also be used to convert an already in-memory `&[u8]` to a valid `Vec`, -/// but prefer working with `&[u32]` from the start whenever possible. -/// -/// # Examples -/// ```no_run -/// let mut file = std::fs::File::open("/path/to/shader.spv").unwrap(); -/// let words = gfx_hal::pso::read_spirv(&mut file).unwrap(); -/// ``` -/// ``` -/// const SPIRV: &[u8] = &[ -/// 0x03, 0x02, 0x23, 0x07, // ... 
-/// ]; -/// let words = gfx_hal::pso::read_spirv(std::io::Cursor::new(&SPIRV[..])).unwrap(); -/// ``` -pub fn read_spirv(mut x: R) -> io::Result> { - let size = x.seek(io::SeekFrom::End(0))?; - if size % 4 != 0 { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "input length not divisible by 4", - )); - } - if size > usize::max_value() as u64 { - return Err(io::Error::new(io::ErrorKind::InvalidData, "input too long")); - } - let words = (size / 4) as usize; - let mut result = Vec::::with_capacity(words); - x.seek(io::SeekFrom::Start(0))?; - unsafe { - // Writing all bytes through a pointer with less strict alignment when our type has no - // invalid bitpatterns is safe. - x.read_exact(slice::from_raw_parts_mut( - result.as_mut_ptr() as *mut u8, - words * 4, - ))?; - result.set_len(words); - } - const MAGIC_NUMBER: u32 = 0x07230203; - if result.len() > 0 && result[0] == MAGIC_NUMBER.swap_bytes() { - for word in &mut result { - *word = word.swap_bytes(); - } - } - if result.len() == 0 || result[0] != MAGIC_NUMBER { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "input missing SPIR-V magic number", - )); - } - Ok(result) -} +//! Raw Pipeline State Objects +//! +//! This module contains items used to create and manage Pipelines. + +use crate::{device, pass, Backend}; +use std::{fmt, io, slice}; + +mod compute; +mod descriptor; +mod graphics; +mod input_assembler; +mod output_merger; +mod specialization; + +pub use self::{ + compute::*, + descriptor::*, + graphics::*, + input_assembler::*, + output_merger::*, + specialization::*, +}; + +/// Error types happening upon PSO creation on the device side. +#[derive(Clone, Debug, PartialEq)] +pub enum CreationError { + /// Unknown other error. + Other, + /// Invalid subpass (not part of renderpass). + InvalidSubpass(pass::SubpassId), + /// Shader compilation error. + Shader(device::ShaderError), + /// Out of either host or device memory. 
+ OutOfMemory(device::OutOfMemory), +} + +impl From for CreationError { + fn from(err: device::OutOfMemory) -> Self { + CreationError::OutOfMemory(err) + } +} + +impl std::fmt::Display for CreationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CreationError::OutOfMemory(err) => write!(fmt, "Failed to create pipeline: {}", err), + CreationError::Other => write!(fmt, "Failed to create pipeline: Unsupported usage: Implementation specific error occurred"), + CreationError::InvalidSubpass(subpass) => write!(fmt, "Failed to create pipeline: Invalid subpass: {}", subpass), + CreationError::Shader(err) => write!(fmt, "Failed to create pipeline: {}", err), + } + } +} + +impl std::error::Error for CreationError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + CreationError::OutOfMemory(err) => Some(err), + CreationError::Shader(err) => Some(err), + CreationError::InvalidSubpass(_) => None, + CreationError::Other => None, + } + } +} + +bitflags!( + /// Stages of the logical pipeline. + /// + /// The pipeline is structured by the ordering of the flags. + /// Some stages are queue type dependent. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct PipelineStage: u32 { + /// Beginning of the command queue. + const TOP_OF_PIPE = 0x1; + /// Indirect data consumption. + const DRAW_INDIRECT = 0x2; + /// Vertex data consumption. + const VERTEX_INPUT = 0x4; + /// Vertex shader execution. + const VERTEX_SHADER = 0x8; + /// Hull shader execution. + const HULL_SHADER = 0x10; + /// Domain shader execution. + const DOMAIN_SHADER = 0x20; + /// Geometry shader execution. + const GEOMETRY_SHADER = 0x40; + /// Fragment shader execution. + const FRAGMENT_SHADER = 0x80; + /// Stage of early depth and stencil test. + const EARLY_FRAGMENT_TESTS = 0x100; + /// Stage of late depth and stencil test. + const LATE_FRAGMENT_TESTS = 0x200; + /// Stage of final color value calculation. 
+ const COLOR_ATTACHMENT_OUTPUT = 0x400; + /// Compute shader execution, + const COMPUTE_SHADER = 0x800; + /// Copy/Transfer command execution. + const TRANSFER = 0x1000; + /// End of the command queue. + const BOTTOM_OF_PIPE = 0x2000; + /// Read/Write access from host. + /// (Not a real pipeline stage) + const HOST = 0x4000; + } +); + +bitflags!( + /// Combination of different shader pipeline stages. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct ShaderStageFlags: u32 { + /// Vertex shader stage. + const VERTEX = 0x1; + /// Hull (tessellation) shader stage. + const HULL = 0x2; + /// Domain (tessellation) shader stage. + const DOMAIN = 0x4; + /// Geometry shader stage. + const GEOMETRY = 0x8; + /// Fragment shader stage. + const FRAGMENT = 0x10; + /// Compute shader stage. + const COMPUTE = 0x20; + /// All graphics pipeline shader stages. + const GRAPHICS = Self::VERTEX.bits | Self::HULL.bits | + Self::DOMAIN.bits | Self::GEOMETRY.bits | Self::FRAGMENT.bits; + /// All shader stages (matches Vulkan). + const ALL = 0x7FFFFFFF; + } +); + +// Note: this type is only needed for backends, not used anywhere within gfx_hal. +/// Which program stage this shader represents. 
+#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[repr(u8)] +pub enum Stage { + Vertex, + Hull, + Domain, + Geometry, + Fragment, + Compute, +} + +impl From for ShaderStageFlags { + fn from(stage: Stage) -> Self { + match stage { + Stage::Vertex => ShaderStageFlags::VERTEX, + Stage::Hull => ShaderStageFlags::HULL, + Stage::Domain => ShaderStageFlags::DOMAIN, + Stage::Geometry => ShaderStageFlags::GEOMETRY, + Stage::Fragment => ShaderStageFlags::FRAGMENT, + Stage::Compute => ShaderStageFlags::COMPUTE, + } + } +} + +impl fmt::Display for Stage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match self { + Stage::Vertex => "vertex", + Stage::Hull => "hull", + Stage::Domain => "domain", + Stage::Geometry => "geometry", + Stage::Fragment => "fragment", + Stage::Compute => "compute", + }) + } +} + +/// Shader entry point. +#[derive(Debug)] +pub struct EntryPoint<'a, B: Backend> { + /// Entry point name. + pub entry: &'a str, + /// Shader module reference. + pub module: &'a B::ShaderModule, + /// Specialization. + pub specialization: Specialization<'a>, +} + +impl<'a, B: Backend> Clone for EntryPoint<'a, B> { + fn clone(&self) -> Self { + EntryPoint { + entry: self.entry, + module: self.module, + specialization: self.specialization.clone(), + } + } +} + +bitflags!( + /// Pipeline creation flags. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct PipelineCreationFlags: u32 { + /// Disable pipeline optimizations. + /// + /// May speedup pipeline creation. + const DISABLE_OPTIMIZATION = 0x1; + /// Allow derivatives (children) of the pipeline. + /// + /// Must be set when pipelines set the pipeline as base. + const ALLOW_DERIVATIVES = 0x2; + } +); + +/// A reference to a parent pipeline. 
The assumption is that +/// a parent and derivative/child pipeline have most settings +/// in common, and one may be switched for another more quickly +/// than entirely unrelated pipelines would be. +#[derive(Debug)] +pub enum BasePipeline<'a, P: 'a> { + /// Referencing an existing pipeline as parent. + Pipeline(&'a P), + /// A pipeline in the same create pipelines call. + /// + /// The index of the parent must be lower than the index of the child. + Index(usize), + /// No parent pipeline exists. + None, +} + +/// Pipeline state which may be static or dynamic. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum State { + /// Static state that cannot be altered. + Static(T), + /// Dynamic state set through a command buffer. + Dynamic, +} + +impl State { + /// Returns the static value or a default. + pub fn static_or(self, default: T) -> T { + match self { + State::Static(v) => v, + State::Dynamic => default, + } + } + + /// Whether the state is static. + pub fn is_static(self) -> bool { + match self { + State::Static(_) => true, + State::Dynamic => false, + } + } + + /// Whether the state is dynamic. + pub fn is_dynamic(self) -> bool { + !self.is_static() + } +} + +/// Safely read SPIR-V +/// +/// Converts to native endianness and returns correctly aligned storage without unnecessary +/// copying. Returns an `InvalidData` error if the input is trivially not SPIR-V. +/// +/// This function can also be used to convert an already in-memory `&[u8]` to a valid `Vec`, +/// but prefer working with `&[u32]` from the start whenever possible. +/// +/// # Examples +/// ```no_run +/// let mut file = std::fs::File::open("/path/to/shader.spv").unwrap(); +/// let words = gfx_hal::pso::read_spirv(&mut file).unwrap(); +/// ``` +/// ``` +/// const SPIRV: &[u8] = &[ +/// 0x03, 0x02, 0x23, 0x07, // ... 
+/// ]; +/// let words = gfx_hal::pso::read_spirv(std::io::Cursor::new(&SPIRV[..])).unwrap(); +/// ``` +pub fn read_spirv(mut x: R) -> io::Result> { + let size = x.seek(io::SeekFrom::End(0))?; + if size % 4 != 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "input length not divisible by 4", + )); + } + if size > usize::max_value() as u64 { + return Err(io::Error::new(io::ErrorKind::InvalidData, "input too long")); + } + let words = (size / 4) as usize; + let mut result = Vec::::with_capacity(words); + x.seek(io::SeekFrom::Start(0))?; + unsafe { + // Writing all bytes through a pointer with less strict alignment when our type has no + // invalid bitpatterns is safe. + x.read_exact(slice::from_raw_parts_mut( + result.as_mut_ptr() as *mut u8, + words * 4, + ))?; + result.set_len(words); + } + const MAGIC_NUMBER: u32 = 0x07230203; + if result.len() > 0 && result[0] == MAGIC_NUMBER.swap_bytes() { + for word in &mut result { + *word = word.swap_bytes(); + } + } + if result.len() == 0 || result[0] != MAGIC_NUMBER { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "input missing SPIR-V magic number", + )); + } + Ok(result) +} diff --git a/third_party/rust/gfx-hal/src/pso/output_merger.rs b/third_party/rust/gfx-hal/src/pso/output_merger.rs old mode 100755 new mode 100644 index 906daca7c667..6f8e41874ed9 --- a/third_party/rust/gfx-hal/src/pso/output_merger.rs +++ b/third_party/rust/gfx-hal/src/pso/output_merger.rs @@ -1,362 +1,359 @@ -//! Output Merger (OM) stage description. -//! The final stage in a pipeline that creates pixel colors from -//! the input shader results, depth/stencil information, etc. - -use crate::pso::{ - graphics::StencilValue, - State, -}; - -/// A pixel-wise comparison function. 
-#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum Comparison { - /// `false` - Never = 0, - /// `x < y` - Less = 1, - /// `x == y` - Equal = 2, - /// `x <= y` - LessEqual = 3, - /// `x > y` - Greater = 4, - /// `x != y` - NotEqual = 5, - /// `x >= y` - GreaterEqual = 6, - /// `true` - Always = 7, -} - -bitflags!( - /// Target output color mask. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct ColorMask: u8 { - /// Red mask - const RED = 0x1; - /// Green mask - const GREEN = 0x2; - /// Blue mask - const BLUE = 0x4; - /// Alpha channel mask - const ALPHA = 0x8; - /// Mask for RGB channels - const COLOR = 0x7; - /// Mask all channels - const ALL = 0xF; - /// Mask no channels. - const NONE = 0x0; - } -); - -impl Default for ColorMask { - fn default() -> Self { - Self::ALL - } -} - -/// Defines the possible blending factors. -/// During blending, the source or destination fragment may be -/// multiplied by a factor to produce the final result. -#[allow(missing_docs)] -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum Factor { - Zero = 0, - One = 1, - SrcColor = 2, - OneMinusSrcColor = 3, - DstColor = 4, - OneMinusDstColor = 5, - SrcAlpha = 6, - OneMinusSrcAlpha = 7, - DstAlpha = 8, - OneMinusDstAlpha = 9, - ConstColor = 10, - OneMinusConstColor = 11, - ConstAlpha = 12, - OneMinusConstAlpha = 13, - SrcAlphaSaturate = 14, - Src1Color = 15, - OneMinusSrc1Color = 16, - Src1Alpha = 17, - OneMinusSrc1Alpha = 18, -} - -/// Blending operations. -#[allow(missing_docs)] -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum BlendOp { - /// Adds source and destination. - /// Source and destination are multiplied by factors before addition. 
- Add { src: Factor, dst: Factor }, - /// Subtracts destination from source. - /// Source and destination are multiplied by factors before subtraction. - Sub { src: Factor, dst: Factor }, - /// Subtracts source from destination. - /// Source and destination are multiplied by factors before subtraction. - RevSub { src: Factor, dst: Factor }, - /// Component-wise minimum value of source and destination. - Min, - /// Component-wise maximum value of source and destination. - Max, -} - -impl BlendOp { - /// Replace the destination value with the source. - pub const REPLACE: Self = BlendOp::Add { - src: Factor::One, - dst: Factor::Zero, - }; - /// Add the source and destination together. - pub const ADD: Self = BlendOp::Add { - src: Factor::One, - dst: Factor::One, - }; - /// Alpha blend the source and destination together. - pub const ALPHA: Self = BlendOp::Add { - src: Factor::SrcAlpha, - dst: Factor::OneMinusSrcAlpha, - }; - /// Alpha blend a premultiplied-alpha source with the destination. - pub const PREMULTIPLIED_ALPHA: Self = BlendOp::Add { - src: Factor::One, - dst: Factor::OneMinusSrcAlpha, - }; -} - -/// Specifies whether to use blending, and if so, -/// which operations to use for color and alpha channels. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct BlendState { - /// The blend operation to use for the color channels. - pub color: BlendOp, - /// The blend operation to use for the alpha channel. - pub alpha: BlendOp, -} - -impl BlendState { - /// Replace the color. - pub const REPLACE: Self = BlendState { - color: BlendOp::REPLACE, - alpha: BlendOp::REPLACE, - }; - /// Additive blending. - pub const ADD: Self = BlendState { - color: BlendOp::ADD, - alpha: BlendOp::ADD, - }; - /// Multiplicative blending. 
- pub const MULTIPLY: Self = BlendState { - color: BlendOp::Add { - src: Factor::Zero, - dst: Factor::SrcColor, - }, - alpha: BlendOp::Add { - src: Factor::Zero, - dst: Factor::SrcAlpha, - }, - }; - /// Alpha blending. - pub const ALPHA: Self = BlendState { - color: BlendOp::ALPHA, - alpha: BlendOp::PREMULTIPLIED_ALPHA, - }; - /// Pre-multiplied alpha blending. - pub const PREMULTIPLIED_ALPHA: Self = BlendState { - color: BlendOp::PREMULTIPLIED_ALPHA, - alpha: BlendOp::PREMULTIPLIED_ALPHA, - }; -} - -/// PSO color target descriptor. -#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct ColorBlendDesc { - /// Color write mask. - pub mask: ColorMask, - /// Blend state, if enabled. - pub blend: Option, -} - -impl ColorBlendDesc { - /// Empty blend descriptor just writes out the color without blending. - // this can be used because `Default::default()` isn't a const function... - pub const EMPTY: Self = ColorBlendDesc { - mask: ColorMask::ALL, - blend: None, - }; -} - -/// Depth test state. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct DepthTest { - /// Comparison function to use. - pub fun: Comparison, - /// Specify whether to write to the depth buffer or not. - pub write: bool, -} - -impl DepthTest { - /// A depth test that always fails. - pub const FAIL: Self = DepthTest { - fun: Comparison::Never, - write: false, - }; - /// A depth test that always succeeds but doesn't - /// write to the depth buffer - // DOC TODO: Not a terribly helpful description there... - pub const PASS_TEST: Self = DepthTest { - fun: Comparison::Always, - write: false, - }; - /// A depth test that always succeeds and writes its result - /// to the depth buffer. - pub const PASS_WRITE: Self = DepthTest { - fun: Comparison::Always, - write: true, - }; -} - -/// The operation to use for stencil masking. 
-#[repr(u8)] -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum StencilOp { - /// Keep the current value in the stencil buffer (no change). - Keep = 0, - /// Set the value in the stencil buffer to zero. - Zero = 1, - /// Set the stencil buffer value to `reference` from `StencilFace`. - Replace = 2, - /// Increment the stencil buffer value, clamping to its maximum value. - IncrementClamp = 3, - /// Decrement the stencil buffer value, clamping to its minimum value. - DecrementClamp = 4, - /// Bitwise invert the current value in the stencil buffer. - Invert = 5, - /// Increment the stencil buffer value, wrapping around to 0 on overflow. - IncrementWrap = 6, - /// Decrement the stencil buffer value, wrapping around to the maximum value on overflow. - DecrementWrap = 7, -} - -/// Complete stencil state for a given side of a face. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct StencilFace { - /// Comparison function to use to determine if the stencil test passes. - pub fun: Comparison, - /// What operation to do if the stencil test fails. - pub op_fail: StencilOp, - /// What operation to do if the stencil test passes but the depth test fails. - pub op_depth_fail: StencilOp, - /// What operation to do if both the depth and stencil test pass. - pub op_pass: StencilOp, -} - -impl Default for StencilFace { - fn default() -> StencilFace { - StencilFace { - fun: Comparison::Never, - op_fail: StencilOp::Keep, - op_depth_fail: StencilOp::Keep, - op_pass: StencilOp::Keep, - } - } -} - -/// A generic struct holding the properties of two sides of a polygon. -#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Sided { - /// Information about the front face. - pub front: T, - /// Information about the back face. 
- pub back: T, -} - -impl Sided { - /// Create a new `Sided` structure with both `front` and `back` holding - /// the same value. - pub fn new(value: T) -> Self { - Sided { - front: value, - back: value, - } - } -} - -/// Pair of stencil values that could be either baked into a graphics pipeline -/// or provided dynamically. -pub type StencilValues = State>; - -/// Defines a stencil test. Stencil testing is an operation -/// performed to cull fragments; -/// the new fragment is tested against the value held in the -/// stencil buffer, and if the test fails the fragment is -/// discarded. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct StencilTest { - /// Operations for stencil faces. - pub faces: Sided, - /// Masks that are ANDd with both the stencil buffer value and the reference value when they - /// are read before doing the stencil test. - pub read_masks: StencilValues, - /// Mask that are ANDd with the stencil value before writing to the stencil buffer. - pub write_masks: StencilValues, - /// The reference values used for stencil tests. - pub reference_values: StencilValues, -} - -impl Default for StencilTest { - fn default() -> Self { - StencilTest { - faces: Sided::default(), - read_masks: State::Static(Sided::new(!0)), - write_masks: State::Static(Sided::new(!0)), - reference_values: State::Static(Sided::new(0)), - } - } -} - -/// PSO depth-stencil target descriptor. -#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct DepthStencilDesc { - /// Optional depth testing/writing. - pub depth: Option, - /// Enable depth bounds testing. - pub depth_bounds: bool, - /// Stencil test/write. - pub stencil: Option, -} - -impl DepthStencilDesc { - /// Returns true if the descriptor assumes the depth attachment. 
- pub fn uses_depth(&self) -> bool { - self.depth.is_some() || self.depth_bounds - } - /// Returns true if the descriptor assumes the stencil attachment. - pub fn uses_stencil(&self) -> bool { - self.stencil.is_some() - } -} - -bitflags!( - /// Face. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct Face: u32 { - /// Empty face. TODO: remove when constexpr are stabilized to use empty() - const NONE = 0x0; - /// Front face. - const FRONT = 0x1; - /// Back face. - const BACK = 0x2; - } -); +//! Output Merger (OM) stage description. +//! The final stage in a pipeline that creates pixel colors from +//! the input shader results, depth/stencil information, etc. + +use crate::pso::{graphics::StencilValue, State}; + +/// A pixel-wise comparison function. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Comparison { + /// `false` + Never = 0, + /// `x < y` + Less = 1, + /// `x == y` + Equal = 2, + /// `x <= y` + LessEqual = 3, + /// `x > y` + Greater = 4, + /// `x != y` + NotEqual = 5, + /// `x >= y` + GreaterEqual = 6, + /// `true` + Always = 7, +} + +bitflags!( + /// Target output color mask. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct ColorMask: u8 { + /// Red mask + const RED = 0x1; + /// Green mask + const GREEN = 0x2; + /// Blue mask + const BLUE = 0x4; + /// Alpha channel mask + const ALPHA = 0x8; + /// Mask for RGB channels + const COLOR = 0x7; + /// Mask all channels + const ALL = 0xF; + /// Mask no channels. + const NONE = 0x0; + } +); + +impl Default for ColorMask { + fn default() -> Self { + Self::ALL + } +} + +/// Defines the possible blending factors. +/// During blending, the source or destination fragment may be +/// multiplied by a factor to produce the final result. 
+#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Factor { + Zero = 0, + One = 1, + SrcColor = 2, + OneMinusSrcColor = 3, + DstColor = 4, + OneMinusDstColor = 5, + SrcAlpha = 6, + OneMinusSrcAlpha = 7, + DstAlpha = 8, + OneMinusDstAlpha = 9, + ConstColor = 10, + OneMinusConstColor = 11, + ConstAlpha = 12, + OneMinusConstAlpha = 13, + SrcAlphaSaturate = 14, + Src1Color = 15, + OneMinusSrc1Color = 16, + Src1Alpha = 17, + OneMinusSrc1Alpha = 18, +} + +/// Blending operations. +#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum BlendOp { + /// Adds source and destination. + /// Source and destination are multiplied by factors before addition. + Add { src: Factor, dst: Factor }, + /// Subtracts destination from source. + /// Source and destination are multiplied by factors before subtraction. + Sub { src: Factor, dst: Factor }, + /// Subtracts source from destination. + /// Source and destination are multiplied by factors before subtraction. + RevSub { src: Factor, dst: Factor }, + /// Component-wise minimum value of source and destination. + Min, + /// Component-wise maximum value of source and destination. + Max, +} + +impl BlendOp { + /// Replace the destination value with the source. + pub const REPLACE: Self = BlendOp::Add { + src: Factor::One, + dst: Factor::Zero, + }; + /// Add the source and destination together. + pub const ADD: Self = BlendOp::Add { + src: Factor::One, + dst: Factor::One, + }; + /// Alpha blend the source and destination together. + pub const ALPHA: Self = BlendOp::Add { + src: Factor::SrcAlpha, + dst: Factor::OneMinusSrcAlpha, + }; + /// Alpha blend a premultiplied-alpha source with the destination. 
+ pub const PREMULTIPLIED_ALPHA: Self = BlendOp::Add { + src: Factor::One, + dst: Factor::OneMinusSrcAlpha, + }; +} + +/// Specifies whether to use blending, and if so, +/// which operations to use for color and alpha channels. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct BlendState { + /// The blend operation to use for the color channels. + pub color: BlendOp, + /// The blend operation to use for the alpha channel. + pub alpha: BlendOp, +} + +impl BlendState { + /// Replace the color. + pub const REPLACE: Self = BlendState { + color: BlendOp::REPLACE, + alpha: BlendOp::REPLACE, + }; + /// Additive blending. + pub const ADD: Self = BlendState { + color: BlendOp::ADD, + alpha: BlendOp::ADD, + }; + /// Multiplicative blending. + pub const MULTIPLY: Self = BlendState { + color: BlendOp::Add { + src: Factor::Zero, + dst: Factor::SrcColor, + }, + alpha: BlendOp::Add { + src: Factor::Zero, + dst: Factor::SrcAlpha, + }, + }; + /// Alpha blending. + pub const ALPHA: Self = BlendState { + color: BlendOp::ALPHA, + alpha: BlendOp::PREMULTIPLIED_ALPHA, + }; + /// Pre-multiplied alpha blending. + pub const PREMULTIPLIED_ALPHA: Self = BlendState { + color: BlendOp::PREMULTIPLIED_ALPHA, + alpha: BlendOp::PREMULTIPLIED_ALPHA, + }; +} + +/// PSO color target descriptor. +#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ColorBlendDesc { + /// Color write mask. + pub mask: ColorMask, + /// Blend state, if enabled. + pub blend: Option, +} + +impl ColorBlendDesc { + /// Empty blend descriptor just writes out the color without blending. + // this can be used because `Default::default()` isn't a const function... + pub const EMPTY: Self = ColorBlendDesc { + mask: ColorMask::ALL, + blend: None, + }; +} + +/// Depth test state. 
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct DepthTest { + /// Comparison function to use. + pub fun: Comparison, + /// Specify whether to write to the depth buffer or not. + pub write: bool, +} + +impl DepthTest { + /// A depth test that always fails. + pub const FAIL: Self = DepthTest { + fun: Comparison::Never, + write: false, + }; + /// A depth test that always succeeds but doesn't + /// write to the depth buffer + // DOC TODO: Not a terribly helpful description there... + pub const PASS_TEST: Self = DepthTest { + fun: Comparison::Always, + write: false, + }; + /// A depth test that always succeeds and writes its result + /// to the depth buffer. + pub const PASS_WRITE: Self = DepthTest { + fun: Comparison::Always, + write: true, + }; +} + +/// The operation to use for stencil masking. +#[repr(u8)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum StencilOp { + /// Keep the current value in the stencil buffer (no change). + Keep = 0, + /// Set the value in the stencil buffer to zero. + Zero = 1, + /// Set the stencil buffer value to `reference` from `StencilFace`. + Replace = 2, + /// Increment the stencil buffer value, clamping to its maximum value. + IncrementClamp = 3, + /// Decrement the stencil buffer value, clamping to its minimum value. + DecrementClamp = 4, + /// Bitwise invert the current value in the stencil buffer. + Invert = 5, + /// Increment the stencil buffer value, wrapping around to 0 on overflow. + IncrementWrap = 6, + /// Decrement the stencil buffer value, wrapping around to the maximum value on overflow. + DecrementWrap = 7, +} + +/// Complete stencil state for a given side of a face. 
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct StencilFace { + /// Comparison function to use to determine if the stencil test passes. + pub fun: Comparison, + /// What operation to do if the stencil test fails. + pub op_fail: StencilOp, + /// What operation to do if the stencil test passes but the depth test fails. + pub op_depth_fail: StencilOp, + /// What operation to do if both the depth and stencil test pass. + pub op_pass: StencilOp, +} + +impl Default for StencilFace { + fn default() -> StencilFace { + StencilFace { + fun: Comparison::Never, + op_fail: StencilOp::Keep, + op_depth_fail: StencilOp::Keep, + op_pass: StencilOp::Keep, + } + } +} + +/// A generic struct holding the properties of two sides of a polygon. +#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Sided { + /// Information about the front face. + pub front: T, + /// Information about the back face. + pub back: T, +} + +impl Sided { + /// Create a new `Sided` structure with both `front` and `back` holding + /// the same value. + pub fn new(value: T) -> Self { + Sided { + front: value, + back: value, + } + } +} + +/// Pair of stencil values that could be either baked into a graphics pipeline +/// or provided dynamically. +pub type StencilValues = State>; + +/// Defines a stencil test. Stencil testing is an operation +/// performed to cull fragments; +/// the new fragment is tested against the value held in the +/// stencil buffer, and if the test fails the fragment is +/// discarded. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct StencilTest { + /// Operations for stencil faces. 
+ pub faces: Sided, + /// Masks that are ANDd with both the stencil buffer value and the reference value when they + /// are read before doing the stencil test. + pub read_masks: StencilValues, + /// Mask that are ANDd with the stencil value before writing to the stencil buffer. + pub write_masks: StencilValues, + /// The reference values used for stencil tests. + pub reference_values: StencilValues, +} + +impl Default for StencilTest { + fn default() -> Self { + StencilTest { + faces: Sided::default(), + read_masks: State::Static(Sided::new(!0)), + write_masks: State::Static(Sided::new(!0)), + reference_values: State::Static(Sided::new(0)), + } + } +} + +/// PSO depth-stencil target descriptor. +#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct DepthStencilDesc { + /// Optional depth testing/writing. + pub depth: Option, + /// Enable depth bounds testing. + pub depth_bounds: bool, + /// Stencil test/write. + pub stencil: Option, +} + +impl DepthStencilDesc { + /// Returns true if the descriptor assumes the depth attachment. + pub fn uses_depth(&self) -> bool { + self.depth.is_some() || self.depth_bounds + } + /// Returns true if the descriptor assumes the stencil attachment. + pub fn uses_stencil(&self) -> bool { + self.stencil.is_some() + } +} + +bitflags!( + /// Face. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct Face: u32 { + /// Empty face. TODO: remove when constexpr are stabilized to use empty() + const NONE = 0x0; + /// Front face. + const FRONT = 0x1; + /// Back face. + const BACK = 0x2; + } +); diff --git a/third_party/rust/gfx-hal/src/pso/specialization.rs b/third_party/rust/gfx-hal/src/pso/specialization.rs old mode 100755 new mode 100644 index 66e5db020918..e1b54e22011b --- a/third_party/rust/gfx-hal/src/pso/specialization.rs +++ b/third_party/rust/gfx-hal/src/pso/specialization.rs @@ -1,132 +1,132 @@ -//! Pipeline specialization types. 
- -use std::{borrow::Cow, ops::Range, slice}; - -/// Specialization constant for pipelines. -/// -/// Specialization constants allow for easy configuration of -/// multiple similar pipelines. For example, there may be a -/// boolean exposed to the shader that switches the specularity on/off -/// provided via a specialization constant. -/// That would produce separate PSO's for the "on" and "off" states -/// but they share most of the internal stuff and are fast to produce. -/// More importantly, they are fast to execute, since the driver -/// can optimize out the branch on that other PSO creation. -#[derive(Debug, Clone, Hash, PartialEq)] -pub struct SpecializationConstant { - /// Constant identifier in shader source. - pub id: u32, - /// Value to override specialization constant. - pub range: Range, -} - -/// Specialization information structure. -#[derive(Debug, Clone)] -pub struct Specialization<'a> { - /// Constant array. - pub constants: Cow<'a, [SpecializationConstant]>, - /// Raw data. - pub data: Cow<'a, [u8]>, -} - -impl Specialization<'_> { - /// Empty specialization instance. - pub const EMPTY: Self = Specialization { - constants: Cow::Borrowed(&[]), - data: Cow::Borrowed(&[]), - }; -} - -impl Default for Specialization<'_> { - fn default() -> Self { - Specialization::EMPTY - } -} - -#[doc(hidden)] -#[derive(Debug, Default)] -pub struct SpecializationStorage { - constants: Vec, - data: Vec, -} - -/// List of specialization constants. 
-#[doc(hidden)] -pub trait SpecConstList: Sized { - fn fold(self, storage: &mut SpecializationStorage); -} - -impl From for Specialization<'_> -where - T: SpecConstList, -{ - fn from(list: T) -> Self { - let mut storage = SpecializationStorage::default(); - list.fold(&mut storage); - Specialization { - data: Cow::Owned(storage.data), - constants: Cow::Owned(storage.constants), - } - } -} - -#[doc(hidden)] -#[derive(Debug)] -pub struct SpecConstListNil; - -#[doc(hidden)] -#[derive(Debug)] -pub struct SpecConstListCons { - pub head: (u32, H), - pub tail: T, -} - -impl SpecConstList for SpecConstListNil { - fn fold(self, _storage: &mut SpecializationStorage) {} -} - -impl SpecConstList for SpecConstListCons -where - T: SpecConstList, -{ - fn fold(self, storage: &mut SpecializationStorage) { - let size = std::mem::size_of::(); - assert!(storage.data.len() + size <= u16::max_value() as usize); - let offset = storage.data.len() as u16; - storage.data.extend_from_slice(unsafe { - // Inspecting bytes is always safe. - slice::from_raw_parts(&self.head.1 as *const H as *const u8, size) - }); - storage.constants.push(SpecializationConstant { - id: self.head.0, - range: offset .. offset + size as u16, - }); - self.tail.fold(storage) - } -} - -/// Macro for specifying list of specialization constatns for `EntryPoint`. -#[macro_export] -macro_rules! spec_const_list { - (@ $(,)?) => { - $crate::pso::SpecConstListNil - }; - - (@ $head_id:expr => $head_constant:expr $(,$tail_id:expr => $tail_constant:expr)* $(,)?) => { - $crate::pso::SpecConstListCons { - head: ($head_id, $head_constant), - tail: $crate::spec_const_list!(@ $($tail_id:expr => $tail_constant:expr),*), - } - }; - - ($($id:expr => $constant:expr),* $(,)?) => { - $crate::spec_const_list!(@ $($id => $constant),*).into() - }; - - ($($constant:expr),* $(,)?) => { - { - let mut counter = 0; - $crate::spec_const_list!(@ $({ counter += 1; counter - 1 } => $constant),*).into() - } - }; -} +//! Pipeline specialization types. 
+ +use std::{borrow::Cow, ops::Range, slice}; + +/// Specialization constant for pipelines. +/// +/// Specialization constants allow for easy configuration of +/// multiple similar pipelines. For example, there may be a +/// boolean exposed to the shader that switches the specularity on/off +/// provided via a specialization constant. +/// That would produce separate PSO's for the "on" and "off" states +/// but they share most of the internal stuff and are fast to produce. +/// More importantly, they are fast to execute, since the driver +/// can optimize out the branch on that other PSO creation. +#[derive(Debug, Clone, Hash, PartialEq)] +pub struct SpecializationConstant { + /// Constant identifier in shader source. + pub id: u32, + /// Value to override specialization constant. + pub range: Range, +} + +/// Specialization information structure. +#[derive(Debug, Clone)] +pub struct Specialization<'a> { + /// Constant array. + pub constants: Cow<'a, [SpecializationConstant]>, + /// Raw data. + pub data: Cow<'a, [u8]>, +} + +impl Specialization<'_> { + /// Empty specialization instance. + pub const EMPTY: Self = Specialization { + constants: Cow::Borrowed(&[]), + data: Cow::Borrowed(&[]), + }; +} + +impl Default for Specialization<'_> { + fn default() -> Self { + Specialization::EMPTY + } +} + +#[doc(hidden)] +#[derive(Debug, Default)] +pub struct SpecializationStorage { + constants: Vec, + data: Vec, +} + +/// List of specialization constants. 
+#[doc(hidden)] +pub trait SpecConstList: Sized { + fn fold(self, storage: &mut SpecializationStorage); +} + +impl From for Specialization<'_> +where + T: SpecConstList, +{ + fn from(list: T) -> Self { + let mut storage = SpecializationStorage::default(); + list.fold(&mut storage); + Specialization { + data: Cow::Owned(storage.data), + constants: Cow::Owned(storage.constants), + } + } +} + +#[doc(hidden)] +#[derive(Debug)] +pub struct SpecConstListNil; + +#[doc(hidden)] +#[derive(Debug)] +pub struct SpecConstListCons { + pub head: (u32, H), + pub tail: T, +} + +impl SpecConstList for SpecConstListNil { + fn fold(self, _storage: &mut SpecializationStorage) {} +} + +impl SpecConstList for SpecConstListCons +where + T: SpecConstList, +{ + fn fold(self, storage: &mut SpecializationStorage) { + let size = std::mem::size_of::(); + assert!(storage.data.len() + size <= u16::max_value() as usize); + let offset = storage.data.len() as u16; + storage.data.extend_from_slice(unsafe { + // Inspecting bytes is always safe. + slice::from_raw_parts(&self.head.1 as *const H as *const u8, size) + }); + storage.constants.push(SpecializationConstant { + id: self.head.0, + range: offset .. offset + size as u16, + }); + self.tail.fold(storage) + } +} + +/// Macro for specifying list of specialization constatns for `EntryPoint`. +#[macro_export] +macro_rules! spec_const_list { + (@ $(,)?) => { + $crate::pso::SpecConstListNil + }; + + (@ $head_id:expr => $head_constant:expr $(,$tail_id:expr => $tail_constant:expr)* $(,)?) => { + $crate::pso::SpecConstListCons { + head: ($head_id, $head_constant), + tail: $crate::spec_const_list!(@ $($tail_id => $tail_constant),*), + } + }; + + ($($id:expr => $constant:expr),* $(,)?) => { + $crate::spec_const_list!(@ $($id => $constant),*).into() + }; + + ($($constant:expr),* $(,)?) 
=> { + { + let mut counter = 0; + $crate::spec_const_list!(@ $({ counter += 1; counter - 1 } => $constant),*).into() + } + }; +} diff --git a/third_party/rust/gfx-hal/src/query.rs b/third_party/rust/gfx-hal/src/query.rs old mode 100755 new mode 100644 index ff13e322b2a5..6a3f5c564141 --- a/third_party/rust/gfx-hal/src/query.rs +++ b/third_party/rust/gfx-hal/src/query.rs @@ -1,115 +1,117 @@ -//! Queries are commands that can be submitted to a command buffer to record statistics or -//! other useful values as the command buffer is running. They are often intended for profiling -//! or other introspection, providing a mechanism for the command buffer to record data about its -//! operation as it is running. - -use crate::device::OutOfMemory; -use crate::Backend; - -/// A query identifier. -pub type Id = u32; - -/// Query creation error. -#[derive(Clone, Debug, PartialEq)] -pub enum CreationError { - /// Out of either host or device memory. - OutOfMemory(OutOfMemory), - - /// Query type unsupported. - Unsupported(Type), -} - -impl From for CreationError { - fn from(error: OutOfMemory) -> Self { - CreationError::OutOfMemory(error) - } -} - -impl std::fmt::Display for CreationError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - CreationError::OutOfMemory(err) => write!(fmt, "Failed to create query: {}", err), - CreationError::Unsupported(ty) => write!(fmt, "Failed to create query: Unsupported type: {:?}", ty), - } - } -} - -/// A `Query` object has a particular identifier and saves its results to a given `QueryPool`. -/// It is passed as a parameter to the command buffer's query methods. -#[derive(Debug)] -pub struct Query<'a, B: Backend> { - /// - pub pool: &'a B::QueryPool, - /// - pub id: Id, -} - -bitflags!( - /// Query control flags. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct ControlFlags: u32 { - /// Occlusion queries **must** return the exact sampler number. 
- /// - /// Requires `precise_occlusion_query` device feature. - const PRECISE = 0x1; - } -); - -bitflags!( - /// Query result flags. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct ResultFlags: u32 { - /// Results will be written as an array of 64-bit unsigned integer values. - const BITS_64 = 0x1; - /// Wait for each query’s status to become available before retrieving its results. - const WAIT = 0x2; - /// Availability status accompanies the results. - const WITH_AVAILABILITY = 0x4; - /// Returning partial results is acceptable. - const PARTIAL = 0x8; - } -); - -/// Type of queries in a query pool. -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -pub enum Type { - /// Occlusion query. Count the number of drawn samples between - /// the start and end of the query command. - Occlusion, - /// Pipeline statistic query. Counts the number of pipeline stage - /// invocations of the given types between the start and end of - /// the query command. - PipelineStatistics(PipelineStatistic), - /// Timestamp query. Timestamps can be recorded to the - /// query pool by calling `write_timestamp()`. - Timestamp, -} - -bitflags!( - /// Pipeline statistic flags - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct PipelineStatistic: u32 { - /// - const INPUT_ASSEMBLY_VERTICES = 0x1; - /// - const INPUT_ASSEMBLY_PRIMITIVES = 0x2; - /// - const VERTEX_SHADER_INVOCATIONS = 0x4; - /// - const GEOMETRY_SHADER_INVOCATIONS = 0x8; - /// - const GEOMETRY_SHADER_PRIMITIVES = 0x10; - /// - const CLIPPING_INVOCATIONS = 0x20; - /// - const CLIPPING_PRIMITIVES = 0x40; - /// - const FRAGMENT_SHADER_INVOCATIONS = 0x80; - /// - const HULL_SHADER_PATCHES = 0x100; - /// - const DOMAIN_SHADER_INVOCATIONS = 0x200; - /// - const COMPUTE_SHADER_INVOCATIONS = 0x400; - } -); +//! Queries are commands that can be submitted to a command buffer to record statistics or +//! other useful values as the command buffer is running. 
They are often intended for profiling +//! or other introspection, providing a mechanism for the command buffer to record data about its +//! operation as it is running. + +use crate::device::OutOfMemory; +use crate::Backend; + +/// A query identifier. +pub type Id = u32; + +/// Query creation error. +#[derive(Clone, Debug, PartialEq)] +pub enum CreationError { + /// Out of either host or device memory. + OutOfMemory(OutOfMemory), + + /// Query type unsupported. + Unsupported(Type), +} + +impl From for CreationError { + fn from(error: OutOfMemory) -> Self { + CreationError::OutOfMemory(error) + } +} + +impl std::fmt::Display for CreationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CreationError::OutOfMemory(err) => write!(fmt, "Failed to create query: {}", err), + CreationError::Unsupported(ty) => { + write!(fmt, "Failed to create query: Unsupported type: {:?}", ty) + } + } + } +} + +/// A `Query` object has a particular identifier and saves its results to a given `QueryPool`. +/// It is passed as a parameter to the command buffer's query methods. +#[derive(Debug)] +pub struct Query<'a, B: Backend> { + /// + pub pool: &'a B::QueryPool, + /// + pub id: Id, +} + +bitflags!( + /// Query control flags. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct ControlFlags: u32 { + /// Occlusion queries **must** return the exact sampler number. + /// + /// Requires `precise_occlusion_query` device feature. + const PRECISE = 0x1; + } +); + +bitflags!( + /// Query result flags. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct ResultFlags: u32 { + /// Results will be written as an array of 64-bit unsigned integer values. + const BITS_64 = 0x1; + /// Wait for each query’s status to become available before retrieving its results. + const WAIT = 0x2; + /// Availability status accompanies the results. 
+ const WITH_AVAILABILITY = 0x4; + /// Returning partial results is acceptable. + const PARTIAL = 0x8; + } +); + +/// Type of queries in a query pool. +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +pub enum Type { + /// Occlusion query. Count the number of drawn samples between + /// the start and end of the query command. + Occlusion, + /// Pipeline statistic query. Counts the number of pipeline stage + /// invocations of the given types between the start and end of + /// the query command. + PipelineStatistics(PipelineStatistic), + /// Timestamp query. Timestamps can be recorded to the + /// query pool by calling `write_timestamp()`. + Timestamp, +} + +bitflags!( + /// Pipeline statistic flags + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct PipelineStatistic: u32 { + /// + const INPUT_ASSEMBLY_VERTICES = 0x1; + /// + const INPUT_ASSEMBLY_PRIMITIVES = 0x2; + /// + const VERTEX_SHADER_INVOCATIONS = 0x4; + /// + const GEOMETRY_SHADER_INVOCATIONS = 0x8; + /// + const GEOMETRY_SHADER_PRIMITIVES = 0x10; + /// + const CLIPPING_INVOCATIONS = 0x20; + /// + const CLIPPING_PRIMITIVES = 0x40; + /// + const FRAGMENT_SHADER_INVOCATIONS = 0x80; + /// + const HULL_SHADER_PATCHES = 0x100; + /// + const DOMAIN_SHADER_INVOCATIONS = 0x200; + /// + const COMPUTE_SHADER_INVOCATIONS = 0x400; + } +); diff --git a/third_party/rust/gfx-hal/src/queue/family.rs b/third_party/rust/gfx-hal/src/queue/family.rs old mode 100755 new mode 100644 index 5fb36ef071ad..49ad705f3336 --- a/third_party/rust/gfx-hal/src/queue/family.rs +++ b/third_party/rust/gfx-hal/src/queue/family.rs @@ -1,52 +1,52 @@ -//! Queue family and groups. - -use crate::queue::QueueType; -use crate::Backend; - -use std::any::Any; -use std::fmt::Debug; - -/// General information about a queue family, available upon adapter discovery. -/// -/// Note that a backend can expose multiple queue families with the same properties. 
-pub trait QueueFamily: Debug + Any + Send + Sync { - /// Returns the type of queues. - fn queue_type(&self) -> QueueType; - /// Returns maximum number of queues created from this family. - fn max_queues(&self) -> usize; - /// Returns the queue family ID. - fn id(&self) -> QueueFamilyId; -} - -/// Identifier for a queue family of a physical device. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct QueueFamilyId(pub usize); - -/// Bare-metal queue group. -/// -/// Denotes all queues created from one queue family. -#[derive(Debug)] -pub struct QueueGroup { - /// Family index for the queues in this group. - pub family: QueueFamilyId, - /// List of queues. - pub queues: Vec, -} - -impl QueueGroup { - /// Create a new, empty queue group for a queue family. - pub fn new(family: QueueFamilyId) -> Self { - QueueGroup { - family, - queues: Vec::new(), - } - } - - /// Add a command queue to the group. - /// - /// The queue needs to be created from this queue family. - pub fn add_queue(&mut self, queue: B::CommandQueue) { - self.queues.push(queue); - } -} +//! Queue family and groups. + +use crate::queue::QueueType; +use crate::Backend; + +use std::any::Any; +use std::fmt::Debug; + +/// General information about a queue family, available upon adapter discovery. +/// +/// Note that a backend can expose multiple queue families with the same properties. +pub trait QueueFamily: Debug + Any + Send + Sync { + /// Returns the type of queues. + fn queue_type(&self) -> QueueType; + /// Returns maximum number of queues created from this family. + fn max_queues(&self) -> usize; + /// Returns the queue family ID. + fn id(&self) -> QueueFamilyId; +} + +/// Identifier for a queue family of a physical device. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct QueueFamilyId(pub usize); + +/// Bare-metal queue group. 
+/// +/// Denotes all queues created from one queue family. +#[derive(Debug)] +pub struct QueueGroup { + /// Family index for the queues in this group. + pub family: QueueFamilyId, + /// List of queues. + pub queues: Vec, +} + +impl QueueGroup { + /// Create a new, empty queue group for a queue family. + pub fn new(family: QueueFamilyId) -> Self { + QueueGroup { + family, + queues: Vec::new(), + } + } + + /// Add a command queue to the group. + /// + /// The queue needs to be created from this queue family. + pub fn add_queue(&mut self, queue: B::CommandQueue) { + self.queues.push(queue); + } +} diff --git a/third_party/rust/gfx-hal/src/queue/mod.rs b/third_party/rust/gfx-hal/src/queue/mod.rs old mode 100755 new mode 100644 index 1d9c214bf297..defd5c3b5e7a --- a/third_party/rust/gfx-hal/src/queue/mod.rs +++ b/third_party/rust/gfx-hal/src/queue/mod.rs @@ -1,149 +1,149 @@ -//! Command queues. -//! -//! Queues are the execution paths of the graphical processing units. These process -//! submitted commands buffers. -//! -//! There are different types of queues, which can only handle associated command buffers. -//! `CommandQueue` has the capability defined by `C`: graphics, compute and transfer. - -pub mod family; - -use crate::{ - device::OutOfMemory, - pso, - window::{PresentError, PresentationSurface, Suboptimal, SwapImageIndex}, - Backend, -}; -use std::{any::Any, borrow::Borrow, fmt, iter}; - -pub use self::family::{QueueFamily, QueueFamilyId, QueueGroup}; - -/// The type of the queue, an enum encompassing `queue::Capability` -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum QueueType { - /// Supports all operations. - General, - /// Only supports graphics and transfer operations. - Graphics, - /// Only supports compute and transfer operations. - Compute, - /// Only supports transfer operations. - Transfer, -} - -impl QueueType { - /// Returns true if the queue supports graphics operations. 
- pub fn supports_graphics(&self) -> bool { - match *self { - QueueType::General | QueueType::Graphics => true, - QueueType::Compute | QueueType::Transfer => false, - } - } - /// Returns true if the queue supports compute operations. - pub fn supports_compute(&self) -> bool { - match *self { - QueueType::General | QueueType::Graphics | QueueType::Compute => true, - QueueType::Transfer => false, - } - } - /// Returns true if the queue supports transfer operations. - pub fn supports_transfer(&self) -> bool { - true - } -} - -/// Scheduling hint for devices about the priority of a queue. Values range from `0.0` (low) to -/// `1.0` (high). -pub type QueuePriority = f32; - -/// Submission information for a command queue. -#[derive(Debug)] -pub struct Submission { - /// Command buffers to submit. - pub command_buffers: Ic, - /// Semaphores to wait being signalled before submission. - pub wait_semaphores: Iw, - /// Semaphores to signal after all command buffers in the submission have finished execution. - pub signal_semaphores: Is, -} - -/// `RawCommandQueue` are abstractions to the internal GPU execution engines. -/// Commands are executed on the the device by submitting command buffers to queues. -pub trait CommandQueue: fmt::Debug + Any + Send + Sync { - /// Submit command buffers to queue for execution. - /// `fence` must be in unsignalled state, and will be signalled after all command buffers in the submission have - /// finished execution. - /// - /// Unsafe because it's not checked that the queue can process the submitted command buffers. - /// Trying to submit compute commands to a graphics queue will result in undefined behavior. - /// Each queue implements safer wrappers according to their supported functionalities! 
- unsafe fn submit<'a, T, Ic, S, Iw, Is>( - &mut self, - submission: Submission, - fence: Option<&B::Fence>, - ) where - T: 'a + Borrow, - Ic: IntoIterator, - S: 'a + Borrow, - Iw: IntoIterator, - Is: IntoIterator; - - /// Simplified version of `submit` that doesn't expect any semaphores. - unsafe fn submit_without_semaphores<'a, T, Ic>( - &mut self, - command_buffers: Ic, - fence: Option<&B::Fence>, - ) where - T: 'a + Borrow, - Ic: IntoIterator, - { - let submission = Submission { - command_buffers, - wait_semaphores: iter::empty(), - signal_semaphores: iter::empty(), - }; - self.submit::<_, _, B::Semaphore, _, _>(submission, fence) - } - - /// Presents the result of the queue to the given swapchains, after waiting on all the - /// semaphores given in `wait_semaphores`. A given swapchain must not appear in this - /// list more than once. - /// - /// Unsafe for the same reasons as `submit()`. - unsafe fn present<'a, W, Is, S, Iw>( - &mut self, - swapchains: Is, - wait_semaphores: Iw, - ) -> Result, PresentError> - where - Self: Sized, - W: 'a + Borrow, - Is: IntoIterator, - S: 'a + Borrow, - Iw: IntoIterator; - - /// Simplified version of `present` that doesn't expect any semaphores. - unsafe fn present_without_semaphores<'a, W, Is>( - &mut self, - swapchains: Is, - ) -> Result, PresentError> - where - Self: Sized, - W: 'a + Borrow, - Is: IntoIterator, - { - self.present::<_, _, B::Semaphore, _>(swapchains, iter::empty()) - } - - /// Present the a - unsafe fn present_surface( - &mut self, - surface: &mut B::Surface, - image: >::SwapchainImage, - wait_semaphore: Option<&B::Semaphore>, - ) -> Result, PresentError>; - - /// Wait for the queue to idle. - fn wait_idle(&self) -> Result<(), OutOfMemory>; -} +//! Command queues. +//! +//! Queues are the execution paths of the graphical processing units. These process +//! submitted commands buffers. +//! +//! There are different types of queues, which can only handle associated command buffers. +//! 
`CommandQueue` has the capability defined by `C`: graphics, compute and transfer. + +pub mod family; + +use crate::{ + device::OutOfMemory, + pso, + window::{PresentError, PresentationSurface, Suboptimal, SwapImageIndex}, + Backend, +}; +use std::{any::Any, borrow::Borrow, fmt, iter}; + +pub use self::family::{QueueFamily, QueueFamilyId, QueueGroup}; + +/// The type of the queue, an enum encompassing `queue::Capability` +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum QueueType { + /// Supports all operations. + General, + /// Only supports graphics and transfer operations. + Graphics, + /// Only supports compute and transfer operations. + Compute, + /// Only supports transfer operations. + Transfer, +} + +impl QueueType { + /// Returns true if the queue supports graphics operations. + pub fn supports_graphics(&self) -> bool { + match *self { + QueueType::General | QueueType::Graphics => true, + QueueType::Compute | QueueType::Transfer => false, + } + } + /// Returns true if the queue supports compute operations. + pub fn supports_compute(&self) -> bool { + match *self { + QueueType::General | QueueType::Graphics | QueueType::Compute => true, + QueueType::Transfer => false, + } + } + /// Returns true if the queue supports transfer operations. + pub fn supports_transfer(&self) -> bool { + true + } +} + +/// Scheduling hint for devices about the priority of a queue. Values range from `0.0` (low) to +/// `1.0` (high). +pub type QueuePriority = f32; + +/// Submission information for a command queue. +#[derive(Debug)] +pub struct Submission { + /// Command buffers to submit. + pub command_buffers: Ic, + /// Semaphores to wait being signalled before submission. + pub wait_semaphores: Iw, + /// Semaphores to signal after all command buffers in the submission have finished execution. + pub signal_semaphores: Is, +} + +/// `RawCommandQueue` are abstractions to the internal GPU execution engines. 
+/// Commands are executed on the the device by submitting command buffers to queues. +pub trait CommandQueue: fmt::Debug + Any + Send + Sync { + /// Submit command buffers to queue for execution. + /// `fence` must be in unsignalled state, and will be signalled after all command buffers in the submission have + /// finished execution. + /// + /// Unsafe because it's not checked that the queue can process the submitted command buffers. + /// Trying to submit compute commands to a graphics queue will result in undefined behavior. + /// Each queue implements safer wrappers according to their supported functionalities! + unsafe fn submit<'a, T, Ic, S, Iw, Is>( + &mut self, + submission: Submission, + fence: Option<&B::Fence>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator, + Is: IntoIterator; + + /// Simplified version of `submit` that doesn't expect any semaphores. + unsafe fn submit_without_semaphores<'a, T, Ic>( + &mut self, + command_buffers: Ic, + fence: Option<&B::Fence>, + ) where + T: 'a + Borrow, + Ic: IntoIterator, + { + let submission = Submission { + command_buffers, + wait_semaphores: iter::empty(), + signal_semaphores: iter::empty(), + }; + self.submit::<_, _, B::Semaphore, _, _>(submission, fence) + } + + /// Presents the result of the queue to the given swapchains, after waiting on all the + /// semaphores given in `wait_semaphores`. A given swapchain must not appear in this + /// list more than once. + /// + /// Unsafe for the same reasons as `submit()`. + unsafe fn present<'a, W, Is, S, Iw>( + &mut self, + swapchains: Is, + wait_semaphores: Iw, + ) -> Result, PresentError> + where + Self: Sized, + W: 'a + Borrow, + Is: IntoIterator, + S: 'a + Borrow, + Iw: IntoIterator; + + /// Simplified version of `present` that doesn't expect any semaphores. 
+ unsafe fn present_without_semaphores<'a, W, Is>( + &mut self, + swapchains: Is, + ) -> Result, PresentError> + where + Self: Sized, + W: 'a + Borrow, + Is: IntoIterator, + { + self.present::<_, _, B::Semaphore, _>(swapchains, iter::empty()) + } + + /// Present the a + unsafe fn present_surface( + &mut self, + surface: &mut B::Surface, + image: >::SwapchainImage, + wait_semaphore: Option<&B::Semaphore>, + ) -> Result, PresentError>; + + /// Wait for the queue to idle. + fn wait_idle(&self) -> Result<(), OutOfMemory>; +} diff --git a/third_party/rust/gfx-hal/src/range.rs b/third_party/rust/gfx-hal/src/range.rs deleted file mode 100755 index 67ff17c601be..000000000000 --- a/third_party/rust/gfx-hal/src/range.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! Generic range type abstraction that allows -//! ranges to be handled a little more generically. - -use std::ops::{Range, RangeFrom, RangeFull, RangeTo}; - -/// Abstracts the std range types. -/// -/// Based upon the nightly `RangeArgument` trait. -pub trait RangeArg { - /// Start index bound. - fn start(&self) -> Option<&T>; - /// End index bound. 
- fn end(&self) -> Option<&T>; -} - -impl RangeArg for Range { - fn start(&self) -> Option<&T> { - Some(&self.start) - } - fn end(&self) -> Option<&T> { - Some(&self.end) - } -} - -impl RangeArg for RangeTo { - fn start(&self) -> Option<&T> { - None - } - fn end(&self) -> Option<&T> { - Some(&self.end) - } -} - -impl RangeArg for RangeFrom { - fn start(&self) -> Option<&T> { - Some(&self.start) - } - fn end(&self) -> Option<&T> { - None - } -} - -impl RangeArg for RangeFull { - fn start(&self) -> Option<&T> { - None - } - fn end(&self) -> Option<&T> { - None - } -} - -impl RangeArg for (Option, Option) { - fn start(&self) -> Option<&T> { - self.0.as_ref() - } - fn end(&self) -> Option<&T> { - self.1.as_ref() - } -} diff --git a/third_party/rust/gfx-hal/src/window.rs b/third_party/rust/gfx-hal/src/window.rs old mode 100755 new mode 100644 index 5b2a9f7dbc6e..bb51653b3ae1 --- a/third_party/rust/gfx-hal/src/window.rs +++ b/third_party/rust/gfx-hal/src/window.rs @@ -1,605 +1,643 @@ -//! Windowing system interoperability -//! -//! Screen presentation (fullscreen or window) of images requires two objects: -//! -//! * [`Surface`] is the host abstraction of the native screen -//! * [`Swapchain`] is the device abstraction for a surface, containing multiple presentable images -//! -//! ## Window -//! -//! // DOC TODO -//! -//! ## Surface -//! -//! // DOC TODO -//! -//! ## Swapchain -//! -//! The most interesting part of a swapchain are the contained presentable images/backbuffers. -//! Presentable images are specialized images, which can be presented on the screen. They are -//! 2D color images with optionally associated depth-stencil images. -//! -//! The common steps for presentation of a frame are acquisition and presentation: -//! -//! ```no_run -//! # extern crate gfx_backend_empty as empty; -//! # extern crate gfx_hal; -//! # fn main() { -//! # use gfx_hal::prelude::*; -//! -//! # let mut swapchain: empty::Swapchain = return; -//! 
# let device: empty::Device = return; -//! # let mut present_queue: empty::CommandQueue = return; -//! # unsafe { -//! let acquisition_semaphore = device.create_semaphore().unwrap(); -//! let render_semaphore = device.create_semaphore().unwrap(); -//! -//! let (frame, suboptimal) = swapchain.acquire_image(!0, Some(&acquisition_semaphore), None).unwrap(); -//! // render the scene.. -//! // `render_semaphore` will be signalled once rendering has been finished -//! swapchain.present(&mut present_queue, 0, &[render_semaphore]); -//! # }} -//! ``` -//! -//! Queues need to synchronize with the presentation engine, usually done via signalling a semaphore -//! once a frame is available for rendering and waiting on a separate semaphore until scene rendering -//! has finished. -//! -//! ### Recreation -//! -//! DOC TODO - -use crate::device; -use crate::format::Format; -use crate::image; -use crate::queue::CommandQueue; -use crate::Backend; - -use std::any::Any; -use std::borrow::Borrow; -use std::cmp::{max, min}; -use std::fmt; -use std::iter; -use std::ops::RangeInclusive; - -/// Error occurred during swapchain creation. -#[derive(Clone, Debug, PartialEq)] -pub enum CreationError { - /// Out of either host or device memory. 
- OutOfMemory(device::OutOfMemory), - /// Device is lost - DeviceLost(device::DeviceLost), - /// Surface is lost - SurfaceLost(device::SurfaceLost), - /// Window in use - WindowInUse(device::WindowInUse), -} - -impl From for CreationError { - fn from(error: device::OutOfMemory) -> Self { - CreationError::OutOfMemory(error) - } -} - -impl From for CreationError { - fn from(error: device::DeviceLost) -> Self { - CreationError::DeviceLost(error) - } -} - -impl From for CreationError { - fn from(error: device::SurfaceLost) -> Self { - CreationError::SurfaceLost(error) - } -} - -impl From for CreationError { - fn from(error: device::WindowInUse) -> Self { - CreationError::WindowInUse(error) - } -} - -impl std::fmt::Display for CreationError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - CreationError::OutOfMemory(err) => write!(fmt, "Failed to create or configure swapchain: {}", err), - CreationError::DeviceLost(err) => write!(fmt, "Failed to create or configure swapchain: {}", err), - CreationError::SurfaceLost(err) => write!(fmt, "Failed to create or configure swapchain: {}", err), - CreationError::WindowInUse(err) => write!(fmt, "Failed to create or configure swapchain: {}", err), - } - } -} - -impl std::error::Error for CreationError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - CreationError::OutOfMemory(err) => Some(err), - CreationError::DeviceLost(err) => Some(err), - CreationError::SurfaceLost(err) => Some(err), - CreationError::WindowInUse(err) => Some(err), - } - } -} - -/// An extent describes the size of a rectangle, such as -/// a window or texture. It is not used for referring to a -/// sub-rectangle; for that see `command::Rect`. 
-#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Extent2D { - /// Width - pub width: image::Size, - /// Height - pub height: image::Size, -} - -impl From for Extent2D { - fn from(ex: image::Extent) -> Self { - Extent2D { - width: ex.width, - height: ex.height, - } - } -} - -impl Extent2D { - /// Convert into a regular image extent. - pub fn to_extent(&self) -> image::Extent { - image::Extent { - width: self.width, - height: self.height, - depth: 1, - } - } -} - -/// Describes information about what a `Surface`'s properties are. -/// Fetch this with `surface.compatibility(device)`. -#[derive(Debug, Clone)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct SurfaceCapabilities { - /// Number of presentable images supported by the adapter for a swapchain - /// created from this surface. - /// - /// - `image_count.start` must be at least 1. - /// - `image_count.end` must be larger or equal to `image_count.start`. - pub image_count: RangeInclusive, - - /// Current extent of the surface. - /// - /// `None` if the surface has no explicit size, depending on the swapchain extent. - pub current_extent: Option, - - /// Range of supported extents. - /// - /// `current_extent` must be inside this range. - pub extents: RangeInclusive, - - /// Maximum number of layers supported for presentable images. - /// - /// Must be at least 1. - pub max_image_layers: image::Layer, - - /// Supported image usage flags. - pub usage: image::Usage, - - /// A bitmask of supported presentation modes. - pub present_modes: PresentMode, - - /// A bitmask of supported alpha composition modes. 
- pub composite_alpha_modes: CompositeAlphaMode, -} - -impl SurfaceCapabilities { - fn clamped_extent(&self, default_extent: Extent2D) -> Extent2D { - match self.current_extent { - Some(current) => current, - None => { - let (min_width, max_width) = (self.extents.start().width, self.extents.end().width); - let (min_height, max_height) = - (self.extents.start().height, self.extents.end().height); - - // clamp the default_extent to within the allowed surface sizes - let width = min(max_width, max(default_extent.width, min_width)); - let height = min(max_height, max(default_extent.height, min_height)); - - Extent2D { width, height } - } - } - } -} - -/// A `Surface` abstracts the surface of a native window. -pub trait Surface: fmt::Debug + Any + Send + Sync { - /// Check if the queue family supports presentation to this surface. - /// - /// # Examples - /// - /// ```no_run - /// - /// ``` - fn supports_queue_family(&self, family: &B::QueueFamily) -> bool; - - /// Query surface capabilities for this physical device. - /// - /// Use this function for configuring swapchain creation. - fn capabilities(&self, physical_device: &B::PhysicalDevice) -> SurfaceCapabilities; - - /// Query surface formats for this physical device. - /// - /// This function may be slow. It's typically used during the initialization only. - /// - /// Note: technically the surface support formats may change at the point - /// where an application needs to recreate the swapchain, e.g. when the window - /// is moved to a different monitor. - /// - /// If `None` is returned then the surface has no preferred format and the - /// application may use any desired format. - fn supported_formats(&self, physical_device: &B::PhysicalDevice) -> Option>; -} - -/// A surface trait that exposes the ability to present images on the -/// associtated swap chain. -pub trait PresentationSurface: Surface { - /// An opaque type wrapping the swapchain image. 
- type SwapchainImage: Borrow + fmt::Debug + Send + Sync; - - /// Set up the swapchain associated with the surface to have the given format. - unsafe fn configure_swapchain( - &mut self, - device: &B::Device, - config: SwapchainConfig, - ) -> Result<(), CreationError>; - - /// Remove the associated swapchain from this surface. - /// - /// This has to be done before the surface is dropped. - unsafe fn unconfigure_swapchain(&mut self, device: &B::Device); - - /// Acquire a new swapchain image for rendering. - /// - /// May fail according to one of the reasons indicated in `AcquireError` enum. - /// - /// # Synchronization - /// - /// The acquired image is available to render. No synchronization is required. - /// - /// # Examples - /// - /// ```no_run - /// - /// ``` - unsafe fn acquire_image( - &mut self, - timeout_ns: u64, - ) -> Result<(Self::SwapchainImage, Option), AcquireError>; -} - -/// Index of an image in the swapchain. -/// -/// The swapchain is a series of one or more images, usually -/// with one being drawn on while the other is displayed by -/// the GPU (aka double-buffering). A `SwapImageIndex` refers -/// to a particular image in the swapchain. -pub type SwapImageIndex = u32; - - -bitflags!( - /// Specifies the mode regulating how a swapchain presents frames. - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct PresentMode: u32 { - /// Don't ever wait for v-sync. - const IMMEDIATE = 0x1; - /// Wait for v-sync, overwrite the last rendered frame. - const MAILBOX = 0x2; - /// Present frames in the same order they are rendered. - const FIFO = 0x4; - /// Don't wait for the next v-sync if we just missed it. - const RELAXED = 0x8; - } -); - -bitflags!( - /// Specifies how the alpha channel of the images should be handled during - /// compositing. 
- #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - pub struct CompositeAlphaMode: u32 { - /// The alpha channel, if it exists, of the images is ignored in the - /// compositing process. Instead, the image is treated as if it has a - /// constant alpha of 1.0. - const OPAQUE = 0x1; - /// The alpha channel, if it exists, of the images is respected in the - /// compositing process. The non-alpha channels of the image are - /// expected to already be multiplied by the alpha channel by the - /// application. - const PREMULTIPLIED = 0x2; - /// The alpha channel, if it exists, of the images is respected in the - /// compositing process. The non-alpha channels of the image are not - /// expected to already be multiplied by the alpha channel by the - /// application; instead, the compositor will multiply the non-alpha - /// channels of the image by the alpha channel during compositing. - const POSTMULTIPLIED = 0x4; - /// The way in which the presentation engine treats the alpha channel in - /// the images is unknown to gfx-hal. Instead, the application is - /// responsible for setting the composite alpha blending mode using - /// native window system commands. If the application does not set the - /// blending mode using native window system commands, then a - /// platform-specific default will be used. - const INHERIT = 0x8; - } -); - -/// Contains all the data necessary to create a new `Swapchain`: -/// color, depth, and number of images. -/// -/// # Examples -/// -/// This type implements the builder pattern, method calls can be -/// easily chained. -/// -/// ```no_run -/// # extern crate gfx_hal; -/// # fn main() { -/// # use gfx_hal::window::SwapchainConfig; -/// # use gfx_hal::format::Format; -/// let config = SwapchainConfig::new(100, 100, Format::Bgra8Unorm, 2); -/// # } -/// ``` -#[derive(Debug, Clone)] -pub struct SwapchainConfig { - /// Presentation mode. - pub present_mode: PresentMode, - /// Alpha composition mode. 
- pub composite_alpha_mode: CompositeAlphaMode, - /// Format of the backbuffer images. - pub format: Format, - /// Requested image extent. Must be in - /// `SurfaceCapabilities::extents` range. - pub extent: Extent2D, - /// Number of images in the swapchain. Must be in - /// `SurfaceCapabilities::image_count` range. - pub image_count: SwapImageIndex, - /// Number of image layers. Must be lower or equal to - /// `SurfaceCapabilities::max_image_layers`. - pub image_layers: image::Layer, - /// Image usage of the backbuffer images. - pub image_usage: image::Usage, -} - -impl SwapchainConfig { - /// Create a new default configuration (color images only). - /// - /// # Examples - /// - /// ```no_run - /// - /// ``` - pub fn new(width: u32, height: u32, format: Format, image_count: SwapImageIndex) -> Self { - SwapchainConfig { - present_mode: PresentMode::FIFO, - composite_alpha_mode: CompositeAlphaMode::OPAQUE, - format, - extent: Extent2D { width, height }, - image_count, - image_layers: 1, - image_usage: image::Usage::COLOR_ATTACHMENT, - } - } - - /// Create a swapchain configuration based on the capabilities - /// returned from a physical device query. If the surface does not - /// specify a current size, default_extent is clamped and used instead. 
- pub fn from_caps(caps: &SurfaceCapabilities, format: Format, default_extent: Extent2D) -> Self { - let composite_alpha_mode = if caps.composite_alpha_modes.contains(CompositeAlphaMode::INHERIT) { - CompositeAlphaMode::INHERIT - } else if caps.composite_alpha_modes.contains(CompositeAlphaMode::OPAQUE) { - CompositeAlphaMode::OPAQUE - } else { - panic!("neither INHERIT or OPAQUE CompositeAlphaMode(s) are supported") - }; - let present_mode = if caps.present_modes.contains(PresentMode::FIFO) { - PresentMode::FIFO - } else { - panic!("FIFO PresentMode is not supported") - }; - - SwapchainConfig { - present_mode, - composite_alpha_mode, - format, - extent: caps.clamped_extent(default_extent), - image_count: *caps.image_count.start(), - image_layers: 1, - image_usage: image::Usage::COLOR_ATTACHMENT, - } - } - - /// Specify the presentation mode. - /// - /// # Examples - /// - /// ```no_run - /// - /// ``` - pub fn with_present_mode(mut self, mode: PresentMode) -> Self { - self.present_mode = mode; - self - } - - /// Specify the usage of backbuffer images. - /// - /// # Examples - /// - /// ```no_run - /// - /// ``` - pub fn with_image_usage(mut self, usage: image::Usage) -> Self { - self.image_usage = usage; - self - } - - // TODO: depth-only, stencil-only, swapchain size, present modes, etc. -} - -/// Marker value returned if the swapchain no longer matches the surface properties exactly, -/// but can still be used to present to the surface successfully. -#[derive(Debug)] -pub struct Suboptimal; - -/// Error on acquiring the next image from a swapchain. -#[derive(Clone, Debug, PartialEq)] -pub enum AcquireError { - /// Out of either host or device memory. - OutOfMemory(device::OutOfMemory), - /// No image was ready and no timeout was specified. - NotReady, - /// No image was ready after the specified timeout expired. - Timeout, - /// The swapchain is no longer in sync with the surface, needs to be re-created. 
- OutOfDate, - /// The surface was lost, and the swapchain is no longer usable. - SurfaceLost(device::SurfaceLost), - /// Device is lost - DeviceLost(device::DeviceLost), -} - -impl std::fmt::Display for AcquireError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - AcquireError::OutOfMemory(err) => write!(fmt, "Failed to acqure image: {}", err), - AcquireError::NotReady => write!(fmt, "Failed to acqure image: No image ready (timeout wasn't specified)"), - AcquireError::Timeout => write!(fmt, "Failed to acqure image: No image ready (timeout)"), - AcquireError::OutOfDate => write!(fmt, "Failed to acqure image: Swapchain is out of date and needs to be re-created"), - AcquireError::SurfaceLost(err) => write!(fmt, "Failed to acqure image: {}", err), - AcquireError::DeviceLost(err) => write!(fmt, "Failed to acqure image: {}", err), - } - } -} - -impl std::error::Error for AcquireError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - AcquireError::OutOfMemory(err) => Some(err), - AcquireError::SurfaceLost(err) => Some(err), - AcquireError::DeviceLost(err) => Some(err), - _ => None, - } - } -} - -/// Error on acquiring the next image from a swapchain. -#[derive(Clone, Debug, PartialEq)] -pub enum PresentError { - /// Out of either host or device memory. - OutOfMemory(device::OutOfMemory), - /// The swapchain is no longer in sync with the surface, needs to be re-created. - OutOfDate, - /// The surface was lost, and the swapchain is no longer usable. 
- SurfaceLost(device::SurfaceLost), - /// Device is lost - DeviceLost(device::DeviceLost), -} - -impl std::fmt::Display for PresentError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - PresentError::OutOfMemory(err) => write!(fmt, "Failed to present image: {}", err), - PresentError::OutOfDate => write!(fmt, "Failed to present image: Swapchain is out of date and needs to be re-created"), - PresentError::SurfaceLost(err) => write!(fmt, "Failed to present image: {}", err), - PresentError::DeviceLost(err) => write!(fmt, "Failed to present image: {}", err), - } - } -} - -impl std::error::Error for PresentError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - PresentError::OutOfMemory(err) => Some(err), - PresentError::SurfaceLost(err) => Some(err), - PresentError::DeviceLost(err) => Some(err), - _ => None, - } - } -} - -/// The `Swapchain` is the backend representation of the surface. -/// It consists of multiple buffers, which will be presented on the surface. -pub trait Swapchain: fmt::Debug + Any + Send + Sync { - /// Acquire a new swapchain image for rendering. This needs to be called before presenting. - /// - /// May fail according to one of the reasons indicated in `AcquireError` enum. - /// - /// # Synchronization - /// - /// The acquired image will not be immediately available when the function returns. - /// Once available the provided [`Semaphore`](../trait.Resources.html#associatedtype.Semaphore) - /// and [`Fence`](../trait.Resources.html#associatedtype.Fence) will be signaled. - /// - /// # Examples - /// - /// ```no_run - /// - /// ``` - unsafe fn acquire_image( - &mut self, - timeout_ns: u64, - semaphore: Option<&B::Semaphore>, - fence: Option<&B::Fence>, - ) -> Result<(SwapImageIndex, Option), AcquireError>; - - /// Present one acquired image. 
- /// - /// # Safety - /// - /// The passed queue _must_ support presentation on the surface, which is - /// used for creating this swapchain. - /// - /// # Examples - /// - /// ```no_run - /// - /// ``` - unsafe fn present<'a, S, Iw>( - &'a self, - present_queue: &mut B::CommandQueue, - image_index: SwapImageIndex, - wait_semaphores: Iw, - ) -> Result, PresentError> - where - Self: 'a + Sized + Borrow, - S: 'a + Borrow, - Iw: IntoIterator, - { - present_queue.present(iter::once((self, image_index)), wait_semaphores) - } - - /// Present one acquired image without any semaphore synchronization. - unsafe fn present_without_semaphores<'a>( - &'a self, - present_queue: &mut B::CommandQueue, - image_index: SwapImageIndex, - ) -> Result, PresentError> - where - Self: 'a + Sized + Borrow, - { - self.present::(present_queue, image_index, iter::empty()) - } -} - -/// Error occurred during surface creation. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum InitError { - /// Window handle is not supported by the backend. - UnsupportedWindowHandle, -} - - -impl std::fmt::Display for InitError { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - InitError::UnsupportedWindowHandle => write!(fmt, "Failed to create surface: Specified window handle is unsupported"), - } - } -} - -impl std::error::Error for InitError {} +//! Windowing system interoperability +//! +//! Screen presentation (fullscreen or window) of images requires two objects: +//! +//! * [Surface](window::Surface) is the host abstraction of the native screen +//! * [Swapchain](window::Swapchain) is the device abstraction for a surface, containing multiple presentable images +//! +//! ## Window +//! +//! // DOC TODO +//! +//! ## Surface +//! +//! // DOC TODO +//! +//! ## Swapchain +//! +//! The most interesting part of a swapchain are the contained presentable images/backbuffers. +//! Presentable images are specialized images, which can be presented on the screen. 
They are +//! 2D color images with optionally associated depth-stencil images. +//! +//! The common steps for presentation of a frame are acquisition and presentation: +//! +//! ```no_run +//! # extern crate gfx_backend_empty as empty; +//! # extern crate gfx_hal; +//! # fn main() { +//! # use gfx_hal::prelude::*; +//! +//! # let mut swapchain: empty::Swapchain = return; +//! # let device: empty::Device = return; +//! # let mut present_queue: empty::CommandQueue = return; +//! # unsafe { +//! let acquisition_semaphore = device.create_semaphore().unwrap(); +//! let render_semaphore = device.create_semaphore().unwrap(); +//! +//! let (frame, suboptimal) = swapchain.acquire_image(!0, Some(&acquisition_semaphore), None).unwrap(); +//! // render the scene.. +//! // `render_semaphore` will be signalled once rendering has been finished +//! swapchain.present(&mut present_queue, 0, &[render_semaphore]); +//! # }} +//! ``` +//! +//! Queues need to synchronize with the presentation engine, usually done via signalling a semaphore +//! once a frame is available for rendering and waiting on a separate semaphore until scene rendering +//! has finished. +//! +//! ### Recreation +//! +//! DOC TODO + +use crate::device; +use crate::format::Format; +use crate::image; +use crate::queue::CommandQueue; +use crate::Backend; + +use std::any::Any; +use std::borrow::Borrow; +use std::cmp::{max, min}; +use std::fmt; +use std::iter; +use std::ops::RangeInclusive; + + +/// Default image usage for the swapchain. +pub const DEFAULT_USAGE: image::Usage = image::Usage::COLOR_ATTACHMENT; +/// Default image count for the swapchain. +pub const DEFAULT_IMAGE_COUNT: SwapImageIndex = 3; + +/// Error occurred during swapchain creation. +#[derive(Clone, Debug, PartialEq)] +pub enum CreationError { + /// Out of either host or device memory. 
+ OutOfMemory(device::OutOfMemory), + /// Device is lost + DeviceLost(device::DeviceLost), + /// Surface is lost + SurfaceLost(device::SurfaceLost), + /// Window in use + WindowInUse(device::WindowInUse), +} + +impl From for CreationError { + fn from(error: device::OutOfMemory) -> Self { + CreationError::OutOfMemory(error) + } +} + +impl From for CreationError { + fn from(error: device::DeviceLost) -> Self { + CreationError::DeviceLost(error) + } +} + +impl From for CreationError { + fn from(error: device::SurfaceLost) -> Self { + CreationError::SurfaceLost(error) + } +} + +impl From for CreationError { + fn from(error: device::WindowInUse) -> Self { + CreationError::WindowInUse(error) + } +} + +impl std::fmt::Display for CreationError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CreationError::OutOfMemory(err) => { + write!(fmt, "Failed to create or configure swapchain: {}", err) + } + CreationError::DeviceLost(err) => { + write!(fmt, "Failed to create or configure swapchain: {}", err) + } + CreationError::SurfaceLost(err) => { + write!(fmt, "Failed to create or configure swapchain: {}", err) + } + CreationError::WindowInUse(err) => { + write!(fmt, "Failed to create or configure swapchain: {}", err) + } + } + } +} + +impl std::error::Error for CreationError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + CreationError::OutOfMemory(err) => Some(err), + CreationError::DeviceLost(err) => Some(err), + CreationError::SurfaceLost(err) => Some(err), + CreationError::WindowInUse(err) => Some(err), + } + } +} + +/// An extent describes the size of a rectangle, such as +/// a window or texture. It is not used for referring to a +/// sub-rectangle; for that see `command::Rect`. 
+#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Extent2D { + /// Width + pub width: image::Size, + /// Height + pub height: image::Size, +} + +impl From for Extent2D { + fn from(ex: image::Extent) -> Self { + Extent2D { + width: ex.width, + height: ex.height, + } + } +} + +impl Extent2D { + /// Convert into a regular image extent. + pub fn to_extent(&self) -> image::Extent { + image::Extent { + width: self.width, + height: self.height, + depth: 1, + } + } +} + +/// Describes information about what a `Surface`'s properties are. +/// Fetch this with [Surface::capabilities]. +#[derive(Debug, Clone)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SurfaceCapabilities { + /// Number of presentable images supported by the adapter for a swapchain + /// created from this surface. + /// + /// - `image_count.start` must be at least 1. + /// - `image_count.end` must be larger or equal to `image_count.start`. + pub image_count: RangeInclusive, + + /// Current extent of the surface. + /// + /// `None` if the surface has no explicit size, depending on the swapchain extent. + pub current_extent: Option, + + /// Range of supported extents. + /// + /// `current_extent` must be inside this range. + pub extents: RangeInclusive, + + /// Maximum number of layers supported for presentable images. + /// + /// Must be at least 1. + pub max_image_layers: image::Layer, + + /// Supported image usage flags. + pub usage: image::Usage, + + /// A bitmask of supported presentation modes. + pub present_modes: PresentMode, + + /// A bitmask of supported alpha composition modes. 
+ pub composite_alpha_modes: CompositeAlphaMode, +} + +impl SurfaceCapabilities { + fn clamped_extent(&self, default_extent: Extent2D) -> Extent2D { + match self.current_extent { + Some(current) => current, + None => { + let (min_width, max_width) = (self.extents.start().width, self.extents.end().width); + let (min_height, max_height) = + (self.extents.start().height, self.extents.end().height); + + // clamp the default_extent to within the allowed surface sizes + let width = min(max_width, max(default_extent.width, min_width)); + let height = min(max_height, max(default_extent.height, min_height)); + + Extent2D { width, height } + } + } + } +} + +/// A `Surface` abstracts the surface of a native window. +pub trait Surface: fmt::Debug + Any + Send + Sync { + /// Check if the queue family supports presentation to this surface. + /// + /// # Examples + /// + /// ```no_run + /// + /// ``` + fn supports_queue_family(&self, family: &B::QueueFamily) -> bool; + + /// Query surface capabilities for this physical device. + /// + /// Use this function for configuring swapchain creation. + fn capabilities(&self, physical_device: &B::PhysicalDevice) -> SurfaceCapabilities; + + /// Query surface formats for this physical device. + /// + /// This function may be slow. It's typically used during the initialization only. + /// + /// Note: technically the surface support formats may change at the point + /// where an application needs to recreate the swapchain, e.g. when the window + /// is moved to a different monitor. + /// + /// If `None` is returned then the surface has no preferred format and the + /// application may use any desired format. + fn supported_formats(&self, physical_device: &B::PhysicalDevice) -> Option>; +} + +/// A surface trait that exposes the ability to present images on the +/// associtated swap chain. +pub trait PresentationSurface: Surface { + /// An opaque type wrapping the swapchain image. 
+ type SwapchainImage: Borrow + fmt::Debug + Send + Sync; + + /// Set up the swapchain associated with the surface to have the given format. + unsafe fn configure_swapchain( + &mut self, + device: &B::Device, + config: SwapchainConfig, + ) -> Result<(), CreationError>; + + /// Remove the associated swapchain from this surface. + /// + /// This has to be done before the surface is dropped. + unsafe fn unconfigure_swapchain(&mut self, device: &B::Device); + + /// Acquire a new swapchain image for rendering. + /// + /// May fail according to one of the reasons indicated in `AcquireError` enum. + /// + /// # Synchronization + /// + /// The acquired image is available to render. No synchronization is required. + /// + /// # Examples + /// + /// ```no_run + /// + /// ``` + unsafe fn acquire_image( + &mut self, + timeout_ns: u64, + ) -> Result<(Self::SwapchainImage, Option), AcquireError>; +} + +/// Index of an image in the swapchain. +/// +/// The swapchain is a series of one or more images, usually +/// with one being drawn on while the other is displayed by +/// the GPU (aka double-buffering). A `SwapImageIndex` refers +/// to a particular image in the swapchain. +pub type SwapImageIndex = u32; + +bitflags!( + /// Specifies the mode regulating how a swapchain presents frames. + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct PresentMode: u32 { + /// Don't ever wait for v-sync. + const IMMEDIATE = 0x1; + /// Wait for v-sync, overwrite the last rendered frame. + const MAILBOX = 0x2; + /// Present frames in the same order they are rendered. + const FIFO = 0x4; + /// Don't wait for the next v-sync if we just missed it. + const RELAXED = 0x8; + } +); + +bitflags!( + /// Specifies how the alpha channel of the images should be handled during + /// compositing. 
+ #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct CompositeAlphaMode: u32 { + /// The alpha channel, if it exists, of the images is ignored in the + /// compositing process. Instead, the image is treated as if it has a + /// constant alpha of 1.0. + const OPAQUE = 0x1; + /// The alpha channel, if it exists, of the images is respected in the + /// compositing process. The non-alpha channels of the image are + /// expected to already be multiplied by the alpha channel by the + /// application. + const PREMULTIPLIED = 0x2; + /// The alpha channel, if it exists, of the images is respected in the + /// compositing process. The non-alpha channels of the image are not + /// expected to already be multiplied by the alpha channel by the + /// application; instead, the compositor will multiply the non-alpha + /// channels of the image by the alpha channel during compositing. + const POSTMULTIPLIED = 0x4; + /// The way in which the presentation engine treats the alpha channel in + /// the images is unknown to gfx-hal. Instead, the application is + /// responsible for setting the composite alpha blending mode using + /// native window system commands. If the application does not set the + /// blending mode using native window system commands, then a + /// platform-specific default will be used. + const INHERIT = 0x8; + } +); + +/// Contains all the data necessary to create a new `Swapchain`: +/// color, depth, and number of images. +/// +/// # Examples +/// +/// This type implements the builder pattern, method calls can be +/// easily chained. +/// +/// ```no_run +/// # extern crate gfx_hal; +/// # fn main() { +/// # use gfx_hal::window::SwapchainConfig; +/// # use gfx_hal::format::Format; +/// let config = SwapchainConfig::new(100, 100, Format::Bgra8Unorm, 2); +/// # } +/// ``` +#[derive(Debug, Clone)] +pub struct SwapchainConfig { + /// Presentation mode. + pub present_mode: PresentMode, + /// Alpha composition mode. 
+ pub composite_alpha_mode: CompositeAlphaMode, + /// Format of the backbuffer images. + pub format: Format, + /// Requested image extent. Must be in + /// `SurfaceCapabilities::extents` range. + pub extent: Extent2D, + /// Number of images in the swapchain. Must be in + /// `SurfaceCapabilities::image_count` range. + pub image_count: SwapImageIndex, + /// Number of image layers. Must be lower or equal to + /// `SurfaceCapabilities::max_image_layers`. + pub image_layers: image::Layer, + /// Image usage of the backbuffer images. + pub image_usage: image::Usage, +} + +impl SwapchainConfig { + /// Create a new default configuration (color images only). + /// + /// # Examples + /// + /// ```no_run + /// + /// ``` + pub fn new(width: u32, height: u32, format: Format, image_count: SwapImageIndex) -> Self { + SwapchainConfig { + present_mode: PresentMode::FIFO, + composite_alpha_mode: CompositeAlphaMode::OPAQUE, + format, + extent: Extent2D { width, height }, + image_count, + image_layers: 1, + image_usage: DEFAULT_USAGE, + } + } + + /// Create a swapchain configuration based on the capabilities + /// returned from a physical device query. If the surface does not + /// specify a current size, default_extent is clamped and used instead. + /// + /// The default values are taken from `DEFAULT_USAGE` and `DEFAULT_IMAGE_COUNT`. 
+ pub fn from_caps(caps: &SurfaceCapabilities, format: Format, default_extent: Extent2D) -> Self { + let composite_alpha_mode = if caps + .composite_alpha_modes + .contains(CompositeAlphaMode::INHERIT) + { + CompositeAlphaMode::INHERIT + } else if caps + .composite_alpha_modes + .contains(CompositeAlphaMode::OPAQUE) + { + CompositeAlphaMode::OPAQUE + } else { + panic!("neither INHERIT or OPAQUE CompositeAlphaMode(s) are supported") + }; + let present_mode = if caps.present_modes.contains(PresentMode::MAILBOX) { + PresentMode::MAILBOX + } else if caps.present_modes.contains(PresentMode::FIFO) { + PresentMode::FIFO + } else { + panic!("FIFO PresentMode is not supported") + }; + + SwapchainConfig { + present_mode, + composite_alpha_mode, + format, + extent: caps.clamped_extent(default_extent), + image_count: DEFAULT_IMAGE_COUNT + .max(*caps.image_count.start()) + .min(*caps.image_count.end()), + image_layers: 1, + image_usage: DEFAULT_USAGE, + } + } + + /// Specify the presentation mode. + pub fn with_present_mode(mut self, mode: PresentMode) -> Self { + self.present_mode = mode; + self + } + + /// Specify the presentation mode. + pub fn with_composite_alpha_mode(mut self, mode: CompositeAlphaMode) -> Self { + self.composite_alpha_mode = mode; + self + } + + /// Specify the usage of backbuffer images. + pub fn with_image_usage(mut self, usage: image::Usage) -> Self { + self.image_usage = usage; + self + } + + /// Specify the count of backbuffer image. + pub fn with_image_count(mut self, count: SwapImageIndex) -> Self { + self.image_count = count; + self + } + + // TODO: depth-only, stencil-only, swapchain size, present modes, etc. +} + +/// Marker value returned if the swapchain no longer matches the surface properties exactly, +/// but can still be used to present to the surface successfully. +#[derive(Debug)] +pub struct Suboptimal; + +/// Error on acquiring the next image from a swapchain. 
+#[derive(Clone, Debug, PartialEq)] +pub enum AcquireError { + /// Out of either host or device memory. + OutOfMemory(device::OutOfMemory), + /// No image was ready and no timeout was specified. + NotReady, + /// No image was ready after the specified timeout expired. + Timeout, + /// The swapchain is no longer in sync with the surface, needs to be re-created. + OutOfDate, + /// The surface was lost, and the swapchain is no longer usable. + SurfaceLost(device::SurfaceLost), + /// Device is lost + DeviceLost(device::DeviceLost), +} + +impl std::fmt::Display for AcquireError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AcquireError::OutOfMemory(err) => write!(fmt, "Failed to acqure image: {}", err), + AcquireError::NotReady => write!( + fmt, + "Failed to acqure image: No image ready (timeout wasn't specified)" + ), + AcquireError::Timeout => { + write!(fmt, "Failed to acqure image: No image ready (timeout)") + } + AcquireError::OutOfDate => write!( + fmt, + "Failed to acqure image: Swapchain is out of date and needs to be re-created" + ), + AcquireError::SurfaceLost(err) => write!(fmt, "Failed to acqure image: {}", err), + AcquireError::DeviceLost(err) => write!(fmt, "Failed to acqure image: {}", err), + } + } +} + +impl std::error::Error for AcquireError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + AcquireError::OutOfMemory(err) => Some(err), + AcquireError::SurfaceLost(err) => Some(err), + AcquireError::DeviceLost(err) => Some(err), + _ => None, + } + } +} + +/// Error on acquiring the next image from a swapchain. +#[derive(Clone, Debug, PartialEq)] +pub enum PresentError { + /// Out of either host or device memory. + OutOfMemory(device::OutOfMemory), + /// The swapchain is no longer in sync with the surface, needs to be re-created. + OutOfDate, + /// The surface was lost, and the swapchain is no longer usable. 
+ SurfaceLost(device::SurfaceLost), + /// Device is lost + DeviceLost(device::DeviceLost), +} + +impl std::fmt::Display for PresentError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PresentError::OutOfMemory(err) => write!(fmt, "Failed to present image: {}", err), + PresentError::OutOfDate => write!( + fmt, + "Failed to present image: Swapchain is out of date and needs to be re-created" + ), + PresentError::SurfaceLost(err) => write!(fmt, "Failed to present image: {}", err), + PresentError::DeviceLost(err) => write!(fmt, "Failed to present image: {}", err), + } + } +} + +impl std::error::Error for PresentError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + PresentError::OutOfMemory(err) => Some(err), + PresentError::SurfaceLost(err) => Some(err), + PresentError::DeviceLost(err) => Some(err), + _ => None, + } + } +} + +/// The `Swapchain` is the backend representation of the surface. +/// It consists of multiple buffers, which will be presented on the surface. +pub trait Swapchain: fmt::Debug + Any + Send + Sync { + /// Acquire a new swapchain image for rendering. This needs to be called before presenting. + /// + /// May fail according to one of the reasons indicated in `AcquireError` enum. + /// + /// # Synchronization + /// + /// The acquired image will not be immediately available when the function returns. + /// Once available the provided [`Semaphore`](../trait.Resources.html#associatedtype.Semaphore) + /// and [`Fence`](../trait.Resources.html#associatedtype.Fence) will be signaled. + /// + /// # Examples + /// + /// ```no_run + /// + /// ``` + unsafe fn acquire_image( + &mut self, + timeout_ns: u64, + semaphore: Option<&B::Semaphore>, + fence: Option<&B::Fence>, + ) -> Result<(SwapImageIndex, Option), AcquireError>; + + /// Present one acquired image. 
+ /// + /// # Safety + /// + /// The passed queue _must_ support presentation on the surface, which is + /// used for creating this swapchain. + /// + /// # Examples + /// + /// ```no_run + /// + /// ``` + unsafe fn present<'a, S, Iw>( + &'a self, + present_queue: &mut B::CommandQueue, + image_index: SwapImageIndex, + wait_semaphores: Iw, + ) -> Result, PresentError> + where + Self: 'a + Sized + Borrow, + S: 'a + Borrow, + Iw: IntoIterator, + { + present_queue.present(iter::once((self, image_index)), wait_semaphores) + } + + /// Present one acquired image without any semaphore synchronization. + unsafe fn present_without_semaphores<'a>( + &'a self, + present_queue: &mut B::CommandQueue, + image_index: SwapImageIndex, + ) -> Result, PresentError> + where + Self: 'a + Sized + Borrow, + { + self.present::(present_queue, image_index, iter::empty()) + } +} + +/// Error occurred during surface creation. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum InitError { + /// Window handle is not supported by the backend. 
+ UnsupportedWindowHandle, +} + +impl std::fmt::Display for InitError { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + InitError::UnsupportedWindowHandle => write!( + fmt, + "Failed to create surface: Specified window handle is unsupported" + ), + } + } +} + +impl std::error::Error for InitError {} diff --git a/third_party/rust/gfx-memory/.cargo-checksum.json b/third_party/rust/gfx-memory/.cargo-checksum.json new file mode 100644 index 000000000000..6c94ba639efb --- /dev/null +++ b/third_party/rust/gfx-memory/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"48d0937c4f20bc1b4120ab0341b633c7c414cfb81a2a5a3e1046f7c9ca153f5c","src/allocator/dedicated.rs":"3064401bdb920b974e708e107a504aceb2323fc8524d26c25478ba2447898f65","src/allocator/general.rs":"eb29e528c9e0889b1f3d50b68c01670d48501b5ae13660284f473a86b9d68ba9","src/allocator/linear.rs":"1a76ddd2f94f2aa93a5b55905e63a1d75c46be5a5560462f8dfb6c21a9a77cc4","src/allocator/mod.rs":"bd6e4537382d8781611f3b69c3eda356733f1cd39dfe4c9410a064fb42674348","src/block.rs":"486966e2b7d16cc3dbd9f3e48a94e1d35b559cd44d07d429c2fa706fc756d5a6","src/heaps/heap.rs":"637ec44890bff21189c1d4958fc145e5745cdb69252e4ea297d546829488bf8c","src/heaps/memory_type.rs":"cea0ff03668d06829c474202ca153464cb81eb6029acd5abda7d27cf8d2c39ae","src/heaps/mod.rs":"f84295b2d80cf2f3572397a491298043ee3b0f59c501bc55f7b19a43a476822a","src/lib.rs":"01df2268f219cd2c8d508a0255629306a88b62e4243a7b3a7301d0c9539e3a9f","src/mapping.rs":"75fdedae06acc9de74cc023c92df5a85ed62a0cdf9903eaa30fe3072bec8c7b5","src/memory.rs":"171958d7d3c32fc4b17b60486a4c98c1de1fd011dcbf0b1467a4c40a32b2bbb6","src/stats.rs":"2bf8bea9d8006740ebdedbf2f46e579a7b523cca7248810aae70af8686a605fb","src/usage.rs":"374ce6bdc434004e6a23de80b7d281f6a1da07d63e0f4f543bf5ad1b72e094d8"},"package":"c2eed6cda674d9cd4d92229102dbd544292124533d236904f987e9afab456137"} \ No newline at end of file diff --git a/third_party/rust/rendy-memory/Cargo.toml 
b/third_party/rust/gfx-memory/Cargo.toml similarity index 62% rename from third_party/rust/rendy-memory/Cargo.toml rename to third_party/rust/gfx-memory/Cargo.toml index bc71560f9fe6..e1288853a4a7 100644 --- a/third_party/rust/rendy-memory/Cargo.toml +++ b/third_party/rust/gfx-memory/Cargo.toml @@ -12,20 +12,25 @@ [package] edition = "2018" -name = "rendy-memory" -version = "0.5.1" -authors = ["omni-viral "] -description = "Rendy's memory manager" -documentation = "https://docs.rs/rendy-memory" -keywords = ["graphics", "gfx-hal", "rendy"] +name = "gfx-memory" +version = "0.1.3" +authors = ["omni-viral ", "The Gfx-rs Developers"] +description = "fx-hal memory allocator" +documentation = "https://docs.rs/gfx-descriptor" +keywords = ["graphics", "gfx-hal"] categories = ["rendering"] license = "MIT OR Apache-2.0" -repository = "https://github.com/amethyst/rendy" +repository = "https://github.com/gfx-rs/gfx-extras" [dependencies.colorful] version = "0.2" +optional = true -[dependencies.gfx-hal] -version = "0.4" +[dependencies.fxhash] +version = "0.2" + +[dependencies.hal] +version = "0.5" +package = "gfx-hal" [dependencies.hibitset] version = "0.6" @@ -34,22 +39,9 @@ default-features = false [dependencies.log] version = "0.4" -[dependencies.relevant] -version = "0.4" -features = ["log"] - -[dependencies.serde] -version = "1.0" -features = ["derive"] -optional = true - [dependencies.slab] version = "0.4" - -[dependencies.smallvec] -version = "0.6" [dev-dependencies.rand] version = "0.7" [features] -serde-1 = ["serde", "gfx-hal/serde"] diff --git a/third_party/rust/gfx-memory/src/allocator/dedicated.rs b/third_party/rust/gfx-memory/src/allocator/dedicated.rs new file mode 100644 index 000000000000..9ad432735525 --- /dev/null +++ b/third_party/rust/gfx-memory/src/allocator/dedicated.rs @@ -0,0 +1,171 @@ +use crate::{ + allocator::{Allocator, Kind}, + block::Block, + mapping::MappedRange, + memory::Memory, + AtomSize, Size, +}; +use hal::{device::Device as _, Backend}; +use 
std::ptr::NonNull; + +/// Memory block allocated from `DedicatedAllocator`. +#[derive(Debug)] +pub struct DedicatedBlock { + memory: Memory, + ptr: Option>, +} + +unsafe impl Send for DedicatedBlock {} +unsafe impl Sync for DedicatedBlock {} + +impl DedicatedBlock { + /// Get inner memory. + /// Panics if mapped. + pub fn unwrap_memory(self) -> Memory { + assert_eq!(self.ptr, None); + self.memory + } + + /// Make a non-mappable block. + pub fn from_memory(memory: Memory) -> Self { + DedicatedBlock { memory, ptr: None } + } + + /// Get the size of the block. + pub fn size(&self) -> Size { + self.memory.size() + } +} + +impl Block for DedicatedBlock { + fn properties(&self) -> hal::memory::Properties { + self.memory.properties() + } + + fn memory(&self) -> &B::Memory { + self.memory.raw() + } + + fn segment(&self) -> hal::memory::Segment { + hal::memory::Segment { + offset: 0, + size: Some(self.memory.size()), + } + } + + fn map<'a>( + &'a mut self, + _device: &B::Device, + segment: hal::memory::Segment, + ) -> Result, hal::device::MapError> { + let requested_range = segment.offset..match segment.size { + Some(s) => segment.offset + s, + None => self.memory.size(), + }; + let mapping_range = match self.memory.non_coherent_atom_size { + Some(atom) => crate::align_range(&requested_range, atom), + None => requested_range.clone(), + }; + + Ok(unsafe { + MappedRange::from_raw( + &self.memory, + self.ptr + //TODO: https://github.com/gfx-rs/gfx/issues/3182 + .ok_or(hal::device::MapError::MappingFailed)? + .as_ptr() + .offset(mapping_range.start as isize), + mapping_range, + requested_range, + ) + }) + } +} + +/// Dedicated memory allocator that uses memory object per allocation requested. +/// +/// This allocator suites best huge allocations. +/// From 32 MiB when GPU has 4-8 GiB memory total. +/// +/// `Heaps` use this allocator when none of sub-allocators bound to the memory type +/// can handle size required. +/// TODO: Check if resource prefers dedicated memory. 
+#[derive(Debug)] +pub struct DedicatedAllocator { + memory_type: hal::MemoryTypeId, + memory_properties: hal::memory::Properties, + non_coherent_atom_size: Option, + used: Size, +} + +impl DedicatedAllocator { + /// Create new `LinearAllocator` + /// for `memory_type` with `memory_properties` specified + pub fn new( + memory_type: hal::MemoryTypeId, + memory_properties: hal::memory::Properties, + non_coherent_atom_size: Size, + ) -> Self { + DedicatedAllocator { + memory_type, + memory_properties, + non_coherent_atom_size: if crate::is_non_coherent_visible(memory_properties) { + AtomSize::new(non_coherent_atom_size) + } else { + None + }, + used: 0, + } + } +} + +impl Allocator for DedicatedAllocator { + type Block = DedicatedBlock; + + const KIND: Kind = Kind::Dedicated; + + fn alloc( + &mut self, + device: &B::Device, + size: Size, + _align: Size, + ) -> Result<(DedicatedBlock, Size), hal::device::AllocationError> { + let size = match self.non_coherent_atom_size { + Some(atom) => crate::align_size(size, atom), + None => size, + }; + log::trace!("Allocate block of size: {}", size); + + let (memory, ptr) = unsafe { + super::allocate_memory_helper( + device, + self.memory_type, + size, + self.memory_properties, + self.non_coherent_atom_size, + )? 
+ }; + + self.used += size; + Ok((DedicatedBlock { memory, ptr }, size)) + } + + fn free(&mut self, device: &B::Device, block: DedicatedBlock) -> Size { + let size = block.memory.size(); + log::trace!("Free block of size: {}", size); + self.used -= size; + unsafe { + device.unmap_memory(block.memory.raw()); + device.free_memory(block.memory.into_raw()); + } + size + } +} + +impl Drop for DedicatedAllocator { + fn drop(&mut self) { + if self.used != 0 { + log::error!("Not all allocation from DedicatedAllocator was freed"); + } + } +} diff --git a/third_party/rust/rendy-memory/src/allocator/dynamic.rs b/third_party/rust/gfx-memory/src/allocator/general.rs similarity index 50% rename from third_party/rust/rendy-memory/src/allocator/dynamic.rs rename to third_party/rust/gfx-memory/src/allocator/general.rs index bb531617f9e2..ae86c7ad81cd 100644 --- a/third_party/rust/rendy-memory/src/allocator/dynamic.rs +++ b/third_party/rust/gfx-memory/src/allocator/general.rs @@ -1,674 +1,643 @@ -use std::{ - collections::{BTreeSet, HashMap}, - ops::Range, - ptr::NonNull, - thread, -}; - -use { - crate::{ - allocator::{Allocator, Kind}, - block::Block, - mapping::*, - memory::*, - util::*, - }, - gfx_hal::{device::Device as _, Backend}, - hibitset::{BitSet, BitSetLike as _}, -}; - -/// Memory block allocated from `DynamicAllocator` -#[derive(Debug)] -pub struct DynamicBlock { - block_index: u32, - chunk_index: u32, - count: u32, - memory: *const Memory, - ptr: Option>, - range: Range, - relevant: relevant::Relevant, -} - -unsafe impl Send for DynamicBlock where B: Backend {} -unsafe impl Sync for DynamicBlock where B: Backend {} - -impl DynamicBlock -where - B: Backend, -{ - fn shared_memory(&self) -> &Memory { - // Memory won't be freed until last block created from it deallocated. 
- unsafe { &*self.memory } - } - - fn size(&self) -> u64 { - self.range.end - self.range.start - } - - fn dispose(self) { - self.relevant.dispose(); - } -} - -impl Block for DynamicBlock -where - B: Backend, -{ - #[inline] - fn properties(&self) -> gfx_hal::memory::Properties { - self.shared_memory().properties() - } - - #[inline] - fn memory(&self) -> &B::Memory { - self.shared_memory().raw() - } - - #[inline] - fn range(&self) -> Range { - self.range.clone() - } - - #[inline] - fn map<'a>( - &'a mut self, - _device: &B::Device, - range: Range, - ) -> Result, gfx_hal::device::MapError> { - debug_assert!( - range.start < range.end, - "Memory mapping region must have valid size" - ); - if !self.shared_memory().host_visible() { - //TODO: invalid access error - return Err(gfx_hal::device::MapError::MappingFailed); - } - - if let Some(ptr) = self.ptr { - if let Some((ptr, range)) = mapped_sub_range(ptr, self.range.clone(), range) { - let mapping = unsafe { MappedRange::from_raw(self.shared_memory(), ptr, range) }; - Ok(mapping) - } else { - Err(gfx_hal::device::MapError::OutOfBounds) - } - } else { - Err(gfx_hal::device::MapError::MappingFailed) - } - } - - #[inline] - fn unmap(&mut self, _device: &B::Device) {} -} - -/// Config for `DynamicAllocator`. -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct DynamicConfig { - /// All requests are rounded up to multiple of this value. - pub block_size_granularity: u64, - - /// Maximum chunk of blocks size. - /// Actual chunk size is `min(max_chunk_size, block_size * blocks_per_chunk)` - pub max_chunk_size: u64, - - /// Minimum size of device allocation. - pub min_device_allocation: u64, -} - -/// No-fragmentation allocator. -/// Suitable for any type of small allocations. -/// Every freed block can be reused. -#[derive(Debug)] -pub struct DynamicAllocator { - /// Memory type that this allocator allocates. 
- memory_type: gfx_hal::MemoryTypeId, - - /// Memory properties of the memory type. - memory_properties: gfx_hal::memory::Properties, - - /// All requests are rounded up to multiple of this value. - block_size_granularity: u64, - - /// Maximum chunk of blocks size. - max_chunk_size: u64, - - /// Minimum size of device allocation. - min_device_allocation: u64, - - /// Chunk lists. - sizes: HashMap>, - - /// Ordered set of sizes that have allocated chunks. - chunks: BTreeSet, -} - -unsafe impl Send for DynamicAllocator where B: Backend {} -unsafe impl Sync for DynamicAllocator where B: Backend {} - -#[derive(Debug)] -struct SizeEntry { - /// Total count of allocated blocks with size corresponding to this entry. - total_blocks: u64, - - /// Bits per ready (non-exhausted) chunks with free blocks. - ready_chunks: BitSet, - - /// List of chunks. - chunks: slab::Slab>, -} - -impl Default for SizeEntry -where - B: Backend, -{ - fn default() -> Self { - SizeEntry { - chunks: Default::default(), - total_blocks: 0, - ready_chunks: Default::default(), - } - } -} - -const MAX_BLOCKS_PER_CHUNK: u32 = 64; -const MIN_BLOCKS_PER_CHUNK: u32 = 8; - -impl DynamicAllocator -where - B: Backend, -{ - /// Create new `DynamicAllocator` - /// for `memory_type` with `memory_properties` specified, - /// with `DynamicConfig` provided. 
- pub fn new( - memory_type: gfx_hal::MemoryTypeId, - memory_properties: gfx_hal::memory::Properties, - config: DynamicConfig, - ) -> Self { - log::trace!( - "Create new allocator: type: '{:?}', properties: '{:#?}' config: '{:#?}'", - memory_type, - memory_properties, - config - ); - - assert!( - config.block_size_granularity.is_power_of_two(), - "Allocation granularity must be power of two" - ); - - assert!( - config.max_chunk_size.is_power_of_two(), - "Max chunk size must be power of two" - ); - - assert!( - config.min_device_allocation.is_power_of_two(), - "Min device allocation must be power of two" - ); - - assert!( - config.min_device_allocation <= config.max_chunk_size, - "Min device allocation must be less than or equalt to max chunk size" - ); - - if memory_properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE) { - debug_assert!( - fits_usize(config.max_chunk_size), - "Max chunk size must fit usize for mapping" - ); - } - - DynamicAllocator { - memory_type, - memory_properties, - block_size_granularity: config.block_size_granularity, - max_chunk_size: config.max_chunk_size, - min_device_allocation: config.min_device_allocation, - sizes: HashMap::new(), - chunks: BTreeSet::new(), - } - } - - /// Maximum allocation size. - pub fn max_allocation(&self) -> u64 { - self.max_chunk_size / MIN_BLOCKS_PER_CHUNK as u64 - } - - /// Allocate memory chunk from device. - fn alloc_chunk_from_device( - &self, - device: &B::Device, - block_size: u64, - chunk_size: u64, - ) -> Result, gfx_hal::device::AllocationError> { - log::trace!( - "Allocate chunk of size: {} for blocks of size {} from device", - chunk_size, - block_size - ); - - // Allocate from device. - let (memory, mapping) = unsafe { - // Valid memory type specified. 
- let raw = device.allocate_memory(self.memory_type, chunk_size)?; - - let mapping = if self - .memory_properties - .contains(gfx_hal::memory::Properties::CPU_VISIBLE) - { - log::trace!("Map new memory object"); - match device.map_memory(&raw, 0..chunk_size) { - Ok(mapping) => Some(NonNull::new_unchecked(mapping)), - Err(gfx_hal::device::MapError::OutOfMemory(error)) => { - device.free_memory(raw); - return Err(error.into()); - } - Err(_) => panic!("Unexpected mapping failure"), - } - } else { - None - }; - let memory = Memory::from_raw(raw, chunk_size, self.memory_properties); - (memory, mapping) - }; - Ok(Chunk::from_memory(block_size, memory, mapping)) - } - - /// Allocate memory chunk for given block size. - fn alloc_chunk( - &mut self, - device: &B::Device, - block_size: u64, - total_blocks: u64, - ) -> Result<(Chunk, u64), gfx_hal::device::AllocationError> { - log::trace!( - "Allocate chunk for blocks of size {} ({} total blocks allocated)", - block_size, - total_blocks - ); - - let min_chunk_size = MIN_BLOCKS_PER_CHUNK as u64 * block_size; - let min_size = min_chunk_size.min(total_blocks * block_size); - let max_chunk_size = MAX_BLOCKS_PER_CHUNK as u64 * block_size; - - // If smallest possible chunk size is larger then this allocator max allocation - if min_size > self.max_allocation() - || (total_blocks < MIN_BLOCKS_PER_CHUNK as u64 - && min_size >= self.min_device_allocation) - { - // Allocate memory block from device. - let chunk = self.alloc_chunk_from_device(device, block_size, min_size)?; - return Ok((chunk, min_size)); - } - - if let Some(&chunk_size) = self - .chunks - .range(min_chunk_size..=max_chunk_size) - .next_back() - { - // Allocate block for the chunk. 
- let (block, allocated) = self.alloc_from_entry(device, chunk_size, 1, block_size)?; - Ok((Chunk::from_block(block_size, block), allocated)) - } else { - let total_blocks = self.sizes[&block_size].total_blocks; - let chunk_size = - (max_chunk_size.min(min_chunk_size.max(total_blocks * block_size)) / 2 + 1) - .next_power_of_two(); - let (block, allocated) = self.alloc_block(device, chunk_size, block_size)?; - Ok((Chunk::from_block(block_size, block), allocated)) - } - } - - /// Allocate blocks from particular chunk. - fn alloc_from_chunk( - chunks: &mut slab::Slab>, - chunk_index: u32, - block_size: u64, - count: u32, - align: u64, - ) -> Option> { - log::trace!( - "Allocate {} consecutive blocks of size {} from chunk {}", - count, - block_size, - chunk_index - ); - - let ref mut chunk = chunks[chunk_index as usize]; - let block_index = chunk.acquire_blocks(count, block_size, align)?; - let block_range = chunk.blocks_range(block_size, block_index, count); - - debug_assert_eq!((block_range.end - block_range.start) % count as u64, 0); - - Some(DynamicBlock { - range: block_range.clone(), - memory: chunk.shared_memory(), - block_index, - chunk_index, - count, - ptr: chunk.mapping_ptr().map(|ptr| { - mapped_fitting_range(ptr, chunk.range(), block_range) - .expect("Block must be sub-range of chunk") - }), - relevant: relevant::Relevant, - }) - } - - /// Allocate blocks from size entry. 
- fn alloc_from_entry( - &mut self, - device: &B::Device, - block_size: u64, - count: u32, - align: u64, - ) -> Result<(DynamicBlock, u64), gfx_hal::device::AllocationError> { - log::trace!( - "Allocate {} consecutive blocks for size {} from the entry", - count, - block_size - ); - - debug_assert!(count < MIN_BLOCKS_PER_CHUNK); - let size_entry = self.sizes.entry(block_size).or_default(); - - for chunk_index in (&size_entry.ready_chunks).iter() { - if let Some(block) = Self::alloc_from_chunk( - &mut size_entry.chunks, - chunk_index, - block_size, - count, - align, - ) { - return Ok((block, 0)); - } - } - - if size_entry.chunks.vacant_entry().key() > max_chunks_per_size() { - return Err(gfx_hal::device::OutOfMemory::Host.into()); - } - - let total_blocks = size_entry.total_blocks; - let (chunk, allocated) = self.alloc_chunk(device, block_size, total_blocks)?; - let size_entry = self.sizes.entry(block_size).or_default(); - let chunk_index = size_entry.chunks.insert(chunk) as u32; - - let block = Self::alloc_from_chunk( - &mut size_entry.chunks, - chunk_index, - block_size, - count, - align, - ) - .expect("New chunk should yield blocks"); - - if !size_entry.chunks[chunk_index as usize].is_exhausted() { - size_entry.ready_chunks.add(chunk_index); - } - - Ok((block, allocated)) - } - - /// Allocate block. 
- fn alloc_block( - &mut self, - device: &B::Device, - block_size: u64, - align: u64, - ) -> Result<(DynamicBlock, u64), gfx_hal::device::AllocationError> { - log::trace!("Allocate block of size {}", block_size); - - debug_assert_eq!(block_size % self.block_size_granularity, 0); - let size_entry = self.sizes.entry(block_size).or_default(); - size_entry.total_blocks += 1; - - let overhead = (MIN_BLOCKS_PER_CHUNK as u64 - 1) / size_entry.total_blocks; - - if overhead >= 1 { - if let Some(&size) = self - .chunks - .range(block_size / 4..block_size * overhead) - .next() - { - return self.alloc_from_entry( - device, - size, - ((block_size - 1) / size + 1) as u32, - align, - ); - } - } - - if size_entry.total_blocks == MIN_BLOCKS_PER_CHUNK as u64 { - self.chunks.insert(block_size); - } - - self.alloc_from_entry(device, block_size, 1, align) - } - - fn free_chunk(&mut self, device: &B::Device, chunk: Chunk, block_size: u64) -> u64 { - log::trace!("Free chunk: {:#?}", chunk); - assert!(chunk.is_unused(block_size)); - match chunk.flavor { - ChunkFlavor::Dedicated(boxed, _) => { - let size = boxed.size(); - unsafe { - if self - .memory_properties - .contains(gfx_hal::memory::Properties::CPU_VISIBLE) - { - log::trace!("Unmap memory: {:#?}", boxed); - device.unmap_memory(boxed.raw()); - } - device.free_memory(boxed.into_raw()); - } - size - } - ChunkFlavor::Dynamic(dynamic_block) => self.free(device, dynamic_block), - } - } - - fn free_block(&mut self, device: &B::Device, block: DynamicBlock) -> u64 { - log::trace!("Free block: {:#?}", block); - - let block_size = block.size() / block.count as u64; - let ref mut size_entry = self - .sizes - .get_mut(&block_size) - .expect("Unable to get size entry from which block was allocated"); - let chunk_index = block.chunk_index; - let ref mut chunk = size_entry.chunks[chunk_index as usize]; - let block_index = block.block_index; - let count = block.count; - block.dispose(); - chunk.release_blocks(block_index, count); - if 
chunk.is_unused(block_size) { - size_entry.ready_chunks.remove(chunk_index); - let chunk = size_entry.chunks.remove(chunk_index as usize); - self.free_chunk(device, chunk, block_size) - } else { - size_entry.ready_chunks.add(chunk_index); - 0 - } - } - - /// Perform full cleanup of the memory allocated. - pub fn dispose(self) { - if !thread::panicking() { - for (index, size) in self.sizes { - assert_eq!(size.chunks.len(), 0, "SizeEntry({}) is still used", index); - } - } else { - for (index, size) in self.sizes { - if size.chunks.len() != 0 { - log::error!("Memory leak: SizeEntry({}) is still used", index); - } - } - } - } -} - -impl Allocator for DynamicAllocator -where - B: Backend, -{ - type Block = DynamicBlock; - - fn kind() -> Kind { - Kind::Dynamic - } - - fn alloc( - &mut self, - device: &B::Device, - size: u64, - align: u64, - ) -> Result<(DynamicBlock, u64), gfx_hal::device::AllocationError> { - debug_assert!(size <= self.max_allocation()); - debug_assert!(align.is_power_of_two()); - let aligned_size = ((size - 1) | (align - 1) | (self.block_size_granularity - 1)) + 1; - - log::trace!( - "Allocate dynamic block: size: {}, align: {}, aligned size: {}, type: {}", - size, - align, - aligned_size, - self.memory_type.0 - ); - - self.alloc_block(device, aligned_size, align) - } - - fn free(&mut self, device: &B::Device, block: DynamicBlock) -> u64 { - self.free_block(device, block) - } -} - -/// Block allocated for chunk. -#[derive(Debug)] -enum ChunkFlavor { - /// Allocated from device. - Dedicated(Box>, Option>), - - /// Allocated from chunk of bigger blocks. 
- Dynamic(DynamicBlock), -} - -#[derive(Debug)] -struct Chunk { - flavor: ChunkFlavor, - blocks: u64, -} - -impl Chunk -where - B: Backend, -{ - fn from_memory(block_size: u64, memory: Memory, mapping: Option>) -> Self { - let blocks = memory.size() / block_size; - debug_assert!(blocks <= MAX_BLOCKS_PER_CHUNK as u64); - - let high_bit = 1 << (blocks - 1); - - Chunk { - flavor: ChunkFlavor::Dedicated(Box::new(memory), mapping), - blocks: (high_bit - 1) | high_bit, - } - } - - fn from_block(block_size: u64, chunk_block: DynamicBlock) -> Self { - let blocks = (chunk_block.size() / block_size).min(MAX_BLOCKS_PER_CHUNK as u64); - - let high_bit = 1 << (blocks - 1); - - Chunk { - flavor: ChunkFlavor::Dynamic(chunk_block), - blocks: (high_bit - 1) | high_bit, - } - } - - fn shared_memory(&self) -> &Memory { - match &self.flavor { - ChunkFlavor::Dedicated(boxed, _) => &*boxed, - ChunkFlavor::Dynamic(chunk_block) => chunk_block.shared_memory(), - } - } - - fn range(&self) -> Range { - match &self.flavor { - ChunkFlavor::Dedicated(boxed, _) => 0..boxed.size(), - ChunkFlavor::Dynamic(chunk_block) => chunk_block.range(), - } - } - - fn size(&self) -> u64 { - let range = self.range(); - range.end - range.start - } - - // Get block bytes range - fn blocks_range(&self, block_size: u64, block_index: u32, count: u32) -> Range { - let range = self.range(); - let start = range.start + block_size * block_index as u64; - let end = start + block_size * count as u64; - debug_assert!(end <= range.end); - start..end - } - - /// Check if there are free blocks. - fn is_unused(&self, block_size: u64) -> bool { - let blocks = (self.size() / block_size).min(MAX_BLOCKS_PER_CHUNK as u64); - - let high_bit = 1 << (blocks - 1); - let mask = (high_bit - 1) | high_bit; - - debug_assert!(self.blocks <= mask); - self.blocks == mask - } - - /// Check if there are free blocks. 
- fn is_exhausted(&self) -> bool { - self.blocks == 0 - } - - fn acquire_blocks(&mut self, count: u32, block_size: u64, align: u64) -> Option { - debug_assert!(count > 0 && count <= MAX_BLOCKS_PER_CHUNK); - - // Holds a bit-array of all positions with `count` free blocks. - let mut blocks = !0; - for i in 0..count { - blocks &= self.blocks >> i; - } - // Find a position in `blocks` that is aligned. - while blocks != 0 { - let index = blocks.trailing_zeros(); - blocks &= !(1 << index); - - if (index as u64 * block_size) & (align - 1) == 0 { - let mask = ((1 << count) - 1) << index; - self.blocks &= !mask; - return Some(index); - } - } - None - } - - fn release_blocks(&mut self, index: u32, count: u32) { - let mask = ((1 << count) - 1) << index; - debug_assert_eq!(self.blocks & mask, 0); - self.blocks |= mask; - } - - fn mapping_ptr(&self) -> Option> { - match &self.flavor { - ChunkFlavor::Dedicated(_, ptr) => *ptr, - ChunkFlavor::Dynamic(chunk_block) => chunk_block.ptr, - } - } -} - -fn max_chunks_per_size() -> usize { - let value = (std::mem::size_of::() * 8).pow(4); - debug_assert!(fits_u32(value)); - value -} +use crate::{ + allocator::{Allocator, Kind}, + block::Block, + mapping::MappedRange, + memory::Memory, + AtomSize, Size, +}; +use hal::{device::Device as _, Backend}; +use hibitset::{BitSet, BitSetLike as _}; +use std::{ + collections::{BTreeSet, HashMap}, + hash::BuildHasherDefault, + ops::Range, + ptr::NonNull, + sync::Arc, + thread, +}; + +//TODO: const fn +fn max_chunks_per_size() -> usize { + let value = (std::mem::size_of::() * 8).pow(4); + value +} + +/// Memory block allocated from `GeneralAllocator` +#[derive(Debug)] +pub struct GeneralBlock { + block_index: u32, + chunk_index: u32, + count: u32, + memory: Arc>, + ptr: Option>, + range: Range, +} + +unsafe impl Send for GeneralBlock {} +unsafe impl Sync for GeneralBlock {} + +impl GeneralBlock { + /// Get the size of this block. 
+ pub fn size(&self) -> Size { + self.range.end - self.range.start + } +} + +impl Block for GeneralBlock { + fn properties(&self) -> hal::memory::Properties { + self.memory.properties() + } + + fn memory(&self) -> &B::Memory { + self.memory.raw() + } + + fn segment(&self) -> hal::memory::Segment { + hal::memory::Segment { + offset: self.range.start, + size: Some(self.range.end - self.range.start), + } + } + + fn map<'a>( + &'a mut self, + _device: &B::Device, + segment: hal::memory::Segment, + ) -> Result, hal::device::MapError> { + let requested_range = crate::segment_to_sub_range(segment, &self.range)?; + let mapping_range = match self.memory.non_coherent_atom_size { + Some(atom) => crate::align_range(&requested_range, atom), + None => requested_range.clone(), + }; + + Ok(unsafe { + MappedRange::from_raw( + &*self.memory, + self.ptr + .ok_or(hal::device::MapError::MappingFailed)? + .as_ptr() + .offset((mapping_range.start - self.range.start) as isize), + mapping_range, + requested_range, + ) + }) + } +} + +/// Config for `GeneralAllocator`. +#[derive(Clone, Copy, Debug)] +pub struct GeneralConfig { + /// All requests are rounded up to multiple of this value. + pub block_size_granularity: Size, + + /// Maximum chunk of blocks size. + /// Actual chunk size is `min(max_chunk_size, block_size * blocks_per_chunk)` + pub max_chunk_size: Size, + + /// Minimum size of device allocation. + pub min_device_allocation: Size, +} + +/// No-fragmentation allocator. +/// Suitable for any type of small allocations. +/// Every freed block can be reused. +#[derive(Debug)] +pub struct GeneralAllocator { + /// Memory type that this allocator allocates. + memory_type: hal::MemoryTypeId, + + /// Memory properties of the memory type. + memory_properties: hal::memory::Properties, + + /// All requests are rounded up to multiple of this value. + block_size_granularity: Size, + + /// Maximum chunk of blocks size. + max_chunk_size: Size, + + /// Minimum size of device allocation. 
+ min_device_allocation: Size, + + /// Chunk lists. + sizes: HashMap, BuildHasherDefault>, + + /// Ordered set of sizes that have allocated chunks. + chunks: BTreeSet, + + non_coherent_atom_size: Option, +} + +//TODO: ensure Send and Sync +unsafe impl Send for GeneralAllocator {} +unsafe impl Sync for GeneralAllocator {} + +#[derive(Debug)] +struct SizeEntry { + /// Total count of allocated blocks with size corresponding to this entry. + total_blocks: Size, + + /// Bits per ready (non-exhausted) chunks with free blocks. + ready_chunks: BitSet, + + /// List of chunks. + chunks: slab::Slab>, +} + +impl Default for SizeEntry { + fn default() -> Self { + SizeEntry { + chunks: Default::default(), + total_blocks: 0, + ready_chunks: Default::default(), + } + } +} + +const MAX_BLOCKS_PER_CHUNK: u32 = 64; +const MIN_BLOCKS_PER_CHUNK: u32 = 8; + +impl GeneralAllocator { + /// Create new `GeneralAllocator` + /// for `memory_type` with `memory_properties` specified, + /// with `GeneralConfig` provided. 
+ pub fn new( + memory_type: hal::MemoryTypeId, + memory_properties: hal::memory::Properties, + config: &GeneralConfig, + non_coherent_atom_size: Size, + ) -> Self { + log::trace!( + "Create new allocator: type: '{:?}', properties: '{:#?}' config: '{:#?}'", + memory_type, + memory_properties, + config + ); + + assert!( + config.block_size_granularity.is_power_of_two(), + "Allocation granularity must be power of two" + ); + assert!( + config.max_chunk_size.is_power_of_two(), + "Max chunk size must be power of two" + ); + + assert!( + config.min_device_allocation.is_power_of_two(), + "Min device allocation must be power of two" + ); + + assert!( + config.min_device_allocation <= config.max_chunk_size, + "Min device allocation must be less than or equal to max chunk size" + ); + + let (block_size_granularity, non_coherent_atom_size) = + if crate::is_non_coherent_visible(memory_properties) { + let granularity = non_coherent_atom_size + .max(config.block_size_granularity) + .next_power_of_two(); + (granularity, AtomSize::new(non_coherent_atom_size)) + } else { + (config.block_size_granularity, None) + }; + + GeneralAllocator { + memory_type, + memory_properties, + block_size_granularity, + max_chunk_size: config.max_chunk_size, + min_device_allocation: config.min_device_allocation, + sizes: HashMap::default(), + chunks: BTreeSet::new(), + non_coherent_atom_size, + } + } + + /// Maximum allocation size. + pub fn max_allocation(&self) -> Size { + self.max_chunk_size / MIN_BLOCKS_PER_CHUNK as Size + } + + /// Allocate memory chunk from device. + fn alloc_chunk_from_device( + &self, + device: &B::Device, + block_size: Size, + chunk_size: Size, + ) -> Result, hal::device::AllocationError> { + log::trace!( + "Allocate chunk of size: {} for blocks of size {} from device", + chunk_size, + block_size + ); + + let (memory, ptr) = unsafe { + super::allocate_memory_helper( + device, + self.memory_type, + chunk_size, + self.memory_properties, + self.non_coherent_atom_size, + )?
+ }; + + Ok(Chunk::from_memory(block_size, memory, ptr)) + } + + /// Allocate memory chunk for given block size. + fn alloc_chunk( + &mut self, + device: &B::Device, + block_size: Size, + total_blocks: Size, + ) -> Result<(Chunk, Size), hal::device::AllocationError> { + log::trace!( + "Allocate chunk for blocks of size {} ({} total blocks allocated)", + block_size, + total_blocks + ); + + let min_chunk_size = MIN_BLOCKS_PER_CHUNK as Size * block_size; + let min_size = min_chunk_size.min(total_blocks * block_size); + let max_chunk_size = MAX_BLOCKS_PER_CHUNK as Size * block_size; + + // If smallest possible chunk size is larger then this allocator max allocation + if min_size > self.max_allocation() + || (total_blocks < MIN_BLOCKS_PER_CHUNK as Size + && min_size >= self.min_device_allocation) + { + // Allocate memory block from device. + let chunk = self.alloc_chunk_from_device(device, block_size, min_size)?; + return Ok((chunk, min_size)); + } + + let (block, allocated) = match self + .chunks + .range(min_chunk_size..=max_chunk_size) + .next_back() + { + Some(&chunk_size) => { + // Allocate block for the chunk. + self.alloc_from_entry(device, chunk_size, 1, block_size)? + } + None => { + let total_blocks = self.sizes[&block_size].total_blocks; + let chunk_size = + (max_chunk_size.min(min_chunk_size.max(total_blocks * block_size)) / 2 + 1) + .next_power_of_two(); + self.alloc_block(device, chunk_size, block_size)? + } + }; + + Ok((Chunk::from_block(block_size, block), allocated)) + } + + /// Allocate blocks from particular chunk. 
+ fn alloc_from_chunk( + chunks: &mut slab::Slab>, + chunk_index: u32, + block_size: Size, + count: u32, + align: Size, + ) -> Option> { + log::trace!( + "Allocate {} consecutive blocks of size {} from chunk {}", + count, + block_size, + chunk_index + ); + + let ref mut chunk = chunks[chunk_index as usize]; + let block_index = chunk.acquire_blocks(count, block_size, align)?; + let block_range = chunk.blocks_range(block_size, block_index, count); + + debug_assert_eq!((block_range.end - block_range.start) % count as Size, 0); + + Some(GeneralBlock { + range: block_range.clone(), + memory: Arc::clone(chunk.shared_memory()), + block_index, + chunk_index, + count, + ptr: chunk.mapping_ptr().map(|ptr| unsafe { + let offset = (block_range.start - chunk.range().start) as isize; + NonNull::new_unchecked(ptr.as_ptr().offset(offset)) + }), + }) + } + + /// Allocate blocks from size entry. + fn alloc_from_entry( + &mut self, + device: &B::Device, + block_size: Size, + count: u32, + align: Size, + ) -> Result<(GeneralBlock, Size), hal::device::AllocationError> { + log::trace!( + "Allocate {} consecutive blocks for size {} from the entry", + count, + block_size + ); + + debug_assert!(count < MIN_BLOCKS_PER_CHUNK); + let size_entry = self.sizes.entry(block_size).or_default(); + + for chunk_index in (&size_entry.ready_chunks).iter() { + if let Some(block) = Self::alloc_from_chunk( + &mut size_entry.chunks, + chunk_index, + block_size, + count, + align, + ) { + return Ok((block, 0)); + } + } + + if size_entry.chunks.vacant_entry().key() > max_chunks_per_size() { + return Err(hal::device::OutOfMemory::Host.into()); + } + + let total_blocks = size_entry.total_blocks; + let (chunk, allocated) = self.alloc_chunk(device, block_size, total_blocks)?; + log::trace!("\tChunk init mask: 0x{:x}", chunk.blocks); + let size_entry = self.sizes.entry(block_size).or_default(); + let chunk_index = size_entry.chunks.insert(chunk) as u32; + + let block = Self::alloc_from_chunk( + &mut 
size_entry.chunks, + chunk_index, + block_size, + count, + align, + ) + .expect("New chunk should yield blocks"); + + if !size_entry.chunks[chunk_index as usize].is_exhausted() { + size_entry.ready_chunks.add(chunk_index); + } + + Ok((block, allocated)) + } + + /// Allocate block. + fn alloc_block( + &mut self, + device: &B::Device, + block_size: Size, + align: Size, + ) -> Result<(GeneralBlock, Size), hal::device::AllocationError> { + log::trace!("Allocate block of size {}", block_size); + + debug_assert_eq!(block_size % self.block_size_granularity, 0); + let size_entry = self.sizes.entry(block_size).or_default(); + size_entry.total_blocks += 1; + + let overhead = (MIN_BLOCKS_PER_CHUNK as Size - 1) / size_entry.total_blocks; + + if overhead >= 1 { + if let Some(&size) = self + .chunks + .range(block_size / 4..block_size * overhead) + .next() + { + return self.alloc_from_entry( + device, + size, + ((block_size - 1) / size + 1) as u32, + align, + ); + } + } else { + self.chunks.insert(block_size); + } + + self.alloc_from_entry(device, block_size, 1, align) + } + + fn free_chunk(&mut self, device: &B::Device, chunk: Chunk, block_size: Size) -> Size { + log::trace!("Free chunk: {:#?}", chunk); + assert!(chunk.is_unused(block_size)); + match chunk.flavor { + ChunkFlavor::Dedicated { memory, .. 
} => { + let size = memory.size(); + match Arc::try_unwrap(memory) { + Ok(mem) => unsafe { + if mem.is_mappable() { + device.unmap_memory(mem.raw()); + } + device.free_memory(mem.into_raw()); + }, + Err(_) => { + log::error!("Allocated `Chunk` was freed, but memory is still shared and never will be destroyed"); + } + } + size + } + ChunkFlavor::General(block) => self.free(device, block), + } + } + + fn free_block(&mut self, device: &B::Device, block: GeneralBlock) -> Size { + log::trace!("Free block: {:#?}", block); + + let block_size = block.size() / block.count as Size; + let size_entry = self + .sizes + .get_mut(&block_size) + .expect("Unable to get size entry from which block was allocated"); + let chunk_index = block.chunk_index; + let ref mut chunk = size_entry.chunks[chunk_index as usize]; + let block_index = block.block_index; + let count = block.count; + + chunk.release_blocks(block_index, count); + if chunk.is_unused(block_size) { + size_entry.ready_chunks.remove(chunk_index); + let chunk = size_entry.chunks.remove(chunk_index as usize); + drop(block); // it keeps an Arc reference to the chunk + self.free_chunk(device, chunk, block_size) + } else { + size_entry.ready_chunks.add(chunk_index); + 0 + } + } + + /// Free the contents of the allocator. 
+ pub fn clear(&mut self, _device: &B::Device) {} +} + +impl Allocator for GeneralAllocator { + type Block = GeneralBlock; + + const KIND: Kind = Kind::General; + + fn alloc( + &mut self, + device: &B::Device, + size: Size, + align: Size, + ) -> Result<(GeneralBlock, Size), hal::device::AllocationError> { + debug_assert!(align.is_power_of_two()); + let aligned_size = ((size - 1) | (align - 1) | (self.block_size_granularity - 1)) + 1; + let map_aligned_size = match self.non_coherent_atom_size { + Some(atom) => crate::align_size(aligned_size, atom), + None => aligned_size, + }; + + log::trace!( + "Allocate general block: size: {}, align: {}, aligned size: {}, type: {}", + size, + align, + map_aligned_size, + self.memory_type.0 + ); + + self.alloc_block(device, map_aligned_size, align) + } + + fn free(&mut self, device: &B::Device, block: GeneralBlock) -> Size { + self.free_block(device, block) + } +} + +impl Drop for GeneralAllocator { + fn drop(&mut self) { + for (index, size) in self.sizes.drain() { + if !thread::panicking() { + assert_eq!(size.chunks.len(), 0, "SizeEntry({}) is still used", index); + } else { + log::error!("Memory leak: SizeEntry({}) is still used", index); + } + } + } +} + +/// Block allocated for chunk. +#[derive(Debug)] +enum ChunkFlavor { + /// Allocated from device. + Dedicated { + memory: Arc>, + ptr: Option>, + }, + /// Allocated from chunk of bigger blocks. + General(GeneralBlock), +} + +#[derive(Debug)] +struct Chunk { + flavor: ChunkFlavor, + /// A bit mask of block availability. Each bit in 0 .. MAX_BLOCKS_PER_CHUNK + /// corresponds to a block, which is free if the bit is 1. 
+ blocks: u64, +} + +impl Chunk { + fn from_memory(block_size: Size, memory: Memory, ptr: Option>) -> Self { + let blocks = memory.size() / block_size; + debug_assert!(blocks <= MAX_BLOCKS_PER_CHUNK as Size); + + let high_bit = 1 << (blocks - 1); + + Chunk { + flavor: ChunkFlavor::Dedicated { + memory: Arc::new(memory), + ptr, + }, + blocks: (high_bit - 1) | high_bit, + } + } + + fn from_block(block_size: Size, chunk_block: GeneralBlock) -> Self { + let blocks = (chunk_block.size() / block_size).min(MAX_BLOCKS_PER_CHUNK as Size); + + let high_bit = 1 << (blocks - 1); + + Chunk { + flavor: ChunkFlavor::General(chunk_block), + blocks: (high_bit - 1) | high_bit, + } + } + + fn shared_memory(&self) -> &Arc> { + match self.flavor { + ChunkFlavor::Dedicated { ref memory, .. } => memory, + ChunkFlavor::General(ref block) => &block.memory, + } + } + + fn range(&self) -> Range { + match self.flavor { + ChunkFlavor::Dedicated { ref memory, .. } => 0..memory.size(), + ChunkFlavor::General(ref block) => block.range.clone(), + } + } + + // Get block bytes range + fn blocks_range(&self, block_size: Size, block_index: u32, count: u32) -> Range { + let range = self.range(); + let start = range.start + block_size * block_index as Size; + let end = start + block_size * count as Size; + debug_assert!(end <= range.end); + start..end + } + + /// Check if there are free blocks. + fn is_unused(&self, block_size: Size) -> bool { + let range = self.range(); + let blocks = ((range.end - range.start) / block_size).min(MAX_BLOCKS_PER_CHUNK as Size); + + let high_bit = 1 << (blocks - 1); + let mask = (high_bit - 1) | high_bit; + + debug_assert!(self.blocks <= mask); + self.blocks == mask + } + + /// Check if there are free blocks. 
+ fn is_exhausted(&self) -> bool { + self.blocks == 0 + } + + fn acquire_blocks(&mut self, count: u32, block_size: Size, align: Size) -> Option { + debug_assert!(count > 0 && count <= MAX_BLOCKS_PER_CHUNK); + + // Holds a bit-array of all positions with `count` free blocks. + let mut blocks = !0u64; + for i in 0..count { + blocks &= self.blocks >> i; + } + // Find a position in `blocks` that is aligned. + while blocks != 0 { + let index = blocks.trailing_zeros(); + blocks ^= 1 << index; + + if (index as Size * block_size) & (align - 1) == 0 { + let mask = ((1 << count) - 1) << index; + debug_assert_eq!(self.blocks & mask, mask); + self.blocks ^= mask; + log::trace!("Chunk acquire mask: 0x{:x} -> 0x{:x}", mask, self.blocks); + return Some(index); + } + } + None + } + + fn release_blocks(&mut self, index: u32, count: u32) { + debug_assert!(index + count <= MAX_BLOCKS_PER_CHUNK); + let mask = ((1 << count) - 1) << index; + debug_assert_eq!(self.blocks & mask, 0); + self.blocks |= mask; + log::trace!("Chunk release mask: 0x{:x} -> 0x{:x}", mask, self.blocks); + } + + fn mapping_ptr(&self) -> Option> { + match self.flavor { + ChunkFlavor::Dedicated { ptr, .. } => ptr, + ChunkFlavor::General(ref block) => block.ptr, + } + } +} diff --git a/third_party/rust/gfx-memory/src/allocator/linear.rs b/third_party/rust/gfx-memory/src/allocator/linear.rs new file mode 100644 index 000000000000..5c724222613e --- /dev/null +++ b/third_party/rust/gfx-memory/src/allocator/linear.rs @@ -0,0 +1,277 @@ +use crate::{ + allocator::{Allocator, Kind}, + block::Block, + mapping::MappedRange, + memory::Memory, + AtomSize, Size, +}; +use hal::{device::Device as _, Backend}; +use std::{collections::VecDeque, ops::Range, ptr::NonNull, sync::Arc}; + +/// Memory block allocated from `LinearAllocator`. 
+#[derive(Debug)] +pub struct LinearBlock { + memory: Arc>, + linear_index: Size, + ptr: Option>, + range: Range, +} + +unsafe impl Send for LinearBlock {} +unsafe impl Sync for LinearBlock {} + +impl LinearBlock { + /// Get the size of this block. + pub fn size(&self) -> Size { + self.range.end - self.range.start + } +} + +impl Block for LinearBlock { + fn properties(&self) -> hal::memory::Properties { + self.memory.properties() + } + + fn memory(&self) -> &B::Memory { + self.memory.raw() + } + + fn segment(&self) -> hal::memory::Segment { + hal::memory::Segment { + offset: self.range.start, + size: Some(self.range.end - self.range.start), + } + } + + fn map<'a>( + &'a mut self, + _device: &B::Device, + segment: hal::memory::Segment, + ) -> Result, hal::device::MapError> { + let requested_range = crate::segment_to_sub_range(segment, &self.range)?; + + let mapping_range = match self.memory.non_coherent_atom_size { + Some(atom) => crate::align_range(&requested_range, atom), + None => requested_range.clone(), + }; + + Ok(unsafe { + MappedRange::from_raw( + &self.memory, + self.ptr + //TODO: https://github.com/gfx-rs/gfx/issues/3182 + .ok_or(hal::device::MapError::MappingFailed)? + .as_ptr() + .offset((mapping_range.start - self.range.start) as isize), + mapping_range, + requested_range, + ) + }) + } +} + +/// Config for `LinearAllocator`. +#[derive(Clone, Copy, Debug)] +pub struct LinearConfig { + /// Size of the linear chunk. + /// Keep it big. + pub linear_size: Size, +} + +/// Linear allocator that return memory from chunk sequentially. +/// It keeps only number of bytes allocated from each chunk. +/// Once chunk is exhausted it is placed into list. +/// When all blocks allocated from head of that list are freed, +/// head is freed as well. +/// +/// This allocator suites best short-lived types of allocations. +/// Allocation strategy requires minimal overhead and implementation is fast. +/// But holding single block will completely stop memory recycling. 
+#[derive(Debug)] +pub struct LinearAllocator { + memory_type: hal::MemoryTypeId, + memory_properties: hal::memory::Properties, + linear_size: Size, + offset: Size, + lines: VecDeque>, + non_coherent_atom_size: Option, +} + +#[derive(Debug)] +struct Line { + used: Size, + free: Size, + memory: Arc>, + ptr: Option>, +} + +unsafe impl Send for Line {} +unsafe impl Sync for Line {} + +impl LinearAllocator { + /// Create new `LinearAllocator` + /// for `memory_type` with `memory_properties` specified, + /// with `LinearConfig` provided. + pub fn new( + memory_type: hal::MemoryTypeId, + memory_properties: hal::memory::Properties, + config: &LinearConfig, + non_coherent_atom_size: Size, + ) -> Self { + log::trace!( + "Create new 'linear' allocator: type: '{:?}', properties: '{:#?}' config: '{:#?}'", + memory_type, + memory_properties, + config + ); + let (linear_size, non_coherent_atom_size) = + if crate::is_non_coherent_visible(memory_properties) { + let atom = AtomSize::new(non_coherent_atom_size); + (crate::align_size(config.linear_size, atom.unwrap()), atom) + } else { + (config.linear_size, None) + }; + + LinearAllocator { + memory_type, + memory_properties, + linear_size, + offset: 0, + lines: VecDeque::new(), + non_coherent_atom_size, + } + } + + /// Maximum allocation size. 
+ pub fn max_allocation(&self) -> Size { + self.linear_size / 2 + } + + fn cleanup(&mut self, device: &B::Device, off: usize) -> Size { + let mut freed = 0; + while self.lines.len() > off { + if self.lines[0].used > self.lines[0].free { + break; + } + + let line = self.lines.pop_front().unwrap(); + self.offset += 1; + + match Arc::try_unwrap(line.memory) { + Ok(mem) => unsafe { + log::trace!("Freed 'Line' of size of {}", mem.size()); + if mem.is_mappable() { + device.unmap_memory(mem.raw()); + } + freed += mem.size(); + device.free_memory(mem.into_raw()); + }, + Err(_) => { + log::error!("Allocated `Line` was freed, but memory is still shared and never will be destroyed."); + } + } + } + freed + } + + /// Perform full cleanup of the memory allocated. + pub fn clear(&mut self, device: &B::Device) { + let _ = self.cleanup(device, 0); + if !self.lines.is_empty() { + log::error!( + "Lines are not empty during allocator disposal. Lines: {:#?}", + self.lines + ); + } + } +} + +impl Allocator for LinearAllocator { + type Block = LinearBlock; + + const KIND: Kind = Kind::Linear; + + fn alloc( + &mut self, + device: &B::Device, + size: Size, + align: Size, + ) -> Result<(LinearBlock, Size), hal::device::AllocationError> { + let (size, align) = match self.non_coherent_atom_size { + Some(atom) => ( + crate::align_size(size, atom), + crate::align_size(align, atom), + ), + None => (size, align), + }; + + if size > self.linear_size || align > self.linear_size { + //TODO: better error here? 
+ return Err(hal::device::AllocationError::TooManyObjects); + } + + let count = self.lines.len() as Size; + if let Some(line) = self.lines.back_mut() { + let aligned_offset = + crate::align_offset(line.used, unsafe { AtomSize::new_unchecked(align) }); + if aligned_offset + size <= self.linear_size { + line.free += aligned_offset - line.used; + line.used = aligned_offset + size; + + let block = LinearBlock { + linear_index: self.offset + count - 1, + memory: Arc::clone(&line.memory), + ptr: line.ptr.map(|ptr| unsafe { + NonNull::new_unchecked(ptr.as_ptr().offset(aligned_offset as isize)) + }), + range: aligned_offset..aligned_offset + size, + }; + + return Ok((block, 0)); + } + } + + log::trace!("Allocated 'Line' of size of {}", self.linear_size); + let (memory, ptr) = unsafe { + super::allocate_memory_helper( + device, + self.memory_type, + self.linear_size, + self.memory_properties, + self.non_coherent_atom_size, + )? + }; + + let line = Line { + used: size, + free: 0, + ptr, + memory: Arc::new(memory), + }; + + let block = LinearBlock { + linear_index: self.offset + count, + memory: Arc::clone(&line.memory), + ptr, + range: 0..size, + }; + + self.lines.push_back(line); + Ok((block, self.linear_size)) + } + + fn free(&mut self, device: &B::Device, block: Self::Block) -> Size { + let index = (block.linear_index - self.offset) as usize; + self.lines[index].free += block.size(); + drop(block); + self.cleanup(device, 1) + } +} + +impl Drop for LinearAllocator { + fn drop(&mut self) { + if !self.lines.is_empty() { + log::error!("Not all allocation from LinearAllocator was freed"); + } + } +} diff --git a/third_party/rust/gfx-memory/src/allocator/mod.rs b/third_party/rust/gfx-memory/src/allocator/mod.rs new file mode 100644 index 000000000000..f8de742294d5 --- /dev/null +++ b/third_party/rust/gfx-memory/src/allocator/mod.rs @@ -0,0 +1,79 @@ +//! This module provides `Allocator` trait and few allocators that implements the trait. 
+ +mod dedicated; +mod general; +mod linear; + +pub use self::{ + dedicated::{DedicatedAllocator, DedicatedBlock}, + general::{GeneralAllocator, GeneralBlock, GeneralConfig}, + linear::{LinearAllocator, LinearBlock, LinearConfig}, +}; +use crate::{block::Block, memory::Memory, AtomSize, Size}; +use std::ptr::NonNull; + +/// Allocator kind. +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub enum Kind { + /// Memory object per allocation. + Dedicated, + + /// General purpose allocator. + General, + + /// Allocates linearly. + /// Fast and low overhead. + /// Suitable for one-time-use allocations. + Linear, +} + +/// Allocator trait implemented for various allocators. +pub trait Allocator { + /// Block type returned by allocator. + type Block: Block; + + /// Allocator kind. + const KIND: Kind; + + /// Allocate block of memory. + /// On success returns allocated block and amount of memory consumed from device. + fn alloc( + &mut self, + device: &B::Device, + size: Size, + align: Size, + ) -> Result<(Self::Block, Size), hal::device::AllocationError>; + + /// Free block of memory. + /// Returns amount of memory returned to the device. 
+ fn free(&mut self, device: &B::Device, block: Self::Block) -> Size; +} + +unsafe fn allocate_memory_helper( + device: &B::Device, + memory_type: hal::MemoryTypeId, + size: Size, + memory_properties: hal::memory::Properties, + non_coherent_atom_size: Option, +) -> Result<(Memory, Option>), hal::device::AllocationError> { + use hal::device::Device as _; + + let raw = device.allocate_memory(memory_type, size)?; + + let ptr = if memory_properties.contains(hal::memory::Properties::CPU_VISIBLE) { + match device.map_memory(&raw, hal::memory::Segment::ALL) { + Ok(ptr) => NonNull::new(ptr), + Err(hal::device::MapError::OutOfMemory(error)) => { + device.free_memory(raw); + return Err(error.into()); + } + Err(e) => panic!("Unexpected mapping failure: {:?}", e), + } + } else { + None + }; + + let memory = Memory::from_raw(raw, size, memory_properties, non_coherent_atom_size); + + Ok((memory, ptr)) +} diff --git a/third_party/rust/gfx-memory/src/block.rs b/third_party/rust/gfx-memory/src/block.rs new file mode 100644 index 000000000000..2a846e6c9f1b --- /dev/null +++ b/third_party/rust/gfx-memory/src/block.rs @@ -0,0 +1,25 @@ +use crate::mapping::MappedRange; +use hal::memory as m; + +/// Block that owns a `Segment` of the `Memory`. +/// Implementor must ensure that there can't be any other blocks +/// with overlapping range (either through type system or safety notes for unsafe functions). +/// Provides access to safe memory range mapping. +pub trait Block { + /// Get memory properties of the block. + fn properties(&self) -> m::Properties; + + /// Get raw memory object. + fn memory(&self) -> &B::Memory; + + /// Get memory segment owned by this block. + fn segment(&self) -> m::Segment; + + /// Get mapping for the block segment. + /// Memory writes to the region performed by device become available for the host. 
+ fn map<'a>( + &'a mut self, + device: &B::Device, + segment: m::Segment, + ) -> Result, hal::device::MapError>; +} diff --git a/third_party/rust/rendy-memory/src/heaps/heap.rs b/third_party/rust/gfx-memory/src/heaps/heap.rs similarity index 70% rename from third_party/rust/rendy-memory/src/heaps/heap.rs rename to third_party/rust/gfx-memory/src/heaps/heap.rs index 6595cbc83a56..11355d4384ea 100644 --- a/third_party/rust/rendy-memory/src/heaps/heap.rs +++ b/third_party/rust/gfx-memory/src/heaps/heap.rs @@ -1,14 +1,17 @@ -use crate::utilization::*; +use crate::{ + stats::{MemoryHeapUtilization, MemoryUtilization}, + Size, +}; #[derive(Debug)] pub(super) struct MemoryHeap { - size: u64, - used: u64, - effective: u64, + size: Size, + used: Size, + effective: Size, } impl MemoryHeap { - pub(super) fn new(size: u64) -> Self { + pub(super) fn new(size: Size) -> Self { MemoryHeap { size, used: 0, @@ -16,7 +19,7 @@ impl MemoryHeap { } } - pub(super) fn available(&self) -> u64 { + pub(super) fn available(&self) -> Size { if self.used > self.size { log::warn!("Heap size exceeded"); 0 @@ -25,13 +28,13 @@ impl MemoryHeap { } } - pub(super) fn allocated(&mut self, used: u64, effective: u64) { + pub(super) fn allocated(&mut self, used: Size, effective: Size) { self.used += used; self.effective += effective; debug_assert!(self.used >= self.effective); } - pub(super) fn freed(&mut self, used: u64, effective: u64) { + pub(super) fn freed(&mut self, used: Size, effective: Size) { self.used -= used; self.effective -= effective; debug_assert!(self.used >= self.effective); diff --git a/third_party/rust/gfx-memory/src/heaps/memory_type.rs b/third_party/rust/gfx-memory/src/heaps/memory_type.rs new file mode 100644 index 000000000000..fea0e3200acc --- /dev/null +++ b/third_party/rust/gfx-memory/src/heaps/memory_type.rs @@ -0,0 +1,131 @@ +use crate::{ + allocator::*, + stats::MemoryTypeUtilization, MemoryUtilization, + Size, +}; +use hal::memory::Properties; + + +#[derive(Debug)] 
+pub(super) enum BlockFlavor { + Dedicated(DedicatedBlock), + General(GeneralBlock), + Linear(LinearBlock), +} + +impl BlockFlavor { + pub(super) fn size(&self) -> Size { + match self { + BlockFlavor::Dedicated(block) => block.size(), + BlockFlavor::General(block) => block.size(), + BlockFlavor::Linear(block) => block.size(), + } + } +} + +#[derive(Debug)] +pub(super) struct MemoryType { + heap_index: usize, + properties: Properties, + dedicated: DedicatedAllocator, + general: GeneralAllocator, + linear: LinearAllocator, + used: Size, + effective: Size, +} + +impl MemoryType { + pub(super) fn new( + type_id: hal::MemoryTypeId, + hal_memory_type: &hal::adapter::MemoryType, + general_config: &GeneralConfig, + linear_config: &LinearConfig, + non_coherent_atom_size: Size, + ) -> Self { + MemoryType { + heap_index: hal_memory_type.heap_index, + properties: hal_memory_type.properties, + dedicated: DedicatedAllocator::new( + type_id, + hal_memory_type.properties, + non_coherent_atom_size, + ), + general: GeneralAllocator::new( + type_id, + hal_memory_type.properties, + general_config, + non_coherent_atom_size, + ), + linear: LinearAllocator::new( + type_id, + hal_memory_type.properties, + linear_config, + non_coherent_atom_size, + ), + used: 0, + effective: 0, + } + } + + pub(super) fn properties(&self) -> Properties { + self.properties + } + + pub(super) fn heap_index(&self) -> usize { + self.heap_index + } + + pub(super) fn alloc( + &mut self, + device: &B::Device, + kind: Kind, + size: Size, + align: Size, + ) -> Result<(BlockFlavor, Size), hal::device::AllocationError> { + let (block, allocated) = match kind { + Kind::Dedicated => { + self.dedicated + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::Dedicated(block), size)) + } + Kind::General => { + self.general + .alloc(device, size, align) + .map(|(block, size)| (BlockFlavor::General(block), size)) + } + Kind::Linear => { + self.linear + .alloc(device, size, align) + .map(|(block, size)| 
(BlockFlavor::Linear(block), size)) + } + }?; + self.effective += block.size(); + self.used += allocated; + Ok((block, allocated)) + } + + pub(super) fn free(&mut self, device: &B::Device, block: BlockFlavor) -> Size { + match block { + BlockFlavor::Dedicated(block) => self.dedicated.free(device, block), + BlockFlavor::General(block) => self.general.free(device, block), + BlockFlavor::Linear(block) => self.linear.free(device, block), + } + } + + pub(super) fn clear(&mut self, device: &B::Device) { + log::trace!("Dispose memory allocators"); + self.general.clear(device); + self.linear.clear(device); + } + + pub(super) fn utilization(&self) -> MemoryTypeUtilization { + MemoryTypeUtilization { + utilization: MemoryUtilization { + used: self.used, + effective: self.effective, + }, + properties: self.properties, + heap_index: self.heap_index, + } + } +} diff --git a/third_party/rust/gfx-memory/src/heaps/mod.rs b/third_party/rust/gfx-memory/src/heaps/mod.rs new file mode 100644 index 000000000000..4e0f19f024c2 --- /dev/null +++ b/third_party/rust/gfx-memory/src/heaps/mod.rs @@ -0,0 +1,277 @@ +mod heap; +mod memory_type; + +use self::{ + heap::MemoryHeap, + memory_type::{BlockFlavor, MemoryType}, +}; +use crate::{ + allocator::*, block::Block, mapping::MappedRange, stats::TotalMemoryUtilization, + usage::MemoryUsage, Size, +}; + +/// Possible errors returned by `Heaps`. +#[derive(Clone, Debug, PartialEq)] +pub enum HeapsError { + /// Memory allocation failure. + AllocationError(hal::device::AllocationError), + /// No memory types among required for resource with requested properties was found. 
+ NoSuitableMemory(u32, hal::memory::Properties), +} + +impl std::fmt::Display for HeapsError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + HeapsError::AllocationError(e) => write!(f, "{:?}", e), + HeapsError::NoSuitableMemory(e, e2) => write!( + f, + "Memory type among ({}) with properties ({:?}) not found", + e, e2 + ), + } + } +} +impl std::error::Error for HeapsError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match *self { + HeapsError::AllocationError(ref err) => Some(err), + HeapsError::NoSuitableMemory(..) => None, + } + } +} + +impl From for HeapsError { + fn from(error: hal::device::AllocationError) -> Self { + HeapsError::AllocationError(error) + } +} + +impl From for HeapsError { + fn from(error: hal::device::OutOfMemory) -> Self { + HeapsError::AllocationError(error.into()) + } +} + +/// Heaps available on particular physical device. +#[derive(Debug)] +pub struct Heaps { + types: Vec>, + heaps: Vec, +} + +impl Heaps { + /// Initialize the new `Heaps` object. + pub unsafe fn new( + hal_memory_properties: &hal::adapter::MemoryProperties, + config_general: GeneralConfig, + config_linear: LinearConfig, + non_coherent_atom_size: Size, + ) -> Self { + Heaps { + types: hal_memory_properties.memory_types + .iter() + .enumerate() + .map(|(index, mt)| { + assert!(mt.heap_index < hal_memory_properties.memory_heaps.len()); + MemoryType::new( + hal::MemoryTypeId(index), + mt, + &config_general, + &config_linear, + non_coherent_atom_size, + ) + }) + .collect(), + heaps: hal_memory_properties.memory_heaps + .iter() + .map(|&size| MemoryHeap::new(size)) + .collect(), + } + } + + /// Allocate memory block + /// from one of memory types specified by `mask`, + /// for intended `usage`, + /// with `size` + /// and `align` requirements. 
+ pub fn allocate( + &mut self, + device: &B::Device, + mask: u32, + usage: MemoryUsage, + kind: Kind, + size: Size, + align: Size, + ) -> Result, HeapsError> { + let (memory_index, _, _) = { + let suitable_types = self + .types + .iter() + .enumerate() + .filter(|(index, _)| (mask & (1u32 << index)) != 0) + .filter_map(|(index, mt)| { + if mt.properties().contains(usage.properties_required()) { + let fitness = usage.memory_fitness(mt.properties()); + Some((index, mt, fitness)) + } else { + None + } + }); + + if suitable_types.clone().next().is_none() { + return Err(HeapsError::NoSuitableMemory( + mask, + usage.properties_required(), + )); + } + + suitable_types + .filter(|(_, mt, _)| self.heaps[mt.heap_index()].available() > size + align) + .max_by_key(|&(_, _, fitness)| fitness) + .ok_or_else(|| { + log::error!("All suitable heaps are exhausted. {:#?}", self); + hal::device::OutOfMemory::Device + })? + }; + + self.allocate_from(device, memory_index as u32, kind, size, align) + } + + /// Allocate memory block + /// from `memory_index` specified, + /// for intended `usage`, + /// with `size` + /// and `align` requirements. + fn allocate_from( + &mut self, + device: &B::Device, + memory_index: u32, + kind: Kind, + size: Size, + align: Size, + ) -> Result, HeapsError> { + log::trace!( + "Allocate memory block: type '{}', kind '{:?}', size: '{}', align: '{}'", + memory_index, + kind, + size, + align + ); + + let ref mut memory_type = self.types[memory_index as usize]; + let ref mut memory_heap = self.heaps[memory_type.heap_index()]; + + if memory_heap.available() < size { + return Err(hal::device::OutOfMemory::Device.into()); + } + + let (flavor, allocated) = match memory_type.alloc(device, kind, size, align) { + Ok(mapping) => mapping, + Err(e) if kind == Kind::Linear => { + log::warn!("Unable to allocate {:?} with {:?}: {:?}", size, kind, e); + memory_type.alloc(device, Kind::Dedicated, size, align)? 
+ } + Err(e) => return Err(e.into()), + }; + memory_heap.allocated(allocated, flavor.size()); + + Ok(MemoryBlock { + flavor, + memory_index, + }) + } + + /// Free memory block. + /// + /// Memory block must be allocated from this heap. + pub fn free(&mut self, device: &B::Device, block: MemoryBlock) { + let memory_index = block.memory_index; + let size = block.flavor.size(); + log::trace!( + "Free memory block: type '{}', size: '{}'", + memory_index, + size, + ); + + let ref mut memory_type = self.types[memory_index as usize]; + let ref mut memory_heap = self.heaps[memory_type.heap_index()]; + let freed = memory_type.free(device, block.flavor); + memory_heap.freed(freed, size); + } + + /// Clear allocators before dropping. + /// Will panic if memory instances are left allocated. + pub fn clear(&mut self, device: &B::Device) { + for mut mt in self.types.drain(..) { + mt.clear(device) + } + } + + /// Get memory utilization. + pub fn utilization(&self) -> TotalMemoryUtilization { + TotalMemoryUtilization { + heaps: self.heaps.iter().map(MemoryHeap::utilization).collect(), + types: self.types.iter().map(MemoryType::utilization).collect(), + } + } +} + +impl Drop for Heaps { + fn drop(&mut self) { + if !self.types.is_empty() { + log::error!("Heaps still have {:?} types live on drop", self.types.len()); + } + } +} + +/// Memory block allocated from `Heaps`. +#[derive(Debug)] +pub struct MemoryBlock { + flavor: BlockFlavor, + memory_index: u32, +} + +impl MemoryBlock { + /// Get memory type id. 
+ pub fn memory_type(&self) -> u32 { + self.memory_index + } +} + +impl Block for MemoryBlock { + fn properties(&self) -> hal::memory::Properties { + match self.flavor { + BlockFlavor::Dedicated(ref block) => block.properties(), + BlockFlavor::General(ref block) => block.properties(), + BlockFlavor::Linear(ref block) => block.properties(), + } + } + + fn memory(&self) -> &B::Memory { + match self.flavor { + BlockFlavor::Dedicated(ref block) => block.memory(), + BlockFlavor::General(ref block) => block.memory(), + BlockFlavor::Linear(ref block) => block.memory(), + } + } + + fn segment(&self) -> hal::memory::Segment { + match self.flavor { + BlockFlavor::Dedicated(ref block) => block.segment(), + BlockFlavor::General(ref block) => block.segment(), + BlockFlavor::Linear(ref block) => block.segment(), + } + } + + fn map<'a>( + &'a mut self, + device: &B::Device, + segment: hal::memory::Segment, + ) -> Result, hal::device::MapError> { + match self.flavor { + BlockFlavor::Dedicated(ref mut block) => block.map(device, segment), + BlockFlavor::General(ref mut block) => block.map(device, segment), + BlockFlavor::Linear(ref mut block) => block.map(device, segment), + } + } +} diff --git a/third_party/rust/gfx-memory/src/lib.rs b/third_party/rust/gfx-memory/src/lib.rs new file mode 100644 index 000000000000..2904ce8cd35a --- /dev/null +++ b/third_party/rust/gfx-memory/src/lib.rs @@ -0,0 +1,75 @@ +//! GPU memory management +//! + +#![warn( + missing_docs, + trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications +)] +mod allocator; +mod block; +mod heaps; +mod mapping; +mod memory; +mod stats; +mod usage; + +pub use crate::{ + allocator::*, + block::Block, + heaps::{Heaps, HeapsError, MemoryBlock}, + mapping::{MappedRange, Writer}, + memory::Memory, + stats::*, + usage::MemoryUsage, +}; + +use std::ops::Range; + +/// Type for any memory sizes. +pub type Size = u64; +/// Type for non-coherent atom sizes. 
+pub type AtomSize = std::num::NonZeroU64; + +fn is_non_coherent_visible(properties: hal::memory::Properties) -> bool { + properties.contains(hal::memory::Properties::CPU_VISIBLE) + && !properties.contains(hal::memory::Properties::COHERENT) +} + +fn align_range(range: &Range, align: AtomSize) -> Range { + let start = range.start - range.start % align.get(); + let end = ((range.end - 1) / align.get() + 1) * align.get(); + start..end +} + +fn align_size(size: Size, align: AtomSize) -> Size { + ((size - 1) / align.get() + 1) * align.get() +} + +fn align_offset(value: Size, align: AtomSize) -> Size { + debug_assert_eq!(align.get().count_ones(), 1); + if value == 0 { + 0 + } else { + 1 + ((value - 1) | (align.get() - 1)) + } +} + +fn segment_to_sub_range( + segment: hal::memory::Segment, + whole: &Range, +) -> Result, hal::device::MapError> { + let start = whole.start + segment.offset; + match segment.size { + Some(s) if start + s <= whole.end => Ok(start..start + s), + None if start < whole.end => Ok(start..whole.end), + _ => Err(hal::device::MapError::OutOfBounds), + } +} + +fn is_sub_range(sub: &Range, range: &Range) -> bool { + sub.start >= range.start && sub.end <= range.end +} diff --git a/third_party/rust/gfx-memory/src/mapping.rs b/third_party/rust/gfx-memory/src/mapping.rs new file mode 100644 index 000000000000..3648fdf95783 --- /dev/null +++ b/third_party/rust/gfx-memory/src/mapping.rs @@ -0,0 +1,203 @@ +use { + crate::{memory::Memory, Size}, + hal::{device::Device as _, Backend}, + std::{iter, ops::Range, ptr::NonNull, slice}, +}; + +#[derive(Debug)] +struct Flush<'a, B: Backend> { + device: &'a B::Device, + memory: &'a B::Memory, + segment: hal::memory::Segment, +} + +/// Wrapper structure for a mutable slice with deferred +/// flushing for non-coherent memory. +#[derive(Debug)] +pub struct Writer<'a, 'b, T, B: Backend> { + /// Wrapped slice. 
+ pub slice: &'a mut [T], + flush: Option>, +} + +impl Writer<'_, '_, T, B> { + /// Dispose of the wrapper and return a bare mapping pointer. + /// + /// The segment to flush is returned. The user is responsible + /// to flush this segment manually. + pub fn forget(mut self) -> (*mut T, Option) { + (self.slice.as_mut_ptr(), self.flush.take().map(|f| f.segment)) + } +} + +impl<'a, 'b, T, B: Backend> Drop for Writer<'a, 'b, T, B> { + fn drop(&mut self) { + if let Some(f) = self.flush.take() { + unsafe { + f.device + .flush_mapped_memory_ranges(iter::once((f.memory, f.segment))) + .expect("Should flush successfully") + }; + } + } +} + +/// Represents range of the memory mapped to the host. +/// Provides methods for safer host access to the memory. +#[derive(Debug)] +pub struct MappedRange<'a, B: Backend> { + /// Memory object that is mapped. + memory: &'a Memory, + + /// Pointer to range mapped memory. + ptr: NonNull, + + /// Range of mapped memory. + mapping_range: Range, + + /// Mapping range requested by caller. + /// Must be subrange of `mapping_range`. + requested_range: Range, +} + +impl<'a, B: Backend> MappedRange<'a, B> { + /// Construct mapped range from raw mapping + /// + /// # Safety + /// + /// `memory` `range` must be mapped to host memory region pointer by `ptr`. + /// `range` is in memory object space. + /// `ptr` points to the `range.start` offset from memory origin. 
+ pub(crate) unsafe fn from_raw( + memory: &'a Memory, + ptr: *mut u8, + mapping_range: Range, + requested_range: Range, + ) -> Self { + debug_assert!( + mapping_range.start < mapping_range.end, + "Memory mapping region must have valid size" + ); + + debug_assert!( + requested_range.start < requested_range.end, + "Memory mapping region must have valid size" + ); + + match memory.non_coherent_atom_size { + Some(atom) => { + debug_assert_eq!((mapping_range.start % atom.get(), mapping_range.end % atom.get()), (0, 0), + "Bounds of non-coherent memory mapping ranges must be multiple of `Limits::non_coherent_atom_size`", + ); + debug_assert!( + crate::is_sub_range(&requested_range, &mapping_range), + "Requested {:?} must be sub-range of mapping {:?}", + requested_range, + mapping_range, + ); + } + None => { + debug_assert_eq!(mapping_range, requested_range); + } + }; + + MappedRange { + ptr: NonNull::new_unchecked(ptr), + mapping_range, + requested_range, + memory, + } + } + + /// Get pointer to beginning of memory region. + /// i.e. to `range().start` offset from memory origin. + pub fn ptr(&self) -> NonNull { + let offset = (self.requested_range.start - self.mapping_range.start) as isize; + unsafe { NonNull::new_unchecked(self.ptr.as_ptr().offset(offset)) } + } + + /// Get mapped range. + pub fn range(&self) -> Range { + self.requested_range.clone() + } + + /// Return true if the mapped memory is coherent. + pub fn is_coherent(&self) -> bool { + self.memory.non_coherent_atom_size.is_none() + } + + /// Fetch readable slice of sub-range to be read. + /// Invalidating range if memory is not coherent. + /// + /// # Safety + /// + /// * Caller must ensure that device won't write to the memory region until the borrowing ends. + /// * `T` Must be plain-old-data type compatible with data in mapped region. 
+ pub unsafe fn read<'b, T>( + &'b mut self, + device: &B::Device, + segment: hal::memory::Segment, + ) -> Result<&'b [T], hal::device::MapError> + where + 'a: 'b, + T: Copy, + { + let sub_range = crate::segment_to_sub_range(segment, &self.requested_range)?; + + if let Some(atom) = self.memory.non_coherent_atom_size { + let aligned_range = crate::align_range(&sub_range, atom); + let segment = hal::memory::Segment { + offset: aligned_range.start, + size: Some(aligned_range.end - aligned_range.start), + }; + device.invalidate_mapped_memory_ranges(iter::once((self.memory.raw(), segment)))?; + } + + let ptr = self + .ptr + .as_ptr() + .offset((sub_range.start - self.mapping_range.start) as isize); + let size = (sub_range.end - sub_range.start) as usize; + + let (_pre, slice, _post) = slice::from_raw_parts(ptr, size).align_to(); + Ok(slice) + } + + /// Fetch writer to the sub-region. + /// This writer will flush data on drop if written at least once. + /// + /// # Safety + /// + /// * Caller must ensure that device won't write to or read from the memory region. 
+ pub unsafe fn write<'b, T: 'b>( + &'b mut self, + device: &'b B::Device, + segment: hal::memory::Segment, + ) -> Result, hal::device::MapError> + where + 'a: 'b, + T: Copy, + { + let sub_range = crate::segment_to_sub_range(segment, &self.requested_range)?; + let ptr = self + .ptr + .as_ptr() + .offset((sub_range.start - self.mapping_range.start) as isize); + let size = (sub_range.end - sub_range.start) as usize; + + let (_pre, slice, _post) = slice::from_raw_parts_mut(ptr, size).align_to_mut(); + let memory = self.memory.raw(); + let flush = self.memory.non_coherent_atom_size.map(|atom| Flush { + device, + memory, + segment: { + let range = crate::align_range(&sub_range, atom); + hal::memory::Segment { + offset: range.start, + size: Some(range.end - range.start), + } + }, + }); + Ok(Writer { slice, flush }) + } +} diff --git a/third_party/rust/gfx-memory/src/memory.rs b/third_party/rust/gfx-memory/src/memory.rs new file mode 100644 index 000000000000..fe47985701dc --- /dev/null +++ b/third_party/rust/gfx-memory/src/memory.rs @@ -0,0 +1,63 @@ +use crate::{AtomSize, Size}; + +/// Memory object wrapper. +/// Contains size and properties of the memory. +#[derive(Debug)] +pub struct Memory { + raw: B::Memory, + size: Size, + properties: hal::memory::Properties, + pub(crate) non_coherent_atom_size: Option, +} + +impl Memory { + /// Get memory properties. + pub fn properties(&self) -> hal::memory::Properties { + self.properties + } + + /// Get memory size. + pub fn size(&self) -> Size { + self.size + } + + /// Get raw memory. + pub fn raw(&self) -> &B::Memory { + &self.raw + } + + /// Unwrap raw memory. + pub fn into_raw(self) -> B::Memory { + self.raw + } + + /// Create memory from raw object. 
+ /// + /// # Safety + /// + /// TODO: + pub unsafe fn from_raw( + raw: B::Memory, + size: Size, + properties: hal::memory::Properties, + non_coherent_atom_size: Option, + ) -> Self { + debug_assert_eq!( + non_coherent_atom_size.is_some(), + crate::is_non_coherent_visible(properties), + ); + Memory { + properties, + raw, + size, + non_coherent_atom_size, + } + } + + /// Check if this memory is host-visible and can be mapped. + /// Equivalent to `memory.properties().contains(Properties::CPU_VISIBLE)`. + pub fn is_mappable(&self) -> bool { + self.properties + .contains(hal::memory::Properties::CPU_VISIBLE) + } +} diff --git a/third_party/rust/rendy-memory/src/utilization.rs b/third_party/rust/gfx-memory/src/stats.rs similarity index 89% rename from third_party/rust/rendy-memory/src/utilization.rs rename to third_party/rust/gfx-memory/src/stats.rs index 8fe5327aa6b4..6b1ca3fbcc66 100644 --- a/third_party/rust/rendy-memory/src/utilization.rs +++ b/third_party/rust/gfx-memory/src/stats.rs @@ -1,137 +1,140 @@ -use { - colorful::{core::color_string::CString, Color, Colorful as _}, - gfx_hal::memory::Properties, -}; - -/// Memory utilization stats. -#[derive(Clone, Copy, Debug)] -pub struct MemoryUtilization { - /// Total number of bytes allocated. - pub used: u64, - /// Effective number bytes allocated. - pub effective: u64, -} - -/// Memory utilization of one heap. -#[derive(Clone, Copy, Debug)] -pub struct MemoryHeapUtilization { - /// Utilization. - pub utilization: MemoryUtilization, - - /// Memory heap size. - pub size: u64, -} - -/// Memory utilization of one type. -#[derive(Clone, Copy, Debug)] -pub struct MemoryTypeUtilization { - /// Utilization. - pub utilization: MemoryUtilization, - - /// Memory type info. - pub properties: Properties, - - /// Index of heap this memory type uses. - pub heap_index: usize, -} - -/// Total memory utilization. -#[derive(Clone, Debug)] -pub struct TotalMemoryUtilization { - /// Utilization by types. 
- pub types: Vec, - - /// Utilization by heaps. - pub heaps: Vec, -} - -impl std::fmt::Display for TotalMemoryUtilization { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - const MB: u64 = 1024 * 1024; - - writeln!(fmt, "!!! Memory utilization !!!")?; - for (index, heap) in self.heaps.iter().enumerate() { - let size = heap.size; - let MemoryUtilization { used, effective } = heap.utilization; - let usage_basis_points = used * 10000 / size; - let fill = if usage_basis_points > 10000 { - // Shouldn't happen, but just in case. - 50 - } else { - (usage_basis_points / 200) as usize - }; - let effective_basis_points = if used > 0 { - effective * 10000 / used - } else { - 10000 - }; - - let line = ("|".repeat(fill) + &(" ".repeat(50 - fill))) - .gradient_with_color(Color::Green, Color::Red); - writeln!( - fmt, - "Heap {}:\n{:6} / {:<6} or{} {{ effective:{} }} [{}]", - format!("{}", index).magenta(), - format!("{}MB", used / MB), - format!("{}MB", size / MB), - format_basis_points(usage_basis_points), - format_basis_points_inverted(effective_basis_points), - line - )?; - - for ty in self.types.iter().filter(|ty| ty.heap_index == index) { - let properties = ty.properties; - let MemoryUtilization { used, effective } = ty.utilization; - let usage_basis_points = used * 10000 / size; - let effective_basis_points = if used > 0 { - effective * 10000 / used - } else { - 0 - }; - - writeln!( - fmt, - " {:>6} or{} {{ effective:{} }} | {:?}", - format!("{}MB", used / MB), - format_basis_points(usage_basis_points), - format_basis_points_inverted(effective_basis_points), - properties, - )?; - } - } - - Ok(()) - } -} - -fn format_basis_points(basis_points: u64) -> CString { - debug_assert!(basis_points <= 10000); - let s = format!("{:>3}.{:02}%", basis_points / 100, basis_points % 100); - if basis_points > 7500 { - s.red() - } else if basis_points > 5000 { - s.yellow() - } else if basis_points > 2500 { - s.green() - } else if basis_points > 100 { - s.blue() - } 
else { - s.white() - } -} - -fn format_basis_points_inverted(basis_points: u64) -> CString { - debug_assert!(basis_points <= 10000); - let s = format!("{:>3}.{:02}%", basis_points / 100, basis_points % 100); - if basis_points > 9900 { - s.white() - } else if basis_points > 7500 { - s.blue() - } else if basis_points > 5000 { - s.green() - } else if basis_points > 2500 { - s.yellow() - } else { - s.red() - } -} +use crate::Size; +#[cfg(feature = "colorful")] +use colorful::{core::color_string::CString, Color, Colorful as _}; +use hal::memory::Properties; + +/// Memory utilization stats. +#[derive(Clone, Copy, Debug)] +pub struct MemoryUtilization { + /// Total number of bytes allocated. + pub used: Size, + /// Effective number bytes allocated. + pub effective: Size, +} + +/// Memory utilization of one heap. +#[derive(Clone, Copy, Debug)] +pub struct MemoryHeapUtilization { + /// Utilization. + pub utilization: MemoryUtilization, + + /// Memory heap size. + pub size: Size, +} + +/// Memory utilization of one type. +#[derive(Clone, Copy, Debug)] +pub struct MemoryTypeUtilization { + /// Utilization. + pub utilization: MemoryUtilization, + + /// Memory type info. + pub properties: Properties, + + /// Index of heap this memory type uses. + pub heap_index: usize, +} + +/// Total memory utilization. +#[derive(Clone, Debug)] +pub struct TotalMemoryUtilization { + /// Utilization by types. + pub types: Vec, + + /// Utilization by heaps. + pub heaps: Vec, +} + +#[cfg(feature = "colorful")] +impl std::fmt::Display for TotalMemoryUtilization { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + const MB: Size = 1024 * 1024; + + writeln!(fmt, "!!! Memory utilization !!!")?; + for (index, heap) in self.heaps.iter().enumerate() { + let size = heap.size; + let MemoryUtilization { used, effective } = heap.utilization; + let usage_basis_points = used * 10000 / size; + let fill = if usage_basis_points > 10000 { + // Shouldn't happen, but just in case. 
+ 50 + } else { + (usage_basis_points / 200) as usize + }; + let effective_basis_points = if used > 0 { + effective * 10000 / used + } else { + 10000 + }; + + let line = ("|".repeat(fill) + &(" ".repeat(50 - fill))) + .gradient_with_color(Color::Green, Color::Red); + writeln!( + fmt, + "Heap {}:\n{:6} / {:<6} or{} {{ effective:{} }} [{}]", + format!("{}", index).magenta(), + format!("{}MB", used / MB), + format!("{}MB", size / MB), + format_basis_points(usage_basis_points), + format_basis_points_inverted(effective_basis_points), + line + )?; + + for ty in self.types.iter().filter(|ty| ty.heap_index == index) { + let properties = ty.properties; + let MemoryUtilization { used, effective } = ty.utilization; + let usage_basis_points = used * 10000 / size; + let effective_basis_points = if used > 0 { + effective * 10000 / used + } else { + 0 + }; + + writeln!( + fmt, + " {:>6} or{} {{ effective:{} }} | {:?}", + format!("{}MB", used / MB), + format_basis_points(usage_basis_points), + format_basis_points_inverted(effective_basis_points), + properties, + )?; + } + } + + Ok(()) + } +} + +#[cfg(feature = "colorful")] +fn format_basis_points(basis_points: Size) -> CString { + debug_assert!(basis_points <= 10000); + let s = format!("{:>3}.{:02}%", basis_points / 100, basis_points % 100); + if basis_points > 7500 { + s.red() + } else if basis_points > 5000 { + s.yellow() + } else if basis_points > 2500 { + s.green() + } else if basis_points > 100 { + s.blue() + } else { + s.white() + } +} + +#[cfg(feature = "colorful")] +fn format_basis_points_inverted(basis_points: Size) -> CString { + debug_assert!(basis_points <= 10000); + let s = format!("{:>3}.{:02}%", basis_points / 100, basis_points % 100); + if basis_points > 9900 { + s.white() + } else if basis_points > 7500 { + s.blue() + } else if basis_points > 5000 { + s.green() + } else if basis_points > 2500 { + s.yellow() + } else { + s.red() + } +} diff --git a/third_party/rust/gfx-memory/src/usage.rs 
b/third_party/rust/gfx-memory/src/usage.rs new file mode 100644 index 000000000000..74b586f30102 --- /dev/null +++ b/third_party/rust/gfx-memory/src/usage.rs @@ -0,0 +1,64 @@ +//! Defines usage types for memory blocks. +//! See `Usage` and implementations for details. + +use hal::memory as m; + +/// Scenarios of how resources use memory. +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum MemoryUsage { + /// Full speed GPU access. + /// Optimal for render targets and persistent resources. + /// Avoid memory with host access. + Private, + /// CPU to GPU data flow with update commands. + /// Used for dynamic buffer data, typically constant buffers. + /// Host access is guaranteed. + /// Prefers memory with fast GPU access. + Dynamic { + /// Optimize for multiple disjoint small portions to be updated, + /// as opposed to big linear chunks of memory. + sparse_updates: bool, + }, + /// CPU to GPU data flow with mapping. + /// Used for staging data before copying to the `Data` memory. + /// Host access is guaranteed. + Staging { + /// Optimize for reading back from GPU. + read_back: bool, + }, +} + +impl MemoryUsage { + /// Set of required memory properties for this usage. + pub fn properties_required(&self) -> m::Properties { + match *self { + MemoryUsage::Private => m::Properties::DEVICE_LOCAL, + MemoryUsage::Dynamic { .. } | MemoryUsage::Staging { .. 
} => m::Properties::CPU_VISIBLE, + } + } + + pub(crate) fn memory_fitness(&self, properties: m::Properties) -> u32 { + match *self { + MemoryUsage::Private => { + assert!(properties.contains(m::Properties::DEVICE_LOCAL)); + 0 | (!properties.contains(m::Properties::CPU_VISIBLE) as u32) << 3 + | (!properties.contains(m::Properties::LAZILY_ALLOCATED) as u32) << 2 + | (!properties.contains(m::Properties::CPU_CACHED) as u32) << 1 + | (!properties.contains(m::Properties::COHERENT) as u32) << 0 + } + MemoryUsage::Dynamic { sparse_updates } => { + assert!(properties.contains(m::Properties::CPU_VISIBLE)); + assert!(!properties.contains(m::Properties::LAZILY_ALLOCATED)); + 0 | (properties.contains(m::Properties::DEVICE_LOCAL) as u32) << 2 + | ((properties.contains(m::Properties::COHERENT) == sparse_updates) as u32) << 1 + | (!properties.contains(m::Properties::CPU_CACHED) as u32) << 0 + } + MemoryUsage::Staging { read_back } => { + assert!(properties.contains(m::Properties::CPU_VISIBLE)); + assert!(!properties.contains(m::Properties::LAZILY_ALLOCATED)); + 0 | ((properties.contains(m::Properties::CPU_CACHED) == read_back) as u32) << 1 + | (!properties.contains(m::Properties::DEVICE_LOCAL) as u32) << 0 + } + } + } +} diff --git a/third_party/rust/libloading/.cargo-checksum.json b/third_party/rust/libloading/.cargo-checksum.json index a9556779af13..6a7f51d63560 100644 --- a/third_party/rust/libloading/.cargo-checksum.json +++ b/third_party/rust/libloading/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"5bc50b6714c71bebc3b616d276e3c73c34ebd375ec99fcaba76b29ac5685e696","LICENSE":"b29f8b01452350c20dd1af16ef83b598fea3053578ccc1c7a0ef40e57be2620f","README.mkd":"b4cd83f110d01dc5aa8fcaf3da34bdbe1478efdba767d73abc14d4d87e4775fa","appveyor.yml":"8382c7f1769f6cf78029a221058c4d73f35a48308b5dfc38d875facabec1c139","build.rs":"dd60f6fc4cef3f02f56b74b6f03ed665ee8ce47e4040e2919282289affa1aca8","src/changelog.rs":"1ac991741280fdd9c5268d617ae7279d08146cfb3222b86e32315ff90b392598","src/lib.rs":"c17a20b5b9125b5a6d7b0913c35448ee688795f3e03f7bf1bf0aaf3e7e4e729e","src/os/mod.rs":"51d733e5522dacd6069642ad66aa6d7acf6c82950c934eb040e8dfd112e6d610","src/os/unix/global_static.c":"b1096dedf7d4aed5c28b658fc917f6603339ffd92390c84e25cb543bdc9460ac","src/os/unix/mod.rs":"717c09d09c20b14b8b8344899ffc3448ef23cf230ec61536a9fd53e8900d05ee","src/os/windows/mod.rs":"0b648941117a1573501ce1be5fcd11416361e1fe9c19a49f0826569a6cbc400a","src/test_helpers.rs":"3a55052e8cd5231e97d9282b43398c2f144c57ced2d2df64bde7f482f5c778e7","src/util.rs":"0b0155448a26db4b00b2a6ca129e0e1f6f75870c56c9777d262941818c7581b7","tests/functions.rs":"4633f3673db6a5b3623ea8927b13314c25502c9fbb63bb17a5a35650ea489012","tests/markers.rs":"8e9c1b883404d9190e4f23ed39b3d6cbbccb3a07883f733b04aed4357b9c6aca","tests/nagisa32.dll":"5c69b2bd9c8a6ad04165c221075fc9fade1dd66ca697399ace528a5a62328e36","tests/nagisa64.dll":"e20b95e3036f3289421abd100760874d4f455afd33c3b5b64fec56b191f7d477","tests/windows.rs":"7711dfe19062d91356cd127546542b1b6e13aeef76ad3098f32c8a6ae319b66a"},"package":"9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2"} \ No newline at end of file 
+{"files":{"Cargo.toml":"9110a58fe827a68e5df22f8d38e4beab38c259724942e868c5ae3debc2f0ebae","LICENSE":"b29f8b01452350c20dd1af16ef83b598fea3053578ccc1c7a0ef40e57be2620f","README.mkd":"b4cd83f110d01dc5aa8fcaf3da34bdbe1478efdba767d73abc14d4d87e4775fa","appveyor.yml":"8382c7f1769f6cf78029a221058c4d73f35a48308b5dfc38d875facabec1c139","build.rs":"d8f7fce1b459d117cd48d85ba3643124bd09657a0df9e0e90a1fd997decff741","src/changelog.rs":"e8a769578ebe2db81055b131ce12fa14c9ad0f21a79035748f244e5b347b2ada","src/lib.rs":"0cc0f6b42c98c14183dea2bc9deaf5aa574fabbe61081fe3339d74430f25fc12","src/os/mod.rs":"51d733e5522dacd6069642ad66aa6d7acf6c82950c934eb040e8dfd112e6d610","src/os/unix/global_static.c":"b1096dedf7d4aed5c28b658fc917f6603339ffd92390c84e25cb543bdc9460ac","src/os/unix/mod.rs":"9a84c15d0b9e5125a6ca086854a0e18884cb6c04cea54f47f1a44243e69335c2","src/os/windows/mod.rs":"c0ee0068a0564d64b7f3d3053d799492693c34571a935fc893a41a62a86fccdd","src/test_helpers.rs":"3a55052e8cd5231e97d9282b43398c2f144c57ced2d2df64bde7f482f5c778e7","src/util.rs":"5d1d3fcf7e5e9dc67df0dbf91332c5e3f5875e90c8f80ada5cfad0bc3c402d7e","tests/functions.rs":"4633f3673db6a5b3623ea8927b13314c25502c9fbb63bb17a5a35650ea489012","tests/markers.rs":"8e9c1b883404d9190e4f23ed39b3d6cbbccb3a07883f733b04aed4357b9c6aca","tests/nagisa32.dll":"5c69b2bd9c8a6ad04165c221075fc9fade1dd66ca697399ace528a5a62328e36","tests/nagisa64.dll":"e20b95e3036f3289421abd100760874d4f455afd33c3b5b64fec56b191f7d477","tests/windows.rs":"7711dfe19062d91356cd127546542b1b6e13aeef76ad3098f32c8a6ae319b66a"},"package":"f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753"} \ No newline at end of file diff --git a/third_party/rust/libloading/Cargo.toml b/third_party/rust/libloading/Cargo.toml index ccb1134bd1e9..f9b19a8f51dd 100644 --- a/third_party/rust/libloading/Cargo.toml +++ b/third_party/rust/libloading/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal 
compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "libloading" -version = "0.5.0" +version = "0.5.2" authors = ["Simonas Kazlauskas "] build = "build.rs" description = "A safer binding to platform’s dynamic library loading utilities" diff --git a/third_party/rust/libloading/build.rs b/third_party/rust/libloading/build.rs index 88c8c5fa44a7..fc380a7450a0 100644 --- a/third_party/rust/libloading/build.rs +++ b/third_party/rust/libloading/build.rs @@ -14,6 +14,7 @@ fn main(){ // What happens if the executable is not linked up dynamically? Ok("openbsd") | Ok("bitrig") | Ok("netbsd") | Ok("macos") | Ok("ios") => {} Ok("solaris") => {} + Ok("haiku") => {} // dependencies come with winapi Ok("windows") => {} tos => { diff --git a/third_party/rust/libloading/src/changelog.rs b/third_party/rust/libloading/src/changelog.rs index 3e63f02d5acd..215ea89f741d 100644 --- a/third_party/rust/libloading/src/changelog.rs +++ b/third_party/rust/libloading/src/changelog.rs @@ -1,5 +1,15 @@ //! Project changelog +/// Release 0.5.2 (2019-07-07) +/// +/// * Added API to convert OS-specific `Library` and `Symbol` conversion to underlying resources. +pub mod r0_5_2 {} + +/// Release 0.5.1 (2019-06-01) +/// +/// * Build on Haiku targets. +pub mod r0_5_1 {} + /// Release 0.5.0 (2018-01-11) /// /// * Update to `winapi = ^0.3`; @@ -7,7 +17,7 @@ /// ## Breaking changes /// /// * libloading now requires a C compiler to build on UNIX; -/// * This is a temporary measure until the [`linkage`] attribute is stablised; +/// * This is a temporary measure until the [`linkage`] attribute is stabilised; /// * Necessary to resolve [#32]. 
/// /// [`linkage`]: https://github.com/rust-lang/rust/issues/29603 @@ -79,7 +89,7 @@ pub mod r0_3_1 {} /// The last two additions focus on not restricting potential usecases of this library, allowing /// users of the library to circumvent safety checks if need be. /// -/// ## Beaking Changes +/// ## Breaking Changes /// /// `Library::new` defaults to `RTLD_NOW` instead of `RTLD_LAZY` on UNIX for more consistent /// cross-platform behaviour. If a library loaded with `Library::new` had any linking errors, but diff --git a/third_party/rust/libloading/src/lib.rs b/third_party/rust/libloading/src/lib.rs index 75f55957a96e..a3bfe102866a 100644 --- a/third_party/rust/libloading/src/lib.rs +++ b/third_party/rust/libloading/src/lib.rs @@ -13,7 +13,7 @@ //! //! # Usage //! -//! Add dependency to this library to your `Cargo.toml`: +//! Add a dependency on this library to your `Cargo.toml`: //! //! ```toml //! [dependencies] diff --git a/third_party/rust/libloading/src/os/unix/mod.rs b/third_party/rust/libloading/src/os/unix/mod.rs index a6f6f12ff412..d0456dd55a32 100644 --- a/third_party/rust/libloading/src/os/unix/mod.rs +++ b/third_party/rust/libloading/src/os/unix/mod.rs @@ -194,6 +194,29 @@ impl Library { Ok(x) => Ok(x) } } + + /// Convert the `Library` to a raw handle. + /// + /// The handle returned by this function shall be usable with APIs which accept handles + /// as returned by `dlopen`. + pub fn into_raw(self) -> *mut raw::c_void { + let handle = self.handle; + mem::forget(self); + handle + } + + /// Convert a raw handle returned by `dlopen`-family of calls to a `Library`. + /// + /// ## Unsafety + /// + /// The pointer shall be a result of a successful call of the `dlopen`-family of functions or a + /// pointer previously returned by `Library::into_raw` call. It must be valid to call `dlclose` + /// with this pointer as an argument. 
+ pub unsafe fn from_raw(handle: *mut raw::c_void) -> Library { + Library { + handle: handle + } + } } impl Drop for Library { @@ -221,6 +244,15 @@ pub struct Symbol { pd: marker::PhantomData } +impl Symbol { + /// Convert the loaded Symbol into a raw pointer. + pub fn into_raw(self) -> *mut raw::c_void { + let pointer = self.pointer; + mem::forget(self); + pointer + } +} + impl Symbol> { /// Lift Option out of the symbol. pub fn lift_option(self) -> Option> { diff --git a/third_party/rust/libloading/src/os/windows/mod.rs b/third_party/rust/libloading/src/os/windows/mod.rs index d2fa0f4d1d27..8157eaba1d00 100644 --- a/third_party/rust/libloading/src/os/windows/mod.rs +++ b/third_party/rust/libloading/src/os/windows/mod.rs @@ -111,6 +111,23 @@ impl Library { panic!("GetProcAddress failed but GetLastError did not report the error") )) } + + /// Convert the `Library` to a raw handle. + pub fn into_raw(self) -> HMODULE { + let handle = self.0; + mem::forget(self); + handle + } + + /// Convert a raw handle to a `Library`. + /// + /// ## Unsafety + /// + /// The handle shall be a result of a successful call of `LoadLibraryW` or a + /// handle previously returned by the `Library::into_raw` call. + pub unsafe fn from_raw(handle: HMODULE) -> Library { + Library(handle) + } } impl Drop for Library { @@ -150,6 +167,15 @@ pub struct Symbol { pd: marker::PhantomData } +impl Symbol { + /// Convert the loaded Symbol into a handle. + pub fn into_raw(self) -> FARPROC { + let pointer = self.pointer; + mem::forget(self); + pointer + } +} + impl Symbol> { /// Lift Option out of the symbol. 
pub fn lift_option(self) -> Option> { @@ -225,7 +251,7 @@ impl ErrorModeGuard { // T1: SetErrorMode(old_mode) # not SEM_FAILCE // T2: SetErrorMode(SEM_FAILCE) # restores to SEM_FAILCE on drop // - // This is still somewhat racy in a sense that T1 might resture the error + // This is still somewhat racy in a sense that T1 might restore the error // mode before T2 finishes loading the library, but that is less of a // concern – it will only end up in end user seeing a dialog. // diff --git a/third_party/rust/libloading/src/util.rs b/third_party/rust/libloading/src/util.rs index caa95b63df46..650266e1df1a 100644 --- a/third_party/rust/libloading/src/util.rs +++ b/third_party/rust/libloading/src/util.rs @@ -33,7 +33,7 @@ impl ::std::fmt::Display for NullError { } } -/// Checks for last byte and avoids alocatting if its zero. +/// Checks for last byte and avoids allocating if it is zero. /// /// Non-last null bytes still result in an error. pub fn cstr_cow_from_bytes<'a>(slice: &'a [u8]) -> Result, NullError> { diff --git a/third_party/rust/peek-poke-derive/.cargo-checksum.json b/third_party/rust/peek-poke-derive/.cargo-checksum.json index 433df168e554..10107f416e9b 100644 --- a/third_party/rust/peek-poke-derive/.cargo-checksum.json +++ b/third_party/rust/peek-poke-derive/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"COPYRIGHT":"58daeef13dbf0d72acb4899f2497d2cd2c540806eaa6531e58ed1bc98f8040e9","Cargo.toml":"5b345964cd485bdcd079ec27fbc0d8f5d530984412e99df3f3edf4ea27fbe94e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"41339bf57e18b824d8e6028e4f639271e88d75dc0441e09a72c04d36178649f8","README.md":"fa1190ee4345c4b97e280d8b2e37c4adb0a2e0d9ef1371e35684b5e57a3ee76c","src/lib.rs":"26760428abfce60c5704bc9d74cffb99c00f16ad0641ba9f1fd09dc87b5221bb","src/max_size_expr.rs":"18db4b3b55206dd5f413c1a7db575d6039dc14a7fb548e47300d3ae86db203e0","src/peek_from_expr.rs":"b9140b787c76a27ebeb3cd3aa1d4dddcc2005efb65173a50845d815d1fc66cf0","src/peek_poke.rs":"e123680e644ecb295c657362513ae0b0e8f2bf65bc0f64b1eb59a46ffec77419","src/poke_into_expr.rs":"9e6679ef1b10706eb437575aa119907d26f7951d8c7c562ebb119805e97978da"},"package":null} \ No newline at end of file +{"files":{"Cargo.toml":"65e4cc5be87e52517c63837790ec2037442183f0a08f03cba8577635b5af5eff","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"41339bf57e18b824d8e6028e4f639271e88d75dc0441e09a72c04d36178649f8","README.md":"d8e72ee5467fed029b7d2b57725e3ab28d83043811edb3817622d0bc75d70225","src/lib.rs":"ad77182eb4d96318610d945c7ce86efe039310a5111c1c1d6b1235f165776c4e"},"package":"6fb44a25c5bba983be0fc8592dfaf3e6d0935ce8be0c6b15b2a39507af34a926"} \ No newline at end of file diff --git a/third_party/rust/peek-poke-derive/COPYRIGHT b/third_party/rust/peek-poke-derive/COPYRIGHT deleted file mode 100644 index 7ff03cd7636b..000000000000 --- a/third_party/rust/peek-poke-derive/COPYRIGHT +++ /dev/null @@ -1,14 +0,0 @@ -Except as otherwise noted (below and/or in individual files), peek-poke is -licensed under the Apache License, Version 2.0 or - or the MIT license - or , at your option. - -peek-poke includes packages written by third parties. 
-The following third party packages are included, and carry -their own copyright notices and license terms: - -* Portions of the peek-poke-derive code for generating PeekPoke trait is - derived from desse-derive, which is dual licensed under Apache License, - Version 2.0 or the MIT license , at your option. - - Copyright (c) 2019, Devashish Dixit. diff --git a/third_party/rust/peek-poke-derive/Cargo.toml b/third_party/rust/peek-poke-derive/Cargo.toml index 4d0ec42fb0cb..6678c38eae14 100644 --- a/third_party/rust/peek-poke-derive/Cargo.toml +++ b/third_party/rust/peek-poke-derive/Cargo.toml @@ -1,15 +1,38 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] -name = "peek-poke-derive" -version = "0.2.0" -authors = ["Dan Glastonbury "] -license = "MIT/Apache-2.0" edition = "2018" +name = "peek-poke-derive" +version = "0.2.1" +authors = ["Dan Glastonbury "] +description = "Derive macro for peek-poke." 
+license = "MIT/Apache-2.0" +repository = "https://github.com/servo/webrender" [lib] doctest = false proc-macro = true +[dependencies.proc-macro2] +version = "1" -[dependencies] -proc-macro2 = "1" -quote = "1" -syn = "1" +[dependencies.quote] +version = "1" + +[dependencies.syn] +version = "1" + +[dependencies.synstructure] +version = "0.12" + +[dependencies.unicode-xid] +version = "0.2" diff --git a/third_party/rust/peek-poke-derive/README.md b/third_party/rust/peek-poke-derive/README.md index f786e08d4ee2..1c379aa1211f 100644 --- a/third_party/rust/peek-poke-derive/README.md +++ b/third_party/rust/peek-poke-derive/README.md @@ -47,8 +47,7 @@ Licensed under either of at your option. -see [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT), and -[COPYRIGHT](COPYRIGHT) for details. +see [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT) for details. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as diff --git a/third_party/rust/peek-poke-derive/src/lib.rs b/third_party/rust/peek-poke-derive/src/lib.rs index 94ee200f19c8..7000f28bf15c 100644 --- a/third_party/rust/peek-poke-derive/src/lib.rs +++ b/third_party/rust/peek-poke-derive/src/lib.rs @@ -1,33 +1,266 @@ -extern crate proc_macro; +// Copyright 2019 The Servo Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
-use proc_macro::TokenStream; -use syn::{parse_macro_input, DeriveInput}; +use proc_macro2::{Span, TokenStream}; +use quote::quote; +use syn::{Ident, Index, TraitBound}; +use synstructure::{decl_derive, Structure, BindStyle, AddBounds}; +use unicode_xid::UnicodeXID; -mod max_size_expr; -mod peek_from_expr; -mod peek_poke; -mod poke_into_expr; - -#[proc_macro_derive(PeekPoke)] -pub fn peek_poke_macro_derive(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - peek_poke::get_peek_poke_impl(input).into() +// Internal method for sanitizing an identifier for hygiene purposes. +fn sanitize_ident(s: &str) -> Ident { + let mut res = String::with_capacity(s.len()); + for mut c in s.chars() { + if !UnicodeXID::is_xid_continue(c) { + c = '_' + } + // Deduplicate consecutive _ characters. + if res.ends_with('_') && c == '_' { + continue; + } + res.push(c); + } + Ident::new(&res, Span::call_site()) } -#[proc_macro_derive(Poke)] -pub fn poke_macro_derive(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - peek_poke::get_poke_impl(input).into() +/// Calculates size type for number of variants (used for enums) +fn get_discriminant_size_type(len: usize) -> TokenStream { + if len <= ::max_value() as usize { + quote! { u8 } + } else if len <= ::max_value() as usize { + quote! { u16 } + } else { + quote! { u32 } + } } -#[proc_macro_derive(PeekCopy)] -pub fn peek_copy_macro_derive(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - peek_poke::get_peek_copy_impl(input).into() +fn is_struct(s: &Structure) -> bool { + // a single variant with no prefix is 'struct' + match &s.variants()[..] 
{ + [v] if v.prefix.is_none() => true, + _ => false, + } } -#[proc_macro_derive(PeekDefault)] -pub fn peek_default_macro_derive(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - peek_poke::get_peek_default_impl(input).into() +fn derive_max_size(s: &Structure) -> TokenStream { + let max_size = s.variants().iter().fold(quote!(0), |acc, vi| { + let variant_size = vi.bindings().iter().fold(quote!(0), |acc, bi| { + // compute size of each variant by summing the sizes of its bindings + let ty = &bi.ast().ty; + quote!(#acc + <#ty>::max_size()) + }); + + // find the maximum of each variant + quote! { + max(#acc, #variant_size) + } + }); + + let body = if is_struct(s) { + max_size + } else { + let discriminant_size_type = get_discriminant_size_type(s.variants().len()); + quote! { + #discriminant_size_type ::max_size() + #max_size + } + }; + + quote! { + #[inline(always)] + fn max_size() -> usize { + use std::cmp::max; + #body + } + } } + +fn derive_peek_from_for_enum(s: &mut Structure) -> TokenStream { + assert!(!is_struct(s)); + s.bind_with(|_| BindStyle::Move); + + let num_variants = s.variants().len(); + let discriminant_size_type = get_discriminant_size_type(num_variants); + let body = s + .variants() + .iter() + .enumerate() + .fold(quote!(), |acc, (i, vi)| { + let bindings = vi + .bindings() + .iter() + .map(|bi| quote!(#bi)) + .collect::>(); + + let variant_pat = Index::from(i); + let poke_exprs = bindings.iter().fold(quote!(), |acc, bi| { + quote! { + #acc + let (#bi, bytes) = peek_poke::peek_from_default(bytes); + } + }); + let construct = vi.construct(|_, i| { + let bi = &bindings[i]; + quote!(#bi) + }); + + quote! { + #acc + #variant_pat => { + #poke_exprs + *output = #construct; + bytes + } + } + }); + + let type_name = s.ast().ident.to_string(); + let max_tag_value = num_variants - 1; + + quote! 
{ + #[inline(always)] + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + let (variant, bytes) = peek_poke::peek_from_default::<#discriminant_size_type>(bytes); + match variant { + #body + out_of_range_tag => { + panic!("WRDL: memory corruption detected while parsing {} - enum tag should be <= {}, but was {}", + #type_name, #max_tag_value, out_of_range_tag); + } + } + } + } +} + +fn derive_peek_from_for_struct(s: &mut Structure) -> TokenStream { + assert!(is_struct(&s)); + + s.variants_mut()[0].bind_with(|_| BindStyle::RefMut); + let pat = s.variants()[0].pat(); + let peek_exprs = s.variants()[0].bindings().iter().fold(quote!(), |acc, bi| { + let ty = &bi.ast().ty; + quote! { + #acc + let bytes = <#ty>::peek_from(bytes, #bi); + } + }); + + let body = quote! { + #pat => { + #peek_exprs + bytes + } + }; + + quote! { + #[inline(always)] + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + match &mut (*output) { + #body + } + } + } +} + +fn derive_poke_into(s: &Structure) -> TokenStream { + let is_struct = is_struct(&s); + let discriminant_size_type = get_discriminant_size_type(s.variants().len()); + let body = s + .variants() + .iter() + .enumerate() + .fold(quote!(), |acc, (i, vi)| { + let init = if !is_struct { + let index = Index::from(i); + quote! { + let bytes = #discriminant_size_type::poke_into(&#index, bytes); + } + } else { + quote!() + }; + let variant_pat = vi.pat(); + let poke_exprs = vi.bindings().iter().fold(init, |acc, bi| { + quote! { + #acc + let bytes = #bi.poke_into(bytes); + } + }); + + quote! { + #acc + #variant_pat => { + #poke_exprs + bytes + } + } + }); + + quote! 
{ + #[inline(always)] + unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { + match &*self { + #body + } + } + } +} + +fn peek_poke_derive(mut s: Structure) -> TokenStream { + s.binding_name(|_, i| Ident::new(&format!("__self_{}", i), Span::call_site())); + + let max_size_fn = derive_max_size(&s); + let poke_into_fn = derive_poke_into(&s); + let peek_from_fn = if is_struct(&s) { + derive_peek_from_for_struct(&mut s) + } else { + derive_peek_from_for_enum(&mut s) + }; + + let poke_impl = s.gen_impl(quote! { + extern crate peek_poke; + + gen unsafe impl peek_poke::Poke for @Self { + #max_size_fn + #poke_into_fn + } + }); + + // To implement `fn peek_from` we require that types implement `Default` + // trait to create temporary values. This code does the addition all + // manually until https://github.com/mystor/synstructure/issues/24 is fixed. + let default_trait = syn::parse_str::("::std::default::Default").unwrap(); + let peek_trait = syn::parse_str::("peek_poke::Peek").unwrap(); + + let ast = s.ast(); + let name = &ast.ident; + let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); + let mut where_clause = where_clause.cloned(); + s.add_trait_bounds(&default_trait, &mut where_clause, AddBounds::Generics); + s.add_trait_bounds(&peek_trait, &mut where_clause, AddBounds::Generics); + + let dummy_const: Ident = sanitize_ident(&format!("_DERIVE_peek_poke_Peek_FOR_{}", name)); + + let peek_impl = quote! { + #[allow(non_upper_case_globals)] + const #dummy_const: () = { + extern crate peek_poke; + + impl #impl_generics peek_poke::Peek for #name #ty_generics #where_clause { + #peek_from_fn + } + }; + }; + + quote! 
{ + #poke_impl + #peek_impl + } +} + +decl_derive!([PeekPoke] => peek_poke_derive); diff --git a/third_party/rust/peek-poke-derive/src/max_size_expr.rs b/third_party/rust/peek-poke-derive/src/max_size_expr.rs deleted file mode 100644 index 2993b7c0ddca..000000000000 --- a/third_party/rust/peek-poke-derive/src/max_size_expr.rs +++ /dev/null @@ -1,87 +0,0 @@ -use proc_macro2::TokenStream; -use quote::quote; -use syn::{punctuated::Punctuated, DataEnum, DataStruct, Field, Fields, Type, Variant}; - -/// Calculates size expression for punctuated fields -fn get_max_size_expr_for_punctuated_field(fields: &Punctuated) -> TokenStream { - if fields.is_empty() { - return quote! { 0 }; - } else { - let types = fields.iter().map(|field| &field.ty).collect::>(); - quote! { #(<#types>::max_size())+* } - } -} - -/// Calculates size expression for fields -fn get_max_size_expr_for_fields(fields: &Fields) -> TokenStream { - match fields { - Fields::Unit => quote! { 0 }, - Fields::Named(named_fields) => { - get_max_size_expr_for_punctuated_field(&named_fields.named) - } - Fields::Unnamed(unnamed_fields) => { - get_max_size_expr_for_punctuated_field(&unnamed_fields.unnamed) - } - } -} - -/// Calculates size expression for punctuated variants -fn get_max_size_expr_for_punctuated_variant(variants: &Punctuated) -> TokenStream { - if variants.is_empty() { - return quote! { 0 }; - } else { - let count_size_expr = get_variant_count_max_size_expr(variants.len()); - let max_size_expr = get_variant_max_size_expr(variants); - - quote! { #count_size_expr + #max_size_expr } - } -} - -/// Calculates size expression for variant -#[allow(unused)] -fn get_max_size_expr_for_variant(variant: &Variant) -> TokenStream { - get_max_size_expr_for_fields(&variant.fields) -} - -/// Calculates size expression for number of variants (used for enums) -fn get_variant_count_max_size_expr(len: usize) -> TokenStream { - let size_type = get_variant_count_max_size_type(len); - quote! 
{ <#size_type>::max_size() } -} - -/// Calculates size expression for maximum sized variant -fn get_variant_max_size_expr(variants: &Punctuated) -> TokenStream { - let mut max_size_expr = quote! { 0 }; - - for variant in variants { - let variant_size_expr = get_max_size_expr_for_variant(variant); - max_size_expr = quote! { core::cmp::max(#max_size_expr, #variant_size_expr) }; - } - - max_size_expr -} - -/// Calculates size type for number of variants (used for enums) -pub fn get_variant_count_max_size_type(len: usize) -> TokenStream { - if len <= ::max_value() as usize { - quote! { u8 } - } else if len <= ::max_value() as usize { - quote! { u16 } - } else if len <= ::max_value() as usize { - quote! { u32 } - } else if len <= ::max_value() as usize { - quote! { u64 } - } else { - quote! { u128 } - } -} - -/// Calculates size expression for [`DataStruct`](syn::DataStruct) -pub fn for_struct(struct_data: &DataStruct) -> TokenStream { - get_max_size_expr_for_fields(&struct_data.fields) -} - -/// Calculates size expression for [`DataEnum`](syn::DataEnum) -pub fn for_enum(enum_data: &DataEnum) -> TokenStream { - get_max_size_expr_for_punctuated_variant(&enum_data.variants) -} diff --git a/third_party/rust/peek-poke-derive/src/peek_from_expr.rs b/third_party/rust/peek-poke-derive/src/peek_from_expr.rs deleted file mode 100644 index 3c97305d3c5e..000000000000 --- a/third_party/rust/peek-poke-derive/src/peek_from_expr.rs +++ /dev/null @@ -1,206 +0,0 @@ -use crate::{max_size_expr, peek_poke::Generate}; -use proc_macro2::TokenStream; -use quote::{quote, ToTokens}; -use std::{fmt::Display, str::FromStr}; -use syn::{DataEnum, DataStruct, Fields, Ident, Index}; - -/// Calculates serialize expression for fields -fn get_peek_from_expr_for_fields( - field_prefix: T, - fields: &Fields, -) -> (TokenStream, TokenStream) { - match fields { - Fields::Unit => (quote! {}, quote! {}), - Fields::Named(named_fields) => { - if named_fields.named.is_empty() { - (quote! {}, quote! 
{}) - } else { - let mut exprs = Vec::with_capacity(named_fields.named.len()); - let mut fields = Vec::with_capacity(named_fields.named.len()); - - for field in named_fields.named.iter() { - let field_name = match &field.ident { - None => unreachable!(), - Some(ref ident) => quote! { #ident }, - }; - - let field_ref = - TokenStream::from_str(&format!("{}{}", field_prefix, field_name)).unwrap(); - - exprs.push(quote! { - let bytes = #field_ref.peek_from(bytes); - }); - fields.push(field_name); - } - - ( - quote! { - #(#exprs;)* - }, - quote! { - #(#fields),* - }, - ) - } - } - Fields::Unnamed(unnamed_fields) => { - if unnamed_fields.unnamed.is_empty() { - (quote! {}, quote! {}) - } else { - let mut fields = Vec::with_capacity(unnamed_fields.unnamed.len()); - let mut exprs = Vec::with_capacity(unnamed_fields.unnamed.len()); - - for n in 0..unnamed_fields.unnamed.len() { - let field_name = - TokenStream::from_str(&format!("{}{}", field_prefix, n)).unwrap(); - - exprs.push(quote! { - let bytes = #field_name.peek_from(bytes); - }); - fields.push(field_name); - } - - ( - quote! { - #(#exprs)* - }, - quote! { - #(#fields),* - }, - ) - } - } - } -} - -fn get_peek_from_init_expr_for_fields(fields: &Fields, gen: Generate) -> TokenStream { - match fields { - Fields::Unit => quote! {}, - Fields::Named(named_fields) => { - if named_fields.named.is_empty() { - quote! {} - } else { - let mut exprs = Vec::with_capacity(named_fields.named.len()); - - for field in &named_fields.named { - let field_name = match &field.ident { - None => unreachable!(), - Some(ref ident) => quote! { #ident }, - }; - - let field_type = &field.ty; - - let init = if gen == Generate::PeekDefault { - quote! { - let mut #field_name = #field_type::default(); - } - } else { - quote! { - let mut #field_name: #field_type = unsafe { core::mem::uninitialized() }; - } - }; - exprs.push(init); - } - quote! 
{ - #(#exprs)* - } - } - } - Fields::Unnamed(unnamed_fields) => { - if unnamed_fields.unnamed.is_empty() { - quote! {} - } else { - let mut exprs = Vec::with_capacity(unnamed_fields.unnamed.len()); - - for (n, field) in unnamed_fields.unnamed.iter().enumerate() { - let field_name = TokenStream::from_str(&format!("__self_{}", n)).unwrap(); - let field_type = &field.ty; - - let init = if gen == Generate::PeekDefault { - quote! { - let mut #field_name = #field_type::default(); - } - } else { - quote! { - let mut #field_name: #field_type = unsafe { core::mem::uninitialized() }; - } - }; - exprs.push(init); - } - - quote! { - #(#exprs)* - } - } - } - } -} - -/// Calculates size expression for [`DataStruct`](syn::DataStruct) -pub fn for_struct(struct_data: &DataStruct) -> TokenStream { - let (exprs, _) = get_peek_from_expr_for_fields(quote! { self. }, &struct_data.fields); - quote! { - #exprs - bytes - } -} - -/// Calculates size expression for [`DataEnum`](syn::DataEnum) -pub fn for_enum(name: &Ident, enum_data: &DataEnum, gen: Generate) -> TokenStream { - let variant_count = enum_data.variants.len(); - - let size_type = max_size_expr::get_variant_count_max_size_type(variant_count); - let mut match_exprs = Vec::with_capacity(variant_count); - - let variant_expr = quote! { - let mut variant: #size_type = 0; - let bytes = variant.peek_from(bytes); - }; - - for (i, variant) in enum_data.variants.iter().enumerate() { - let variant_name = &variant.ident; - let prefix = match &variant.fields { - Fields::Unnamed(..) => quote! {__self_}, - _ => quote! {}, - }; - let (variant_expr, fields_expr) = get_peek_from_expr_for_fields(prefix, &variant.fields); - - let index = Index::from(i); - let init_expr = get_peek_from_init_expr_for_fields(&variant.fields, gen); - let self_assign_expr = match &variant.fields { - Fields::Named(..) => quote! { - *self = #name:: #variant_name { #fields_expr }; - }, - Fields::Unnamed(..) => quote! 
{ - *self = #name:: #variant_name(#fields_expr); - }, - Fields::Unit => quote! { - *self = #name:: #variant_name; - }, - }; - - match_exprs.push(quote! { - #index => { - #init_expr - #variant_expr - #self_assign_expr - bytes - } - }); - } - - match_exprs.push(quote! { - _ => unreachable!() - }); - - let match_expr = quote! { - match variant { - #(#match_exprs),* - } - }; - - quote! { - #variant_expr - #match_expr - } -} diff --git a/third_party/rust/peek-poke-derive/src/peek_poke.rs b/third_party/rust/peek-poke-derive/src/peek_poke.rs deleted file mode 100644 index 55a7d9e35892..000000000000 --- a/third_party/rust/peek-poke-derive/src/peek_poke.rs +++ /dev/null @@ -1,154 +0,0 @@ -use crate::{max_size_expr, peek_from_expr, poke_into_expr}; -use proc_macro2::TokenStream; -use quote::{quote, ToTokens}; -use syn::{parse_quote, Data::*, DeriveInput, GenericParam, Generics}; - -#[repr(C)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub enum Generate { - Both, - Poke, - PeekCopy, - PeekDefault, -} - -impl Generate { - fn peek(self) -> bool { - match self { - Generate::Both | Generate::PeekCopy | Generate::PeekDefault => true, - _ => false, - } - } - fn poke(self) -> bool { - match self { - Generate::Both | Generate::Poke => true, - _ => false, - } - } -} - -pub fn get_peek_poke_impl(input: DeriveInput) -> TokenStream { - get_impl(input, Generate::Both) -} - -pub fn get_poke_impl(input: DeriveInput) -> TokenStream { - get_impl(input, Generate::Poke) -} - -pub fn get_peek_copy_impl(input: DeriveInput) -> TokenStream { - get_impl(input, Generate::PeekCopy) -} - -pub fn get_peek_default_impl(input: DeriveInput) -> TokenStream { - get_impl(input, Generate::PeekDefault) -} - -/// Returns `PeekPoke` trait implementation -fn get_impl(input: DeriveInput, gen: Generate) -> TokenStream { - let name = input.ident; - let (add_copy_trait, add_default_trait) = match &input.data { - Enum(..) => { - assert!( - gen != Generate::Both, - "This macro cannot be used on enums! 
use `PeekCopy` or `PeekDefault`" - ); - (gen == Generate::PeekCopy, gen == Generate::PeekDefault) - } - _ => (false, false), - }; - - let (max_size, poke_into, peek_from) = match &input.data { - Struct(ref struct_data) => ( - max_size_expr::for_struct(&struct_data), - poke_into_expr::for_struct(&name, &struct_data), - peek_from_expr::for_struct(&struct_data), - ), - Enum(ref enum_data) => ( - max_size_expr::for_enum(&enum_data), - poke_into_expr::for_enum(&name, &enum_data), - peek_from_expr::for_enum(&name, &enum_data, gen), - ), - - Union(_) => panic!("This macro cannot be used on unions!"), - }; - - let poke_generics = add_trait_bound(input.generics.clone(), quote! { peek_poke::Poke }); - let (impl_generics, ty_generics, where_clause) = poke_generics.split_for_impl(); - - let poke_impl = if gen.poke() { - quote! { - #[automatically_derived] - #[allow(unused_qualifications)] - #[allow(unused)] - unsafe impl #impl_generics peek_poke::Poke for #name #ty_generics #where_clause { - #[inline(always)] - fn max_size() -> usize { - #max_size - } - - #[inline(always)] - unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { - #poke_into - } - } - } - } else { - quote! {} - }; - - let peek_generics = add_trait_bound(input.generics.clone(), quote! { peek_poke::Peek }); - let peek_generics = add_trait_bound_if(peek_generics, quote! { Copy }, add_copy_trait); - let peek_generics = add_trait_bound_if(peek_generics, quote! { Default }, add_default_trait); - let peek_generics = add_where_predicate_if(peek_generics, quote! { Self: Copy }, add_copy_trait); - let peek_generics = add_where_predicate_if(peek_generics, quote! { Self: Default }, add_default_trait); - let (impl_generics, ty_generics, where_clause) = peek_generics.split_for_impl(); - - let peek_impl = if gen.peek() { - quote! 
{ - #[automatically_derived] - #[allow(unused_qualifications)] - #[allow(unused)] - impl #impl_generics peek_poke::Peek for #name #ty_generics #where_clause { - #[inline(always)] - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { - #peek_from - } - } - } - } else { - quote! {} - }; - - quote! { - #poke_impl - #peek_impl - } -} - -// Add a bound, eg `T: PeekPoke`, for every type parameter `T`. -fn add_trait_bound(mut generics: Generics, bound: impl ToTokens) -> Generics { - for param in &mut generics.params { - if let GenericParam::Type(ref mut type_param) = *param { - type_param.bounds.push(parse_quote!(#bound)); - } - } - generics -} - -fn add_trait_bound_if(generics: Generics, bound: impl ToTokens, add: bool) -> Generics { - if add { - add_trait_bound(generics, bound) - } else { - generics - } -} - -fn add_where_predicate_if(mut generics: Generics, predicate: impl ToTokens, add: bool) -> Generics { - if add { - generics - .make_where_clause() - .predicates - .push(parse_quote!(#predicate)); - } - generics -} diff --git a/third_party/rust/peek-poke-derive/src/poke_into_expr.rs b/third_party/rust/peek-poke-derive/src/poke_into_expr.rs deleted file mode 100644 index 28e5c7978743..000000000000 --- a/third_party/rust/peek-poke-derive/src/poke_into_expr.rs +++ /dev/null @@ -1,127 +0,0 @@ -use crate::max_size_expr; -use proc_macro2::{Span, TokenStream}; -use quote::{quote, ToTokens}; -use std::{fmt::Display, str::FromStr}; -use syn::{DataEnum, DataStruct, Fields, Ident, Index}; - -/// Calculates serialize expression for fields -fn get_poke_into_expr_for_fields( - container_prefix: T, - fields: &Fields, -) -> TokenStream { - match fields { - Fields::Unit => quote! { bytes }, - Fields::Named(named_fields) => { - let mut exprs = Vec::with_capacity(named_fields.named.len()); - - for field in named_fields.named.iter() { - let field_name = match &field.ident { - None => unreachable!(), - Some(ref ident) => quote! 
{ #ident }, - }; - - let field_ref = - TokenStream::from_str(&format!("{}{}", container_prefix, field_name)).unwrap(); - - exprs.push(quote! { - let bytes = #field_ref.poke_into(bytes); - }); - } - - quote! { - #(#exprs)* - bytes - } - } - Fields::Unnamed(unnamed_fields) => { - let mut exprs = Vec::with_capacity(unnamed_fields.unnamed.len()); - - for i in 0..unnamed_fields.unnamed.len() { - let field_ref = - TokenStream::from_str(&format!("{}{}", container_prefix, i)).unwrap(); - - exprs.push(quote! { - let bytes = #field_ref.poke_into(bytes); - }); - } - - quote! { - #(#exprs)* - bytes - } - } - } -} - -/// Calculates expression for [`DataStruct`](syn::DataStruct) -pub fn for_struct(_: &Ident, struct_data: &DataStruct) -> TokenStream { - get_poke_into_expr_for_fields(quote! { self. }, &struct_data.fields) -} - -/// Calculates serialize expression for [`DataEnum`](syn::DataEnum) -pub fn for_enum(name: &Ident, enum_data: &DataEnum) -> TokenStream { - let variant_count = enum_data.variants.len(); - - let size_type = max_size_expr::get_variant_count_max_size_type(variant_count); - let mut match_exprs = Vec::with_capacity(variant_count); - - for (i, variant) in enum_data.variants.iter().enumerate() { - let index = Index::from(i); - - let field_prefix = match variant.fields { - Fields::Unit => quote! {}, - Fields::Named(_) => quote! {}, - Fields::Unnamed(_) => quote! { __self_ }, - }; - - let fields_expr = match &variant.fields { - Fields::Unit => quote! {}, - Fields::Named(named_fields) => { - let mut exprs = Vec::with_capacity(named_fields.named.len()); - - for field in named_fields.named.iter() { - let field_name = match &field.ident { - None => unreachable!(), - Some(ref ident) => quote! { #ident }, - }; - - exprs.push(quote! { ref #field_name }) - } - - quote! 
{ { #(#exprs),* } } - } - Fields::Unnamed(unnamed_fields) => { - let len = unnamed_fields.unnamed.len(); - let mut exprs = Vec::with_capacity(len); - - for j in 0..len { - let name = Ident::new(&format!("{}{}", field_prefix, j), Span::call_site()); - exprs.push(quote! { ref #name }); - } - - quote! { ( #(#exprs),* ) } - } - }; - - let variant_name = &variant.ident; - let variant_init_expr = quote! { - let bytes = (#index as #size_type).poke_into(bytes); - }; - let variant_impl_expr = get_poke_into_expr_for_fields(field_prefix, &variant.fields); - - let variant_expr = quote! { - #variant_init_expr - #variant_impl_expr - }; - - match_exprs.push(quote! { - #name:: #variant_name #fields_expr => { #variant_expr } - }); - } - - quote! { - match self { - #(#match_exprs),* - } - } -} diff --git a/third_party/rust/peek-poke/.cargo-checksum.json b/third_party/rust/peek-poke/.cargo-checksum.json index c2d01ae53c32..80bede2afacb 100644 --- a/third_party/rust/peek-poke/.cargo-checksum.json +++ b/third_party/rust/peek-poke/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"COPYRIGHT":"58daeef13dbf0d72acb4899f2497d2cd2c540806eaa6531e58ed1bc98f8040e9","Cargo.toml":"bfe2b0b425f1db2bde987739d4c117331ef796bf2334431cda591cb7249c7860","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"41339bf57e18b824d8e6028e4f639271e88d75dc0441e09a72c04d36178649f8","README.md":"fa1190ee4345c4b97e280d8b2e37c4adb0a2e0d9ef1371e35684b5e57a3ee76c","benches/versus_bincode.rs":"324912679c2aee4d6c1c8921fb59497298b8daa24b6abeb10b7c390b344a8c12","examples/webrender.rs":"3129d8a56f8acab9cb825defeb357526176bc931ab3aa68bdbc4d4dbb4fe4e34","src/lib.rs":"70408285de880354cb8ccd2b286651b6f2153f11cf9fa288df53a89bfc3a66de","tests/max_size.rs":"cf820c92ca9e826aa8574dc193a10611aad49e40f5e047c99c137fa6df6093b1","tests/round_trip.rs":"8f53dba6e4d61aa315b4fb3d5a7c80cb434eb5778b893891fa886556d4e0c89a"},"package":null} \ No newline at end of file 
+{"files":{"Cargo.toml":"04c9a97eca0deff1eb0b850bea27004c993cb7dd8d6b147d2f6a1936247c9445","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"41339bf57e18b824d8e6028e4f639271e88d75dc0441e09a72c04d36178649f8","README.md":"d8e72ee5467fed029b7d2b57725e3ab28d83043811edb3817622d0bc75d70225","src/euclid.rs":"5f432259510ecf137f0dee522625c00e6794411354d3778ce5ea9308dee0f6a3","src/lib.rs":"fde596773e6ff44977411afc73c724329b54ce677e251dd19e884dd30570ca56","src/slice_ext.rs":"25cefdf5a69a04f4be91d0b64398255e2c192391a65504cfbb87bc358de3c143","src/vec_ext.rs":"57ec06a51054f35b63d3749587cb781bdc0610c58c4013c1b305ab7c8cd33924","tests/max_size.rs":"c3042401799cf3185039211c94cbe6e86ac23e26a0b36785d8a8e6badf2853ae","tests/round_trip.rs":"347a7269dc236bc0ec2bcc8b558aad93c35975f625c55297de6c0519bd242688"},"package":"d93fd6a575ebf1ac2668d08443c97a22872cfb463fd8b7ddd141e9f6be59af2f"} \ No newline at end of file diff --git a/third_party/rust/peek-poke/COPYRIGHT b/third_party/rust/peek-poke/COPYRIGHT deleted file mode 100644 index 7ff03cd7636b..000000000000 --- a/third_party/rust/peek-poke/COPYRIGHT +++ /dev/null @@ -1,14 +0,0 @@ -Except as otherwise noted (below and/or in individual files), peek-poke is -licensed under the Apache License, Version 2.0 or - or the MIT license - or , at your option. - -peek-poke includes packages written by third parties. -The following third party packages are included, and carry -their own copyright notices and license terms: - -* Portions of the peek-poke-derive code for generating PeekPoke trait is - derived from desse-derive, which is dual licensed under Apache License, - Version 2.0 or the MIT license , at your option. - - Copyright (c) 2019, Devashish Dixit. 
diff --git a/third_party/rust/peek-poke/Cargo.toml b/third_party/rust/peek-poke/Cargo.toml index 91bd8b0eca7b..3d0c9006af12 100644 --- a/third_party/rust/peek-poke/Cargo.toml +++ b/third_party/rust/peek-poke/Cargo.toml @@ -1,32 +1,32 @@ -[workspace] +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) [package] +edition = "2018" name = "peek-poke" version = "0.2.0" authors = ["Dan Glastonbury "] +description = "A mechanism for serializing and deserializing data into/from byte buffers, for use in WebRender." license = "MIT/Apache-2.0" -edition = "2018" +repository = "https://github.com/servo/webrender" +[dependencies.euclid] +version = "0.20.0" +optional = true -[dependencies] -peek-poke-derive = { version = "0.2", path = "./peek-poke-derive", optional = true } - -[dev-dependencies] -bincode = "1.1" -criterion = "0.2" -serde = "1.0" -serde_derive = { git = "https://github.com/servo/serde", branch = "deserialize_from_enums10", features = ["deserialize_in_place"] } +[dependencies.peek-poke-derive] +version = "0.2" +optional = true [features] default = ["derive"] derive = ["peek-poke-derive"] -extras = ["derive"] -option_copy = [] -option_default = [] - -[[bench]] -name = "versus_bincode" -harness = false -required-features = ["option_copy"] - -[profile.release] -opt-level = 2 +extras = ["derive", "euclid"] diff --git a/third_party/rust/peek-poke/README.md b/third_party/rust/peek-poke/README.md index f786e08d4ee2..1c379aa1211f 100644 --- a/third_party/rust/peek-poke/README.md +++ 
b/third_party/rust/peek-poke/README.md @@ -47,8 +47,7 @@ Licensed under either of at your option. -see [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT), and -[COPYRIGHT](COPYRIGHT) for details. +see [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT) for details. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as diff --git a/third_party/rust/peek-poke/benches/versus_bincode.rs b/third_party/rust/peek-poke/benches/versus_bincode.rs deleted file mode 100644 index f2b8582573c7..000000000000 --- a/third_party/rust/peek-poke/benches/versus_bincode.rs +++ /dev/null @@ -1,186 +0,0 @@ -#[macro_use] -extern crate serde_derive; - -use criterion::{black_box, criterion_group, criterion_main, Benchmark, Criterion}; - -use bincode::{deserialize_in_place, serialize_into}; -use peek_poke::{Peek, PeekCopy, PeekPoke, Poke}; -use std::{io, ptr}; - -#[derive(Debug, Deserialize, PartialEq, PeekPoke, Serialize)] -pub struct Point { - pub x: f32, - pub y: f32, -} - -#[derive(Debug, Deserialize, PartialEq, PeekPoke, Serialize)] -pub struct Size { - pub w: f32, - pub h: f32, -} - -#[derive(Debug, Deserialize, PartialEq, PeekPoke, Serialize)] -pub struct Rect { - pub point: Point, - pub size: Size, -} - -pub type PipelineSourceId = u32; -#[repr(C)] -#[derive(Clone, Copy, Debug, Deserialize, PartialEq, PeekPoke, Serialize)] -pub struct PipelineId(pub PipelineSourceId, pub u32); - -#[repr(C)] -#[derive(Clone, Copy, Debug, Deserialize, PartialEq, PeekPoke, Serialize)] -pub struct ClipChainId(pub u64, pub PipelineId); - -#[repr(C)] -#[derive(Clone, Copy, Debug, Deserialize, PartialEq, PeekCopy, Poke, Serialize)] -pub enum ClipId { - Clip(usize, PipelineId), - ClipChain(ClipChainId), -} - -#[repr(C)] -#[derive(Clone, Copy, Debug, Default, Deserialize, PartialEq, PeekPoke, Serialize)] -pub struct ItemTag(u64, u16); - -#[repr(C)] -#[derive(Debug, Deserialize, PartialEq, PeekPoke, Serialize)] 
-pub struct SpatialId(pub usize, PipelineId); - -#[repr(C)] -#[derive(Debug, Deserialize, PartialEq, PeekPoke, Serialize)] -pub struct CommonItemProperties { - pub clip_rect: Rect, - pub spatial_id: SpatialId, - pub clip_id: ClipId, - pub hit_info: Option, - pub is_backface_visible: bool, -} - -// This is used by webrender_api -#[derive(Clone, Copy)] -struct UnsafeReader { - start: *const u8, - end: *const u8, -} - -impl UnsafeReader { - #[inline(always)] - fn new(buf: &[u8]) -> UnsafeReader { - unsafe { - let end = buf.as_ptr().add(buf.len()); - let start = buf.as_ptr(); - UnsafeReader { start, end } - } - } - - // This read implementation is significantly faster than the standard &[u8] one. - // - // First, it only supports reading exactly buf.len() bytes. This ensures that - // the argument to memcpy is always buf.len() and will allow a constant buf.len() - // to be propagated through to memcpy which LLVM will turn into explicit loads and - // stores. The standard implementation does a len = min(slice.len(), buf.len()) - // - // Second, we only need to adjust 'start' after reading and it's only adjusted by a - // constant. This allows LLVM to avoid adjusting the length field after ever read - // and lets it be aggregated into a single adjustment. 
- #[inline(always)] - fn read_internal(&mut self, buf: &mut [u8]) { - // this is safe because we panic if start + buf.len() > end - unsafe { - assert!( - self.start.add(buf.len()) <= self.end, - "UnsafeReader: read past end of target" - ); - ptr::copy_nonoverlapping(self.start, buf.as_mut_ptr(), buf.len()); - self.start = self.start.add(buf.len()); - } - } -} - -impl io::Read for UnsafeReader { - // These methods were not being inlined and we need them to be so that the memcpy - // is for a constant size - #[inline(always)] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.read_internal(buf); - Ok(buf.len()) - } - #[inline(always)] - fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { - self.read_internal(buf); - Ok(()) - } -} - -#[allow(unused_must_use)] -fn criterion_benchmark(c: &mut Criterion) { - c.bench( - "struct::serialize", - Benchmark::new("peek_poke::poke_into", |b| { - let mut buffer = Vec::with_capacity(1024); - let ptr = buffer.as_mut_ptr(); - b.iter(|| { - let my_struct = CommonItemProperties { - clip_rect: Rect { - point: Point { x: 1.0, y: 2.0 }, - size: Size { w: 4.0, h: 5.0 }, - }, - clip_id: ClipId::Clip(5, PipelineId(1, 2)), - spatial_id: SpatialId(3, PipelineId(4, 5)), - hit_info: None, - is_backface_visible: true, - }; - black_box(unsafe { black_box(&my_struct).poke_into(ptr) }); - }) - }) - .with_function("bincode::serialize", |b| { - let mut buffer = Vec::with_capacity(1024); - b.iter(|| { - buffer.clear(); - let my_struct = CommonItemProperties { - clip_rect: Rect { - point: Point { x: 1.0, y: 2.0 }, - size: Size { w: 4.0, h: 5.0 }, - }, - clip_id: ClipId::Clip(5, PipelineId(1, 2)), - spatial_id: SpatialId(3, PipelineId(4, 5)), - hit_info: None, - is_backface_visible: true, - }; - black_box(serialize_into(&mut buffer, black_box(&my_struct))); - }) - }), - ); - - c.bench( - "struct::deserialize", - Benchmark::new("peek_poke::peek_from", |b| { - let bytes = vec![ - 0u8, 0, 128, 63, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 160, 
64, 3, 0, 0, 0, 0, 0, 0, 0, - 4, 0, 0, 0, 5, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, - ]; - let mut result: CommonItemProperties = unsafe { std::mem::uninitialized() }; - b.iter(|| { - black_box(unsafe { result.peek_from(black_box(bytes.as_ptr())) }); - }) - }) - .with_function("bincode::deserialize", |b| { - let bytes = vec![ - 0u8, 0, 128, 63, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 160, 64, 3, 0, 0, 0, 0, 0, 0, 0, - 4, 0, 0, 0, 5, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, - ]; - let mut result: CommonItemProperties = unsafe { std::mem::uninitialized() }; - let reader = UnsafeReader::new(&bytes); - b.iter(|| { - let reader = bincode::IoReader::new(reader); - black_box(deserialize_in_place(reader, &mut result)); - }) - }), - ); -} - -criterion_group!(benches, criterion_benchmark); -criterion_main!(benches); diff --git a/third_party/rust/peek-poke/examples/webrender.rs b/third_party/rust/peek-poke/examples/webrender.rs deleted file mode 100644 index 2814b5b58257..000000000000 --- a/third_party/rust/peek-poke/examples/webrender.rs +++ /dev/null @@ -1,111 +0,0 @@ -use peek_poke::{Peek, PeekCopy, PeekPoke, Poke}; - -#[repr(C)] -#[derive(Debug, PartialEq, PeekPoke)] -pub struct Point { - pub x: f32, - pub y: f32, -} - -#[repr(C)] -#[derive(Debug, PartialEq, PeekPoke)] -pub struct Size { - pub w: f32, - pub h: f32, -} - -#[repr(C)] -#[derive(Debug, PartialEq, PeekPoke)] -pub struct Rect { - pub point: Point, - pub size: Size, -} - -pub type PipelineSourceId = u32; -#[repr(C)] -#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)] -pub struct PipelineId(pub PipelineSourceId, pub u32); - -#[repr(C)] -#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)] -pub struct ClipChainId(pub u64, pub PipelineId); - -#[repr(C)] -#[derive(Clone, Copy, Debug, PartialEq, PeekCopy, Poke)] -pub enum ClipId { - Clip(usize, PipelineId), - ClipChain(ClipChainId), -} - -pub type ItemTag = (u64, u16); -#[repr(C)] -#[derive(Debug, PartialEq, PeekPoke)] -pub 
struct SpatialId(pub usize, PipelineId); - -#[repr(C)] -#[derive(Debug, PartialEq, PeekPoke)] -pub struct CommonItemProperties { - pub clip_rect: Rect, - pub clip_id: ClipId, - pub spatial_id: SpatialId, - #[cfg(any(feature = "option_copy", feature = "option_default"))] - pub hit_info: Option, - pub is_backface_visible: bool, -} - -#[inline(never)] -unsafe fn test(bytes: *mut u8, x: &T) -> *mut u8 { - x.poke_into(bytes) -} - -fn poke_into(bytes: &mut Vec, x: &T) { - bytes.reserve(::max_size()); - let ptr = bytes.as_mut_ptr(); - let new_ptr = unsafe { test(ptr, x) }; - let new_len = (new_ptr as usize) - (bytes.as_ptr() as usize); - unsafe { - bytes.set_len(new_len); - } -} - -#[inline(never)] -unsafe fn test1(x: &mut T, bytes: *const u8) -> *const u8 { - x.peek_from(bytes) -} - -fn peek_from(x: &mut T, bytes: &[u8]) -> usize { - assert!(bytes.len() >= ::max_size()); - let ptr = bytes.as_ptr(); - let new_ptr = unsafe { test1(x, ptr) }; - let size = (new_ptr as usize) - (ptr as usize); - assert!(size <= bytes.len()); - size -} - -pub fn main() { - let x = CommonItemProperties { - clip_rect: Rect { - point: Point { x: 1.0, y: 2.0 }, - size: Size { w: 4.0, h: 5.0 }, - }, - clip_id: ClipId::Clip(5, PipelineId(1, 2)), - spatial_id: SpatialId(3, PipelineId(4, 5)), - #[cfg(any(feature = "option_copy", feature = "option_default"))] - hit_info: None, - is_backface_visible: true, - }; - let mut bytes = Vec::::new(); - poke_into(&mut bytes, &x); - println!("{:?}", bytes); - assert_eq!( - bytes, - vec![ - 0u8, 0, 128, 63, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 160, 64, 3, 0, 0, 0, 0, 0, 0, 0, 4, - 0, 0, 0, 5, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1 - ] - ); - let mut y: CommonItemProperties = unsafe { std::mem::zeroed() }; - peek_from(&mut y, &bytes); - println!("{:?}", y); - assert_eq!(x, y); -} diff --git a/third_party/rust/peek-poke/src/euclid.rs b/third_party/rust/peek-poke/src/euclid.rs new file mode 100644 index 000000000000..44b0ed27e730 --- /dev/null 
+++ b/third_party/rust/peek-poke/src/euclid.rs @@ -0,0 +1,170 @@ +// Copyright 2019 The Servo Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use crate::{Peek, Poke}; +use euclid::{Point2D, Rect, SideOffsets2D, Size2D, Transform3D, Vector2D}; + +unsafe impl Poke for Point2D { + #[inline(always)] + fn max_size() -> usize { + 2 * T::max_size() + } + #[inline(always)] + unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { + let bytes = self.x.poke_into(bytes); + let bytes = self.y.poke_into(bytes); + bytes + } +} +impl Peek for Point2D { + #[inline(always)] + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + let bytes = T::peek_from(bytes, &mut (*output).x); + let bytes = T::peek_from(bytes, &mut (*output).y); + bytes + } +} + +unsafe impl Poke for Rect { + #[inline(always)] + fn max_size() -> usize { + Point2D::::max_size() + Size2D::::max_size() + } + #[inline(always)] + unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { + let bytes = self.origin.poke_into(bytes); + let bytes = self.size.poke_into(bytes); + bytes + } +} +impl Peek for Rect { + #[inline(always)] + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + let bytes = Point2D::::peek_from(bytes, &mut (*output).origin); + let bytes = Size2D::::peek_from(bytes, &mut (*output).size); + bytes + } +} + +unsafe impl Poke for SideOffsets2D { + #[inline(always)] + fn max_size() -> usize { + 4 * T::max_size() + } + #[inline(always)] + unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { + let bytes = self.top.poke_into(bytes); + let bytes = self.right.poke_into(bytes); + let bytes = self.bottom.poke_into(bytes); + let bytes = self.left.poke_into(bytes); + bytes + } +} +impl Peek for 
SideOffsets2D { + #[inline(always)] + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + let bytes = T::peek_from(bytes, &mut (*output).top); + let bytes = T::peek_from(bytes, &mut (*output).right); + let bytes = T::peek_from(bytes, &mut (*output).bottom); + let bytes = T::peek_from(bytes, &mut (*output).left); + bytes + } +} + +unsafe impl Poke for Size2D { + #[inline(always)] + fn max_size() -> usize { + 2 * T::max_size() + } + #[inline(always)] + unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { + let bytes = self.width.poke_into(bytes); + let bytes = self.height.poke_into(bytes); + bytes + } +} +impl Peek for Size2D { + #[inline(always)] + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + let bytes = T::peek_from(bytes, &mut (*output).width); + let bytes = T::peek_from(bytes, &mut (*output).height); + bytes + } +} + +unsafe impl Poke for Transform3D { + #[inline(always)] + fn max_size() -> usize { + 16 * T::max_size() + } + #[inline(always)] + unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { + let bytes = self.m11.poke_into(bytes); + let bytes = self.m12.poke_into(bytes); + let bytes = self.m13.poke_into(bytes); + let bytes = self.m14.poke_into(bytes); + let bytes = self.m21.poke_into(bytes); + let bytes = self.m22.poke_into(bytes); + let bytes = self.m23.poke_into(bytes); + let bytes = self.m24.poke_into(bytes); + let bytes = self.m31.poke_into(bytes); + let bytes = self.m32.poke_into(bytes); + let bytes = self.m33.poke_into(bytes); + let bytes = self.m34.poke_into(bytes); + let bytes = self.m41.poke_into(bytes); + let bytes = self.m42.poke_into(bytes); + let bytes = self.m43.poke_into(bytes); + let bytes = self.m44.poke_into(bytes); + bytes + } +} +impl Peek for Transform3D { + #[inline(always)] + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + let bytes = T::peek_from(bytes, &mut (*output).m11); + let bytes = T::peek_from(bytes, &mut (*output).m12); + let bytes = 
T::peek_from(bytes, &mut (*output).m13); + let bytes = T::peek_from(bytes, &mut (*output).m14); + let bytes = T::peek_from(bytes, &mut (*output).m21); + let bytes = T::peek_from(bytes, &mut (*output).m22); + let bytes = T::peek_from(bytes, &mut (*output).m23); + let bytes = T::peek_from(bytes, &mut (*output).m24); + let bytes = T::peek_from(bytes, &mut (*output).m31); + let bytes = T::peek_from(bytes, &mut (*output).m32); + let bytes = T::peek_from(bytes, &mut (*output).m33); + let bytes = T::peek_from(bytes, &mut (*output).m34); + let bytes = T::peek_from(bytes, &mut (*output).m41); + let bytes = T::peek_from(bytes, &mut (*output).m42); + let bytes = T::peek_from(bytes, &mut (*output).m43); + let bytes = T::peek_from(bytes, &mut (*output).m44); + bytes + } +} + +unsafe impl Poke for Vector2D { + #[inline(always)] + fn max_size() -> usize { + 2 * T::max_size() + } + #[inline(always)] + unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { + let bytes = self.x.poke_into(bytes); + let bytes = self.y.poke_into(bytes); + bytes + } +} +impl Peek for Vector2D { + #[inline(always)] + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + let bytes = T::peek_from(bytes, &mut (*output).x); + let bytes = T::peek_from(bytes, &mut (*output).y); + bytes + } +} diff --git a/third_party/rust/peek-poke/src/lib.rs b/third_party/rust/peek-poke/src/lib.rs index 0f89e32fb6d1..55864599bbd3 100644 --- a/third_party/rust/peek-poke/src/lib.rs +++ b/third_party/rust/peek-poke/src/lib.rs @@ -1,3 +1,13 @@ +// Copyright 2019 The Servo Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + //! Fast binary serialization and deserialization for types with a known maximum size. //! //! 
## Binary Encoding Scheme @@ -6,37 +16,118 @@ //! //! ## Comparison to bincode -#![no_std] - #[cfg(feature = "derive")] pub use peek_poke_derive::*; -use core::{ - marker::PhantomData, - mem::{size_of, transmute}, -}; +use core::{marker::PhantomData, mem::size_of, slice}; +use crate::{slice_ext::*, vec_ext::*}; -#[cfg(feature = "option_copy")] -use core::mem::uninitialized; +mod slice_ext; +mod vec_ext; -// Helper to copy a slice of bytes `bytes` into a buffer of bytes pointed to by -// `dest`. -#[inline(always)] -fn copy_bytes_to(bytes: &[u8], dest: *mut u8) -> *mut u8 { +union MaybeUninitShim { + uninit: (), + init: T, +} + +/// Peek helper for constructing a `T` by `Copy`ing into an uninitialized stack +/// allocation. +pub unsafe fn peek_from_uninit(bytes: *const u8) -> (T, *const u8) { + let mut val = MaybeUninitShim { uninit: () }; + let bytes = ::peek_from(bytes, &mut val.init); + (val.init, bytes) +} + +/// Peek helper for constructing a `T` by `Default` initialized stack +/// allocation. +pub unsafe fn peek_from_default(bytes: *const u8) -> (T, *const u8) { + let mut val = T::default(); + let bytes = ::peek_from(bytes, &mut val); + (val, bytes) +} + +/// Peek inplace a `T` from a slice of bytes, returning a slice of the remaining +/// bytes. `src` must contain at least `T::max_size()` bytes. +/// +/// [`ensure_red_zone`] can be used to add required padding. +pub fn peek_from_slice<'a, T: Peek>(src: &'a [u8], dst: &mut T) -> &'a [u8] { unsafe { - bytes.as_ptr().copy_to_nonoverlapping(dest, bytes.len()); - dest.add(bytes.len()) + // If src.len() == T::max_size() then src is at the start of the red-zone. + assert!(T::max_size() < src.len(), "WRDL: unexpected end of display list"); + let end_ptr = T::peek_from(src.as_ptr(), dst); + let len = end_ptr as usize - src.as_ptr() as usize; + // Did someone break the T::peek_from() can't read more than T::max_size() + // bytes contract? 
+ assert!(len <= src.len(), "WRDL: Peek::max_size was wrong"); + slice::from_raw_parts(end_ptr, src.len() - len) } } -#[inline(always)] -fn copy_to_slice(src: *const u8, slice: &mut [u8]) -> *const u8 { +/// Poke helper to insert a serialized version of `src` at the beginning for `dst`. +pub fn poke_inplace_slice(src: &T, dst: &mut [u8]) { + assert!(T::max_size() <= dst.len(), "WRDL: buffer too small to write into"); unsafe { - src.copy_to_nonoverlapping(slice.as_mut_ptr(), slice.len()); - src.add(slice.len()) + src.poke_into(dst.as_mut_ptr()); } } +/// Poke helper to append a serialized version of `src` to the end of `dst`. +pub fn poke_into_vec(src: &T, dst: &mut Vec) { + dst.reserve(T::max_size()); + unsafe { + let ptr = dst.as_end_mut_ptr(); + let end_ptr = src.poke_into(ptr); + dst.set_end_ptr(end_ptr); + } +} + +// TODO: Is returning the len of the iterator of any practical use? +pub fn poke_extend_vec(src: I, dst: &mut Vec) -> usize +where + I: ExactSizeIterator, + I::Item: Poke, +{ + let len = src.len(); + let max_size = len * I::Item::max_size(); + dst.reserve(max_size); + unsafe { + let ptr = dst.as_end_mut_ptr(); + // Guard against the possibility of a misbehaved implementation of + // ExactSizeIterator by writing at most `len` items. + let end_ptr = src.take(len).fold(ptr, |ptr, item| item.poke_into(ptr)); + dst.set_end_ptr(end_ptr); + } + + len +} + +/// Add `T::max_size()` "red zone" (padding of zeroes) to the end of the vec of +/// `bytes`. This allows deserialization to assert that at least `T::max_size()` +/// bytes exist at all times. 
+pub fn ensure_red_zone(bytes: &mut Vec) { + bytes.reserve(T::max_size()); + unsafe { + let end_ptr = bytes.as_end_mut_ptr(); + end_ptr.write_bytes(0, T::max_size()); + bytes.set_end_ptr(end_ptr.add(T::max_size())); + } +} + +#[inline] +unsafe fn read_verbatim(src: *const u8, dst: *mut T) -> *const u8 { + *dst = (src as *const T).read_unaligned(); + src.add(size_of::()) +} + +#[inline] +unsafe fn write_verbatim(src: T, dst: *mut u8) -> *mut u8 { + (dst as *mut T).write_unaligned(src); + dst.add(size_of::()) +} + +#[cfg(feature = "extras")] +mod euclid; + /// A trait for values that provide serialization into buffers of bytes. /// /// # Example @@ -143,7 +234,7 @@ pub trait Peek: Poke { /// /// * `bytes` must pointer to at least the number of bytes returned by /// `Poke::max_size()`. - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8; + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8; } macro_rules! impl_poke_for_deref { @@ -163,13 +254,6 @@ macro_rules! impl_poke_for_deref { impl_poke_for_deref!(<'a, T: Poke> Poke for &'a T); impl_poke_for_deref!(<'a, T: Poke> Poke for &'a mut T); -impl<'a, T: Peek> Peek for &'a mut T { - #[inline(always)] - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { - (**self).peek_from(bytes) - } -} - macro_rules! impl_for_primitive { ($($ty:ty)+) => { $(unsafe impl Poke for $ty { @@ -179,15 +263,13 @@ macro_rules! 
impl_for_primitive { } #[inline(always)] unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { - let int_bytes = transmute::<_, &[u8; size_of::<$ty>()]>(self); - copy_bytes_to(int_bytes, bytes) + write_verbatim(*self, bytes) } } impl Peek for $ty { #[inline(always)] - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { - let int_bytes = transmute::<_, &mut [u8; size_of::<$ty>()]>(self); - copy_to_slice(bytes, int_bytes) + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + read_verbatim(bytes, output) } })+ }; @@ -202,19 +284,20 @@ impl_for_primitive! { unsafe impl Poke for bool { #[inline(always)] fn max_size() -> usize { - ::max_size() + u8::max_size() } #[inline] unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { (*self as u8).poke_into(bytes) } } + impl Peek for bool { #[inline] - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { let mut int_bool = 0u8; - let ptr = int_bool.peek_from(bytes); - *self = int_bool != 0; + let ptr = ::peek_from(bytes, &mut int_bool); + *output = int_bool != 0; ptr } } @@ -229,10 +312,11 @@ unsafe impl Poke for PhantomData { bytes } } + impl Peek for PhantomData { #[inline(always)] - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { - *self = PhantomData; + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + *output = PhantomData; bytes } } @@ -240,8 +324,9 @@ impl Peek for PhantomData { unsafe impl Poke for Option { #[inline(always)] fn max_size() -> usize { - ::max_size() + ::max_size() + u8::max_size() + T::max_size() } + #[inline] unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { match self { @@ -255,43 +340,18 @@ unsafe impl Poke for Option { } } -#[cfg(feature = "option_copy")] -impl Peek for Option { - #[inline] - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { - let mut variant = 0u8; - let bytes = variant.peek_from(bytes); - match variant { 
- 0 => { - *self = None; - bytes - } - 1 => { - let mut __0: T = uninitialized(); - let bytes = __0.peek_from(bytes); - *self = Some(__0); - bytes - } - _ => unreachable!(), - } - } -} - -#[cfg(feature = "option_default")] impl Peek for Option { #[inline] - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { - let mut variant = 0u8; - let bytes = variant.peek_from(bytes); + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + let (variant, bytes) = peek_from_default::(bytes); match variant { 0 => { - *self = None; + *output = None; bytes } 1 => { - let mut __0 = T::default(); - let bytes = __0.peek_from(bytes); - *self = Some(__0); + let (val, bytes) = peek_from_default(bytes); + *output = Some(val); bytes } _ => unreachable!(), @@ -303,15 +363,15 @@ macro_rules! impl_for_arrays { ($($len:tt)+) => { $(unsafe impl Poke for [T; $len] { fn max_size() -> usize { - $len * ::max_size() + $len * T::max_size() } unsafe fn poke_into(&self, bytes: *mut u8) -> *mut u8 { self.iter().fold(bytes, |bytes, e| e.poke_into(bytes)) } } impl Peek for [T; $len] { - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { - self.iter_mut().fold(bytes, |bytes, e| e.peek_from(bytes)) + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + (&mut *output).iter_mut().fold(bytes, |bytes, e| ::peek_from(bytes, e)) } })+ } @@ -333,8 +393,8 @@ unsafe impl Poke for () { } } impl Peek for () { - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { - *self = (); + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + *output = (); bytes } } @@ -352,8 +412,8 @@ macro_rules! 
impl_for_tuple { } } impl<$($ty: Peek),+> Peek for ($($ty,)+) { - unsafe fn peek_from(&mut self, bytes: *const u8) -> *const u8 { - $(let bytes = self.$n.peek_from(bytes);)+ + unsafe fn peek_from(bytes: *const u8, output: *mut Self) -> *const u8 { + $(let bytes = $ty::peek_from(bytes, &mut (*output).$n);)+ bytes } } diff --git a/third_party/rust/peek-poke/src/slice_ext.rs b/third_party/rust/peek-poke/src/slice_ext.rs new file mode 100644 index 000000000000..f309d2f74102 --- /dev/null +++ b/third_party/rust/peek-poke/src/slice_ext.rs @@ -0,0 +1,19 @@ +// Copyright 2019 The Servo Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub trait AsEndMutPtr { + fn as_end_mut_ptr(self) -> *mut T; +} + +impl<'a> AsEndMutPtr for &'a mut [u8] { + fn as_end_mut_ptr(self) -> *mut u8 { + unsafe { self.as_mut_ptr().add(self.len()) } + } +} diff --git a/third_party/rust/peek-poke/src/vec_ext.rs b/third_party/rust/peek-poke/src/vec_ext.rs new file mode 100644 index 000000000000..42e26032e52a --- /dev/null +++ b/third_party/rust/peek-poke/src/vec_ext.rs @@ -0,0 +1,26 @@ +// Copyright 2019 The Servo Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::vec::Vec; + +pub trait VecExt { + type Item; + unsafe fn set_end_ptr(&mut self, end: *const Self::Item); +} + +impl VecExt for Vec { + type Item = T; + unsafe fn set_end_ptr(&mut self, end: *const T) { + assert!(end as usize >= self.as_ptr() as usize); + let new_len = end as usize - self.as_ptr() as usize; + assert!(new_len <= self.capacity()); + self.set_len(new_len); + } +} diff --git a/third_party/rust/peek-poke/tests/max_size.rs b/third_party/rust/peek-poke/tests/max_size.rs index 080456ac111f..5f9f9ca52efc 100644 --- a/third_party/rust/peek-poke/tests/max_size.rs +++ b/third_party/rust/peek-poke/tests/max_size.rs @@ -1,5 +1,16 @@ +// Copyright 2019 The Servo Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![allow(dead_code)] -use peek_poke::{PeekPoke, Poke, PeekCopy}; + +use peek_poke::{PeekPoke, Poke}; use std::{marker::PhantomData, mem::size_of}; #[test] @@ -60,7 +71,7 @@ fn test_basic_struct() { #[test] fn test_enum() { - #[derive(Clone, Copy, PeekCopy, Poke)] + #[derive(Clone, Copy, PeekPoke)] enum TestEnum { NoArg, OneArg(usize), @@ -77,7 +88,7 @@ fn test_enum() { #[test] fn test_enum_cstyle() { #[repr(u32)] - #[derive(Clone, Copy, PeekCopy, Poke)] + #[derive(Clone, Copy, PeekPoke)] enum BorderStyle { None = 0, Solid = 1, diff --git a/third_party/rust/peek-poke/tests/round_trip.rs b/third_party/rust/peek-poke/tests/round_trip.rs index 4996eb1feae7..3134c207ec4d 100644 --- a/third_party/rust/peek-poke/tests/round_trip.rs +++ b/third_party/rust/peek-poke/tests/round_trip.rs @@ -1,4 +1,14 @@ -use peek_poke::{Peek, PeekPoke, Poke,PeekCopy}; +// Copyright 2019 The Servo Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use peek_poke::{Peek, PeekPoke, Poke}; use std::{fmt::Debug, marker::PhantomData}; fn poke_into(a: &V) -> Vec { @@ -18,8 +28,7 @@ where V: Debug + Default + PartialEq + Peek + Poke, { let v = poke_into(&a); - let mut b = V::default(); - let end_ptr = unsafe { b.peek_from(v.as_ptr()) }; + let (b, end_ptr) = unsafe { peek_poke::peek_from_default(v.as_ptr()) }; let size = end_ptr as usize - v.as_ptr() as usize; assert_eq!(size, v.len()); assert_eq!(a, b); @@ -89,7 +98,7 @@ fn test_fixed_size_array() { #[test] fn test_tuple() { - the_same((1isize,)); + the_same((1isize, )); the_same((1isize, 2isize, 3isize)); the_same((1isize, ())); } @@ -116,7 +125,7 @@ fn test_basic_struct() { #[test] fn test_enum() { - #[derive(Clone, Copy, Debug, PartialEq, PeekCopy, Poke)] + #[derive(Clone, Copy, Debug, PartialEq, PeekPoke)] enum TestEnum { NoArg, OneArg(usize), @@ -141,7 +150,7 @@ fn test_enum() { #[test] fn test_enum_cstyle() { #[repr(u32)] - #[derive(Clone, Copy, Debug, PartialEq, Eq, PeekCopy, Poke)] + #[derive(Clone, Copy, Debug, PartialEq, Eq, PeekPoke)] enum BorderStyle { None = 0, Solid = 1, @@ -199,6 +208,27 @@ fn test_generic() { the_same(Foo { x: 19.0, y: 42.0 }); } +#[test] +fn test_generic_enum() { + #[derive(Clone, Copy, Debug, Default, PartialEq, PeekPoke)] + pub struct PropertyBindingKey { + pub id: usize, + _phantom: PhantomData, + } + + #[derive(Clone, Copy, Debug, PartialEq, PeekPoke)] + pub enum PropertyBinding { + Value(T), + Binding(PropertyBindingKey, T), + } + + impl Default for PropertyBinding { + fn default() -> Self { + PropertyBinding::Value(Default::default()) + } + } +} + #[cfg(all(feature = "extras", feature = "option_copy"))] mod extra_tests { 
use super::*; diff --git a/third_party/rust/relevant/.cargo-checksum.json b/third_party/rust/relevant/.cargo-checksum.json deleted file mode 100644 index 05021dc81d96..000000000000 --- a/third_party/rust/relevant/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"5e5011954bc26ef3ddc850942128938e98df4c8271abe61c0ea5f1f0a17fc0a6","LICENSE-APACHE":"7cfd738c53d61c79f07e348f622bf7707c9084237054d37fbe07788a75f5881c","LICENSE-MIT":"9507d46994231e6272fcaca81c5af179e32f1522c775eaaec9938015b645ae99","README.md":"8da07788d7b67b67e2eee64989761b87559426f66da0e54d408f769c588cf0f3","src/lib.rs":"7fc604f517c72d7fed503c935a0eb3cc9d7e7d00cfb3a7aacfeabaaa611fe35f"},"package":"bbc232e13d37f4547f5b9b42a5efc380cabe5dbc1807f8b893580640b2ab0308"} \ No newline at end of file diff --git a/third_party/rust/relevant/Cargo.toml b/third_party/rust/relevant/Cargo.toml deleted file mode 100644 index bcd08c7d8ee1..000000000000 --- a/third_party/rust/relevant/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "relevant" -version = "0.4.2" -authors = ["omni-viral "] -description = "A small utility type to emulate must-use types" -license = "MIT/Apache-2.0" -repository = "https://github.com/omni-viral/relevant.git" -[package.metadata.docs.rs] -features = ["backtrace", "log", "serde-1"] -[dependencies.backtrace] -version = "0.3.13" -optional = true - -[dependencies.cfg-if] -version = "0.1" - -[dependencies.log] -version = "0.4" -optional = true - -[dependencies.serde] -version = "1.0" -features = ["derive"] -optional = true - -[features] -default = ["std"] -panic = [] -serde-1 = ["serde"] -std = [] diff --git a/third_party/rust/relevant/LICENSE-APACHE b/third_party/rust/relevant/LICENSE-APACHE deleted file mode 100644 index f47c9411414e..000000000000 --- a/third_party/rust/relevant/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/third_party/rust/relevant/LICENSE-MIT b/third_party/rust/relevant/LICENSE-MIT deleted file mode 100644 index 4a4762e607f0..000000000000 --- a/third_party/rust/relevant/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2016 The Amethyst Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff --git a/third_party/rust/relevant/README.md b/third_party/rust/relevant/README.md deleted file mode 100644 index c05ab42f90cb..000000000000 --- a/third_party/rust/relevant/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Relevant - -A small utility type to emulate must-use types. -They are different from `#[must_use]` attribute in that the one who have an instance must either send it somewhere else or `dispose` it manually. - -This must be desired for types that need manual destruction which can't be implemented with `Drop` trait. -For example resource handler created from some source and that must be returned to the same source. - -## Usage - -The type `Relevant` is non-droppable. As limitation of current implementation it panics when dropped. -To make type non-droppable it must contain non-droppable type (`Relevant` type for example). - -### Example - -```rust - -struct SourceOfFoos { - handle: u64, -} - -/// Foo must be destroyed manually. -struct Foo { - handle: u64, - relevant: Relevant, -} - -/// Function from C library to create `Foo` -/// Access to same source must be synchronized. -extern "C" create_foo(source: u64) -> u64; - -/// Function from C library to destroy `Foo`. -/// Access to same source must be synchronized. -extern "C" destroy_foo(source: u64, foo: u64) -> u64; - -impl SourceOfFoos { - fn create_foo(&mut self) -> Foo { - Foo { - handle: create_foo(self.handle), - relevant: Relevant, - } - } - - fn destroy_foo(&mut self, foo: Foo) { - destroy_foo(self.handle, foo.handle); - foo.relevant.dispose(); - } -} - -``` - -Now it is not possible to accidentally drop `Foo` and leak handle. -Of course it always possible to explicitly `std::mem::forget` relevant type. -But it will be deliberate leak. diff --git a/third_party/rust/relevant/src/lib.rs b/third_party/rust/relevant/src/lib.rs deleted file mode 100644 index 8319d73de221..000000000000 --- a/third_party/rust/relevant/src/lib.rs +++ /dev/null @@ -1,89 +0,0 @@ -//! 
Defines `Relevant` type to use in types that requires -//! custom destruction. -//! Crate supports 3 main mechnisms for error reporting: -//! * "panic" feature makes `Relevant` panic on drop -//! * "log" feature uses `log` crate and `Relevant` will emit `log::error!` on drop. -//! * otherwise `Relevant` will print into stderr using `eprintln!` on drop. -//! -//! "backtrace" feature will add backtrace to the error unless it is reported via panicking. -//! "message" feature will add custom message (specified when value was created) to the error. -//! - -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(not(feature = "std"))] -use core as std; - -/// Values of this type can't be automatically dropped. -/// If struct or enum has field with type `Relevant`, -/// it can't be automatically dropped either. And so considered relevant too. -/// User has to deconstruct such values and call `Relevant::dispose`. -/// If relevant field is private it means that user has to move value into some public method. -/// -/// # Panics -/// -/// With "panic" feature enabled this value will always panic on drop. -/// -#[derive(Clone, Debug, PartialOrd, PartialEq, Ord, Eq, Hash)] -#[cfg_attr(feature = "serde-1", derive(serde::Serialize, serde::Deserialize))] -pub struct Relevant; - -impl Relevant { - /// Dispose this value. - pub fn dispose(self) { - std::mem::forget(self) - } -} - -impl Drop for Relevant { - fn drop(&mut self) { - dropped() - } -} - -cfg_if::cfg_if! { - if #[cfg(feature = "panic")] { - macro_rules! sink { - ($($x:tt)*) => { panic!($($x)*) }; - } - } else if #[cfg(feature = "log")] { - macro_rules! sink { - ($($x:tt)*) => { log::error!($($x)*) }; - } - } else if #[cfg(feature = "std")] { - macro_rules! sink { - ($($x:tt)*) => { eprintln!($($x)*) }; - } - } else { - macro_rules! sink { - ($($x:tt)*) => { panic!($($x)*) }; - } - } -} - -cfg_if::cfg_if! 
{ - if #[cfg(all(not(feature = "panic"), any(feature = "std", feature = "log"), feature = "backtrace"))] { - fn whine() { - let backtrace = backtrace::Backtrace::new(); - sink!("Values of this type can't be dropped!. Trace: {:#?}", backtrace) - } - } else { - fn whine() { - sink!("Values of this type can't be dropped!") - } - } -} - -cfg_if::cfg_if! { - if #[cfg(feature = "std")] { - fn dropped() { - if !std::thread::panicking() { - whine() - } - } - } else { - fn dropped() { - whine() - } - } -} \ No newline at end of file diff --git a/third_party/rust/rendy-descriptor/.cargo-checksum.json b/third_party/rust/rendy-descriptor/.cargo-checksum.json deleted file mode 100644 index 22aa539f909c..000000000000 --- a/third_party/rust/rendy-descriptor/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"ee74a4961bdffd03f8e3d7a218fc8d94151bb5ebb688c5ed9cab343bf35d0fa1","src/allocator.rs":"5f55db009d1d12cc7257e4668f39f070641a79d0123221b836191faae7e486d1","src/lib.rs":"9ca8109e174f350fc3113480315d2d43ed4f2f9f30d887c6806de4577351a6c7","src/ranges.rs":"206ed3dfdc7167b3ada0420773ba6052aaecefbf9d31b31e53e40024c458833b"},"package":"f475bcc0505946e998590f1f0545c52ef4b559174a1b353a7ce6638def8b621e"} \ No newline at end of file diff --git a/third_party/rust/rendy-descriptor/src/allocator.rs b/third_party/rust/rendy-descriptor/src/allocator.rs deleted file mode 100644 index 978276cc1cf6..000000000000 --- a/third_party/rust/rendy-descriptor/src/allocator.rs +++ /dev/null @@ -1,398 +0,0 @@ -use { - crate::ranges::*, - gfx_hal::{ - device::{Device, OutOfMemory}, - pso::{AllocationError, DescriptorPool as _, DescriptorPoolCreateFlags}, - Backend, - }, - smallvec::{smallvec, SmallVec}, - std::{ - collections::{HashMap, VecDeque}, - ops::Deref, - }, -}; - -const MIN_SETS: u32 = 64; -const MAX_SETS: u32 = 512; - -/// Descriptor set from allocator. 
-#[derive(Debug)] -pub struct DescriptorSet { - raw: B::DescriptorSet, - pool: u64, - ranges: DescriptorRanges, -} - -impl DescriptorSet -where - B: Backend, -{ - /// Get raw set - pub fn raw(&self) -> &B::DescriptorSet { - &self.raw - } - - /// Get raw set - /// It must not be replaced. - pub unsafe fn raw_mut(&mut self) -> &mut B::DescriptorSet { - &mut self.raw - } -} - -impl Deref for DescriptorSet -where - B: Backend, -{ - type Target = B::DescriptorSet; - - fn deref(&self) -> &B::DescriptorSet { - &self.raw - } -} - -#[derive(Debug)] -struct Allocation { - sets: SmallVec<[B::DescriptorSet; 1]>, - pools: Vec, -} - -#[derive(Debug)] -struct DescriptorPool { - raw: B::DescriptorPool, - size: u32, - - // Number of free sets left. - free: u32, - - // Number of sets freed (they can't be reused until gfx-hal 0.2) - freed: u32, -} - -unsafe fn allocate_from_pool( - raw: &mut B::DescriptorPool, - layout: &B::DescriptorSetLayout, - count: u32, - allocation: &mut SmallVec<[B::DescriptorSet; 1]>, -) -> Result<(), OutOfMemory> { - let sets_were = allocation.len(); - raw.allocate_sets(std::iter::repeat(layout).take(count as usize), allocation) - .map_err(|err| match err { - AllocationError::Host => OutOfMemory::Host, - AllocationError::Device => OutOfMemory::Device, - err => { - // We check pool for free descriptors and sets before calling this function, - // so it can't be exhausted. 
- // And it can't be fragmented either according to spec - // - // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#VkDescriptorPoolCreateInfo - // - // """ - // Additionally, if all sets allocated from the pool since it was created or most recently reset - // use the same number of descriptors (of each type) and the requested allocation also - // uses that same number of descriptors (of each type), then fragmentation must not cause an allocation failure - // """ - panic!("Unexpected error: {:?}", err); - } - })?; - assert_eq!(allocation.len(), sets_were + count as usize); - Ok(()) -} - -#[derive(Debug)] -struct DescriptorBucket { - pools_offset: u64, - pools: VecDeque>, - total: u64, -} - -impl DescriptorBucket -where - B: Backend, -{ - fn new() -> Self { - DescriptorBucket { - pools_offset: 0, - pools: VecDeque::new(), - total: 0, - } - } - - fn new_pool_size(&self, count: u32) -> u32 { - MIN_SETS // at least MIN_SETS - .max(count) // at least enough for allocation - .max(self.total.min(MAX_SETS as u64) as u32) // at least as much as was allocated so far capped to MAX_SETS - .next_power_of_two() // rounded up to nearest 2^N - } - - unsafe fn dispose(mut self, device: &B::Device) { - if self.total > 0 { - log::error!("Not all descriptor sets were deallocated"); - } - - while let Some(pool) = self.pools.pop_front() { - assert!(pool.freed + pool.free <= pool.size); - if pool.freed + pool.free < pool.size { - log::error!( - "Descriptor pool is still in use during allocator disposal. {:?}", - pool - ); - } else { - log::trace!("Destroying used up descriptor pool"); - device.destroy_descriptor_pool(pool.raw); - self.pools_offset += 1; - } - } - - self.pools - .drain(..) 
- .for_each(|pool| device.destroy_descriptor_pool(pool.raw)); - } - - unsafe fn allocate( - &mut self, - device: &B::Device, - layout: &B::DescriptorSetLayout, - layout_ranges: DescriptorRanges, - mut count: u32, - allocation: &mut Allocation, - ) -> Result<(), OutOfMemory> { - if count == 0 { - return Ok(()); - } - - for (index, pool) in self.pools.iter_mut().enumerate().rev() { - if pool.free == 0 { - continue; - } - - let allocate = pool.free.min(count); - log::trace!("Allocate {} from exising pool", allocate); - allocate_from_pool::(&mut pool.raw, layout, allocate, &mut allocation.sets)?; - allocation.pools.extend( - std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize), - ); - count -= allocate; - pool.free -= allocate; - self.total += allocate as u64; - - if count == 0 { - return Ok(()); - } - } - - while count > 0 { - let size = self.new_pool_size(count); - let pool_ranges = layout_ranges * size; - log::trace!( - "Create new pool with {} sets and {:?} descriptors", - size, - pool_ranges, - ); - let raw = device.create_descriptor_pool( - size as usize, - &pool_ranges, - DescriptorPoolCreateFlags::empty(), - )?; - let allocate = size.min(count); - - self.pools.push_back(DescriptorPool { - raw, - size, - free: size, - freed: 0, - }); - let index = self.pools.len() - 1; - let pool = self.pools.back_mut().unwrap(); - - allocate_from_pool::(&mut pool.raw, layout, allocate, &mut allocation.sets)?; - allocation.pools.extend( - std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize), - ); - - count -= allocate; - pool.free -= allocate; - self.total += allocate as u64; - } - - Ok(()) - } - - unsafe fn free(&mut self, sets: impl IntoIterator, pool: u64) { - let pool = &mut self.pools[(pool - self.pools_offset) as usize]; - let freed = sets.into_iter().count() as u32; - pool.freed += freed; - self.total -= freed as u64; - log::trace!("Freed {} from descriptor bucket", freed); - } - - unsafe fn cleanup(&mut self, device: 
&B::Device) { - while let Some(pool) = self.pools.pop_front() { - if pool.freed < pool.size { - self.pools.push_front(pool); - break; - } - log::trace!("Destroying used up descriptor pool"); - device.destroy_descriptor_pool(pool.raw); - self.pools_offset += 1; - } - } -} - -/// Descriptor allocator. -/// Can be used to allocate descriptor sets for any layout. -#[derive(Debug)] -pub struct DescriptorAllocator { - buckets: HashMap>, - allocation: Allocation, - relevant: relevant::Relevant, - total: u64, -} - -impl DescriptorAllocator -where - B: Backend, -{ - /// Create new allocator instance. - pub fn new() -> Self { - DescriptorAllocator { - buckets: HashMap::new(), - allocation: Allocation { - sets: SmallVec::new(), - pools: Vec::new(), - }, - relevant: relevant::Relevant, - total: 0, - } - } - - /// Destroy allocator instance. - /// All sets allocated from this allocator become invalid. - pub unsafe fn dispose(mut self, device: &B::Device) { - self.buckets - .drain() - .for_each(|(_, bucket)| bucket.dispose(device)); - self.relevant.dispose(); - } - - /// Allocate descriptor set with specified layout. - /// `DescriptorRanges` must match descriptor numbers of the layout. - /// `DescriptorRanges` can be constructed [from bindings] that were used - /// to create layout instance. - /// - /// [from bindings]: . 
- pub unsafe fn allocate( - &mut self, - device: &B::Device, - layout: &B::DescriptorSetLayout, - layout_ranges: DescriptorRanges, - count: u32, - extend: &mut impl Extend>, - ) -> Result<(), OutOfMemory> { - if count == 0 { - return Ok(()); - } - - log::trace!( - "Allocating {} sets with layout {:?} @ {:?}", - count, - layout, - layout_ranges - ); - - let bucket = self - .buckets - .entry(layout_ranges) - .or_insert_with(|| DescriptorBucket::new()); - match bucket.allocate(device, layout, layout_ranges, count, &mut self.allocation) { - Ok(()) => { - extend.extend( - Iterator::zip( - self.allocation.pools.drain(..), - self.allocation.sets.drain(), - ) - .map(|(pool, set)| DescriptorSet { - raw: set, - ranges: layout_ranges, - pool, - }), - ); - Ok(()) - } - Err(err) => { - // Free sets allocated so far. - let mut last = None; - for (index, pool) in self.allocation.pools.drain(..).enumerate().rev() { - match last { - Some(last) if last == pool => { - // same pool, continue - } - Some(last) => { - let sets = &mut self.allocation.sets; - // Free contiguous range of sets from one pool in one go. - bucket.free((index + 1..sets.len()).map(|_| sets.pop().unwrap()), last); - } - None => last = Some(pool), - } - } - - if let Some(last) = last { - bucket.free(self.allocation.sets.drain(), last); - } - - Err(err) - } - } - } - - /// Free descriptor sets. - /// - /// # Safety - /// - /// None of descriptor sets can be referenced in any pending command buffers. - /// All command buffers where at least one of descriptor sets referenced - /// move to invalid state. 
- pub unsafe fn free(&mut self, all_sets: impl IntoIterator>) { - let mut free: Option<(DescriptorRanges, u64, SmallVec<[B::DescriptorSet; 32]>)> = None; - - // Collect contig - for set in all_sets { - match &mut free { - slot @ None => { - slot.replace((set.ranges, set.pool, smallvec![set.raw])); - } - Some((ranges, pool, raw_sets)) if *ranges == set.ranges && *pool == set.pool => { - raw_sets.push(set.raw); - } - Some((ranges, pool, raw_sets)) => { - let bucket = self - .buckets - .get_mut(ranges) - .expect("Set should be allocated from this allocator"); - debug_assert!(bucket.total >= raw_sets.len() as u64); - - bucket.free(raw_sets.drain(), *pool); - *pool = set.pool; - *ranges = set.ranges; - raw_sets.push(set.raw); - } - } - } - - if let Some((ranges, pool, raw_sets)) = free { - let bucket = self - .buckets - .get_mut(&ranges) - .expect("Set should be allocated from this allocator"); - debug_assert!(bucket.total >= raw_sets.len() as u64); - - bucket.free(raw_sets, pool); - } - } - - /// Perform cleanup to allow resources reuse. 
- pub unsafe fn cleanup(&mut self, device: &B::Device) { - self.buckets - .values_mut() - .for_each(|bucket| bucket.cleanup(device)); - } -} diff --git a/third_party/rust/rendy-descriptor/src/lib.rs b/third_party/rust/rendy-descriptor/src/lib.rs deleted file mode 100644 index 18d5e0e51975..000000000000 --- a/third_party/rust/rendy-descriptor/src/lib.rs +++ /dev/null @@ -1,4 +0,0 @@ -mod allocator; -mod ranges; - -pub use {allocator::*, ranges::*}; diff --git a/third_party/rust/rendy-descriptor/src/ranges.rs b/third_party/rust/rendy-descriptor/src/ranges.rs deleted file mode 100644 index a39d1e6c81b5..000000000000 --- a/third_party/rust/rendy-descriptor/src/ranges.rs +++ /dev/null @@ -1,187 +0,0 @@ -use std::{ - cmp::Ordering, - ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign}, -}; - -pub use gfx_hal::pso::{DescriptorRangeDesc, DescriptorSetLayoutBinding, DescriptorType}; - -const DESCRIPTOR_TYPES_COUNT: usize = 11; - -const DESCRIPTOR_TYPES: [DescriptorType; DESCRIPTOR_TYPES_COUNT] = [ - DescriptorType::Sampler, - DescriptorType::CombinedImageSampler, - DescriptorType::SampledImage, - DescriptorType::StorageImage, - DescriptorType::UniformTexelBuffer, - DescriptorType::StorageTexelBuffer, - DescriptorType::UniformBuffer, - DescriptorType::StorageBuffer, - DescriptorType::UniformBufferDynamic, - DescriptorType::StorageBufferDynamic, - DescriptorType::InputAttachment, -]; - -/// Number of descriptors per type. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub struct DescriptorRanges { - counts: [u32; DESCRIPTOR_TYPES_COUNT], -} - -impl DescriptorRanges { - /// Create new instance without descriptors. - pub fn zero() -> Self { - DescriptorRanges { - counts: [0; DESCRIPTOR_TYPES_COUNT], - } - } - - /// Add a single layout binding. - /// Useful when created with `DescriptorRanges::zero()`. 
- pub fn add_binding(&mut self, binding: DescriptorSetLayoutBinding) { - self.counts[binding.ty as usize] += binding.count as u32; - } - - /// Iterate through ranges yelding - /// descriptor types and their amount. - pub fn iter(&self) -> DescriptorRangesIter<'_> { - DescriptorRangesIter { - counts: &self.counts, - index: 0, - } - } - - /// Read as slice. - pub fn counts(&self) -> &[u32] { - &self.counts - } - - /// Read or write as slice. - pub fn counts_mut(&mut self) -> &mut [u32] { - &mut self.counts - } - - /// Calculate ranges from bindings. - pub fn from_bindings(bindings: &[DescriptorSetLayoutBinding]) -> Self { - let mut descs = Self::zero(); - - for binding in bindings { - descs.counts[binding.ty as usize] += binding.count as u32; - } - - descs - } - - /// Calculate ranges from bindings, specified with an iterator. - pub fn from_binding_iter(bindings: I) -> Self - where - I: Iterator, - { - let mut descs = Self::zero(); - - for binding in bindings { - descs.counts[binding.ty as usize] += binding.count as u32; - } - - descs - } -} - -impl PartialOrd for DescriptorRanges { - fn partial_cmp(&self, other: &Self) -> Option { - let mut ord = self.counts[0].partial_cmp(&other.counts[0])?; - for i in 1..DESCRIPTOR_TYPES_COUNT { - match (ord, self.counts[i].partial_cmp(&other.counts[i])?) 
{ - (Ordering::Less, Ordering::Greater) | (Ordering::Greater, Ordering::Less) => { - return None; - } - (Ordering::Equal, new) => ord = new, - _ => (), - } - } - Some(ord) - } -} - -impl Add for DescriptorRanges { - type Output = Self; - fn add(mut self, rhs: Self) -> Self { - self += rhs; - self - } -} - -impl AddAssign for DescriptorRanges { - fn add_assign(&mut self, rhs: Self) { - for i in 0..DESCRIPTOR_TYPES_COUNT { - self.counts[i] += rhs.counts[i]; - } - } -} - -impl Sub for DescriptorRanges { - type Output = Self; - fn sub(mut self, rhs: Self) -> Self { - self -= rhs; - self - } -} - -impl SubAssign for DescriptorRanges { - fn sub_assign(&mut self, rhs: Self) { - for i in 0..DESCRIPTOR_TYPES_COUNT { - self.counts[i] -= rhs.counts[i]; - } - } -} - -impl Mul for DescriptorRanges { - type Output = Self; - fn mul(mut self, rhs: u32) -> Self { - self *= rhs; - self - } -} - -impl MulAssign for DescriptorRanges { - fn mul_assign(&mut self, rhs: u32) { - for i in 0..DESCRIPTOR_TYPES_COUNT { - self.counts[i] *= rhs; - } - } -} - -impl<'a> IntoIterator for &'a DescriptorRanges { - type Item = DescriptorRangeDesc; - type IntoIter = DescriptorRangesIter<'a>; - - fn into_iter(self) -> DescriptorRangesIter<'a> { - self.iter() - } -} - -/// Iterator over descriptor ranges. 
-pub struct DescriptorRangesIter<'a> { - counts: &'a [u32; DESCRIPTOR_TYPES_COUNT], - index: u8, -} - -impl<'a> Iterator for DescriptorRangesIter<'a> { - type Item = DescriptorRangeDesc; - - fn next(&mut self) -> Option { - loop { - let index = self.index as usize; - if index >= DESCRIPTOR_TYPES_COUNT { - return None; - } else { - self.index += 1; - if self.counts[index] > 0 { - return Some(DescriptorRangeDesc { - count: self.counts[index] as usize, - ty: DESCRIPTOR_TYPES[index], - }); - } - } - } - } -} diff --git a/third_party/rust/rendy-memory/.cargo-checksum.json b/third_party/rust/rendy-memory/.cargo-checksum.json deleted file mode 100644 index 6939d578fc93..000000000000 --- a/third_party/rust/rendy-memory/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"fe83416b06a4dd51f32ff690cd9dbf2bb6bd50b71f56c4638e5aa9d462f85b70","src/allocator/dedicated.rs":"5684b00774c9b69d39a23c5ae9b86a99a250d42a69f5d703c14c92445cbfa24e","src/allocator/dynamic.rs":"72ec46316b630d8c81ab88aac2b9749c0a535dae58bb766ac3cdb26e93ab81bf","src/allocator/linear.rs":"8721b2fcab68b39c0371b285a9d999dd871e82a676f0372b4458561140e2d401","src/allocator/mod.rs":"3338e9964975352439726a9fb476886eb8a44c80dc0abe71de74a7c925551fdd","src/block.rs":"e7edbb32d9a1b2f7eb6d129850e5b2f879cf6dff249865b45d959867d1e13bb0","src/heaps/heap.rs":"b78719a00cbfb36d5b7cf116608443267b9ca99a2394be11ee70ce676282f5d5","src/heaps/memory_type.rs":"6329a34e75fccbc86c5f7b2096b9b459a8bdf31f678ac4acdbab01545011d7d8","src/heaps/mod.rs":"02011a59a4f09d1adbf4d5de0d1d0b21118aff44571df9de588d2d196a81bacd","src/lib.rs":"92ce6b52031f3abf662e6a22a114eaadf3bd27f21aad85d33f81d60331bb2459","src/mapping/mod.rs":"6447a902c195f46c4187c8e4872c12d83112eca9d4e4080ee2b84b30bb311092","src/mapping/range.rs":"631e3e063d96cfd3c360437e72d12cfbf4cc0fba3288e3ade291929ba0ae2589","src/mapping/write.rs":"d32403c1cd9973135cd0fdb87630768e5e40164e21c65486735be475a4b3b781","src/memory.rs":"3022d0c02c483494eab55ca7147647c1a6bd5258a0b7410455e6
f983c01e2678","src/usage.rs":"c313855d79b6d4638c788f20345d39a62d929437813a166007a3ec4ea69ee24d","src/util.rs":"39049e139b31594e45b0c1e5058d60832651e8bb83ae30c861c01fea99fd5ca5","src/utilization.rs":"dacb44e4dac24818de2914c3fdd68eaf5f9bfb8e1cf296ff20be03e224d7eec0"},"package":"08f99de535d9e48d9cfab780b521702cc0d7183d354872d223967b75abae1199"} \ No newline at end of file diff --git a/third_party/rust/rendy-memory/src/allocator/dedicated.rs b/third_party/rust/rendy-memory/src/allocator/dedicated.rs deleted file mode 100644 index 1824bfebcf4f..000000000000 --- a/third_party/rust/rendy-memory/src/allocator/dedicated.rs +++ /dev/null @@ -1,188 +0,0 @@ -use std::{ops::Range, ptr::NonNull}; - -use { - crate::{ - allocator::{Allocator, Kind}, - block::Block, - mapping::{mapped_fitting_range, MappedRange}, - memory::*, - }, - gfx_hal::{device::Device as _, Backend}, -}; - -/// Memory block allocated from `DedicatedAllocator` -#[derive(Debug)] -pub struct DedicatedBlock { - memory: Memory, - mapping: Option<(NonNull, Range)>, -} - -unsafe impl Send for DedicatedBlock where B: Backend {} -unsafe impl Sync for DedicatedBlock where B: Backend {} - -impl DedicatedBlock -where - B: Backend, -{ - /// Get inner memory. - /// Panics if mapped. - pub fn unwrap_memory(self) -> Memory { - assert!(self.mapping.is_none()); - self.memory - } - - /// Make unmapped block. 
- pub fn from_memory(memory: Memory) -> Self { - DedicatedBlock { - memory, - mapping: None, - } - } -} - -impl Block for DedicatedBlock -where - B: Backend, -{ - #[inline] - fn properties(&self) -> gfx_hal::memory::Properties { - self.memory.properties() - } - - #[inline] - fn memory(&self) -> &B::Memory { - self.memory.raw() - } - - #[inline] - fn range(&self) -> Range { - 0..self.memory.size() - } - - fn map<'a>( - &'a mut self, - device: &B::Device, - range: Range, - ) -> Result, gfx_hal::device::MapError> { - assert!( - range.start < range.end, - "Memory mapping region must have valid size" - ); - - if !self.memory.host_visible() { - //TODO: invalid access error - return Err(gfx_hal::device::MapError::MappingFailed); - } - - unsafe { - if let Some(ptr) = self - .mapping - .clone() - .and_then(|mapping| mapped_fitting_range(mapping.0, mapping.1, range.clone())) - { - Ok(MappedRange::from_raw(&self.memory, ptr, range)) - } else { - self.unmap(device); - let ptr = device.map_memory(self.memory.raw(), range.clone())?; - let ptr = NonNull::new(ptr).expect("Memory mapping shouldn't return nullptr"); - let mapping = MappedRange::from_raw(&self.memory, ptr, range); - self.mapping = Some((mapping.ptr(), mapping.range())); - Ok(mapping) - } - } - } - - fn unmap(&mut self, device: &B::Device) { - if self.mapping.take().is_some() { - unsafe { - // trace!("Unmap memory: {:#?}", self.memory); - device.unmap_memory(self.memory.raw()); - } - } - } -} - -/// Dedicated memory allocator that uses memory object per allocation requested. -/// -/// This allocator suites best huge allocations. -/// From 32 MiB when GPU has 4-8 GiB memory total. -/// -/// `Heaps` use this allocator when none of sub-allocators bound to the memory type -/// can handle size required. -/// TODO: Check if resource prefers dedicated memory. 
-#[derive(Debug)] -pub struct DedicatedAllocator { - memory_type: gfx_hal::MemoryTypeId, - memory_properties: gfx_hal::memory::Properties, - used: u64, -} - -impl DedicatedAllocator { - /// Get properties required by the allocator. - pub fn properties_required() -> gfx_hal::memory::Properties { - gfx_hal::memory::Properties::empty() - } - - /// Create new `LinearAllocator` - /// for `memory_type` with `memory_properties` specified - pub fn new( - memory_type: gfx_hal::MemoryTypeId, - memory_properties: gfx_hal::memory::Properties, - ) -> Self { - DedicatedAllocator { - memory_type, - memory_properties, - used: 0, - } - } -} - -impl Allocator for DedicatedAllocator -where - B: Backend, -{ - type Block = DedicatedBlock; - - fn kind() -> Kind { - Kind::Dedicated - } - - #[inline] - fn alloc( - &mut self, - device: &B::Device, - size: u64, - _align: u64, - ) -> Result<(DedicatedBlock, u64), gfx_hal::device::AllocationError> { - let memory = unsafe { - Memory::from_raw( - device.allocate_memory(self.memory_type, size)?, - size, - self.memory_properties, - ) - }; - - self.used += size; - - Ok((DedicatedBlock::from_memory(memory), size)) - } - - #[inline] - fn free(&mut self, device: &B::Device, mut block: DedicatedBlock) -> u64 { - block.unmap(device); - let size = block.memory.size(); - self.used -= size; - unsafe { - device.free_memory(block.memory.into_raw()); - } - size - } -} - -impl Drop for DedicatedAllocator { - fn drop(&mut self) { - if self.used > 0 { - log::error!("Not all allocation from DedicatedAllocator was freed"); - } - } -} diff --git a/third_party/rust/rendy-memory/src/allocator/linear.rs b/third_party/rust/rendy-memory/src/allocator/linear.rs deleted file mode 100644 index 291d72aee54b..000000000000 --- a/third_party/rust/rendy-memory/src/allocator/linear.rs +++ /dev/null @@ -1,325 +0,0 @@ -use std::{collections::VecDeque, ops::Range, ptr::NonNull}; - -use { - crate::{ - allocator::{Allocator, Kind}, - block::Block, - mapping::*, - memory::*, - 
util::*, - }, - gfx_hal::{device::Device as _, Backend}, - std::sync::Arc, -}; - -/// Memory block allocated from `LinearAllocator` -pub struct LinearBlock { - memory: Arc>, - linear_index: u64, - ptr: NonNull, - range: Range, - relevant: relevant::Relevant, -} - -impl std::fmt::Debug for LinearBlock -where - B: Backend, -{ - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - fmt.debug_struct("LinearBlock") - .field("memory", &*self.memory) - .field("linear_index", &self.linear_index) - .field("ptr", &self.ptr) - .field("range", &self.range) - .finish() - } -} - -unsafe impl Send for LinearBlock where B: Backend {} -unsafe impl Sync for LinearBlock where B: Backend {} - -impl LinearBlock -where - B: Backend, -{ - fn size(&self) -> u64 { - self.range.end - self.range.start - } - - fn dispose(self) { - self.relevant.dispose(); - } -} - -impl Block for LinearBlock -where - B: Backend, -{ - #[inline] - fn properties(&self) -> gfx_hal::memory::Properties { - self.memory.properties() - } - - #[inline] - fn memory(&self) -> &B::Memory { - self.memory.raw() - } - - #[inline] - fn range(&self) -> Range { - self.range.clone() - } - - #[inline] - fn map<'a>( - &'a mut self, - _device: &B::Device, - range: Range, - ) -> Result, gfx_hal::device::MapError> { - assert!( - range.start < range.end, - "Memory mapping region must have valid size" - ); - if !self.memory.host_visible() { - //TODO: invalid access error - return Err(gfx_hal::device::MapError::MappingFailed); - } - - if let Some((ptr, range)) = mapped_sub_range(self.ptr, self.range.clone(), range) { - let mapping = unsafe { MappedRange::from_raw(&*self.memory, ptr, range) }; - Ok(mapping) - } else { - Err(gfx_hal::device::MapError::OutOfBounds) - } - } - - #[inline] - fn unmap(&mut self, _device: &B::Device) { - debug_assert!(self.memory.host_visible()); - } -} - -/// Config for `LinearAllocator`. 
-#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct LinearConfig { - /// Size of the linear chunk. - /// Keep it big. - pub linear_size: u64, -} - -/// Linear allocator that return memory from chunk sequentially. -/// It keeps only number of bytes allocated from each chunk. -/// Once chunk is exhausted it is placed into list. -/// When all blocks allocated from head of that list are freed, -/// head is freed as well. -/// -/// This allocator suites best short-lived types of allocations. -/// Allocation strategy requires minimal overhead and implementation is fast. -/// But holding single block will completely stop memory recycling. -#[derive(Debug)] -pub struct LinearAllocator { - memory_type: gfx_hal::MemoryTypeId, - memory_properties: gfx_hal::memory::Properties, - linear_size: u64, - offset: u64, - lines: VecDeque>, -} - -#[derive(Debug)] -struct Line { - used: u64, - free: u64, - memory: Arc>, - ptr: NonNull, -} - -unsafe impl Send for Line where B: Backend {} -unsafe impl Sync for Line where B: Backend {} - -impl LinearAllocator -where - B: Backend, -{ - /// Get properties required by the `LinearAllocator`. - pub fn properties_required() -> gfx_hal::memory::Properties { - gfx_hal::memory::Properties::CPU_VISIBLE - } - - /// Maximum allocation size. - pub fn max_allocation(&self) -> u64 { - self.linear_size / 2 - } - - /// Create new `LinearAllocator` - /// for `memory_type` with `memory_properties` specified, - /// with `LinearConfig` provided. 
- pub fn new( - memory_type: gfx_hal::MemoryTypeId, - memory_properties: gfx_hal::memory::Properties, - config: LinearConfig, - ) -> Self { - log::trace!( - "Create new 'linear' allocator: type: '{:?}', properties: '{:#?}' config: '{:#?}'", - memory_type, - memory_properties, - config - ); - assert!(memory_properties.contains(Self::properties_required())); - assert!( - fits_usize(config.linear_size), - "Linear size must fit in both usize and u64" - ); - LinearAllocator { - memory_type, - memory_properties, - linear_size: config.linear_size, - offset: 0, - lines: VecDeque::new(), - } - } - - /// Perform full cleanup of the memory allocated. - pub fn dispose(mut self, device: &B::Device) { - let _ = self.cleanup(device, 0); - if !self.lines.is_empty() { - log::error!( - "Lines are not empty during allocator disposal. Lines: {:#?}", - self.lines - ); - } - } - - fn cleanup(&mut self, device: &B::Device, off: usize) -> u64 { - let mut freed = 0; - while self.lines.len() > off { - if self.lines[0].used > self.lines[0].free { - break; - } - - let line = self.lines.pop_front().unwrap(); - self.offset += 1; - - unsafe { - match Arc::try_unwrap(line.memory) { - Ok(memory) => { - // trace!("Unmap memory: {:#?}", line.memory); - device.unmap_memory(memory.raw()); - - freed += memory.size(); - device.free_memory(memory.into_raw()); - } - Err(_) => log::error!("Allocated `Line` was freed, but memory is still shared and never will be destroyed"), - } - } - } - freed - } -} - -impl Allocator for LinearAllocator -where - B: Backend, -{ - type Block = LinearBlock; - - fn kind() -> Kind { - Kind::Linear - } - - fn alloc( - &mut self, - device: &B::Device, - size: u64, - align: u64, - ) -> Result<(LinearBlock, u64), gfx_hal::device::AllocationError> { - debug_assert!(self - .memory_properties - .contains(gfx_hal::memory::Properties::CPU_VISIBLE)); - - assert!(size <= self.linear_size); - assert!(align <= self.linear_size); - - let count = self.lines.len() as u64; - if let Some(line) 
= self.lines.back_mut() { - let aligned = aligned(line.used, align); - let overhead = aligned - line.used; - if self.linear_size - size > aligned { - line.used = aligned + size; - line.free += overhead; - let (ptr, range) = - mapped_sub_range(line.ptr, 0..self.linear_size, aligned..aligned + size) - .expect("This sub-range must fit in line mapping"); - - return Ok(( - LinearBlock { - linear_index: self.offset + count - 1, - memory: line.memory.clone(), - ptr, - range, - relevant: relevant::Relevant, - }, - 0, - )); - } - } - - let (memory, ptr) = unsafe { - let raw = device.allocate_memory(self.memory_type, self.linear_size)?; - - let ptr = match device.map_memory(&raw, 0..self.linear_size) { - Ok(ptr) => NonNull::new_unchecked(ptr), - Err(gfx_hal::device::MapError::OutOfMemory(error)) => { - device.free_memory(raw); - return Err(error.into()); - } - Err(_) => panic!("Unexpected mapping failure"), - }; - - let memory = Memory::from_raw(raw, self.linear_size, self.memory_properties); - - (memory, ptr) - }; - - let line = Line { - used: size, - free: 0, - ptr, - memory: Arc::new(memory), - }; - - let (ptr, range) = mapped_sub_range(ptr, 0..self.linear_size, 0..size) - .expect("This sub-range must fit in line mapping"); - - let block = LinearBlock { - linear_index: self.offset + count, - memory: line.memory.clone(), - ptr, - range, - relevant: relevant::Relevant, - }; - - self.lines.push_back(line); - Ok((block, self.linear_size)) - } - - fn free(&mut self, device: &B::Device, block: Self::Block) -> u64 { - let index = block.linear_index - self.offset; - assert!( - fits_usize(index), - "This can't exceed lines list length which fits into usize by definition" - ); - let index = index as usize; - assert!( - index < self.lines.len(), - "Can't be allocated from not yet created line" - ); - { - let ref mut line = self.lines[index]; - line.free += block.size(); - } - block.dispose(); - - self.cleanup(device, 1) - } -} diff --git 
a/third_party/rust/rendy-memory/src/allocator/mod.rs b/third_party/rust/rendy-memory/src/allocator/mod.rs deleted file mode 100644 index c6bb04d6ee7f..000000000000 --- a/third_party/rust/rendy-memory/src/allocator/mod.rs +++ /dev/null @@ -1,50 +0,0 @@ -//! This module provides `Allocator` trait and few allocators that implements the trait. - -mod dedicated; -mod dynamic; -mod linear; - -use crate::block::Block; - -pub use self::{ - dedicated::{DedicatedAllocator, DedicatedBlock}, - dynamic::{DynamicAllocator, DynamicBlock, DynamicConfig}, - linear::{LinearAllocator, LinearBlock, LinearConfig}, -}; - -/// Allocator kind. -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub enum Kind { - /// Memory object per allocation. - Dedicated, - - /// General purpose allocator. - Dynamic, - - /// Allocates linearly. - /// Fast and low overhead. - /// Suitable for one-time-use allocations. - Linear, -} - -/// Allocator trait implemented for various allocators. -pub trait Allocator { - /// Block type returned by allocator. - type Block: Block; - - /// Get allocator kind. - fn kind() -> Kind; - - /// Allocate block of memory. - /// On success returns allocated block and amount of memory consumed from device. - fn alloc( - &mut self, - device: &B::Device, - size: u64, - align: u64, - ) -> Result<(Self::Block, u64), gfx_hal::device::AllocationError>; - - /// Free block of memory. - /// Returns amount of memory returned to the device. - fn free(&mut self, device: &B::Device, block: Self::Block) -> u64; -} diff --git a/third_party/rust/rendy-memory/src/block.rs b/third_party/rust/rendy-memory/src/block.rs deleted file mode 100644 index cfdf8ca2865c..000000000000 --- a/third_party/rust/rendy-memory/src/block.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::ops::Range; - -use crate::mapping::MappedRange; - -/// Block that owns a `Range` of the `Memory`. 
-/// Implementor must ensure that there can't be any other blocks -/// with overlapping range (either through type system or safety notes for unsafe functions). -/// Provides access to safe memory range mapping. -pub trait Block { - /// Get memory properties of the block. - fn properties(&self) -> gfx_hal::memory::Properties; - - /// Get raw memory object. - fn memory(&self) -> &B::Memory; - - /// Get memory range owned by this block. - fn range(&self) -> Range; - - /// Get size of the block. - fn size(&self) -> u64 { - let range = self.range(); - range.end - range.start - } - - /// Get mapping for the buffer range. - /// Memory writes to the region performed by device become available for the host. - fn map<'a>( - &'a mut self, - device: &B::Device, - range: Range, - ) -> Result, gfx_hal::device::MapError>; - - /// Release memory mapping. Must be called after successful `map` call. - /// No-op if block is not mapped. - fn unmap(&mut self, device: &B::Device); -} diff --git a/third_party/rust/rendy-memory/src/heaps/memory_type.rs b/third_party/rust/rendy-memory/src/heaps/memory_type.rs deleted file mode 100644 index fa0eb844d771..000000000000 --- a/third_party/rust/rendy-memory/src/heaps/memory_type.rs +++ /dev/null @@ -1,157 +0,0 @@ -use { - super::{BlockFlavor, HeapsConfig}, - crate::{allocator::*, usage::MemoryUsage, utilization::*}, - gfx_hal::memory::Properties, -}; - -#[derive(Debug)] -pub(super) struct MemoryType { - heap_index: usize, - properties: Properties, - dedicated: DedicatedAllocator, - linear: Option>, - dynamic: Option>, - // chunk: Option, - used: u64, - effective: u64, -} - -impl MemoryType -where - B: gfx_hal::Backend, -{ - pub(super) fn new( - memory_type: gfx_hal::MemoryTypeId, - heap_index: usize, - properties: Properties, - config: HeapsConfig, - ) -> Self { - MemoryType { - properties, - heap_index, - dedicated: DedicatedAllocator::new(memory_type, properties), - linear: if properties.contains(Properties::CPU_VISIBLE) { - config - .linear 
- .map(|config| LinearAllocator::new(memory_type, properties, config)) - } else { - None - }, - dynamic: config - .dynamic - .map(|config| DynamicAllocator::new(memory_type, properties, config)), - used: 0, - effective: 0, - } - } - - pub(super) fn properties(&self) -> Properties { - self.properties - } - - pub(super) fn heap_index(&self) -> usize { - self.heap_index - } - - pub(super) fn alloc( - &mut self, - device: &B::Device, - usage: impl MemoryUsage, - size: u64, - align: u64, - ) -> Result<(BlockFlavor, u64), gfx_hal::device::AllocationError> { - let (block, allocated) = self.alloc_impl(device, usage, size, align)?; - self.effective += block.size(); - self.used += allocated; - Ok((block, allocated)) - } - - fn alloc_impl( - &mut self, - device: &B::Device, - usage: impl MemoryUsage, - size: u64, - align: u64, - ) -> Result<(BlockFlavor, u64), gfx_hal::device::AllocationError> { - match (self.dynamic.as_mut(), self.linear.as_mut()) { - (Some(dynamic), Some(linear)) => { - if dynamic.max_allocation() >= size - && usage.allocator_fitness(Kind::Dynamic) - > usage.allocator_fitness(Kind::Linear) - { - dynamic - .alloc(device, size, align) - .map(|(block, size)| (BlockFlavor::Dynamic(block), size)) - } else if linear.max_allocation() >= size - && usage.allocator_fitness(Kind::Linear) > 0 - { - linear - .alloc(device, size, align) - .map(|(block, size)| (BlockFlavor::Linear(block), size)) - } else { - self.dedicated - .alloc(device, size, align) - .map(|(block, size)| (BlockFlavor::Dedicated(block), size)) - } - } - (Some(dynamic), None) => { - if dynamic.max_allocation() >= size && usage.allocator_fitness(Kind::Dynamic) > 0 { - dynamic - .alloc(device, size, align) - .map(|(block, size)| (BlockFlavor::Dynamic(block), size)) - } else { - self.dedicated - .alloc(device, size, align) - .map(|(block, size)| (BlockFlavor::Dedicated(block), size)) - } - } - (None, Some(linear)) => { - if linear.max_allocation() >= size && usage.allocator_fitness(Kind::Linear) > 0 { - 
linear - .alloc(device, size, align) - .map(|(block, size)| (BlockFlavor::Linear(block), size)) - } else { - self.dedicated - .alloc(device, size, align) - .map(|(block, size)| (BlockFlavor::Dedicated(block), size)) - } - } - (None, None) => self - .dedicated - .alloc(device, size, align) - .map(|(block, size)| (BlockFlavor::Dedicated(block), size)), - } - } - - pub(super) fn free(&mut self, device: &B::Device, block: BlockFlavor) -> u64 { - match block { - BlockFlavor::Dedicated(block) => self.dedicated.free(device, block), - BlockFlavor::Linear(block) => self.linear.as_mut().unwrap().free(device, block), - BlockFlavor::Dynamic(block) => self.dynamic.as_mut().unwrap().free(device, block), - } - } - - pub(super) fn dispose(self, device: &B::Device) { - log::trace!("Dispose memory allocators"); - - if let Some(linear) = self.linear { - linear.dispose(device); - log::trace!("Linear allocator disposed"); - } - if let Some(dynamic) = self.dynamic { - dynamic.dispose(); - log::trace!("Dynamic allocator disposed"); - } - } - - pub(super) fn utilization(&self) -> MemoryTypeUtilization { - MemoryTypeUtilization { - utilization: MemoryUtilization { - used: self.used, - effective: self.effective, - }, - properties: self.properties, - heap_index: self.heap_index, - } - } -} diff --git a/third_party/rust/rendy-memory/src/heaps/mod.rs b/third_party/rust/rendy-memory/src/heaps/mod.rs deleted file mode 100644 index 2b4041625ea8..000000000000 --- a/third_party/rust/rendy-memory/src/heaps/mod.rs +++ /dev/null @@ -1,324 +0,0 @@ -mod heap; -mod memory_type; - -use { - self::{heap::MemoryHeap, memory_type::MemoryType}, - crate::{allocator::*, block::Block, mapping::*, usage::MemoryUsage, util::*, utilization::*}, - std::ops::Range, -}; - -/// Possible errors returned by `Heaps`. -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub enum HeapsError { - /// Memory allocation failure. 
- AllocationError(gfx_hal::device::AllocationError), - /// No memory types among required for resource with requested properties was found. - NoSuitableMemory(u32, gfx_hal::memory::Properties), -} - -impl std::fmt::Display for HeapsError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - HeapsError::AllocationError(e) => write!(f, "{:?}", e), - HeapsError::NoSuitableMemory(e, e2) => write!( - f, - "Memory type among ({}) with properties ({:?}) not found", - e, e2 - ), - } - } -} -impl std::error::Error for HeapsError {} - -impl From for HeapsError { - fn from(error: gfx_hal::device::AllocationError) -> Self { - HeapsError::AllocationError(error) - } -} - -impl From for HeapsError { - fn from(error: gfx_hal::device::OutOfMemory) -> Self { - HeapsError::AllocationError(error.into()) - } -} - -/// Config for `Heaps` allocator. -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct HeapsConfig { - /// Config for linear sub-allocator. - pub linear: Option, - - /// Config for dynamic sub-allocator. - pub dynamic: Option, -} - -/// Heaps available on particular physical device. -#[derive(Debug)] -pub struct Heaps { - types: Vec>, - heaps: Vec, -} - -impl Heaps -where - B: gfx_hal::Backend, -{ - /// This must be called with `gfx_hal::memory::Properties` fetched from physical device. 
- pub unsafe fn new(types: P, heaps: H) -> Self - where - P: IntoIterator, - H: IntoIterator, - { - let heaps = heaps - .into_iter() - .map(|size| MemoryHeap::new(size)) - .collect::>(); - Heaps { - types: types - .into_iter() - .enumerate() - .map(|(index, (properties, heap_index, config))| { - assert!( - fits_u32(index), - "Number of memory types must fit in u32 limit" - ); - assert!( - fits_usize(heap_index), - "Number of memory types must fit in u32 limit" - ); - let memory_type = gfx_hal::MemoryTypeId(index); - let heap_index = heap_index as usize; - assert!(heap_index < heaps.len()); - MemoryType::new(memory_type, heap_index, properties, config) - }) - .collect(), - heaps, - } - } - - /// Allocate memory block - /// from one of memory types specified by `mask`, - /// for intended `usage`, - /// with `size` - /// and `align` requirements. - pub fn allocate( - &mut self, - device: &B::Device, - mask: u32, - usage: impl MemoryUsage, - size: u64, - align: u64, - ) -> Result, HeapsError> { - debug_assert!(fits_u32(self.types.len())); - - let (memory_index, _, _) = { - let suitable_types = self - .types - .iter() - .enumerate() - .filter(|(index, _)| (mask & (1u32 << index)) != 0) - .filter_map(|(index, mt)| { - if mt.properties().contains(usage.properties_required()) { - let fitness = usage.memory_fitness(mt.properties()); - Some((index, mt, fitness)) - } else { - None - } - }) - .collect::>(); - - if suitable_types.is_empty() { - return Err(HeapsError::NoSuitableMemory( - mask, - usage.properties_required(), - )); - } - - suitable_types - .into_iter() - .filter(|(_, mt, _)| self.heaps[mt.heap_index()].available() > size + align) - .max_by_key(|&(_, _, fitness)| fitness) - .ok_or_else(|| { - log::error!("All suitable heaps are exhausted. {:#?}", self); - gfx_hal::device::OutOfMemory::Device - })? 
- }; - - self.allocate_from(device, memory_index as u32, usage, size, align) - } - - /// Allocate memory block - /// from `memory_index` specified, - /// for intended `usage`, - /// with `size` - /// and `align` requirements. - fn allocate_from( - &mut self, - device: &B::Device, - memory_index: u32, - usage: impl MemoryUsage, - size: u64, - align: u64, - ) -> Result, HeapsError> { - log::trace!( - "Allocate memory block: type '{}', usage '{:#?}', size: '{}', align: '{}'", - memory_index, - usage, - size, - align - ); - assert!(fits_usize(memory_index)); - - let ref mut memory_type = self.types[memory_index as usize]; - let ref mut memory_heap = self.heaps[memory_type.heap_index()]; - - if memory_heap.available() < size { - return Err(gfx_hal::device::OutOfMemory::Device.into()); - } - - let (block, allocated) = memory_type.alloc(device, usage, size, align)?; - memory_heap.allocated(allocated, block.size()); - - Ok(MemoryBlock { - block, - memory_index, - }) - } - - /// Free memory block. - /// - /// Memory block must be allocated from this heap. - pub fn free(&mut self, device: &B::Device, block: MemoryBlock) { - // trace!("Free block '{:#?}'", block); - let memory_index = block.memory_index; - debug_assert!(fits_usize(memory_index)); - let size = block.size(); - - let ref mut memory_type = self.types[memory_index as usize]; - let ref mut memory_heap = self.heaps[memory_type.heap_index()]; - let freed = memory_type.free(device, block.block); - memory_heap.freed(freed, size); - } - - /// Dispose of allocator. - /// Cleanup allocators before dropping. - /// Will panic if memory instances are left allocated. - pub fn dispose(self, device: &B::Device) { - for mt in self.types { - mt.dispose(device) - } - } - - /// Get memory utilization. 
- pub fn utilization(&self) -> TotalMemoryUtilization { - TotalMemoryUtilization { - heaps: self.heaps.iter().map(MemoryHeap::utilization).collect(), - types: self.types.iter().map(MemoryType::utilization).collect(), - } - } -} - -/// Memory block allocated from `Heaps`. -#[derive(Debug)] -pub struct MemoryBlock { - block: BlockFlavor, - memory_index: u32, -} - -impl MemoryBlock -where - B: gfx_hal::Backend, -{ - /// Get memory type id. - pub fn memory_type(&self) -> u32 { - self.memory_index - } -} - -#[derive(Debug)] -enum BlockFlavor { - Dedicated(DedicatedBlock), - Linear(LinearBlock), - Dynamic(DynamicBlock), - // Chunk(ChunkBlock), -} - -macro_rules! any_block { - ($self:ident. $block:ident => $expr:expr) => {{ - use self::BlockFlavor::*; - match $self.$block { - Dedicated($block) => $expr, - Linear($block) => $expr, - Dynamic($block) => $expr, - // Chunk($block) => $expr, - } - }}; - (& $self:ident. $block:ident => $expr:expr) => {{ - use self::BlockFlavor::*; - match &$self.$block { - Dedicated($block) => $expr, - Linear($block) => $expr, - Dynamic($block) => $expr, - // Chunk($block) => $expr, - } - }}; - (&mut $self:ident. 
$block:ident => $expr:expr) => {{ - use self::BlockFlavor::*; - match &mut $self.$block { - Dedicated($block) => $expr, - Linear($block) => $expr, - Dynamic($block) => $expr, - // Chunk($block) => $expr, - } - }}; -} - -impl BlockFlavor -where - B: gfx_hal::Backend, -{ - #[inline] - fn size(&self) -> u64 { - use self::BlockFlavor::*; - match self { - Dedicated(block) => block.size(), - Linear(block) => block.size(), - Dynamic(block) => block.size(), - // Chunk(block) => block.size(), - } - } -} - -impl Block for MemoryBlock -where - B: gfx_hal::Backend, -{ - #[inline] - fn properties(&self) -> gfx_hal::memory::Properties { - any_block!(&self.block => block.properties()) - } - - #[inline] - fn memory(&self) -> &B::Memory { - any_block!(&self.block => block.memory()) - } - - #[inline] - fn range(&self) -> Range { - any_block!(&self.block => block.range()) - } - - fn map<'a>( - &'a mut self, - device: &B::Device, - range: Range, - ) -> Result, gfx_hal::device::MapError> { - any_block!(&mut self.block => block.map(device, range)) - } - - fn unmap(&mut self, device: &B::Device) { - any_block!(&mut self.block => block.unmap(device)) - } -} diff --git a/third_party/rust/rendy-memory/src/lib.rs b/third_party/rust/rendy-memory/src/lib.rs deleted file mode 100644 index a0653ee2b125..000000000000 --- a/third_party/rust/rendy-memory/src/lib.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! GPU memory management -//! 
- -#![warn( - missing_debug_implementations, - missing_copy_implementations, - missing_docs, - trivial_casts, - trivial_numeric_casts, - unused_extern_crates, - unused_import_braces, - unused_qualifications -)] -mod allocator; -mod block; -mod heaps; -mod mapping; -mod memory; -mod usage; -mod util; -mod utilization; - -pub use crate::{ - allocator::*, - block::Block, - heaps::{Heaps, HeapsConfig, HeapsError, MemoryBlock}, - mapping::{write::Write, Coherent, MappedRange, MaybeCoherent, NonCoherent}, - memory::Memory, - usage::*, - utilization::*, -}; diff --git a/third_party/rust/rendy-memory/src/mapping/mod.rs b/third_party/rust/rendy-memory/src/mapping/mod.rs deleted file mode 100644 index 6da4c90e3fda..000000000000 --- a/third_party/rust/rendy-memory/src/mapping/mod.rs +++ /dev/null @@ -1,288 +0,0 @@ -mod range; -pub(crate) mod write; - -use { - crate::{memory::Memory, util::fits_usize}, - gfx_hal::{device::Device as _, Backend}, - std::{ops::Range, ptr::NonNull}, -}; - -pub(crate) use self::range::{ - mapped_fitting_range, mapped_slice, mapped_slice_mut, mapped_sub_range, -}; -use self::write::{Write, WriteCoherent, WriteFlush}; - -/// Non-coherent marker. -#[derive(Clone, Copy, Debug)] -pub struct NonCoherent; - -/// Coherent marker. -#[derive(Clone, Copy, Debug)] -pub struct Coherent; - -/// Value that contains either coherent marker or non-coherent marker. -#[derive(Clone, Copy, Debug)] -pub struct MaybeCoherent(bool); - -/// Represents range of the memory mapped to the host. -/// Provides methods for safer host access to the memory. -#[derive(Debug)] -pub struct MappedRange<'a, B: Backend, C = MaybeCoherent> { - /// Memory object that is mapped. - memory: &'a Memory, - - /// Pointer to range mapped memory. - ptr: NonNull, - - /// Range of mapped memory. - range: Range, - - /// Coherency marker - coherent: C, -} - -impl<'a, B> MappedRange<'a, B> -where - B: Backend, -{ - // /// Map range of memory. - // /// `range` is in memory object space. 
- // /// - // /// # Safety - // /// - // /// * Only one range for the given memory object can be mapped. - // /// * Memory object must be not mapped. - // /// * Memory object must be created with device specified. - // pub unsafe fn new( - // memory: &'a Memory, - // device: &B::Device, - // range: Range, - // ) -> Result { - // assert!( - // range.start < range.end, - // "Memory mapping region must have valid size" - // ); - // assert!( - // fits_usize(range.end - range.start), - // "Range length must fit in usize" - // ); - // assert!(memory.host_visible()); - - // let ptr = device.map_memory(memory.raw(), range.clone())?; - // assert!( - // (ptr as usize).wrapping_neg() >= (range.end - range.start) as usize, - // "Resulting pointer value + range length must fit in usize. Pointer: {:p}, range {:?}", - // ptr, - // range, - // ); - - // Ok(Self::from_raw(memory, NonNull::new_unchecked(ptr), range)) - // } - - /// Construct mapped range from raw mapping - /// - /// # Safety - /// - /// `memory` `range` must be mapped to host memory region pointer by `ptr`. - /// `range` is in memory object space. - /// `ptr` points to the `range.start` offset from memory origin. - pub unsafe fn from_raw(memory: &'a Memory, ptr: NonNull, range: Range) -> Self { - assert!( - range.start < range.end, - "Memory mapping region must have valid size" - ); - MappedRange { - ptr, - range, - memory, - coherent: MaybeCoherent(memory.host_coherent()), - } - } - - /// Get pointer to beginning of memory region. - /// i.e. to `range().start` offset from memory origin. - pub fn ptr(&self) -> NonNull { - self.ptr - } - - /// Get mapped range. - pub fn range(&self) -> Range { - self.range.clone() - } - - /// Fetch readable slice of sub-range to be read. - /// Invalidating range if memory is not coherent. - /// `range.end - range.start` must be multiple of `size_of::()`. - /// `mapping offset + range.start` must be multiple of `align_of::()`. 
- /// - /// # Safety - /// - /// * Caller must ensure that device won't write to the memory region until the borrowing ends. - /// * `T` Must be plain-old-data type compatible with data in mapped region. - pub unsafe fn read<'b, T>( - &'b mut self, - device: &B::Device, - range: Range, - ) -> Result<&'b [T], gfx_hal::device::MapError> - where - 'a: 'b, - T: Copy, - { - assert!( - range.start < range.end, - "Memory mapping region must have valid size" - ); - assert!( - fits_usize(range.end - range.start), - "Range length must fit in usize" - ); - - let (ptr, range) = mapped_sub_range(self.ptr, self.range.clone(), range) - .ok_or_else(|| gfx_hal::device::MapError::OutOfBounds)?; - - let size = (range.end - range.start) as usize; - - if self.coherent.0 { - device - .invalidate_mapped_memory_ranges(Some((self.memory.raw(), self.range.clone())))?; - } - - let slice = mapped_slice::(ptr, size); - Ok(slice) - } - - /// Fetch writer to the sub-region. - /// This writer will flush data on drop if written at least once. - /// - /// # Safety - /// - /// * Caller must ensure that device won't write to or read from the memory region. 
- pub unsafe fn write<'b, T: 'b>( - &'b mut self, - device: &'b B::Device, - range: Range, - ) -> Result + 'b, gfx_hal::device::MapError> - where - 'a: 'b, - T: Copy, - { - assert!( - range.start < range.end, - "Memory mapping region must have valid size" - ); - assert!( - fits_usize(range.end - range.start), - "Range length must fit in usize" - ); - - let (ptr, range) = mapped_sub_range(self.ptr, self.range.clone(), range) - .ok_or_else(|| gfx_hal::device::MapError::OutOfBounds)?; - - let size = (range.end - range.start) as usize; - - if !self.coherent.0 { - device - .invalidate_mapped_memory_ranges(Some((self.memory.raw(), self.range.clone())))?; - } - - let slice = mapped_slice_mut::(ptr, size); - - let ref memory = self.memory; - - Ok(WriteFlush { - slice, - flush: if !self.coherent.0 { - Some(move || { - device - .flush_mapped_memory_ranges(Some((memory.raw(), range))) - .expect("Should flush successfully"); - }) - } else { - None - }, - }) - } - - /// Convert into mapped range with statically known coherency. - pub fn coherent(self) -> Result, MappedRange<'a, B, NonCoherent>> { - if self.coherent.0 { - Ok(MappedRange { - memory: self.memory, - ptr: self.ptr, - range: self.range, - coherent: Coherent, - }) - } else { - Err(MappedRange { - memory: self.memory, - ptr: self.ptr, - range: self.range, - coherent: NonCoherent, - }) - } - } -} - -impl<'a, B> From> for MappedRange<'a, B> -where - B: Backend, -{ - fn from(range: MappedRange<'a, B, Coherent>) -> Self { - MappedRange { - memory: range.memory, - ptr: range.ptr, - range: range.range, - coherent: MaybeCoherent(true), - } - } -} - -impl<'a, B> From> for MappedRange<'a, B> -where - B: Backend, -{ - fn from(range: MappedRange<'a, B, NonCoherent>) -> Self { - MappedRange { - memory: range.memory, - ptr: range.ptr, - range: range.range, - coherent: MaybeCoherent(false), - } - } -} - -impl<'a, B> MappedRange<'a, B, Coherent> -where - B: Backend, -{ - /// Fetch writer to the sub-region. 
- /// - /// # Safety - /// - /// * Caller must ensure that device won't write to or read from the memory region. - pub unsafe fn write<'b, U: 'b>( - &'b mut self, - range: Range, - ) -> Result + 'b, gfx_hal::device::MapError> - where - U: Copy, - { - assert!( - range.start < range.end, - "Memory mapping region must have valid size" - ); - assert!( - fits_usize(range.end - range.start), - "Range length must fit in usize" - ); - - let (ptr, range) = mapped_sub_range(self.ptr, self.range.clone(), range) - .ok_or_else(|| gfx_hal::device::MapError::OutOfBounds)?; - - let size = (range.end - range.start) as usize; - - let slice = mapped_slice_mut::(ptr, size); - - Ok(WriteCoherent { slice }) - } -} diff --git a/third_party/rust/rendy-memory/src/mapping/range.rs b/third_party/rust/rendy-memory/src/mapping/range.rs deleted file mode 100644 index f840cc7539b5..000000000000 --- a/third_party/rust/rendy-memory/src/mapping/range.rs +++ /dev/null @@ -1,101 +0,0 @@ -use { - crate::util::fits_usize, - std::{ - mem::{align_of, size_of}, - ops::Range, - ptr::NonNull, - slice::{from_raw_parts, from_raw_parts_mut}, - }, -}; - -/// Get sub-range of memory mapping. -/// `range` and `fitting` is in memory object space. -/// `ptr` points to the `range.start` offset from memory origin. -/// returns pointer to `fitting.start` offset from memory origin -/// if `fitting` is contained in `range`. -pub(crate) fn mapped_fitting_range( - ptr: NonNull, - range: Range, - fitting: Range, -) -> Option> { - assert!( - range.start < range.end, - "Memory mapping region must have valid size" - ); - assert!( - fitting.start < fitting.end, - "Memory mapping region must have valid size" - ); - assert!(fits_usize(range.end - range.start)); - assert!(usize::max_value() - (range.end - range.start) as usize >= ptr.as_ptr() as usize); - - if fitting.start < range.start || fitting.end > range.end { - None - } else { - Some(unsafe { - // for x > 0 and y >= 0: x + y > 0. No overflow due to checks above. 
- NonNull::new_unchecked( - (ptr.as_ptr() as usize + (fitting.start - range.start) as usize) as *mut u8, - ) - }) - } -} - -/// Get sub-range of memory mapping. -/// `range` is in memory object space. -/// `sub` is a range inside `range`. -/// `ptr` points to the `range.start` offset from memory origin. -/// returns pointer to the `range.starti + sub.start` offset from memory origin -/// if `sub` fits in `range`. -pub(crate) fn mapped_sub_range( - ptr: NonNull, - range: Range, - sub: Range, -) -> Option<(NonNull, Range)> { - let fitting = sub.start.checked_add(range.start)?..sub.end.checked_add(range.start)?; - let ptr = mapped_fitting_range(ptr, range, fitting.clone())?; - Some((ptr, fitting)) -} - -/// # Safety -/// -/// User must ensure that: -/// * this function won't create aliasing slices. -/// * returned slice doesn't outlive mapping. -/// * `T` Must be plain-old-data type compatible with data in mapped region. -pub(crate) unsafe fn mapped_slice_mut<'a, T>(ptr: NonNull, size: usize) -> &'a mut [T] { - assert_eq!( - size % size_of::(), - 0, - "Range length must be multiple of element size" - ); - let offset = ptr.as_ptr() as usize; - assert_eq!( - offset % align_of::(), - 0, - "Range offset must be multiple of element alignment" - ); - assert!(usize::max_value() - size >= ptr.as_ptr() as usize); - from_raw_parts_mut(ptr.as_ptr() as *mut T, size) -} - -/// # Safety -/// -/// User must ensure that: -/// * returned slice doesn't outlive mapping. -/// * `T` Must be plain-old-data type compatible with data in mapped region. 
-pub(crate) unsafe fn mapped_slice<'a, T>(ptr: NonNull, size: usize) -> &'a [T] { - assert_eq!( - size % size_of::(), - 0, - "Range length must be multiple of element size" - ); - let offset = ptr.as_ptr() as usize; - assert_eq!( - offset % align_of::(), - 0, - "Range offset must be multiple of element alignment" - ); - assert!(usize::max_value() - size >= ptr.as_ptr() as usize); - from_raw_parts(ptr.as_ptr() as *const T, size) -} diff --git a/third_party/rust/rendy-memory/src/mapping/write.rs b/third_party/rust/rendy-memory/src/mapping/write.rs deleted file mode 100644 index d067a6126c2c..000000000000 --- a/third_party/rust/rendy-memory/src/mapping/write.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::ptr::copy_nonoverlapping; - -/// Trait for memory region suitable for host writes. -pub trait Write { - /// Get mutable slice of `T` bound to mapped range. - /// - /// # Safety - /// - /// * Returned slice should not be read. - unsafe fn slice(&mut self) -> &mut [T]; - - /// Write data into mapped memory sub-region. - /// - /// # Panic - /// - /// Panics if `data.len()` is greater than this sub-region len. 
- fn write(&mut self, data: &[T]) { - unsafe { - let slice = self.slice(); - assert!(data.len() <= slice.len()); - copy_nonoverlapping(data.as_ptr(), slice.as_mut_ptr(), data.len()); - } - } -} - -#[derive(Debug)] -pub(super) struct WriteFlush<'a, T, F: FnOnce() + 'a> { - pub(super) slice: &'a mut [T], - pub(super) flush: Option, -} - -impl<'a, T, F> Drop for WriteFlush<'a, T, F> -where - T: 'a, - F: FnOnce() + 'a, -{ - fn drop(&mut self) { - if let Some(f) = self.flush.take() { - f(); - } - } -} - -impl<'a, T, F> Write for WriteFlush<'a, T, F> -where - T: Copy + 'a, - F: FnOnce() + 'a, -{ - /// # Safety - /// - /// [See doc comment for trait method](trait.Write#method.slice) - unsafe fn slice(&mut self) -> &mut [T] { - self.slice - } -} - -#[warn(dead_code)] -#[derive(Debug)] -pub(super) struct WriteCoherent<'a, T> { - pub(super) slice: &'a mut [T], -} - -impl<'a, T> Write for WriteCoherent<'a, T> -where - T: Copy + 'a, -{ - /// # Safety - /// - /// [See doc comment for trait method](trait.Write#method.slice) - unsafe fn slice(&mut self) -> &mut [T] { - self.slice - } -} diff --git a/third_party/rust/rendy-memory/src/memory.rs b/third_party/rust/rendy-memory/src/memory.rs deleted file mode 100644 index b3c2b9c1d163..000000000000 --- a/third_party/rust/rendy-memory/src/memory.rs +++ /dev/null @@ -1,82 +0,0 @@ -// use std::fmt; - -/// Memory object wrapper. -/// Contains size and properties of the memory. -#[derive(Debug)] -pub struct Memory { - raw: B::Memory, - size: u64, - properties: gfx_hal::memory::Properties, - relevant: relevant::Relevant, -} - -impl Memory -where - B: gfx_hal::Backend, -{ - /// Get memory properties. - pub fn properties(&self) -> gfx_hal::memory::Properties { - self.properties - } - - /// Get memory size. - pub fn size(&self) -> u64 { - self.size - } - - /// Get raw memory. - pub fn raw(&self) -> &B::Memory { - &self.raw - } - - /// Unwrap raw memory. 
- pub fn into_raw(self) -> B::Memory { - self.relevant.dispose(); - self.raw - } - - /// Create memory from raw object. - /// - /// # Safety - /// - /// TODO: - pub unsafe fn from_raw( - raw: B::Memory, - size: u64, - properties: gfx_hal::memory::Properties, - ) -> Self { - Memory { - properties, - raw, - size, - relevant: relevant::Relevant, - } - } - - /// Check if this memory is host-visible and can be mapped. - /// `memory.host_visible()` is equivalent to `memory.properties().contains(Properties::CPU_VISIBLE)` - pub fn host_visible(&self) -> bool { - self.properties - .contains(gfx_hal::memory::Properties::CPU_VISIBLE) - } - - /// Check if this memory is host-coherent and doesn't require invalidating or flushing. - /// `memory.host_coherent()` is equivalent to `memory.properties().contains(Properties::COHERENT)` - pub fn host_coherent(&self) -> bool { - self.properties - .contains(gfx_hal::memory::Properties::COHERENT) - } -} - -// pub(crate) fn memory_ptr_fmt( -// memory: &*const Memory, -// fmt: &mut fmt::Formatter<'_>, -// ) -> Result<(), fmt::Error> { -// unsafe { -// if fmt.alternate() { -// write!(fmt, "*const {:#?}", **memory) -// } else { -// write!(fmt, "*const {:?}", **memory) -// } -// } -// } diff --git a/third_party/rust/rendy-memory/src/usage.rs b/third_party/rust/rendy-memory/src/usage.rs deleted file mode 100644 index 53586ef90c71..000000000000 --- a/third_party/rust/rendy-memory/src/usage.rs +++ /dev/null @@ -1,210 +0,0 @@ -//! Defines usage types for memory bocks. -//! See `Usage` and implementations for details. - -use crate::allocator::Kind; - -/// Memory usage trait. -pub trait MemoryUsage: std::fmt::Debug { - /// Get set of properties required for the usage. - fn properties_required(&self) -> gfx_hal::memory::Properties; - - /// Get comparable fitness value for memory properties. - /// - /// # Panics - /// - /// This function will panic if properties set doesn't contain required properties. 
- fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32; - - /// Get comparable fitness value for memory allocator. - fn allocator_fitness(&self, kind: Kind) -> u32; -} - -impl MemoryUsage for T -where - T: std::ops::Deref + std::fmt::Debug, - T::Target: MemoryUsage, -{ - fn properties_required(&self) -> gfx_hal::memory::Properties { - (&**self).properties_required() - } - fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { - (&**self).memory_fitness(properties) - } - fn allocator_fitness(&self, kind: Kind) -> u32 { - (&**self).allocator_fitness(kind) - } -} - -/// Full speed GPU access. -/// Optimal for render targets and persistent resources. -/// Avoid memory with host access. -#[derive(Clone, Copy, Debug)] -pub struct Data; - -impl MemoryUsage for Data { - fn properties_required(&self) -> gfx_hal::memory::Properties { - gfx_hal::memory::Properties::DEVICE_LOCAL - } - - #[inline] - fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { - assert!(properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL)); - 0 | ((!properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE)) as u32) << 3 - | ((!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED)) as u32) << 2 - | ((!properties.contains(gfx_hal::memory::Properties::CPU_CACHED)) as u32) << 1 - | ((!properties.contains(gfx_hal::memory::Properties::COHERENT)) as u32) << 0 - } - - fn allocator_fitness(&self, kind: Kind) -> u32 { - match kind { - Kind::Dedicated => 1, - Kind::Dynamic => 2, - Kind::Linear => 0, - } - } -} - -/// CPU to GPU data flow with update commands. -/// Used for dynamic buffer data, typically constant buffers. -/// Host access is guaranteed. -/// Prefers memory with fast GPU access. 
-#[derive(Clone, Copy, Debug)] -pub struct Dynamic; - -impl MemoryUsage for Dynamic { - fn properties_required(&self) -> gfx_hal::memory::Properties { - gfx_hal::memory::Properties::CPU_VISIBLE - } - - #[inline] - fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { - assert!(properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE)); - assert!(!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED)); - - 0 | (properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL) as u32) << 2 - | (properties.contains(gfx_hal::memory::Properties::COHERENT) as u32) << 1 - | ((!properties.contains(gfx_hal::memory::Properties::CPU_CACHED)) as u32) << 0 - } - - fn allocator_fitness(&self, kind: Kind) -> u32 { - match kind { - Kind::Dedicated => 1, - Kind::Dynamic => 2, - Kind::Linear => 0, - } - } -} - -/// CPU to GPU data flow with mapping. -/// Used for staging data before copying to the `Data` memory. -/// Host access is guaranteed. -#[derive(Clone, Copy, Debug)] -pub struct Upload; - -impl MemoryUsage for Upload { - fn properties_required(&self) -> gfx_hal::memory::Properties { - gfx_hal::memory::Properties::CPU_VISIBLE - } - - #[inline] - fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { - assert!(properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE)); - assert!(!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED)); - - 0 | ((!properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL)) as u32) << 2 - | (properties.contains(gfx_hal::memory::Properties::COHERENT) as u32) << 1 - | ((!properties.contains(gfx_hal::memory::Properties::CPU_CACHED)) as u32) << 0 - } - - fn allocator_fitness(&self, kind: Kind) -> u32 { - match kind { - Kind::Dedicated => 0, - Kind::Dynamic => 1, - Kind::Linear => 2, - } - } -} - -/// GPU to CPU data flow with mapping. -/// Used for copying data from `Data` memory to be read by the host. -/// Host access is guaranteed. 
-#[derive(Clone, Copy, Debug)] -pub struct Download; - -impl MemoryUsage for Download { - fn properties_required(&self) -> gfx_hal::memory::Properties { - gfx_hal::memory::Properties::CPU_VISIBLE - } - - #[inline] - fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { - assert!(properties.contains(gfx_hal::memory::Properties::CPU_VISIBLE)); - assert!(!properties.contains(gfx_hal::memory::Properties::LAZILY_ALLOCATED)); - - 0 | ((!properties.contains(gfx_hal::memory::Properties::DEVICE_LOCAL)) as u32) << 2 - | (properties.contains(gfx_hal::memory::Properties::CPU_CACHED) as u32) << 1 - | (properties.contains(gfx_hal::memory::Properties::COHERENT) as u32) << 0 - } - - fn allocator_fitness(&self, kind: Kind) -> u32 { - match kind { - Kind::Dedicated => 0, - Kind::Dynamic => 1, - Kind::Linear => 2, - } - } -} - -/// Well-known memory usage types. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum MemoryUsageValue { - /// See [`Data`] - /// - /// [`Data`]: struct.Data.html - Data, - - /// See [`Dynamic`] - /// - /// [`Dynamic`]: struct.Dynamic.html - Dynamic, - - /// See [`Upload`] - /// - /// [`Upload`]: struct.Upload.html - Upload, - - /// See [`Download`] - /// - /// [`Download`]: struct.Download.html - Download, -} - -/// Memory usage trait. 
-impl MemoryUsage for MemoryUsageValue { - fn properties_required(&self) -> gfx_hal::memory::Properties { - match self { - MemoryUsageValue::Data => Data.properties_required(), - MemoryUsageValue::Dynamic => Dynamic.properties_required(), - MemoryUsageValue::Upload => Upload.properties_required(), - MemoryUsageValue::Download => Download.properties_required(), - } - } - - fn memory_fitness(&self, properties: gfx_hal::memory::Properties) -> u32 { - match self { - MemoryUsageValue::Data => Data.memory_fitness(properties), - MemoryUsageValue::Dynamic => Dynamic.memory_fitness(properties), - MemoryUsageValue::Upload => Upload.memory_fitness(properties), - MemoryUsageValue::Download => Download.memory_fitness(properties), - } - } - - fn allocator_fitness(&self, kind: Kind) -> u32 { - match self { - MemoryUsageValue::Data => Data.allocator_fitness(kind), - MemoryUsageValue::Dynamic => Dynamic.allocator_fitness(kind), - MemoryUsageValue::Upload => Upload.allocator_fitness(kind), - MemoryUsageValue::Download => Download.allocator_fitness(kind), - } - } -} diff --git a/third_party/rust/rendy-memory/src/util.rs b/third_party/rust/rendy-memory/src/util.rs deleted file mode 100644 index 8ce109bb8f60..000000000000 --- a/third_party/rust/rendy-memory/src/util.rs +++ /dev/null @@ -1,125 +0,0 @@ -pub(crate) fn aligned(value: u64, align: u64) -> u64 { - debug_assert_ne!(align, 0); - debug_assert_eq!(align.count_ones(), 1); - if value == 0 { - 0 - } else { - 1u64 + ((value - 1u64) | (align - 1u64)) - } -} - -pub(crate) trait IntegerFitting { - fn fits_usize(self) -> bool; - fn fits_isize(self) -> bool; - - fn usize_fits(value: usize) -> bool; - fn isize_fits(value: isize) -> bool; -} - -#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))] -impl IntegerFitting for u64 { - fn fits_usize(self) -> bool { - self <= usize::max_value() as u64 - } - fn fits_isize(self) -> bool { - self <= isize::max_value() as u64 - } - fn usize_fits(_value: usize) -> bool { - true - } - fn 
isize_fits(value: isize) -> bool { - value >= 0 - } -} - -#[cfg(target_pointer_width = "64")] -impl IntegerFitting for u64 { - fn fits_usize(self) -> bool { - true - } - fn fits_isize(self) -> bool { - self <= isize::max_value() as u64 - } - fn usize_fits(_value: usize) -> bool { - true - } - fn isize_fits(value: isize) -> bool { - value >= 0 - } -} - -#[cfg(not(any( - target_pointer_width = "16", - target_pointer_width = "32", - target_pointer_width = "64" -)))] -impl IntegerFitting for u64 { - fn fits_usize(self) -> bool { - true - } - fn fits_isize(self) -> bool { - true - } - fn usize_fits(value: usize) -> bool { - value <= u64::max_value() as usize - } - fn isize_fits(value: isize) -> bool { - value >= 0 && value <= u64::max_value() as isize - } -} - -#[cfg(target_pointer_width = "16")] -impl IntegerFitting for u32 { - fn fits_usize(self) -> bool { - self <= usize::max_value() as u32 - } - fn fits_isize(self) -> bool { - self <= isize::max_value() as u32 - } - fn usize_fits(_value: usize) -> bool { - true - } - fn isize_fits(value: isize) -> bool { - value >= 0 - } -} - -#[cfg(target_pointer_width = "32")] -impl IntegerFitting for u32 { - fn fits_usize(self) -> bool { - true - } - fn fits_isize(self) -> bool { - self <= isize::max_value() as u32 - } - fn usize_fits(_value: usize) -> bool { - true - } - fn isize_fits(value: isize) -> bool { - value >= 0 - } -} - -#[cfg(not(any(target_pointer_width = "16", target_pointer_width = "32")))] -impl IntegerFitting for u32 { - fn fits_usize(self) -> bool { - true - } - fn fits_isize(self) -> bool { - true - } - fn usize_fits(value: usize) -> bool { - value <= u32::max_value() as usize - } - fn isize_fits(value: isize) -> bool { - value >= 0 && value <= u32::max_value() as isize - } -} - -pub(crate) fn fits_usize(value: T) -> bool { - value.fits_usize() -} - -pub(crate) fn fits_u32(value: usize) -> bool { - u32::usize_fits(value) -} diff --git a/third_party/rust/shared_library/.cargo-checksum.json 
b/third_party/rust/shared_library/.cargo-checksum.json deleted file mode 100644 index 0b16e8307b7b..000000000000 --- a/third_party/rust/shared_library/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"f9ad715b6b6424f37f903a069039d3567d46e426d98d11d4e62a9a3933691e5b","LICENSE-APACHE":"c144680885b29e4719e2a51f0aab5439a1e02d980692b5aaf086cae12727f28b","LICENSE-MIT":"1c07d19ccbe2578665ab7d8c63f71559f890eb8d2a82fa39d0206b7a3414064f","src/dynamic_library.rs":"973df715d4ae2daae662392d73ca853b9bacdb4165bab3e4d8343427dca55c9c","src/lib.rs":"29f1aef9437d1ab891d17d6a6b86c6e1176813d372333cfdfc063b97586deb02"},"package":"5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11"} \ No newline at end of file diff --git a/third_party/rust/shared_library/Cargo.toml b/third_party/rust/shared_library/Cargo.toml deleted file mode 100644 index 1d7d58be91fd..000000000000 --- a/third_party/rust/shared_library/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "shared_library" -version = "0.1.9" -authors = ["Pierre Krieger "] -description = "Easily bind to and load shared libraries" -license = "Apache-2.0/MIT" -repository = "https://github.com/tomaka/shared_library/" -[dependencies.lazy_static] -version = "1" - -[dependencies.libc] -version = "0.2" diff --git a/third_party/rust/shared_library/LICENSE-APACHE b/third_party/rust/shared_library/LICENSE-APACHE deleted file mode 100644 index 1b22bef9c78a..000000000000 --- a/third_party/rust/shared_library/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. \ No newline at end of file diff --git a/third_party/rust/shared_library/LICENSE-MIT b/third_party/rust/shared_library/LICENSE-MIT deleted file mode 100644 index 4f2b149cd621..000000000000 --- a/third_party/rust/shared_library/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2017 Pierre Krieger - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/third_party/rust/shared_library/src/dynamic_library.rs b/third_party/rust/shared_library/src/dynamic_library.rs deleted file mode 100644 index 753b6324a4f6..000000000000 --- a/third_party/rust/shared_library/src/dynamic_library.rs +++ /dev/null @@ -1,410 +0,0 @@ -// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Dynamic library facilities. -//! -//! A simple wrapper over the platform's dynamic library facilities - -#![allow(missing_docs)] - -use std::env; -use std::ffi::{CString, OsString}; -use std::mem; -use std::path::{Path, PathBuf}; -use libc; - -pub struct DynamicLibrary { - handle: *mut u8 -} - -unsafe impl Send for DynamicLibrary {} -unsafe impl Sync for DynamicLibrary {} - -impl Drop for DynamicLibrary { - fn drop(&mut self) { - if let Err(str) = dl::check_for_errors_in(|| unsafe { - dl::close(self.handle) - }) { - panic!("{}", str) - } - } -} - -/// Special handles to be used with the `symbol_special` function. These are -/// provided by a GNU only extension and are not included as part of the POSIX -/// standard. -/// -/// See https://linux.die.net/man/3/dlsym for their behaviour. -#[cfg(target_os = "linux")] -pub enum SpecialHandles { - Next, - Default, -} - -impl DynamicLibrary { - // FIXME (#12938): Until DST lands, we cannot decompose &str into - // & and str, so we cannot usefully take ToCStr arguments by - // reference (without forcing an additional & around &str). So we - // are instead temporarily adding an instance for &Path, so that - // we can take ToCStr as owned. When DST lands, the &Path instance - // should be removed, and arguments bound by ToCStr should be - // passed by reference. 
(Here: in the `open` method.) - - /// Lazily loads the dynamic library named `filename` into memory and - /// then returns an opaque "handle" for that dynamic library. - /// - /// Returns a handle to the calling process when passed `None`. - pub fn open(filename: Option<&Path>) -> Result { - // The dynamic library must not be constructed if there is - // an error opening the library so the destructor does not - // run. - dl::open(filename.map(|path| path.as_os_str())) - .map(|handle| DynamicLibrary { handle }) - } - - /// Prepends a path to this process's search path for dynamic libraries - pub fn prepend_search_path(path: &Path) { - let mut search_path = Self::search_path(); - search_path.insert(0, path.to_path_buf()); - env::set_var(Self::envvar(), &Self::create_path(&search_path)); - } - - /// From a slice of paths, create a new vector which is suitable to be an - /// environment variable for this platforms dylib search path. - pub fn create_path(path: &[PathBuf]) -> OsString { - let mut newvar = OsString::new(); - for (i, path) in path.iter().enumerate() { - if i > 0 { newvar.push(Self::separator()); } - newvar.push(path); - } - newvar - } - - /// Returns the environment variable for this process's dynamic library - /// search path - pub fn envvar() -> &'static str { - if cfg!(windows) { - "PATH" - } else if cfg!(target_os = "macos") { - "DYLD_LIBRARY_PATH" - } else { - "LD_LIBRARY_PATH" - } - } - - //TODO: turn this and `envvar` into associated constants - fn separator() -> &'static str { - if cfg!(windows) { ";" } else { ":" } - } - - /// Returns the current search path for dynamic libraries being used by this - /// process - pub fn search_path() -> Vec { - match env::var_os(Self::envvar()) { - Some(var) => env::split_paths(&var).collect(), - None => Vec::new(), - } - } - - /// Returns the address of where symbol `symbol` was loaded into memory. 
- /// - /// In POSIX compliant systems, we return 'Err' if the symbol was not found, - /// in this library or any of the libraries that were automatically loaded - /// when this library was loaded. - pub unsafe fn symbol(&self, symbol: &str) -> Result<*mut T, String> { - // This function should have a lifetime constraint of 'a on - // T but that feature is still unimplemented - - let raw_string = CString::new(symbol).unwrap(); - // The value must not be constructed if there is an error so - // the destructor does not run. - dl::check_for_errors_in(|| { - dl::symbol(self.handle as *mut libc::c_void, raw_string.as_ptr() as *const _) - }) - .map(|sym| mem::transmute(sym)) - } - - /// Returns the address of the first occurance of symbol `symbol` using the - /// default library search order if you use `SpecialHandles::Default`. - /// - /// Returns the address of the next occurance of symbol `symbol` after the - /// current library in the default library search order if you use - /// `SpecialHandles::Next`. - #[cfg(target_os = "linux")] - pub unsafe fn symbol_special(handle: SpecialHandles, symbol: &str) -> Result<*mut T, String> { - // This function should have a lifetime constraint of 'a on - // T but that feature is still unimplemented - - let handle = match handle { - SpecialHandles::Next => mem::transmute::(-1), - SpecialHandles::Default => ::std::ptr::null_mut(), - }; - - let raw_string = CString::new(symbol).unwrap(); - // The value must not be constructed if there is an error so - // the destructor does not run. 
- dl::check_for_errors_in(|| { - dl::symbol(handle, raw_string.as_ptr() as *const _) - }) - .map(|sym| mem::transmute(sym)) - } -} - -#[cfg(all(test, not(target_os = "ios")))] -mod test { - use super::*; - use std::mem; - use std::path::Path; - - #[test] - #[cfg_attr(any(windows, target_os = "android"), ignore)] // FIXME #8818, #10379 - fn test_loading_cosine() { - // The math library does not need to be loaded since it is already - // statically linked in - let libm = match DynamicLibrary::open(None) { - Err(error) => panic!("Could not load self as module: {}", error), - Ok(libm) => libm - }; - - let cosine: extern fn(libc::c_double) -> libc::c_double = unsafe { - match libm.symbol("cos") { - Err(error) => panic!("Could not load function cos: {}", error), - Ok(cosine) => mem::transmute::<*mut u8, _>(cosine) - } - }; - - let argument = 0.0; - let expected_result = 1.0; - let result = cosine(argument); - if result != expected_result { - panic!("cos({}) != {} but equaled {} instead", argument, - expected_result, result) - } - } - - #[test] - #[cfg(any(target_os = "linux", - target_os = "macos", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "netbsd", - target_os = "dragonfly", - target_os = "bitrig", - target_os = "openbsd", - target_os = "solaris"))] - fn test_errors_do_not_crash() { - // Open /dev/null as a library to get an error, and make sure - // that only causes an error, and not a crash. - let path = Path::new("/dev/null"); - match DynamicLibrary::open(Some(&path)) { - Err(_) => {} - Ok(_) => panic!("Successfully opened the empty library.") - } - } -} - -//TODO: use `unix` shortcut? 
-#[cfg(any(target_os = "linux", - target_os = "android", - target_os = "macos", - target_os = "ios", - target_os = "fuchsia", - target_os = "freebsd", - target_os = "netbsd", - target_os = "dragonfly", - target_os = "bitrig", - target_os = "openbsd", - target_os = "solaris", - target_os = "emscripten"))] -mod dl { - use std::ffi::{CString, CStr, OsStr}; - use std::os::unix::ffi::OsStrExt; - use std::str; - use libc; - use std::ptr; - use std::sync::Mutex; - - lazy_static! { - static ref LOCK: Mutex<()> = Mutex::new(()); - } - - pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> { - check_for_errors_in(|| unsafe { - match filename { - Some(filename) => open_external(filename), - None => open_internal(), - } - }) - } - - const LAZY: libc::c_int = 1; - - unsafe fn open_external(filename: &OsStr) -> *mut u8 { - let s = CString::new(filename.as_bytes().to_vec()).unwrap(); - dlopen(s.as_ptr() as *const _, LAZY) as *mut u8 - } - - unsafe fn open_internal() -> *mut u8 { - dlopen(ptr::null(), LAZY) as *mut u8 - } - - pub fn check_for_errors_in(f: F) -> Result where - F: FnOnce() -> T, - { - unsafe { - // dlerror isn't thread safe, so we need to lock around this entire - // sequence - let _guard = LOCK.lock(); - let _old_error = dlerror(); - - let result = f(); - - let last_error = dlerror() as *const _; - let ret = if ptr::null() == last_error { - Ok(result) - } else { - let s = CStr::from_ptr(last_error).to_bytes(); - Err(str::from_utf8(s).unwrap().to_string()) - }; - - ret - } - } - - pub unsafe fn symbol( - handle: *mut libc::c_void, - symbol: *const libc::c_char, - ) -> *mut u8 { - dlsym(handle, symbol) as *mut u8 - } - - pub unsafe fn close(handle: *mut u8) { - dlclose(handle as *mut libc::c_void); () - } - - extern { - fn dlopen( - filename: *const libc::c_char, - flag: libc::c_int, - ) -> *mut libc::c_void; - fn dlerror() -> *mut libc::c_char; - fn dlsym( - handle: *mut libc::c_void, - symbol: *const libc::c_char, - ) -> *mut libc::c_void; - fn dlclose( 
- handle: *mut libc::c_void, - ) -> libc::c_int; - } -} - -#[cfg(target_os = "windows")] -mod dl { - use std::ffi::OsStr; - use std::iter::Iterator; - use libc; - use std::ops::FnOnce; - use std::io::Error as IoError; - use std::os::windows::prelude::*; - use std::option::Option::{self, Some, None}; - use std::ptr; - use std::result::Result; - use std::result::Result::{Ok, Err}; - use std::string::String; - use std::vec::Vec; - - pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> { - // disable "dll load failed" error dialog. - let prev_error_mode = unsafe { - // SEM_FAILCRITICALERRORS 0x01 - let new_error_mode = 1; - SetErrorMode(new_error_mode) - }; - - unsafe { - SetLastError(0); - } - - let result = match filename { - Some(filename) => { - let filename_str: Vec<_> = - filename.encode_wide().chain(Some(0).into_iter()).collect(); - let result = unsafe { - LoadLibraryW(filename_str.as_ptr() as *const libc::c_void) - }; - // beware: Vec/String may change errno during drop! - // so we get error here. 
- if result == ptr::null_mut() { - Err(format!("{}", IoError::last_os_error())) - } else { - Ok(result as *mut u8) - } - } - None => { - let mut handle = ptr::null_mut(); - let succeeded = unsafe { - GetModuleHandleExW(0, ptr::null(), &mut handle) - }; - if succeeded == 0 { - Err(format!("{}", IoError::last_os_error())) - } else { - Ok(handle as *mut u8) - } - } - }; - - unsafe { - SetErrorMode(prev_error_mode); - } - - result - } - - pub fn check_for_errors_in(f: F) -> Result where - F: FnOnce() -> T, - { - unsafe { - SetLastError(0); - - let result = f(); - - let error = IoError::last_os_error(); - if 0 == error.raw_os_error().unwrap() { - Ok(result) - } else { - Err(format!("{}", error)) - } - } - } - - pub unsafe fn symbol(handle: *mut libc::c_void, symbol: *const libc::c_char) -> *mut u8 { - GetProcAddress(handle, symbol) as *mut u8 - } - pub unsafe fn close(handle: *mut u8) { - FreeLibrary(handle as *mut libc::c_void); () - } - - #[allow(non_snake_case)] - extern "system" { - fn SetLastError(error: libc::size_t); - fn LoadLibraryW(name: *const libc::c_void) -> *mut libc::c_void; - fn GetModuleHandleExW( - dwFlags: u32, - name: *const u16, - handle: *mut *mut libc::c_void, - ) -> i32; - fn GetProcAddress( - handle: *mut libc::c_void, - name: *const libc::c_char, - ) -> *mut libc::c_void; - fn FreeLibrary(handle: *mut libc::c_void); - fn SetErrorMode(uMode: libc::c_uint) -> libc::c_uint; - } -} diff --git a/third_party/rust/shared_library/src/lib.rs b/third_party/rust/shared_library/src/lib.rs deleted file mode 100644 index e698a47a968a..000000000000 --- a/third_party/rust/shared_library/src/lib.rs +++ /dev/null @@ -1,175 +0,0 @@ -extern crate libc; - -#[macro_use] -extern crate lazy_static; - -pub mod dynamic_library; - -/// Error that can happen while loading the shared library. -#[derive(Debug, Clone)] -pub enum LoadingError { - /// - LibraryNotFound { - descr: String, - }, - - /// One of the symbols could not be found in the library. 
- SymbolNotFound { - /// The symbol. - symbol: &'static str, - } -} - -#[macro_export] -macro_rules! shared_library { - ($struct_name:ident, pub $($rest:tt)+) => { - shared_library!(__impl $struct_name [] [] [] pub $($rest)+); - }; - - ($struct_name:ident, fn $($rest:tt)+) => { - shared_library!(__impl $struct_name [] [] [] fn $($rest)+); - }; - - ($struct_name:ident, static $($rest:tt)+) => { - shared_library!(__impl $struct_name [] [] [] static $($rest)+); - }; - - ($struct_name:ident, $def_path:expr, $($rest:tt)+) => { - shared_library!(__impl $struct_name [] [$def_path] [] $($rest)+); - }; - - (__impl $struct_name:ident - [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] - , $($rest:tt)* - ) => { - shared_library!(__impl $struct_name [$($p1)*] [$($p2)*] [$($p3)*] $($rest)*); - }; - - (__impl $struct_name:ident - [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] - pub $($rest:tt)* - ) => { - shared_library!(__impl $struct_name - [$($p1)*] [$($p2)*] [$($p3)* pub] $($rest)*); - }; - - (__impl $struct_name:ident - [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] - fn $name:ident($($p:ident:$ty:ty),*) -> $ret:ty, $($rest:tt)* - ) => { - shared_library!(__impl $struct_name - [$($p1)*, $name:unsafe extern fn($($p:$ty),*) -> $ret] - [$($p2)*] - [$($p3)* - unsafe fn $name($($p:$ty),*) -> $ret { - #![allow(dead_code)] - ($struct_name::get_static_ref().$name)($($p),*) - } - ] $($rest)*); - }; - - (__impl $struct_name:ident - [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] - static $name:ident:$ty:ty, $($rest:tt)* - ) => { - shared_library!(__impl $struct_name - [$($p1)*, $name: $ty] - [$($p2)*] - [$($p3)*] $($rest)*); - }; - - (__impl $struct_name:ident - [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*] - fn $name:ident($($p:ident:$ty:ty),*), $($rest:tt)* - ) => { - shared_library!(__impl $struct_name - [$($p1)*] [$($p2)*] [$($p3)*] - fn $name($($p:$ty),*) -> (), $($rest)*); - }; - - (__impl $struct_name:ident [$(,$mem_n:ident:$mem_t:ty)+] [$($p2:tt)*] [$($p3:tt)*]) => { - /// Symbols loaded from a shared library. 
- #[allow(non_snake_case)] - pub struct $struct_name { - _library_guard: $crate::dynamic_library::DynamicLibrary, - $( - pub $mem_n: $mem_t, - )+ - } - - impl $struct_name { - /// Tries to open the dynamic library. - #[allow(non_snake_case)] - pub fn open(path: &::std::path::Path) -> Result<$struct_name, $crate::LoadingError> { - use std::mem; - - let dylib = match $crate::dynamic_library::DynamicLibrary::open(Some(path)) { - Ok(l) => l, - Err(reason) => return Err($crate::LoadingError::LibraryNotFound { descr: reason }) - }; - - $( - let $mem_n: *mut () = match unsafe { dylib.symbol(stringify!($mem_n)) } { - Ok(s) => s, - Err(_) => return Err($crate::LoadingError::SymbolNotFound { symbol: stringify!($mem_n) }), - }; - )+ - - Ok($struct_name { - _library_guard: dylib, - $( - $mem_n: unsafe { mem::transmute($mem_n) }, - )+ - }) - } - } - - shared_library!(__write_static_fns $struct_name [] [$($p2)*] [$($p3)*]); - }; - - (__write_static_fns $struct_name:ident [$($p1:tt)*] [] [$($p3:tt)*]) => { - }; - - (__write_static_fns $struct_name:ident [$($p1:tt)*] [$defpath:expr] [$($standalones:item)+]) => { - impl $struct_name { - /// This function is used by the regular functions. - fn get_static_ref() -> &'static $struct_name { - $struct_name::try_loading().ok() - .expect(concat!("Could not open dynamic \ - library `", stringify!($struct_name), - "`")) - } - - /// Try loading the static symbols linked to this library. 
- pub fn try_loading() -> Result<&'static $struct_name, $crate::LoadingError> { - use std::sync::{Mutex, Once, ONCE_INIT}; - use std::mem; - - unsafe { - static mut DATA: *const Mutex> = 0 as *const _; - - static mut INIT: Once = ONCE_INIT; - INIT.call_once(|| { - let data = Box::new(Mutex::new(None)); - DATA = &*data; - mem::forget(data); - }); - - let data: &Mutex> = &*DATA; - let mut data = data.lock().unwrap(); - - if let Some(ref data) = *data { - return Ok(mem::transmute(data)); - } - - let path = ::std::path::Path::new($defpath); - let result = try!($struct_name::open(path)); - *data = Some(result); - Ok(mem::transmute(data.as_ref().unwrap())) - } - } - } - - $($standalones)+ - }; -} diff --git a/third_party/rust/smallvec-0.6.10/.cargo-checksum.json b/third_party/rust/smallvec-0.6.10/.cargo-checksum.json deleted file mode 100644 index 57d87f2e00bb..000000000000 --- a/third_party/rust/smallvec-0.6.10/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"a064123fc22a52158c37be025c3b70d413c4b1ee743e92a2e80ed419e2992d65","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"38eef4ebde6fe6effa12a2dbca3bd69d6446b2935f19a329ac4926f1cb2e5013","benches/bench.rs":"9dca7122a3dcb2c099e49807e4d3b8f01d9220e2b3db0a54e9901ee74392866f","lib.rs":"4d6998b0b80a85e85cf00bd317a88518067e9e8ba191185418263dec67069c16"},"package":"ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7"} \ No newline at end of file diff --git a/third_party/rust/smallvec-0.6.10/Cargo.toml b/third_party/rust/smallvec-0.6.10/Cargo.toml deleted file mode 100644 index 5b23e5a16be9..000000000000 --- a/third_party/rust/smallvec-0.6.10/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with 
all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "smallvec" -version = "0.6.10" -authors = ["Simon Sapin "] -description = "'Small vector' optimization: store up to a small number of items on the stack" -documentation = "https://doc.servo.org/smallvec/" -readme = "README.md" -keywords = ["small", "vec", "vector", "stack", "no_std"] -categories = ["data-structures"] -license = "MIT/Apache-2.0" -repository = "https://github.com/servo/rust-smallvec" - -[lib] -name = "smallvec" -path = "lib.rs" -[dependencies.serde] -version = "1" -optional = true -[dev-dependencies.bincode] -version = "1.0.1" - -[features] -default = ["std"] -may_dangle = [] -specialization = [] -std = [] -union = [] diff --git a/third_party/rust/smallvec-0.6.10/LICENSE-APACHE b/third_party/rust/smallvec-0.6.10/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e80..000000000000 --- a/third_party/rust/smallvec-0.6.10/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/third_party/rust/smallvec-0.6.10/LICENSE-MIT b/third_party/rust/smallvec-0.6.10/LICENSE-MIT deleted file mode 100644 index 9729c1284e1b..000000000000 --- a/third_party/rust/smallvec-0.6.10/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2018 The Servo Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff --git a/third_party/rust/smallvec-0.6.10/README.md b/third_party/rust/smallvec-0.6.10/README.md deleted file mode 100644 index fda7fd4d2ca0..000000000000 --- a/third_party/rust/smallvec-0.6.10/README.md +++ /dev/null @@ -1,8 +0,0 @@ -rust-smallvec -============= - -[Documentation](https://docs.rs/smallvec/) - -[Release notes](https://github.com/servo/rust-smallvec/releases) - -"Small vector" optimization for Rust: store up to a small number of items on the stack diff --git a/third_party/rust/smallvec-0.6.10/benches/bench.rs b/third_party/rust/smallvec-0.6.10/benches/bench.rs deleted file mode 100644 index 36cb1333fab7..000000000000 --- a/third_party/rust/smallvec-0.6.10/benches/bench.rs +++ /dev/null @@ -1,295 +0,0 @@ -#![feature(test)] - -#[macro_use] -extern crate smallvec; -extern crate test; - -use self::test::Bencher; -use smallvec::{ExtendFromSlice, SmallVec}; - -const VEC_SIZE: usize = 16; -const SPILLED_SIZE: usize = 100; - -trait Vector: for<'a> From<&'a [T]> + Extend + ExtendFromSlice { - fn new() -> Self; - fn push(&mut self, val: T); - fn pop(&mut self) -> Option; - fn remove(&mut self, p: usize) -> T; - fn insert(&mut self, n: usize, val: T); - fn from_elem(val: T, n: usize) -> Self; - fn from_elems(val: &[T]) -> Self; -} - -impl Vector for Vec { - fn new() -> Self { - Self::with_capacity(VEC_SIZE) - } - - fn push(&mut self, val: T) { - self.push(val) - } - - fn pop(&mut self) -> Option { - self.pop() - } - - fn remove(&mut self, p: usize) -> T { - self.remove(p) - } - - fn insert(&mut self, n: usize, val: T) { - self.insert(n, val) - } - - fn from_elem(val: T, n: usize) -> Self { - vec![val; n] - } - - fn from_elems(val: &[T]) -> Self { - val.to_owned() - } -} - -impl Vector for SmallVec<[T; VEC_SIZE]> { - fn new() -> Self { - Self::new() - } - - fn push(&mut self, val: T) { - self.push(val) - } - - fn pop(&mut self) -> Option { - self.pop() - } - - fn remove(&mut self, p: usize) -> T { - self.remove(p) - } - - fn insert(&mut self, n: usize, val: 
T) { - self.insert(n, val) - } - - fn from_elem(val: T, n: usize) -> Self { - smallvec![val; n] - } - - fn from_elems(val: &[T]) -> Self { - SmallVec::from_slice(val) - } -} - -macro_rules! make_benches { - ($typ:ty { $($b_name:ident => $g_name:ident($($args:expr),*),)* }) => { - $( - #[bench] - fn $b_name(b: &mut Bencher) { - $g_name::<$typ>($($args,)* b) - } - )* - } -} - -make_benches! { - SmallVec<[u64; VEC_SIZE]> { - bench_push => gen_push(SPILLED_SIZE as _), - bench_push_small => gen_push(VEC_SIZE as _), - bench_insert => gen_insert(SPILLED_SIZE as _), - bench_insert_small => gen_insert(VEC_SIZE as _), - bench_remove => gen_remove(SPILLED_SIZE as _), - bench_remove_small => gen_remove(VEC_SIZE as _), - bench_extend => gen_extend(SPILLED_SIZE as _), - bench_extend_small => gen_extend(VEC_SIZE as _), - bench_from_iter => gen_from_iter(SPILLED_SIZE as _), - bench_from_iter_small => gen_from_iter(VEC_SIZE as _), - bench_from_slice => gen_from_slice(SPILLED_SIZE as _), - bench_from_slice_small => gen_from_slice(VEC_SIZE as _), - bench_extend_from_slice => gen_extend_from_slice(SPILLED_SIZE as _), - bench_extend_from_slice_small => gen_extend_from_slice(VEC_SIZE as _), - bench_macro_from_elem => gen_from_elem(SPILLED_SIZE as _), - bench_macro_from_elem_small => gen_from_elem(VEC_SIZE as _), - bench_pushpop => gen_pushpop(), - } -} - -make_benches! 
{ - Vec { - bench_push_vec => gen_push(SPILLED_SIZE as _), - bench_push_vec_small => gen_push(VEC_SIZE as _), - bench_insert_vec => gen_insert(SPILLED_SIZE as _), - bench_insert_vec_small => gen_insert(VEC_SIZE as _), - bench_remove_vec => gen_remove(SPILLED_SIZE as _), - bench_remove_vec_small => gen_remove(VEC_SIZE as _), - bench_extend_vec => gen_extend(SPILLED_SIZE as _), - bench_extend_vec_small => gen_extend(VEC_SIZE as _), - bench_from_iter_vec => gen_from_iter(SPILLED_SIZE as _), - bench_from_iter_vec_small => gen_from_iter(VEC_SIZE as _), - bench_from_slice_vec => gen_from_slice(SPILLED_SIZE as _), - bench_from_slice_vec_small => gen_from_slice(VEC_SIZE as _), - bench_extend_from_slice_vec => gen_extend_from_slice(SPILLED_SIZE as _), - bench_extend_from_slice_vec_small => gen_extend_from_slice(VEC_SIZE as _), - bench_macro_from_elem_vec => gen_from_elem(SPILLED_SIZE as _), - bench_macro_from_elem_vec_small => gen_from_elem(VEC_SIZE as _), - bench_pushpop_vec => gen_pushpop(), - } -} - -fn gen_push>(n: u64, b: &mut Bencher) { - #[inline(never)] - fn push_noinline>(vec: &mut V, x: u64) { - vec.push(x); - } - - b.iter(|| { - let mut vec = V::new(); - for x in 0..n { - push_noinline(&mut vec, x); - } - vec - }); -} - -fn gen_insert>(n: u64, b: &mut Bencher) { - #[inline(never)] - fn insert_noinline>(vec: &mut V, p: usize, x: u64) { - vec.insert(p, x) - } - - b.iter(|| { - let mut vec = V::new(); - // Add one element, with each iteration we insert one before the end. - // This means that we benchmark the insertion operation and not the - // time it takes to `ptr::copy` the data. 
- vec.push(0); - for x in 0..n { - insert_noinline(&mut vec, x as _, x); - } - vec - }); -} - -fn gen_remove>(n: usize, b: &mut Bencher) { - #[inline(never)] - fn remove_noinline>(vec: &mut V, p: usize) -> u64 { - vec.remove(p) - } - - b.iter(|| { - let mut vec = V::from_elem(0, n as _); - - for x in (0..n - 1).rev() { - remove_noinline(&mut vec, x); - } - }); -} - -fn gen_extend>(n: u64, b: &mut Bencher) { - b.iter(|| { - let mut vec = V::new(); - vec.extend(0..n); - vec - }); -} - -fn gen_from_iter>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let vec = V::from(&v); - vec - }); -} - -fn gen_from_slice>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let vec = V::from_elems(&v); - vec - }); -} - -fn gen_extend_from_slice>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let mut vec = V::new(); - vec.extend_from_slice(&v); - vec - }); -} - -fn gen_pushpop>(b: &mut Bencher) { - #[inline(never)] - fn pushpop_noinline>(vec: &mut V, x: u64) -> Option { - vec.push(x); - vec.pop() - } - - b.iter(|| { - let mut vec = V::new(); - for x in 0..SPILLED_SIZE as _ { - pushpop_noinline(&mut vec, x); - } - vec - }); -} - -fn gen_from_elem>(n: usize, b: &mut Bencher) { - b.iter(|| { - let vec = V::from_elem(42, n); - vec - }); -} - -#[bench] -fn bench_insert_many(b: &mut Bencher) { - #[inline(never)] - fn insert_many_noinline>( - vec: &mut SmallVec<[u64; VEC_SIZE]>, - index: usize, - iterable: I, - ) { - vec.insert_many(index, iterable) - } - - b.iter(|| { - let mut vec = SmallVec::<[u64; VEC_SIZE]>::new(); - insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _); - insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _); - vec - }); -} - -#[bench] -fn bench_insert_from_slice(b: &mut Bencher) { - let v: Vec = (0..SPILLED_SIZE as _).collect(); - b.iter(|| { - let mut vec = SmallVec::<[u64; VEC_SIZE]>::new(); - vec.insert_from_slice(0, &v); - vec.insert_from_slice(0, &v); - vec - }); -} - 
-#[bench] -fn bench_macro_from_list(b: &mut Bencher) { - b.iter(|| { - let vec: SmallVec<[u64; 16]> = smallvec![ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80, - 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000, - 0x80000, 0x100000, - ]; - vec - }); -} - -#[bench] -fn bench_macro_from_list_vec(b: &mut Bencher) { - b.iter(|| { - let vec: Vec = vec![ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80, - 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000, - 0x80000, 0x100000, - ]; - vec - }); -} diff --git a/third_party/rust/smallvec-0.6.10/lib.rs b/third_party/rust/smallvec-0.6.10/lib.rs deleted file mode 100644 index e45ca7aebdb0..000000000000 --- a/third_party/rust/smallvec-0.6.10/lib.rs +++ /dev/null @@ -1,2360 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Small vectors in various sizes. These store a certain number of elements inline, and fall back -//! to the heap for larger allocations. This can be a useful optimization for improving cache -//! locality and reducing allocator traffic for workloads that fit within the inline buffer. -//! -//! ## no_std support -//! -//! By default, `smallvec` depends on `libstd`. However, it can be configured to use the unstable -//! `liballoc` API instead, for use on platforms that have `liballoc` but not `libstd`. This -//! configuration is currently unstable and is not guaranteed to work on all versions of Rust. -//! -//! To depend on `smallvec` without `libstd`, use `default-features = false` in the `smallvec` -//! section of Cargo.toml to disable its `"std"` feature. -//! -//! ## `union` feature -//! -//! When the `union` feature is enabled `smallvec` will track its state (inline or spilled) -//! 
without the use of an enum tag, reducing the size of the `smallvec` by one machine word. -//! This means that there is potentially no space overhead compared to `Vec`. -//! Note that `smallvec` can still be larger than `Vec` if the inline buffer is larger than two -//! machine words. -//! -//! To use this feature add `features = ["union"]` in the `smallvec` section of Cargo.toml. -//! Note that this feature requires a nightly compiler (for now). - -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(not(feature = "std"), feature(alloc))] -#![cfg_attr(feature = "union", feature(untagged_unions))] -#![cfg_attr(feature = "specialization", feature(specialization))] -#![cfg_attr(feature = "may_dangle", feature(dropck_eyepatch))] -#![deny(missing_docs)] - - -#[cfg(not(feature = "std"))] -#[macro_use] -extern crate alloc; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - -#[cfg(feature = "serde")] -extern crate serde; - -#[cfg(not(feature = "std"))] -mod std { - pub use core::*; -} - -use std::borrow::{Borrow, BorrowMut}; -use std::cmp; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::iter::{IntoIterator, FromIterator, repeat}; -use std::mem; -use std::mem::ManuallyDrop; -use std::ops; -use std::ptr; -use std::slice; -#[cfg(feature = "std")] -use std::io; -#[cfg(feature = "serde")] -use serde::ser::{Serialize, Serializer, SerializeSeq}; -#[cfg(feature = "serde")] -use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; -#[cfg(feature = "serde")] -use std::marker::PhantomData; - -/// Creates a [`SmallVec`] containing the arguments. -/// -/// `smallvec!` allows `SmallVec`s to be defined with the same syntax as array expressions. 
-/// There are two forms of this macro: -/// -/// - Create a [`SmallVec`] containing a given list of elements: -/// -/// ``` -/// # #[macro_use] extern crate smallvec; -/// # use smallvec::SmallVec; -/// # fn main() { -/// let v: SmallVec<[_; 128]> = smallvec![1, 2, 3]; -/// assert_eq!(v[0], 1); -/// assert_eq!(v[1], 2); -/// assert_eq!(v[2], 3); -/// # } -/// ``` -/// -/// - Create a [`SmallVec`] from a given element and size: -/// -/// ``` -/// # #[macro_use] extern crate smallvec; -/// # use smallvec::SmallVec; -/// # fn main() { -/// let v: SmallVec<[_; 0x8000]> = smallvec![1; 3]; -/// assert_eq!(v, SmallVec::from_buf([1, 1, 1])); -/// # } -/// ``` -/// -/// Note that unlike array expressions this syntax supports all elements -/// which implement [`Clone`] and the number of elements doesn't have to be -/// a constant. -/// -/// This will use `clone` to duplicate an expression, so one should be careful -/// using this with types having a nonstandard `Clone` implementation. For -/// example, `smallvec![Rc::new(1); 5]` will create a vector of five references -/// to the same boxed integer value, not five references pointing to independently -/// boxed integers. - -#[macro_export] -macro_rules! smallvec { - // count helper: transform any expression into 1 - (@one $x:expr) => (1usize); - ($elem:expr; $n:expr) => ({ - $crate::SmallVec::from_elem($elem, $n) - }); - ($($x:expr),*$(,)*) => ({ - let count = 0usize $(+ smallvec!(@one $x))*; - let mut vec = $crate::SmallVec::new(); - if count <= vec.inline_size() { - $(vec.push($x);)* - vec - } else { - $crate::SmallVec::from_vec(vec![$($x,)*]) - } - }); -} - -/// Hint to the optimizer that any code path which calls this function is -/// statically unreachable and can be removed. -/// -/// Equivalent to `std::hint::unreachable_unchecked` but works in older versions of Rust. -#[inline] -pub unsafe fn unreachable() -> ! 
{ - enum Void {} - let x: &Void = mem::transmute(1usize); - match *x {} -} - -/// `panic!()` in debug builds, optimization hint in release. -#[cfg(not(feature = "union"))] -macro_rules! debug_unreachable { - () => { debug_unreachable!("entered unreachable code") }; - ($e:expr) => { - if cfg!(not(debug_assertions)) { - unreachable(); - } else { - panic!($e); - } - } -} - -/// Common operations implemented by both `Vec` and `SmallVec`. -/// -/// This can be used to write generic code that works with both `Vec` and `SmallVec`. -/// -/// ## Example -/// -/// ```rust -/// use smallvec::{VecLike, SmallVec}; -/// -/// fn initialize>(v: &mut V) { -/// for i in 0..5 { -/// v.push(i); -/// } -/// } -/// -/// let mut vec = Vec::new(); -/// initialize(&mut vec); -/// -/// let mut small_vec = SmallVec::<[u8; 8]>::new(); -/// initialize(&mut small_vec); -/// ``` -#[deprecated(note = "Use `Extend` and `Deref<[T]>` instead")] -pub trait VecLike: - ops::Index + - ops::IndexMut + - ops::Index, Output=[T]> + - ops::IndexMut> + - ops::Index, Output=[T]> + - ops::IndexMut> + - ops::Index, Output=[T]> + - ops::IndexMut> + - ops::Index + - ops::IndexMut + - ops::DerefMut + - Extend { - - /// Append an element to the vector. 
- fn push(&mut self, value: T); -} - -#[allow(deprecated)] -impl VecLike for Vec { - #[inline] - fn push(&mut self, value: T) { - Vec::push(self, value); - } -} - -/// Trait to be implemented by a collection that can be extended from a slice -/// -/// ## Example -/// -/// ```rust -/// use smallvec::{ExtendFromSlice, SmallVec}; -/// -/// fn initialize>(v: &mut V) { -/// v.extend_from_slice(b"Test!"); -/// } -/// -/// let mut vec = Vec::new(); -/// initialize(&mut vec); -/// assert_eq!(&vec, b"Test!"); -/// -/// let mut small_vec = SmallVec::<[u8; 8]>::new(); -/// initialize(&mut small_vec); -/// assert_eq!(&small_vec as &[_], b"Test!"); -/// ``` -pub trait ExtendFromSlice { - /// Extends a collection from a slice of its element type - fn extend_from_slice(&mut self, other: &[T]); -} - -impl ExtendFromSlice for Vec { - fn extend_from_slice(&mut self, other: &[T]) { - Vec::extend_from_slice(self, other) - } -} - -unsafe fn deallocate(ptr: *mut T, capacity: usize) { - let _vec: Vec = Vec::from_raw_parts(ptr, 0, capacity); - // Let it drop. -} - -/// An iterator that removes the items from a `SmallVec` and yields them by value. -/// -/// Returned from [`SmallVec::drain`][1]. -/// -/// [1]: struct.SmallVec.html#method.drain -pub struct Drain<'a, T: 'a> { - iter: slice::IterMut<'a,T>, -} - -impl<'a, T: 'a> Iterator for Drain<'a,T> { - type Item = T; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|reference| unsafe { ptr::read(reference) }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|reference| unsafe { ptr::read(reference) }) - } -} - -impl<'a, T> ExactSizeIterator for Drain<'a, T> { } - -impl<'a, T: 'a> Drop for Drain<'a,T> { - fn drop(&mut self) { - // Destroy the remaining elements. 
- for _ in self.by_ref() {} - } -} - -#[cfg(feature = "union")] -union SmallVecData { - inline: ManuallyDrop, - heap: (*mut A::Item, usize), -} - -#[cfg(feature = "union")] -impl SmallVecData { - #[inline] - unsafe fn inline(&self) -> &A { - &self.inline - } - #[inline] - unsafe fn inline_mut(&mut self) -> &mut A { - &mut self.inline - } - #[inline] - fn from_inline(inline: A) -> SmallVecData { - SmallVecData { inline: ManuallyDrop::new(inline) } - } - #[inline] - unsafe fn into_inline(self) -> A { ManuallyDrop::into_inner(self.inline) } - #[inline] - unsafe fn heap(&self) -> (*mut A::Item, usize) { - self.heap - } - #[inline] - unsafe fn heap_mut(&mut self) -> &mut (*mut A::Item, usize) { - &mut self.heap - } - #[inline] - fn from_heap(ptr: *mut A::Item, len: usize) -> SmallVecData { - SmallVecData { heap: (ptr, len) } - } -} - -#[cfg(not(feature = "union"))] -enum SmallVecData { - Inline(ManuallyDrop), - Heap((*mut A::Item, usize)), -} - -#[cfg(not(feature = "union"))] -impl SmallVecData { - #[inline] - unsafe fn inline(&self) -> &A { - match *self { - SmallVecData::Inline(ref a) => a, - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn inline_mut(&mut self) -> &mut A { - match *self { - SmallVecData::Inline(ref mut a) => a, - _ => debug_unreachable!(), - } - } - #[inline] - fn from_inline(inline: A) -> SmallVecData { - SmallVecData::Inline(ManuallyDrop::new(inline)) - } - #[inline] - unsafe fn into_inline(self) -> A { - match self { - SmallVecData::Inline(a) => ManuallyDrop::into_inner(a), - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn heap(&self) -> (*mut A::Item, usize) { - match *self { - SmallVecData::Heap(data) => data, - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn heap_mut(&mut self) -> &mut (*mut A::Item, usize) { - match *self { - SmallVecData::Heap(ref mut data) => data, - _ => debug_unreachable!(), - } - } - #[inline] - fn from_heap(ptr: *mut A::Item, len: usize) -> SmallVecData { - SmallVecData::Heap((ptr, len)) - 
} -} - -unsafe impl Send for SmallVecData {} -unsafe impl Sync for SmallVecData {} - -/// A `Vec`-like container that can store a small number of elements inline. -/// -/// `SmallVec` acts like a vector, but can store a limited amount of data inline within the -/// `SmallVec` struct rather than in a separate allocation. If the data exceeds this limit, the -/// `SmallVec` will "spill" its data onto the heap, allocating a new buffer to hold it. -/// -/// The amount of data that a `SmallVec` can store inline depends on its backing store. The backing -/// store can be any type that implements the `Array` trait; usually it is a small fixed-sized -/// array. For example a `SmallVec<[u64; 8]>` can hold up to eight 64-bit integers inline. -/// -/// ## Example -/// -/// ```rust -/// use smallvec::SmallVec; -/// let mut v = SmallVec::<[u8; 4]>::new(); // initialize an empty vector -/// -/// // The vector can hold up to 4 items without spilling onto the heap. -/// v.extend(0..4); -/// assert_eq!(v.len(), 4); -/// assert!(!v.spilled()); -/// -/// // Pushing another element will force the buffer to spill: -/// v.push(4); -/// assert_eq!(v.len(), 5); -/// assert!(v.spilled()); -/// ``` -pub struct SmallVec { - // The capacity field is used to determine which of the storage variants is active: - // If capacity <= A::size() then the inline variant is used and capacity holds the current length of the vector (number of elements actually in use). - // If capacity > A::size() then the heap variant is used and capacity holds the size of the memory allocation. - capacity: usize, - data: SmallVecData, -} - -impl SmallVec { - /// Construct an empty vector - #[inline] - pub fn new() -> SmallVec { - unsafe { - SmallVec { - capacity: 0, - data: SmallVecData::from_inline(mem::uninitialized()), - } - } - } - - /// Construct an empty vector with enough capacity pre-allocated to store at least `n` - /// elements. 
- /// - /// Will create a heap allocation only if `n` is larger than the inline capacity. - /// - /// ``` - /// # use smallvec::SmallVec; - /// - /// let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(100); - /// - /// assert!(v.is_empty()); - /// assert!(v.capacity() >= 100); - /// ``` - #[inline] - pub fn with_capacity(n: usize) -> Self { - let mut v = SmallVec::new(); - v.reserve_exact(n); - v - } - - /// Construct a new `SmallVec` from a `Vec`. - /// - /// Elements will be copied to the inline buffer if vec.capacity() <= A::size(). - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let vec = vec![1, 2, 3, 4, 5]; - /// let small_vec: SmallVec<[_; 3]> = SmallVec::from_vec(vec); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_vec(mut vec: Vec) -> SmallVec { - if vec.capacity() <= A::size() { - unsafe { - let mut data = SmallVecData::::from_inline(mem::uninitialized()); - let len = vec.len(); - vec.set_len(0); - ptr::copy_nonoverlapping(vec.as_ptr(), data.inline_mut().ptr_mut(), len); - - SmallVec { - capacity: len, - data, - } - } - } else { - let (ptr, cap, len) = (vec.as_mut_ptr(), vec.capacity(), vec.len()); - mem::forget(vec); - - SmallVec { - capacity: cap, - data: SmallVecData::from_heap(ptr, len), - } - } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5]; - /// let small_vec: SmallVec<_> = SmallVec::from_buf(buf); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_buf(buf: A) -> SmallVec { - SmallVec { - capacity: A::size(), - data: SmallVecData::from_inline(buf), - } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. Also sets the length, which must be less or - /// equal to the size of `buf`. 
- /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5, 0, 0, 0]; - /// let small_vec: SmallVec<_> = SmallVec::from_buf_and_len(buf, 5); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_buf_and_len(buf: A, len: usize) -> SmallVec { - assert!(len <= A::size()); - unsafe { SmallVec::from_buf_and_len_unchecked(buf, len) } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. Also sets the length. The user is responsible - /// for ensuring that `len <= A::size()`. - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5, 0, 0, 0]; - /// let small_vec: SmallVec<_> = unsafe { - /// SmallVec::from_buf_and_len_unchecked(buf, 5) - /// }; - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub unsafe fn from_buf_and_len_unchecked(buf: A, len: usize) -> SmallVec { - SmallVec { - capacity: len, - data: SmallVecData::from_inline(buf), - } - } - - - /// Sets the length of a vector. - /// - /// This will explicitly set the size of the vector, without actually - /// modifying its buffers, so it is up to the caller to ensure that the - /// vector is actually the specified size. 
- pub unsafe fn set_len(&mut self, new_len: usize) { - let (_, len_ptr, _) = self.triple_mut(); - *len_ptr = new_len; - } - - /// The maximum number of elements this vector can hold inline - #[inline] - pub fn inline_size(&self) -> usize { - A::size() - } - - /// The number of elements stored in the vector - #[inline] - pub fn len(&self) -> usize { - self.triple().1 - } - - /// Returns `true` if the vector is empty - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// The number of items the vector can hold without reallocating - #[inline] - pub fn capacity(&self) -> usize { - self.triple().2 - } - - /// Returns a tuple with (data ptr, len, capacity) - /// Useful to get all SmallVec properties with a single check of the current storage variant. - #[inline] - fn triple(&self) -> (*const A::Item, usize, usize) { - unsafe { - if self.spilled() { - let (ptr, len) = self.data.heap(); - (ptr, len, self.capacity) - } else { - (self.data.inline().ptr(), self.capacity, A::size()) - } - } - } - - /// Returns a tuple with (data ptr, len ptr, capacity) - #[inline] - fn triple_mut(&mut self) -> (*mut A::Item, &mut usize, usize) { - unsafe { - if self.spilled() { - let &mut (ptr, ref mut len_ptr) = self.data.heap_mut(); - (ptr, len_ptr, self.capacity) - } else { - (self.data.inline_mut().ptr_mut(), &mut self.capacity, A::size()) - } - } - } - - /// Returns `true` if the data has spilled into a separate heap-allocated buffer. - #[inline] - pub fn spilled(&self) -> bool { - self.capacity > A::size() - } - - /// Empty the vector and return an iterator over its former contents. - pub fn drain(&mut self) -> Drain { - unsafe { - let ptr = self.as_mut_ptr(); - - let current_len = self.len(); - self.set_len(0); - - let slice = slice::from_raw_parts_mut(ptr, current_len); - - Drain { - iter: slice.iter_mut(), - } - } - } - - /// Append an item to the vector. 
- #[inline] - pub fn push(&mut self, value: A::Item) { - unsafe { - let (_, &mut len, cap) = self.triple_mut(); - if len == cap { - self.reserve(1); - } - let (ptr, len_ptr, _) = self.triple_mut(); - *len_ptr = len + 1; - ptr::write(ptr.offset(len as isize), value); - } - } - - /// Remove an item from the end of the vector and return it, or None if empty. - #[inline] - pub fn pop(&mut self) -> Option { - unsafe { - let (ptr, len_ptr, _) = self.triple_mut(); - if *len_ptr == 0 { - return None; - } - let last_index = *len_ptr - 1; - *len_ptr = last_index; - Some(ptr::read(ptr.offset(last_index as isize))) - } - } - - /// Re-allocate to set the capacity to `max(new_cap, inline_size())`. - /// - /// Panics if `new_cap` is less than the vector's length. - pub fn grow(&mut self, new_cap: usize) { - unsafe { - let (ptr, &mut len, cap) = self.triple_mut(); - let unspilled = !self.spilled(); - assert!(new_cap >= len); - if new_cap <= self.inline_size() { - if unspilled { - return; - } - self.data = SmallVecData::from_inline(mem::uninitialized()); - ptr::copy_nonoverlapping(ptr, self.data.inline_mut().ptr_mut(), len); - self.capacity = len; - } else if new_cap != cap { - let mut vec = Vec::with_capacity(new_cap); - let new_alloc = vec.as_mut_ptr(); - mem::forget(vec); - ptr::copy_nonoverlapping(ptr, new_alloc, len); - self.data = SmallVecData::from_heap(new_alloc, len); - self.capacity = new_cap; - if unspilled { - return; - } - } else { - return; - } - deallocate(ptr, cap); - } - } - - /// Reserve capacity for `additional` more elements to be inserted. - /// - /// May reserve more space to avoid frequent reallocations. - /// - /// If the new capacity would overflow `usize` then it will be set to `usize::max_value()` - /// instead. (This means that inserting `additional` new elements is not guaranteed to be - /// possible after calling this function.) 
- #[inline] - pub fn reserve(&mut self, additional: usize) { - // prefer triple_mut() even if triple() would work - // so that the optimizer removes duplicated calls to it - // from callers like insert() - let (_, &mut len, cap) = self.triple_mut(); - if cap - len < additional { - let new_cap = len.checked_add(additional). - and_then(usize::checked_next_power_of_two). - unwrap_or(usize::max_value()); - self.grow(new_cap); - } - } - - /// Reserve the minimum capacity for `additional` more elements to be inserted. - /// - /// Panics if the new capacity overflows `usize`. - pub fn reserve_exact(&mut self, additional: usize) { - let (_, &mut len, cap) = self.triple_mut(); - if cap - len < additional { - match len.checked_add(additional) { - Some(cap) => self.grow(cap), - None => panic!("reserve_exact overflow"), - } - } - } - - /// Shrink the capacity of the vector as much as possible. - /// - /// When possible, this will move data from an external heap buffer to the vector's inline - /// storage. - pub fn shrink_to_fit(&mut self) { - if !self.spilled() { - return; - } - let len = self.len(); - if self.inline_size() >= len { - unsafe { - let (ptr, len) = self.data.heap(); - self.data = SmallVecData::from_inline(mem::uninitialized()); - ptr::copy_nonoverlapping(ptr, self.data.inline_mut().ptr_mut(), len); - deallocate(ptr, self.capacity); - self.capacity = len; - } - } else if self.capacity() > len { - self.grow(len); - } - } - - /// Shorten the vector, keeping the first `len` elements and dropping the rest. - /// - /// If `len` is greater than or equal to the vector's current length, this has no - /// effect. - /// - /// This does not re-allocate. If you want the vector's capacity to shrink, call - /// `shrink_to_fit` after truncating. 
- pub fn truncate(&mut self, len: usize) { - unsafe { - let (ptr, len_ptr, _) = self.triple_mut(); - while len < *len_ptr { - let last_index = *len_ptr - 1; - *len_ptr = last_index; - ptr::drop_in_place(ptr.offset(last_index as isize)); - } - } - } - - /// Extracts a slice containing the entire vector. - /// - /// Equivalent to `&s[..]`. - pub fn as_slice(&self) -> &[A::Item] { - self - } - - /// Extracts a mutable slice of the entire vector. - /// - /// Equivalent to `&mut s[..]`. - pub fn as_mut_slice(&mut self) -> &mut [A::Item] { - self - } - - /// Remove the element at position `index`, replacing it with the last element. - /// - /// This does not preserve ordering, but is O(1). - /// - /// Panics if `index` is out of bounds. - #[inline] - pub fn swap_remove(&mut self, index: usize) -> A::Item { - let len = self.len(); - self.swap(len - 1, index); - self.pop().unwrap_or_else(|| unsafe { unreachable() }) - } - - /// Remove all elements from the vector. - #[inline] - pub fn clear(&mut self) { - self.truncate(0); - } - - /// Remove and return the element at position `index`, shifting all elements after it to the - /// left. - /// - /// Panics if `index` is out of bounds. - pub fn remove(&mut self, index: usize) -> A::Item { - unsafe { - let (mut ptr, len_ptr, _) = self.triple_mut(); - let len = *len_ptr; - assert!(index < len); - *len_ptr = len - 1; - ptr = ptr.offset(index as isize); - let item = ptr::read(ptr); - ptr::copy(ptr.offset(1), ptr, len - index - 1); - item - } - } - - /// Insert an element at position `index`, shifting all elements after it to the right. - /// - /// Panics if `index` is out of bounds. 
- pub fn insert(&mut self, index: usize, element: A::Item) { - self.reserve(1); - - unsafe { - let (mut ptr, len_ptr, _) = self.triple_mut(); - let len = *len_ptr; - assert!(index <= len); - *len_ptr = len + 1; - ptr = ptr.offset(index as isize); - ptr::copy(ptr, ptr.offset(1), len - index); - ptr::write(ptr, element); - } - } - - /// Insert multiple elements at position `index`, shifting all following elements toward the - /// back. - pub fn insert_many>(&mut self, index: usize, iterable: I) { - let iter = iterable.into_iter(); - if index == self.len() { - return self.extend(iter); - } - - let (lower_size_bound, _) = iter.size_hint(); - assert!(lower_size_bound <= std::isize::MAX as usize); // Ensure offset is indexable - assert!(index + lower_size_bound >= index); // Protect against overflow - self.reserve(lower_size_bound); - - unsafe { - let old_len = self.len(); - assert!(index <= old_len); - let mut ptr = self.as_mut_ptr().offset(index as isize); - - // Move the trailing elements. - ptr::copy(ptr, ptr.offset(lower_size_bound as isize), old_len - index); - - // In case the iterator panics, don't double-drop the items we just copied above. - self.set_len(index); - - let mut num_added = 0; - for element in iter { - let mut cur = ptr.offset(num_added as isize); - if num_added >= lower_size_bound { - // Iterator provided more elements than the hint. Move trailing items again. - self.reserve(1); - ptr = self.as_mut_ptr().offset(index as isize); - cur = ptr.offset(num_added as isize); - ptr::copy(cur, cur.offset(1), old_len - index); - } - ptr::write(cur, element); - num_added += 1; - } - if num_added < lower_size_bound { - // Iterator provided fewer elements than the hint - ptr::copy(ptr.offset(lower_size_bound as isize), ptr.offset(num_added as isize), old_len - index); - } - - self.set_len(old_len + num_added); - } - } - - /// Convert a SmallVec to a Vec, without reallocating if the SmallVec has already spilled onto - /// the heap. 
- pub fn into_vec(self) -> Vec { - if self.spilled() { - unsafe { - let (ptr, len) = self.data.heap(); - let v = Vec::from_raw_parts(ptr, len, self.capacity); - mem::forget(self); - v - } - } else { - self.into_iter().collect() - } - } - - /// Convert the SmallVec into an `A` if possible. Otherwise return `Err(Self)`. - /// - /// This method returns `Err(Self)` if the SmallVec is too short (and the `A` contains uninitialized elements), - /// or if the SmallVec is too long (and all the elements were spilled to the heap). - pub fn into_inner(self) -> Result { - if self.spilled() || self.len() != A::size() { - Err(self) - } else { - unsafe { - let data = ptr::read(&self.data); - mem::forget(self); - Ok(data.into_inline()) - } - } - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all elements `e` such that `f(&e)` returns `false`. - /// This method operates in place and preserves the order of the retained - /// elements. - pub fn retain bool>(&mut self, mut f: F) { - let mut del = 0; - let len = self.len(); - for i in 0..len { - if !f(&mut self[i]) { - del += 1; - } else if del > 0 { - self.swap(i - del, i); - } - } - self.truncate(len - del); - } - - /// Removes consecutive duplicate elements. - pub fn dedup(&mut self) where A::Item: PartialEq { - self.dedup_by(|a, b| a == b); - } - - /// Removes consecutive duplicate elements using the given equality relation. - pub fn dedup_by(&mut self, mut same_bucket: F) - where F: FnMut(&mut A::Item, &mut A::Item) -> bool - { - // See the implementation of Vec::dedup_by in the - // standard library for an explanation of this algorithm. 
- let len = self.len(); - if len <= 1 { - return; - } - - let ptr = self.as_mut_ptr(); - let mut w: usize = 1; - - unsafe { - for r in 1..len { - let p_r = ptr.offset(r as isize); - let p_wm1 = ptr.offset((w - 1) as isize); - if !same_bucket(&mut *p_r, &mut *p_wm1) { - if r != w { - let p_w = p_wm1.offset(1); - mem::swap(&mut *p_r, &mut *p_w); - } - w += 1; - } - } - } - - self.truncate(w); - } - - /// Removes consecutive elements that map to the same key. - pub fn dedup_by_key(&mut self, mut key: F) - where F: FnMut(&mut A::Item) -> K, - K: PartialEq - { - self.dedup_by(|a, b| key(a) == key(b)); - } - - /// Creates a `SmallVec` directly from the raw components of another - /// `SmallVec`. - /// - /// # Safety - /// - /// This is highly unsafe, due to the number of invariants that aren't - /// checked: - /// - /// * `ptr` needs to have been previously allocated via `SmallVec` for its - /// spilled storage (at least, it's highly likely to be incorrect if it - /// wasn't). - /// * `ptr`'s `A::Item` type needs to be the same size and alignment that - /// it was allocated with - /// * `length` needs to be less than or equal to `capacity`. - /// * `capacity` needs to be the capacity that the pointer was allocated - /// with. - /// - /// Violating these may cause problems like corrupting the allocator's - /// internal data structures. - /// - /// Additionally, `capacity` must be greater than the amount of inline - /// storage `A` has; that is, the new `SmallVec` must need to spill over - /// into heap allocated storage. This condition is asserted against. - /// - /// The ownership of `ptr` is effectively transferred to the - /// `SmallVec` which may then deallocate, reallocate or change the - /// contents of memory pointed to by the pointer at will. Ensure - /// that nothing else uses the pointer after calling this - /// function. 
- /// - /// # Examples - /// - /// ``` - /// # #[macro_use] extern crate smallvec; - /// # use smallvec::SmallVec; - /// use std::mem; - /// use std::ptr; - /// - /// fn main() { - /// let mut v: SmallVec<[_; 1]> = smallvec![1, 2, 3]; - /// - /// // Pull out the important parts of `v`. - /// let p = v.as_mut_ptr(); - /// let len = v.len(); - /// let cap = v.capacity(); - /// let spilled = v.spilled(); - /// - /// unsafe { - /// // Forget all about `v`. The heap allocation that stored the - /// // three values won't be deallocated. - /// mem::forget(v); - /// - /// // Overwrite memory with [4, 5, 6]. - /// // - /// // This is only safe if `spilled` is true! Otherwise, we are - /// // writing into the old `SmallVec`'s inline storage on the - /// // stack. - /// assert!(spilled); - /// for i in 0..len as isize { - /// ptr::write(p.offset(i), 4 + i); - /// } - /// - /// // Put everything back together into a SmallVec with a different - /// // amount of inline storage, but which is still less than `cap`. - /// let rebuilt = SmallVec::<[_; 2]>::from_raw_parts(p, len, cap); - /// assert_eq!(&*rebuilt, &[4, 5, 6]); - /// } - /// } - pub unsafe fn from_raw_parts( - ptr: *mut A::Item, - length: usize, - capacity: usize, - ) -> SmallVec { - assert!(capacity > A::size()); - SmallVec { - capacity, - data: SmallVecData::from_heap(ptr, length), - } - } -} - -impl SmallVec where A::Item: Copy { - /// Copy the elements from a slice into a new `SmallVec`. - /// - /// For slices of `Copy` types, this is more efficient than `SmallVec::from(slice)`. 
- pub fn from_slice(slice: &[A::Item]) -> Self { - let len = slice.len(); - if len <= A::size() { - SmallVec { - capacity: len, - data: SmallVecData::from_inline(unsafe { - let mut data: A = mem::uninitialized(); - ptr::copy_nonoverlapping(slice.as_ptr(), data.ptr_mut(), len); - data - }) - } - } else { - let mut b = slice.to_vec(); - let (ptr, cap) = (b.as_mut_ptr(), b.capacity()); - mem::forget(b); - SmallVec { - capacity: cap, - data: SmallVecData::from_heap(ptr, len), - } - } - } - - /// Copy elements from a slice into the vector at position `index`, shifting any following - /// elements toward the back. - /// - /// For slices of `Copy` types, this is more efficient than `insert`. - pub fn insert_from_slice(&mut self, index: usize, slice: &[A::Item]) { - self.reserve(slice.len()); - - let len = self.len(); - assert!(index <= len); - - unsafe { - let slice_ptr = slice.as_ptr(); - let ptr = self.as_mut_ptr().offset(index as isize); - ptr::copy(ptr, ptr.offset(slice.len() as isize), len - index); - ptr::copy_nonoverlapping(slice_ptr, ptr, slice.len()); - self.set_len(len + slice.len()); - } - } - - /// Copy elements from a slice and append them to the vector. - /// - /// For slices of `Copy` types, this is more efficient than `extend`. - #[inline] - pub fn extend_from_slice(&mut self, slice: &[A::Item]) { - let len = self.len(); - self.insert_from_slice(len, slice); - } -} - -impl SmallVec where A::Item: Clone { - /// Resizes the vector so that its length is equal to `len`. - /// - /// If `len` is less than the current length, the vector simply truncated. - /// - /// If `len` is greater than the current length, `value` is appended to the - /// vector until its length equals `len`. - pub fn resize(&mut self, len: usize, value: A::Item) { - let old_len = self.len(); - - if len > old_len { - self.extend(repeat(value).take(len - old_len)); - } else { - self.truncate(len); - } - } - - /// Creates a `SmallVec` with `n` copies of `elem`. 
- /// ``` - /// use smallvec::SmallVec; - /// - /// let v = SmallVec::<[char; 128]>::from_elem('d', 2); - /// assert_eq!(v, SmallVec::from_buf(['d', 'd'])); - /// ``` - pub fn from_elem(elem: A::Item, n: usize) -> Self { - if n > A::size() { - vec![elem; n].into() - } else { - let mut v = SmallVec::::new(); - unsafe { - let (ptr, len_ptr, _) = v.triple_mut(); - let mut local_len = SetLenOnDrop::new(len_ptr); - - for i in 0..n as isize { - ::std::ptr::write(ptr.offset(i), elem.clone()); - local_len.increment_len(1); - } - } - v - } - } -} - -impl ops::Deref for SmallVec { - type Target = [A::Item]; - #[inline] - fn deref(&self) -> &[A::Item] { - unsafe { - let (ptr, len, _) = self.triple(); - slice::from_raw_parts(ptr, len) - } - } -} - -impl ops::DerefMut for SmallVec { - #[inline] - fn deref_mut(&mut self) -> &mut [A::Item] { - unsafe { - let (ptr, &mut len, _) = self.triple_mut(); - slice::from_raw_parts_mut(ptr, len) - } - } -} - -impl AsRef<[A::Item]> for SmallVec { - #[inline] - fn as_ref(&self) -> &[A::Item] { - self - } -} - -impl AsMut<[A::Item]> for SmallVec { - #[inline] - fn as_mut(&mut self) -> &mut [A::Item] { - self - } -} - -impl Borrow<[A::Item]> for SmallVec { - #[inline] - fn borrow(&self) -> &[A::Item] { - self - } -} - -impl BorrowMut<[A::Item]> for SmallVec { - #[inline] - fn borrow_mut(&mut self) -> &mut [A::Item] { - self - } -} - -#[cfg(feature = "std")] -impl> io::Write for SmallVec { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { - self.extend_from_slice(buf); - Ok(buf.len()) - } - - #[inline] - fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - self.extend_from_slice(buf); - Ok(()) - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for SmallVec where A::Item: Serialize { - fn serialize(&self, serializer: S) -> Result { - let mut state = serializer.serialize_seq(Some(self.len()))?; - for item in self { - state.serialize_element(&item)?; - } - 
state.end() - } -} - -#[cfg(feature = "serde")] -impl<'de, A: Array> Deserialize<'de> for SmallVec where A::Item: Deserialize<'de> { - fn deserialize>(deserializer: D) -> Result { - deserializer.deserialize_seq(SmallVecVisitor{phantom: PhantomData}) - } -} - -#[cfg(feature = "serde")] -struct SmallVecVisitor { - phantom: PhantomData -} - -#[cfg(feature = "serde")] -impl<'de, A: Array> Visitor<'de> for SmallVecVisitor -where A::Item: Deserialize<'de>, -{ - type Value = SmallVec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a sequence") - } - - fn visit_seq(self, mut seq: B) -> Result - where - B: SeqAccess<'de>, - { - let len = seq.size_hint().unwrap_or(0); - let mut values = SmallVec::with_capacity(len); - - while let Some(value) = seq.next_element()? { - values.push(value); - } - - Ok(values) - } -} - - -#[cfg(feature = "specialization")] -trait SpecFrom { - fn spec_from(slice: S) -> SmallVec; -} - -#[cfg(feature = "specialization")] -impl<'a, A: Array> SpecFrom for SmallVec where A::Item: Clone { - #[inline] - default fn spec_from(slice: &'a [A::Item]) -> SmallVec { - slice.into_iter().cloned().collect() - } -} - -#[cfg(feature = "specialization")] -impl<'a, A: Array> SpecFrom for SmallVec where A::Item: Copy { - #[inline] - fn spec_from(slice: &'a [A::Item]) -> SmallVec { - SmallVec::from_slice(slice) - } -} - -impl<'a, A: Array> From<&'a [A::Item]> for SmallVec where A::Item: Clone { - #[cfg(not(feature = "specialization"))] - #[inline] - fn from(slice: &'a [A::Item]) -> SmallVec { - slice.into_iter().cloned().collect() - } - - #[cfg(feature = "specialization")] - #[inline] - fn from(slice: &'a [A::Item]) -> SmallVec { - SmallVec::spec_from(slice) - } -} - -impl From> for SmallVec { - #[inline] - fn from(vec: Vec) -> SmallVec { - SmallVec::from_vec(vec) - } -} - -impl From for SmallVec { - #[inline] - fn from(array: A) -> SmallVec { - SmallVec::from_buf(array) - } -} - -macro_rules! 
impl_index { - ($index_type: ty, $output_type: ty) => { - impl ops::Index<$index_type> for SmallVec { - type Output = $output_type; - #[inline] - fn index(&self, index: $index_type) -> &$output_type { - &(&**self)[index] - } - } - - impl ops::IndexMut<$index_type> for SmallVec { - #[inline] - fn index_mut(&mut self, index: $index_type) -> &mut $output_type { - &mut (&mut **self)[index] - } - } - } -} - -impl_index!(usize, A::Item); -impl_index!(ops::Range, [A::Item]); -impl_index!(ops::RangeFrom, [A::Item]); -impl_index!(ops::RangeTo, [A::Item]); -impl_index!(ops::RangeFull, [A::Item]); - -impl ExtendFromSlice for SmallVec where A::Item: Copy { - fn extend_from_slice(&mut self, other: &[A::Item]) { - SmallVec::extend_from_slice(self, other) - } -} - -#[allow(deprecated)] -impl VecLike for SmallVec { - #[inline] - fn push(&mut self, value: A::Item) { - SmallVec::push(self, value); - } -} - -impl FromIterator for SmallVec { - fn from_iter>(iterable: I) -> SmallVec { - let mut v = SmallVec::new(); - v.extend(iterable); - v - } -} - -impl Extend for SmallVec { - fn extend>(&mut self, iterable: I) { - let mut iter = iterable.into_iter(); - let (lower_size_bound, _) = iter.size_hint(); - self.reserve(lower_size_bound); - - unsafe { - let (ptr, len_ptr, cap) = self.triple_mut(); - let mut len = SetLenOnDrop::new(len_ptr); - while len.get() < cap { - if let Some(out) = iter.next() { - ptr::write(ptr.offset(len.get() as isize), out); - len.increment_len(1); - } else { - return; - } - } - } - - for elem in iter { - self.push(elem); - } - } -} - -impl fmt::Debug for SmallVec where A::Item: fmt::Debug { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_list().entries(self.iter()).finish() - } -} - -impl Default for SmallVec { - #[inline] - fn default() -> SmallVec { - SmallVec::new() - } -} - -#[cfg(feature = "may_dangle")] -unsafe impl<#[may_dangle] A: Array> Drop for SmallVec { - fn drop(&mut self) { - unsafe { - if self.spilled() { - let (ptr, len) = 
self.data.heap(); - Vec::from_raw_parts(ptr, len, self.capacity); - } else { - ptr::drop_in_place(&mut self[..]); - } - } - } -} - -#[cfg(not(feature = "may_dangle"))] -impl Drop for SmallVec { - fn drop(&mut self) { - unsafe { - if self.spilled() { - let (ptr, len) = self.data.heap(); - Vec::from_raw_parts(ptr, len, self.capacity); - } else { - ptr::drop_in_place(&mut self[..]); - } - } - } -} - -impl Clone for SmallVec where A::Item: Clone { - fn clone(&self) -> SmallVec { - let mut new_vector = SmallVec::with_capacity(self.len()); - for element in self.iter() { - new_vector.push((*element).clone()) - } - new_vector - } -} - -impl PartialEq> for SmallVec - where A::Item: PartialEq { - #[inline] - fn eq(&self, other: &SmallVec) -> bool { self[..] == other[..] } - #[inline] - fn ne(&self, other: &SmallVec) -> bool { self[..] != other[..] } -} - -impl Eq for SmallVec where A::Item: Eq {} - -impl PartialOrd for SmallVec where A::Item: PartialOrd { - #[inline] - fn partial_cmp(&self, other: &SmallVec) -> Option { - PartialOrd::partial_cmp(&**self, &**other) - } -} - -impl Ord for SmallVec where A::Item: Ord { - #[inline] - fn cmp(&self, other: &SmallVec) -> cmp::Ordering { - Ord::cmp(&**self, &**other) - } -} - -impl Hash for SmallVec where A::Item: Hash { - fn hash(&self, state: &mut H) { - (**self).hash(state) - } -} - -unsafe impl Send for SmallVec where A::Item: Send {} - -/// An iterator that consumes a `SmallVec` and yields its items by value. -/// -/// Returned from [`SmallVec::into_iter`][1]. 
-/// -/// [1]: struct.SmallVec.html#method.into_iter -pub struct IntoIter { - data: SmallVec, - current: usize, - end: usize, -} - -impl Drop for IntoIter { - fn drop(&mut self) { - for _ in self { } - } -} - -impl Iterator for IntoIter { - type Item = A::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.current == self.end { - None - } - else { - unsafe { - let current = self.current as isize; - self.current += 1; - Some(ptr::read(self.data.as_ptr().offset(current))) - } - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let size = self.end - self.current; - (size, Some(size)) - } -} - -impl DoubleEndedIterator for IntoIter { - #[inline] - fn next_back(&mut self) -> Option { - if self.current == self.end { - None - } - else { - unsafe { - self.end -= 1; - Some(ptr::read(self.data.as_ptr().offset(self.end as isize))) - } - } - } -} - -impl ExactSizeIterator for IntoIter { } - -impl IntoIterator for SmallVec { - type IntoIter = IntoIter; - type Item = A::Item; - fn into_iter(mut self) -> Self::IntoIter { - unsafe { - // Set SmallVec len to zero as `IntoIter` drop handles dropping of the elements - let len = self.len(); - self.set_len(0); - IntoIter { - data: self, - current: 0, - end: len, - } - } - } -} - -impl<'a, A: Array> IntoIterator for &'a SmallVec { - type IntoIter = slice::Iter<'a, A::Item>; - type Item = &'a A::Item; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, A: Array> IntoIterator for &'a mut SmallVec { - type IntoIter = slice::IterMut<'a, A::Item>; - type Item = &'a mut A::Item; - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -/// Types that can be used as the backing store for a SmallVec -pub unsafe trait Array { - /// The type of the array's elements. - type Item; - /// Returns the number of items the array can hold. - fn size() -> usize; - /// Returns a pointer to the first element of the array. 
- fn ptr(&self) -> *const Self::Item; - /// Returns a mutable pointer to the first element of the array. - fn ptr_mut(&mut self) -> *mut Self::Item; -} - -/// Set the length of the vec when the `SetLenOnDrop` value goes out of scope. -/// -/// Copied from https://github.com/rust-lang/rust/pull/36355 -struct SetLenOnDrop<'a> { - len: &'a mut usize, - local_len: usize, -} - -impl<'a> SetLenOnDrop<'a> { - #[inline] - fn new(len: &'a mut usize) -> Self { - SetLenOnDrop { local_len: *len, len: len } - } - - #[inline] - fn get(&self) -> usize { - self.local_len - } - - #[inline] - fn increment_len(&mut self, increment: usize) { - self.local_len += increment; - } -} - -impl<'a> Drop for SetLenOnDrop<'a> { - #[inline] - fn drop(&mut self) { - *self.len = self.local_len; - } -} - -macro_rules! impl_array( - ($($size:expr),+) => { - $( - unsafe impl Array for [T; $size] { - type Item = T; - fn size() -> usize { $size } - fn ptr(&self) -> *const T { self.as_ptr() } - fn ptr_mut(&mut self) -> *mut T { self.as_mut_ptr() } - } - )+ - } -); - -impl_array!(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, - 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, - 0x10000, 0x20000, 0x40000, 0x80000, 0x100000); - -#[cfg(test)] -mod tests { - use SmallVec; - - use std::iter::FromIterator; - - #[cfg(feature = "std")] - use std::borrow::ToOwned; - #[cfg(not(feature = "std"))] - use alloc::borrow::ToOwned; - #[cfg(feature = "std")] - use std::rc::Rc; - #[cfg(not(feature = "std"))] - use alloc::rc::Rc; - #[cfg(not(feature = "std"))] - use alloc::boxed::Box; - #[cfg(not(feature = "std"))] - use alloc::vec::Vec; - - #[test] - pub fn test_zero() { - let mut v = SmallVec::<[_; 0]>::new(); - assert!(!v.spilled()); - v.push(0usize); - assert!(v.spilled()); - assert_eq!(&*v, &[0]); - } - - // We heap allocate all these strings so that double frees will show up under valgrind. 
- - #[test] - pub fn test_inline() { - let mut v = SmallVec::<[_; 16]>::new(); - v.push("hello".to_owned()); - v.push("there".to_owned()); - assert_eq!(&*v, &[ - "hello".to_owned(), - "there".to_owned(), - ][..]); - } - - #[test] - pub fn test_spill() { - let mut v = SmallVec::<[_; 2]>::new(); - v.push("hello".to_owned()); - assert_eq!(v[0], "hello"); - v.push("there".to_owned()); - v.push("burma".to_owned()); - assert_eq!(v[0], "hello"); - v.push("shave".to_owned()); - assert_eq!(&*v, &[ - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - ][..]); - } - - #[test] - pub fn test_double_spill() { - let mut v = SmallVec::<[_; 2]>::new(); - v.push("hello".to_owned()); - v.push("there".to_owned()); - v.push("burma".to_owned()); - v.push("shave".to_owned()); - v.push("hello".to_owned()); - v.push("there".to_owned()); - v.push("burma".to_owned()); - v.push("shave".to_owned()); - assert_eq!(&*v, &[ - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - ][..]); - } - - /// https://github.com/servo/rust-smallvec/issues/4 - #[test] - fn issue_4() { - SmallVec::<[Box; 2]>::new(); - } - - /// https://github.com/servo/rust-smallvec/issues/5 - #[test] - fn issue_5() { - assert!(Some(SmallVec::<[&u32; 2]>::new()).is_some()); - } - - #[test] - fn test_with_capacity() { - let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(1); - assert!(v.is_empty()); - assert!(!v.spilled()); - assert_eq!(v.capacity(), 3); - - let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(10); - assert!(v.is_empty()); - assert!(v.spilled()); - assert_eq!(v.capacity(), 10); - } - - #[test] - fn drain() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.drain().collect::>(), &[3]); - - // spilling the vec - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.drain().collect::>(), &[3, 4, 5]); - } - - #[test] - fn drain_rev() { - 
let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.drain().rev().collect::>(), &[3]); - - // spilling the vec - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.drain().rev().collect::>(), &[5, 4, 3]); - } - - #[test] - fn into_iter() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.into_iter().collect::>(), &[3]); - - // spilling the vec - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.into_iter().collect::>(), &[3, 4, 5]); - } - - #[test] - fn into_iter_rev() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.into_iter().rev().collect::>(), &[3]); - - // spilling the vec - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.into_iter().rev().collect::>(), &[5, 4, 3]); - } - - #[test] - fn into_iter_drop() { - use std::cell::Cell; - - struct DropCounter<'a>(&'a Cell); - - impl<'a> Drop for DropCounter<'a> { - fn drop(&mut self) { - self.0.set(self.0.get() + 1); - } - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.into_iter(); - assert_eq!(cell.get(), 1); - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - assert!(v.into_iter().next().is_some()); - assert_eq!(cell.get(), 2); - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - assert!(v.into_iter().next().is_some()); - assert_eq!(cell.get(), 3); - } - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - { - let mut it = v.into_iter(); - assert!(it.next().is_some()); - 
assert!(it.next_back().is_some()); - } - assert_eq!(cell.get(), 3); - } - } - - #[test] - fn test_capacity() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.reserve(1); - assert_eq!(v.capacity(), 2); - assert!(!v.spilled()); - - v.reserve_exact(0x100); - assert!(v.capacity() >= 0x100); - - v.push(0); - v.push(1); - v.push(2); - v.push(3); - - v.shrink_to_fit(); - assert!(v.capacity() < 0x100); - } - - #[test] - fn test_truncate() { - let mut v: SmallVec<[Box; 8]> = SmallVec::new(); - - for x in 0..8 { - v.push(Box::new(x)); - } - v.truncate(4); - - assert_eq!(v.len(), 4); - assert!(!v.spilled()); - - assert_eq!(*v.swap_remove(1), 1); - assert_eq!(*v.remove(1), 3); - v.insert(1, Box::new(3)); - - assert_eq!(&v.iter().map(|v| **v).collect::>(), &[0, 3, 2]); - } - - #[test] - fn test_insert_many() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many(1, [5, 6].iter().cloned()); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 5, 6, 1, 2, 3]); - } - - struct MockHintIter{x: T, hint: usize} - impl Iterator for MockHintIter { - type Item = T::Item; - fn next(&mut self) -> Option {self.x.next()} - fn size_hint(&self) -> (usize, Option) {(self.hint, None)} - } - - #[test] - fn test_insert_many_short_hint() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many(1, MockHintIter{x: [5, 6].iter().cloned(), hint: 5}); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 5, 6, 1, 2, 3]); - } - - #[test] - fn test_insert_many_long_hint() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many(1, MockHintIter{x: [5, 6].iter().cloned(), hint: 1}); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 5, 6, 1, 2, 3]); - } - - #[cfg(feature = "std")] - #[test] - // https://github.com/servo/rust-smallvec/issues/96 - fn test_insert_many_panic() { - 
struct PanicOnDoubleDrop { - dropped: Box - } - - impl Drop for PanicOnDoubleDrop { - fn drop(&mut self) { - assert!(!*self.dropped, "already dropped"); - *self.dropped = true; - } - } - - struct BadIter; - impl Iterator for BadIter { - type Item = PanicOnDoubleDrop; - fn size_hint(&self) -> (usize, Option) { (1, None) } - fn next(&mut self) -> Option { panic!() } - } - - let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> = vec![ - PanicOnDoubleDrop { dropped: Box::new(false) }, - PanicOnDoubleDrop { dropped: Box::new(false) }, - ].into(); - let result = ::std::panic::catch_unwind(move || { - vec.insert_many(0, BadIter); - }); - assert!(result.is_err()); - } - - #[test] - #[should_panic] - fn test_invalid_grow() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - v.extend(0..8); - v.grow(5); - } - - #[test] - fn test_insert_from_slice() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_from_slice(1, &[5, 6]); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 5, 6, 1, 2, 3]); - } - - #[test] - fn test_extend_from_slice() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.extend_from_slice(&[5, 6]); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 1, 2, 3, 5, 6]); - } - - #[test] - #[should_panic] - fn test_drop_panic_smallvec() { - // This test should only panic once, and not double panic, - // which would mean a double drop - struct DropPanic; - - impl Drop for DropPanic { - fn drop(&mut self) { - panic!("drop"); - } - } - - let mut v = SmallVec::<[_; 1]>::new(); - v.push(DropPanic); - } - - #[test] - fn test_eq() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let mut b: SmallVec<[u32; 2]> = SmallVec::new(); - let mut c: SmallVec<[u32; 2]> = SmallVec::new(); - // a = [1, 2] - a.push(1); - a.push(2); - // b = [1, 2] - b.push(1); - b.push(2); - // c = [3, 4] - c.push(3); - c.push(4); - - assert!(a == b); - assert!(a 
!= c); - } - - #[test] - fn test_ord() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let mut b: SmallVec<[u32; 2]> = SmallVec::new(); - let mut c: SmallVec<[u32; 2]> = SmallVec::new(); - // a = [1] - a.push(1); - // b = [1, 1] - b.push(1); - b.push(1); - // c = [1, 2] - c.push(1); - c.push(2); - - assert!(a < b); - assert!(b > a); - assert!(b < c); - assert!(c > b); - } - - #[cfg(feature = "std")] - #[test] - fn test_hash() { - use std::hash::Hash; - use std::collections::hash_map::DefaultHasher; - - { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let b = [1, 2]; - a.extend(b.iter().cloned()); - let mut hasher = DefaultHasher::new(); - assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher)); - } - { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let b = [1, 2, 11, 12]; - a.extend(b.iter().cloned()); - let mut hasher = DefaultHasher::new(); - assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher)); - } - } - - #[test] - fn test_as_ref() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.as_ref(), [1]); - a.push(2); - assert_eq!(a.as_ref(), [1, 2]); - a.push(3); - assert_eq!(a.as_ref(), [1, 2, 3]); - } - - #[test] - fn test_as_mut() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.as_mut(), [1]); - a.push(2); - assert_eq!(a.as_mut(), [1, 2]); - a.push(3); - assert_eq!(a.as_mut(), [1, 2, 3]); - a.as_mut()[1] = 4; - assert_eq!(a.as_mut(), [1, 4, 3]); - } - - #[test] - fn test_borrow() { - use std::borrow::Borrow; - - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.borrow(), [1]); - a.push(2); - assert_eq!(a.borrow(), [1, 2]); - a.push(3); - assert_eq!(a.borrow(), [1, 2, 3]); - } - - #[test] - fn test_borrow_mut() { - use std::borrow::BorrowMut; - - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.borrow_mut(), [1]); - a.push(2); - assert_eq!(a.borrow_mut(), [1, 2]); - a.push(3); - assert_eq!(a.borrow_mut(), [1, 2, 3]); - 
BorrowMut::<[u32]>::borrow_mut(&mut a)[1] = 4; - assert_eq!(a.borrow_mut(), [1, 4, 3]); - } - - #[test] - fn test_from() { - assert_eq!(&SmallVec::<[u32; 2]>::from(&[1][..])[..], [1]); - assert_eq!(&SmallVec::<[u32; 2]>::from(&[1, 2, 3][..])[..], [1, 2, 3]); - - let vec = vec![]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let array = [1]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from(array); - assert_eq!(&*small_vec, &[1]); - drop(small_vec); - - let array = [99; 128]; - let small_vec: SmallVec<[u8; 128]> = SmallVec::from(array); - assert_eq!(&*small_vec, vec![99u8; 128].as_slice()); - drop(small_vec); - } - - #[test] - fn test_from_slice() { - assert_eq!(&SmallVec::<[u32; 2]>::from_slice(&[1][..])[..], [1]); - assert_eq!(&SmallVec::<[u32; 2]>::from_slice(&[1, 2, 3][..])[..], [1, 2, 3]); - } - - #[test] - fn test_exact_size_iterator() { - let mut vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]); - assert_eq!(vec.clone().into_iter().len(), 3); - assert_eq!(vec.drain().len(), 3); - } - - #[test] - #[allow(deprecated)] - fn veclike_deref_slice() { - use super::VecLike; - - fn test>(vec: &mut T) { - assert!(!vec.is_empty()); - assert_eq!(vec.len(), 3); - - vec.sort(); - assert_eq!(&vec[..], [1, 2, 3]); - } - - let mut vec = SmallVec::<[i32; 2]>::from(&[3, 1, 2][..]); - test(&mut vec); - } - - #[test] - fn shrink_to_fit_unspill() { - let mut vec = SmallVec::<[u8; 2]>::from_iter(0..3); - vec.pop(); - assert!(vec.spilled()); - vec.shrink_to_fit(); - assert!(!vec.spilled(), "shrink_to_fit will un-spill if possible"); - } - - #[test] - fn test_into_vec() { - let vec = SmallVec::<[u8; 
2]>::from_iter(0..2); - assert_eq!(vec.into_vec(), vec![0, 1]); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..3); - assert_eq!(vec.into_vec(), vec![0, 1, 2]); - } - - #[test] - fn test_into_inner() { - let vec = SmallVec::<[u8; 2]>::from_iter(0..2); - assert_eq!(vec.into_inner(), Ok([0, 1])); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..1); - assert_eq!(vec.clone().into_inner(), Err(vec)); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..3); - assert_eq!(vec.clone().into_inner(), Err(vec)); - } - - #[test] - fn test_from_vec() { - let vec = vec![]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![1]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1]); - drop(small_vec); - - let vec = vec![1, 2, 3]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - } - - #[test] - fn test_retain() { - // Test inline data storate - let mut sv: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]); - sv.retain(|&mut i| i != 3); - assert_eq!(sv.pop(), Some(4)); - assert_eq!(sv.pop(), Some(2)); - assert_eq!(sv.pop(), Some(1)); - assert_eq!(sv.pop(), None); - - // Test spilled data storage - let mut sv: SmallVec<[i32; 3]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]); - sv.retain(|&mut i| i != 3); - assert_eq!(sv.pop(), Some(4)); - assert_eq!(sv.pop(), Some(2)); - assert_eq!(sv.pop(), Some(1)); - assert_eq!(sv.pop(), None); - - // Test that 
drop implementations are called for inline. - let one = Rc::new(1); - let mut sv: SmallVec<[Rc; 3]> = SmallVec::new(); - sv.push(Rc::clone(&one)); - assert_eq!(Rc::strong_count(&one), 2); - sv.retain(|_| false); - assert_eq!(Rc::strong_count(&one), 1); - - // Test that drop implementations are called for spilled data. - let mut sv: SmallVec<[Rc; 1]> = SmallVec::new(); - sv.push(Rc::clone(&one)); - sv.push(Rc::new(2)); - assert_eq!(Rc::strong_count(&one), 2); - sv.retain(|_| false); - assert_eq!(Rc::strong_count(&one), 1); - } - - #[test] - fn test_dedup() { - let mut dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 2, 3, 3]); - dupes.dedup(); - assert_eq!(&*dupes, &[1, 2, 3]); - - let mut empty: SmallVec<[i32; 5]> = SmallVec::new(); - empty.dedup(); - assert!(empty.is_empty()); - - let mut all_ones: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 1, 1, 1]); - all_ones.dedup(); - assert_eq!(all_ones.len(), 1); - - let mut no_dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 4, 5]); - no_dupes.dedup(); - assert_eq!(no_dupes.len(), 5); - } - - #[test] - fn test_resize() { - let mut v: SmallVec<[i32; 8]> = SmallVec::new(); - v.push(1); - v.resize(5, 0); - assert_eq!(v[..], [1, 0, 0, 0, 0][..]); - - v.resize(2, -1); - assert_eq!(v[..], [1, 0][..]); - } - - #[cfg(feature = "std")] - #[test] - fn test_write() { - use io::Write; - - let data = [1, 2, 3, 4, 5]; - - let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new(); - let len = small_vec.write(&data[..]).unwrap(); - assert_eq!(len, 5); - assert_eq!(small_vec.as_ref(), data.as_ref()); - - let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new(); - small_vec.write_all(&data[..]).unwrap(); - assert_eq!(small_vec.as_ref(), data.as_ref()); - } - - #[cfg(feature = "serde")] - extern crate bincode; - - #[cfg(feature = "serde")] - #[test] - fn test_serde() { - use self::bincode::{config, deserialize}; - let mut small_vec: SmallVec<[i32; 2]> = SmallVec::new(); - small_vec.push(1); - let encoded = 
config().limit(100).serialize(&small_vec).unwrap(); - let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap(); - assert_eq!(small_vec, decoded); - small_vec.push(2); - // Spill the vec - small_vec.push(3); - small_vec.push(4); - // Check again after spilling. - let encoded = config().limit(100).serialize(&small_vec).unwrap(); - let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap(); - assert_eq!(small_vec, decoded); - } - - #[test] - fn grow_to_shrink() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(1); - v.push(2); - v.push(3); - assert!(v.spilled()); - v.clear(); - // Shrink to inline. - v.grow(2); - assert!(!v.spilled()); - assert_eq!(v.capacity(), 2); - assert_eq!(v.len(), 0); - v.push(4); - assert_eq!(v[..], [4]); - } - - #[test] - fn resumable_extend() { - let s = "a b c"; - // This iterator yields: (Some('a'), None, Some('b'), None, Some('c')), None - let it = s - .chars() - .scan(0, |_, ch| if ch.is_whitespace() { None } else { Some(ch) }); - let mut v: SmallVec<[char; 4]> = SmallVec::new(); - v.extend(it); - assert_eq!(v[..], ['a']); - } - - #[test] - fn grow_spilled_same_size() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(0); - v.push(1); - v.push(2); - assert!(v.spilled()); - assert_eq!(v.capacity(), 4); - // grow with the same capacity - v.grow(4); - assert_eq!(v.capacity(), 4); - assert_eq!(v[..], [0, 1, 2]); - } -} diff --git a/third_party/rust/smallvec/.cargo-checksum.json b/third_party/rust/smallvec/.cargo-checksum.json index 3e4ba0088386..1078c914c4e7 100644 --- a/third_party/rust/smallvec/.cargo-checksum.json +++ b/third_party/rust/smallvec/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"82c58cfe1208040b0772a4eb0fc59c2f84c75dd28115f2847a6edc91a340b7f4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"a01127c37308457e8d396b176fb790846be0978c173be3f13260b62efcef011b","benches/bench.rs":"9dca7122a3dcb2c099e49807e4d3b8f01d9220e2b3db0a54e9901ee74392866f","lib.rs":"6b128fc5aa50b5dd775d45252e277c13546f1de2ebee340c6c8ff48627678244","scripts/run_miri.sh":"2e83d153efc16cbc3c41589e306faa0624c8b9a0feecea3baae6e34f4563ac42","specialization.rs":"46433586203399251cba496d67b88d34e1be3c2b591986b77463513da1c66471"},"package":"5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc"} \ No newline at end of file +{"files":{"Cargo.toml":"45e745963490153700d8392f914b159019420aa81d8ae80241771769199cf65b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"a01127c37308457e8d396b176fb790846be0978c173be3f13260b62efcef011b","benches/bench.rs":"9dca7122a3dcb2c099e49807e4d3b8f01d9220e2b3db0a54e9901ee74392866f","lib.rs":"45aa4cd721dd8e649b79c443b9d12b8ff0f9e4dd4188e604b2d6b36b8ceb1c71","scripts/run_miri.sh":"2e83d153efc16cbc3c41589e306faa0624c8b9a0feecea3baae6e34f4563ac42","specialization.rs":"46433586203399251cba496d67b88d34e1be3c2b591986b77463513da1c66471"},"package":"05720e22615919e4734f6a99ceae50d00226c3c5aca406e102ebc33298214e0a"} \ No newline at end of file diff --git a/third_party/rust/smallvec/Cargo.toml b/third_party/rust/smallvec/Cargo.toml index 199dc13b2739..e994f50f52d3 100644 --- a/third_party/rust/smallvec/Cargo.toml +++ b/third_party/rust/smallvec/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "smallvec" -version = "1.2.0" +version = "1.3.0" authors = ["Simon Sapin "] description = "'Small vector' optimization: store up to a small number of items on the stack" 
documentation = "https://docs.rs/smallvec/" @@ -33,6 +33,7 @@ optional = true version = "1.0.1" [features] +const_generics = [] may_dangle = [] specialization = [] union = [] diff --git a/third_party/rust/smallvec/lib.rs b/third_party/rust/smallvec/lib.rs index 0c8243edaef0..d6fd6371e502 100644 --- a/third_party/rust/smallvec/lib.rs +++ b/third_party/rust/smallvec/lib.rs @@ -14,7 +14,16 @@ //! `write` feature implements the `std::io::Write` trait for vectors of `u8`. //! When this feature is enabled, `smallvec` depends on `std`. //! -//! ## `union` feature +//! ## Optional features +//! +//! ### `write` +//! +//! When this feature is enabled, `SmallVec<[u8; _]>` implements the `std::io::Write` trait. +//! This feature is not compatible with `#![no_std]` programs. +//! +//! ### `union` +//! +//! **This feature is unstable and requires a nightly build of the Rust toolchain.** //! //! When the `union` feature is enabled `smallvec` will track its state (inline or spilled) //! without the use of an enum tag, reducing the size of the `smallvec` by one machine word. @@ -24,11 +33,36 @@ //! //! To use this feature add `features = ["union"]` in the `smallvec` section of Cargo.toml. //! Note that this feature requires a nightly compiler (for now). +//! +//! ### `const_generics` +//! +//! **This feature is unstable and requires a nightly build of the Rust toolchain.** +//! +//! When this feature is enabled, `SmallVec` works with any arrays of any size, not just a fixed +//! list of sizes. +//! +//! ### `specialization` +//! +//! **This feature is unstable and requires a nightly build of the Rust toolchain.** +//! +//! When this feature is enabled, `SmallVec::from(slice)` has improved performance for slices +//! of `Copy` types. (Without this feature, you can use `SmallVec::from_slice` to get optimal +//! performance for `Copy` types.) +//! +//! ### `may_dangle` +//! +//! **This feature is unstable and requires a nightly build of the Rust toolchain.** +//! +//! 
This feature makes the Rust compiler less strict about use of vectors that contain borrowed +//! references. For details, see the +//! [Rustonomicon](https://doc.rust-lang.org/1.42.0/nomicon/dropck.html#an-escape-hatch). #![no_std] #![cfg_attr(feature = "union", feature(untagged_unions))] #![cfg_attr(feature = "specialization", feature(specialization))] #![cfg_attr(feature = "may_dangle", feature(dropck_eyepatch))] +#![cfg_attr(feature = "const_generics", allow(incomplete_features))] +#![cfg_attr(feature = "const_generics", feature(const_generics))] #![deny(missing_docs)] #[doc(hidden)] @@ -1053,6 +1087,7 @@ impl SmallVec { /// assert_eq!(&*rebuilt, &[4, 5, 6]); /// } /// } + #[inline] pub unsafe fn from_raw_parts(ptr: *mut A::Item, length: usize, capacity: usize) -> SmallVec { assert!(capacity > A::size()); SmallVec { @@ -1370,6 +1405,7 @@ where } impl FromIterator for SmallVec { + #[inline] fn from_iter>(iterable: I) -> SmallVec { let mut v = SmallVec::new(); v.extend(iterable); @@ -1450,6 +1486,7 @@ impl Clone for SmallVec where A::Item: Clone, { + #[inline] fn clone(&self) -> SmallVec { let mut new_vector = SmallVec::with_capacity(self.len()); for element in self.iter() { @@ -1667,6 +1704,13 @@ impl<'a> Drop for SetLenOnDrop<'a> { } } +#[cfg(feature = "const_generics")] +unsafe impl Array for [T; N] { + type Item = T; + fn size() -> usize { N } +} + +#[cfg(not(feature = "const_generics"))] macro_rules! impl_array( ($($size:expr),+) => { $( @@ -1678,12 +1722,27 @@ macro_rules! impl_array( } ); +#[cfg(not(feature = "const_generics"))] impl_array!( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x60, 0x80, 0x100, 0x200, 0x400, 0x600, 0x800, 0x1000, 0x2000, 0x4000, 0x6000, 0x8000, 0x10000, 0x20000, 0x40000, 0x60000, 0x80000, 0x10_0000 ); +/// Convenience trait for constructing a `SmallVec` +pub trait ToSmallVec { + /// Construct a new `SmallVec` from a slice. 
+ fn to_smallvec(&self) -> SmallVec; +} + +impl ToSmallVec for [A::Item] + where A::Item: Copy { + #[inline] + fn to_smallvec(&self) -> SmallVec { + SmallVec::from_slice(self) + } +} + #[cfg(test)] mod tests { use crate::SmallVec; @@ -2522,4 +2581,10 @@ mod tests { assert_eq!(v.capacity(), 4); assert_eq!(v[..], [0, 1, 2]); } + + #[cfg(feature = "const_generics")] + #[test] + fn const_generics() { + let _v = SmallVec::<[i32; 987]>::default(); + } } diff --git a/third_party/rust/spirv-cross-internal/.cargo-checksum.json b/third_party/rust/spirv-cross-internal/.cargo-checksum.json index 33ae53f24742..bf13e33cb99c 100644 --- a/third_party/rust/spirv-cross-internal/.cargo-checksum.json +++ b/third_party/rust/spirv-cross-internal/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"306d77fae1e434cea201d7f9b9f5a5b22c9322d089c89bc0ef5c0f8a70a4b578","build.rs":"975dd26cbee3c1987ed658e38fe2211224320ce7943d6ef7bd2735425280e89a","src/bindings_native.rs":"d999b7a89c4ab7f2b839995bac1cda77f63142c4fff3b6f6764e27dec774b45c","src/bindings_wasm.rs":"fce9a87f0af3d700e673cba68a771329e6a4841b9f048db5b5d648f83f6062b6","src/bindings_wasm_functions.rs":"3ea541791b3ea8f4881d813070c83549d0ee5e701d158a53942af415d42e7c6f","src/compiler.rs":"647370c53e95aacfd5f3c8b01821eb420630289cd66b5cfdc8f4f2d47dee90aa","src/emscripten.rs":"3169890001970610013026468739910afca0d85e00d7e34beadfdd31bbcbeeb7","src/glsl.rs":"0a80523899d168796a623d6ac1f65b0422843ec0bb29c6fe1bb5cb837ceee163","src/hlsl.rs":"f6b5a61e00cbabef4c64eda2640229ea82b130f4254d36d32dda7ced357cc213","src/lib.rs":"cc41cbbe48f3e96791ba5338c66fa1fe0e533eaed6bbdced3f008d5e9fe6c6ce","src/msl.rs":"a62d5a7d65fa0bc6d1e24db2c2ec11b850c0a1b5da7a71669e63f5639f064bc9","src/ptr_util.rs":"280404beede469b2c9ae40536323515a9213dac5d30014fac870a23b37672442","src/spirv.rs":"5dd16eb7402e70122459318ba9ac5aecda12837ed13ca5240d2e3c8611b67cbf","src/vendor/SPIRV-Cross/.clang-format":"9ec4314e20afecad827a2dbd4832256be8464e88aab4a53fab45173ed129b2ed","src/vendor/SPI
RV-Cross/.gitignore":"7f23cc92ddb5e1f584447e98d3e8ab6543fc182f1543f0f6ec29856f9250cdd6","src/vendor/SPIRV-Cross/CMakeLists.txt":"a2a76ecacf1a0620e7a1c044c18fbe7210b4f9384cb8fd03095739f25b3d4f40","src/vendor/SPIRV-Cross/GLSL.std.450.h":"20f32378793c5f416bc0704f44345c2a14c99cba3f411e3beaf1bcea372d58ba","src/vendor/SPIRV-Cross/LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","src/vendor/SPIRV-Cross/Makefile":"e2255d32e47d447b100ce3cd0753d0758dc26460e0440d14cc157d4caf9b62f1","src/vendor/SPIRV-Cross/cmake/gitversion.in.h":"75408b8a0cf86d6cf3e59d840c26ab28d3bda48f0c7f30001082a5ddf05d4184","src/vendor/SPIRV-Cross/format_all.sh":"7682215b1a669cd5a574489b3918e4009942b30a69f43d46bf68bd32a81ed399","src/vendor/SPIRV-Cross/gn/BUILD.gn":"340a042ebd24a9cdeb756b22627701a8df085349660645e4eac8531dd0024010","src/vendor/SPIRV-Cross/include/spirv_cross/barrier.hpp":"bb796625e89f75e239e92f9a61597d421ffe5fb1902d200691ebe95cf856a1f8","src/vendor/SPIRV-Cross/include/spirv_cross/external_interface.h":"cdceda962d87133e44989510edc944e99052d713869b406a8b6b2d54e3d02dd7","src/vendor/SPIRV-Cross/include/spirv_cross/image.hpp":"681d0964b144c5009424196a8bc832cb81cfe5df5b91c2f3e1bfb625765a0c50","src/vendor/SPIRV-Cross/include/spirv_cross/internal_interface.hpp":"ab8851e5708b944a9bf340ce17297d94bef4876d30c833ea83d44b16f60726f6","src/vendor/SPIRV-Cross/include/spirv_cross/sampler.hpp":"ee7c48bda908d1a5153acc6157afb35f3c66a84179ad6dea1adfdaa791a58b03","src/vendor/SPIRV-Cross/include/spirv_cross/thread_group.hpp":"70d9e0400f62de71d3775972eadc196ddb218254fa8155e8e33daf8d99957cc0","src/vendor/SPIRV-Cross/main.cpp":"059dca8074ec16a981c38449959d102b1b30983bccd80989ff91dc21e5105a74","src/vendor/SPIRV-Cross/pkg-config/spirv-cross-c-shared.pc.in":"cf4c55760569e296c5c2a0e306bb1af83272fb48a8d8ae1877b2196720129529","src/vendor/SPIRV-Cross/spirv.h":"7c2f6af34455c96957bad8a2d67197fbc6693308579d45e9740b5a9c330ca84a","src/vendor/SPIRV-Cross/spirv.hpp":"d937d4016e2fb8fca62838e0dec9f70d551751e
aff07155f060750822373bc8b","src/vendor/SPIRV-Cross/spirv_cfg.cpp":"a7b47c8d05f96a9a51ac5a5d9d24cce65ea0661110ea499caf885a4dc0aa0bf4","src/vendor/SPIRV-Cross/spirv_cfg.hpp":"c803177e728e62e90856596d62b036c93d4a99dfc86edf597ea9597f0fbff8ea","src/vendor/SPIRV-Cross/spirv_common.hpp":"713ef166de2ac85b6a327110f98f21354dc6b4e8a112e0f3aa34543b2f5f36fc","src/vendor/SPIRV-Cross/spirv_cpp.cpp":"3cef3b9df5a5a5acc2aedc0ac6440a54c4afbd503c0281e7f8c9e123479188f9","src/vendor/SPIRV-Cross/spirv_cpp.hpp":"50f3704eb9b33f63284fcde37ee58859de83bdd19b87665bc410da3b7c952bfb","src/vendor/SPIRV-Cross/spirv_cross.cpp":"1b3d1d13b71b7c53d894d12ca1f6a22aa283d003c533df931a92b7ef202ab7b2","src/vendor/SPIRV-Cross/spirv_cross.hpp":"50558c0314234a9f438821f2ac4dc3a4e3489a3ab0df17dd5558e6ff8f0d79c3","src/vendor/SPIRV-Cross/spirv_cross_c.cpp":"ab4d72758f71c0f1c57a7412d91418850a2380bc57f2caa018e13e6a8918db84","src/vendor/SPIRV-Cross/spirv_cross_c.h":"6a98ccb6b9e6f366cb137a448134f19feba7929c543bf8acec86ab98da20e9f8","src/vendor/SPIRV-Cross/spirv_cross_containers.hpp":"5058178cb018420fc7ebb33a50bb1dabebe3dbd2e848560a1b22f0c618b81d08","src/vendor/SPIRV-Cross/spirv_cross_error_handling.hpp":"bf8b9a0a2f8b15db6e4fc87373f6ab437f772b546e2643c6edb3ec28ae8c10a9","src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.cpp":"addf8ee2a81f731ecf0000a3bbf324fff463e8fb93f018d0f8ae99607c25da16","src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.hpp":"a1e78ba7bade737b4b25e73119846ef0a5618025830f68292a2feef0f3718d3a","src/vendor/SPIRV-Cross/spirv_cross_util.cpp":"6d4126ab19c0754240b9d567565efcec20cf99c792c91c622b75a9395552e009","src/vendor/SPIRV-Cross/spirv_cross_util.hpp":"85139cbc86ae9cb93a5d25d398f109c142f1f646f86425bd51d89e0486506e4d","src/vendor/SPIRV-Cross/spirv_glsl.cpp":"f0c3e917b809c28b6d372f58b1ac6997c2cc7888e724af108c12ee4ebbe22723","src/vendor/SPIRV-Cross/spirv_glsl.hpp":"69fb8349964dbab7f1c2f1725b6329d0461968309778de069a7d6f8377cff11e","src/vendor/SPIRV-Cross/spirv_hlsl.cpp":"6bef1d3b301e55317da71afc911296d1d4e6dc1957b51cb
5384e39a14a0d255d","src/vendor/SPIRV-Cross/spirv_hlsl.hpp":"199d1d677750b67964579e6abd33eefce90437813256138b2e0c4c41cc183e05","src/vendor/SPIRV-Cross/spirv_msl.cpp":"3e06f8ac4117e630b1b998d1b95f2594ef67ece4d51c124796d3f1bba3036239","src/vendor/SPIRV-Cross/spirv_msl.hpp":"95e53f03e124fd01bb450733e938666750d69c87451797c82ac8e1155a910978","src/vendor/SPIRV-Cross/spirv_parser.cpp":"76d5a9a9237a5fd6fd682a5562578d3cb2b27d0911cfb3df93e2b2c70011a8d7","src/vendor/SPIRV-Cross/spirv_parser.hpp":"b2dbbb6ba4e7fc774f9d6071e3f1765ee0824548f1732d65ebfc06b060426520","src/vendor/SPIRV-Cross/spirv_reflect.cpp":"22b0f0621afb953ba24143db4d2362c0677cd9bb2f6d7b010d0be39c5ed282f6","src/vendor/SPIRV-Cross/spirv_reflect.hpp":"35e7858287f94d865a4785e87ba9b4ab849b52ffc818801d13086ab304c9dca3","src/wrapper.cpp":"8a6bc6ed9c7916f13e4c940a51daa6ff8501e39265b0a56bcfc8ff7c60d0ba6a","src/wrapper.hpp":"c20bc6645e3041e608e3c0d3f7233c631e032485159deb0ea21f327fb0f7cd3e","tests/common/mod.rs":"2843bf104c7938d93065f7b5688c9f063ad9e5720c407c737aedc5f2dee5a80f","tests/glsl_tests.rs":"4983dec4551531d187dec6d277fdcd4d45d41def34f5afc6486f8ce627316583","tests/hlsl_tests.rs":"346842860dd6b1036584fff20192a725475f2252638152766fcff6aba6b75e06","tests/msl_tests.rs":"211d3b9cb43455a4c55bd619c05acdd21953358580c50ae75cac3f06eb26c5dd","tests/shaders/array.vert":"d0dab7ddea131e069961054f40a164602448aa78720b7ff480e141d1f7b0b2d6","tests/shaders/array.vert.spv":"8e44421590ade1716be66ad39f60fb1ce58eedeab8f0806335a7369687b308b1","tests/shaders/rasterize_disabled.vert":"da6de172549830216933c44edf18b13113d7ca87462e3d09ad50dfc9c9836821","tests/shaders/rasterize_disabled.vert.spv":"2ba809eb500ed6e5a067389ccc056551e796e7019517593d110fb62c9dca2056","tests/shaders/sampler.frag":"4c568e65176afe596dd8ef279485e992607e94d612786214ae1c6702d0322e1f","tests/shaders/sampler.frag.spv":"bd7bd1973a82dcfdf5755361fa4dd420fdf1c32c5de0a6f0896a8d5971f98684","tests/shaders/simple.vert":"ea143c97dff5ef03728b96b2dd893bdc59d56651581ecf9fe50f10807b0efdd0
","tests/shaders/simple.vert.spv":"a2b5094ffd76288e0d08c37ce0351e28f20bb6d80ddd73fc44a71c1c7cbbf7db","tests/shaders/specialization.comp":"ce32fa1615737209f2e465ea347d79030ddcb33a88c38447e7cde7dffc920163","tests/shaders/specialization.comp.spv":"848604e37b870b8999692b266677be2ce0df6ce38093a0d81e6bc43d0bdf8a3f","tests/shaders/struct.frag":"d8840bb1961d6f14609b00ee54406c1e3ea31cecd8231b77cfb73d28b71910c0","tests/shaders/struct.frag.spv":"774aa886374eb95abf9bb7d0045ee77d97e26529e9ec96b90991a515fdbca4be","tests/shaders/struct.vert":"9299cda83ddb5b5c3d95ab0d057e4df2af137dfd92d6c4d3e96295b7d42e29a1","tests/shaders/struct.vert.spv":"4a82bdee72616ac058bc60d4255efa3e78199a2b8597570c013bebbee7107fb7","tests/shaders/workgroup.comp":"478044b5392e0d1fb042253d71ea6bf7b8a014af4a6ee35d8db4c5343ac69739","tests/shaders/workgroup.comp.spv":"72f636fe3d1d6d0c5963f71bf4349c7e40d544331b33b6b64fb5b65784e6abee","tests/spirv_tests.rs":"fe711deea874de5a1daf1759e4d88c014700f6d06e244288f1eac1a37c54e2ff"},"package":null} \ No newline at end of file 
+{"files":{"Cargo.toml":"2d38375182d0ff62d527ba5fe9823e2dd294584e40db7c576fd5d3460adc11bc","build.rs":"68d1e7d99b4d890e21606aa7e7904d4f9b8caed6c0951bb8f5028745a7103d1e","src/bindings_native.rs":"062f30f40029048ef9fc192485c3efd888941d83050620e45e1e173fed865ca5","src/bindings_wasm.rs":"82a9d2a329dd91a467754e6c5df0effaa66ba44e3d070d52bce1be44b6d31252","src/bindings_wasm_functions.rs":"2515dd91e98e769fe282e7dc4f60820f4be7365c784a9f3bd17310d18a136da8","src/compiler.rs":"c56a8882335ef29b5c8096b572d3dbd396edf09cc2ef633bb7cfedcb83a08fba","src/emscripten.rs":"3169890001970610013026468739910afca0d85e00d7e34beadfdd31bbcbeeb7","src/glsl.rs":"0a80523899d168796a623d6ac1f65b0422843ec0bb29c6fe1bb5cb837ceee163","src/hlsl.rs":"f6b5a61e00cbabef4c64eda2640229ea82b130f4254d36d32dda7ced357cc213","src/lib.rs":"cc41cbbe48f3e96791ba5338c66fa1fe0e533eaed6bbdced3f008d5e9fe6c6ce","src/msl.rs":"5505d1d626d8e7903ffa9d137e7392a1670fa7b574eacc4bbd33673937b36ea7","src/ptr_util.rs":"280404beede469b2c9ae40536323515a9213dac5d30014fac870a23b37672442","src/spirv.rs":"527e47b1e7b5f107863aa8095c3d3ea237406a3cbeb254759c724bddf15365a9","src/vendor/SPIRV-Cross/.clang-format":"9ec4314e20afecad827a2dbd4832256be8464e88aab4a53fab45173ed129b2ed","src/vendor/SPIRV-Cross/.gitignore":"7f23cc92ddb5e1f584447e98d3e8ab6543fc182f1543f0f6ec29856f9250cdd6","src/vendor/SPIRV-Cross/.travis.yml":"abcc1b2f622b65feafd3e37a2b5e867fce3cf7211cae9fb2bf92a6de79100be4","src/vendor/SPIRV-Cross/CMakeLists.txt":"79130cba36427af1a3f64be829e265462b434bfcc383cf1783cff699fbe54a75","src/vendor/SPIRV-Cross/CODE_OF_CONDUCT.md":"a25e58cd66a9543e1500db9a5c3c027e874893ad1a264530bf26eb10918b5d80","src/vendor/SPIRV-Cross/GLSL.std.450.h":"20f32378793c5f416bc0704f44345c2a14c99cba3f411e3beaf1bcea372d58ba","src/vendor/SPIRV-Cross/LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","src/vendor/SPIRV-Cross/Makefile":"e2255d32e47d447b100ce3cd0753d0758dc26460e0440d14cc157d4caf9b62f1","src/vendor/SPIRV-Cross/README.md":"65552d5ff69c6
92a476a6841e324e2e2404bae049a36faefe5182617264bcc8c","src/vendor/SPIRV-Cross/appveyor.yml":"0f18b8ae5fadc027a20f69307fc32e56a97edfbdcb55ac392852308f88545a04","src/vendor/SPIRV-Cross/include/spirv_cross/barrier.hpp":"bb796625e89f75e239e92f9a61597d421ffe5fb1902d200691ebe95cf856a1f8","src/vendor/SPIRV-Cross/include/spirv_cross/external_interface.h":"cdceda962d87133e44989510edc944e99052d713869b406a8b6b2d54e3d02dd7","src/vendor/SPIRV-Cross/include/spirv_cross/image.hpp":"681d0964b144c5009424196a8bc832cb81cfe5df5b91c2f3e1bfb625765a0c50","src/vendor/SPIRV-Cross/include/spirv_cross/internal_interface.hpp":"ab8851e5708b944a9bf340ce17297d94bef4876d30c833ea83d44b16f60726f6","src/vendor/SPIRV-Cross/include/spirv_cross/sampler.hpp":"ee7c48bda908d1a5153acc6157afb35f3c66a84179ad6dea1adfdaa791a58b03","src/vendor/SPIRV-Cross/include/spirv_cross/thread_group.hpp":"70d9e0400f62de71d3775972eadc196ddb218254fa8155e8e33daf8d99957cc0","src/vendor/SPIRV-Cross/main.cpp":"14cb630d50dd4f17d9a88ef65c217f9788c9da711abbce6f147f5b0c0a1be11a","src/vendor/SPIRV-Cross/pkg-config/spirv-cross-c-shared.pc.in":"cf4c55760569e296c5c2a0e306bb1af83272fb48a8d8ae1877b2196720129529","src/vendor/SPIRV-Cross/spirv.h":"7c2f6af34455c96957bad8a2d67197fbc6693308579d45e9740b5a9c330ca84a","src/vendor/SPIRV-Cross/spirv.hpp":"d937d4016e2fb8fca62838e0dec9f70d551751eaff07155f060750822373bc8b","src/vendor/SPIRV-Cross/spirv_cfg.cpp":"a7b47c8d05f96a9a51ac5a5d9d24cce65ea0661110ea499caf885a4dc0aa0bf4","src/vendor/SPIRV-Cross/spirv_cfg.hpp":"c803177e728e62e90856596d62b036c93d4a99dfc86edf597ea9597f0fbff8ea","src/vendor/SPIRV-Cross/spirv_common.hpp":"713ef166de2ac85b6a327110f98f21354dc6b4e8a112e0f3aa34543b2f5f36fc","src/vendor/SPIRV-Cross/spirv_cpp.cpp":"3cef3b9df5a5a5acc2aedc0ac6440a54c4afbd503c0281e7f8c9e123479188f9","src/vendor/SPIRV-Cross/spirv_cpp.hpp":"50f3704eb9b33f63284fcde37ee58859de83bdd19b87665bc410da3b7c952bfb","src/vendor/SPIRV-Cross/spirv_cross.cpp":"1b3d1d13b71b7c53d894d12ca1f6a22aa283d003c533df931a92b7ef202ab7b2","
src/vendor/SPIRV-Cross/spirv_cross.hpp":"50558c0314234a9f438821f2ac4dc3a4e3489a3ab0df17dd5558e6ff8f0d79c3","src/vendor/SPIRV-Cross/spirv_cross_c.cpp":"dbe5c0a7d87b314e77605b6eaaa095faa6c235f564edff341f88e3a526a9d3cf","src/vendor/SPIRV-Cross/spirv_cross_c.h":"4292daeb0df82e4821b263391a04e8972010053369c418b46d80628443740af2","src/vendor/SPIRV-Cross/spirv_cross_containers.hpp":"5058178cb018420fc7ebb33a50bb1dabebe3dbd2e848560a1b22f0c618b81d08","src/vendor/SPIRV-Cross/spirv_cross_error_handling.hpp":"bf8b9a0a2f8b15db6e4fc87373f6ab437f772b546e2643c6edb3ec28ae8c10a9","src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.cpp":"addf8ee2a81f731ecf0000a3bbf324fff463e8fb93f018d0f8ae99607c25da16","src/vendor/SPIRV-Cross/spirv_cross_parsed_ir.hpp":"a1e78ba7bade737b4b25e73119846ef0a5618025830f68292a2feef0f3718d3a","src/vendor/SPIRV-Cross/spirv_cross_util.cpp":"6d4126ab19c0754240b9d567565efcec20cf99c792c91c622b75a9395552e009","src/vendor/SPIRV-Cross/spirv_cross_util.hpp":"85139cbc86ae9cb93a5d25d398f109c142f1f646f86425bd51d89e0486506e4d","src/vendor/SPIRV-Cross/spirv_glsl.cpp":"d5841c5979041fd05997449760e302bd5d9e46e827516446a805d87d53221971","src/vendor/SPIRV-Cross/spirv_glsl.hpp":"b4e8ed6c89b056cc96a16d7972e3de90b5233f73deaefa89988f42314fd8713f","src/vendor/SPIRV-Cross/spirv_hlsl.cpp":"ce2888dbec8780e7022266bfe509d72dc492add7c7ee7d3c20620215889c10e8","src/vendor/SPIRV-Cross/spirv_hlsl.hpp":"ab5a6a5bea80f4b30323f6eb2439fab90608ec1533c6b17be8ee4cf3209cd877","src/vendor/SPIRV-Cross/spirv_msl.cpp":"d7ad7c4289b27d06b1c6b4de63fa370b688fc64a5c02b73f859c1457c9f5052e","src/vendor/SPIRV-Cross/spirv_msl.hpp":"95e53f03e124fd01bb450733e938666750d69c87451797c82ac8e1155a910978","src/vendor/SPIRV-Cross/spirv_parser.cpp":"76d5a9a9237a5fd6fd682a5562578d3cb2b27d0911cfb3df93e2b2c70011a8d7","src/vendor/SPIRV-Cross/spirv_parser.hpp":"b2dbbb6ba4e7fc774f9d6071e3f1765ee0824548f1732d65ebfc06b060426520","src/vendor/SPIRV-Cross/spirv_reflect.cpp":"22b0f0621afb953ba24143db4d2362c0677cd9bb2f6d7b010d0be39c5ed282f6","sr
c/vendor/SPIRV-Cross/spirv_reflect.hpp":"35e7858287f94d865a4785e87ba9b4ab849b52ffc818801d13086ab304c9dca3","src/wrapper.cpp":"b3867b8d7b3c9fcee925aa8f025ff5cfd35d4858d4200ace7bd9881a41348622","src/wrapper.hpp":"d1260cb37de98f81463fda005b13c40ef719c5e62bf5e4a73692fe630fea3a04","tests/common/mod.rs":"2843bf104c7938d93065f7b5688c9f063ad9e5720c407c737aedc5f2dee5a80f","tests/glsl_tests.rs":"4983dec4551531d187dec6d277fdcd4d45d41def34f5afc6486f8ce627316583","tests/hlsl_tests.rs":"346842860dd6b1036584fff20192a725475f2252638152766fcff6aba6b75e06","tests/msl_tests.rs":"211d3b9cb43455a4c55bd619c05acdd21953358580c50ae75cac3f06eb26c5dd","tests/shaders/array.vert":"d0dab7ddea131e069961054f40a164602448aa78720b7ff480e141d1f7b0b2d6","tests/shaders/array.vert.spv":"8e44421590ade1716be66ad39f60fb1ce58eedeab8f0806335a7369687b308b1","tests/shaders/multiple_entry_points.cl":"2abbe57f2387f07f7f5f4cd375e47900be9c13bdc79aa0ed98a43a798cb0df81","tests/shaders/multiple_entry_points.cl.spv":"bdd34ce6765dbeab637631c3cbdf251532870d9fd6cd6c54883c0c872058ab3b","tests/shaders/rasterize_disabled.vert":"da6de172549830216933c44edf18b13113d7ca87462e3d09ad50dfc9c9836821","tests/shaders/rasterize_disabled.vert.spv":"2ba809eb500ed6e5a067389ccc056551e796e7019517593d110fb62c9dca2056","tests/shaders/sampler.frag":"4c568e65176afe596dd8ef279485e992607e94d612786214ae1c6702d0322e1f","tests/shaders/sampler.frag.spv":"bd7bd1973a82dcfdf5755361fa4dd420fdf1c32c5de0a6f0896a8d5971f98684","tests/shaders/simple.vert":"ea143c97dff5ef03728b96b2dd893bdc59d56651581ecf9fe50f10807b0efdd0","tests/shaders/simple.vert.spv":"a2b5094ffd76288e0d08c37ce0351e28f20bb6d80ddd73fc44a71c1c7cbbf7db","tests/shaders/specialization.comp":"ce32fa1615737209f2e465ea347d79030ddcb33a88c38447e7cde7dffc920163","tests/shaders/specialization.comp.spv":"848604e37b870b8999692b266677be2ce0df6ce38093a0d81e6bc43d0bdf8a3f","tests/shaders/struct.frag":"d8840bb1961d6f14609b00ee54406c1e3ea31cecd8231b77cfb73d28b71910c0","tests/shaders/struct.frag.spv":"774aa88637
4eb95abf9bb7d0045ee77d97e26529e9ec96b90991a515fdbca4be","tests/shaders/struct.vert":"9299cda83ddb5b5c3d95ab0d057e4df2af137dfd92d6c4d3e96295b7d42e29a1","tests/shaders/struct.vert.spv":"4a82bdee72616ac058bc60d4255efa3e78199a2b8597570c013bebbee7107fb7","tests/shaders/two_ubo.vert":"be109b2c65e5e9e1bb0dab968d7f651232f6b1c46935a3928f980bf7a40f2d62","tests/shaders/two_ubo.vert.spv":"efd14e2d0a782d61dd944711f30b9e7fcb14af17593c1fe4e11cf2b7e232bcc2","tests/shaders/workgroup.comp":"478044b5392e0d1fb042253d71ea6bf7b8a014af4a6ee35d8db4c5343ac69739","tests/shaders/workgroup.comp.spv":"72f636fe3d1d6d0c5963f71bf4349c7e40d544331b33b6b64fb5b65784e6abee","tests/spirv_tests.rs":"f319ee131618cb00436bc956d186e2e4ce88131fd40182ebeaa497c464d77488"},"package":null} \ No newline at end of file diff --git a/third_party/rust/spirv-cross-internal/Cargo.toml b/third_party/rust/spirv-cross-internal/Cargo.toml index 3a6f1ed3487e..adeb182037bb 100644 --- a/third_party/rust/spirv-cross-internal/Cargo.toml +++ b/third_party/rust/spirv-cross-internal/Cargo.toml @@ -23,3 +23,7 @@ msl = [] [target.'cfg(not(target_arch = "wasm32"))'.build-dependencies] cc = "1.0.4" + +#[target.wasm32-unknown-unknown.dependencies] +#wasm-bindgen = "0.2.33" +#js-sys = "0.3.10" diff --git a/third_party/rust/spirv-cross-internal/build.rs b/third_party/rust/spirv-cross-internal/build.rs index cf39f7555360..2f21dd75a337 100644 --- a/third_party/rust/spirv-cross-internal/build.rs +++ b/third_party/rust/spirv-cross-internal/build.rs @@ -1,7 +1,7 @@ fn main() { // Prevent building SPIRV-Cross on wasm32 target let target_arch = std::env::var("CARGO_CFG_TARGET_ARCH"); - if let Ok(ref arch) = target_arch { + if let Ok(arch) = target_arch { if "wasm32" == arch { return; } @@ -14,7 +14,7 @@ fn main() { let is_ios = target_os.is_ok() && target_os.unwrap() == "ios"; let mut build = cc::Build::new(); - build.cpp(true).static_crt(false); + build.cpp(true); let compiler = build.try_get_compiler(); let is_clang = compiler.is_ok() && 
compiler.unwrap().is_like_clang(); @@ -25,10 +25,6 @@ fn main() { build.flag_if_supported("-std=c++14"); } - // add Gecko-specific flags - build.flag("-fno-exceptions"); - build.flag("-fno-rtti"); - build .flag("-DSPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS") .flag("-DSPIRV_CROSS_WRAPPER_NO_EXCEPTIONS"); diff --git a/third_party/rust/spirv-cross-internal/src/bindings_native.rs b/third_party/rust/spirv-cross-internal/src/bindings_native.rs index 2c65a3183aec..0c7fb579cba3 100644 --- a/third_party/rust/spirv-cross-internal/src/bindings_native.rs +++ b/third_party/rust/spirv-cross-internal/src/bindings_native.rs @@ -8,17 +8,14 @@ pub mod root { #[allow(unused_imports)] use self::super::super::root; pub type Id = ::std::os::raw::c_uint; - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum SourceLanguage { - SourceLanguageUnknown = 0, - SourceLanguageESSL = 1, - SourceLanguageGLSL = 2, - SourceLanguageOpenCL_C = 3, - SourceLanguageOpenCL_CPP = 4, - SourceLanguageHLSL = 5, - SourceLanguageMax = 2147483647, - } + pub const SourceLanguage_SourceLanguageUnknown: root::spv::SourceLanguage = 0; + pub const SourceLanguage_SourceLanguageESSL: root::spv::SourceLanguage = 1; + pub const SourceLanguage_SourceLanguageGLSL: root::spv::SourceLanguage = 2; + pub const SourceLanguage_SourceLanguageOpenCL_C: root::spv::SourceLanguage = 3; + pub const SourceLanguage_SourceLanguageOpenCL_CPP: root::spv::SourceLanguage = 4; + pub const SourceLanguage_SourceLanguageHLSL: root::spv::SourceLanguage = 5; + pub const SourceLanguage_SourceLanguageMax: root::spv::SourceLanguage = 2147483647; + pub type SourceLanguage = u32; #[repr(u32)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum ExecutionModel { @@ -39,287 +36,351 @@ pub mod root { ExecutionModelCallableNV = 5318, ExecutionModelMax = 2147483647, } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum AddressingModel { - AddressingModelLogical = 0, - AddressingModelPhysical32 = 1, - 
AddressingModelPhysical64 = 2, - AddressingModelPhysicalStorageBuffer64EXT = 5348, - AddressingModelMax = 2147483647, + pub const AddressingModel_AddressingModelLogical: root::spv::AddressingModel = 0; + pub const AddressingModel_AddressingModelPhysical32: root::spv::AddressingModel = 1; + pub const AddressingModel_AddressingModelPhysical64: root::spv::AddressingModel = 2; + pub const AddressingModel_AddressingModelPhysicalStorageBuffer64: + root::spv::AddressingModel = 5348; + pub const AddressingModel_AddressingModelPhysicalStorageBuffer64EXT: + root::spv::AddressingModel = 5348; + pub const AddressingModel_AddressingModelMax: root::spv::AddressingModel = 2147483647; + pub type AddressingModel = u32; + pub const MemoryModel_MemoryModelSimple: root::spv::MemoryModel = 0; + pub const MemoryModel_MemoryModelGLSL450: root::spv::MemoryModel = 1; + pub const MemoryModel_MemoryModelOpenCL: root::spv::MemoryModel = 2; + pub const MemoryModel_MemoryModelVulkan: root::spv::MemoryModel = 3; + pub const MemoryModel_MemoryModelVulkanKHR: root::spv::MemoryModel = 3; + pub const MemoryModel_MemoryModelMax: root::spv::MemoryModel = 2147483647; + pub type MemoryModel = u32; + pub const ExecutionMode_ExecutionModeInvocations: root::spv::ExecutionMode = 0; + pub const ExecutionMode_ExecutionModeSpacingEqual: root::spv::ExecutionMode = 1; + pub const ExecutionMode_ExecutionModeSpacingFractionalEven: root::spv::ExecutionMode = 2; + pub const ExecutionMode_ExecutionModeSpacingFractionalOdd: root::spv::ExecutionMode = 3; + pub const ExecutionMode_ExecutionModeVertexOrderCw: root::spv::ExecutionMode = 4; + pub const ExecutionMode_ExecutionModeVertexOrderCcw: root::spv::ExecutionMode = 5; + pub const ExecutionMode_ExecutionModePixelCenterInteger: root::spv::ExecutionMode = 6; + pub const ExecutionMode_ExecutionModeOriginUpperLeft: root::spv::ExecutionMode = 7; + pub const ExecutionMode_ExecutionModeOriginLowerLeft: root::spv::ExecutionMode = 8; + pub const 
ExecutionMode_ExecutionModeEarlyFragmentTests: root::spv::ExecutionMode = 9; + pub const ExecutionMode_ExecutionModePointMode: root::spv::ExecutionMode = 10; + pub const ExecutionMode_ExecutionModeXfb: root::spv::ExecutionMode = 11; + pub const ExecutionMode_ExecutionModeDepthReplacing: root::spv::ExecutionMode = 12; + pub const ExecutionMode_ExecutionModeDepthGreater: root::spv::ExecutionMode = 14; + pub const ExecutionMode_ExecutionModeDepthLess: root::spv::ExecutionMode = 15; + pub const ExecutionMode_ExecutionModeDepthUnchanged: root::spv::ExecutionMode = 16; + pub const ExecutionMode_ExecutionModeLocalSize: root::spv::ExecutionMode = 17; + pub const ExecutionMode_ExecutionModeLocalSizeHint: root::spv::ExecutionMode = 18; + pub const ExecutionMode_ExecutionModeInputPoints: root::spv::ExecutionMode = 19; + pub const ExecutionMode_ExecutionModeInputLines: root::spv::ExecutionMode = 20; + pub const ExecutionMode_ExecutionModeInputLinesAdjacency: root::spv::ExecutionMode = 21; + pub const ExecutionMode_ExecutionModeTriangles: root::spv::ExecutionMode = 22; + pub const ExecutionMode_ExecutionModeInputTrianglesAdjacency: root::spv::ExecutionMode = 23; + pub const ExecutionMode_ExecutionModeQuads: root::spv::ExecutionMode = 24; + pub const ExecutionMode_ExecutionModeIsolines: root::spv::ExecutionMode = 25; + pub const ExecutionMode_ExecutionModeOutputVertices: root::spv::ExecutionMode = 26; + pub const ExecutionMode_ExecutionModeOutputPoints: root::spv::ExecutionMode = 27; + pub const ExecutionMode_ExecutionModeOutputLineStrip: root::spv::ExecutionMode = 28; + pub const ExecutionMode_ExecutionModeOutputTriangleStrip: root::spv::ExecutionMode = 29; + pub const ExecutionMode_ExecutionModeVecTypeHint: root::spv::ExecutionMode = 30; + pub const ExecutionMode_ExecutionModeContractionOff: root::spv::ExecutionMode = 31; + pub const ExecutionMode_ExecutionModeInitializer: root::spv::ExecutionMode = 33; + pub const ExecutionMode_ExecutionModeFinalizer: root::spv::ExecutionMode 
= 34; + pub const ExecutionMode_ExecutionModeSubgroupSize: root::spv::ExecutionMode = 35; + pub const ExecutionMode_ExecutionModeSubgroupsPerWorkgroup: root::spv::ExecutionMode = 36; + pub const ExecutionMode_ExecutionModeSubgroupsPerWorkgroupId: root::spv::ExecutionMode = 37; + pub const ExecutionMode_ExecutionModeLocalSizeId: root::spv::ExecutionMode = 38; + pub const ExecutionMode_ExecutionModeLocalSizeHintId: root::spv::ExecutionMode = 39; + pub const ExecutionMode_ExecutionModePostDepthCoverage: root::spv::ExecutionMode = 4446; + pub const ExecutionMode_ExecutionModeDenormPreserve: root::spv::ExecutionMode = 4459; + pub const ExecutionMode_ExecutionModeDenormFlushToZero: root::spv::ExecutionMode = 4460; + pub const ExecutionMode_ExecutionModeSignedZeroInfNanPreserve: root::spv::ExecutionMode = + 4461; + pub const ExecutionMode_ExecutionModeRoundingModeRTE: root::spv::ExecutionMode = 4462; + pub const ExecutionMode_ExecutionModeRoundingModeRTZ: root::spv::ExecutionMode = 4463; + pub const ExecutionMode_ExecutionModeStencilRefReplacingEXT: root::spv::ExecutionMode = + 5027; + pub const ExecutionMode_ExecutionModeOutputLinesNV: root::spv::ExecutionMode = 5269; + pub const ExecutionMode_ExecutionModeOutputPrimitivesNV: root::spv::ExecutionMode = 5270; + pub const ExecutionMode_ExecutionModeDerivativeGroupQuadsNV: root::spv::ExecutionMode = + 5289; + pub const ExecutionMode_ExecutionModeDerivativeGroupLinearNV: root::spv::ExecutionMode = + 5290; + pub const ExecutionMode_ExecutionModeOutputTrianglesNV: root::spv::ExecutionMode = 5298; + pub const ExecutionMode_ExecutionModePixelInterlockOrderedEXT: root::spv::ExecutionMode = + 5366; + pub const ExecutionMode_ExecutionModePixelInterlockUnorderedEXT: root::spv::ExecutionMode = + 5367; + pub const ExecutionMode_ExecutionModeSampleInterlockOrderedEXT: root::spv::ExecutionMode = + 5368; + pub const ExecutionMode_ExecutionModeSampleInterlockUnorderedEXT: root::spv::ExecutionMode = + 5369; + pub const 
ExecutionMode_ExecutionModeShadingRateInterlockOrderedEXT: + root::spv::ExecutionMode = 5370; + pub const ExecutionMode_ExecutionModeShadingRateInterlockUnorderedEXT: + root::spv::ExecutionMode = 5371; + pub const ExecutionMode_ExecutionModeMax: root::spv::ExecutionMode = 2147483647; + pub type ExecutionMode = u32; + pub const StorageClass_StorageClassUniformConstant: root::spv::StorageClass = 0; + pub const StorageClass_StorageClassInput: root::spv::StorageClass = 1; + pub const StorageClass_StorageClassUniform: root::spv::StorageClass = 2; + pub const StorageClass_StorageClassOutput: root::spv::StorageClass = 3; + pub const StorageClass_StorageClassWorkgroup: root::spv::StorageClass = 4; + pub const StorageClass_StorageClassCrossWorkgroup: root::spv::StorageClass = 5; + pub const StorageClass_StorageClassPrivate: root::spv::StorageClass = 6; + pub const StorageClass_StorageClassFunction: root::spv::StorageClass = 7; + pub const StorageClass_StorageClassGeneric: root::spv::StorageClass = 8; + pub const StorageClass_StorageClassPushConstant: root::spv::StorageClass = 9; + pub const StorageClass_StorageClassAtomicCounter: root::spv::StorageClass = 10; + pub const StorageClass_StorageClassImage: root::spv::StorageClass = 11; + pub const StorageClass_StorageClassStorageBuffer: root::spv::StorageClass = 12; + pub const StorageClass_StorageClassCallableDataNV: root::spv::StorageClass = 5328; + pub const StorageClass_StorageClassIncomingCallableDataNV: root::spv::StorageClass = 5329; + pub const StorageClass_StorageClassRayPayloadNV: root::spv::StorageClass = 5338; + pub const StorageClass_StorageClassHitAttributeNV: root::spv::StorageClass = 5339; + pub const StorageClass_StorageClassIncomingRayPayloadNV: root::spv::StorageClass = 5342; + pub const StorageClass_StorageClassShaderRecordBufferNV: root::spv::StorageClass = 5343; + pub const StorageClass_StorageClassPhysicalStorageBuffer: root::spv::StorageClass = 5349; + pub const 
StorageClass_StorageClassPhysicalStorageBufferEXT: root::spv::StorageClass = 5349; + pub const StorageClass_StorageClassMax: root::spv::StorageClass = 2147483647; + pub type StorageClass = u32; + pub const Dim_Dim1D: root::spv::Dim = 0; + pub const Dim_Dim2D: root::spv::Dim = 1; + pub const Dim_Dim3D: root::spv::Dim = 2; + pub const Dim_DimCube: root::spv::Dim = 3; + pub const Dim_DimRect: root::spv::Dim = 4; + pub const Dim_DimBuffer: root::spv::Dim = 5; + pub const Dim_DimSubpassData: root::spv::Dim = 6; + pub const Dim_DimMax: root::spv::Dim = 2147483647; + pub type Dim = u32; + pub const SamplerAddressingMode_SamplerAddressingModeNone: + root::spv::SamplerAddressingMode = 0; + pub const SamplerAddressingMode_SamplerAddressingModeClampToEdge: + root::spv::SamplerAddressingMode = 1; + pub const SamplerAddressingMode_SamplerAddressingModeClamp: + root::spv::SamplerAddressingMode = 2; + pub const SamplerAddressingMode_SamplerAddressingModeRepeat: + root::spv::SamplerAddressingMode = 3; + pub const SamplerAddressingMode_SamplerAddressingModeRepeatMirrored: + root::spv::SamplerAddressingMode = 4; + pub const SamplerAddressingMode_SamplerAddressingModeMax: root::spv::SamplerAddressingMode = + 2147483647; + pub type SamplerAddressingMode = u32; + pub const SamplerFilterMode_SamplerFilterModeNearest: root::spv::SamplerFilterMode = 0; + pub const SamplerFilterMode_SamplerFilterModeLinear: root::spv::SamplerFilterMode = 1; + pub const SamplerFilterMode_SamplerFilterModeMax: root::spv::SamplerFilterMode = 2147483647; + pub type SamplerFilterMode = u32; + pub const ImageFormat_ImageFormatUnknown: root::spv::ImageFormat = 0; + pub const ImageFormat_ImageFormatRgba32f: root::spv::ImageFormat = 1; + pub const ImageFormat_ImageFormatRgba16f: root::spv::ImageFormat = 2; + pub const ImageFormat_ImageFormatR32f: root::spv::ImageFormat = 3; + pub const ImageFormat_ImageFormatRgba8: root::spv::ImageFormat = 4; + pub const ImageFormat_ImageFormatRgba8Snorm: root::spv::ImageFormat = 
5; + pub const ImageFormat_ImageFormatRg32f: root::spv::ImageFormat = 6; + pub const ImageFormat_ImageFormatRg16f: root::spv::ImageFormat = 7; + pub const ImageFormat_ImageFormatR11fG11fB10f: root::spv::ImageFormat = 8; + pub const ImageFormat_ImageFormatR16f: root::spv::ImageFormat = 9; + pub const ImageFormat_ImageFormatRgba16: root::spv::ImageFormat = 10; + pub const ImageFormat_ImageFormatRgb10A2: root::spv::ImageFormat = 11; + pub const ImageFormat_ImageFormatRg16: root::spv::ImageFormat = 12; + pub const ImageFormat_ImageFormatRg8: root::spv::ImageFormat = 13; + pub const ImageFormat_ImageFormatR16: root::spv::ImageFormat = 14; + pub const ImageFormat_ImageFormatR8: root::spv::ImageFormat = 15; + pub const ImageFormat_ImageFormatRgba16Snorm: root::spv::ImageFormat = 16; + pub const ImageFormat_ImageFormatRg16Snorm: root::spv::ImageFormat = 17; + pub const ImageFormat_ImageFormatRg8Snorm: root::spv::ImageFormat = 18; + pub const ImageFormat_ImageFormatR16Snorm: root::spv::ImageFormat = 19; + pub const ImageFormat_ImageFormatR8Snorm: root::spv::ImageFormat = 20; + pub const ImageFormat_ImageFormatRgba32i: root::spv::ImageFormat = 21; + pub const ImageFormat_ImageFormatRgba16i: root::spv::ImageFormat = 22; + pub const ImageFormat_ImageFormatRgba8i: root::spv::ImageFormat = 23; + pub const ImageFormat_ImageFormatR32i: root::spv::ImageFormat = 24; + pub const ImageFormat_ImageFormatRg32i: root::spv::ImageFormat = 25; + pub const ImageFormat_ImageFormatRg16i: root::spv::ImageFormat = 26; + pub const ImageFormat_ImageFormatRg8i: root::spv::ImageFormat = 27; + pub const ImageFormat_ImageFormatR16i: root::spv::ImageFormat = 28; + pub const ImageFormat_ImageFormatR8i: root::spv::ImageFormat = 29; + pub const ImageFormat_ImageFormatRgba32ui: root::spv::ImageFormat = 30; + pub const ImageFormat_ImageFormatRgba16ui: root::spv::ImageFormat = 31; + pub const ImageFormat_ImageFormatRgba8ui: root::spv::ImageFormat = 32; + pub const ImageFormat_ImageFormatR32ui: 
root::spv::ImageFormat = 33; + pub const ImageFormat_ImageFormatRgb10a2ui: root::spv::ImageFormat = 34; + pub const ImageFormat_ImageFormatRg32ui: root::spv::ImageFormat = 35; + pub const ImageFormat_ImageFormatRg16ui: root::spv::ImageFormat = 36; + pub const ImageFormat_ImageFormatRg8ui: root::spv::ImageFormat = 37; + pub const ImageFormat_ImageFormatR16ui: root::spv::ImageFormat = 38; + pub const ImageFormat_ImageFormatR8ui: root::spv::ImageFormat = 39; + pub const ImageFormat_ImageFormatMax: root::spv::ImageFormat = 2147483647; + pub type ImageFormat = u32; + pub const ImageChannelOrder_ImageChannelOrderR: root::spv::ImageChannelOrder = 0; + pub const ImageChannelOrder_ImageChannelOrderA: root::spv::ImageChannelOrder = 1; + pub const ImageChannelOrder_ImageChannelOrderRG: root::spv::ImageChannelOrder = 2; + pub const ImageChannelOrder_ImageChannelOrderRA: root::spv::ImageChannelOrder = 3; + pub const ImageChannelOrder_ImageChannelOrderRGB: root::spv::ImageChannelOrder = 4; + pub const ImageChannelOrder_ImageChannelOrderRGBA: root::spv::ImageChannelOrder = 5; + pub const ImageChannelOrder_ImageChannelOrderBGRA: root::spv::ImageChannelOrder = 6; + pub const ImageChannelOrder_ImageChannelOrderARGB: root::spv::ImageChannelOrder = 7; + pub const ImageChannelOrder_ImageChannelOrderIntensity: root::spv::ImageChannelOrder = 8; + pub const ImageChannelOrder_ImageChannelOrderLuminance: root::spv::ImageChannelOrder = 9; + pub const ImageChannelOrder_ImageChannelOrderRx: root::spv::ImageChannelOrder = 10; + pub const ImageChannelOrder_ImageChannelOrderRGx: root::spv::ImageChannelOrder = 11; + pub const ImageChannelOrder_ImageChannelOrderRGBx: root::spv::ImageChannelOrder = 12; + pub const ImageChannelOrder_ImageChannelOrderDepth: root::spv::ImageChannelOrder = 13; + pub const ImageChannelOrder_ImageChannelOrderDepthStencil: root::spv::ImageChannelOrder = + 14; + pub const ImageChannelOrder_ImageChannelOrdersRGB: root::spv::ImageChannelOrder = 15; + pub const 
ImageChannelOrder_ImageChannelOrdersRGBx: root::spv::ImageChannelOrder = 16; + pub const ImageChannelOrder_ImageChannelOrdersRGBA: root::spv::ImageChannelOrder = 17; + pub const ImageChannelOrder_ImageChannelOrdersBGRA: root::spv::ImageChannelOrder = 18; + pub const ImageChannelOrder_ImageChannelOrderABGR: root::spv::ImageChannelOrder = 19; + pub const ImageChannelOrder_ImageChannelOrderMax: root::spv::ImageChannelOrder = 2147483647; + pub type ImageChannelOrder = u32; + pub const ImageChannelDataType_ImageChannelDataTypeSnormInt8: + root::spv::ImageChannelDataType = 0; + pub const ImageChannelDataType_ImageChannelDataTypeSnormInt16: + root::spv::ImageChannelDataType = 1; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt8: + root::spv::ImageChannelDataType = 2; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt16: + root::spv::ImageChannelDataType = 3; + pub const ImageChannelDataType_ImageChannelDataTypeUnormShort565: + root::spv::ImageChannelDataType = 4; + pub const ImageChannelDataType_ImageChannelDataTypeUnormShort555: + root::spv::ImageChannelDataType = 5; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt101010: + root::spv::ImageChannelDataType = 6; + pub const ImageChannelDataType_ImageChannelDataTypeSignedInt8: + root::spv::ImageChannelDataType = 7; + pub const ImageChannelDataType_ImageChannelDataTypeSignedInt16: + root::spv::ImageChannelDataType = 8; + pub const ImageChannelDataType_ImageChannelDataTypeSignedInt32: + root::spv::ImageChannelDataType = 9; + pub const ImageChannelDataType_ImageChannelDataTypeUnsignedInt8: + root::spv::ImageChannelDataType = 10; + pub const ImageChannelDataType_ImageChannelDataTypeUnsignedInt16: + root::spv::ImageChannelDataType = 11; + pub const ImageChannelDataType_ImageChannelDataTypeUnsignedInt32: + root::spv::ImageChannelDataType = 12; + pub const ImageChannelDataType_ImageChannelDataTypeHalfFloat: + root::spv::ImageChannelDataType = 13; + pub const 
ImageChannelDataType_ImageChannelDataTypeFloat: root::spv::ImageChannelDataType = + 14; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt24: + root::spv::ImageChannelDataType = 15; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt101010_2: + root::spv::ImageChannelDataType = 16; + pub const ImageChannelDataType_ImageChannelDataTypeMax: root::spv::ImageChannelDataType = + 2147483647; + pub type ImageChannelDataType = u32; + pub const ImageOperandsShift_ImageOperandsBiasShift: root::spv::ImageOperandsShift = 0; + pub const ImageOperandsShift_ImageOperandsLodShift: root::spv::ImageOperandsShift = 1; + pub const ImageOperandsShift_ImageOperandsGradShift: root::spv::ImageOperandsShift = 2; + pub const ImageOperandsShift_ImageOperandsConstOffsetShift: root::spv::ImageOperandsShift = + 3; + pub const ImageOperandsShift_ImageOperandsOffsetShift: root::spv::ImageOperandsShift = 4; + pub const ImageOperandsShift_ImageOperandsConstOffsetsShift: root::spv::ImageOperandsShift = + 5; + pub const ImageOperandsShift_ImageOperandsSampleShift: root::spv::ImageOperandsShift = 6; + pub const ImageOperandsShift_ImageOperandsMinLodShift: root::spv::ImageOperandsShift = 7; + pub const ImageOperandsShift_ImageOperandsMakeTexelAvailableShift: + root::spv::ImageOperandsShift = 8; + pub const ImageOperandsShift_ImageOperandsMakeTexelAvailableKHRShift: + root::spv::ImageOperandsShift = 8; + pub const ImageOperandsShift_ImageOperandsMakeTexelVisibleShift: + root::spv::ImageOperandsShift = 9; + pub const ImageOperandsShift_ImageOperandsMakeTexelVisibleKHRShift: + root::spv::ImageOperandsShift = 9; + pub const ImageOperandsShift_ImageOperandsNonPrivateTexelShift: + root::spv::ImageOperandsShift = 10; + pub const ImageOperandsShift_ImageOperandsNonPrivateTexelKHRShift: + root::spv::ImageOperandsShift = 10; + pub const ImageOperandsShift_ImageOperandsVolatileTexelShift: + root::spv::ImageOperandsShift = 11; + pub const ImageOperandsShift_ImageOperandsVolatileTexelKHRShift: 
+ root::spv::ImageOperandsShift = 11; + pub const ImageOperandsShift_ImageOperandsSignExtendShift: root::spv::ImageOperandsShift = + 12; + pub const ImageOperandsShift_ImageOperandsZeroExtendShift: root::spv::ImageOperandsShift = + 13; + pub const ImageOperandsShift_ImageOperandsMax: root::spv::ImageOperandsShift = 2147483647; + pub type ImageOperandsShift = u32; + impl ImageOperandsMask { + pub const ImageOperandsMaskNone: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(0); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MemoryModel { - MemoryModelSimple = 0, - MemoryModelGLSL450 = 1, - MemoryModelOpenCL = 2, - MemoryModelVulkanKHR = 3, - MemoryModelMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsBiasMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(1); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ExecutionMode { - ExecutionModeInvocations = 0, - ExecutionModeSpacingEqual = 1, - ExecutionModeSpacingFractionalEven = 2, - ExecutionModeSpacingFractionalOdd = 3, - ExecutionModeVertexOrderCw = 4, - ExecutionModeVertexOrderCcw = 5, - ExecutionModePixelCenterInteger = 6, - ExecutionModeOriginUpperLeft = 7, - ExecutionModeOriginLowerLeft = 8, - ExecutionModeEarlyFragmentTests = 9, - ExecutionModePointMode = 10, - ExecutionModeXfb = 11, - ExecutionModeDepthReplacing = 12, - ExecutionModeDepthGreater = 14, - ExecutionModeDepthLess = 15, - ExecutionModeDepthUnchanged = 16, - ExecutionModeLocalSize = 17, - ExecutionModeLocalSizeHint = 18, - ExecutionModeInputPoints = 19, - ExecutionModeInputLines = 20, - ExecutionModeInputLinesAdjacency = 21, - ExecutionModeTriangles = 22, - ExecutionModeInputTrianglesAdjacency = 23, - ExecutionModeQuads = 24, - ExecutionModeIsolines = 25, - ExecutionModeOutputVertices = 26, - ExecutionModeOutputPoints = 27, - ExecutionModeOutputLineStrip = 28, - ExecutionModeOutputTriangleStrip = 29, - ExecutionModeVecTypeHint = 30, - 
ExecutionModeContractionOff = 31, - ExecutionModeInitializer = 33, - ExecutionModeFinalizer = 34, - ExecutionModeSubgroupSize = 35, - ExecutionModeSubgroupsPerWorkgroup = 36, - ExecutionModeSubgroupsPerWorkgroupId = 37, - ExecutionModeLocalSizeId = 38, - ExecutionModeLocalSizeHintId = 39, - ExecutionModePostDepthCoverage = 4446, - ExecutionModeDenormPreserve = 4459, - ExecutionModeDenormFlushToZero = 4460, - ExecutionModeSignedZeroInfNanPreserve = 4461, - ExecutionModeRoundingModeRTE = 4462, - ExecutionModeRoundingModeRTZ = 4463, - ExecutionModeStencilRefReplacingEXT = 5027, - ExecutionModeOutputLinesNV = 5269, - ExecutionModeOutputPrimitivesNV = 5270, - ExecutionModeDerivativeGroupQuadsNV = 5289, - ExecutionModeDerivativeGroupLinearNV = 5290, - ExecutionModeOutputTrianglesNV = 5298, - ExecutionModeMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsLodMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(2); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum StorageClass { - StorageClassUniformConstant = 0, - StorageClassInput = 1, - StorageClassUniform = 2, - StorageClassOutput = 3, - StorageClassWorkgroup = 4, - StorageClassCrossWorkgroup = 5, - StorageClassPrivate = 6, - StorageClassFunction = 7, - StorageClassGeneric = 8, - StorageClassPushConstant = 9, - StorageClassAtomicCounter = 10, - StorageClassImage = 11, - StorageClassStorageBuffer = 12, - StorageClassCallableDataNV = 5328, - StorageClassIncomingCallableDataNV = 5329, - StorageClassRayPayloadNV = 5338, - StorageClassHitAttributeNV = 5339, - StorageClassIncomingRayPayloadNV = 5342, - StorageClassShaderRecordBufferNV = 5343, - StorageClassPhysicalStorageBufferEXT = 5349, - StorageClassMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsGradMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(4); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum Dim { - Dim1D = 0, - Dim2D = 1, - 
Dim3D = 2, - DimCube = 3, - DimRect = 4, - DimBuffer = 5, - DimSubpassData = 6, - DimMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsConstOffsetMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(8); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum SamplerAddressingMode { - SamplerAddressingModeNone = 0, - SamplerAddressingModeClampToEdge = 1, - SamplerAddressingModeClamp = 2, - SamplerAddressingModeRepeat = 3, - SamplerAddressingModeRepeatMirrored = 4, - SamplerAddressingModeMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsOffsetMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(16); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum SamplerFilterMode { - SamplerFilterModeNearest = 0, - SamplerFilterModeLinear = 1, - SamplerFilterModeMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsConstOffsetsMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(32); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ImageFormat { - ImageFormatUnknown = 0, - ImageFormatRgba32f = 1, - ImageFormatRgba16f = 2, - ImageFormatR32f = 3, - ImageFormatRgba8 = 4, - ImageFormatRgba8Snorm = 5, - ImageFormatRg32f = 6, - ImageFormatRg16f = 7, - ImageFormatR11fG11fB10f = 8, - ImageFormatR16f = 9, - ImageFormatRgba16 = 10, - ImageFormatRgb10A2 = 11, - ImageFormatRg16 = 12, - ImageFormatRg8 = 13, - ImageFormatR16 = 14, - ImageFormatR8 = 15, - ImageFormatRgba16Snorm = 16, - ImageFormatRg16Snorm = 17, - ImageFormatRg8Snorm = 18, - ImageFormatR16Snorm = 19, - ImageFormatR8Snorm = 20, - ImageFormatRgba32i = 21, - ImageFormatRgba16i = 22, - ImageFormatRgba8i = 23, - ImageFormatR32i = 24, - ImageFormatRg32i = 25, - ImageFormatRg16i = 26, - ImageFormatRg8i = 27, - ImageFormatR16i = 28, - ImageFormatR8i = 29, - ImageFormatRgba32ui = 30, - ImageFormatRgba16ui = 31, - ImageFormatRgba8ui = 32, - 
ImageFormatR32ui = 33, - ImageFormatRgb10a2ui = 34, - ImageFormatRg32ui = 35, - ImageFormatRg16ui = 36, - ImageFormatRg8ui = 37, - ImageFormatR16ui = 38, - ImageFormatR8ui = 39, - ImageFormatMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsSampleMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(64); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ImageChannelOrder { - ImageChannelOrderR = 0, - ImageChannelOrderA = 1, - ImageChannelOrderRG = 2, - ImageChannelOrderRA = 3, - ImageChannelOrderRGB = 4, - ImageChannelOrderRGBA = 5, - ImageChannelOrderBGRA = 6, - ImageChannelOrderARGB = 7, - ImageChannelOrderIntensity = 8, - ImageChannelOrderLuminance = 9, - ImageChannelOrderRx = 10, - ImageChannelOrderRGx = 11, - ImageChannelOrderRGBx = 12, - ImageChannelOrderDepth = 13, - ImageChannelOrderDepthStencil = 14, - ImageChannelOrdersRGB = 15, - ImageChannelOrdersRGBx = 16, - ImageChannelOrdersRGBA = 17, - ImageChannelOrdersBGRA = 18, - ImageChannelOrderABGR = 19, - ImageChannelOrderMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsMinLodMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(128); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ImageChannelDataType { - ImageChannelDataTypeSnormInt8 = 0, - ImageChannelDataTypeSnormInt16 = 1, - ImageChannelDataTypeUnormInt8 = 2, - ImageChannelDataTypeUnormInt16 = 3, - ImageChannelDataTypeUnormShort565 = 4, - ImageChannelDataTypeUnormShort555 = 5, - ImageChannelDataTypeUnormInt101010 = 6, - ImageChannelDataTypeSignedInt8 = 7, - ImageChannelDataTypeSignedInt16 = 8, - ImageChannelDataTypeSignedInt32 = 9, - ImageChannelDataTypeUnsignedInt8 = 10, - ImageChannelDataTypeUnsignedInt16 = 11, - ImageChannelDataTypeUnsignedInt32 = 12, - ImageChannelDataTypeHalfFloat = 13, - ImageChannelDataTypeFloat = 14, - ImageChannelDataTypeUnormInt24 = 15, - ImageChannelDataTypeUnormInt101010_2 = 16, - 
ImageChannelDataTypeMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsMakeTexelAvailableMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(256); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ImageOperandsShift { - ImageOperandsBiasShift = 0, - ImageOperandsLodShift = 1, - ImageOperandsGradShift = 2, - ImageOperandsConstOffsetShift = 3, - ImageOperandsOffsetShift = 4, - ImageOperandsConstOffsetsShift = 5, - ImageOperandsSampleShift = 6, - ImageOperandsMinLodShift = 7, - ImageOperandsMakeTexelAvailableKHRShift = 8, - ImageOperandsMakeTexelVisibleKHRShift = 9, - ImageOperandsNonPrivateTexelKHRShift = 10, - ImageOperandsVolatileTexelKHRShift = 11, - ImageOperandsMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsMakeTexelAvailableKHRMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(256); } - pub const ImageOperandsMask_ImageOperandsMaskNone: - root::spv::ImageOperandsMask = - ImageOperandsMask(0); - pub const ImageOperandsMask_ImageOperandsBiasMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(1); - pub const ImageOperandsMask_ImageOperandsLodMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(2); - pub const ImageOperandsMask_ImageOperandsGradMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(4); - pub const ImageOperandsMask_ImageOperandsConstOffsetMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(8); - pub const ImageOperandsMask_ImageOperandsOffsetMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(16); - pub const ImageOperandsMask_ImageOperandsConstOffsetsMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(32); - pub const ImageOperandsMask_ImageOperandsSampleMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(64); - pub const ImageOperandsMask_ImageOperandsMinLodMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(128); - pub const ImageOperandsMask_ImageOperandsMakeTexelAvailableKHRMask: - 
root::spv::ImageOperandsMask = - ImageOperandsMask(256); - pub const ImageOperandsMask_ImageOperandsMakeTexelVisibleKHRMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(512); - pub const ImageOperandsMask_ImageOperandsNonPrivateTexelKHRMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(1024); - pub const ImageOperandsMask_ImageOperandsVolatileTexelKHRMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(2048); - impl ::std::ops::BitOr for - root::spv::ImageOperandsMask { - type - Output - = - Self; + impl ImageOperandsMask { + pub const ImageOperandsMakeTexelVisibleMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(512); + } + impl ImageOperandsMask { + pub const ImageOperandsMakeTexelVisibleKHRMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(512); + } + impl ImageOperandsMask { + pub const ImageOperandsNonPrivateTexelMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(1024); + } + impl ImageOperandsMask { + pub const ImageOperandsNonPrivateTexelKHRMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(1024); + } + impl ImageOperandsMask { + pub const ImageOperandsVolatileTexelMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(2048); + } + impl ImageOperandsMask { + pub const ImageOperandsVolatileTexelKHRMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(2048); + } + impl ImageOperandsMask { + pub const ImageOperandsSignExtendMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(4096); + } + impl ImageOperandsMask { + pub const ImageOperandsZeroExtendMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(8192); + } + impl ::std::ops::BitOr for root::spv::ImageOperandsMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { ImageOperandsMask(self.0 | other.0) @@ -331,12 +392,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::ImageOperandsMask { - type - Output - = - 
Self; + impl ::std::ops::BitAnd for root::spv::ImageOperandsMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { ImageOperandsMask(self.0 & other.0) @@ -348,43 +405,44 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct ImageOperandsMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum FPFastMathModeShift { - FPFastMathModeNotNaNShift = 0, - FPFastMathModeNotInfShift = 1, - FPFastMathModeNSZShift = 2, - FPFastMathModeAllowRecipShift = 3, - FPFastMathModeFastShift = 4, - FPFastMathModeMax = 2147483647, + pub struct ImageOperandsMask(pub u32); + pub const FPFastMathModeShift_FPFastMathModeNotNaNShift: root::spv::FPFastMathModeShift = 0; + pub const FPFastMathModeShift_FPFastMathModeNotInfShift: root::spv::FPFastMathModeShift = 1; + pub const FPFastMathModeShift_FPFastMathModeNSZShift: root::spv::FPFastMathModeShift = 2; + pub const FPFastMathModeShift_FPFastMathModeAllowRecipShift: + root::spv::FPFastMathModeShift = 3; + pub const FPFastMathModeShift_FPFastMathModeFastShift: root::spv::FPFastMathModeShift = 4; + pub const FPFastMathModeShift_FPFastMathModeMax: root::spv::FPFastMathModeShift = + 2147483647; + pub type FPFastMathModeShift = u32; + impl FPFastMathModeMask { + pub const FPFastMathModeMaskNone: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(0); } - pub const FPFastMathModeMask_FPFastMathModeMaskNone: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(0); - pub const FPFastMathModeMask_FPFastMathModeNotNaNMask: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(1); - pub const FPFastMathModeMask_FPFastMathModeNotInfMask: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(2); - pub const FPFastMathModeMask_FPFastMathModeNSZMask: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(4); - pub const FPFastMathModeMask_FPFastMathModeAllowRecipMask: - 
root::spv::FPFastMathModeMask = - FPFastMathModeMask(8); - pub const FPFastMathModeMask_FPFastMathModeFastMask: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(16); - impl ::std::ops::BitOr for - root::spv::FPFastMathModeMask { - type - Output - = - Self; + impl FPFastMathModeMask { + pub const FPFastMathModeNotNaNMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(1); + } + impl FPFastMathModeMask { + pub const FPFastMathModeNotInfMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(2); + } + impl FPFastMathModeMask { + pub const FPFastMathModeNSZMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(4); + } + impl FPFastMathModeMask { + pub const FPFastMathModeAllowRecipMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(8); + } + impl FPFastMathModeMask { + pub const FPFastMathModeFastMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(16); + } + impl ::std::ops::BitOr for root::spv::FPFastMathModeMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { FPFastMathModeMask(self.0 | other.0) @@ -396,12 +454,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::FPFastMathModeMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::FPFastMathModeMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { FPFastMathModeMask(self.0 & other.0) @@ -413,45 +467,62 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct FPFastMathModeMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum FPRoundingMode { - FPRoundingModeRTE = 0, - FPRoundingModeRTZ = 1, - FPRoundingModeRTP = 2, - FPRoundingModeRTN = 3, - FPRoundingModeMax = 2147483647, + pub struct FPFastMathModeMask(pub u32); + pub const FPRoundingMode_FPRoundingModeRTE: root::spv::FPRoundingMode = 0; + 
pub const FPRoundingMode_FPRoundingModeRTZ: root::spv::FPRoundingMode = 1; + pub const FPRoundingMode_FPRoundingModeRTP: root::spv::FPRoundingMode = 2; + pub const FPRoundingMode_FPRoundingModeRTN: root::spv::FPRoundingMode = 3; + pub const FPRoundingMode_FPRoundingModeMax: root::spv::FPRoundingMode = 2147483647; + pub type FPRoundingMode = u32; + pub const LinkageType_LinkageTypeExport: root::spv::LinkageType = 0; + pub const LinkageType_LinkageTypeImport: root::spv::LinkageType = 1; + pub const LinkageType_LinkageTypeMax: root::spv::LinkageType = 2147483647; + pub type LinkageType = u32; + pub const AccessQualifier_AccessQualifierReadOnly: root::spv::AccessQualifier = 0; + pub const AccessQualifier_AccessQualifierWriteOnly: root::spv::AccessQualifier = 1; + pub const AccessQualifier_AccessQualifierReadWrite: root::spv::AccessQualifier = 2; + pub const AccessQualifier_AccessQualifierMax: root::spv::AccessQualifier = 2147483647; + pub type AccessQualifier = u32; + pub const FunctionParameterAttribute_FunctionParameterAttributeZext: + root::spv::FunctionParameterAttribute = 0; + pub const FunctionParameterAttribute_FunctionParameterAttributeSext: + root::spv::FunctionParameterAttribute = 1; + pub const FunctionParameterAttribute_FunctionParameterAttributeByVal: + root::spv::FunctionParameterAttribute = 2; + pub const FunctionParameterAttribute_FunctionParameterAttributeSret: + root::spv::FunctionParameterAttribute = 3; + pub const FunctionParameterAttribute_FunctionParameterAttributeNoAlias: + root::spv::FunctionParameterAttribute = 4; + pub const FunctionParameterAttribute_FunctionParameterAttributeNoCapture: + root::spv::FunctionParameterAttribute = 5; + pub const FunctionParameterAttribute_FunctionParameterAttributeNoWrite: + root::spv::FunctionParameterAttribute = 6; + pub const FunctionParameterAttribute_FunctionParameterAttributeNoReadWrite: + root::spv::FunctionParameterAttribute = 7; + pub const FunctionParameterAttribute_FunctionParameterAttributeMax: + 
root::spv::FunctionParameterAttribute = 2147483647; + pub type FunctionParameterAttribute = u32; + impl root::spv::Decoration { + pub const DecorationNonUniformEXT: root::spv::Decoration = + Decoration::DecorationNonUniform; } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum LinkageType { - LinkageTypeExport = 0, - LinkageTypeImport = 1, - LinkageTypeMax = 2147483647, + impl root::spv::Decoration { + pub const DecorationRestrictPointerEXT: root::spv::Decoration = + Decoration::DecorationRestrictPointer; } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum AccessQualifier { - AccessQualifierReadOnly = 0, - AccessQualifierWriteOnly = 1, - AccessQualifierReadWrite = 2, - AccessQualifierMax = 2147483647, + impl root::spv::Decoration { + pub const DecorationAliasedPointerEXT: root::spv::Decoration = + Decoration::DecorationAliasedPointer; } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum FunctionParameterAttribute { - FunctionParameterAttributeZext = 0, - FunctionParameterAttributeSext = 1, - FunctionParameterAttributeByVal = 2, - FunctionParameterAttributeSret = 3, - FunctionParameterAttributeNoAlias = 4, - FunctionParameterAttributeNoCapture = 5, - FunctionParameterAttributeNoWrite = 6, - FunctionParameterAttributeNoReadWrite = 7, - FunctionParameterAttributeMax = 2147483647, + impl root::spv::Decoration { + pub const DecorationHlslCounterBufferGOOGLE: root::spv::Decoration = + Decoration::DecorationCounterBuffer; + } + impl root::spv::Decoration { + pub const DecorationUserSemantic: root::spv::Decoration = + Decoration::DecorationHlslSemanticGOOGLE; } #[repr(u32)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] @@ -482,6 +553,7 @@ pub mod root { DecorationNonWritable = 24, DecorationNonReadable = 25, DecorationUniform = 26, + DecorationUniformId = 27, DecorationSaturatedConversion = 28, DecorationStream = 29, DecorationLocation = 30, @@ -513,27 +585,36 @@ pub mod root { 
DecorationPerViewNV = 5272, DecorationPerTaskNV = 5273, DecorationPerVertexNV = 5285, - DecorationNonUniformEXT = 5300, - DecorationRestrictPointerEXT = 5355, - DecorationAliasedPointerEXT = 5356, - DecorationHlslCounterBufferGOOGLE = 5634, + DecorationNonUniform = 5300, + DecorationRestrictPointer = 5355, + DecorationAliasedPointer = 5356, + DecorationCounterBuffer = 5634, DecorationHlslSemanticGOOGLE = 5635, + DecorationUserTypeGOOGLE = 5636, DecorationMax = 2147483647, } - pub const BuiltIn_BuiltInSubgroupEqMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupEqMask; - pub const BuiltIn_BuiltInSubgroupGeMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupGeMask; - pub const BuiltIn_BuiltInSubgroupGtMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupGtMask; - pub const BuiltIn_BuiltInSubgroupLeMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupLeMask; - pub const BuiltIn_BuiltInSubgroupLtMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupLtMask; - pub const BuiltIn_BuiltInFragmentSizeNV: root::spv::BuiltIn = - BuiltIn::BuiltInFragSizeEXT; - pub const BuiltIn_BuiltInInvocationsPerPixelNV: root::spv::BuiltIn = - BuiltIn::BuiltInFragInvocationCountEXT; + impl root::spv::BuiltIn { + pub const BuiltInSubgroupEqMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupEqMask; + } + impl root::spv::BuiltIn { + pub const BuiltInSubgroupGeMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupGeMask; + } + impl root::spv::BuiltIn { + pub const BuiltInSubgroupGtMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupGtMask; + } + impl root::spv::BuiltIn { + pub const BuiltInSubgroupLeMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupLeMask; + } + impl root::spv::BuiltIn { + pub const BuiltInSubgroupLtMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupLtMask; + } + impl root::spv::BuiltIn { + pub const BuiltInFragmentSizeNV: root::spv::BuiltIn = BuiltIn::BuiltInFragSizeEXT; + } + impl root::spv::BuiltIn { + pub const BuiltInInvocationsPerPixelNV: 
root::spv::BuiltIn = + BuiltIn::BuiltInFragInvocationCountEXT; + } #[repr(u32)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum BuiltIn { @@ -628,30 +709,33 @@ pub mod root { BuiltInHitTNV = 5332, BuiltInHitKindNV = 5333, BuiltInIncomingRayFlagsNV = 5351, + BuiltInWarpsPerSMNV = 5374, + BuiltInSMCountNV = 5375, + BuiltInWarpIDNV = 5376, + BuiltInSMIDNV = 5377, BuiltInMax = 2147483647, } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum SelectionControlShift { - SelectionControlFlattenShift = 0, - SelectionControlDontFlattenShift = 1, - SelectionControlMax = 2147483647, + pub const SelectionControlShift_SelectionControlFlattenShift: + root::spv::SelectionControlShift = 0; + pub const SelectionControlShift_SelectionControlDontFlattenShift: + root::spv::SelectionControlShift = 1; + pub const SelectionControlShift_SelectionControlMax: root::spv::SelectionControlShift = + 2147483647; + pub type SelectionControlShift = u32; + impl SelectionControlMask { + pub const SelectionControlMaskNone: root::spv::SelectionControlMask = + root::spv::SelectionControlMask(0); } - pub const SelectionControlMask_SelectionControlMaskNone: - root::spv::SelectionControlMask = - SelectionControlMask(0); - pub const SelectionControlMask_SelectionControlFlattenMask: - root::spv::SelectionControlMask = - SelectionControlMask(1); - pub const SelectionControlMask_SelectionControlDontFlattenMask: - root::spv::SelectionControlMask = - SelectionControlMask(2); - impl ::std::ops::BitOr for - root::spv::SelectionControlMask { - type - Output - = - Self; + impl SelectionControlMask { + pub const SelectionControlFlattenMask: root::spv::SelectionControlMask = + root::spv::SelectionControlMask(1); + } + impl SelectionControlMask { + pub const SelectionControlDontFlattenMask: root::spv::SelectionControlMask = + root::spv::SelectionControlMask(2); + } + impl ::std::ops::BitOr for root::spv::SelectionControlMask { + type Output = Self; #[inline] fn bitor(self, other: 
Self) -> Self { SelectionControlMask(self.0 | other.0) @@ -663,12 +747,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::SelectionControlMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::SelectionControlMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { SelectionControlMask(self.0 & other.0) @@ -676,44 +756,69 @@ pub mod root { } impl ::std::ops::BitAndAssign for root::spv::SelectionControlMask { #[inline] - fn bitand_assign(&mut self, - rhs: root::spv::SelectionControlMask) { + fn bitand_assign(&mut self, rhs: root::spv::SelectionControlMask) { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct SelectionControlMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum LoopControlShift { - LoopControlUnrollShift = 0, - LoopControlDontUnrollShift = 1, - LoopControlDependencyInfiniteShift = 2, - LoopControlDependencyLengthShift = 3, - LoopControlMax = 2147483647, + pub struct SelectionControlMask(pub u32); + pub const LoopControlShift_LoopControlUnrollShift: root::spv::LoopControlShift = 0; + pub const LoopControlShift_LoopControlDontUnrollShift: root::spv::LoopControlShift = 1; + pub const LoopControlShift_LoopControlDependencyInfiniteShift: root::spv::LoopControlShift = + 2; + pub const LoopControlShift_LoopControlDependencyLengthShift: root::spv::LoopControlShift = + 3; + pub const LoopControlShift_LoopControlMinIterationsShift: root::spv::LoopControlShift = 4; + pub const LoopControlShift_LoopControlMaxIterationsShift: root::spv::LoopControlShift = 5; + pub const LoopControlShift_LoopControlIterationMultipleShift: root::spv::LoopControlShift = + 6; + pub const LoopControlShift_LoopControlPeelCountShift: root::spv::LoopControlShift = 7; + pub const LoopControlShift_LoopControlPartialCountShift: root::spv::LoopControlShift = 8; + pub const 
LoopControlShift_LoopControlMax: root::spv::LoopControlShift = 2147483647; + pub type LoopControlShift = u32; + impl LoopControlMask { + pub const LoopControlMaskNone: root::spv::LoopControlMask = + root::spv::LoopControlMask(0); } - pub const LoopControlMask_LoopControlMaskNone: - root::spv::LoopControlMask = - LoopControlMask(0); - pub const LoopControlMask_LoopControlUnrollMask: - root::spv::LoopControlMask = - LoopControlMask(1); - pub const LoopControlMask_LoopControlDontUnrollMask: - root::spv::LoopControlMask = - LoopControlMask(2); - pub const LoopControlMask_LoopControlDependencyInfiniteMask: - root::spv::LoopControlMask = - LoopControlMask(4); - pub const LoopControlMask_LoopControlDependencyLengthMask: - root::spv::LoopControlMask = - LoopControlMask(8); - impl ::std::ops::BitOr for - root::spv::LoopControlMask { - type - Output - = - Self; + impl LoopControlMask { + pub const LoopControlUnrollMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(1); + } + impl LoopControlMask { + pub const LoopControlDontUnrollMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(2); + } + impl LoopControlMask { + pub const LoopControlDependencyInfiniteMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(4); + } + impl LoopControlMask { + pub const LoopControlDependencyLengthMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(8); + } + impl LoopControlMask { + pub const LoopControlMinIterationsMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(16); + } + impl LoopControlMask { + pub const LoopControlMaxIterationsMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(32); + } + impl LoopControlMask { + pub const LoopControlIterationMultipleMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(64); + } + impl LoopControlMask { + pub const LoopControlPeelCountMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(128); + } + impl LoopControlMask { + pub const LoopControlPartialCountMask: 
root::spv::LoopControlMask = + root::spv::LoopControlMask(256); + } + impl ::std::ops::BitOr for root::spv::LoopControlMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { LoopControlMask(self.0 | other.0) @@ -725,12 +830,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::LoopControlMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::LoopControlMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { LoopControlMask(self.0 & other.0) @@ -742,39 +843,42 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct LoopControlMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum FunctionControlShift { - FunctionControlInlineShift = 0, - FunctionControlDontInlineShift = 1, - FunctionControlPureShift = 2, - FunctionControlConstShift = 3, - FunctionControlMax = 2147483647, + pub struct LoopControlMask(pub u32); + pub const FunctionControlShift_FunctionControlInlineShift: root::spv::FunctionControlShift = + 0; + pub const FunctionControlShift_FunctionControlDontInlineShift: + root::spv::FunctionControlShift = 1; + pub const FunctionControlShift_FunctionControlPureShift: root::spv::FunctionControlShift = + 2; + pub const FunctionControlShift_FunctionControlConstShift: root::spv::FunctionControlShift = + 3; + pub const FunctionControlShift_FunctionControlMax: root::spv::FunctionControlShift = + 2147483647; + pub type FunctionControlShift = u32; + impl FunctionControlMask { + pub const FunctionControlMaskNone: root::spv::FunctionControlMask = + root::spv::FunctionControlMask(0); } - pub const FunctionControlMask_FunctionControlMaskNone: - root::spv::FunctionControlMask = - FunctionControlMask(0); - pub const FunctionControlMask_FunctionControlInlineMask: - root::spv::FunctionControlMask = - FunctionControlMask(1); - pub const 
FunctionControlMask_FunctionControlDontInlineMask: - root::spv::FunctionControlMask = - FunctionControlMask(2); - pub const FunctionControlMask_FunctionControlPureMask: - root::spv::FunctionControlMask = - FunctionControlMask(4); - pub const FunctionControlMask_FunctionControlConstMask: - root::spv::FunctionControlMask = - FunctionControlMask(8); - impl ::std::ops::BitOr for - root::spv::FunctionControlMask { - type - Output - = - Self; + impl FunctionControlMask { + pub const FunctionControlInlineMask: root::spv::FunctionControlMask = + root::spv::FunctionControlMask(1); + } + impl FunctionControlMask { + pub const FunctionControlDontInlineMask: root::spv::FunctionControlMask = + root::spv::FunctionControlMask(2); + } + impl FunctionControlMask { + pub const FunctionControlPureMask: root::spv::FunctionControlMask = + root::spv::FunctionControlMask(4); + } + impl FunctionControlMask { + pub const FunctionControlConstMask: root::spv::FunctionControlMask = + root::spv::FunctionControlMask(8); + } + impl ::std::ops::BitOr for root::spv::FunctionControlMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { FunctionControlMask(self.0 | other.0) @@ -786,12 +890,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::FunctionControlMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::FunctionControlMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { FunctionControlMask(self.0 & other.0) @@ -803,75 +903,120 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct FunctionControlMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MemorySemanticsShift { - MemorySemanticsAcquireShift = 1, - MemorySemanticsReleaseShift = 2, - MemorySemanticsAcquireReleaseShift = 3, - MemorySemanticsSequentiallyConsistentShift = 4, - 
MemorySemanticsUniformMemoryShift = 6, - MemorySemanticsSubgroupMemoryShift = 7, - MemorySemanticsWorkgroupMemoryShift = 8, - MemorySemanticsCrossWorkgroupMemoryShift = 9, - MemorySemanticsAtomicCounterMemoryShift = 10, - MemorySemanticsImageMemoryShift = 11, - MemorySemanticsOutputMemoryKHRShift = 12, - MemorySemanticsMakeAvailableKHRShift = 13, - MemorySemanticsMakeVisibleKHRShift = 14, - MemorySemanticsMax = 2147483647, + pub struct FunctionControlMask(pub u32); + pub const MemorySemanticsShift_MemorySemanticsAcquireShift: + root::spv::MemorySemanticsShift = 1; + pub const MemorySemanticsShift_MemorySemanticsReleaseShift: + root::spv::MemorySemanticsShift = 2; + pub const MemorySemanticsShift_MemorySemanticsAcquireReleaseShift: + root::spv::MemorySemanticsShift = 3; + pub const MemorySemanticsShift_MemorySemanticsSequentiallyConsistentShift: + root::spv::MemorySemanticsShift = 4; + pub const MemorySemanticsShift_MemorySemanticsUniformMemoryShift: + root::spv::MemorySemanticsShift = 6; + pub const MemorySemanticsShift_MemorySemanticsSubgroupMemoryShift: + root::spv::MemorySemanticsShift = 7; + pub const MemorySemanticsShift_MemorySemanticsWorkgroupMemoryShift: + root::spv::MemorySemanticsShift = 8; + pub const MemorySemanticsShift_MemorySemanticsCrossWorkgroupMemoryShift: + root::spv::MemorySemanticsShift = 9; + pub const MemorySemanticsShift_MemorySemanticsAtomicCounterMemoryShift: + root::spv::MemorySemanticsShift = 10; + pub const MemorySemanticsShift_MemorySemanticsImageMemoryShift: + root::spv::MemorySemanticsShift = 11; + pub const MemorySemanticsShift_MemorySemanticsOutputMemoryShift: + root::spv::MemorySemanticsShift = 12; + pub const MemorySemanticsShift_MemorySemanticsOutputMemoryKHRShift: + root::spv::MemorySemanticsShift = 12; + pub const MemorySemanticsShift_MemorySemanticsMakeAvailableShift: + root::spv::MemorySemanticsShift = 13; + pub const MemorySemanticsShift_MemorySemanticsMakeAvailableKHRShift: + root::spv::MemorySemanticsShift = 13; + pub 
const MemorySemanticsShift_MemorySemanticsMakeVisibleShift: + root::spv::MemorySemanticsShift = 14; + pub const MemorySemanticsShift_MemorySemanticsMakeVisibleKHRShift: + root::spv::MemorySemanticsShift = 14; + pub const MemorySemanticsShift_MemorySemanticsVolatileShift: + root::spv::MemorySemanticsShift = 15; + pub const MemorySemanticsShift_MemorySemanticsMax: root::spv::MemorySemanticsShift = + 2147483647; + pub type MemorySemanticsShift = u32; + impl MemorySemanticsMask { + pub const MemorySemanticsMaskNone: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(0); } - pub const MemorySemanticsMask_MemorySemanticsMaskNone: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(0); - pub const MemorySemanticsMask_MemorySemanticsAcquireMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(2); - pub const MemorySemanticsMask_MemorySemanticsReleaseMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(4); - pub const MemorySemanticsMask_MemorySemanticsAcquireReleaseMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(8); - pub const MemorySemanticsMask_MemorySemanticsSequentiallyConsistentMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(16); - pub const MemorySemanticsMask_MemorySemanticsUniformMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(64); - pub const MemorySemanticsMask_MemorySemanticsSubgroupMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(128); - pub const MemorySemanticsMask_MemorySemanticsWorkgroupMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(256); - pub const MemorySemanticsMask_MemorySemanticsCrossWorkgroupMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(512); - pub const MemorySemanticsMask_MemorySemanticsAtomicCounterMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(1024); - pub const MemorySemanticsMask_MemorySemanticsImageMemoryMask: - root::spv::MemorySemanticsMask = - 
MemorySemanticsMask(2048); - pub const MemorySemanticsMask_MemorySemanticsOutputMemoryKHRMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(4096); - pub const MemorySemanticsMask_MemorySemanticsMakeAvailableKHRMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(8192); - pub const MemorySemanticsMask_MemorySemanticsMakeVisibleKHRMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(16384); - impl ::std::ops::BitOr for - root::spv::MemorySemanticsMask { - type - Output - = - Self; + impl MemorySemanticsMask { + pub const MemorySemanticsAcquireMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(2); + } + impl MemorySemanticsMask { + pub const MemorySemanticsReleaseMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(4); + } + impl MemorySemanticsMask { + pub const MemorySemanticsAcquireReleaseMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(8); + } + impl MemorySemanticsMask { + pub const MemorySemanticsSequentiallyConsistentMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(16); + } + impl MemorySemanticsMask { + pub const MemorySemanticsUniformMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(64); + } + impl MemorySemanticsMask { + pub const MemorySemanticsSubgroupMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(128); + } + impl MemorySemanticsMask { + pub const MemorySemanticsWorkgroupMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(256); + } + impl MemorySemanticsMask { + pub const MemorySemanticsCrossWorkgroupMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(512); + } + impl MemorySemanticsMask { + pub const MemorySemanticsAtomicCounterMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(1024); + } + impl MemorySemanticsMask { + pub const MemorySemanticsImageMemoryMask: root::spv::MemorySemanticsMask = + 
root::spv::MemorySemanticsMask(2048); + } + impl MemorySemanticsMask { + pub const MemorySemanticsOutputMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(4096); + } + impl MemorySemanticsMask { + pub const MemorySemanticsOutputMemoryKHRMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(4096); + } + impl MemorySemanticsMask { + pub const MemorySemanticsMakeAvailableMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(8192); + } + impl MemorySemanticsMask { + pub const MemorySemanticsMakeAvailableKHRMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(8192); + } + impl MemorySemanticsMask { + pub const MemorySemanticsMakeVisibleMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(16384); + } + impl MemorySemanticsMask { + pub const MemorySemanticsMakeVisibleKHRMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(16384); + } + impl MemorySemanticsMask { + pub const MemorySemanticsVolatileMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(32768); + } + impl ::std::ops::BitOr for root::spv::MemorySemanticsMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { MemorySemanticsMask(self.0 | other.0) @@ -883,12 +1028,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::MemorySemanticsMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::MemorySemanticsMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { MemorySemanticsMask(self.0 & other.0) @@ -900,47 +1041,68 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct MemorySemanticsMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MemoryAccessShift { - MemoryAccessVolatileShift = 0, - MemoryAccessAlignedShift = 1, - 
MemoryAccessNontemporalShift = 2, - MemoryAccessMakePointerAvailableKHRShift = 3, - MemoryAccessMakePointerVisibleKHRShift = 4, - MemoryAccessNonPrivatePointerKHRShift = 5, - MemoryAccessMax = 2147483647, + pub struct MemorySemanticsMask(pub u32); + pub const MemoryAccessShift_MemoryAccessVolatileShift: root::spv::MemoryAccessShift = 0; + pub const MemoryAccessShift_MemoryAccessAlignedShift: root::spv::MemoryAccessShift = 1; + pub const MemoryAccessShift_MemoryAccessNontemporalShift: root::spv::MemoryAccessShift = 2; + pub const MemoryAccessShift_MemoryAccessMakePointerAvailableShift: + root::spv::MemoryAccessShift = 3; + pub const MemoryAccessShift_MemoryAccessMakePointerAvailableKHRShift: + root::spv::MemoryAccessShift = 3; + pub const MemoryAccessShift_MemoryAccessMakePointerVisibleShift: + root::spv::MemoryAccessShift = 4; + pub const MemoryAccessShift_MemoryAccessMakePointerVisibleKHRShift: + root::spv::MemoryAccessShift = 4; + pub const MemoryAccessShift_MemoryAccessNonPrivatePointerShift: + root::spv::MemoryAccessShift = 5; + pub const MemoryAccessShift_MemoryAccessNonPrivatePointerKHRShift: + root::spv::MemoryAccessShift = 5; + pub const MemoryAccessShift_MemoryAccessMax: root::spv::MemoryAccessShift = 2147483647; + pub type MemoryAccessShift = u32; + impl MemoryAccessMask { + pub const MemoryAccessMaskNone: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(0); } - pub const MemoryAccessMask_MemoryAccessMaskNone: - root::spv::MemoryAccessMask = - MemoryAccessMask(0); - pub const MemoryAccessMask_MemoryAccessVolatileMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(1); - pub const MemoryAccessMask_MemoryAccessAlignedMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(2); - pub const MemoryAccessMask_MemoryAccessNontemporalMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(4); - pub const MemoryAccessMask_MemoryAccessMakePointerAvailableKHRMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(8); - pub const 
MemoryAccessMask_MemoryAccessMakePointerVisibleKHRMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(16); - pub const MemoryAccessMask_MemoryAccessNonPrivatePointerKHRMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(32); - impl ::std::ops::BitOr for - root::spv::MemoryAccessMask { - type - Output - = - Self; + impl MemoryAccessMask { + pub const MemoryAccessVolatileMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(1); + } + impl MemoryAccessMask { + pub const MemoryAccessAlignedMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(2); + } + impl MemoryAccessMask { + pub const MemoryAccessNontemporalMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(4); + } + impl MemoryAccessMask { + pub const MemoryAccessMakePointerAvailableMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(8); + } + impl MemoryAccessMask { + pub const MemoryAccessMakePointerAvailableKHRMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(8); + } + impl MemoryAccessMask { + pub const MemoryAccessMakePointerVisibleMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(16); + } + impl MemoryAccessMask { + pub const MemoryAccessMakePointerVisibleKHRMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(16); + } + impl MemoryAccessMask { + pub const MemoryAccessNonPrivatePointerMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(32); + } + impl MemoryAccessMask { + pub const MemoryAccessNonPrivatePointerKHRMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(32); + } + impl ::std::ops::BitOr for root::spv::MemoryAccessMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { MemoryAccessMask(self.0 | other.0) @@ -952,12 +1114,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::MemoryAccessMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::MemoryAccessMask { + type Output = Self; #[inline] fn 
bitand(self, other: Self) -> Self { MemoryAccessMask(self.0 & other.0) @@ -969,50 +1127,47 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct MemoryAccessMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum Scope { - ScopeCrossDevice = 0, - ScopeDevice = 1, - ScopeWorkgroup = 2, - ScopeSubgroup = 3, - ScopeInvocation = 4, - ScopeQueueFamilyKHR = 5, - ScopeMax = 2147483647, + pub struct MemoryAccessMask(pub u32); + pub const Scope_ScopeCrossDevice: root::spv::Scope = 0; + pub const Scope_ScopeDevice: root::spv::Scope = 1; + pub const Scope_ScopeWorkgroup: root::spv::Scope = 2; + pub const Scope_ScopeSubgroup: root::spv::Scope = 3; + pub const Scope_ScopeInvocation: root::spv::Scope = 4; + pub const Scope_ScopeQueueFamily: root::spv::Scope = 5; + pub const Scope_ScopeQueueFamilyKHR: root::spv::Scope = 5; + pub const Scope_ScopeMax: root::spv::Scope = 2147483647; + pub type Scope = u32; + pub const GroupOperation_GroupOperationReduce: root::spv::GroupOperation = 0; + pub const GroupOperation_GroupOperationInclusiveScan: root::spv::GroupOperation = 1; + pub const GroupOperation_GroupOperationExclusiveScan: root::spv::GroupOperation = 2; + pub const GroupOperation_GroupOperationClusteredReduce: root::spv::GroupOperation = 3; + pub const GroupOperation_GroupOperationPartitionedReduceNV: root::spv::GroupOperation = 6; + pub const GroupOperation_GroupOperationPartitionedInclusiveScanNV: + root::spv::GroupOperation = 7; + pub const GroupOperation_GroupOperationPartitionedExclusiveScanNV: + root::spv::GroupOperation = 8; + pub const GroupOperation_GroupOperationMax: root::spv::GroupOperation = 2147483647; + pub type GroupOperation = u32; + impl KernelEnqueueFlags { + pub const KernelEnqueueFlagsNoWait: root::spv::KernelEnqueueFlags = + root::spv::KernelEnqueueFlags(0); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, 
Eq, Hash)] - pub enum GroupOperation { - GroupOperationReduce = 0, - GroupOperationInclusiveScan = 1, - GroupOperationExclusiveScan = 2, - GroupOperationClusteredReduce = 3, - GroupOperationPartitionedReduceNV = 6, - GroupOperationPartitionedInclusiveScanNV = 7, - GroupOperationPartitionedExclusiveScanNV = 8, - GroupOperationMax = 2147483647, + impl KernelEnqueueFlags { + pub const KernelEnqueueFlagsWaitKernel: root::spv::KernelEnqueueFlags = + root::spv::KernelEnqueueFlags(1); } - pub const KernelEnqueueFlags_KernelEnqueueFlagsNoWait: - root::spv::KernelEnqueueFlags = - KernelEnqueueFlags(0); - pub const KernelEnqueueFlags_KernelEnqueueFlagsWaitKernel: - root::spv::KernelEnqueueFlags = - KernelEnqueueFlags(1); - pub const KernelEnqueueFlags_KernelEnqueueFlagsWaitWorkGroup: - root::spv::KernelEnqueueFlags = - KernelEnqueueFlags(2); - pub const KernelEnqueueFlags_KernelEnqueueFlagsMax: - root::spv::KernelEnqueueFlags = - KernelEnqueueFlags(2147483647); - impl ::std::ops::BitOr for - root::spv::KernelEnqueueFlags { - type - Output - = - Self; + impl KernelEnqueueFlags { + pub const KernelEnqueueFlagsWaitWorkGroup: root::spv::KernelEnqueueFlags = + root::spv::KernelEnqueueFlags(2); + } + impl KernelEnqueueFlags { + pub const KernelEnqueueFlagsMax: root::spv::KernelEnqueueFlags = + root::spv::KernelEnqueueFlags(2147483647); + } + impl ::std::ops::BitOr for root::spv::KernelEnqueueFlags { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { KernelEnqueueFlags(self.0 | other.0) @@ -1024,12 +1179,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::KernelEnqueueFlags { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::KernelEnqueueFlags { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { KernelEnqueueFlags(self.0 & other.0) @@ -1041,27 +1192,24 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct 
KernelEnqueueFlags(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum KernelProfilingInfoShift { - KernelProfilingInfoCmdExecTimeShift = 0, - KernelProfilingInfoMax = 2147483647, + pub struct KernelEnqueueFlags(pub u32); + pub const KernelProfilingInfoShift_KernelProfilingInfoCmdExecTimeShift: + root::spv::KernelProfilingInfoShift = 0; + pub const KernelProfilingInfoShift_KernelProfilingInfoMax: + root::spv::KernelProfilingInfoShift = 2147483647; + pub type KernelProfilingInfoShift = u32; + impl KernelProfilingInfoMask { + pub const KernelProfilingInfoMaskNone: root::spv::KernelProfilingInfoMask = + root::spv::KernelProfilingInfoMask(0); } - pub const KernelProfilingInfoMask_KernelProfilingInfoMaskNone: - root::spv::KernelProfilingInfoMask = - KernelProfilingInfoMask(0); - pub const KernelProfilingInfoMask_KernelProfilingInfoCmdExecTimeMask: - root::spv::KernelProfilingInfoMask = - KernelProfilingInfoMask(1); - impl ::std::ops::BitOr for - root::spv::KernelProfilingInfoMask { - type - Output - = - Self; + impl KernelProfilingInfoMask { + pub const KernelProfilingInfoCmdExecTimeMask: root::spv::KernelProfilingInfoMask = + root::spv::KernelProfilingInfoMask(1); + } + impl ::std::ops::BitOr for root::spv::KernelProfilingInfoMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { KernelProfilingInfoMask(self.0 | other.0) @@ -1069,17 +1217,12 @@ pub mod root { } impl ::std::ops::BitOrAssign for root::spv::KernelProfilingInfoMask { #[inline] - fn bitor_assign(&mut self, - rhs: root::spv::KernelProfilingInfoMask) { + fn bitor_assign(&mut self, rhs: root::spv::KernelProfilingInfoMask) { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::KernelProfilingInfoMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::KernelProfilingInfoMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { KernelProfilingInfoMask(self.0 & other.0) @@ -1087,553 
+1230,760 @@ pub mod root { } impl ::std::ops::BitAndAssign for root::spv::KernelProfilingInfoMask { #[inline] - fn bitand_assign(&mut self, - rhs: root::spv::KernelProfilingInfoMask) { + fn bitand_assign(&mut self, rhs: root::spv::KernelProfilingInfoMask) { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct KernelProfilingInfoMask(pub ::std::os::raw::c_uint); - pub const Capability_CapabilityStorageUniformBufferBlock16: - root::spv::Capability = - Capability::CapabilityStorageBuffer16BitAccess; - pub const Capability_CapabilityUniformAndStorageBuffer16BitAccess: - root::spv::Capability = - Capability::CapabilityStorageUniform16; - pub const Capability_CapabilityShaderViewportIndexLayerNV: - root::spv::Capability = - Capability::CapabilityShaderViewportIndexLayerEXT; - pub const Capability_CapabilityShadingRateNV: root::spv::Capability = - Capability::CapabilityFragmentDensityEXT; - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum Capability { - CapabilityMatrix = 0, - CapabilityShader = 1, - CapabilityGeometry = 2, - CapabilityTessellation = 3, - CapabilityAddresses = 4, - CapabilityLinkage = 5, - CapabilityKernel = 6, - CapabilityVector16 = 7, - CapabilityFloat16Buffer = 8, - CapabilityFloat16 = 9, - CapabilityFloat64 = 10, - CapabilityInt64 = 11, - CapabilityInt64Atomics = 12, - CapabilityImageBasic = 13, - CapabilityImageReadWrite = 14, - CapabilityImageMipmap = 15, - CapabilityPipes = 17, - CapabilityGroups = 18, - CapabilityDeviceEnqueue = 19, - CapabilityLiteralSampler = 20, - CapabilityAtomicStorage = 21, - CapabilityInt16 = 22, - CapabilityTessellationPointSize = 23, - CapabilityGeometryPointSize = 24, - CapabilityImageGatherExtended = 25, - CapabilityStorageImageMultisample = 27, - CapabilityUniformBufferArrayDynamicIndexing = 28, - CapabilitySampledImageArrayDynamicIndexing = 29, - CapabilityStorageBufferArrayDynamicIndexing = 30, - 
CapabilityStorageImageArrayDynamicIndexing = 31, - CapabilityClipDistance = 32, - CapabilityCullDistance = 33, - CapabilityImageCubeArray = 34, - CapabilitySampleRateShading = 35, - CapabilityImageRect = 36, - CapabilitySampledRect = 37, - CapabilityGenericPointer = 38, - CapabilityInt8 = 39, - CapabilityInputAttachment = 40, - CapabilitySparseResidency = 41, - CapabilityMinLod = 42, - CapabilitySampled1D = 43, - CapabilityImage1D = 44, - CapabilitySampledCubeArray = 45, - CapabilitySampledBuffer = 46, - CapabilityImageBuffer = 47, - CapabilityImageMSArray = 48, - CapabilityStorageImageExtendedFormats = 49, - CapabilityImageQuery = 50, - CapabilityDerivativeControl = 51, - CapabilityInterpolationFunction = 52, - CapabilityTransformFeedback = 53, - CapabilityGeometryStreams = 54, - CapabilityStorageImageReadWithoutFormat = 55, - CapabilityStorageImageWriteWithoutFormat = 56, - CapabilityMultiViewport = 57, - CapabilitySubgroupDispatch = 58, - CapabilityNamedBarrier = 59, - CapabilityPipeStorage = 60, - CapabilityGroupNonUniform = 61, - CapabilityGroupNonUniformVote = 62, - CapabilityGroupNonUniformArithmetic = 63, - CapabilityGroupNonUniformBallot = 64, - CapabilityGroupNonUniformShuffle = 65, - CapabilityGroupNonUniformShuffleRelative = 66, - CapabilityGroupNonUniformClustered = 67, - CapabilityGroupNonUniformQuad = 68, - CapabilitySubgroupBallotKHR = 4423, - CapabilityDrawParameters = 4427, - CapabilitySubgroupVoteKHR = 4431, - CapabilityStorageBuffer16BitAccess = 4433, - CapabilityStorageUniform16 = 4434, - CapabilityStoragePushConstant16 = 4435, - CapabilityStorageInputOutput16 = 4436, - CapabilityDeviceGroup = 4437, - CapabilityMultiView = 4439, - CapabilityVariablePointersStorageBuffer = 4441, - CapabilityVariablePointers = 4442, - CapabilityAtomicStorageOps = 4445, - CapabilitySampleMaskPostDepthCoverage = 4447, - CapabilityStorageBuffer8BitAccess = 4448, - CapabilityUniformAndStorageBuffer8BitAccess = 4449, - CapabilityStoragePushConstant8 = 4450, - 
CapabilityDenormPreserve = 4464, - CapabilityDenormFlushToZero = 4465, - CapabilitySignedZeroInfNanPreserve = 4466, - CapabilityRoundingModeRTE = 4467, - CapabilityRoundingModeRTZ = 4468, - CapabilityFloat16ImageAMD = 5008, - CapabilityImageGatherBiasLodAMD = 5009, - CapabilityFragmentMaskAMD = 5010, - CapabilityStencilExportEXT = 5013, - CapabilityImageReadWriteLodAMD = 5015, - CapabilitySampleMaskOverrideCoverageNV = 5249, - CapabilityGeometryShaderPassthroughNV = 5251, - CapabilityShaderViewportIndexLayerEXT = 5254, - CapabilityShaderViewportMaskNV = 5255, - CapabilityShaderStereoViewNV = 5259, - CapabilityPerViewAttributesNV = 5260, - CapabilityFragmentFullyCoveredEXT = 5265, - CapabilityMeshShadingNV = 5266, - CapabilityImageFootprintNV = 5282, - CapabilityFragmentBarycentricNV = 5284, - CapabilityComputeDerivativeGroupQuadsNV = 5288, - CapabilityFragmentDensityEXT = 5291, - CapabilityGroupNonUniformPartitionedNV = 5297, - CapabilityShaderNonUniformEXT = 5301, - CapabilityRuntimeDescriptorArrayEXT = 5302, - CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, - CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, - CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, - CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, - CapabilitySampledImageArrayNonUniformIndexingEXT = 5307, - CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, - CapabilityStorageImageArrayNonUniformIndexingEXT = 5309, - CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, - CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, - CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, - CapabilityRayTracingNV = 5340, - CapabilityVulkanMemoryModelKHR = 5345, - CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, - CapabilityPhysicalStorageBufferAddressesEXT = 5347, - CapabilityComputeDerivativeGroupLinearNV = 5350, - CapabilitySubgroupShuffleINTEL = 5568, - CapabilitySubgroupBufferBlockIOINTEL = 5569, - CapabilitySubgroupImageBlockIOINTEL = 
5570, - CapabilitySubgroupImageMediaBlockIOINTEL = 5579, - CapabilityMax = 2147483647, - } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum Op { - OpNop = 0, - OpUndef = 1, - OpSourceContinued = 2, - OpSource = 3, - OpSourceExtension = 4, - OpName = 5, - OpMemberName = 6, - OpString = 7, - OpLine = 8, - OpExtension = 10, - OpExtInstImport = 11, - OpExtInst = 12, - OpMemoryModel = 14, - OpEntryPoint = 15, - OpExecutionMode = 16, - OpCapability = 17, - OpTypeVoid = 19, - OpTypeBool = 20, - OpTypeInt = 21, - OpTypeFloat = 22, - OpTypeVector = 23, - OpTypeMatrix = 24, - OpTypeImage = 25, - OpTypeSampler = 26, - OpTypeSampledImage = 27, - OpTypeArray = 28, - OpTypeRuntimeArray = 29, - OpTypeStruct = 30, - OpTypeOpaque = 31, - OpTypePointer = 32, - OpTypeFunction = 33, - OpTypeEvent = 34, - OpTypeDeviceEvent = 35, - OpTypeReserveId = 36, - OpTypeQueue = 37, - OpTypePipe = 38, - OpTypeForwardPointer = 39, - OpConstantTrue = 41, - OpConstantFalse = 42, - OpConstant = 43, - OpConstantComposite = 44, - OpConstantSampler = 45, - OpConstantNull = 46, - OpSpecConstantTrue = 48, - OpSpecConstantFalse = 49, - OpSpecConstant = 50, - OpSpecConstantComposite = 51, - OpSpecConstantOp = 52, - OpFunction = 54, - OpFunctionParameter = 55, - OpFunctionEnd = 56, - OpFunctionCall = 57, - OpVariable = 59, - OpImageTexelPointer = 60, - OpLoad = 61, - OpStore = 62, - OpCopyMemory = 63, - OpCopyMemorySized = 64, - OpAccessChain = 65, - OpInBoundsAccessChain = 66, - OpPtrAccessChain = 67, - OpArrayLength = 68, - OpGenericPtrMemSemantics = 69, - OpInBoundsPtrAccessChain = 70, - OpDecorate = 71, - OpMemberDecorate = 72, - OpDecorationGroup = 73, - OpGroupDecorate = 74, - OpGroupMemberDecorate = 75, - OpVectorExtractDynamic = 77, - OpVectorInsertDynamic = 78, - OpVectorShuffle = 79, - OpCompositeConstruct = 80, - OpCompositeExtract = 81, - OpCompositeInsert = 82, - OpCopyObject = 83, - OpTranspose = 84, - OpSampledImage = 86, - OpImageSampleImplicitLod = 87, - 
OpImageSampleExplicitLod = 88, - OpImageSampleDrefImplicitLod = 89, - OpImageSampleDrefExplicitLod = 90, - OpImageSampleProjImplicitLod = 91, - OpImageSampleProjExplicitLod = 92, - OpImageSampleProjDrefImplicitLod = 93, - OpImageSampleProjDrefExplicitLod = 94, - OpImageFetch = 95, - OpImageGather = 96, - OpImageDrefGather = 97, - OpImageRead = 98, - OpImageWrite = 99, - OpImage = 100, - OpImageQueryFormat = 101, - OpImageQueryOrder = 102, - OpImageQuerySizeLod = 103, - OpImageQuerySize = 104, - OpImageQueryLod = 105, - OpImageQueryLevels = 106, - OpImageQuerySamples = 107, - OpConvertFToU = 109, - OpConvertFToS = 110, - OpConvertSToF = 111, - OpConvertUToF = 112, - OpUConvert = 113, - OpSConvert = 114, - OpFConvert = 115, - OpQuantizeToF16 = 116, - OpConvertPtrToU = 117, - OpSatConvertSToU = 118, - OpSatConvertUToS = 119, - OpConvertUToPtr = 120, - OpPtrCastToGeneric = 121, - OpGenericCastToPtr = 122, - OpGenericCastToPtrExplicit = 123, - OpBitcast = 124, - OpSNegate = 126, - OpFNegate = 127, - OpIAdd = 128, - OpFAdd = 129, - OpISub = 130, - OpFSub = 131, - OpIMul = 132, - OpFMul = 133, - OpUDiv = 134, - OpSDiv = 135, - OpFDiv = 136, - OpUMod = 137, - OpSRem = 138, - OpSMod = 139, - OpFRem = 140, - OpFMod = 141, - OpVectorTimesScalar = 142, - OpMatrixTimesScalar = 143, - OpVectorTimesMatrix = 144, - OpMatrixTimesVector = 145, - OpMatrixTimesMatrix = 146, - OpOuterProduct = 147, - OpDot = 148, - OpIAddCarry = 149, - OpISubBorrow = 150, - OpUMulExtended = 151, - OpSMulExtended = 152, - OpAny = 154, - OpAll = 155, - OpIsNan = 156, - OpIsInf = 157, - OpIsFinite = 158, - OpIsNormal = 159, - OpSignBitSet = 160, - OpLessOrGreater = 161, - OpOrdered = 162, - OpUnordered = 163, - OpLogicalEqual = 164, - OpLogicalNotEqual = 165, - OpLogicalOr = 166, - OpLogicalAnd = 167, - OpLogicalNot = 168, - OpSelect = 169, - OpIEqual = 170, - OpINotEqual = 171, - OpUGreaterThan = 172, - OpSGreaterThan = 173, - OpUGreaterThanEqual = 174, - OpSGreaterThanEqual = 175, - OpULessThan = 176, - 
OpSLessThan = 177, - OpULessThanEqual = 178, - OpSLessThanEqual = 179, - OpFOrdEqual = 180, - OpFUnordEqual = 181, - OpFOrdNotEqual = 182, - OpFUnordNotEqual = 183, - OpFOrdLessThan = 184, - OpFUnordLessThan = 185, - OpFOrdGreaterThan = 186, - OpFUnordGreaterThan = 187, - OpFOrdLessThanEqual = 188, - OpFUnordLessThanEqual = 189, - OpFOrdGreaterThanEqual = 190, - OpFUnordGreaterThanEqual = 191, - OpShiftRightLogical = 194, - OpShiftRightArithmetic = 195, - OpShiftLeftLogical = 196, - OpBitwiseOr = 197, - OpBitwiseXor = 198, - OpBitwiseAnd = 199, - OpNot = 200, - OpBitFieldInsert = 201, - OpBitFieldSExtract = 202, - OpBitFieldUExtract = 203, - OpBitReverse = 204, - OpBitCount = 205, - OpDPdx = 207, - OpDPdy = 208, - OpFwidth = 209, - OpDPdxFine = 210, - OpDPdyFine = 211, - OpFwidthFine = 212, - OpDPdxCoarse = 213, - OpDPdyCoarse = 214, - OpFwidthCoarse = 215, - OpEmitVertex = 218, - OpEndPrimitive = 219, - OpEmitStreamVertex = 220, - OpEndStreamPrimitive = 221, - OpControlBarrier = 224, - OpMemoryBarrier = 225, - OpAtomicLoad = 227, - OpAtomicStore = 228, - OpAtomicExchange = 229, - OpAtomicCompareExchange = 230, - OpAtomicCompareExchangeWeak = 231, - OpAtomicIIncrement = 232, - OpAtomicIDecrement = 233, - OpAtomicIAdd = 234, - OpAtomicISub = 235, - OpAtomicSMin = 236, - OpAtomicUMin = 237, - OpAtomicSMax = 238, - OpAtomicUMax = 239, - OpAtomicAnd = 240, - OpAtomicOr = 241, - OpAtomicXor = 242, - OpPhi = 245, - OpLoopMerge = 246, - OpSelectionMerge = 247, - OpLabel = 248, - OpBranch = 249, - OpBranchConditional = 250, - OpSwitch = 251, - OpKill = 252, - OpReturn = 253, - OpReturnValue = 254, - OpUnreachable = 255, - OpLifetimeStart = 256, - OpLifetimeStop = 257, - OpGroupAsyncCopy = 259, - OpGroupWaitEvents = 260, - OpGroupAll = 261, - OpGroupAny = 262, - OpGroupBroadcast = 263, - OpGroupIAdd = 264, - OpGroupFAdd = 265, - OpGroupFMin = 266, - OpGroupUMin = 267, - OpGroupSMin = 268, - OpGroupFMax = 269, - OpGroupUMax = 270, - OpGroupSMax = 271, - OpReadPipe = 274, - 
OpWritePipe = 275, - OpReservedReadPipe = 276, - OpReservedWritePipe = 277, - OpReserveReadPipePackets = 278, - OpReserveWritePipePackets = 279, - OpCommitReadPipe = 280, - OpCommitWritePipe = 281, - OpIsValidReserveId = 282, - OpGetNumPipePackets = 283, - OpGetMaxPipePackets = 284, - OpGroupReserveReadPipePackets = 285, - OpGroupReserveWritePipePackets = 286, - OpGroupCommitReadPipe = 287, - OpGroupCommitWritePipe = 288, - OpEnqueueMarker = 291, - OpEnqueueKernel = 292, - OpGetKernelNDrangeSubGroupCount = 293, - OpGetKernelNDrangeMaxSubGroupSize = 294, - OpGetKernelWorkGroupSize = 295, - OpGetKernelPreferredWorkGroupSizeMultiple = 296, - OpRetainEvent = 297, - OpReleaseEvent = 298, - OpCreateUserEvent = 299, - OpIsValidEvent = 300, - OpSetUserEventStatus = 301, - OpCaptureEventProfilingInfo = 302, - OpGetDefaultQueue = 303, - OpBuildNDRange = 304, - OpImageSparseSampleImplicitLod = 305, - OpImageSparseSampleExplicitLod = 306, - OpImageSparseSampleDrefImplicitLod = 307, - OpImageSparseSampleDrefExplicitLod = 308, - OpImageSparseSampleProjImplicitLod = 309, - OpImageSparseSampleProjExplicitLod = 310, - OpImageSparseSampleProjDrefImplicitLod = 311, - OpImageSparseSampleProjDrefExplicitLod = 312, - OpImageSparseFetch = 313, - OpImageSparseGather = 314, - OpImageSparseDrefGather = 315, - OpImageSparseTexelsResident = 316, - OpNoLine = 317, - OpAtomicFlagTestAndSet = 318, - OpAtomicFlagClear = 319, - OpImageSparseRead = 320, - OpSizeOf = 321, - OpTypePipeStorage = 322, - OpConstantPipeStorage = 323, - OpCreatePipeFromPipeStorage = 324, - OpGetKernelLocalSizeForSubgroupCount = 325, - OpGetKernelMaxNumSubgroups = 326, - OpTypeNamedBarrier = 327, - OpNamedBarrierInitialize = 328, - OpMemoryNamedBarrier = 329, - OpModuleProcessed = 330, - OpExecutionModeId = 331, - OpDecorateId = 332, - OpGroupNonUniformElect = 333, - OpGroupNonUniformAll = 334, - OpGroupNonUniformAny = 335, - OpGroupNonUniformAllEqual = 336, - OpGroupNonUniformBroadcast = 337, - 
OpGroupNonUniformBroadcastFirst = 338, - OpGroupNonUniformBallot = 339, - OpGroupNonUniformInverseBallot = 340, - OpGroupNonUniformBallotBitExtract = 341, - OpGroupNonUniformBallotBitCount = 342, - OpGroupNonUniformBallotFindLSB = 343, - OpGroupNonUniformBallotFindMSB = 344, - OpGroupNonUniformShuffle = 345, - OpGroupNonUniformShuffleXor = 346, - OpGroupNonUniformShuffleUp = 347, - OpGroupNonUniformShuffleDown = 348, - OpGroupNonUniformIAdd = 349, - OpGroupNonUniformFAdd = 350, - OpGroupNonUniformIMul = 351, - OpGroupNonUniformFMul = 352, - OpGroupNonUniformSMin = 353, - OpGroupNonUniformUMin = 354, - OpGroupNonUniformFMin = 355, - OpGroupNonUniformSMax = 356, - OpGroupNonUniformUMax = 357, - OpGroupNonUniformFMax = 358, - OpGroupNonUniformBitwiseAnd = 359, - OpGroupNonUniformBitwiseOr = 360, - OpGroupNonUniformBitwiseXor = 361, - OpGroupNonUniformLogicalAnd = 362, - OpGroupNonUniformLogicalOr = 363, - OpGroupNonUniformLogicalXor = 364, - OpGroupNonUniformQuadBroadcast = 365, - OpGroupNonUniformQuadSwap = 366, - OpSubgroupBallotKHR = 4421, - OpSubgroupFirstInvocationKHR = 4422, - OpSubgroupAllKHR = 4428, - OpSubgroupAnyKHR = 4429, - OpSubgroupAllEqualKHR = 4430, - OpSubgroupReadInvocationKHR = 4432, - OpGroupIAddNonUniformAMD = 5000, - OpGroupFAddNonUniformAMD = 5001, - OpGroupFMinNonUniformAMD = 5002, - OpGroupUMinNonUniformAMD = 5003, - OpGroupSMinNonUniformAMD = 5004, - OpGroupFMaxNonUniformAMD = 5005, - OpGroupUMaxNonUniformAMD = 5006, - OpGroupSMaxNonUniformAMD = 5007, - OpFragmentMaskFetchAMD = 5011, - OpFragmentFetchAMD = 5012, - OpImageSampleFootprintNV = 5283, - OpGroupNonUniformPartitionNV = 5296, - OpWritePackedPrimitiveIndices4x8NV = 5299, - OpReportIntersectionNV = 5334, - OpIgnoreIntersectionNV = 5335, - OpTerminateRayNV = 5336, - OpTraceNV = 5337, - OpTypeAccelerationStructureNV = 5341, - OpExecuteCallableNV = 5344, - OpSubgroupShuffleINTEL = 5571, - OpSubgroupShuffleDownINTEL = 5572, - OpSubgroupShuffleUpINTEL = 5573, - OpSubgroupShuffleXorINTEL = 
5574, - OpSubgroupBlockReadINTEL = 5575, - OpSubgroupBlockWriteINTEL = 5576, - OpSubgroupImageBlockReadINTEL = 5577, - OpSubgroupImageBlockWriteINTEL = 5578, - OpSubgroupImageMediaBlockReadINTEL = 5580, - OpSubgroupImageMediaBlockWriteINTEL = 5581, - OpDecorateStringGOOGLE = 5632, - OpMemberDecorateStringGOOGLE = 5633, - OpMax = 2147483647, - } + pub struct KernelProfilingInfoMask(pub u32); + pub const Capability_CapabilityMatrix: root::spv::Capability = 0; + pub const Capability_CapabilityShader: root::spv::Capability = 1; + pub const Capability_CapabilityGeometry: root::spv::Capability = 2; + pub const Capability_CapabilityTessellation: root::spv::Capability = 3; + pub const Capability_CapabilityAddresses: root::spv::Capability = 4; + pub const Capability_CapabilityLinkage: root::spv::Capability = 5; + pub const Capability_CapabilityKernel: root::spv::Capability = 6; + pub const Capability_CapabilityVector16: root::spv::Capability = 7; + pub const Capability_CapabilityFloat16Buffer: root::spv::Capability = 8; + pub const Capability_CapabilityFloat16: root::spv::Capability = 9; + pub const Capability_CapabilityFloat64: root::spv::Capability = 10; + pub const Capability_CapabilityInt64: root::spv::Capability = 11; + pub const Capability_CapabilityInt64Atomics: root::spv::Capability = 12; + pub const Capability_CapabilityImageBasic: root::spv::Capability = 13; + pub const Capability_CapabilityImageReadWrite: root::spv::Capability = 14; + pub const Capability_CapabilityImageMipmap: root::spv::Capability = 15; + pub const Capability_CapabilityPipes: root::spv::Capability = 17; + pub const Capability_CapabilityGroups: root::spv::Capability = 18; + pub const Capability_CapabilityDeviceEnqueue: root::spv::Capability = 19; + pub const Capability_CapabilityLiteralSampler: root::spv::Capability = 20; + pub const Capability_CapabilityAtomicStorage: root::spv::Capability = 21; + pub const Capability_CapabilityInt16: root::spv::Capability = 22; + pub const 
Capability_CapabilityTessellationPointSize: root::spv::Capability = 23; + pub const Capability_CapabilityGeometryPointSize: root::spv::Capability = 24; + pub const Capability_CapabilityImageGatherExtended: root::spv::Capability = 25; + pub const Capability_CapabilityStorageImageMultisample: root::spv::Capability = 27; + pub const Capability_CapabilityUniformBufferArrayDynamicIndexing: root::spv::Capability = + 28; + pub const Capability_CapabilitySampledImageArrayDynamicIndexing: root::spv::Capability = 29; + pub const Capability_CapabilityStorageBufferArrayDynamicIndexing: root::spv::Capability = + 30; + pub const Capability_CapabilityStorageImageArrayDynamicIndexing: root::spv::Capability = 31; + pub const Capability_CapabilityClipDistance: root::spv::Capability = 32; + pub const Capability_CapabilityCullDistance: root::spv::Capability = 33; + pub const Capability_CapabilityImageCubeArray: root::spv::Capability = 34; + pub const Capability_CapabilitySampleRateShading: root::spv::Capability = 35; + pub const Capability_CapabilityImageRect: root::spv::Capability = 36; + pub const Capability_CapabilitySampledRect: root::spv::Capability = 37; + pub const Capability_CapabilityGenericPointer: root::spv::Capability = 38; + pub const Capability_CapabilityInt8: root::spv::Capability = 39; + pub const Capability_CapabilityInputAttachment: root::spv::Capability = 40; + pub const Capability_CapabilitySparseResidency: root::spv::Capability = 41; + pub const Capability_CapabilityMinLod: root::spv::Capability = 42; + pub const Capability_CapabilitySampled1D: root::spv::Capability = 43; + pub const Capability_CapabilityImage1D: root::spv::Capability = 44; + pub const Capability_CapabilitySampledCubeArray: root::spv::Capability = 45; + pub const Capability_CapabilitySampledBuffer: root::spv::Capability = 46; + pub const Capability_CapabilityImageBuffer: root::spv::Capability = 47; + pub const Capability_CapabilityImageMSArray: root::spv::Capability = 48; + pub const 
Capability_CapabilityStorageImageExtendedFormats: root::spv::Capability = 49; + pub const Capability_CapabilityImageQuery: root::spv::Capability = 50; + pub const Capability_CapabilityDerivativeControl: root::spv::Capability = 51; + pub const Capability_CapabilityInterpolationFunction: root::spv::Capability = 52; + pub const Capability_CapabilityTransformFeedback: root::spv::Capability = 53; + pub const Capability_CapabilityGeometryStreams: root::spv::Capability = 54; + pub const Capability_CapabilityStorageImageReadWithoutFormat: root::spv::Capability = 55; + pub const Capability_CapabilityStorageImageWriteWithoutFormat: root::spv::Capability = 56; + pub const Capability_CapabilityMultiViewport: root::spv::Capability = 57; + pub const Capability_CapabilitySubgroupDispatch: root::spv::Capability = 58; + pub const Capability_CapabilityNamedBarrier: root::spv::Capability = 59; + pub const Capability_CapabilityPipeStorage: root::spv::Capability = 60; + pub const Capability_CapabilityGroupNonUniform: root::spv::Capability = 61; + pub const Capability_CapabilityGroupNonUniformVote: root::spv::Capability = 62; + pub const Capability_CapabilityGroupNonUniformArithmetic: root::spv::Capability = 63; + pub const Capability_CapabilityGroupNonUniformBallot: root::spv::Capability = 64; + pub const Capability_CapabilityGroupNonUniformShuffle: root::spv::Capability = 65; + pub const Capability_CapabilityGroupNonUniformShuffleRelative: root::spv::Capability = 66; + pub const Capability_CapabilityGroupNonUniformClustered: root::spv::Capability = 67; + pub const Capability_CapabilityGroupNonUniformQuad: root::spv::Capability = 68; + pub const Capability_CapabilityShaderLayer: root::spv::Capability = 69; + pub const Capability_CapabilityShaderViewportIndex: root::spv::Capability = 70; + pub const Capability_CapabilitySubgroupBallotKHR: root::spv::Capability = 4423; + pub const Capability_CapabilityDrawParameters: root::spv::Capability = 4427; + pub const 
Capability_CapabilitySubgroupVoteKHR: root::spv::Capability = 4431; + pub const Capability_CapabilityStorageBuffer16BitAccess: root::spv::Capability = 4433; + pub const Capability_CapabilityStorageUniformBufferBlock16: root::spv::Capability = 4433; + pub const Capability_CapabilityStorageUniform16: root::spv::Capability = 4434; + pub const Capability_CapabilityUniformAndStorageBuffer16BitAccess: root::spv::Capability = + 4434; + pub const Capability_CapabilityStoragePushConstant16: root::spv::Capability = 4435; + pub const Capability_CapabilityStorageInputOutput16: root::spv::Capability = 4436; + pub const Capability_CapabilityDeviceGroup: root::spv::Capability = 4437; + pub const Capability_CapabilityMultiView: root::spv::Capability = 4439; + pub const Capability_CapabilityVariablePointersStorageBuffer: root::spv::Capability = 4441; + pub const Capability_CapabilityVariablePointers: root::spv::Capability = 4442; + pub const Capability_CapabilityAtomicStorageOps: root::spv::Capability = 4445; + pub const Capability_CapabilitySampleMaskPostDepthCoverage: root::spv::Capability = 4447; + pub const Capability_CapabilityStorageBuffer8BitAccess: root::spv::Capability = 4448; + pub const Capability_CapabilityUniformAndStorageBuffer8BitAccess: root::spv::Capability = + 4449; + pub const Capability_CapabilityStoragePushConstant8: root::spv::Capability = 4450; + pub const Capability_CapabilityDenormPreserve: root::spv::Capability = 4464; + pub const Capability_CapabilityDenormFlushToZero: root::spv::Capability = 4465; + pub const Capability_CapabilitySignedZeroInfNanPreserve: root::spv::Capability = 4466; + pub const Capability_CapabilityRoundingModeRTE: root::spv::Capability = 4467; + pub const Capability_CapabilityRoundingModeRTZ: root::spv::Capability = 4468; + pub const Capability_CapabilityFloat16ImageAMD: root::spv::Capability = 5008; + pub const Capability_CapabilityImageGatherBiasLodAMD: root::spv::Capability = 5009; + pub const Capability_CapabilityFragmentMaskAMD: 
root::spv::Capability = 5010; + pub const Capability_CapabilityStencilExportEXT: root::spv::Capability = 5013; + pub const Capability_CapabilityImageReadWriteLodAMD: root::spv::Capability = 5015; + pub const Capability_CapabilityShaderClockKHR: root::spv::Capability = 5055; + pub const Capability_CapabilitySampleMaskOverrideCoverageNV: root::spv::Capability = 5249; + pub const Capability_CapabilityGeometryShaderPassthroughNV: root::spv::Capability = 5251; + pub const Capability_CapabilityShaderViewportIndexLayerEXT: root::spv::Capability = 5254; + pub const Capability_CapabilityShaderViewportIndexLayerNV: root::spv::Capability = 5254; + pub const Capability_CapabilityShaderViewportMaskNV: root::spv::Capability = 5255; + pub const Capability_CapabilityShaderStereoViewNV: root::spv::Capability = 5259; + pub const Capability_CapabilityPerViewAttributesNV: root::spv::Capability = 5260; + pub const Capability_CapabilityFragmentFullyCoveredEXT: root::spv::Capability = 5265; + pub const Capability_CapabilityMeshShadingNV: root::spv::Capability = 5266; + pub const Capability_CapabilityImageFootprintNV: root::spv::Capability = 5282; + pub const Capability_CapabilityFragmentBarycentricNV: root::spv::Capability = 5284; + pub const Capability_CapabilityComputeDerivativeGroupQuadsNV: root::spv::Capability = 5288; + pub const Capability_CapabilityFragmentDensityEXT: root::spv::Capability = 5291; + pub const Capability_CapabilityShadingRateNV: root::spv::Capability = 5291; + pub const Capability_CapabilityGroupNonUniformPartitionedNV: root::spv::Capability = 5297; + pub const Capability_CapabilityShaderNonUniform: root::spv::Capability = 5301; + pub const Capability_CapabilityShaderNonUniformEXT: root::spv::Capability = 5301; + pub const Capability_CapabilityRuntimeDescriptorArray: root::spv::Capability = 5302; + pub const Capability_CapabilityRuntimeDescriptorArrayEXT: root::spv::Capability = 5302; + pub const Capability_CapabilityInputAttachmentArrayDynamicIndexing: 
root::spv::Capability = + 5303; + pub const Capability_CapabilityInputAttachmentArrayDynamicIndexingEXT: + root::spv::Capability = 5303; + pub const Capability_CapabilityUniformTexelBufferArrayDynamicIndexing: + root::spv::Capability = 5304; + pub const Capability_CapabilityUniformTexelBufferArrayDynamicIndexingEXT: + root::spv::Capability = 5304; + pub const Capability_CapabilityStorageTexelBufferArrayDynamicIndexing: + root::spv::Capability = 5305; + pub const Capability_CapabilityStorageTexelBufferArrayDynamicIndexingEXT: + root::spv::Capability = 5305; + pub const Capability_CapabilityUniformBufferArrayNonUniformIndexing: root::spv::Capability = + 5306; + pub const Capability_CapabilityUniformBufferArrayNonUniformIndexingEXT: + root::spv::Capability = 5306; + pub const Capability_CapabilitySampledImageArrayNonUniformIndexing: root::spv::Capability = + 5307; + pub const Capability_CapabilitySampledImageArrayNonUniformIndexingEXT: + root::spv::Capability = 5307; + pub const Capability_CapabilityStorageBufferArrayNonUniformIndexing: root::spv::Capability = + 5308; + pub const Capability_CapabilityStorageBufferArrayNonUniformIndexingEXT: + root::spv::Capability = 5308; + pub const Capability_CapabilityStorageImageArrayNonUniformIndexing: root::spv::Capability = + 5309; + pub const Capability_CapabilityStorageImageArrayNonUniformIndexingEXT: + root::spv::Capability = 5309; + pub const Capability_CapabilityInputAttachmentArrayNonUniformIndexing: + root::spv::Capability = 5310; + pub const Capability_CapabilityInputAttachmentArrayNonUniformIndexingEXT: + root::spv::Capability = 5310; + pub const Capability_CapabilityUniformTexelBufferArrayNonUniformIndexing: + root::spv::Capability = 5311; + pub const Capability_CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: + root::spv::Capability = 5311; + pub const Capability_CapabilityStorageTexelBufferArrayNonUniformIndexing: + root::spv::Capability = 5312; + pub const 
Capability_CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: + root::spv::Capability = 5312; + pub const Capability_CapabilityRayTracingNV: root::spv::Capability = 5340; + pub const Capability_CapabilityVulkanMemoryModel: root::spv::Capability = 5345; + pub const Capability_CapabilityVulkanMemoryModelKHR: root::spv::Capability = 5345; + pub const Capability_CapabilityVulkanMemoryModelDeviceScope: root::spv::Capability = 5346; + pub const Capability_CapabilityVulkanMemoryModelDeviceScopeKHR: root::spv::Capability = + 5346; + pub const Capability_CapabilityPhysicalStorageBufferAddresses: root::spv::Capability = 5347; + pub const Capability_CapabilityPhysicalStorageBufferAddressesEXT: root::spv::Capability = + 5347; + pub const Capability_CapabilityComputeDerivativeGroupLinearNV: root::spv::Capability = 5350; + pub const Capability_CapabilityCooperativeMatrixNV: root::spv::Capability = 5357; + pub const Capability_CapabilityFragmentShaderSampleInterlockEXT: root::spv::Capability = + 5363; + pub const Capability_CapabilityFragmentShaderShadingRateInterlockEXT: + root::spv::Capability = 5372; + pub const Capability_CapabilityShaderSMBuiltinsNV: root::spv::Capability = 5373; + pub const Capability_CapabilityFragmentShaderPixelInterlockEXT: root::spv::Capability = + 5378; + pub const Capability_CapabilityDemoteToHelperInvocationEXT: root::spv::Capability = 5379; + pub const Capability_CapabilitySubgroupShuffleINTEL: root::spv::Capability = 5568; + pub const Capability_CapabilitySubgroupBufferBlockIOINTEL: root::spv::Capability = 5569; + pub const Capability_CapabilitySubgroupImageBlockIOINTEL: root::spv::Capability = 5570; + pub const Capability_CapabilitySubgroupImageMediaBlockIOINTEL: root::spv::Capability = 5579; + pub const Capability_CapabilityIntegerFunctions2INTEL: root::spv::Capability = 5584; + pub const Capability_CapabilitySubgroupAvcMotionEstimationINTEL: root::spv::Capability = + 5696; + pub const 
Capability_CapabilitySubgroupAvcMotionEstimationIntraINTEL: + root::spv::Capability = 5697; + pub const Capability_CapabilitySubgroupAvcMotionEstimationChromaINTEL: + root::spv::Capability = 5698; + pub const Capability_CapabilityMax: root::spv::Capability = 2147483647; + pub type Capability = u32; + pub const Op_OpNop: root::spv::Op = 0; + pub const Op_OpUndef: root::spv::Op = 1; + pub const Op_OpSourceContinued: root::spv::Op = 2; + pub const Op_OpSource: root::spv::Op = 3; + pub const Op_OpSourceExtension: root::spv::Op = 4; + pub const Op_OpName: root::spv::Op = 5; + pub const Op_OpMemberName: root::spv::Op = 6; + pub const Op_OpString: root::spv::Op = 7; + pub const Op_OpLine: root::spv::Op = 8; + pub const Op_OpExtension: root::spv::Op = 10; + pub const Op_OpExtInstImport: root::spv::Op = 11; + pub const Op_OpExtInst: root::spv::Op = 12; + pub const Op_OpMemoryModel: root::spv::Op = 14; + pub const Op_OpEntryPoint: root::spv::Op = 15; + pub const Op_OpExecutionMode: root::spv::Op = 16; + pub const Op_OpCapability: root::spv::Op = 17; + pub const Op_OpTypeVoid: root::spv::Op = 19; + pub const Op_OpTypeBool: root::spv::Op = 20; + pub const Op_OpTypeInt: root::spv::Op = 21; + pub const Op_OpTypeFloat: root::spv::Op = 22; + pub const Op_OpTypeVector: root::spv::Op = 23; + pub const Op_OpTypeMatrix: root::spv::Op = 24; + pub const Op_OpTypeImage: root::spv::Op = 25; + pub const Op_OpTypeSampler: root::spv::Op = 26; + pub const Op_OpTypeSampledImage: root::spv::Op = 27; + pub const Op_OpTypeArray: root::spv::Op = 28; + pub const Op_OpTypeRuntimeArray: root::spv::Op = 29; + pub const Op_OpTypeStruct: root::spv::Op = 30; + pub const Op_OpTypeOpaque: root::spv::Op = 31; + pub const Op_OpTypePointer: root::spv::Op = 32; + pub const Op_OpTypeFunction: root::spv::Op = 33; + pub const Op_OpTypeEvent: root::spv::Op = 34; + pub const Op_OpTypeDeviceEvent: root::spv::Op = 35; + pub const Op_OpTypeReserveId: root::spv::Op = 36; + pub const Op_OpTypeQueue: root::spv::Op = 37; 
+ pub const Op_OpTypePipe: root::spv::Op = 38; + pub const Op_OpTypeForwardPointer: root::spv::Op = 39; + pub const Op_OpConstantTrue: root::spv::Op = 41; + pub const Op_OpConstantFalse: root::spv::Op = 42; + pub const Op_OpConstant: root::spv::Op = 43; + pub const Op_OpConstantComposite: root::spv::Op = 44; + pub const Op_OpConstantSampler: root::spv::Op = 45; + pub const Op_OpConstantNull: root::spv::Op = 46; + pub const Op_OpSpecConstantTrue: root::spv::Op = 48; + pub const Op_OpSpecConstantFalse: root::spv::Op = 49; + pub const Op_OpSpecConstant: root::spv::Op = 50; + pub const Op_OpSpecConstantComposite: root::spv::Op = 51; + pub const Op_OpSpecConstantOp: root::spv::Op = 52; + pub const Op_OpFunction: root::spv::Op = 54; + pub const Op_OpFunctionParameter: root::spv::Op = 55; + pub const Op_OpFunctionEnd: root::spv::Op = 56; + pub const Op_OpFunctionCall: root::spv::Op = 57; + pub const Op_OpVariable: root::spv::Op = 59; + pub const Op_OpImageTexelPointer: root::spv::Op = 60; + pub const Op_OpLoad: root::spv::Op = 61; + pub const Op_OpStore: root::spv::Op = 62; + pub const Op_OpCopyMemory: root::spv::Op = 63; + pub const Op_OpCopyMemorySized: root::spv::Op = 64; + pub const Op_OpAccessChain: root::spv::Op = 65; + pub const Op_OpInBoundsAccessChain: root::spv::Op = 66; + pub const Op_OpPtrAccessChain: root::spv::Op = 67; + pub const Op_OpArrayLength: root::spv::Op = 68; + pub const Op_OpGenericPtrMemSemantics: root::spv::Op = 69; + pub const Op_OpInBoundsPtrAccessChain: root::spv::Op = 70; + pub const Op_OpDecorate: root::spv::Op = 71; + pub const Op_OpMemberDecorate: root::spv::Op = 72; + pub const Op_OpDecorationGroup: root::spv::Op = 73; + pub const Op_OpGroupDecorate: root::spv::Op = 74; + pub const Op_OpGroupMemberDecorate: root::spv::Op = 75; + pub const Op_OpVectorExtractDynamic: root::spv::Op = 77; + pub const Op_OpVectorInsertDynamic: root::spv::Op = 78; + pub const Op_OpVectorShuffle: root::spv::Op = 79; + pub const Op_OpCompositeConstruct: 
root::spv::Op = 80; + pub const Op_OpCompositeExtract: root::spv::Op = 81; + pub const Op_OpCompositeInsert: root::spv::Op = 82; + pub const Op_OpCopyObject: root::spv::Op = 83; + pub const Op_OpTranspose: root::spv::Op = 84; + pub const Op_OpSampledImage: root::spv::Op = 86; + pub const Op_OpImageSampleImplicitLod: root::spv::Op = 87; + pub const Op_OpImageSampleExplicitLod: root::spv::Op = 88; + pub const Op_OpImageSampleDrefImplicitLod: root::spv::Op = 89; + pub const Op_OpImageSampleDrefExplicitLod: root::spv::Op = 90; + pub const Op_OpImageSampleProjImplicitLod: root::spv::Op = 91; + pub const Op_OpImageSampleProjExplicitLod: root::spv::Op = 92; + pub const Op_OpImageSampleProjDrefImplicitLod: root::spv::Op = 93; + pub const Op_OpImageSampleProjDrefExplicitLod: root::spv::Op = 94; + pub const Op_OpImageFetch: root::spv::Op = 95; + pub const Op_OpImageGather: root::spv::Op = 96; + pub const Op_OpImageDrefGather: root::spv::Op = 97; + pub const Op_OpImageRead: root::spv::Op = 98; + pub const Op_OpImageWrite: root::spv::Op = 99; + pub const Op_OpImage: root::spv::Op = 100; + pub const Op_OpImageQueryFormat: root::spv::Op = 101; + pub const Op_OpImageQueryOrder: root::spv::Op = 102; + pub const Op_OpImageQuerySizeLod: root::spv::Op = 103; + pub const Op_OpImageQuerySize: root::spv::Op = 104; + pub const Op_OpImageQueryLod: root::spv::Op = 105; + pub const Op_OpImageQueryLevels: root::spv::Op = 106; + pub const Op_OpImageQuerySamples: root::spv::Op = 107; + pub const Op_OpConvertFToU: root::spv::Op = 109; + pub const Op_OpConvertFToS: root::spv::Op = 110; + pub const Op_OpConvertSToF: root::spv::Op = 111; + pub const Op_OpConvertUToF: root::spv::Op = 112; + pub const Op_OpUConvert: root::spv::Op = 113; + pub const Op_OpSConvert: root::spv::Op = 114; + pub const Op_OpFConvert: root::spv::Op = 115; + pub const Op_OpQuantizeToF16: root::spv::Op = 116; + pub const Op_OpConvertPtrToU: root::spv::Op = 117; + pub const Op_OpSatConvertSToU: root::spv::Op = 118; + pub const 
Op_OpSatConvertUToS: root::spv::Op = 119; + pub const Op_OpConvertUToPtr: root::spv::Op = 120; + pub const Op_OpPtrCastToGeneric: root::spv::Op = 121; + pub const Op_OpGenericCastToPtr: root::spv::Op = 122; + pub const Op_OpGenericCastToPtrExplicit: root::spv::Op = 123; + pub const Op_OpBitcast: root::spv::Op = 124; + pub const Op_OpSNegate: root::spv::Op = 126; + pub const Op_OpFNegate: root::spv::Op = 127; + pub const Op_OpIAdd: root::spv::Op = 128; + pub const Op_OpFAdd: root::spv::Op = 129; + pub const Op_OpISub: root::spv::Op = 130; + pub const Op_OpFSub: root::spv::Op = 131; + pub const Op_OpIMul: root::spv::Op = 132; + pub const Op_OpFMul: root::spv::Op = 133; + pub const Op_OpUDiv: root::spv::Op = 134; + pub const Op_OpSDiv: root::spv::Op = 135; + pub const Op_OpFDiv: root::spv::Op = 136; + pub const Op_OpUMod: root::spv::Op = 137; + pub const Op_OpSRem: root::spv::Op = 138; + pub const Op_OpSMod: root::spv::Op = 139; + pub const Op_OpFRem: root::spv::Op = 140; + pub const Op_OpFMod: root::spv::Op = 141; + pub const Op_OpVectorTimesScalar: root::spv::Op = 142; + pub const Op_OpMatrixTimesScalar: root::spv::Op = 143; + pub const Op_OpVectorTimesMatrix: root::spv::Op = 144; + pub const Op_OpMatrixTimesVector: root::spv::Op = 145; + pub const Op_OpMatrixTimesMatrix: root::spv::Op = 146; + pub const Op_OpOuterProduct: root::spv::Op = 147; + pub const Op_OpDot: root::spv::Op = 148; + pub const Op_OpIAddCarry: root::spv::Op = 149; + pub const Op_OpISubBorrow: root::spv::Op = 150; + pub const Op_OpUMulExtended: root::spv::Op = 151; + pub const Op_OpSMulExtended: root::spv::Op = 152; + pub const Op_OpAny: root::spv::Op = 154; + pub const Op_OpAll: root::spv::Op = 155; + pub const Op_OpIsNan: root::spv::Op = 156; + pub const Op_OpIsInf: root::spv::Op = 157; + pub const Op_OpIsFinite: root::spv::Op = 158; + pub const Op_OpIsNormal: root::spv::Op = 159; + pub const Op_OpSignBitSet: root::spv::Op = 160; + pub const Op_OpLessOrGreater: root::spv::Op = 161; + pub const 
Op_OpOrdered: root::spv::Op = 162; + pub const Op_OpUnordered: root::spv::Op = 163; + pub const Op_OpLogicalEqual: root::spv::Op = 164; + pub const Op_OpLogicalNotEqual: root::spv::Op = 165; + pub const Op_OpLogicalOr: root::spv::Op = 166; + pub const Op_OpLogicalAnd: root::spv::Op = 167; + pub const Op_OpLogicalNot: root::spv::Op = 168; + pub const Op_OpSelect: root::spv::Op = 169; + pub const Op_OpIEqual: root::spv::Op = 170; + pub const Op_OpINotEqual: root::spv::Op = 171; + pub const Op_OpUGreaterThan: root::spv::Op = 172; + pub const Op_OpSGreaterThan: root::spv::Op = 173; + pub const Op_OpUGreaterThanEqual: root::spv::Op = 174; + pub const Op_OpSGreaterThanEqual: root::spv::Op = 175; + pub const Op_OpULessThan: root::spv::Op = 176; + pub const Op_OpSLessThan: root::spv::Op = 177; + pub const Op_OpULessThanEqual: root::spv::Op = 178; + pub const Op_OpSLessThanEqual: root::spv::Op = 179; + pub const Op_OpFOrdEqual: root::spv::Op = 180; + pub const Op_OpFUnordEqual: root::spv::Op = 181; + pub const Op_OpFOrdNotEqual: root::spv::Op = 182; + pub const Op_OpFUnordNotEqual: root::spv::Op = 183; + pub const Op_OpFOrdLessThan: root::spv::Op = 184; + pub const Op_OpFUnordLessThan: root::spv::Op = 185; + pub const Op_OpFOrdGreaterThan: root::spv::Op = 186; + pub const Op_OpFUnordGreaterThan: root::spv::Op = 187; + pub const Op_OpFOrdLessThanEqual: root::spv::Op = 188; + pub const Op_OpFUnordLessThanEqual: root::spv::Op = 189; + pub const Op_OpFOrdGreaterThanEqual: root::spv::Op = 190; + pub const Op_OpFUnordGreaterThanEqual: root::spv::Op = 191; + pub const Op_OpShiftRightLogical: root::spv::Op = 194; + pub const Op_OpShiftRightArithmetic: root::spv::Op = 195; + pub const Op_OpShiftLeftLogical: root::spv::Op = 196; + pub const Op_OpBitwiseOr: root::spv::Op = 197; + pub const Op_OpBitwiseXor: root::spv::Op = 198; + pub const Op_OpBitwiseAnd: root::spv::Op = 199; + pub const Op_OpNot: root::spv::Op = 200; + pub const Op_OpBitFieldInsert: root::spv::Op = 201; + pub const 
Op_OpBitFieldSExtract: root::spv::Op = 202; + pub const Op_OpBitFieldUExtract: root::spv::Op = 203; + pub const Op_OpBitReverse: root::spv::Op = 204; + pub const Op_OpBitCount: root::spv::Op = 205; + pub const Op_OpDPdx: root::spv::Op = 207; + pub const Op_OpDPdy: root::spv::Op = 208; + pub const Op_OpFwidth: root::spv::Op = 209; + pub const Op_OpDPdxFine: root::spv::Op = 210; + pub const Op_OpDPdyFine: root::spv::Op = 211; + pub const Op_OpFwidthFine: root::spv::Op = 212; + pub const Op_OpDPdxCoarse: root::spv::Op = 213; + pub const Op_OpDPdyCoarse: root::spv::Op = 214; + pub const Op_OpFwidthCoarse: root::spv::Op = 215; + pub const Op_OpEmitVertex: root::spv::Op = 218; + pub const Op_OpEndPrimitive: root::spv::Op = 219; + pub const Op_OpEmitStreamVertex: root::spv::Op = 220; + pub const Op_OpEndStreamPrimitive: root::spv::Op = 221; + pub const Op_OpControlBarrier: root::spv::Op = 224; + pub const Op_OpMemoryBarrier: root::spv::Op = 225; + pub const Op_OpAtomicLoad: root::spv::Op = 227; + pub const Op_OpAtomicStore: root::spv::Op = 228; + pub const Op_OpAtomicExchange: root::spv::Op = 229; + pub const Op_OpAtomicCompareExchange: root::spv::Op = 230; + pub const Op_OpAtomicCompareExchangeWeak: root::spv::Op = 231; + pub const Op_OpAtomicIIncrement: root::spv::Op = 232; + pub const Op_OpAtomicIDecrement: root::spv::Op = 233; + pub const Op_OpAtomicIAdd: root::spv::Op = 234; + pub const Op_OpAtomicISub: root::spv::Op = 235; + pub const Op_OpAtomicSMin: root::spv::Op = 236; + pub const Op_OpAtomicUMin: root::spv::Op = 237; + pub const Op_OpAtomicSMax: root::spv::Op = 238; + pub const Op_OpAtomicUMax: root::spv::Op = 239; + pub const Op_OpAtomicAnd: root::spv::Op = 240; + pub const Op_OpAtomicOr: root::spv::Op = 241; + pub const Op_OpAtomicXor: root::spv::Op = 242; + pub const Op_OpPhi: root::spv::Op = 245; + pub const Op_OpLoopMerge: root::spv::Op = 246; + pub const Op_OpSelectionMerge: root::spv::Op = 247; + pub const Op_OpLabel: root::spv::Op = 248; + pub const 
Op_OpBranch: root::spv::Op = 249; + pub const Op_OpBranchConditional: root::spv::Op = 250; + pub const Op_OpSwitch: root::spv::Op = 251; + pub const Op_OpKill: root::spv::Op = 252; + pub const Op_OpReturn: root::spv::Op = 253; + pub const Op_OpReturnValue: root::spv::Op = 254; + pub const Op_OpUnreachable: root::spv::Op = 255; + pub const Op_OpLifetimeStart: root::spv::Op = 256; + pub const Op_OpLifetimeStop: root::spv::Op = 257; + pub const Op_OpGroupAsyncCopy: root::spv::Op = 259; + pub const Op_OpGroupWaitEvents: root::spv::Op = 260; + pub const Op_OpGroupAll: root::spv::Op = 261; + pub const Op_OpGroupAny: root::spv::Op = 262; + pub const Op_OpGroupBroadcast: root::spv::Op = 263; + pub const Op_OpGroupIAdd: root::spv::Op = 264; + pub const Op_OpGroupFAdd: root::spv::Op = 265; + pub const Op_OpGroupFMin: root::spv::Op = 266; + pub const Op_OpGroupUMin: root::spv::Op = 267; + pub const Op_OpGroupSMin: root::spv::Op = 268; + pub const Op_OpGroupFMax: root::spv::Op = 269; + pub const Op_OpGroupUMax: root::spv::Op = 270; + pub const Op_OpGroupSMax: root::spv::Op = 271; + pub const Op_OpReadPipe: root::spv::Op = 274; + pub const Op_OpWritePipe: root::spv::Op = 275; + pub const Op_OpReservedReadPipe: root::spv::Op = 276; + pub const Op_OpReservedWritePipe: root::spv::Op = 277; + pub const Op_OpReserveReadPipePackets: root::spv::Op = 278; + pub const Op_OpReserveWritePipePackets: root::spv::Op = 279; + pub const Op_OpCommitReadPipe: root::spv::Op = 280; + pub const Op_OpCommitWritePipe: root::spv::Op = 281; + pub const Op_OpIsValidReserveId: root::spv::Op = 282; + pub const Op_OpGetNumPipePackets: root::spv::Op = 283; + pub const Op_OpGetMaxPipePackets: root::spv::Op = 284; + pub const Op_OpGroupReserveReadPipePackets: root::spv::Op = 285; + pub const Op_OpGroupReserveWritePipePackets: root::spv::Op = 286; + pub const Op_OpGroupCommitReadPipe: root::spv::Op = 287; + pub const Op_OpGroupCommitWritePipe: root::spv::Op = 288; + pub const Op_OpEnqueueMarker: root::spv::Op 
= 291; + pub const Op_OpEnqueueKernel: root::spv::Op = 292; + pub const Op_OpGetKernelNDrangeSubGroupCount: root::spv::Op = 293; + pub const Op_OpGetKernelNDrangeMaxSubGroupSize: root::spv::Op = 294; + pub const Op_OpGetKernelWorkGroupSize: root::spv::Op = 295; + pub const Op_OpGetKernelPreferredWorkGroupSizeMultiple: root::spv::Op = 296; + pub const Op_OpRetainEvent: root::spv::Op = 297; + pub const Op_OpReleaseEvent: root::spv::Op = 298; + pub const Op_OpCreateUserEvent: root::spv::Op = 299; + pub const Op_OpIsValidEvent: root::spv::Op = 300; + pub const Op_OpSetUserEventStatus: root::spv::Op = 301; + pub const Op_OpCaptureEventProfilingInfo: root::spv::Op = 302; + pub const Op_OpGetDefaultQueue: root::spv::Op = 303; + pub const Op_OpBuildNDRange: root::spv::Op = 304; + pub const Op_OpImageSparseSampleImplicitLod: root::spv::Op = 305; + pub const Op_OpImageSparseSampleExplicitLod: root::spv::Op = 306; + pub const Op_OpImageSparseSampleDrefImplicitLod: root::spv::Op = 307; + pub const Op_OpImageSparseSampleDrefExplicitLod: root::spv::Op = 308; + pub const Op_OpImageSparseSampleProjImplicitLod: root::spv::Op = 309; + pub const Op_OpImageSparseSampleProjExplicitLod: root::spv::Op = 310; + pub const Op_OpImageSparseSampleProjDrefImplicitLod: root::spv::Op = 311; + pub const Op_OpImageSparseSampleProjDrefExplicitLod: root::spv::Op = 312; + pub const Op_OpImageSparseFetch: root::spv::Op = 313; + pub const Op_OpImageSparseGather: root::spv::Op = 314; + pub const Op_OpImageSparseDrefGather: root::spv::Op = 315; + pub const Op_OpImageSparseTexelsResident: root::spv::Op = 316; + pub const Op_OpNoLine: root::spv::Op = 317; + pub const Op_OpAtomicFlagTestAndSet: root::spv::Op = 318; + pub const Op_OpAtomicFlagClear: root::spv::Op = 319; + pub const Op_OpImageSparseRead: root::spv::Op = 320; + pub const Op_OpSizeOf: root::spv::Op = 321; + pub const Op_OpTypePipeStorage: root::spv::Op = 322; + pub const Op_OpConstantPipeStorage: root::spv::Op = 323; + pub const 
Op_OpCreatePipeFromPipeStorage: root::spv::Op = 324; + pub const Op_OpGetKernelLocalSizeForSubgroupCount: root::spv::Op = 325; + pub const Op_OpGetKernelMaxNumSubgroups: root::spv::Op = 326; + pub const Op_OpTypeNamedBarrier: root::spv::Op = 327; + pub const Op_OpNamedBarrierInitialize: root::spv::Op = 328; + pub const Op_OpMemoryNamedBarrier: root::spv::Op = 329; + pub const Op_OpModuleProcessed: root::spv::Op = 330; + pub const Op_OpExecutionModeId: root::spv::Op = 331; + pub const Op_OpDecorateId: root::spv::Op = 332; + pub const Op_OpGroupNonUniformElect: root::spv::Op = 333; + pub const Op_OpGroupNonUniformAll: root::spv::Op = 334; + pub const Op_OpGroupNonUniformAny: root::spv::Op = 335; + pub const Op_OpGroupNonUniformAllEqual: root::spv::Op = 336; + pub const Op_OpGroupNonUniformBroadcast: root::spv::Op = 337; + pub const Op_OpGroupNonUniformBroadcastFirst: root::spv::Op = 338; + pub const Op_OpGroupNonUniformBallot: root::spv::Op = 339; + pub const Op_OpGroupNonUniformInverseBallot: root::spv::Op = 340; + pub const Op_OpGroupNonUniformBallotBitExtract: root::spv::Op = 341; + pub const Op_OpGroupNonUniformBallotBitCount: root::spv::Op = 342; + pub const Op_OpGroupNonUniformBallotFindLSB: root::spv::Op = 343; + pub const Op_OpGroupNonUniformBallotFindMSB: root::spv::Op = 344; + pub const Op_OpGroupNonUniformShuffle: root::spv::Op = 345; + pub const Op_OpGroupNonUniformShuffleXor: root::spv::Op = 346; + pub const Op_OpGroupNonUniformShuffleUp: root::spv::Op = 347; + pub const Op_OpGroupNonUniformShuffleDown: root::spv::Op = 348; + pub const Op_OpGroupNonUniformIAdd: root::spv::Op = 349; + pub const Op_OpGroupNonUniformFAdd: root::spv::Op = 350; + pub const Op_OpGroupNonUniformIMul: root::spv::Op = 351; + pub const Op_OpGroupNonUniformFMul: root::spv::Op = 352; + pub const Op_OpGroupNonUniformSMin: root::spv::Op = 353; + pub const Op_OpGroupNonUniformUMin: root::spv::Op = 354; + pub const Op_OpGroupNonUniformFMin: root::spv::Op = 355; + pub const 
Op_OpGroupNonUniformSMax: root::spv::Op = 356; + pub const Op_OpGroupNonUniformUMax: root::spv::Op = 357; + pub const Op_OpGroupNonUniformFMax: root::spv::Op = 358; + pub const Op_OpGroupNonUniformBitwiseAnd: root::spv::Op = 359; + pub const Op_OpGroupNonUniformBitwiseOr: root::spv::Op = 360; + pub const Op_OpGroupNonUniformBitwiseXor: root::spv::Op = 361; + pub const Op_OpGroupNonUniformLogicalAnd: root::spv::Op = 362; + pub const Op_OpGroupNonUniformLogicalOr: root::spv::Op = 363; + pub const Op_OpGroupNonUniformLogicalXor: root::spv::Op = 364; + pub const Op_OpGroupNonUniformQuadBroadcast: root::spv::Op = 365; + pub const Op_OpGroupNonUniformQuadSwap: root::spv::Op = 366; + pub const Op_OpCopyLogical: root::spv::Op = 400; + pub const Op_OpPtrEqual: root::spv::Op = 401; + pub const Op_OpPtrNotEqual: root::spv::Op = 402; + pub const Op_OpPtrDiff: root::spv::Op = 403; + pub const Op_OpSubgroupBallotKHR: root::spv::Op = 4421; + pub const Op_OpSubgroupFirstInvocationKHR: root::spv::Op = 4422; + pub const Op_OpSubgroupAllKHR: root::spv::Op = 4428; + pub const Op_OpSubgroupAnyKHR: root::spv::Op = 4429; + pub const Op_OpSubgroupAllEqualKHR: root::spv::Op = 4430; + pub const Op_OpSubgroupReadInvocationKHR: root::spv::Op = 4432; + pub const Op_OpGroupIAddNonUniformAMD: root::spv::Op = 5000; + pub const Op_OpGroupFAddNonUniformAMD: root::spv::Op = 5001; + pub const Op_OpGroupFMinNonUniformAMD: root::spv::Op = 5002; + pub const Op_OpGroupUMinNonUniformAMD: root::spv::Op = 5003; + pub const Op_OpGroupSMinNonUniformAMD: root::spv::Op = 5004; + pub const Op_OpGroupFMaxNonUniformAMD: root::spv::Op = 5005; + pub const Op_OpGroupUMaxNonUniformAMD: root::spv::Op = 5006; + pub const Op_OpGroupSMaxNonUniformAMD: root::spv::Op = 5007; + pub const Op_OpFragmentMaskFetchAMD: root::spv::Op = 5011; + pub const Op_OpFragmentFetchAMD: root::spv::Op = 5012; + pub const Op_OpReadClockKHR: root::spv::Op = 5056; + pub const Op_OpImageSampleFootprintNV: root::spv::Op = 5283; + pub const 
Op_OpGroupNonUniformPartitionNV: root::spv::Op = 5296; + pub const Op_OpWritePackedPrimitiveIndices4x8NV: root::spv::Op = 5299; + pub const Op_OpReportIntersectionNV: root::spv::Op = 5334; + pub const Op_OpIgnoreIntersectionNV: root::spv::Op = 5335; + pub const Op_OpTerminateRayNV: root::spv::Op = 5336; + pub const Op_OpTraceNV: root::spv::Op = 5337; + pub const Op_OpTypeAccelerationStructureNV: root::spv::Op = 5341; + pub const Op_OpExecuteCallableNV: root::spv::Op = 5344; + pub const Op_OpTypeCooperativeMatrixNV: root::spv::Op = 5358; + pub const Op_OpCooperativeMatrixLoadNV: root::spv::Op = 5359; + pub const Op_OpCooperativeMatrixStoreNV: root::spv::Op = 5360; + pub const Op_OpCooperativeMatrixMulAddNV: root::spv::Op = 5361; + pub const Op_OpCooperativeMatrixLengthNV: root::spv::Op = 5362; + pub const Op_OpBeginInvocationInterlockEXT: root::spv::Op = 5364; + pub const Op_OpEndInvocationInterlockEXT: root::spv::Op = 5365; + pub const Op_OpDemoteToHelperInvocationEXT: root::spv::Op = 5380; + pub const Op_OpIsHelperInvocationEXT: root::spv::Op = 5381; + pub const Op_OpSubgroupShuffleINTEL: root::spv::Op = 5571; + pub const Op_OpSubgroupShuffleDownINTEL: root::spv::Op = 5572; + pub const Op_OpSubgroupShuffleUpINTEL: root::spv::Op = 5573; + pub const Op_OpSubgroupShuffleXorINTEL: root::spv::Op = 5574; + pub const Op_OpSubgroupBlockReadINTEL: root::spv::Op = 5575; + pub const Op_OpSubgroupBlockWriteINTEL: root::spv::Op = 5576; + pub const Op_OpSubgroupImageBlockReadINTEL: root::spv::Op = 5577; + pub const Op_OpSubgroupImageBlockWriteINTEL: root::spv::Op = 5578; + pub const Op_OpSubgroupImageMediaBlockReadINTEL: root::spv::Op = 5580; + pub const Op_OpSubgroupImageMediaBlockWriteINTEL: root::spv::Op = 5581; + pub const Op_OpUCountLeadingZerosINTEL: root::spv::Op = 5585; + pub const Op_OpUCountTrailingZerosINTEL: root::spv::Op = 5586; + pub const Op_OpAbsISubINTEL: root::spv::Op = 5587; + pub const Op_OpAbsUSubINTEL: root::spv::Op = 5588; + pub const Op_OpIAddSatINTEL: 
root::spv::Op = 5589; + pub const Op_OpUAddSatINTEL: root::spv::Op = 5590; + pub const Op_OpIAverageINTEL: root::spv::Op = 5591; + pub const Op_OpUAverageINTEL: root::spv::Op = 5592; + pub const Op_OpIAverageRoundedINTEL: root::spv::Op = 5593; + pub const Op_OpUAverageRoundedINTEL: root::spv::Op = 5594; + pub const Op_OpISubSatINTEL: root::spv::Op = 5595; + pub const Op_OpUSubSatINTEL: root::spv::Op = 5596; + pub const Op_OpIMul32x16INTEL: root::spv::Op = 5597; + pub const Op_OpUMul32x16INTEL: root::spv::Op = 5598; + pub const Op_OpDecorateString: root::spv::Op = 5632; + pub const Op_OpDecorateStringGOOGLE: root::spv::Op = 5632; + pub const Op_OpMemberDecorateString: root::spv::Op = 5633; + pub const Op_OpMemberDecorateStringGOOGLE: root::spv::Op = 5633; + pub const Op_OpVmeImageINTEL: root::spv::Op = 5699; + pub const Op_OpTypeVmeImageINTEL: root::spv::Op = 5700; + pub const Op_OpTypeAvcImePayloadINTEL: root::spv::Op = 5701; + pub const Op_OpTypeAvcRefPayloadINTEL: root::spv::Op = 5702; + pub const Op_OpTypeAvcSicPayloadINTEL: root::spv::Op = 5703; + pub const Op_OpTypeAvcMcePayloadINTEL: root::spv::Op = 5704; + pub const Op_OpTypeAvcMceResultINTEL: root::spv::Op = 5705; + pub const Op_OpTypeAvcImeResultINTEL: root::spv::Op = 5706; + pub const Op_OpTypeAvcImeResultSingleReferenceStreamoutINTEL: root::spv::Op = 5707; + pub const Op_OpTypeAvcImeResultDualReferenceStreamoutINTEL: root::spv::Op = 5708; + pub const Op_OpTypeAvcImeSingleReferenceStreaminINTEL: root::spv::Op = 5709; + pub const Op_OpTypeAvcImeDualReferenceStreaminINTEL: root::spv::Op = 5710; + pub const Op_OpTypeAvcRefResultINTEL: root::spv::Op = 5711; + pub const Op_OpTypeAvcSicResultINTEL: root::spv::Op = 5712; + pub const Op_OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: root::spv::Op = + 5713; + pub const Op_OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: root::spv::Op = 5714; + pub const Op_OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: root::spv::Op = 5715; + pub const 
Op_OpSubgroupAvcMceSetInterShapePenaltyINTEL: root::spv::Op = 5716; + pub const Op_OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: root::spv::Op = 5717; + pub const Op_OpSubgroupAvcMceSetInterDirectionPenaltyINTEL: root::spv::Op = 5718; + pub const Op_OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: root::spv::Op = 5719; + pub const Op_OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: root::spv::Op = + 5720; + pub const Op_OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: root::spv::Op = 5721; + pub const Op_OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: root::spv::Op = 5722; + pub const Op_OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: root::spv::Op = 5723; + pub const Op_OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: root::spv::Op = 5724; + pub const Op_OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: root::spv::Op = 5725; + pub const Op_OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: root::spv::Op = 5726; + pub const Op_OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: root::spv::Op = + 5727; + pub const Op_OpSubgroupAvcMceSetAcOnlyHaarINTEL: root::spv::Op = 5728; + pub const Op_OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: root::spv::Op = 5729; + pub const Op_OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: root::spv::Op = + 5730; + pub const Op_OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: root::spv::Op = + 5731; + pub const Op_OpSubgroupAvcMceConvertToImePayloadINTEL: root::spv::Op = 5732; + pub const Op_OpSubgroupAvcMceConvertToImeResultINTEL: root::spv::Op = 5733; + pub const Op_OpSubgroupAvcMceConvertToRefPayloadINTEL: root::spv::Op = 5734; + pub const Op_OpSubgroupAvcMceConvertToRefResultINTEL: root::spv::Op = 5735; + pub const Op_OpSubgroupAvcMceConvertToSicPayloadINTEL: root::spv::Op = 5736; + pub const Op_OpSubgroupAvcMceConvertToSicResultINTEL: root::spv::Op = 5737; + pub const Op_OpSubgroupAvcMceGetMotionVectorsINTEL: root::spv::Op = 5738; + pub const 
Op_OpSubgroupAvcMceGetInterDistortionsINTEL: root::spv::Op = 5739; + pub const Op_OpSubgroupAvcMceGetBestInterDistortionsINTEL: root::spv::Op = 5740; + pub const Op_OpSubgroupAvcMceGetInterMajorShapeINTEL: root::spv::Op = 5741; + pub const Op_OpSubgroupAvcMceGetInterMinorShapeINTEL: root::spv::Op = 5742; + pub const Op_OpSubgroupAvcMceGetInterDirectionsINTEL: root::spv::Op = 5743; + pub const Op_OpSubgroupAvcMceGetInterMotionVectorCountINTEL: root::spv::Op = 5744; + pub const Op_OpSubgroupAvcMceGetInterReferenceIdsINTEL: root::spv::Op = 5745; + pub const Op_OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: + root::spv::Op = 5746; + pub const Op_OpSubgroupAvcImeInitializeINTEL: root::spv::Op = 5747; + pub const Op_OpSubgroupAvcImeSetSingleReferenceINTEL: root::spv::Op = 5748; + pub const Op_OpSubgroupAvcImeSetDualReferenceINTEL: root::spv::Op = 5749; + pub const Op_OpSubgroupAvcImeRefWindowSizeINTEL: root::spv::Op = 5750; + pub const Op_OpSubgroupAvcImeAdjustRefOffsetINTEL: root::spv::Op = 5751; + pub const Op_OpSubgroupAvcImeConvertToMcePayloadINTEL: root::spv::Op = 5752; + pub const Op_OpSubgroupAvcImeSetMaxMotionVectorCountINTEL: root::spv::Op = 5753; + pub const Op_OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: root::spv::Op = 5754; + pub const Op_OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: root::spv::Op = 5755; + pub const Op_OpSubgroupAvcImeSetWeightedSadINTEL: root::spv::Op = 5756; + pub const Op_OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: root::spv::Op = 5757; + pub const Op_OpSubgroupAvcImeEvaluateWithDualReferenceINTEL: root::spv::Op = 5758; + pub const Op_OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: root::spv::Op = 5759; + pub const Op_OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: root::spv::Op = 5760; + pub const Op_OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: root::spv::Op = + 5761; + pub const Op_OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: root::spv::Op = 5762; + pub 
const Op_OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: root::spv::Op = + 5763; + pub const Op_OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: root::spv::Op = + 5764; + pub const Op_OpSubgroupAvcImeConvertToMceResultINTEL: root::spv::Op = 5765; + pub const Op_OpSubgroupAvcImeGetSingleReferenceStreaminINTEL: root::spv::Op = 5766; + pub const Op_OpSubgroupAvcImeGetDualReferenceStreaminINTEL: root::spv::Op = 5767; + pub const Op_OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: root::spv::Op = 5768; + pub const Op_OpSubgroupAvcImeStripDualReferenceStreamoutINTEL: root::spv::Op = 5769; + pub const Op_OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: + root::spv::Op = 5770; + pub const Op_OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: + root::spv::Op = 5771; + pub const Op_OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: + root::spv::Op = 5772; + pub const Op_OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: + root::spv::Op = 5773; + pub const Op_OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: + root::spv::Op = 5774; + pub const Op_OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: + root::spv::Op = 5775; + pub const Op_OpSubgroupAvcImeGetBorderReachedINTEL: root::spv::Op = 5776; + pub const Op_OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: root::spv::Op = 5777; + pub const Op_OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: root::spv::Op = + 5778; + pub const Op_OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: root::spv::Op = + 5779; + pub const Op_OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: root::spv::Op = + 5780; + pub const Op_OpSubgroupAvcFmeInitializeINTEL: root::spv::Op = 5781; + pub const Op_OpSubgroupAvcBmeInitializeINTEL: root::spv::Op = 5782; + pub const Op_OpSubgroupAvcRefConvertToMcePayloadINTEL: root::spv::Op = 5783; + pub const 
Op_OpSubgroupAvcRefSetBidirectionalMixDisableINTEL: root::spv::Op = 5784; + pub const Op_OpSubgroupAvcRefSetBilinearFilterEnableINTEL: root::spv::Op = 5785; + pub const Op_OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: root::spv::Op = 5786; + pub const Op_OpSubgroupAvcRefEvaluateWithDualReferenceINTEL: root::spv::Op = 5787; + pub const Op_OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: root::spv::Op = 5788; + pub const Op_OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: root::spv::Op = + 5789; + pub const Op_OpSubgroupAvcRefConvertToMceResultINTEL: root::spv::Op = 5790; + pub const Op_OpSubgroupAvcSicInitializeINTEL: root::spv::Op = 5791; + pub const Op_OpSubgroupAvcSicConfigureSkcINTEL: root::spv::Op = 5792; + pub const Op_OpSubgroupAvcSicConfigureIpeLumaINTEL: root::spv::Op = 5793; + pub const Op_OpSubgroupAvcSicConfigureIpeLumaChromaINTEL: root::spv::Op = 5794; + pub const Op_OpSubgroupAvcSicGetMotionVectorMaskINTEL: root::spv::Op = 5795; + pub const Op_OpSubgroupAvcSicConvertToMcePayloadINTEL: root::spv::Op = 5796; + pub const Op_OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: root::spv::Op = 5797; + pub const Op_OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: root::spv::Op = 5798; + pub const Op_OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: root::spv::Op = 5799; + pub const Op_OpSubgroupAvcSicSetBilinearFilterEnableINTEL: root::spv::Op = 5800; + pub const Op_OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: root::spv::Op = 5801; + pub const Op_OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: root::spv::Op = 5802; + pub const Op_OpSubgroupAvcSicEvaluateIpeINTEL: root::spv::Op = 5803; + pub const Op_OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: root::spv::Op = 5804; + pub const Op_OpSubgroupAvcSicEvaluateWithDualReferenceINTEL: root::spv::Op = 5805; + pub const Op_OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: root::spv::Op = 5806; + pub const Op_OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: root::spv::Op = + 5807; + pub 
const Op_OpSubgroupAvcSicConvertToMceResultINTEL: root::spv::Op = 5808; + pub const Op_OpSubgroupAvcSicGetIpeLumaShapeINTEL: root::spv::Op = 5809; + pub const Op_OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: root::spv::Op = 5810; + pub const Op_OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: root::spv::Op = 5811; + pub const Op_OpSubgroupAvcSicGetPackedIpeLumaModesINTEL: root::spv::Op = 5812; + pub const Op_OpSubgroupAvcSicGetIpeChromaModeINTEL: root::spv::Op = 5813; + pub const Op_OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: root::spv::Op = 5814; + pub const Op_OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: root::spv::Op = 5815; + pub const Op_OpSubgroupAvcSicGetInterRawSadsINTEL: root::spv::Op = 5816; + pub const Op_OpMax: root::spv::Op = 2147483647; + pub type Op = u32; } pub mod std { #[allow(unused_imports)] use self::super::super::root; - pub type string = [u64; 4usize]; } - pub mod __gnu_cxx { - #[allow(unused_imports)] - use self::super::super::root; - } - pub type __uint8_t = ::std::os::raw::c_uchar; - pub type __int32_t = ::std::os::raw::c_int; - pub type __uint32_t = ::std::os::raw::c_uint; - pub mod SPIRV_CROSS_NAMESPACE { + pub type __darwin_size_t = ::std::os::raw::c_ulong; + pub mod spirv_cross { #[allow(unused_imports)] use self::super::super::root; #[repr(u32)] @@ -1662,17 +2012,6 @@ pub mod root { ControlPointArray = 20, Char = 21, } - #[repr(C)] - #[derive(Debug, Copy)] - pub struct Resource { - pub id: u32, - pub type_id: u32, - pub base_type_id: u32, - pub name: root::std::string, - } - impl Clone for Resource { - fn clone(&self) -> Self { *self } - } #[repr(u32)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum MSLVertexFormat { @@ -1682,21 +2021,18 @@ pub mod root { MSL_VERTEX_FORMAT_INT_MAX = 2147483647, } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct MSLVertexAttr { pub location: u32, pub msl_buffer: u32, pub msl_offset: u32, pub msl_stride: u32, pub per_instance: bool, - pub format: 
root::SPIRV_CROSS_NAMESPACE::MSLVertexFormat, + pub format: root::spirv_cross::MSLVertexFormat, pub builtin: root::spv::BuiltIn, } - impl Clone for MSLVertexAttr { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct MSLResourceBinding { pub stage: root::spv::ExecutionModel, pub desc_set: u32, @@ -1705,83 +2041,148 @@ pub mod root { pub msl_texture: u32, pub msl_sampler: u32, } - impl Clone for MSLResourceBinding { - fn clone(&self) -> Self { *self } - } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MSLSamplerCoord { - MSL_SAMPLER_COORD_NORMALIZED = 0, - MSL_SAMPLER_COORD_PIXEL = 1, - MSL_SAMPLER_INT_MAX = 2147483647, - } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MSLSamplerFilter { - MSL_SAMPLER_FILTER_NEAREST = 0, - MSL_SAMPLER_FILTER_LINEAR = 1, - MSL_SAMPLER_FILTER_INT_MAX = 2147483647, - } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MSLSamplerMipFilter { - MSL_SAMPLER_MIP_FILTER_NONE = 0, - MSL_SAMPLER_MIP_FILTER_NEAREST = 1, - MSL_SAMPLER_MIP_FILTER_LINEAR = 2, - MSL_SAMPLER_MIP_FILTER_INT_MAX = 2147483647, - } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MSLSamplerAddress { - MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO = 0, - MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE = 1, - MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER = 2, - MSL_SAMPLER_ADDRESS_REPEAT = 3, - MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT = 4, - MSL_SAMPLER_ADDRESS_INT_MAX = 2147483647, - } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MSLSamplerCompareFunc { - MSL_SAMPLER_COMPARE_FUNC_NEVER = 0, - MSL_SAMPLER_COMPARE_FUNC_LESS = 1, - MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL = 2, - MSL_SAMPLER_COMPARE_FUNC_GREATER = 3, - MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL = 4, - MSL_SAMPLER_COMPARE_FUNC_EQUAL = 5, - MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL = 6, - MSL_SAMPLER_COMPARE_FUNC_ALWAYS = 7, - 
MSL_SAMPLER_COMPARE_FUNC_INT_MAX = 2147483647, - } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MSLSamplerBorderColor { - MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK = 0, - MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK = 1, - MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE = 2, - MSL_SAMPLER_BORDER_COLOR_INT_MAX = 2147483647, - } + pub const MSLSamplerCoord_MSL_SAMPLER_COORD_NORMALIZED: root::spirv_cross::MSLSamplerCoord = + 0; + pub const MSLSamplerCoord_MSL_SAMPLER_COORD_PIXEL: root::spirv_cross::MSLSamplerCoord = 1; + pub const MSLSamplerCoord_MSL_SAMPLER_INT_MAX: root::spirv_cross::MSLSamplerCoord = + 2147483647; + pub type MSLSamplerCoord = u32; + pub const MSLSamplerFilter_MSL_SAMPLER_FILTER_NEAREST: root::spirv_cross::MSLSamplerFilter = + 0; + pub const MSLSamplerFilter_MSL_SAMPLER_FILTER_LINEAR: root::spirv_cross::MSLSamplerFilter = + 1; + pub const MSLSamplerFilter_MSL_SAMPLER_FILTER_INT_MAX: root::spirv_cross::MSLSamplerFilter = + 2147483647; + pub type MSLSamplerFilter = u32; + pub const MSLSamplerMipFilter_MSL_SAMPLER_MIP_FILTER_NONE: + root::spirv_cross::MSLSamplerMipFilter = 0; + pub const MSLSamplerMipFilter_MSL_SAMPLER_MIP_FILTER_NEAREST: + root::spirv_cross::MSLSamplerMipFilter = 1; + pub const MSLSamplerMipFilter_MSL_SAMPLER_MIP_FILTER_LINEAR: + root::spirv_cross::MSLSamplerMipFilter = 2; + pub const MSLSamplerMipFilter_MSL_SAMPLER_MIP_FILTER_INT_MAX: + root::spirv_cross::MSLSamplerMipFilter = 2147483647; + pub type MSLSamplerMipFilter = u32; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO: + root::spirv_cross::MSLSamplerAddress = 0; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE: + root::spirv_cross::MSLSamplerAddress = 1; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER: + root::spirv_cross::MSLSamplerAddress = 2; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_REPEAT: + root::spirv_cross::MSLSamplerAddress = 3; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT: + 
root::spirv_cross::MSLSamplerAddress = 4; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_INT_MAX: + root::spirv_cross::MSLSamplerAddress = 2147483647; + pub type MSLSamplerAddress = u32; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_NEVER: + root::spirv_cross::MSLSamplerCompareFunc = 0; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_LESS: + root::spirv_cross::MSLSamplerCompareFunc = 1; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL: + root::spirv_cross::MSLSamplerCompareFunc = 2; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_GREATER: + root::spirv_cross::MSLSamplerCompareFunc = 3; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL: + root::spirv_cross::MSLSamplerCompareFunc = 4; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_EQUAL: + root::spirv_cross::MSLSamplerCompareFunc = 5; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL: + root::spirv_cross::MSLSamplerCompareFunc = 6; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_ALWAYS: + root::spirv_cross::MSLSamplerCompareFunc = 7; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_INT_MAX: + root::spirv_cross::MSLSamplerCompareFunc = 2147483647; + pub type MSLSamplerCompareFunc = u32; + pub const MSLSamplerBorderColor_MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK: + root::spirv_cross::MSLSamplerBorderColor = 0; + pub const MSLSamplerBorderColor_MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK: + root::spirv_cross::MSLSamplerBorderColor = 1; + pub const MSLSamplerBorderColor_MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE: + root::spirv_cross::MSLSamplerBorderColor = 2; + pub const MSLSamplerBorderColor_MSL_SAMPLER_BORDER_COLOR_INT_MAX: + root::spirv_cross::MSLSamplerBorderColor = 2147483647; + pub type MSLSamplerBorderColor = u32; + pub const MSLFormatResolution_MSL_FORMAT_RESOLUTION_444: + root::spirv_cross::MSLFormatResolution = 0; + pub const MSLFormatResolution_MSL_FORMAT_RESOLUTION_422: + 
root::spirv_cross::MSLFormatResolution = 1; + pub const MSLFormatResolution_MSL_FORMAT_RESOLUTION_420: + root::spirv_cross::MSLFormatResolution = 2; + pub const MSLFormatResolution_MSL_FORMAT_RESOLUTION_INT_MAX: + root::spirv_cross::MSLFormatResolution = 2147483647; + pub type MSLFormatResolution = u32; + pub const MSLChromaLocation_MSL_CHROMA_LOCATION_COSITED_EVEN: + root::spirv_cross::MSLChromaLocation = 0; + pub const MSLChromaLocation_MSL_CHROMA_LOCATION_MIDPOINT: + root::spirv_cross::MSLChromaLocation = 1; + pub const MSLChromaLocation_MSL_CHROMA_LOCATION_INT_MAX: + root::spirv_cross::MSLChromaLocation = 2147483647; + pub type MSLChromaLocation = u32; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_IDENTITY: + root::spirv_cross::MSLComponentSwizzle = 0; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_ZERO: + root::spirv_cross::MSLComponentSwizzle = 1; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_ONE: + root::spirv_cross::MSLComponentSwizzle = 2; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_R: + root::spirv_cross::MSLComponentSwizzle = 3; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_G: + root::spirv_cross::MSLComponentSwizzle = 4; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_B: + root::spirv_cross::MSLComponentSwizzle = 5; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_A: + root::spirv_cross::MSLComponentSwizzle = 6; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_INT_MAX: + root::spirv_cross::MSLComponentSwizzle = 2147483647; + pub type MSLComponentSwizzle = u32; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY: + root::spirv_cross::MSLSamplerYCbCrModelConversion = 0; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY : root :: spirv_cross :: MSLSamplerYCbCrModelConversion = 1 ; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_709: + 
root::spirv_cross::MSLSamplerYCbCrModelConversion = 2; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_601: + root::spirv_cross::MSLSamplerYCbCrModelConversion = 3; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_2020 : root :: spirv_cross :: MSLSamplerYCbCrModelConversion = 4 ; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_INT_MAX: + root::spirv_cross::MSLSamplerYCbCrModelConversion = 2147483647; + pub type MSLSamplerYCbCrModelConversion = u32; + pub const MSLSamplerYCbCrRange_MSL_SAMPLER_YCBCR_RANGE_ITU_FULL: + root::spirv_cross::MSLSamplerYCbCrRange = 0; + pub const MSLSamplerYCbCrRange_MSL_SAMPLER_YCBCR_RANGE_ITU_NARROW: + root::spirv_cross::MSLSamplerYCbCrRange = 1; + pub const MSLSamplerYCbCrRange_MSL_SAMPLER_YCBCR_RANGE_INT_MAX: + root::spirv_cross::MSLSamplerYCbCrRange = 2147483647; + pub type MSLSamplerYCbCrRange = u32; #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct MSLConstexprSampler { - pub coord: root::SPIRV_CROSS_NAMESPACE::MSLSamplerCoord, - pub min_filter: root::SPIRV_CROSS_NAMESPACE::MSLSamplerFilter, - pub mag_filter: root::SPIRV_CROSS_NAMESPACE::MSLSamplerFilter, - pub mip_filter: root::SPIRV_CROSS_NAMESPACE::MSLSamplerMipFilter, - pub s_address: root::SPIRV_CROSS_NAMESPACE::MSLSamplerAddress, - pub t_address: root::SPIRV_CROSS_NAMESPACE::MSLSamplerAddress, - pub r_address: root::SPIRV_CROSS_NAMESPACE::MSLSamplerAddress, - pub compare_func: root::SPIRV_CROSS_NAMESPACE::MSLSamplerCompareFunc, - pub border_color: root::SPIRV_CROSS_NAMESPACE::MSLSamplerBorderColor, + pub coord: root::spirv_cross::MSLSamplerCoord, + pub min_filter: root::spirv_cross::MSLSamplerFilter, + pub mag_filter: root::spirv_cross::MSLSamplerFilter, + pub mip_filter: root::spirv_cross::MSLSamplerMipFilter, + pub s_address: root::spirv_cross::MSLSamplerAddress, + pub t_address: root::spirv_cross::MSLSamplerAddress, + pub r_address: 
root::spirv_cross::MSLSamplerAddress, + pub compare_func: root::spirv_cross::MSLSamplerCompareFunc, + pub border_color: root::spirv_cross::MSLSamplerBorderColor, pub lod_clamp_min: f32, pub lod_clamp_max: f32, pub max_anisotropy: ::std::os::raw::c_int, + pub planes: u32, + pub resolution: root::spirv_cross::MSLFormatResolution, + pub chroma_filter: root::spirv_cross::MSLSamplerFilter, + pub x_chroma_offset: root::spirv_cross::MSLChromaLocation, + pub y_chroma_offset: root::spirv_cross::MSLChromaLocation, + pub swizzle: [root::spirv_cross::MSLComponentSwizzle; 4usize], + pub ycbcr_model: root::spirv_cross::MSLSamplerYCbCrModelConversion, + pub ycbcr_range: root::spirv_cross::MSLSamplerYCbCrRange, + pub bpc: u32, pub compare_enable: bool, pub lod_clamp_enable: bool, pub anisotropy_enable: bool, - } - impl Clone for MSLConstexprSampler { - fn clone(&self) -> Self { *self } + pub ycbcr_conversion_enable: bool, } } pub type ScInternalCompilerBase = ::std::os::raw::c_void; @@ -1796,7 +2197,7 @@ pub mod root { CompilationError = 2, } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScEntryPoint { pub name: *mut ::std::os::raw::c_char, pub execution_model: root::spv::ExecutionModel, @@ -1804,42 +2205,30 @@ pub mod root { pub work_group_size_y: u32, pub work_group_size_z: u32, } - impl Clone for ScEntryPoint { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScBufferRange { pub index: ::std::os::raw::c_uint, pub offset: usize, pub range: usize, } - impl Clone for ScBufferRange { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScCombinedImageSampler { pub combined_id: u32, pub image_id: u32, pub sampler_id: u32, } - impl Clone for ScCombinedImageSampler { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScHlslRootConstant { pub start: 
u32, pub end: u32, pub binding: u32, pub space: u32, } - impl Clone for ScHlslRootConstant { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScHlslCompilerOptions { pub shader_model: i32, pub point_size_compat: bool, @@ -1847,11 +2236,8 @@ pub mod root { pub vertex_transform_clip_space: bool, pub vertex_invert_y: bool, } - impl Clone for ScHlslCompilerOptions { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScMslCompilerOptions { pub vertex_transform_clip_space: bool, pub vertex_invert_y: bool, @@ -1871,42 +2257,30 @@ pub mod root { pub argument_buffers: bool, pub pad_fragment_output_components: bool, } - impl Clone for ScMslCompilerOptions { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScGlslCompilerOptions { pub vertex_transform_clip_space: bool, pub vertex_invert_y: bool, pub version: u32, pub es: bool, } - impl Clone for ScGlslCompilerOptions { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScResource { pub id: u32, pub type_id: u32, pub base_type_id: u32, pub name: *mut ::std::os::raw::c_char, } - impl Clone for ScResource { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScResourceArray { pub data: *mut root::ScResource, pub num: usize, } - impl Clone for ScResourceArray { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScShaderResources { pub uniform_buffers: root::ScResourceArray, pub storage_buffers: root::ScResourceArray, @@ -1920,309 +2294,267 @@ pub mod root { pub separate_images: root::ScResourceArray, pub separate_samplers: root::ScResourceArray, } - impl Clone for ScShaderResources { - fn clone(&self) -> Self { *self } - } 
#[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScSpecializationConstant { pub id: u32, pub constant_id: u32, } - impl Clone for ScSpecializationConstant { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScType { - pub type_: root::SPIRV_CROSS_NAMESPACE::SPIRType_BaseType, + pub type_: root::spirv_cross::SPIRType_BaseType, pub member_types: *mut u32, pub member_types_size: usize, pub array: *mut u32, pub array_size: usize, } - impl Clone for ScType { - fn clone(&self) -> Self { *self } + extern "C" { + pub fn sc_internal_get_latest_exception_message( + message: *mut *const ::std::os::raw::c_char, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_get_latest_exception_message(message: - *mut *const ::std::os::raw::c_char) - -> root::ScInternalResult; + pub fn sc_internal_compiler_hlsl_new( + compiler: *mut *mut root::ScInternalCompilerHlsl, + ir: *const u32, + size: usize, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_hlsl_new(compiler: - *mut *mut root::ScInternalCompilerHlsl, - ir: *const u32, size: usize) - -> root::ScInternalResult; + pub fn sc_internal_compiler_hlsl_set_options( + compiler: *const root::ScInternalCompilerHlsl, + options: *const root::ScHlslCompilerOptions, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_hlsl_set_options(compiler: - *const root::ScInternalCompilerHlsl, - options: - *const root::ScHlslCompilerOptions) - -> root::ScInternalResult; - } - extern "C" { - pub fn sc_internal_compiler_hlsl_set_root_constant_layout(compiler: - *const root::ScInternalCompilerHlsl, - constants: - *const root::ScHlslRootConstant, - count: - usize) - -> root::ScInternalResult; + pub fn sc_internal_compiler_hlsl_set_root_constant_layout( + compiler: *const root::ScInternalCompilerHlsl, + constants: *const root::ScHlslRootConstant, + count: usize, + ) -> root::ScInternalResult; } 
#[repr(C)] - #[derive(Debug, Copy)] - pub struct MslConstSamplerMapping { + #[derive(Debug, Copy, Clone)] + pub struct ScMslConstSamplerMapping { pub desc_set: u32, pub binding: u32, - pub sampler: root::SPIRV_CROSS_NAMESPACE::MSLConstexprSampler, - } - impl Clone for MslConstSamplerMapping { - fn clone(&self) -> Self { *self } + pub sampler: root::spirv_cross::MSLConstexprSampler, } extern "C" { - pub fn sc_internal_compiler_msl_new(compiler: - *mut *mut root::ScInternalCompilerMsl, - ir: *const u32, size: usize) - -> root::ScInternalResult; + pub fn sc_internal_compiler_msl_new( + compiler: *mut *mut root::ScInternalCompilerMsl, + ir: *const u32, + size: usize, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_msl_set_options(compiler: - *const root::ScInternalCompilerMsl, - options: - *const root::ScMslCompilerOptions) - -> root::ScInternalResult; + pub fn sc_internal_compiler_msl_set_options( + compiler: *const root::ScInternalCompilerMsl, + options: *const root::ScMslCompilerOptions, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_msl_get_is_rasterization_disabled(compiler: - *const root::ScInternalCompilerMsl, - is_rasterization_disabled: - *mut bool) - -> root::ScInternalResult; + pub fn sc_internal_compiler_msl_get_is_rasterization_disabled( + compiler: *const root::ScInternalCompilerMsl, + is_rasterization_disabled: *mut bool, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_msl_compile(compiler: - *const root::ScInternalCompilerBase, - shader: - *mut *const ::std::os::raw::c_char, - p_vat_overrides: - *const root::SPIRV_CROSS_NAMESPACE::MSLVertexAttr, - vat_override_count: usize, - p_res_overrides: - *const root::SPIRV_CROSS_NAMESPACE::MSLResourceBinding, - res_override_count: usize, - p_const_samplers: - *const root::MslConstSamplerMapping, - const_sampler_count: usize) - -> root::ScInternalResult; + pub fn sc_internal_compiler_msl_compile( + compiler: *const 
root::ScInternalCompilerBase, + shader: *mut *const ::std::os::raw::c_char, + p_vat_overrides: *const root::spirv_cross::MSLVertexAttr, + vat_override_count: usize, + p_res_overrides: *const root::spirv_cross::MSLResourceBinding, + res_override_count: usize, + p_const_samplers: *const root::ScMslConstSamplerMapping, + const_sampler_count: usize, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_glsl_new(compiler: - *mut *mut root::ScInternalCompilerGlsl, - ir: *const u32, size: usize) - -> root::ScInternalResult; + pub fn sc_internal_compiler_glsl_new( + compiler: *mut *mut root::ScInternalCompilerGlsl, + ir: *const u32, + size: usize, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_glsl_set_options(compiler: - *const root::ScInternalCompilerGlsl, - options: - *const root::ScGlslCompilerOptions) - -> root::ScInternalResult; + pub fn sc_internal_compiler_glsl_set_options( + compiler: *const root::ScInternalCompilerGlsl, + options: *const root::ScGlslCompilerOptions, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_glsl_build_combined_image_samplers(compiler: - *const root::ScInternalCompilerBase) - -> root::ScInternalResult; + pub fn sc_internal_compiler_glsl_build_combined_image_samplers( + compiler: *const root::ScInternalCompilerBase, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_glsl_get_combined_image_samplers(compiler: - *const root::ScInternalCompilerBase, - samplers: - *mut *const root::ScCombinedImageSampler, - size: - *mut usize) - -> root::ScInternalResult; + pub fn sc_internal_compiler_glsl_get_combined_image_samplers( + compiler: *const root::ScInternalCompilerBase, + samplers: *mut *const root::ScCombinedImageSampler, + size: *mut usize, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_decoration(compiler: - *const root::ScInternalCompilerBase, - result: *mut u32, id: u32, - decoration: - root::spv::Decoration) - 
-> root::ScInternalResult; + pub fn sc_internal_compiler_get_decoration( + compiler: *const root::ScInternalCompilerBase, + result: *mut u32, + id: u32, + decoration: root::spv::Decoration, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_set_decoration(compiler: - *const root::ScInternalCompilerBase, - id: u32, - decoration: - root::spv::Decoration, - argument: u32) - -> root::ScInternalResult; + pub fn sc_internal_compiler_set_decoration( + compiler: *const root::ScInternalCompilerBase, + id: u32, + decoration: root::spv::Decoration, + argument: u32, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_unset_decoration(compiler: - *const root::ScInternalCompilerBase, - id: u32, - decoration: - root::spv::Decoration) - -> root::ScInternalResult; + pub fn sc_internal_compiler_unset_decoration( + compiler: *const root::ScInternalCompilerBase, + id: u32, + decoration: root::spv::Decoration, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_name(compiler: - *const root::ScInternalCompilerBase, - id: u32, - name: - *mut *const ::std::os::raw::c_char) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_name( + compiler: *const root::ScInternalCompilerBase, + id: u32, + name: *mut *const ::std::os::raw::c_char, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_set_name(compiler: - *const root::ScInternalCompilerBase, - id: u32, - name: - *const ::std::os::raw::c_char) - -> root::ScInternalResult; + pub fn sc_internal_compiler_set_name( + compiler: *const root::ScInternalCompilerBase, + id: u32, + name: *const ::std::os::raw::c_char, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_entry_points(compiler: - *const root::ScInternalCompilerBase, - entry_points: - *mut *mut root::ScEntryPoint, - size: *mut usize) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_entry_points( + compiler: *const 
root::ScInternalCompilerBase, + entry_points: *mut *mut root::ScEntryPoint, + size: *mut usize, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_active_buffer_ranges(compiler: - *const root::ScInternalCompilerBase, - id: u32, - active_buffer_ranges: - *mut *mut root::ScBufferRange, - size: *mut usize) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_active_buffer_ranges( + compiler: *const root::ScInternalCompilerBase, + id: u32, + active_buffer_ranges: *mut *mut root::ScBufferRange, + size: *mut usize, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_cleansed_entry_point_name(compiler: - *const root::ScInternalCompilerBase, - original_entry_point_name: - *const ::std::os::raw::c_char, - execution_model: - root::spv::ExecutionModel, - compiled_entry_point_name: - *mut *const ::std::os::raw::c_char) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_cleansed_entry_point_name( + compiler: *const root::ScInternalCompilerBase, + original_entry_point_name: *const ::std::os::raw::c_char, + execution_model: root::spv::ExecutionModel, + compiled_entry_point_name: *mut *const ::std::os::raw::c_char, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_shader_resources(compiler: - *const root::ScInternalCompilerBase, - shader_resources: - *mut root::ScShaderResources) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_shader_resources( + compiler: *const root::ScInternalCompilerBase, + shader_resources: *mut root::ScShaderResources, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_specialization_constants(compiler: - *const root::ScInternalCompilerBase, - constants: - *mut *mut root::ScSpecializationConstant, - size: - *mut usize) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_specialization_constants( + compiler: *const root::ScInternalCompilerBase, + constants: *mut *mut root::ScSpecializationConstant, 
+ size: *mut usize, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_set_scalar_constant(compiler: - *const root::ScInternalCompilerBase, - id: u32, - constant_high_bits: - u32, - constant_low_bits: - u32) - -> root::ScInternalResult; + pub fn sc_internal_compiler_set_scalar_constant( + compiler: *const root::ScInternalCompilerBase, + id: u32, + constant_high_bits: u32, + constant_low_bits: u32, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_type(compiler: - *const root::ScInternalCompilerBase, - id: u32, - spirv_type: - *mut *const root::ScType) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_type( + compiler: *const root::ScInternalCompilerBase, + id: u32, + spirv_type: *mut *const root::ScType, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_member_name(compiler: - *const root::ScInternalCompilerBase, - id: u32, index: u32, - name: - *mut *const ::std::os::raw::c_char) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_member_name( + compiler: *const root::ScInternalCompilerBase, + id: u32, + index: u32, + name: *mut *const ::std::os::raw::c_char, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_member_decoration(compiler: - *const root::ScInternalCompilerBase, - id: u32, index: u32, - decoration: - root::spv::Decoration, - result: *mut u32) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_member_decoration( + compiler: *const root::ScInternalCompilerBase, + id: u32, + index: u32, + decoration: root::spv::Decoration, + result: *mut u32, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_set_member_decoration(compiler: - *const root::ScInternalCompilerBase, - id: u32, index: u32, - decoration: - root::spv::Decoration, - argument: u32) - -> root::ScInternalResult; + pub fn sc_internal_compiler_set_member_decoration( + compiler: *const root::ScInternalCompilerBase, + id: u32, + 
index: u32, + decoration: root::spv::Decoration, + argument: u32, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_declared_struct_size(compiler: - *const root::ScInternalCompilerBase, - id: u32, - result: *mut u32) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_declared_struct_size( + compiler: *const root::ScInternalCompilerBase, + id: u32, + result: *mut u32, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_declared_struct_member_size(compiler: - *const root::ScInternalCompilerBase, - id: u32, - index: - u32, - result: - *mut u32) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_declared_struct_member_size( + compiler: *const root::ScInternalCompilerBase, + id: u32, + index: u32, + result: *mut u32, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_rename_interface_variable(compiler: - *const root::ScInternalCompilerBase, - resources: - *const root::ScResource, - resources_size: - usize, - location: u32, - name: - *const ::std::os::raw::c_char) - -> root::ScInternalResult; + pub fn sc_internal_compiler_rename_interface_variable( + compiler: *const root::ScInternalCompilerBase, + resources: *const root::ScResource, + resources_size: usize, + location: u32, + name: *const ::std::os::raw::c_char, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_get_work_group_size_specialization_constants(compiler: - *const root::ScInternalCompilerBase, - constants: - *mut *mut root::ScSpecializationConstant) - -> root::ScInternalResult; + pub fn sc_internal_compiler_get_work_group_size_specialization_constants( + compiler: *const root::ScInternalCompilerBase, + constants: *mut *mut root::ScSpecializationConstant, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_compile(compiler: - *const root::ScInternalCompilerBase, - shader: - *mut *const ::std::os::raw::c_char) - -> root::ScInternalResult; + pub fn 
sc_internal_compiler_compile( + compiler: *const root::ScInternalCompilerBase, + shader: *mut *const ::std::os::raw::c_char, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_compiler_delete(compiler: - *mut root::ScInternalCompilerBase) - -> root::ScInternalResult; + pub fn sc_internal_compiler_delete( + compiler: *mut root::ScInternalCompilerBase, + ) -> root::ScInternalResult; } extern "C" { - pub fn sc_internal_free_pointer(pointer: *mut ::std::os::raw::c_void) - -> root::ScInternalResult; + pub fn sc_internal_free_pointer( + pointer: *mut ::std::os::raw::c_void, + ) -> root::ScInternalResult; } } diff --git a/third_party/rust/spirv-cross-internal/src/bindings_wasm.rs b/third_party/rust/spirv-cross-internal/src/bindings_wasm.rs index c6772bb5ca42..162923f8ade4 100644 --- a/third_party/rust/spirv-cross-internal/src/bindings_wasm.rs +++ b/third_party/rust/spirv-cross-internal/src/bindings_wasm.rs @@ -8,17 +8,14 @@ pub mod root { #[allow(unused_imports)] use self::super::super::root; pub type Id = ::std::os::raw::c_uint; - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum SourceLanguage { - SourceLanguageUnknown = 0, - SourceLanguageESSL = 1, - SourceLanguageGLSL = 2, - SourceLanguageOpenCL_C = 3, - SourceLanguageOpenCL_CPP = 4, - SourceLanguageHLSL = 5, - SourceLanguageMax = 2147483647, - } + pub const SourceLanguage_SourceLanguageUnknown: root::spv::SourceLanguage = 0; + pub const SourceLanguage_SourceLanguageESSL: root::spv::SourceLanguage = 1; + pub const SourceLanguage_SourceLanguageGLSL: root::spv::SourceLanguage = 2; + pub const SourceLanguage_SourceLanguageOpenCL_C: root::spv::SourceLanguage = 3; + pub const SourceLanguage_SourceLanguageOpenCL_CPP: root::spv::SourceLanguage = 4; + pub const SourceLanguage_SourceLanguageHLSL: root::spv::SourceLanguage = 5; + pub const SourceLanguage_SourceLanguageMax: root::spv::SourceLanguage = 2147483647; + pub type SourceLanguage = u32; #[repr(u32)] #[derive(Debug, Copy, Clone, 
PartialEq, Eq, Hash)] pub enum ExecutionModel { @@ -39,287 +36,351 @@ pub mod root { ExecutionModelCallableNV = 5318, ExecutionModelMax = 2147483647, } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum AddressingModel { - AddressingModelLogical = 0, - AddressingModelPhysical32 = 1, - AddressingModelPhysical64 = 2, - AddressingModelPhysicalStorageBuffer64EXT = 5348, - AddressingModelMax = 2147483647, + pub const AddressingModel_AddressingModelLogical: root::spv::AddressingModel = 0; + pub const AddressingModel_AddressingModelPhysical32: root::spv::AddressingModel = 1; + pub const AddressingModel_AddressingModelPhysical64: root::spv::AddressingModel = 2; + pub const AddressingModel_AddressingModelPhysicalStorageBuffer64: + root::spv::AddressingModel = 5348; + pub const AddressingModel_AddressingModelPhysicalStorageBuffer64EXT: + root::spv::AddressingModel = 5348; + pub const AddressingModel_AddressingModelMax: root::spv::AddressingModel = 2147483647; + pub type AddressingModel = u32; + pub const MemoryModel_MemoryModelSimple: root::spv::MemoryModel = 0; + pub const MemoryModel_MemoryModelGLSL450: root::spv::MemoryModel = 1; + pub const MemoryModel_MemoryModelOpenCL: root::spv::MemoryModel = 2; + pub const MemoryModel_MemoryModelVulkan: root::spv::MemoryModel = 3; + pub const MemoryModel_MemoryModelVulkanKHR: root::spv::MemoryModel = 3; + pub const MemoryModel_MemoryModelMax: root::spv::MemoryModel = 2147483647; + pub type MemoryModel = u32; + pub const ExecutionMode_ExecutionModeInvocations: root::spv::ExecutionMode = 0; + pub const ExecutionMode_ExecutionModeSpacingEqual: root::spv::ExecutionMode = 1; + pub const ExecutionMode_ExecutionModeSpacingFractionalEven: root::spv::ExecutionMode = 2; + pub const ExecutionMode_ExecutionModeSpacingFractionalOdd: root::spv::ExecutionMode = 3; + pub const ExecutionMode_ExecutionModeVertexOrderCw: root::spv::ExecutionMode = 4; + pub const ExecutionMode_ExecutionModeVertexOrderCcw: 
root::spv::ExecutionMode = 5; + pub const ExecutionMode_ExecutionModePixelCenterInteger: root::spv::ExecutionMode = 6; + pub const ExecutionMode_ExecutionModeOriginUpperLeft: root::spv::ExecutionMode = 7; + pub const ExecutionMode_ExecutionModeOriginLowerLeft: root::spv::ExecutionMode = 8; + pub const ExecutionMode_ExecutionModeEarlyFragmentTests: root::spv::ExecutionMode = 9; + pub const ExecutionMode_ExecutionModePointMode: root::spv::ExecutionMode = 10; + pub const ExecutionMode_ExecutionModeXfb: root::spv::ExecutionMode = 11; + pub const ExecutionMode_ExecutionModeDepthReplacing: root::spv::ExecutionMode = 12; + pub const ExecutionMode_ExecutionModeDepthGreater: root::spv::ExecutionMode = 14; + pub const ExecutionMode_ExecutionModeDepthLess: root::spv::ExecutionMode = 15; + pub const ExecutionMode_ExecutionModeDepthUnchanged: root::spv::ExecutionMode = 16; + pub const ExecutionMode_ExecutionModeLocalSize: root::spv::ExecutionMode = 17; + pub const ExecutionMode_ExecutionModeLocalSizeHint: root::spv::ExecutionMode = 18; + pub const ExecutionMode_ExecutionModeInputPoints: root::spv::ExecutionMode = 19; + pub const ExecutionMode_ExecutionModeInputLines: root::spv::ExecutionMode = 20; + pub const ExecutionMode_ExecutionModeInputLinesAdjacency: root::spv::ExecutionMode = 21; + pub const ExecutionMode_ExecutionModeTriangles: root::spv::ExecutionMode = 22; + pub const ExecutionMode_ExecutionModeInputTrianglesAdjacency: root::spv::ExecutionMode = 23; + pub const ExecutionMode_ExecutionModeQuads: root::spv::ExecutionMode = 24; + pub const ExecutionMode_ExecutionModeIsolines: root::spv::ExecutionMode = 25; + pub const ExecutionMode_ExecutionModeOutputVertices: root::spv::ExecutionMode = 26; + pub const ExecutionMode_ExecutionModeOutputPoints: root::spv::ExecutionMode = 27; + pub const ExecutionMode_ExecutionModeOutputLineStrip: root::spv::ExecutionMode = 28; + pub const ExecutionMode_ExecutionModeOutputTriangleStrip: root::spv::ExecutionMode = 29; + pub const 
ExecutionMode_ExecutionModeVecTypeHint: root::spv::ExecutionMode = 30; + pub const ExecutionMode_ExecutionModeContractionOff: root::spv::ExecutionMode = 31; + pub const ExecutionMode_ExecutionModeInitializer: root::spv::ExecutionMode = 33; + pub const ExecutionMode_ExecutionModeFinalizer: root::spv::ExecutionMode = 34; + pub const ExecutionMode_ExecutionModeSubgroupSize: root::spv::ExecutionMode = 35; + pub const ExecutionMode_ExecutionModeSubgroupsPerWorkgroup: root::spv::ExecutionMode = 36; + pub const ExecutionMode_ExecutionModeSubgroupsPerWorkgroupId: root::spv::ExecutionMode = 37; + pub const ExecutionMode_ExecutionModeLocalSizeId: root::spv::ExecutionMode = 38; + pub const ExecutionMode_ExecutionModeLocalSizeHintId: root::spv::ExecutionMode = 39; + pub const ExecutionMode_ExecutionModePostDepthCoverage: root::spv::ExecutionMode = 4446; + pub const ExecutionMode_ExecutionModeDenormPreserve: root::spv::ExecutionMode = 4459; + pub const ExecutionMode_ExecutionModeDenormFlushToZero: root::spv::ExecutionMode = 4460; + pub const ExecutionMode_ExecutionModeSignedZeroInfNanPreserve: root::spv::ExecutionMode = + 4461; + pub const ExecutionMode_ExecutionModeRoundingModeRTE: root::spv::ExecutionMode = 4462; + pub const ExecutionMode_ExecutionModeRoundingModeRTZ: root::spv::ExecutionMode = 4463; + pub const ExecutionMode_ExecutionModeStencilRefReplacingEXT: root::spv::ExecutionMode = + 5027; + pub const ExecutionMode_ExecutionModeOutputLinesNV: root::spv::ExecutionMode = 5269; + pub const ExecutionMode_ExecutionModeOutputPrimitivesNV: root::spv::ExecutionMode = 5270; + pub const ExecutionMode_ExecutionModeDerivativeGroupQuadsNV: root::spv::ExecutionMode = + 5289; + pub const ExecutionMode_ExecutionModeDerivativeGroupLinearNV: root::spv::ExecutionMode = + 5290; + pub const ExecutionMode_ExecutionModeOutputTrianglesNV: root::spv::ExecutionMode = 5298; + pub const ExecutionMode_ExecutionModePixelInterlockOrderedEXT: root::spv::ExecutionMode = + 5366; + pub const 
ExecutionMode_ExecutionModePixelInterlockUnorderedEXT: root::spv::ExecutionMode = + 5367; + pub const ExecutionMode_ExecutionModeSampleInterlockOrderedEXT: root::spv::ExecutionMode = + 5368; + pub const ExecutionMode_ExecutionModeSampleInterlockUnorderedEXT: root::spv::ExecutionMode = + 5369; + pub const ExecutionMode_ExecutionModeShadingRateInterlockOrderedEXT: + root::spv::ExecutionMode = 5370; + pub const ExecutionMode_ExecutionModeShadingRateInterlockUnorderedEXT: + root::spv::ExecutionMode = 5371; + pub const ExecutionMode_ExecutionModeMax: root::spv::ExecutionMode = 2147483647; + pub type ExecutionMode = u32; + pub const StorageClass_StorageClassUniformConstant: root::spv::StorageClass = 0; + pub const StorageClass_StorageClassInput: root::spv::StorageClass = 1; + pub const StorageClass_StorageClassUniform: root::spv::StorageClass = 2; + pub const StorageClass_StorageClassOutput: root::spv::StorageClass = 3; + pub const StorageClass_StorageClassWorkgroup: root::spv::StorageClass = 4; + pub const StorageClass_StorageClassCrossWorkgroup: root::spv::StorageClass = 5; + pub const StorageClass_StorageClassPrivate: root::spv::StorageClass = 6; + pub const StorageClass_StorageClassFunction: root::spv::StorageClass = 7; + pub const StorageClass_StorageClassGeneric: root::spv::StorageClass = 8; + pub const StorageClass_StorageClassPushConstant: root::spv::StorageClass = 9; + pub const StorageClass_StorageClassAtomicCounter: root::spv::StorageClass = 10; + pub const StorageClass_StorageClassImage: root::spv::StorageClass = 11; + pub const StorageClass_StorageClassStorageBuffer: root::spv::StorageClass = 12; + pub const StorageClass_StorageClassCallableDataNV: root::spv::StorageClass = 5328; + pub const StorageClass_StorageClassIncomingCallableDataNV: root::spv::StorageClass = 5329; + pub const StorageClass_StorageClassRayPayloadNV: root::spv::StorageClass = 5338; + pub const StorageClass_StorageClassHitAttributeNV: root::spv::StorageClass = 5339; + pub const 
StorageClass_StorageClassIncomingRayPayloadNV: root::spv::StorageClass = 5342; + pub const StorageClass_StorageClassShaderRecordBufferNV: root::spv::StorageClass = 5343; + pub const StorageClass_StorageClassPhysicalStorageBuffer: root::spv::StorageClass = 5349; + pub const StorageClass_StorageClassPhysicalStorageBufferEXT: root::spv::StorageClass = 5349; + pub const StorageClass_StorageClassMax: root::spv::StorageClass = 2147483647; + pub type StorageClass = u32; + pub const Dim_Dim1D: root::spv::Dim = 0; + pub const Dim_Dim2D: root::spv::Dim = 1; + pub const Dim_Dim3D: root::spv::Dim = 2; + pub const Dim_DimCube: root::spv::Dim = 3; + pub const Dim_DimRect: root::spv::Dim = 4; + pub const Dim_DimBuffer: root::spv::Dim = 5; + pub const Dim_DimSubpassData: root::spv::Dim = 6; + pub const Dim_DimMax: root::spv::Dim = 2147483647; + pub type Dim = u32; + pub const SamplerAddressingMode_SamplerAddressingModeNone: + root::spv::SamplerAddressingMode = 0; + pub const SamplerAddressingMode_SamplerAddressingModeClampToEdge: + root::spv::SamplerAddressingMode = 1; + pub const SamplerAddressingMode_SamplerAddressingModeClamp: + root::spv::SamplerAddressingMode = 2; + pub const SamplerAddressingMode_SamplerAddressingModeRepeat: + root::spv::SamplerAddressingMode = 3; + pub const SamplerAddressingMode_SamplerAddressingModeRepeatMirrored: + root::spv::SamplerAddressingMode = 4; + pub const SamplerAddressingMode_SamplerAddressingModeMax: root::spv::SamplerAddressingMode = + 2147483647; + pub type SamplerAddressingMode = u32; + pub const SamplerFilterMode_SamplerFilterModeNearest: root::spv::SamplerFilterMode = 0; + pub const SamplerFilterMode_SamplerFilterModeLinear: root::spv::SamplerFilterMode = 1; + pub const SamplerFilterMode_SamplerFilterModeMax: root::spv::SamplerFilterMode = 2147483647; + pub type SamplerFilterMode = u32; + pub const ImageFormat_ImageFormatUnknown: root::spv::ImageFormat = 0; + pub const ImageFormat_ImageFormatRgba32f: root::spv::ImageFormat = 1; + pub 
const ImageFormat_ImageFormatRgba16f: root::spv::ImageFormat = 2; + pub const ImageFormat_ImageFormatR32f: root::spv::ImageFormat = 3; + pub const ImageFormat_ImageFormatRgba8: root::spv::ImageFormat = 4; + pub const ImageFormat_ImageFormatRgba8Snorm: root::spv::ImageFormat = 5; + pub const ImageFormat_ImageFormatRg32f: root::spv::ImageFormat = 6; + pub const ImageFormat_ImageFormatRg16f: root::spv::ImageFormat = 7; + pub const ImageFormat_ImageFormatR11fG11fB10f: root::spv::ImageFormat = 8; + pub const ImageFormat_ImageFormatR16f: root::spv::ImageFormat = 9; + pub const ImageFormat_ImageFormatRgba16: root::spv::ImageFormat = 10; + pub const ImageFormat_ImageFormatRgb10A2: root::spv::ImageFormat = 11; + pub const ImageFormat_ImageFormatRg16: root::spv::ImageFormat = 12; + pub const ImageFormat_ImageFormatRg8: root::spv::ImageFormat = 13; + pub const ImageFormat_ImageFormatR16: root::spv::ImageFormat = 14; + pub const ImageFormat_ImageFormatR8: root::spv::ImageFormat = 15; + pub const ImageFormat_ImageFormatRgba16Snorm: root::spv::ImageFormat = 16; + pub const ImageFormat_ImageFormatRg16Snorm: root::spv::ImageFormat = 17; + pub const ImageFormat_ImageFormatRg8Snorm: root::spv::ImageFormat = 18; + pub const ImageFormat_ImageFormatR16Snorm: root::spv::ImageFormat = 19; + pub const ImageFormat_ImageFormatR8Snorm: root::spv::ImageFormat = 20; + pub const ImageFormat_ImageFormatRgba32i: root::spv::ImageFormat = 21; + pub const ImageFormat_ImageFormatRgba16i: root::spv::ImageFormat = 22; + pub const ImageFormat_ImageFormatRgba8i: root::spv::ImageFormat = 23; + pub const ImageFormat_ImageFormatR32i: root::spv::ImageFormat = 24; + pub const ImageFormat_ImageFormatRg32i: root::spv::ImageFormat = 25; + pub const ImageFormat_ImageFormatRg16i: root::spv::ImageFormat = 26; + pub const ImageFormat_ImageFormatRg8i: root::spv::ImageFormat = 27; + pub const ImageFormat_ImageFormatR16i: root::spv::ImageFormat = 28; + pub const ImageFormat_ImageFormatR8i: root::spv::ImageFormat = 29; 
+ pub const ImageFormat_ImageFormatRgba32ui: root::spv::ImageFormat = 30; + pub const ImageFormat_ImageFormatRgba16ui: root::spv::ImageFormat = 31; + pub const ImageFormat_ImageFormatRgba8ui: root::spv::ImageFormat = 32; + pub const ImageFormat_ImageFormatR32ui: root::spv::ImageFormat = 33; + pub const ImageFormat_ImageFormatRgb10a2ui: root::spv::ImageFormat = 34; + pub const ImageFormat_ImageFormatRg32ui: root::spv::ImageFormat = 35; + pub const ImageFormat_ImageFormatRg16ui: root::spv::ImageFormat = 36; + pub const ImageFormat_ImageFormatRg8ui: root::spv::ImageFormat = 37; + pub const ImageFormat_ImageFormatR16ui: root::spv::ImageFormat = 38; + pub const ImageFormat_ImageFormatR8ui: root::spv::ImageFormat = 39; + pub const ImageFormat_ImageFormatMax: root::spv::ImageFormat = 2147483647; + pub type ImageFormat = u32; + pub const ImageChannelOrder_ImageChannelOrderR: root::spv::ImageChannelOrder = 0; + pub const ImageChannelOrder_ImageChannelOrderA: root::spv::ImageChannelOrder = 1; + pub const ImageChannelOrder_ImageChannelOrderRG: root::spv::ImageChannelOrder = 2; + pub const ImageChannelOrder_ImageChannelOrderRA: root::spv::ImageChannelOrder = 3; + pub const ImageChannelOrder_ImageChannelOrderRGB: root::spv::ImageChannelOrder = 4; + pub const ImageChannelOrder_ImageChannelOrderRGBA: root::spv::ImageChannelOrder = 5; + pub const ImageChannelOrder_ImageChannelOrderBGRA: root::spv::ImageChannelOrder = 6; + pub const ImageChannelOrder_ImageChannelOrderARGB: root::spv::ImageChannelOrder = 7; + pub const ImageChannelOrder_ImageChannelOrderIntensity: root::spv::ImageChannelOrder = 8; + pub const ImageChannelOrder_ImageChannelOrderLuminance: root::spv::ImageChannelOrder = 9; + pub const ImageChannelOrder_ImageChannelOrderRx: root::spv::ImageChannelOrder = 10; + pub const ImageChannelOrder_ImageChannelOrderRGx: root::spv::ImageChannelOrder = 11; + pub const ImageChannelOrder_ImageChannelOrderRGBx: root::spv::ImageChannelOrder = 12; + pub const 
ImageChannelOrder_ImageChannelOrderDepth: root::spv::ImageChannelOrder = 13; + pub const ImageChannelOrder_ImageChannelOrderDepthStencil: root::spv::ImageChannelOrder = + 14; + pub const ImageChannelOrder_ImageChannelOrdersRGB: root::spv::ImageChannelOrder = 15; + pub const ImageChannelOrder_ImageChannelOrdersRGBx: root::spv::ImageChannelOrder = 16; + pub const ImageChannelOrder_ImageChannelOrdersRGBA: root::spv::ImageChannelOrder = 17; + pub const ImageChannelOrder_ImageChannelOrdersBGRA: root::spv::ImageChannelOrder = 18; + pub const ImageChannelOrder_ImageChannelOrderABGR: root::spv::ImageChannelOrder = 19; + pub const ImageChannelOrder_ImageChannelOrderMax: root::spv::ImageChannelOrder = 2147483647; + pub type ImageChannelOrder = u32; + pub const ImageChannelDataType_ImageChannelDataTypeSnormInt8: + root::spv::ImageChannelDataType = 0; + pub const ImageChannelDataType_ImageChannelDataTypeSnormInt16: + root::spv::ImageChannelDataType = 1; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt8: + root::spv::ImageChannelDataType = 2; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt16: + root::spv::ImageChannelDataType = 3; + pub const ImageChannelDataType_ImageChannelDataTypeUnormShort565: + root::spv::ImageChannelDataType = 4; + pub const ImageChannelDataType_ImageChannelDataTypeUnormShort555: + root::spv::ImageChannelDataType = 5; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt101010: + root::spv::ImageChannelDataType = 6; + pub const ImageChannelDataType_ImageChannelDataTypeSignedInt8: + root::spv::ImageChannelDataType = 7; + pub const ImageChannelDataType_ImageChannelDataTypeSignedInt16: + root::spv::ImageChannelDataType = 8; + pub const ImageChannelDataType_ImageChannelDataTypeSignedInt32: + root::spv::ImageChannelDataType = 9; + pub const ImageChannelDataType_ImageChannelDataTypeUnsignedInt8: + root::spv::ImageChannelDataType = 10; + pub const ImageChannelDataType_ImageChannelDataTypeUnsignedInt16: + 
root::spv::ImageChannelDataType = 11; + pub const ImageChannelDataType_ImageChannelDataTypeUnsignedInt32: + root::spv::ImageChannelDataType = 12; + pub const ImageChannelDataType_ImageChannelDataTypeHalfFloat: + root::spv::ImageChannelDataType = 13; + pub const ImageChannelDataType_ImageChannelDataTypeFloat: root::spv::ImageChannelDataType = + 14; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt24: + root::spv::ImageChannelDataType = 15; + pub const ImageChannelDataType_ImageChannelDataTypeUnormInt101010_2: + root::spv::ImageChannelDataType = 16; + pub const ImageChannelDataType_ImageChannelDataTypeMax: root::spv::ImageChannelDataType = + 2147483647; + pub type ImageChannelDataType = u32; + pub const ImageOperandsShift_ImageOperandsBiasShift: root::spv::ImageOperandsShift = 0; + pub const ImageOperandsShift_ImageOperandsLodShift: root::spv::ImageOperandsShift = 1; + pub const ImageOperandsShift_ImageOperandsGradShift: root::spv::ImageOperandsShift = 2; + pub const ImageOperandsShift_ImageOperandsConstOffsetShift: root::spv::ImageOperandsShift = + 3; + pub const ImageOperandsShift_ImageOperandsOffsetShift: root::spv::ImageOperandsShift = 4; + pub const ImageOperandsShift_ImageOperandsConstOffsetsShift: root::spv::ImageOperandsShift = + 5; + pub const ImageOperandsShift_ImageOperandsSampleShift: root::spv::ImageOperandsShift = 6; + pub const ImageOperandsShift_ImageOperandsMinLodShift: root::spv::ImageOperandsShift = 7; + pub const ImageOperandsShift_ImageOperandsMakeTexelAvailableShift: + root::spv::ImageOperandsShift = 8; + pub const ImageOperandsShift_ImageOperandsMakeTexelAvailableKHRShift: + root::spv::ImageOperandsShift = 8; + pub const ImageOperandsShift_ImageOperandsMakeTexelVisibleShift: + root::spv::ImageOperandsShift = 9; + pub const ImageOperandsShift_ImageOperandsMakeTexelVisibleKHRShift: + root::spv::ImageOperandsShift = 9; + pub const ImageOperandsShift_ImageOperandsNonPrivateTexelShift: + root::spv::ImageOperandsShift = 10; + pub const 
ImageOperandsShift_ImageOperandsNonPrivateTexelKHRShift: + root::spv::ImageOperandsShift = 10; + pub const ImageOperandsShift_ImageOperandsVolatileTexelShift: + root::spv::ImageOperandsShift = 11; + pub const ImageOperandsShift_ImageOperandsVolatileTexelKHRShift: + root::spv::ImageOperandsShift = 11; + pub const ImageOperandsShift_ImageOperandsSignExtendShift: root::spv::ImageOperandsShift = + 12; + pub const ImageOperandsShift_ImageOperandsZeroExtendShift: root::spv::ImageOperandsShift = + 13; + pub const ImageOperandsShift_ImageOperandsMax: root::spv::ImageOperandsShift = 2147483647; + pub type ImageOperandsShift = u32; + impl ImageOperandsMask { + pub const ImageOperandsMaskNone: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(0); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MemoryModel { - MemoryModelSimple = 0, - MemoryModelGLSL450 = 1, - MemoryModelOpenCL = 2, - MemoryModelVulkanKHR = 3, - MemoryModelMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsBiasMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(1); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ExecutionMode { - ExecutionModeInvocations = 0, - ExecutionModeSpacingEqual = 1, - ExecutionModeSpacingFractionalEven = 2, - ExecutionModeSpacingFractionalOdd = 3, - ExecutionModeVertexOrderCw = 4, - ExecutionModeVertexOrderCcw = 5, - ExecutionModePixelCenterInteger = 6, - ExecutionModeOriginUpperLeft = 7, - ExecutionModeOriginLowerLeft = 8, - ExecutionModeEarlyFragmentTests = 9, - ExecutionModePointMode = 10, - ExecutionModeXfb = 11, - ExecutionModeDepthReplacing = 12, - ExecutionModeDepthGreater = 14, - ExecutionModeDepthLess = 15, - ExecutionModeDepthUnchanged = 16, - ExecutionModeLocalSize = 17, - ExecutionModeLocalSizeHint = 18, - ExecutionModeInputPoints = 19, - ExecutionModeInputLines = 20, - ExecutionModeInputLinesAdjacency = 21, - ExecutionModeTriangles = 22, - 
ExecutionModeInputTrianglesAdjacency = 23, - ExecutionModeQuads = 24, - ExecutionModeIsolines = 25, - ExecutionModeOutputVertices = 26, - ExecutionModeOutputPoints = 27, - ExecutionModeOutputLineStrip = 28, - ExecutionModeOutputTriangleStrip = 29, - ExecutionModeVecTypeHint = 30, - ExecutionModeContractionOff = 31, - ExecutionModeInitializer = 33, - ExecutionModeFinalizer = 34, - ExecutionModeSubgroupSize = 35, - ExecutionModeSubgroupsPerWorkgroup = 36, - ExecutionModeSubgroupsPerWorkgroupId = 37, - ExecutionModeLocalSizeId = 38, - ExecutionModeLocalSizeHintId = 39, - ExecutionModePostDepthCoverage = 4446, - ExecutionModeDenormPreserve = 4459, - ExecutionModeDenormFlushToZero = 4460, - ExecutionModeSignedZeroInfNanPreserve = 4461, - ExecutionModeRoundingModeRTE = 4462, - ExecutionModeRoundingModeRTZ = 4463, - ExecutionModeStencilRefReplacingEXT = 5027, - ExecutionModeOutputLinesNV = 5269, - ExecutionModeOutputPrimitivesNV = 5270, - ExecutionModeDerivativeGroupQuadsNV = 5289, - ExecutionModeDerivativeGroupLinearNV = 5290, - ExecutionModeOutputTrianglesNV = 5298, - ExecutionModeMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsLodMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(2); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum StorageClass { - StorageClassUniformConstant = 0, - StorageClassInput = 1, - StorageClassUniform = 2, - StorageClassOutput = 3, - StorageClassWorkgroup = 4, - StorageClassCrossWorkgroup = 5, - StorageClassPrivate = 6, - StorageClassFunction = 7, - StorageClassGeneric = 8, - StorageClassPushConstant = 9, - StorageClassAtomicCounter = 10, - StorageClassImage = 11, - StorageClassStorageBuffer = 12, - StorageClassCallableDataNV = 5328, - StorageClassIncomingCallableDataNV = 5329, - StorageClassRayPayloadNV = 5338, - StorageClassHitAttributeNV = 5339, - StorageClassIncomingRayPayloadNV = 5342, - StorageClassShaderRecordBufferNV = 5343, - StorageClassPhysicalStorageBufferEXT = 
5349, - StorageClassMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsGradMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(4); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum Dim { - Dim1D = 0, - Dim2D = 1, - Dim3D = 2, - DimCube = 3, - DimRect = 4, - DimBuffer = 5, - DimSubpassData = 6, - DimMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsConstOffsetMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(8); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum SamplerAddressingMode { - SamplerAddressingModeNone = 0, - SamplerAddressingModeClampToEdge = 1, - SamplerAddressingModeClamp = 2, - SamplerAddressingModeRepeat = 3, - SamplerAddressingModeRepeatMirrored = 4, - SamplerAddressingModeMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsOffsetMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(16); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum SamplerFilterMode { - SamplerFilterModeNearest = 0, - SamplerFilterModeLinear = 1, - SamplerFilterModeMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsConstOffsetsMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(32); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ImageFormat { - ImageFormatUnknown = 0, - ImageFormatRgba32f = 1, - ImageFormatRgba16f = 2, - ImageFormatR32f = 3, - ImageFormatRgba8 = 4, - ImageFormatRgba8Snorm = 5, - ImageFormatRg32f = 6, - ImageFormatRg16f = 7, - ImageFormatR11fG11fB10f = 8, - ImageFormatR16f = 9, - ImageFormatRgba16 = 10, - ImageFormatRgb10A2 = 11, - ImageFormatRg16 = 12, - ImageFormatRg8 = 13, - ImageFormatR16 = 14, - ImageFormatR8 = 15, - ImageFormatRgba16Snorm = 16, - ImageFormatRg16Snorm = 17, - ImageFormatRg8Snorm = 18, - ImageFormatR16Snorm = 19, - ImageFormatR8Snorm = 20, - ImageFormatRgba32i = 21, - 
ImageFormatRgba16i = 22, - ImageFormatRgba8i = 23, - ImageFormatR32i = 24, - ImageFormatRg32i = 25, - ImageFormatRg16i = 26, - ImageFormatRg8i = 27, - ImageFormatR16i = 28, - ImageFormatR8i = 29, - ImageFormatRgba32ui = 30, - ImageFormatRgba16ui = 31, - ImageFormatRgba8ui = 32, - ImageFormatR32ui = 33, - ImageFormatRgb10a2ui = 34, - ImageFormatRg32ui = 35, - ImageFormatRg16ui = 36, - ImageFormatRg8ui = 37, - ImageFormatR16ui = 38, - ImageFormatR8ui = 39, - ImageFormatMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsSampleMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(64); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ImageChannelOrder { - ImageChannelOrderR = 0, - ImageChannelOrderA = 1, - ImageChannelOrderRG = 2, - ImageChannelOrderRA = 3, - ImageChannelOrderRGB = 4, - ImageChannelOrderRGBA = 5, - ImageChannelOrderBGRA = 6, - ImageChannelOrderARGB = 7, - ImageChannelOrderIntensity = 8, - ImageChannelOrderLuminance = 9, - ImageChannelOrderRx = 10, - ImageChannelOrderRGx = 11, - ImageChannelOrderRGBx = 12, - ImageChannelOrderDepth = 13, - ImageChannelOrderDepthStencil = 14, - ImageChannelOrdersRGB = 15, - ImageChannelOrdersRGBx = 16, - ImageChannelOrdersRGBA = 17, - ImageChannelOrdersBGRA = 18, - ImageChannelOrderABGR = 19, - ImageChannelOrderMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsMinLodMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(128); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ImageChannelDataType { - ImageChannelDataTypeSnormInt8 = 0, - ImageChannelDataTypeSnormInt16 = 1, - ImageChannelDataTypeUnormInt8 = 2, - ImageChannelDataTypeUnormInt16 = 3, - ImageChannelDataTypeUnormShort565 = 4, - ImageChannelDataTypeUnormShort555 = 5, - ImageChannelDataTypeUnormInt101010 = 6, - ImageChannelDataTypeSignedInt8 = 7, - ImageChannelDataTypeSignedInt16 = 8, - ImageChannelDataTypeSignedInt32 = 9, - 
ImageChannelDataTypeUnsignedInt8 = 10, - ImageChannelDataTypeUnsignedInt16 = 11, - ImageChannelDataTypeUnsignedInt32 = 12, - ImageChannelDataTypeHalfFloat = 13, - ImageChannelDataTypeFloat = 14, - ImageChannelDataTypeUnormInt24 = 15, - ImageChannelDataTypeUnormInt101010_2 = 16, - ImageChannelDataTypeMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsMakeTexelAvailableMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(256); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum ImageOperandsShift { - ImageOperandsBiasShift = 0, - ImageOperandsLodShift = 1, - ImageOperandsGradShift = 2, - ImageOperandsConstOffsetShift = 3, - ImageOperandsOffsetShift = 4, - ImageOperandsConstOffsetsShift = 5, - ImageOperandsSampleShift = 6, - ImageOperandsMinLodShift = 7, - ImageOperandsMakeTexelAvailableKHRShift = 8, - ImageOperandsMakeTexelVisibleKHRShift = 9, - ImageOperandsNonPrivateTexelKHRShift = 10, - ImageOperandsVolatileTexelKHRShift = 11, - ImageOperandsMax = 2147483647, + impl ImageOperandsMask { + pub const ImageOperandsMakeTexelAvailableKHRMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(256); } - pub const ImageOperandsMask_ImageOperandsMaskNone: - root::spv::ImageOperandsMask = - ImageOperandsMask(0); - pub const ImageOperandsMask_ImageOperandsBiasMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(1); - pub const ImageOperandsMask_ImageOperandsLodMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(2); - pub const ImageOperandsMask_ImageOperandsGradMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(4); - pub const ImageOperandsMask_ImageOperandsConstOffsetMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(8); - pub const ImageOperandsMask_ImageOperandsOffsetMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(16); - pub const ImageOperandsMask_ImageOperandsConstOffsetsMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(32); - pub const 
ImageOperandsMask_ImageOperandsSampleMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(64); - pub const ImageOperandsMask_ImageOperandsMinLodMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(128); - pub const ImageOperandsMask_ImageOperandsMakeTexelAvailableKHRMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(256); - pub const ImageOperandsMask_ImageOperandsMakeTexelVisibleKHRMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(512); - pub const ImageOperandsMask_ImageOperandsNonPrivateTexelKHRMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(1024); - pub const ImageOperandsMask_ImageOperandsVolatileTexelKHRMask: - root::spv::ImageOperandsMask = - ImageOperandsMask(2048); - impl ::std::ops::BitOr for - root::spv::ImageOperandsMask { - type - Output - = - Self; + impl ImageOperandsMask { + pub const ImageOperandsMakeTexelVisibleMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(512); + } + impl ImageOperandsMask { + pub const ImageOperandsMakeTexelVisibleKHRMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(512); + } + impl ImageOperandsMask { + pub const ImageOperandsNonPrivateTexelMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(1024); + } + impl ImageOperandsMask { + pub const ImageOperandsNonPrivateTexelKHRMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(1024); + } + impl ImageOperandsMask { + pub const ImageOperandsVolatileTexelMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(2048); + } + impl ImageOperandsMask { + pub const ImageOperandsVolatileTexelKHRMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(2048); + } + impl ImageOperandsMask { + pub const ImageOperandsSignExtendMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(4096); + } + impl ImageOperandsMask { + pub const ImageOperandsZeroExtendMask: root::spv::ImageOperandsMask = + root::spv::ImageOperandsMask(8192); + } + impl 
::std::ops::BitOr for root::spv::ImageOperandsMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { ImageOperandsMask(self.0 | other.0) @@ -331,12 +392,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::ImageOperandsMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::ImageOperandsMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { ImageOperandsMask(self.0 & other.0) @@ -348,43 +405,44 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct ImageOperandsMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum FPFastMathModeShift { - FPFastMathModeNotNaNShift = 0, - FPFastMathModeNotInfShift = 1, - FPFastMathModeNSZShift = 2, - FPFastMathModeAllowRecipShift = 3, - FPFastMathModeFastShift = 4, - FPFastMathModeMax = 2147483647, + pub struct ImageOperandsMask(pub u32); + pub const FPFastMathModeShift_FPFastMathModeNotNaNShift: root::spv::FPFastMathModeShift = 0; + pub const FPFastMathModeShift_FPFastMathModeNotInfShift: root::spv::FPFastMathModeShift = 1; + pub const FPFastMathModeShift_FPFastMathModeNSZShift: root::spv::FPFastMathModeShift = 2; + pub const FPFastMathModeShift_FPFastMathModeAllowRecipShift: + root::spv::FPFastMathModeShift = 3; + pub const FPFastMathModeShift_FPFastMathModeFastShift: root::spv::FPFastMathModeShift = 4; + pub const FPFastMathModeShift_FPFastMathModeMax: root::spv::FPFastMathModeShift = + 2147483647; + pub type FPFastMathModeShift = u32; + impl FPFastMathModeMask { + pub const FPFastMathModeMaskNone: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(0); } - pub const FPFastMathModeMask_FPFastMathModeMaskNone: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(0); - pub const FPFastMathModeMask_FPFastMathModeNotNaNMask: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(1); - 
pub const FPFastMathModeMask_FPFastMathModeNotInfMask: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(2); - pub const FPFastMathModeMask_FPFastMathModeNSZMask: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(4); - pub const FPFastMathModeMask_FPFastMathModeAllowRecipMask: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(8); - pub const FPFastMathModeMask_FPFastMathModeFastMask: - root::spv::FPFastMathModeMask = - FPFastMathModeMask(16); - impl ::std::ops::BitOr for - root::spv::FPFastMathModeMask { - type - Output - = - Self; + impl FPFastMathModeMask { + pub const FPFastMathModeNotNaNMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(1); + } + impl FPFastMathModeMask { + pub const FPFastMathModeNotInfMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(2); + } + impl FPFastMathModeMask { + pub const FPFastMathModeNSZMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(4); + } + impl FPFastMathModeMask { + pub const FPFastMathModeAllowRecipMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(8); + } + impl FPFastMathModeMask { + pub const FPFastMathModeFastMask: root::spv::FPFastMathModeMask = + root::spv::FPFastMathModeMask(16); + } + impl ::std::ops::BitOr for root::spv::FPFastMathModeMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { FPFastMathModeMask(self.0 | other.0) @@ -396,12 +454,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::FPFastMathModeMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::FPFastMathModeMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { FPFastMathModeMask(self.0 & other.0) @@ -413,45 +467,62 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct FPFastMathModeMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, 
Hash)] - pub enum FPRoundingMode { - FPRoundingModeRTE = 0, - FPRoundingModeRTZ = 1, - FPRoundingModeRTP = 2, - FPRoundingModeRTN = 3, - FPRoundingModeMax = 2147483647, + pub struct FPFastMathModeMask(pub u32); + pub const FPRoundingMode_FPRoundingModeRTE: root::spv::FPRoundingMode = 0; + pub const FPRoundingMode_FPRoundingModeRTZ: root::spv::FPRoundingMode = 1; + pub const FPRoundingMode_FPRoundingModeRTP: root::spv::FPRoundingMode = 2; + pub const FPRoundingMode_FPRoundingModeRTN: root::spv::FPRoundingMode = 3; + pub const FPRoundingMode_FPRoundingModeMax: root::spv::FPRoundingMode = 2147483647; + pub type FPRoundingMode = u32; + pub const LinkageType_LinkageTypeExport: root::spv::LinkageType = 0; + pub const LinkageType_LinkageTypeImport: root::spv::LinkageType = 1; + pub const LinkageType_LinkageTypeMax: root::spv::LinkageType = 2147483647; + pub type LinkageType = u32; + pub const AccessQualifier_AccessQualifierReadOnly: root::spv::AccessQualifier = 0; + pub const AccessQualifier_AccessQualifierWriteOnly: root::spv::AccessQualifier = 1; + pub const AccessQualifier_AccessQualifierReadWrite: root::spv::AccessQualifier = 2; + pub const AccessQualifier_AccessQualifierMax: root::spv::AccessQualifier = 2147483647; + pub type AccessQualifier = u32; + pub const FunctionParameterAttribute_FunctionParameterAttributeZext: + root::spv::FunctionParameterAttribute = 0; + pub const FunctionParameterAttribute_FunctionParameterAttributeSext: + root::spv::FunctionParameterAttribute = 1; + pub const FunctionParameterAttribute_FunctionParameterAttributeByVal: + root::spv::FunctionParameterAttribute = 2; + pub const FunctionParameterAttribute_FunctionParameterAttributeSret: + root::spv::FunctionParameterAttribute = 3; + pub const FunctionParameterAttribute_FunctionParameterAttributeNoAlias: + root::spv::FunctionParameterAttribute = 4; + pub const FunctionParameterAttribute_FunctionParameterAttributeNoCapture: + root::spv::FunctionParameterAttribute = 5; + pub const 
FunctionParameterAttribute_FunctionParameterAttributeNoWrite: + root::spv::FunctionParameterAttribute = 6; + pub const FunctionParameterAttribute_FunctionParameterAttributeNoReadWrite: + root::spv::FunctionParameterAttribute = 7; + pub const FunctionParameterAttribute_FunctionParameterAttributeMax: + root::spv::FunctionParameterAttribute = 2147483647; + pub type FunctionParameterAttribute = u32; + impl root::spv::Decoration { + pub const DecorationNonUniformEXT: root::spv::Decoration = + Decoration::DecorationNonUniform; } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum LinkageType { - LinkageTypeExport = 0, - LinkageTypeImport = 1, - LinkageTypeMax = 2147483647, + impl root::spv::Decoration { + pub const DecorationRestrictPointerEXT: root::spv::Decoration = + Decoration::DecorationRestrictPointer; } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum AccessQualifier { - AccessQualifierReadOnly = 0, - AccessQualifierWriteOnly = 1, - AccessQualifierReadWrite = 2, - AccessQualifierMax = 2147483647, + impl root::spv::Decoration { + pub const DecorationAliasedPointerEXT: root::spv::Decoration = + Decoration::DecorationAliasedPointer; } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum FunctionParameterAttribute { - FunctionParameterAttributeZext = 0, - FunctionParameterAttributeSext = 1, - FunctionParameterAttributeByVal = 2, - FunctionParameterAttributeSret = 3, - FunctionParameterAttributeNoAlias = 4, - FunctionParameterAttributeNoCapture = 5, - FunctionParameterAttributeNoWrite = 6, - FunctionParameterAttributeNoReadWrite = 7, - FunctionParameterAttributeMax = 2147483647, + impl root::spv::Decoration { + pub const DecorationHlslCounterBufferGOOGLE: root::spv::Decoration = + Decoration::DecorationCounterBuffer; + } + impl root::spv::Decoration { + pub const DecorationUserSemantic: root::spv::Decoration = + Decoration::DecorationHlslSemanticGOOGLE; } #[repr(u32)] #[derive(Debug, 
Copy, Clone, PartialEq, Eq, Hash)] @@ -482,6 +553,7 @@ pub mod root { DecorationNonWritable = 24, DecorationNonReadable = 25, DecorationUniform = 26, + DecorationUniformId = 27, DecorationSaturatedConversion = 28, DecorationStream = 29, DecorationLocation = 30, @@ -513,27 +585,36 @@ pub mod root { DecorationPerViewNV = 5272, DecorationPerTaskNV = 5273, DecorationPerVertexNV = 5285, - DecorationNonUniformEXT = 5300, - DecorationRestrictPointerEXT = 5355, - DecorationAliasedPointerEXT = 5356, - DecorationHlslCounterBufferGOOGLE = 5634, + DecorationNonUniform = 5300, + DecorationRestrictPointer = 5355, + DecorationAliasedPointer = 5356, + DecorationCounterBuffer = 5634, DecorationHlslSemanticGOOGLE = 5635, + DecorationUserTypeGOOGLE = 5636, DecorationMax = 2147483647, } - pub const BuiltIn_BuiltInSubgroupEqMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupEqMask; - pub const BuiltIn_BuiltInSubgroupGeMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupGeMask; - pub const BuiltIn_BuiltInSubgroupGtMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupGtMask; - pub const BuiltIn_BuiltInSubgroupLeMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupLeMask; - pub const BuiltIn_BuiltInSubgroupLtMaskKHR: root::spv::BuiltIn = - BuiltIn::BuiltInSubgroupLtMask; - pub const BuiltIn_BuiltInFragmentSizeNV: root::spv::BuiltIn = - BuiltIn::BuiltInFragSizeEXT; - pub const BuiltIn_BuiltInInvocationsPerPixelNV: root::spv::BuiltIn = - BuiltIn::BuiltInFragInvocationCountEXT; + impl root::spv::BuiltIn { + pub const BuiltInSubgroupEqMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupEqMask; + } + impl root::spv::BuiltIn { + pub const BuiltInSubgroupGeMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupGeMask; + } + impl root::spv::BuiltIn { + pub const BuiltInSubgroupGtMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupGtMask; + } + impl root::spv::BuiltIn { + pub const BuiltInSubgroupLeMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupLeMask; + } + impl 
root::spv::BuiltIn { + pub const BuiltInSubgroupLtMaskKHR: root::spv::BuiltIn = BuiltIn::BuiltInSubgroupLtMask; + } + impl root::spv::BuiltIn { + pub const BuiltInFragmentSizeNV: root::spv::BuiltIn = BuiltIn::BuiltInFragSizeEXT; + } + impl root::spv::BuiltIn { + pub const BuiltInInvocationsPerPixelNV: root::spv::BuiltIn = + BuiltIn::BuiltInFragInvocationCountEXT; + } #[repr(u32)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum BuiltIn { @@ -628,30 +709,33 @@ pub mod root { BuiltInHitTNV = 5332, BuiltInHitKindNV = 5333, BuiltInIncomingRayFlagsNV = 5351, + BuiltInWarpsPerSMNV = 5374, + BuiltInSMCountNV = 5375, + BuiltInWarpIDNV = 5376, + BuiltInSMIDNV = 5377, BuiltInMax = 2147483647, } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum SelectionControlShift { - SelectionControlFlattenShift = 0, - SelectionControlDontFlattenShift = 1, - SelectionControlMax = 2147483647, + pub const SelectionControlShift_SelectionControlFlattenShift: + root::spv::SelectionControlShift = 0; + pub const SelectionControlShift_SelectionControlDontFlattenShift: + root::spv::SelectionControlShift = 1; + pub const SelectionControlShift_SelectionControlMax: root::spv::SelectionControlShift = + 2147483647; + pub type SelectionControlShift = u32; + impl SelectionControlMask { + pub const SelectionControlMaskNone: root::spv::SelectionControlMask = + root::spv::SelectionControlMask(0); } - pub const SelectionControlMask_SelectionControlMaskNone: - root::spv::SelectionControlMask = - SelectionControlMask(0); - pub const SelectionControlMask_SelectionControlFlattenMask: - root::spv::SelectionControlMask = - SelectionControlMask(1); - pub const SelectionControlMask_SelectionControlDontFlattenMask: - root::spv::SelectionControlMask = - SelectionControlMask(2); - impl ::std::ops::BitOr for - root::spv::SelectionControlMask { - type - Output - = - Self; + impl SelectionControlMask { + pub const SelectionControlFlattenMask: root::spv::SelectionControlMask = + 
root::spv::SelectionControlMask(1); + } + impl SelectionControlMask { + pub const SelectionControlDontFlattenMask: root::spv::SelectionControlMask = + root::spv::SelectionControlMask(2); + } + impl ::std::ops::BitOr for root::spv::SelectionControlMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { SelectionControlMask(self.0 | other.0) @@ -663,12 +747,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::SelectionControlMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::SelectionControlMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { SelectionControlMask(self.0 & other.0) @@ -676,44 +756,69 @@ pub mod root { } impl ::std::ops::BitAndAssign for root::spv::SelectionControlMask { #[inline] - fn bitand_assign(&mut self, - rhs: root::spv::SelectionControlMask) { + fn bitand_assign(&mut self, rhs: root::spv::SelectionControlMask) { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct SelectionControlMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum LoopControlShift { - LoopControlUnrollShift = 0, - LoopControlDontUnrollShift = 1, - LoopControlDependencyInfiniteShift = 2, - LoopControlDependencyLengthShift = 3, - LoopControlMax = 2147483647, + pub struct SelectionControlMask(pub u32); + pub const LoopControlShift_LoopControlUnrollShift: root::spv::LoopControlShift = 0; + pub const LoopControlShift_LoopControlDontUnrollShift: root::spv::LoopControlShift = 1; + pub const LoopControlShift_LoopControlDependencyInfiniteShift: root::spv::LoopControlShift = + 2; + pub const LoopControlShift_LoopControlDependencyLengthShift: root::spv::LoopControlShift = + 3; + pub const LoopControlShift_LoopControlMinIterationsShift: root::spv::LoopControlShift = 4; + pub const LoopControlShift_LoopControlMaxIterationsShift: root::spv::LoopControlShift = 5; + 
pub const LoopControlShift_LoopControlIterationMultipleShift: root::spv::LoopControlShift = + 6; + pub const LoopControlShift_LoopControlPeelCountShift: root::spv::LoopControlShift = 7; + pub const LoopControlShift_LoopControlPartialCountShift: root::spv::LoopControlShift = 8; + pub const LoopControlShift_LoopControlMax: root::spv::LoopControlShift = 2147483647; + pub type LoopControlShift = u32; + impl LoopControlMask { + pub const LoopControlMaskNone: root::spv::LoopControlMask = + root::spv::LoopControlMask(0); } - pub const LoopControlMask_LoopControlMaskNone: - root::spv::LoopControlMask = - LoopControlMask(0); - pub const LoopControlMask_LoopControlUnrollMask: - root::spv::LoopControlMask = - LoopControlMask(1); - pub const LoopControlMask_LoopControlDontUnrollMask: - root::spv::LoopControlMask = - LoopControlMask(2); - pub const LoopControlMask_LoopControlDependencyInfiniteMask: - root::spv::LoopControlMask = - LoopControlMask(4); - pub const LoopControlMask_LoopControlDependencyLengthMask: - root::spv::LoopControlMask = - LoopControlMask(8); - impl ::std::ops::BitOr for - root::spv::LoopControlMask { - type - Output - = - Self; + impl LoopControlMask { + pub const LoopControlUnrollMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(1); + } + impl LoopControlMask { + pub const LoopControlDontUnrollMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(2); + } + impl LoopControlMask { + pub const LoopControlDependencyInfiniteMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(4); + } + impl LoopControlMask { + pub const LoopControlDependencyLengthMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(8); + } + impl LoopControlMask { + pub const LoopControlMinIterationsMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(16); + } + impl LoopControlMask { + pub const LoopControlMaxIterationsMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(32); + } + impl LoopControlMask { + pub const 
LoopControlIterationMultipleMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(64); + } + impl LoopControlMask { + pub const LoopControlPeelCountMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(128); + } + impl LoopControlMask { + pub const LoopControlPartialCountMask: root::spv::LoopControlMask = + root::spv::LoopControlMask(256); + } + impl ::std::ops::BitOr for root::spv::LoopControlMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { LoopControlMask(self.0 | other.0) @@ -725,12 +830,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::LoopControlMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::LoopControlMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { LoopControlMask(self.0 & other.0) @@ -742,39 +843,42 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct LoopControlMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum FunctionControlShift { - FunctionControlInlineShift = 0, - FunctionControlDontInlineShift = 1, - FunctionControlPureShift = 2, - FunctionControlConstShift = 3, - FunctionControlMax = 2147483647, + pub struct LoopControlMask(pub u32); + pub const FunctionControlShift_FunctionControlInlineShift: root::spv::FunctionControlShift = + 0; + pub const FunctionControlShift_FunctionControlDontInlineShift: + root::spv::FunctionControlShift = 1; + pub const FunctionControlShift_FunctionControlPureShift: root::spv::FunctionControlShift = + 2; + pub const FunctionControlShift_FunctionControlConstShift: root::spv::FunctionControlShift = + 3; + pub const FunctionControlShift_FunctionControlMax: root::spv::FunctionControlShift = + 2147483647; + pub type FunctionControlShift = u32; + impl FunctionControlMask { + pub const FunctionControlMaskNone: root::spv::FunctionControlMask = + 
root::spv::FunctionControlMask(0); } - pub const FunctionControlMask_FunctionControlMaskNone: - root::spv::FunctionControlMask = - FunctionControlMask(0); - pub const FunctionControlMask_FunctionControlInlineMask: - root::spv::FunctionControlMask = - FunctionControlMask(1); - pub const FunctionControlMask_FunctionControlDontInlineMask: - root::spv::FunctionControlMask = - FunctionControlMask(2); - pub const FunctionControlMask_FunctionControlPureMask: - root::spv::FunctionControlMask = - FunctionControlMask(4); - pub const FunctionControlMask_FunctionControlConstMask: - root::spv::FunctionControlMask = - FunctionControlMask(8); - impl ::std::ops::BitOr for - root::spv::FunctionControlMask { - type - Output - = - Self; + impl FunctionControlMask { + pub const FunctionControlInlineMask: root::spv::FunctionControlMask = + root::spv::FunctionControlMask(1); + } + impl FunctionControlMask { + pub const FunctionControlDontInlineMask: root::spv::FunctionControlMask = + root::spv::FunctionControlMask(2); + } + impl FunctionControlMask { + pub const FunctionControlPureMask: root::spv::FunctionControlMask = + root::spv::FunctionControlMask(4); + } + impl FunctionControlMask { + pub const FunctionControlConstMask: root::spv::FunctionControlMask = + root::spv::FunctionControlMask(8); + } + impl ::std::ops::BitOr for root::spv::FunctionControlMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { FunctionControlMask(self.0 | other.0) @@ -786,12 +890,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::FunctionControlMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::FunctionControlMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { FunctionControlMask(self.0 & other.0) @@ -803,75 +903,120 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct FunctionControlMask(pub ::std::os::raw::c_uint); - 
#[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MemorySemanticsShift { - MemorySemanticsAcquireShift = 1, - MemorySemanticsReleaseShift = 2, - MemorySemanticsAcquireReleaseShift = 3, - MemorySemanticsSequentiallyConsistentShift = 4, - MemorySemanticsUniformMemoryShift = 6, - MemorySemanticsSubgroupMemoryShift = 7, - MemorySemanticsWorkgroupMemoryShift = 8, - MemorySemanticsCrossWorkgroupMemoryShift = 9, - MemorySemanticsAtomicCounterMemoryShift = 10, - MemorySemanticsImageMemoryShift = 11, - MemorySemanticsOutputMemoryKHRShift = 12, - MemorySemanticsMakeAvailableKHRShift = 13, - MemorySemanticsMakeVisibleKHRShift = 14, - MemorySemanticsMax = 2147483647, + pub struct FunctionControlMask(pub u32); + pub const MemorySemanticsShift_MemorySemanticsAcquireShift: + root::spv::MemorySemanticsShift = 1; + pub const MemorySemanticsShift_MemorySemanticsReleaseShift: + root::spv::MemorySemanticsShift = 2; + pub const MemorySemanticsShift_MemorySemanticsAcquireReleaseShift: + root::spv::MemorySemanticsShift = 3; + pub const MemorySemanticsShift_MemorySemanticsSequentiallyConsistentShift: + root::spv::MemorySemanticsShift = 4; + pub const MemorySemanticsShift_MemorySemanticsUniformMemoryShift: + root::spv::MemorySemanticsShift = 6; + pub const MemorySemanticsShift_MemorySemanticsSubgroupMemoryShift: + root::spv::MemorySemanticsShift = 7; + pub const MemorySemanticsShift_MemorySemanticsWorkgroupMemoryShift: + root::spv::MemorySemanticsShift = 8; + pub const MemorySemanticsShift_MemorySemanticsCrossWorkgroupMemoryShift: + root::spv::MemorySemanticsShift = 9; + pub const MemorySemanticsShift_MemorySemanticsAtomicCounterMemoryShift: + root::spv::MemorySemanticsShift = 10; + pub const MemorySemanticsShift_MemorySemanticsImageMemoryShift: + root::spv::MemorySemanticsShift = 11; + pub const MemorySemanticsShift_MemorySemanticsOutputMemoryShift: + root::spv::MemorySemanticsShift = 12; + pub const MemorySemanticsShift_MemorySemanticsOutputMemoryKHRShift: + 
root::spv::MemorySemanticsShift = 12; + pub const MemorySemanticsShift_MemorySemanticsMakeAvailableShift: + root::spv::MemorySemanticsShift = 13; + pub const MemorySemanticsShift_MemorySemanticsMakeAvailableKHRShift: + root::spv::MemorySemanticsShift = 13; + pub const MemorySemanticsShift_MemorySemanticsMakeVisibleShift: + root::spv::MemorySemanticsShift = 14; + pub const MemorySemanticsShift_MemorySemanticsMakeVisibleKHRShift: + root::spv::MemorySemanticsShift = 14; + pub const MemorySemanticsShift_MemorySemanticsVolatileShift: + root::spv::MemorySemanticsShift = 15; + pub const MemorySemanticsShift_MemorySemanticsMax: root::spv::MemorySemanticsShift = + 2147483647; + pub type MemorySemanticsShift = u32; + impl MemorySemanticsMask { + pub const MemorySemanticsMaskNone: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(0); } - pub const MemorySemanticsMask_MemorySemanticsMaskNone: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(0); - pub const MemorySemanticsMask_MemorySemanticsAcquireMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(2); - pub const MemorySemanticsMask_MemorySemanticsReleaseMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(4); - pub const MemorySemanticsMask_MemorySemanticsAcquireReleaseMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(8); - pub const MemorySemanticsMask_MemorySemanticsSequentiallyConsistentMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(16); - pub const MemorySemanticsMask_MemorySemanticsUniformMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(64); - pub const MemorySemanticsMask_MemorySemanticsSubgroupMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(128); - pub const MemorySemanticsMask_MemorySemanticsWorkgroupMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(256); - pub const MemorySemanticsMask_MemorySemanticsCrossWorkgroupMemoryMask: - root::spv::MemorySemanticsMask = - 
MemorySemanticsMask(512); - pub const MemorySemanticsMask_MemorySemanticsAtomicCounterMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(1024); - pub const MemorySemanticsMask_MemorySemanticsImageMemoryMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(2048); - pub const MemorySemanticsMask_MemorySemanticsOutputMemoryKHRMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(4096); - pub const MemorySemanticsMask_MemorySemanticsMakeAvailableKHRMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(8192); - pub const MemorySemanticsMask_MemorySemanticsMakeVisibleKHRMask: - root::spv::MemorySemanticsMask = - MemorySemanticsMask(16384); - impl ::std::ops::BitOr for - root::spv::MemorySemanticsMask { - type - Output - = - Self; + impl MemorySemanticsMask { + pub const MemorySemanticsAcquireMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(2); + } + impl MemorySemanticsMask { + pub const MemorySemanticsReleaseMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(4); + } + impl MemorySemanticsMask { + pub const MemorySemanticsAcquireReleaseMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(8); + } + impl MemorySemanticsMask { + pub const MemorySemanticsSequentiallyConsistentMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(16); + } + impl MemorySemanticsMask { + pub const MemorySemanticsUniformMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(64); + } + impl MemorySemanticsMask { + pub const MemorySemanticsSubgroupMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(128); + } + impl MemorySemanticsMask { + pub const MemorySemanticsWorkgroupMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(256); + } + impl MemorySemanticsMask { + pub const MemorySemanticsCrossWorkgroupMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(512); + } + impl 
MemorySemanticsMask { + pub const MemorySemanticsAtomicCounterMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(1024); + } + impl MemorySemanticsMask { + pub const MemorySemanticsImageMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(2048); + } + impl MemorySemanticsMask { + pub const MemorySemanticsOutputMemoryMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(4096); + } + impl MemorySemanticsMask { + pub const MemorySemanticsOutputMemoryKHRMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(4096); + } + impl MemorySemanticsMask { + pub const MemorySemanticsMakeAvailableMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(8192); + } + impl MemorySemanticsMask { + pub const MemorySemanticsMakeAvailableKHRMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(8192); + } + impl MemorySemanticsMask { + pub const MemorySemanticsMakeVisibleMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(16384); + } + impl MemorySemanticsMask { + pub const MemorySemanticsMakeVisibleKHRMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(16384); + } + impl MemorySemanticsMask { + pub const MemorySemanticsVolatileMask: root::spv::MemorySemanticsMask = + root::spv::MemorySemanticsMask(32768); + } + impl ::std::ops::BitOr for root::spv::MemorySemanticsMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { MemorySemanticsMask(self.0 | other.0) @@ -883,12 +1028,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::MemorySemanticsMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::MemorySemanticsMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { MemorySemanticsMask(self.0 & other.0) @@ -900,47 +1041,68 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, 
Hash)] - pub struct MemorySemanticsMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MemoryAccessShift { - MemoryAccessVolatileShift = 0, - MemoryAccessAlignedShift = 1, - MemoryAccessNontemporalShift = 2, - MemoryAccessMakePointerAvailableKHRShift = 3, - MemoryAccessMakePointerVisibleKHRShift = 4, - MemoryAccessNonPrivatePointerKHRShift = 5, - MemoryAccessMax = 2147483647, + pub struct MemorySemanticsMask(pub u32); + pub const MemoryAccessShift_MemoryAccessVolatileShift: root::spv::MemoryAccessShift = 0; + pub const MemoryAccessShift_MemoryAccessAlignedShift: root::spv::MemoryAccessShift = 1; + pub const MemoryAccessShift_MemoryAccessNontemporalShift: root::spv::MemoryAccessShift = 2; + pub const MemoryAccessShift_MemoryAccessMakePointerAvailableShift: + root::spv::MemoryAccessShift = 3; + pub const MemoryAccessShift_MemoryAccessMakePointerAvailableKHRShift: + root::spv::MemoryAccessShift = 3; + pub const MemoryAccessShift_MemoryAccessMakePointerVisibleShift: + root::spv::MemoryAccessShift = 4; + pub const MemoryAccessShift_MemoryAccessMakePointerVisibleKHRShift: + root::spv::MemoryAccessShift = 4; + pub const MemoryAccessShift_MemoryAccessNonPrivatePointerShift: + root::spv::MemoryAccessShift = 5; + pub const MemoryAccessShift_MemoryAccessNonPrivatePointerKHRShift: + root::spv::MemoryAccessShift = 5; + pub const MemoryAccessShift_MemoryAccessMax: root::spv::MemoryAccessShift = 2147483647; + pub type MemoryAccessShift = u32; + impl MemoryAccessMask { + pub const MemoryAccessMaskNone: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(0); } - pub const MemoryAccessMask_MemoryAccessMaskNone: - root::spv::MemoryAccessMask = - MemoryAccessMask(0); - pub const MemoryAccessMask_MemoryAccessVolatileMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(1); - pub const MemoryAccessMask_MemoryAccessAlignedMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(2); - pub const 
MemoryAccessMask_MemoryAccessNontemporalMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(4); - pub const MemoryAccessMask_MemoryAccessMakePointerAvailableKHRMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(8); - pub const MemoryAccessMask_MemoryAccessMakePointerVisibleKHRMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(16); - pub const MemoryAccessMask_MemoryAccessNonPrivatePointerKHRMask: - root::spv::MemoryAccessMask = - MemoryAccessMask(32); - impl ::std::ops::BitOr for - root::spv::MemoryAccessMask { - type - Output - = - Self; + impl MemoryAccessMask { + pub const MemoryAccessVolatileMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(1); + } + impl MemoryAccessMask { + pub const MemoryAccessAlignedMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(2); + } + impl MemoryAccessMask { + pub const MemoryAccessNontemporalMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(4); + } + impl MemoryAccessMask { + pub const MemoryAccessMakePointerAvailableMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(8); + } + impl MemoryAccessMask { + pub const MemoryAccessMakePointerAvailableKHRMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(8); + } + impl MemoryAccessMask { + pub const MemoryAccessMakePointerVisibleMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(16); + } + impl MemoryAccessMask { + pub const MemoryAccessMakePointerVisibleKHRMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(16); + } + impl MemoryAccessMask { + pub const MemoryAccessNonPrivatePointerMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(32); + } + impl MemoryAccessMask { + pub const MemoryAccessNonPrivatePointerKHRMask: root::spv::MemoryAccessMask = + root::spv::MemoryAccessMask(32); + } + impl ::std::ops::BitOr for root::spv::MemoryAccessMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { MemoryAccessMask(self.0 | other.0) @@ 
-952,12 +1114,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::MemoryAccessMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::MemoryAccessMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { MemoryAccessMask(self.0 & other.0) @@ -969,50 +1127,47 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct MemoryAccessMask(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum Scope { - ScopeCrossDevice = 0, - ScopeDevice = 1, - ScopeWorkgroup = 2, - ScopeSubgroup = 3, - ScopeInvocation = 4, - ScopeQueueFamilyKHR = 5, - ScopeMax = 2147483647, + pub struct MemoryAccessMask(pub u32); + pub const Scope_ScopeCrossDevice: root::spv::Scope = 0; + pub const Scope_ScopeDevice: root::spv::Scope = 1; + pub const Scope_ScopeWorkgroup: root::spv::Scope = 2; + pub const Scope_ScopeSubgroup: root::spv::Scope = 3; + pub const Scope_ScopeInvocation: root::spv::Scope = 4; + pub const Scope_ScopeQueueFamily: root::spv::Scope = 5; + pub const Scope_ScopeQueueFamilyKHR: root::spv::Scope = 5; + pub const Scope_ScopeMax: root::spv::Scope = 2147483647; + pub type Scope = u32; + pub const GroupOperation_GroupOperationReduce: root::spv::GroupOperation = 0; + pub const GroupOperation_GroupOperationInclusiveScan: root::spv::GroupOperation = 1; + pub const GroupOperation_GroupOperationExclusiveScan: root::spv::GroupOperation = 2; + pub const GroupOperation_GroupOperationClusteredReduce: root::spv::GroupOperation = 3; + pub const GroupOperation_GroupOperationPartitionedReduceNV: root::spv::GroupOperation = 6; + pub const GroupOperation_GroupOperationPartitionedInclusiveScanNV: + root::spv::GroupOperation = 7; + pub const GroupOperation_GroupOperationPartitionedExclusiveScanNV: + root::spv::GroupOperation = 8; + pub const GroupOperation_GroupOperationMax: root::spv::GroupOperation = 
2147483647; + pub type GroupOperation = u32; + impl KernelEnqueueFlags { + pub const KernelEnqueueFlagsNoWait: root::spv::KernelEnqueueFlags = + root::spv::KernelEnqueueFlags(0); } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum GroupOperation { - GroupOperationReduce = 0, - GroupOperationInclusiveScan = 1, - GroupOperationExclusiveScan = 2, - GroupOperationClusteredReduce = 3, - GroupOperationPartitionedReduceNV = 6, - GroupOperationPartitionedInclusiveScanNV = 7, - GroupOperationPartitionedExclusiveScanNV = 8, - GroupOperationMax = 2147483647, + impl KernelEnqueueFlags { + pub const KernelEnqueueFlagsWaitKernel: root::spv::KernelEnqueueFlags = + root::spv::KernelEnqueueFlags(1); } - pub const KernelEnqueueFlags_KernelEnqueueFlagsNoWait: - root::spv::KernelEnqueueFlags = - KernelEnqueueFlags(0); - pub const KernelEnqueueFlags_KernelEnqueueFlagsWaitKernel: - root::spv::KernelEnqueueFlags = - KernelEnqueueFlags(1); - pub const KernelEnqueueFlags_KernelEnqueueFlagsWaitWorkGroup: - root::spv::KernelEnqueueFlags = - KernelEnqueueFlags(2); - pub const KernelEnqueueFlags_KernelEnqueueFlagsMax: - root::spv::KernelEnqueueFlags = - KernelEnqueueFlags(2147483647); - impl ::std::ops::BitOr for - root::spv::KernelEnqueueFlags { - type - Output - = - Self; + impl KernelEnqueueFlags { + pub const KernelEnqueueFlagsWaitWorkGroup: root::spv::KernelEnqueueFlags = + root::spv::KernelEnqueueFlags(2); + } + impl KernelEnqueueFlags { + pub const KernelEnqueueFlagsMax: root::spv::KernelEnqueueFlags = + root::spv::KernelEnqueueFlags(2147483647); + } + impl ::std::ops::BitOr for root::spv::KernelEnqueueFlags { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { KernelEnqueueFlags(self.0 | other.0) @@ -1024,12 +1179,8 @@ pub mod root { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - root::spv::KernelEnqueueFlags { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::KernelEnqueueFlags { + type Output = Self; #[inline] fn 
bitand(self, other: Self) -> Self { KernelEnqueueFlags(self.0 & other.0) @@ -1041,27 +1192,24 @@ pub mod root { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct KernelEnqueueFlags(pub ::std::os::raw::c_uint); - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum KernelProfilingInfoShift { - KernelProfilingInfoCmdExecTimeShift = 0, - KernelProfilingInfoMax = 2147483647, + pub struct KernelEnqueueFlags(pub u32); + pub const KernelProfilingInfoShift_KernelProfilingInfoCmdExecTimeShift: + root::spv::KernelProfilingInfoShift = 0; + pub const KernelProfilingInfoShift_KernelProfilingInfoMax: + root::spv::KernelProfilingInfoShift = 2147483647; + pub type KernelProfilingInfoShift = u32; + impl KernelProfilingInfoMask { + pub const KernelProfilingInfoMaskNone: root::spv::KernelProfilingInfoMask = + root::spv::KernelProfilingInfoMask(0); } - pub const KernelProfilingInfoMask_KernelProfilingInfoMaskNone: - root::spv::KernelProfilingInfoMask = - KernelProfilingInfoMask(0); - pub const KernelProfilingInfoMask_KernelProfilingInfoCmdExecTimeMask: - root::spv::KernelProfilingInfoMask = - KernelProfilingInfoMask(1); - impl ::std::ops::BitOr for - root::spv::KernelProfilingInfoMask { - type - Output - = - Self; + impl KernelProfilingInfoMask { + pub const KernelProfilingInfoCmdExecTimeMask: root::spv::KernelProfilingInfoMask = + root::spv::KernelProfilingInfoMask(1); + } + impl ::std::ops::BitOr for root::spv::KernelProfilingInfoMask { + type Output = Self; #[inline] fn bitor(self, other: Self) -> Self { KernelProfilingInfoMask(self.0 | other.0) @@ -1069,17 +1217,12 @@ pub mod root { } impl ::std::ops::BitOrAssign for root::spv::KernelProfilingInfoMask { #[inline] - fn bitor_assign(&mut self, - rhs: root::spv::KernelProfilingInfoMask) { + fn bitor_assign(&mut self, rhs: root::spv::KernelProfilingInfoMask) { self.0 |= rhs.0; } } - impl ::std::ops::BitAnd for - 
root::spv::KernelProfilingInfoMask { - type - Output - = - Self; + impl ::std::ops::BitAnd for root::spv::KernelProfilingInfoMask { + type Output = Self; #[inline] fn bitand(self, other: Self) -> Self { KernelProfilingInfoMask(self.0 & other.0) @@ -1087,553 +1230,760 @@ pub mod root { } impl ::std::ops::BitAndAssign for root::spv::KernelProfilingInfoMask { #[inline] - fn bitand_assign(&mut self, - rhs: root::spv::KernelProfilingInfoMask) { + fn bitand_assign(&mut self, rhs: root::spv::KernelProfilingInfoMask) { self.0 &= rhs.0; } } - #[repr(C)] + #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub struct KernelProfilingInfoMask(pub ::std::os::raw::c_uint); - pub const Capability_CapabilityStorageUniformBufferBlock16: - root::spv::Capability = - Capability::CapabilityStorageBuffer16BitAccess; - pub const Capability_CapabilityUniformAndStorageBuffer16BitAccess: - root::spv::Capability = - Capability::CapabilityStorageUniform16; - pub const Capability_CapabilityShaderViewportIndexLayerNV: - root::spv::Capability = - Capability::CapabilityShaderViewportIndexLayerEXT; - pub const Capability_CapabilityShadingRateNV: root::spv::Capability = - Capability::CapabilityFragmentDensityEXT; - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum Capability { - CapabilityMatrix = 0, - CapabilityShader = 1, - CapabilityGeometry = 2, - CapabilityTessellation = 3, - CapabilityAddresses = 4, - CapabilityLinkage = 5, - CapabilityKernel = 6, - CapabilityVector16 = 7, - CapabilityFloat16Buffer = 8, - CapabilityFloat16 = 9, - CapabilityFloat64 = 10, - CapabilityInt64 = 11, - CapabilityInt64Atomics = 12, - CapabilityImageBasic = 13, - CapabilityImageReadWrite = 14, - CapabilityImageMipmap = 15, - CapabilityPipes = 17, - CapabilityGroups = 18, - CapabilityDeviceEnqueue = 19, - CapabilityLiteralSampler = 20, - CapabilityAtomicStorage = 21, - CapabilityInt16 = 22, - CapabilityTessellationPointSize = 23, - CapabilityGeometryPointSize = 24, - 
CapabilityImageGatherExtended = 25, - CapabilityStorageImageMultisample = 27, - CapabilityUniformBufferArrayDynamicIndexing = 28, - CapabilitySampledImageArrayDynamicIndexing = 29, - CapabilityStorageBufferArrayDynamicIndexing = 30, - CapabilityStorageImageArrayDynamicIndexing = 31, - CapabilityClipDistance = 32, - CapabilityCullDistance = 33, - CapabilityImageCubeArray = 34, - CapabilitySampleRateShading = 35, - CapabilityImageRect = 36, - CapabilitySampledRect = 37, - CapabilityGenericPointer = 38, - CapabilityInt8 = 39, - CapabilityInputAttachment = 40, - CapabilitySparseResidency = 41, - CapabilityMinLod = 42, - CapabilitySampled1D = 43, - CapabilityImage1D = 44, - CapabilitySampledCubeArray = 45, - CapabilitySampledBuffer = 46, - CapabilityImageBuffer = 47, - CapabilityImageMSArray = 48, - CapabilityStorageImageExtendedFormats = 49, - CapabilityImageQuery = 50, - CapabilityDerivativeControl = 51, - CapabilityInterpolationFunction = 52, - CapabilityTransformFeedback = 53, - CapabilityGeometryStreams = 54, - CapabilityStorageImageReadWithoutFormat = 55, - CapabilityStorageImageWriteWithoutFormat = 56, - CapabilityMultiViewport = 57, - CapabilitySubgroupDispatch = 58, - CapabilityNamedBarrier = 59, - CapabilityPipeStorage = 60, - CapabilityGroupNonUniform = 61, - CapabilityGroupNonUniformVote = 62, - CapabilityGroupNonUniformArithmetic = 63, - CapabilityGroupNonUniformBallot = 64, - CapabilityGroupNonUniformShuffle = 65, - CapabilityGroupNonUniformShuffleRelative = 66, - CapabilityGroupNonUniformClustered = 67, - CapabilityGroupNonUniformQuad = 68, - CapabilitySubgroupBallotKHR = 4423, - CapabilityDrawParameters = 4427, - CapabilitySubgroupVoteKHR = 4431, - CapabilityStorageBuffer16BitAccess = 4433, - CapabilityStorageUniform16 = 4434, - CapabilityStoragePushConstant16 = 4435, - CapabilityStorageInputOutput16 = 4436, - CapabilityDeviceGroup = 4437, - CapabilityMultiView = 4439, - CapabilityVariablePointersStorageBuffer = 4441, - CapabilityVariablePointers = 4442, 
- CapabilityAtomicStorageOps = 4445, - CapabilitySampleMaskPostDepthCoverage = 4447, - CapabilityStorageBuffer8BitAccess = 4448, - CapabilityUniformAndStorageBuffer8BitAccess = 4449, - CapabilityStoragePushConstant8 = 4450, - CapabilityDenormPreserve = 4464, - CapabilityDenormFlushToZero = 4465, - CapabilitySignedZeroInfNanPreserve = 4466, - CapabilityRoundingModeRTE = 4467, - CapabilityRoundingModeRTZ = 4468, - CapabilityFloat16ImageAMD = 5008, - CapabilityImageGatherBiasLodAMD = 5009, - CapabilityFragmentMaskAMD = 5010, - CapabilityStencilExportEXT = 5013, - CapabilityImageReadWriteLodAMD = 5015, - CapabilitySampleMaskOverrideCoverageNV = 5249, - CapabilityGeometryShaderPassthroughNV = 5251, - CapabilityShaderViewportIndexLayerEXT = 5254, - CapabilityShaderViewportMaskNV = 5255, - CapabilityShaderStereoViewNV = 5259, - CapabilityPerViewAttributesNV = 5260, - CapabilityFragmentFullyCoveredEXT = 5265, - CapabilityMeshShadingNV = 5266, - CapabilityImageFootprintNV = 5282, - CapabilityFragmentBarycentricNV = 5284, - CapabilityComputeDerivativeGroupQuadsNV = 5288, - CapabilityFragmentDensityEXT = 5291, - CapabilityGroupNonUniformPartitionedNV = 5297, - CapabilityShaderNonUniformEXT = 5301, - CapabilityRuntimeDescriptorArrayEXT = 5302, - CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, - CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, - CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, - CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, - CapabilitySampledImageArrayNonUniformIndexingEXT = 5307, - CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, - CapabilityStorageImageArrayNonUniformIndexingEXT = 5309, - CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, - CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, - CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, - CapabilityRayTracingNV = 5340, - CapabilityVulkanMemoryModelKHR = 5345, - CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, - 
CapabilityPhysicalStorageBufferAddressesEXT = 5347, - CapabilityComputeDerivativeGroupLinearNV = 5350, - CapabilitySubgroupShuffleINTEL = 5568, - CapabilitySubgroupBufferBlockIOINTEL = 5569, - CapabilitySubgroupImageBlockIOINTEL = 5570, - CapabilitySubgroupImageMediaBlockIOINTEL = 5579, - CapabilityMax = 2147483647, - } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum Op { - OpNop = 0, - OpUndef = 1, - OpSourceContinued = 2, - OpSource = 3, - OpSourceExtension = 4, - OpName = 5, - OpMemberName = 6, - OpString = 7, - OpLine = 8, - OpExtension = 10, - OpExtInstImport = 11, - OpExtInst = 12, - OpMemoryModel = 14, - OpEntryPoint = 15, - OpExecutionMode = 16, - OpCapability = 17, - OpTypeVoid = 19, - OpTypeBool = 20, - OpTypeInt = 21, - OpTypeFloat = 22, - OpTypeVector = 23, - OpTypeMatrix = 24, - OpTypeImage = 25, - OpTypeSampler = 26, - OpTypeSampledImage = 27, - OpTypeArray = 28, - OpTypeRuntimeArray = 29, - OpTypeStruct = 30, - OpTypeOpaque = 31, - OpTypePointer = 32, - OpTypeFunction = 33, - OpTypeEvent = 34, - OpTypeDeviceEvent = 35, - OpTypeReserveId = 36, - OpTypeQueue = 37, - OpTypePipe = 38, - OpTypeForwardPointer = 39, - OpConstantTrue = 41, - OpConstantFalse = 42, - OpConstant = 43, - OpConstantComposite = 44, - OpConstantSampler = 45, - OpConstantNull = 46, - OpSpecConstantTrue = 48, - OpSpecConstantFalse = 49, - OpSpecConstant = 50, - OpSpecConstantComposite = 51, - OpSpecConstantOp = 52, - OpFunction = 54, - OpFunctionParameter = 55, - OpFunctionEnd = 56, - OpFunctionCall = 57, - OpVariable = 59, - OpImageTexelPointer = 60, - OpLoad = 61, - OpStore = 62, - OpCopyMemory = 63, - OpCopyMemorySized = 64, - OpAccessChain = 65, - OpInBoundsAccessChain = 66, - OpPtrAccessChain = 67, - OpArrayLength = 68, - OpGenericPtrMemSemantics = 69, - OpInBoundsPtrAccessChain = 70, - OpDecorate = 71, - OpMemberDecorate = 72, - OpDecorationGroup = 73, - OpGroupDecorate = 74, - OpGroupMemberDecorate = 75, - OpVectorExtractDynamic = 77, - 
OpVectorInsertDynamic = 78, - OpVectorShuffle = 79, - OpCompositeConstruct = 80, - OpCompositeExtract = 81, - OpCompositeInsert = 82, - OpCopyObject = 83, - OpTranspose = 84, - OpSampledImage = 86, - OpImageSampleImplicitLod = 87, - OpImageSampleExplicitLod = 88, - OpImageSampleDrefImplicitLod = 89, - OpImageSampleDrefExplicitLod = 90, - OpImageSampleProjImplicitLod = 91, - OpImageSampleProjExplicitLod = 92, - OpImageSampleProjDrefImplicitLod = 93, - OpImageSampleProjDrefExplicitLod = 94, - OpImageFetch = 95, - OpImageGather = 96, - OpImageDrefGather = 97, - OpImageRead = 98, - OpImageWrite = 99, - OpImage = 100, - OpImageQueryFormat = 101, - OpImageQueryOrder = 102, - OpImageQuerySizeLod = 103, - OpImageQuerySize = 104, - OpImageQueryLod = 105, - OpImageQueryLevels = 106, - OpImageQuerySamples = 107, - OpConvertFToU = 109, - OpConvertFToS = 110, - OpConvertSToF = 111, - OpConvertUToF = 112, - OpUConvert = 113, - OpSConvert = 114, - OpFConvert = 115, - OpQuantizeToF16 = 116, - OpConvertPtrToU = 117, - OpSatConvertSToU = 118, - OpSatConvertUToS = 119, - OpConvertUToPtr = 120, - OpPtrCastToGeneric = 121, - OpGenericCastToPtr = 122, - OpGenericCastToPtrExplicit = 123, - OpBitcast = 124, - OpSNegate = 126, - OpFNegate = 127, - OpIAdd = 128, - OpFAdd = 129, - OpISub = 130, - OpFSub = 131, - OpIMul = 132, - OpFMul = 133, - OpUDiv = 134, - OpSDiv = 135, - OpFDiv = 136, - OpUMod = 137, - OpSRem = 138, - OpSMod = 139, - OpFRem = 140, - OpFMod = 141, - OpVectorTimesScalar = 142, - OpMatrixTimesScalar = 143, - OpVectorTimesMatrix = 144, - OpMatrixTimesVector = 145, - OpMatrixTimesMatrix = 146, - OpOuterProduct = 147, - OpDot = 148, - OpIAddCarry = 149, - OpISubBorrow = 150, - OpUMulExtended = 151, - OpSMulExtended = 152, - OpAny = 154, - OpAll = 155, - OpIsNan = 156, - OpIsInf = 157, - OpIsFinite = 158, - OpIsNormal = 159, - OpSignBitSet = 160, - OpLessOrGreater = 161, - OpOrdered = 162, - OpUnordered = 163, - OpLogicalEqual = 164, - OpLogicalNotEqual = 165, - OpLogicalOr = 
166, - OpLogicalAnd = 167, - OpLogicalNot = 168, - OpSelect = 169, - OpIEqual = 170, - OpINotEqual = 171, - OpUGreaterThan = 172, - OpSGreaterThan = 173, - OpUGreaterThanEqual = 174, - OpSGreaterThanEqual = 175, - OpULessThan = 176, - OpSLessThan = 177, - OpULessThanEqual = 178, - OpSLessThanEqual = 179, - OpFOrdEqual = 180, - OpFUnordEqual = 181, - OpFOrdNotEqual = 182, - OpFUnordNotEqual = 183, - OpFOrdLessThan = 184, - OpFUnordLessThan = 185, - OpFOrdGreaterThan = 186, - OpFUnordGreaterThan = 187, - OpFOrdLessThanEqual = 188, - OpFUnordLessThanEqual = 189, - OpFOrdGreaterThanEqual = 190, - OpFUnordGreaterThanEqual = 191, - OpShiftRightLogical = 194, - OpShiftRightArithmetic = 195, - OpShiftLeftLogical = 196, - OpBitwiseOr = 197, - OpBitwiseXor = 198, - OpBitwiseAnd = 199, - OpNot = 200, - OpBitFieldInsert = 201, - OpBitFieldSExtract = 202, - OpBitFieldUExtract = 203, - OpBitReverse = 204, - OpBitCount = 205, - OpDPdx = 207, - OpDPdy = 208, - OpFwidth = 209, - OpDPdxFine = 210, - OpDPdyFine = 211, - OpFwidthFine = 212, - OpDPdxCoarse = 213, - OpDPdyCoarse = 214, - OpFwidthCoarse = 215, - OpEmitVertex = 218, - OpEndPrimitive = 219, - OpEmitStreamVertex = 220, - OpEndStreamPrimitive = 221, - OpControlBarrier = 224, - OpMemoryBarrier = 225, - OpAtomicLoad = 227, - OpAtomicStore = 228, - OpAtomicExchange = 229, - OpAtomicCompareExchange = 230, - OpAtomicCompareExchangeWeak = 231, - OpAtomicIIncrement = 232, - OpAtomicIDecrement = 233, - OpAtomicIAdd = 234, - OpAtomicISub = 235, - OpAtomicSMin = 236, - OpAtomicUMin = 237, - OpAtomicSMax = 238, - OpAtomicUMax = 239, - OpAtomicAnd = 240, - OpAtomicOr = 241, - OpAtomicXor = 242, - OpPhi = 245, - OpLoopMerge = 246, - OpSelectionMerge = 247, - OpLabel = 248, - OpBranch = 249, - OpBranchConditional = 250, - OpSwitch = 251, - OpKill = 252, - OpReturn = 253, - OpReturnValue = 254, - OpUnreachable = 255, - OpLifetimeStart = 256, - OpLifetimeStop = 257, - OpGroupAsyncCopy = 259, - OpGroupWaitEvents = 260, - OpGroupAll = 261, - 
OpGroupAny = 262, - OpGroupBroadcast = 263, - OpGroupIAdd = 264, - OpGroupFAdd = 265, - OpGroupFMin = 266, - OpGroupUMin = 267, - OpGroupSMin = 268, - OpGroupFMax = 269, - OpGroupUMax = 270, - OpGroupSMax = 271, - OpReadPipe = 274, - OpWritePipe = 275, - OpReservedReadPipe = 276, - OpReservedWritePipe = 277, - OpReserveReadPipePackets = 278, - OpReserveWritePipePackets = 279, - OpCommitReadPipe = 280, - OpCommitWritePipe = 281, - OpIsValidReserveId = 282, - OpGetNumPipePackets = 283, - OpGetMaxPipePackets = 284, - OpGroupReserveReadPipePackets = 285, - OpGroupReserveWritePipePackets = 286, - OpGroupCommitReadPipe = 287, - OpGroupCommitWritePipe = 288, - OpEnqueueMarker = 291, - OpEnqueueKernel = 292, - OpGetKernelNDrangeSubGroupCount = 293, - OpGetKernelNDrangeMaxSubGroupSize = 294, - OpGetKernelWorkGroupSize = 295, - OpGetKernelPreferredWorkGroupSizeMultiple = 296, - OpRetainEvent = 297, - OpReleaseEvent = 298, - OpCreateUserEvent = 299, - OpIsValidEvent = 300, - OpSetUserEventStatus = 301, - OpCaptureEventProfilingInfo = 302, - OpGetDefaultQueue = 303, - OpBuildNDRange = 304, - OpImageSparseSampleImplicitLod = 305, - OpImageSparseSampleExplicitLod = 306, - OpImageSparseSampleDrefImplicitLod = 307, - OpImageSparseSampleDrefExplicitLod = 308, - OpImageSparseSampleProjImplicitLod = 309, - OpImageSparseSampleProjExplicitLod = 310, - OpImageSparseSampleProjDrefImplicitLod = 311, - OpImageSparseSampleProjDrefExplicitLod = 312, - OpImageSparseFetch = 313, - OpImageSparseGather = 314, - OpImageSparseDrefGather = 315, - OpImageSparseTexelsResident = 316, - OpNoLine = 317, - OpAtomicFlagTestAndSet = 318, - OpAtomicFlagClear = 319, - OpImageSparseRead = 320, - OpSizeOf = 321, - OpTypePipeStorage = 322, - OpConstantPipeStorage = 323, - OpCreatePipeFromPipeStorage = 324, - OpGetKernelLocalSizeForSubgroupCount = 325, - OpGetKernelMaxNumSubgroups = 326, - OpTypeNamedBarrier = 327, - OpNamedBarrierInitialize = 328, - OpMemoryNamedBarrier = 329, - OpModuleProcessed = 330, - 
OpExecutionModeId = 331, - OpDecorateId = 332, - OpGroupNonUniformElect = 333, - OpGroupNonUniformAll = 334, - OpGroupNonUniformAny = 335, - OpGroupNonUniformAllEqual = 336, - OpGroupNonUniformBroadcast = 337, - OpGroupNonUniformBroadcastFirst = 338, - OpGroupNonUniformBallot = 339, - OpGroupNonUniformInverseBallot = 340, - OpGroupNonUniformBallotBitExtract = 341, - OpGroupNonUniformBallotBitCount = 342, - OpGroupNonUniformBallotFindLSB = 343, - OpGroupNonUniformBallotFindMSB = 344, - OpGroupNonUniformShuffle = 345, - OpGroupNonUniformShuffleXor = 346, - OpGroupNonUniformShuffleUp = 347, - OpGroupNonUniformShuffleDown = 348, - OpGroupNonUniformIAdd = 349, - OpGroupNonUniformFAdd = 350, - OpGroupNonUniformIMul = 351, - OpGroupNonUniformFMul = 352, - OpGroupNonUniformSMin = 353, - OpGroupNonUniformUMin = 354, - OpGroupNonUniformFMin = 355, - OpGroupNonUniformSMax = 356, - OpGroupNonUniformUMax = 357, - OpGroupNonUniformFMax = 358, - OpGroupNonUniformBitwiseAnd = 359, - OpGroupNonUniformBitwiseOr = 360, - OpGroupNonUniformBitwiseXor = 361, - OpGroupNonUniformLogicalAnd = 362, - OpGroupNonUniformLogicalOr = 363, - OpGroupNonUniformLogicalXor = 364, - OpGroupNonUniformQuadBroadcast = 365, - OpGroupNonUniformQuadSwap = 366, - OpSubgroupBallotKHR = 4421, - OpSubgroupFirstInvocationKHR = 4422, - OpSubgroupAllKHR = 4428, - OpSubgroupAnyKHR = 4429, - OpSubgroupAllEqualKHR = 4430, - OpSubgroupReadInvocationKHR = 4432, - OpGroupIAddNonUniformAMD = 5000, - OpGroupFAddNonUniformAMD = 5001, - OpGroupFMinNonUniformAMD = 5002, - OpGroupUMinNonUniformAMD = 5003, - OpGroupSMinNonUniformAMD = 5004, - OpGroupFMaxNonUniformAMD = 5005, - OpGroupUMaxNonUniformAMD = 5006, - OpGroupSMaxNonUniformAMD = 5007, - OpFragmentMaskFetchAMD = 5011, - OpFragmentFetchAMD = 5012, - OpImageSampleFootprintNV = 5283, - OpGroupNonUniformPartitionNV = 5296, - OpWritePackedPrimitiveIndices4x8NV = 5299, - OpReportIntersectionNV = 5334, - OpIgnoreIntersectionNV = 5335, - OpTerminateRayNV = 5336, - OpTraceNV = 
5337, - OpTypeAccelerationStructureNV = 5341, - OpExecuteCallableNV = 5344, - OpSubgroupShuffleINTEL = 5571, - OpSubgroupShuffleDownINTEL = 5572, - OpSubgroupShuffleUpINTEL = 5573, - OpSubgroupShuffleXorINTEL = 5574, - OpSubgroupBlockReadINTEL = 5575, - OpSubgroupBlockWriteINTEL = 5576, - OpSubgroupImageBlockReadINTEL = 5577, - OpSubgroupImageBlockWriteINTEL = 5578, - OpSubgroupImageMediaBlockReadINTEL = 5580, - OpSubgroupImageMediaBlockWriteINTEL = 5581, - OpDecorateStringGOOGLE = 5632, - OpMemberDecorateStringGOOGLE = 5633, - OpMax = 2147483647, - } + pub struct KernelProfilingInfoMask(pub u32); + pub const Capability_CapabilityMatrix: root::spv::Capability = 0; + pub const Capability_CapabilityShader: root::spv::Capability = 1; + pub const Capability_CapabilityGeometry: root::spv::Capability = 2; + pub const Capability_CapabilityTessellation: root::spv::Capability = 3; + pub const Capability_CapabilityAddresses: root::spv::Capability = 4; + pub const Capability_CapabilityLinkage: root::spv::Capability = 5; + pub const Capability_CapabilityKernel: root::spv::Capability = 6; + pub const Capability_CapabilityVector16: root::spv::Capability = 7; + pub const Capability_CapabilityFloat16Buffer: root::spv::Capability = 8; + pub const Capability_CapabilityFloat16: root::spv::Capability = 9; + pub const Capability_CapabilityFloat64: root::spv::Capability = 10; + pub const Capability_CapabilityInt64: root::spv::Capability = 11; + pub const Capability_CapabilityInt64Atomics: root::spv::Capability = 12; + pub const Capability_CapabilityImageBasic: root::spv::Capability = 13; + pub const Capability_CapabilityImageReadWrite: root::spv::Capability = 14; + pub const Capability_CapabilityImageMipmap: root::spv::Capability = 15; + pub const Capability_CapabilityPipes: root::spv::Capability = 17; + pub const Capability_CapabilityGroups: root::spv::Capability = 18; + pub const Capability_CapabilityDeviceEnqueue: root::spv::Capability = 19; + pub const 
Capability_CapabilityLiteralSampler: root::spv::Capability = 20; + pub const Capability_CapabilityAtomicStorage: root::spv::Capability = 21; + pub const Capability_CapabilityInt16: root::spv::Capability = 22; + pub const Capability_CapabilityTessellationPointSize: root::spv::Capability = 23; + pub const Capability_CapabilityGeometryPointSize: root::spv::Capability = 24; + pub const Capability_CapabilityImageGatherExtended: root::spv::Capability = 25; + pub const Capability_CapabilityStorageImageMultisample: root::spv::Capability = 27; + pub const Capability_CapabilityUniformBufferArrayDynamicIndexing: root::spv::Capability = + 28; + pub const Capability_CapabilitySampledImageArrayDynamicIndexing: root::spv::Capability = 29; + pub const Capability_CapabilityStorageBufferArrayDynamicIndexing: root::spv::Capability = + 30; + pub const Capability_CapabilityStorageImageArrayDynamicIndexing: root::spv::Capability = 31; + pub const Capability_CapabilityClipDistance: root::spv::Capability = 32; + pub const Capability_CapabilityCullDistance: root::spv::Capability = 33; + pub const Capability_CapabilityImageCubeArray: root::spv::Capability = 34; + pub const Capability_CapabilitySampleRateShading: root::spv::Capability = 35; + pub const Capability_CapabilityImageRect: root::spv::Capability = 36; + pub const Capability_CapabilitySampledRect: root::spv::Capability = 37; + pub const Capability_CapabilityGenericPointer: root::spv::Capability = 38; + pub const Capability_CapabilityInt8: root::spv::Capability = 39; + pub const Capability_CapabilityInputAttachment: root::spv::Capability = 40; + pub const Capability_CapabilitySparseResidency: root::spv::Capability = 41; + pub const Capability_CapabilityMinLod: root::spv::Capability = 42; + pub const Capability_CapabilitySampled1D: root::spv::Capability = 43; + pub const Capability_CapabilityImage1D: root::spv::Capability = 44; + pub const Capability_CapabilitySampledCubeArray: root::spv::Capability = 45; + pub const 
Capability_CapabilitySampledBuffer: root::spv::Capability = 46; + pub const Capability_CapabilityImageBuffer: root::spv::Capability = 47; + pub const Capability_CapabilityImageMSArray: root::spv::Capability = 48; + pub const Capability_CapabilityStorageImageExtendedFormats: root::spv::Capability = 49; + pub const Capability_CapabilityImageQuery: root::spv::Capability = 50; + pub const Capability_CapabilityDerivativeControl: root::spv::Capability = 51; + pub const Capability_CapabilityInterpolationFunction: root::spv::Capability = 52; + pub const Capability_CapabilityTransformFeedback: root::spv::Capability = 53; + pub const Capability_CapabilityGeometryStreams: root::spv::Capability = 54; + pub const Capability_CapabilityStorageImageReadWithoutFormat: root::spv::Capability = 55; + pub const Capability_CapabilityStorageImageWriteWithoutFormat: root::spv::Capability = 56; + pub const Capability_CapabilityMultiViewport: root::spv::Capability = 57; + pub const Capability_CapabilitySubgroupDispatch: root::spv::Capability = 58; + pub const Capability_CapabilityNamedBarrier: root::spv::Capability = 59; + pub const Capability_CapabilityPipeStorage: root::spv::Capability = 60; + pub const Capability_CapabilityGroupNonUniform: root::spv::Capability = 61; + pub const Capability_CapabilityGroupNonUniformVote: root::spv::Capability = 62; + pub const Capability_CapabilityGroupNonUniformArithmetic: root::spv::Capability = 63; + pub const Capability_CapabilityGroupNonUniformBallot: root::spv::Capability = 64; + pub const Capability_CapabilityGroupNonUniformShuffle: root::spv::Capability = 65; + pub const Capability_CapabilityGroupNonUniformShuffleRelative: root::spv::Capability = 66; + pub const Capability_CapabilityGroupNonUniformClustered: root::spv::Capability = 67; + pub const Capability_CapabilityGroupNonUniformQuad: root::spv::Capability = 68; + pub const Capability_CapabilityShaderLayer: root::spv::Capability = 69; + pub const Capability_CapabilityShaderViewportIndex: 
root::spv::Capability = 70; + pub const Capability_CapabilitySubgroupBallotKHR: root::spv::Capability = 4423; + pub const Capability_CapabilityDrawParameters: root::spv::Capability = 4427; + pub const Capability_CapabilitySubgroupVoteKHR: root::spv::Capability = 4431; + pub const Capability_CapabilityStorageBuffer16BitAccess: root::spv::Capability = 4433; + pub const Capability_CapabilityStorageUniformBufferBlock16: root::spv::Capability = 4433; + pub const Capability_CapabilityStorageUniform16: root::spv::Capability = 4434; + pub const Capability_CapabilityUniformAndStorageBuffer16BitAccess: root::spv::Capability = + 4434; + pub const Capability_CapabilityStoragePushConstant16: root::spv::Capability = 4435; + pub const Capability_CapabilityStorageInputOutput16: root::spv::Capability = 4436; + pub const Capability_CapabilityDeviceGroup: root::spv::Capability = 4437; + pub const Capability_CapabilityMultiView: root::spv::Capability = 4439; + pub const Capability_CapabilityVariablePointersStorageBuffer: root::spv::Capability = 4441; + pub const Capability_CapabilityVariablePointers: root::spv::Capability = 4442; + pub const Capability_CapabilityAtomicStorageOps: root::spv::Capability = 4445; + pub const Capability_CapabilitySampleMaskPostDepthCoverage: root::spv::Capability = 4447; + pub const Capability_CapabilityStorageBuffer8BitAccess: root::spv::Capability = 4448; + pub const Capability_CapabilityUniformAndStorageBuffer8BitAccess: root::spv::Capability = + 4449; + pub const Capability_CapabilityStoragePushConstant8: root::spv::Capability = 4450; + pub const Capability_CapabilityDenormPreserve: root::spv::Capability = 4464; + pub const Capability_CapabilityDenormFlushToZero: root::spv::Capability = 4465; + pub const Capability_CapabilitySignedZeroInfNanPreserve: root::spv::Capability = 4466; + pub const Capability_CapabilityRoundingModeRTE: root::spv::Capability = 4467; + pub const Capability_CapabilityRoundingModeRTZ: root::spv::Capability = 4468; + pub const 
Capability_CapabilityFloat16ImageAMD: root::spv::Capability = 5008; + pub const Capability_CapabilityImageGatherBiasLodAMD: root::spv::Capability = 5009; + pub const Capability_CapabilityFragmentMaskAMD: root::spv::Capability = 5010; + pub const Capability_CapabilityStencilExportEXT: root::spv::Capability = 5013; + pub const Capability_CapabilityImageReadWriteLodAMD: root::spv::Capability = 5015; + pub const Capability_CapabilityShaderClockKHR: root::spv::Capability = 5055; + pub const Capability_CapabilitySampleMaskOverrideCoverageNV: root::spv::Capability = 5249; + pub const Capability_CapabilityGeometryShaderPassthroughNV: root::spv::Capability = 5251; + pub const Capability_CapabilityShaderViewportIndexLayerEXT: root::spv::Capability = 5254; + pub const Capability_CapabilityShaderViewportIndexLayerNV: root::spv::Capability = 5254; + pub const Capability_CapabilityShaderViewportMaskNV: root::spv::Capability = 5255; + pub const Capability_CapabilityShaderStereoViewNV: root::spv::Capability = 5259; + pub const Capability_CapabilityPerViewAttributesNV: root::spv::Capability = 5260; + pub const Capability_CapabilityFragmentFullyCoveredEXT: root::spv::Capability = 5265; + pub const Capability_CapabilityMeshShadingNV: root::spv::Capability = 5266; + pub const Capability_CapabilityImageFootprintNV: root::spv::Capability = 5282; + pub const Capability_CapabilityFragmentBarycentricNV: root::spv::Capability = 5284; + pub const Capability_CapabilityComputeDerivativeGroupQuadsNV: root::spv::Capability = 5288; + pub const Capability_CapabilityFragmentDensityEXT: root::spv::Capability = 5291; + pub const Capability_CapabilityShadingRateNV: root::spv::Capability = 5291; + pub const Capability_CapabilityGroupNonUniformPartitionedNV: root::spv::Capability = 5297; + pub const Capability_CapabilityShaderNonUniform: root::spv::Capability = 5301; + pub const Capability_CapabilityShaderNonUniformEXT: root::spv::Capability = 5301; + pub const 
Capability_CapabilityRuntimeDescriptorArray: root::spv::Capability = 5302; + pub const Capability_CapabilityRuntimeDescriptorArrayEXT: root::spv::Capability = 5302; + pub const Capability_CapabilityInputAttachmentArrayDynamicIndexing: root::spv::Capability = + 5303; + pub const Capability_CapabilityInputAttachmentArrayDynamicIndexingEXT: + root::spv::Capability = 5303; + pub const Capability_CapabilityUniformTexelBufferArrayDynamicIndexing: + root::spv::Capability = 5304; + pub const Capability_CapabilityUniformTexelBufferArrayDynamicIndexingEXT: + root::spv::Capability = 5304; + pub const Capability_CapabilityStorageTexelBufferArrayDynamicIndexing: + root::spv::Capability = 5305; + pub const Capability_CapabilityStorageTexelBufferArrayDynamicIndexingEXT: + root::spv::Capability = 5305; + pub const Capability_CapabilityUniformBufferArrayNonUniformIndexing: root::spv::Capability = + 5306; + pub const Capability_CapabilityUniformBufferArrayNonUniformIndexingEXT: + root::spv::Capability = 5306; + pub const Capability_CapabilitySampledImageArrayNonUniformIndexing: root::spv::Capability = + 5307; + pub const Capability_CapabilitySampledImageArrayNonUniformIndexingEXT: + root::spv::Capability = 5307; + pub const Capability_CapabilityStorageBufferArrayNonUniformIndexing: root::spv::Capability = + 5308; + pub const Capability_CapabilityStorageBufferArrayNonUniformIndexingEXT: + root::spv::Capability = 5308; + pub const Capability_CapabilityStorageImageArrayNonUniformIndexing: root::spv::Capability = + 5309; + pub const Capability_CapabilityStorageImageArrayNonUniformIndexingEXT: + root::spv::Capability = 5309; + pub const Capability_CapabilityInputAttachmentArrayNonUniformIndexing: + root::spv::Capability = 5310; + pub const Capability_CapabilityInputAttachmentArrayNonUniformIndexingEXT: + root::spv::Capability = 5310; + pub const Capability_CapabilityUniformTexelBufferArrayNonUniformIndexing: + root::spv::Capability = 5311; + pub const 
Capability_CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: + root::spv::Capability = 5311; + pub const Capability_CapabilityStorageTexelBufferArrayNonUniformIndexing: + root::spv::Capability = 5312; + pub const Capability_CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: + root::spv::Capability = 5312; + pub const Capability_CapabilityRayTracingNV: root::spv::Capability = 5340; + pub const Capability_CapabilityVulkanMemoryModel: root::spv::Capability = 5345; + pub const Capability_CapabilityVulkanMemoryModelKHR: root::spv::Capability = 5345; + pub const Capability_CapabilityVulkanMemoryModelDeviceScope: root::spv::Capability = 5346; + pub const Capability_CapabilityVulkanMemoryModelDeviceScopeKHR: root::spv::Capability = + 5346; + pub const Capability_CapabilityPhysicalStorageBufferAddresses: root::spv::Capability = 5347; + pub const Capability_CapabilityPhysicalStorageBufferAddressesEXT: root::spv::Capability = + 5347; + pub const Capability_CapabilityComputeDerivativeGroupLinearNV: root::spv::Capability = 5350; + pub const Capability_CapabilityCooperativeMatrixNV: root::spv::Capability = 5357; + pub const Capability_CapabilityFragmentShaderSampleInterlockEXT: root::spv::Capability = + 5363; + pub const Capability_CapabilityFragmentShaderShadingRateInterlockEXT: + root::spv::Capability = 5372; + pub const Capability_CapabilityShaderSMBuiltinsNV: root::spv::Capability = 5373; + pub const Capability_CapabilityFragmentShaderPixelInterlockEXT: root::spv::Capability = + 5378; + pub const Capability_CapabilityDemoteToHelperInvocationEXT: root::spv::Capability = 5379; + pub const Capability_CapabilitySubgroupShuffleINTEL: root::spv::Capability = 5568; + pub const Capability_CapabilitySubgroupBufferBlockIOINTEL: root::spv::Capability = 5569; + pub const Capability_CapabilitySubgroupImageBlockIOINTEL: root::spv::Capability = 5570; + pub const Capability_CapabilitySubgroupImageMediaBlockIOINTEL: root::spv::Capability = 5579; + pub const 
Capability_CapabilityIntegerFunctions2INTEL: root::spv::Capability = 5584; + pub const Capability_CapabilitySubgroupAvcMotionEstimationINTEL: root::spv::Capability = + 5696; + pub const Capability_CapabilitySubgroupAvcMotionEstimationIntraINTEL: + root::spv::Capability = 5697; + pub const Capability_CapabilitySubgroupAvcMotionEstimationChromaINTEL: + root::spv::Capability = 5698; + pub const Capability_CapabilityMax: root::spv::Capability = 2147483647; + pub type Capability = u32; + pub const Op_OpNop: root::spv::Op = 0; + pub const Op_OpUndef: root::spv::Op = 1; + pub const Op_OpSourceContinued: root::spv::Op = 2; + pub const Op_OpSource: root::spv::Op = 3; + pub const Op_OpSourceExtension: root::spv::Op = 4; + pub const Op_OpName: root::spv::Op = 5; + pub const Op_OpMemberName: root::spv::Op = 6; + pub const Op_OpString: root::spv::Op = 7; + pub const Op_OpLine: root::spv::Op = 8; + pub const Op_OpExtension: root::spv::Op = 10; + pub const Op_OpExtInstImport: root::spv::Op = 11; + pub const Op_OpExtInst: root::spv::Op = 12; + pub const Op_OpMemoryModel: root::spv::Op = 14; + pub const Op_OpEntryPoint: root::spv::Op = 15; + pub const Op_OpExecutionMode: root::spv::Op = 16; + pub const Op_OpCapability: root::spv::Op = 17; + pub const Op_OpTypeVoid: root::spv::Op = 19; + pub const Op_OpTypeBool: root::spv::Op = 20; + pub const Op_OpTypeInt: root::spv::Op = 21; + pub const Op_OpTypeFloat: root::spv::Op = 22; + pub const Op_OpTypeVector: root::spv::Op = 23; + pub const Op_OpTypeMatrix: root::spv::Op = 24; + pub const Op_OpTypeImage: root::spv::Op = 25; + pub const Op_OpTypeSampler: root::spv::Op = 26; + pub const Op_OpTypeSampledImage: root::spv::Op = 27; + pub const Op_OpTypeArray: root::spv::Op = 28; + pub const Op_OpTypeRuntimeArray: root::spv::Op = 29; + pub const Op_OpTypeStruct: root::spv::Op = 30; + pub const Op_OpTypeOpaque: root::spv::Op = 31; + pub const Op_OpTypePointer: root::spv::Op = 32; + pub const Op_OpTypeFunction: root::spv::Op = 33; + pub const 
Op_OpTypeEvent: root::spv::Op = 34; + pub const Op_OpTypeDeviceEvent: root::spv::Op = 35; + pub const Op_OpTypeReserveId: root::spv::Op = 36; + pub const Op_OpTypeQueue: root::spv::Op = 37; + pub const Op_OpTypePipe: root::spv::Op = 38; + pub const Op_OpTypeForwardPointer: root::spv::Op = 39; + pub const Op_OpConstantTrue: root::spv::Op = 41; + pub const Op_OpConstantFalse: root::spv::Op = 42; + pub const Op_OpConstant: root::spv::Op = 43; + pub const Op_OpConstantComposite: root::spv::Op = 44; + pub const Op_OpConstantSampler: root::spv::Op = 45; + pub const Op_OpConstantNull: root::spv::Op = 46; + pub const Op_OpSpecConstantTrue: root::spv::Op = 48; + pub const Op_OpSpecConstantFalse: root::spv::Op = 49; + pub const Op_OpSpecConstant: root::spv::Op = 50; + pub const Op_OpSpecConstantComposite: root::spv::Op = 51; + pub const Op_OpSpecConstantOp: root::spv::Op = 52; + pub const Op_OpFunction: root::spv::Op = 54; + pub const Op_OpFunctionParameter: root::spv::Op = 55; + pub const Op_OpFunctionEnd: root::spv::Op = 56; + pub const Op_OpFunctionCall: root::spv::Op = 57; + pub const Op_OpVariable: root::spv::Op = 59; + pub const Op_OpImageTexelPointer: root::spv::Op = 60; + pub const Op_OpLoad: root::spv::Op = 61; + pub const Op_OpStore: root::spv::Op = 62; + pub const Op_OpCopyMemory: root::spv::Op = 63; + pub const Op_OpCopyMemorySized: root::spv::Op = 64; + pub const Op_OpAccessChain: root::spv::Op = 65; + pub const Op_OpInBoundsAccessChain: root::spv::Op = 66; + pub const Op_OpPtrAccessChain: root::spv::Op = 67; + pub const Op_OpArrayLength: root::spv::Op = 68; + pub const Op_OpGenericPtrMemSemantics: root::spv::Op = 69; + pub const Op_OpInBoundsPtrAccessChain: root::spv::Op = 70; + pub const Op_OpDecorate: root::spv::Op = 71; + pub const Op_OpMemberDecorate: root::spv::Op = 72; + pub const Op_OpDecorationGroup: root::spv::Op = 73; + pub const Op_OpGroupDecorate: root::spv::Op = 74; + pub const Op_OpGroupMemberDecorate: root::spv::Op = 75; + pub const 
Op_OpVectorExtractDynamic: root::spv::Op = 77; + pub const Op_OpVectorInsertDynamic: root::spv::Op = 78; + pub const Op_OpVectorShuffle: root::spv::Op = 79; + pub const Op_OpCompositeConstruct: root::spv::Op = 80; + pub const Op_OpCompositeExtract: root::spv::Op = 81; + pub const Op_OpCompositeInsert: root::spv::Op = 82; + pub const Op_OpCopyObject: root::spv::Op = 83; + pub const Op_OpTranspose: root::spv::Op = 84; + pub const Op_OpSampledImage: root::spv::Op = 86; + pub const Op_OpImageSampleImplicitLod: root::spv::Op = 87; + pub const Op_OpImageSampleExplicitLod: root::spv::Op = 88; + pub const Op_OpImageSampleDrefImplicitLod: root::spv::Op = 89; + pub const Op_OpImageSampleDrefExplicitLod: root::spv::Op = 90; + pub const Op_OpImageSampleProjImplicitLod: root::spv::Op = 91; + pub const Op_OpImageSampleProjExplicitLod: root::spv::Op = 92; + pub const Op_OpImageSampleProjDrefImplicitLod: root::spv::Op = 93; + pub const Op_OpImageSampleProjDrefExplicitLod: root::spv::Op = 94; + pub const Op_OpImageFetch: root::spv::Op = 95; + pub const Op_OpImageGather: root::spv::Op = 96; + pub const Op_OpImageDrefGather: root::spv::Op = 97; + pub const Op_OpImageRead: root::spv::Op = 98; + pub const Op_OpImageWrite: root::spv::Op = 99; + pub const Op_OpImage: root::spv::Op = 100; + pub const Op_OpImageQueryFormat: root::spv::Op = 101; + pub const Op_OpImageQueryOrder: root::spv::Op = 102; + pub const Op_OpImageQuerySizeLod: root::spv::Op = 103; + pub const Op_OpImageQuerySize: root::spv::Op = 104; + pub const Op_OpImageQueryLod: root::spv::Op = 105; + pub const Op_OpImageQueryLevels: root::spv::Op = 106; + pub const Op_OpImageQuerySamples: root::spv::Op = 107; + pub const Op_OpConvertFToU: root::spv::Op = 109; + pub const Op_OpConvertFToS: root::spv::Op = 110; + pub const Op_OpConvertSToF: root::spv::Op = 111; + pub const Op_OpConvertUToF: root::spv::Op = 112; + pub const Op_OpUConvert: root::spv::Op = 113; + pub const Op_OpSConvert: root::spv::Op = 114; + pub const 
Op_OpFConvert: root::spv::Op = 115; + pub const Op_OpQuantizeToF16: root::spv::Op = 116; + pub const Op_OpConvertPtrToU: root::spv::Op = 117; + pub const Op_OpSatConvertSToU: root::spv::Op = 118; + pub const Op_OpSatConvertUToS: root::spv::Op = 119; + pub const Op_OpConvertUToPtr: root::spv::Op = 120; + pub const Op_OpPtrCastToGeneric: root::spv::Op = 121; + pub const Op_OpGenericCastToPtr: root::spv::Op = 122; + pub const Op_OpGenericCastToPtrExplicit: root::spv::Op = 123; + pub const Op_OpBitcast: root::spv::Op = 124; + pub const Op_OpSNegate: root::spv::Op = 126; + pub const Op_OpFNegate: root::spv::Op = 127; + pub const Op_OpIAdd: root::spv::Op = 128; + pub const Op_OpFAdd: root::spv::Op = 129; + pub const Op_OpISub: root::spv::Op = 130; + pub const Op_OpFSub: root::spv::Op = 131; + pub const Op_OpIMul: root::spv::Op = 132; + pub const Op_OpFMul: root::spv::Op = 133; + pub const Op_OpUDiv: root::spv::Op = 134; + pub const Op_OpSDiv: root::spv::Op = 135; + pub const Op_OpFDiv: root::spv::Op = 136; + pub const Op_OpUMod: root::spv::Op = 137; + pub const Op_OpSRem: root::spv::Op = 138; + pub const Op_OpSMod: root::spv::Op = 139; + pub const Op_OpFRem: root::spv::Op = 140; + pub const Op_OpFMod: root::spv::Op = 141; + pub const Op_OpVectorTimesScalar: root::spv::Op = 142; + pub const Op_OpMatrixTimesScalar: root::spv::Op = 143; + pub const Op_OpVectorTimesMatrix: root::spv::Op = 144; + pub const Op_OpMatrixTimesVector: root::spv::Op = 145; + pub const Op_OpMatrixTimesMatrix: root::spv::Op = 146; + pub const Op_OpOuterProduct: root::spv::Op = 147; + pub const Op_OpDot: root::spv::Op = 148; + pub const Op_OpIAddCarry: root::spv::Op = 149; + pub const Op_OpISubBorrow: root::spv::Op = 150; + pub const Op_OpUMulExtended: root::spv::Op = 151; + pub const Op_OpSMulExtended: root::spv::Op = 152; + pub const Op_OpAny: root::spv::Op = 154; + pub const Op_OpAll: root::spv::Op = 155; + pub const Op_OpIsNan: root::spv::Op = 156; + pub const Op_OpIsInf: root::spv::Op = 157; + 
pub const Op_OpIsFinite: root::spv::Op = 158; + pub const Op_OpIsNormal: root::spv::Op = 159; + pub const Op_OpSignBitSet: root::spv::Op = 160; + pub const Op_OpLessOrGreater: root::spv::Op = 161; + pub const Op_OpOrdered: root::spv::Op = 162; + pub const Op_OpUnordered: root::spv::Op = 163; + pub const Op_OpLogicalEqual: root::spv::Op = 164; + pub const Op_OpLogicalNotEqual: root::spv::Op = 165; + pub const Op_OpLogicalOr: root::spv::Op = 166; + pub const Op_OpLogicalAnd: root::spv::Op = 167; + pub const Op_OpLogicalNot: root::spv::Op = 168; + pub const Op_OpSelect: root::spv::Op = 169; + pub const Op_OpIEqual: root::spv::Op = 170; + pub const Op_OpINotEqual: root::spv::Op = 171; + pub const Op_OpUGreaterThan: root::spv::Op = 172; + pub const Op_OpSGreaterThan: root::spv::Op = 173; + pub const Op_OpUGreaterThanEqual: root::spv::Op = 174; + pub const Op_OpSGreaterThanEqual: root::spv::Op = 175; + pub const Op_OpULessThan: root::spv::Op = 176; + pub const Op_OpSLessThan: root::spv::Op = 177; + pub const Op_OpULessThanEqual: root::spv::Op = 178; + pub const Op_OpSLessThanEqual: root::spv::Op = 179; + pub const Op_OpFOrdEqual: root::spv::Op = 180; + pub const Op_OpFUnordEqual: root::spv::Op = 181; + pub const Op_OpFOrdNotEqual: root::spv::Op = 182; + pub const Op_OpFUnordNotEqual: root::spv::Op = 183; + pub const Op_OpFOrdLessThan: root::spv::Op = 184; + pub const Op_OpFUnordLessThan: root::spv::Op = 185; + pub const Op_OpFOrdGreaterThan: root::spv::Op = 186; + pub const Op_OpFUnordGreaterThan: root::spv::Op = 187; + pub const Op_OpFOrdLessThanEqual: root::spv::Op = 188; + pub const Op_OpFUnordLessThanEqual: root::spv::Op = 189; + pub const Op_OpFOrdGreaterThanEqual: root::spv::Op = 190; + pub const Op_OpFUnordGreaterThanEqual: root::spv::Op = 191; + pub const Op_OpShiftRightLogical: root::spv::Op = 194; + pub const Op_OpShiftRightArithmetic: root::spv::Op = 195; + pub const Op_OpShiftLeftLogical: root::spv::Op = 196; + pub const Op_OpBitwiseOr: root::spv::Op = 197; + 
pub const Op_OpBitwiseXor: root::spv::Op = 198; + pub const Op_OpBitwiseAnd: root::spv::Op = 199; + pub const Op_OpNot: root::spv::Op = 200; + pub const Op_OpBitFieldInsert: root::spv::Op = 201; + pub const Op_OpBitFieldSExtract: root::spv::Op = 202; + pub const Op_OpBitFieldUExtract: root::spv::Op = 203; + pub const Op_OpBitReverse: root::spv::Op = 204; + pub const Op_OpBitCount: root::spv::Op = 205; + pub const Op_OpDPdx: root::spv::Op = 207; + pub const Op_OpDPdy: root::spv::Op = 208; + pub const Op_OpFwidth: root::spv::Op = 209; + pub const Op_OpDPdxFine: root::spv::Op = 210; + pub const Op_OpDPdyFine: root::spv::Op = 211; + pub const Op_OpFwidthFine: root::spv::Op = 212; + pub const Op_OpDPdxCoarse: root::spv::Op = 213; + pub const Op_OpDPdyCoarse: root::spv::Op = 214; + pub const Op_OpFwidthCoarse: root::spv::Op = 215; + pub const Op_OpEmitVertex: root::spv::Op = 218; + pub const Op_OpEndPrimitive: root::spv::Op = 219; + pub const Op_OpEmitStreamVertex: root::spv::Op = 220; + pub const Op_OpEndStreamPrimitive: root::spv::Op = 221; + pub const Op_OpControlBarrier: root::spv::Op = 224; + pub const Op_OpMemoryBarrier: root::spv::Op = 225; + pub const Op_OpAtomicLoad: root::spv::Op = 227; + pub const Op_OpAtomicStore: root::spv::Op = 228; + pub const Op_OpAtomicExchange: root::spv::Op = 229; + pub const Op_OpAtomicCompareExchange: root::spv::Op = 230; + pub const Op_OpAtomicCompareExchangeWeak: root::spv::Op = 231; + pub const Op_OpAtomicIIncrement: root::spv::Op = 232; + pub const Op_OpAtomicIDecrement: root::spv::Op = 233; + pub const Op_OpAtomicIAdd: root::spv::Op = 234; + pub const Op_OpAtomicISub: root::spv::Op = 235; + pub const Op_OpAtomicSMin: root::spv::Op = 236; + pub const Op_OpAtomicUMin: root::spv::Op = 237; + pub const Op_OpAtomicSMax: root::spv::Op = 238; + pub const Op_OpAtomicUMax: root::spv::Op = 239; + pub const Op_OpAtomicAnd: root::spv::Op = 240; + pub const Op_OpAtomicOr: root::spv::Op = 241; + pub const Op_OpAtomicXor: root::spv::Op = 242; 
+ pub const Op_OpPhi: root::spv::Op = 245; + pub const Op_OpLoopMerge: root::spv::Op = 246; + pub const Op_OpSelectionMerge: root::spv::Op = 247; + pub const Op_OpLabel: root::spv::Op = 248; + pub const Op_OpBranch: root::spv::Op = 249; + pub const Op_OpBranchConditional: root::spv::Op = 250; + pub const Op_OpSwitch: root::spv::Op = 251; + pub const Op_OpKill: root::spv::Op = 252; + pub const Op_OpReturn: root::spv::Op = 253; + pub const Op_OpReturnValue: root::spv::Op = 254; + pub const Op_OpUnreachable: root::spv::Op = 255; + pub const Op_OpLifetimeStart: root::spv::Op = 256; + pub const Op_OpLifetimeStop: root::spv::Op = 257; + pub const Op_OpGroupAsyncCopy: root::spv::Op = 259; + pub const Op_OpGroupWaitEvents: root::spv::Op = 260; + pub const Op_OpGroupAll: root::spv::Op = 261; + pub const Op_OpGroupAny: root::spv::Op = 262; + pub const Op_OpGroupBroadcast: root::spv::Op = 263; + pub const Op_OpGroupIAdd: root::spv::Op = 264; + pub const Op_OpGroupFAdd: root::spv::Op = 265; + pub const Op_OpGroupFMin: root::spv::Op = 266; + pub const Op_OpGroupUMin: root::spv::Op = 267; + pub const Op_OpGroupSMin: root::spv::Op = 268; + pub const Op_OpGroupFMax: root::spv::Op = 269; + pub const Op_OpGroupUMax: root::spv::Op = 270; + pub const Op_OpGroupSMax: root::spv::Op = 271; + pub const Op_OpReadPipe: root::spv::Op = 274; + pub const Op_OpWritePipe: root::spv::Op = 275; + pub const Op_OpReservedReadPipe: root::spv::Op = 276; + pub const Op_OpReservedWritePipe: root::spv::Op = 277; + pub const Op_OpReserveReadPipePackets: root::spv::Op = 278; + pub const Op_OpReserveWritePipePackets: root::spv::Op = 279; + pub const Op_OpCommitReadPipe: root::spv::Op = 280; + pub const Op_OpCommitWritePipe: root::spv::Op = 281; + pub const Op_OpIsValidReserveId: root::spv::Op = 282; + pub const Op_OpGetNumPipePackets: root::spv::Op = 283; + pub const Op_OpGetMaxPipePackets: root::spv::Op = 284; + pub const Op_OpGroupReserveReadPipePackets: root::spv::Op = 285; + pub const 
Op_OpGroupReserveWritePipePackets: root::spv::Op = 286; + pub const Op_OpGroupCommitReadPipe: root::spv::Op = 287; + pub const Op_OpGroupCommitWritePipe: root::spv::Op = 288; + pub const Op_OpEnqueueMarker: root::spv::Op = 291; + pub const Op_OpEnqueueKernel: root::spv::Op = 292; + pub const Op_OpGetKernelNDrangeSubGroupCount: root::spv::Op = 293; + pub const Op_OpGetKernelNDrangeMaxSubGroupSize: root::spv::Op = 294; + pub const Op_OpGetKernelWorkGroupSize: root::spv::Op = 295; + pub const Op_OpGetKernelPreferredWorkGroupSizeMultiple: root::spv::Op = 296; + pub const Op_OpRetainEvent: root::spv::Op = 297; + pub const Op_OpReleaseEvent: root::spv::Op = 298; + pub const Op_OpCreateUserEvent: root::spv::Op = 299; + pub const Op_OpIsValidEvent: root::spv::Op = 300; + pub const Op_OpSetUserEventStatus: root::spv::Op = 301; + pub const Op_OpCaptureEventProfilingInfo: root::spv::Op = 302; + pub const Op_OpGetDefaultQueue: root::spv::Op = 303; + pub const Op_OpBuildNDRange: root::spv::Op = 304; + pub const Op_OpImageSparseSampleImplicitLod: root::spv::Op = 305; + pub const Op_OpImageSparseSampleExplicitLod: root::spv::Op = 306; + pub const Op_OpImageSparseSampleDrefImplicitLod: root::spv::Op = 307; + pub const Op_OpImageSparseSampleDrefExplicitLod: root::spv::Op = 308; + pub const Op_OpImageSparseSampleProjImplicitLod: root::spv::Op = 309; + pub const Op_OpImageSparseSampleProjExplicitLod: root::spv::Op = 310; + pub const Op_OpImageSparseSampleProjDrefImplicitLod: root::spv::Op = 311; + pub const Op_OpImageSparseSampleProjDrefExplicitLod: root::spv::Op = 312; + pub const Op_OpImageSparseFetch: root::spv::Op = 313; + pub const Op_OpImageSparseGather: root::spv::Op = 314; + pub const Op_OpImageSparseDrefGather: root::spv::Op = 315; + pub const Op_OpImageSparseTexelsResident: root::spv::Op = 316; + pub const Op_OpNoLine: root::spv::Op = 317; + pub const Op_OpAtomicFlagTestAndSet: root::spv::Op = 318; + pub const Op_OpAtomicFlagClear: root::spv::Op = 319; + pub const 
Op_OpImageSparseRead: root::spv::Op = 320; + pub const Op_OpSizeOf: root::spv::Op = 321; + pub const Op_OpTypePipeStorage: root::spv::Op = 322; + pub const Op_OpConstantPipeStorage: root::spv::Op = 323; + pub const Op_OpCreatePipeFromPipeStorage: root::spv::Op = 324; + pub const Op_OpGetKernelLocalSizeForSubgroupCount: root::spv::Op = 325; + pub const Op_OpGetKernelMaxNumSubgroups: root::spv::Op = 326; + pub const Op_OpTypeNamedBarrier: root::spv::Op = 327; + pub const Op_OpNamedBarrierInitialize: root::spv::Op = 328; + pub const Op_OpMemoryNamedBarrier: root::spv::Op = 329; + pub const Op_OpModuleProcessed: root::spv::Op = 330; + pub const Op_OpExecutionModeId: root::spv::Op = 331; + pub const Op_OpDecorateId: root::spv::Op = 332; + pub const Op_OpGroupNonUniformElect: root::spv::Op = 333; + pub const Op_OpGroupNonUniformAll: root::spv::Op = 334; + pub const Op_OpGroupNonUniformAny: root::spv::Op = 335; + pub const Op_OpGroupNonUniformAllEqual: root::spv::Op = 336; + pub const Op_OpGroupNonUniformBroadcast: root::spv::Op = 337; + pub const Op_OpGroupNonUniformBroadcastFirst: root::spv::Op = 338; + pub const Op_OpGroupNonUniformBallot: root::spv::Op = 339; + pub const Op_OpGroupNonUniformInverseBallot: root::spv::Op = 340; + pub const Op_OpGroupNonUniformBallotBitExtract: root::spv::Op = 341; + pub const Op_OpGroupNonUniformBallotBitCount: root::spv::Op = 342; + pub const Op_OpGroupNonUniformBallotFindLSB: root::spv::Op = 343; + pub const Op_OpGroupNonUniformBallotFindMSB: root::spv::Op = 344; + pub const Op_OpGroupNonUniformShuffle: root::spv::Op = 345; + pub const Op_OpGroupNonUniformShuffleXor: root::spv::Op = 346; + pub const Op_OpGroupNonUniformShuffleUp: root::spv::Op = 347; + pub const Op_OpGroupNonUniformShuffleDown: root::spv::Op = 348; + pub const Op_OpGroupNonUniformIAdd: root::spv::Op = 349; + pub const Op_OpGroupNonUniformFAdd: root::spv::Op = 350; + pub const Op_OpGroupNonUniformIMul: root::spv::Op = 351; + pub const Op_OpGroupNonUniformFMul: 
root::spv::Op = 352; + pub const Op_OpGroupNonUniformSMin: root::spv::Op = 353; + pub const Op_OpGroupNonUniformUMin: root::spv::Op = 354; + pub const Op_OpGroupNonUniformFMin: root::spv::Op = 355; + pub const Op_OpGroupNonUniformSMax: root::spv::Op = 356; + pub const Op_OpGroupNonUniformUMax: root::spv::Op = 357; + pub const Op_OpGroupNonUniformFMax: root::spv::Op = 358; + pub const Op_OpGroupNonUniformBitwiseAnd: root::spv::Op = 359; + pub const Op_OpGroupNonUniformBitwiseOr: root::spv::Op = 360; + pub const Op_OpGroupNonUniformBitwiseXor: root::spv::Op = 361; + pub const Op_OpGroupNonUniformLogicalAnd: root::spv::Op = 362; + pub const Op_OpGroupNonUniformLogicalOr: root::spv::Op = 363; + pub const Op_OpGroupNonUniformLogicalXor: root::spv::Op = 364; + pub const Op_OpGroupNonUniformQuadBroadcast: root::spv::Op = 365; + pub const Op_OpGroupNonUniformQuadSwap: root::spv::Op = 366; + pub const Op_OpCopyLogical: root::spv::Op = 400; + pub const Op_OpPtrEqual: root::spv::Op = 401; + pub const Op_OpPtrNotEqual: root::spv::Op = 402; + pub const Op_OpPtrDiff: root::spv::Op = 403; + pub const Op_OpSubgroupBallotKHR: root::spv::Op = 4421; + pub const Op_OpSubgroupFirstInvocationKHR: root::spv::Op = 4422; + pub const Op_OpSubgroupAllKHR: root::spv::Op = 4428; + pub const Op_OpSubgroupAnyKHR: root::spv::Op = 4429; + pub const Op_OpSubgroupAllEqualKHR: root::spv::Op = 4430; + pub const Op_OpSubgroupReadInvocationKHR: root::spv::Op = 4432; + pub const Op_OpGroupIAddNonUniformAMD: root::spv::Op = 5000; + pub const Op_OpGroupFAddNonUniformAMD: root::spv::Op = 5001; + pub const Op_OpGroupFMinNonUniformAMD: root::spv::Op = 5002; + pub const Op_OpGroupUMinNonUniformAMD: root::spv::Op = 5003; + pub const Op_OpGroupSMinNonUniformAMD: root::spv::Op = 5004; + pub const Op_OpGroupFMaxNonUniformAMD: root::spv::Op = 5005; + pub const Op_OpGroupUMaxNonUniformAMD: root::spv::Op = 5006; + pub const Op_OpGroupSMaxNonUniformAMD: root::spv::Op = 5007; + pub const Op_OpFragmentMaskFetchAMD: 
root::spv::Op = 5011; + pub const Op_OpFragmentFetchAMD: root::spv::Op = 5012; + pub const Op_OpReadClockKHR: root::spv::Op = 5056; + pub const Op_OpImageSampleFootprintNV: root::spv::Op = 5283; + pub const Op_OpGroupNonUniformPartitionNV: root::spv::Op = 5296; + pub const Op_OpWritePackedPrimitiveIndices4x8NV: root::spv::Op = 5299; + pub const Op_OpReportIntersectionNV: root::spv::Op = 5334; + pub const Op_OpIgnoreIntersectionNV: root::spv::Op = 5335; + pub const Op_OpTerminateRayNV: root::spv::Op = 5336; + pub const Op_OpTraceNV: root::spv::Op = 5337; + pub const Op_OpTypeAccelerationStructureNV: root::spv::Op = 5341; + pub const Op_OpExecuteCallableNV: root::spv::Op = 5344; + pub const Op_OpTypeCooperativeMatrixNV: root::spv::Op = 5358; + pub const Op_OpCooperativeMatrixLoadNV: root::spv::Op = 5359; + pub const Op_OpCooperativeMatrixStoreNV: root::spv::Op = 5360; + pub const Op_OpCooperativeMatrixMulAddNV: root::spv::Op = 5361; + pub const Op_OpCooperativeMatrixLengthNV: root::spv::Op = 5362; + pub const Op_OpBeginInvocationInterlockEXT: root::spv::Op = 5364; + pub const Op_OpEndInvocationInterlockEXT: root::spv::Op = 5365; + pub const Op_OpDemoteToHelperInvocationEXT: root::spv::Op = 5380; + pub const Op_OpIsHelperInvocationEXT: root::spv::Op = 5381; + pub const Op_OpSubgroupShuffleINTEL: root::spv::Op = 5571; + pub const Op_OpSubgroupShuffleDownINTEL: root::spv::Op = 5572; + pub const Op_OpSubgroupShuffleUpINTEL: root::spv::Op = 5573; + pub const Op_OpSubgroupShuffleXorINTEL: root::spv::Op = 5574; + pub const Op_OpSubgroupBlockReadINTEL: root::spv::Op = 5575; + pub const Op_OpSubgroupBlockWriteINTEL: root::spv::Op = 5576; + pub const Op_OpSubgroupImageBlockReadINTEL: root::spv::Op = 5577; + pub const Op_OpSubgroupImageBlockWriteINTEL: root::spv::Op = 5578; + pub const Op_OpSubgroupImageMediaBlockReadINTEL: root::spv::Op = 5580; + pub const Op_OpSubgroupImageMediaBlockWriteINTEL: root::spv::Op = 5581; + pub const Op_OpUCountLeadingZerosINTEL: root::spv::Op = 
5585; + pub const Op_OpUCountTrailingZerosINTEL: root::spv::Op = 5586; + pub const Op_OpAbsISubINTEL: root::spv::Op = 5587; + pub const Op_OpAbsUSubINTEL: root::spv::Op = 5588; + pub const Op_OpIAddSatINTEL: root::spv::Op = 5589; + pub const Op_OpUAddSatINTEL: root::spv::Op = 5590; + pub const Op_OpIAverageINTEL: root::spv::Op = 5591; + pub const Op_OpUAverageINTEL: root::spv::Op = 5592; + pub const Op_OpIAverageRoundedINTEL: root::spv::Op = 5593; + pub const Op_OpUAverageRoundedINTEL: root::spv::Op = 5594; + pub const Op_OpISubSatINTEL: root::spv::Op = 5595; + pub const Op_OpUSubSatINTEL: root::spv::Op = 5596; + pub const Op_OpIMul32x16INTEL: root::spv::Op = 5597; + pub const Op_OpUMul32x16INTEL: root::spv::Op = 5598; + pub const Op_OpDecorateString: root::spv::Op = 5632; + pub const Op_OpDecorateStringGOOGLE: root::spv::Op = 5632; + pub const Op_OpMemberDecorateString: root::spv::Op = 5633; + pub const Op_OpMemberDecorateStringGOOGLE: root::spv::Op = 5633; + pub const Op_OpVmeImageINTEL: root::spv::Op = 5699; + pub const Op_OpTypeVmeImageINTEL: root::spv::Op = 5700; + pub const Op_OpTypeAvcImePayloadINTEL: root::spv::Op = 5701; + pub const Op_OpTypeAvcRefPayloadINTEL: root::spv::Op = 5702; + pub const Op_OpTypeAvcSicPayloadINTEL: root::spv::Op = 5703; + pub const Op_OpTypeAvcMcePayloadINTEL: root::spv::Op = 5704; + pub const Op_OpTypeAvcMceResultINTEL: root::spv::Op = 5705; + pub const Op_OpTypeAvcImeResultINTEL: root::spv::Op = 5706; + pub const Op_OpTypeAvcImeResultSingleReferenceStreamoutINTEL: root::spv::Op = 5707; + pub const Op_OpTypeAvcImeResultDualReferenceStreamoutINTEL: root::spv::Op = 5708; + pub const Op_OpTypeAvcImeSingleReferenceStreaminINTEL: root::spv::Op = 5709; + pub const Op_OpTypeAvcImeDualReferenceStreaminINTEL: root::spv::Op = 5710; + pub const Op_OpTypeAvcRefResultINTEL: root::spv::Op = 5711; + pub const Op_OpTypeAvcSicResultINTEL: root::spv::Op = 5712; + pub const Op_OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: 
root::spv::Op = + 5713; + pub const Op_OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: root::spv::Op = 5714; + pub const Op_OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: root::spv::Op = 5715; + pub const Op_OpSubgroupAvcMceSetInterShapePenaltyINTEL: root::spv::Op = 5716; + pub const Op_OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: root::spv::Op = 5717; + pub const Op_OpSubgroupAvcMceSetInterDirectionPenaltyINTEL: root::spv::Op = 5718; + pub const Op_OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: root::spv::Op = 5719; + pub const Op_OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: root::spv::Op = + 5720; + pub const Op_OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: root::spv::Op = 5721; + pub const Op_OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: root::spv::Op = 5722; + pub const Op_OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: root::spv::Op = 5723; + pub const Op_OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: root::spv::Op = 5724; + pub const Op_OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: root::spv::Op = 5725; + pub const Op_OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: root::spv::Op = 5726; + pub const Op_OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: root::spv::Op = + 5727; + pub const Op_OpSubgroupAvcMceSetAcOnlyHaarINTEL: root::spv::Op = 5728; + pub const Op_OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: root::spv::Op = 5729; + pub const Op_OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: root::spv::Op = + 5730; + pub const Op_OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: root::spv::Op = + 5731; + pub const Op_OpSubgroupAvcMceConvertToImePayloadINTEL: root::spv::Op = 5732; + pub const Op_OpSubgroupAvcMceConvertToImeResultINTEL: root::spv::Op = 5733; + pub const Op_OpSubgroupAvcMceConvertToRefPayloadINTEL: root::spv::Op = 5734; + pub const Op_OpSubgroupAvcMceConvertToRefResultINTEL: root::spv::Op = 5735; + pub const 
Op_OpSubgroupAvcMceConvertToSicPayloadINTEL: root::spv::Op = 5736; + pub const Op_OpSubgroupAvcMceConvertToSicResultINTEL: root::spv::Op = 5737; + pub const Op_OpSubgroupAvcMceGetMotionVectorsINTEL: root::spv::Op = 5738; + pub const Op_OpSubgroupAvcMceGetInterDistortionsINTEL: root::spv::Op = 5739; + pub const Op_OpSubgroupAvcMceGetBestInterDistortionsINTEL: root::spv::Op = 5740; + pub const Op_OpSubgroupAvcMceGetInterMajorShapeINTEL: root::spv::Op = 5741; + pub const Op_OpSubgroupAvcMceGetInterMinorShapeINTEL: root::spv::Op = 5742; + pub const Op_OpSubgroupAvcMceGetInterDirectionsINTEL: root::spv::Op = 5743; + pub const Op_OpSubgroupAvcMceGetInterMotionVectorCountINTEL: root::spv::Op = 5744; + pub const Op_OpSubgroupAvcMceGetInterReferenceIdsINTEL: root::spv::Op = 5745; + pub const Op_OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: + root::spv::Op = 5746; + pub const Op_OpSubgroupAvcImeInitializeINTEL: root::spv::Op = 5747; + pub const Op_OpSubgroupAvcImeSetSingleReferenceINTEL: root::spv::Op = 5748; + pub const Op_OpSubgroupAvcImeSetDualReferenceINTEL: root::spv::Op = 5749; + pub const Op_OpSubgroupAvcImeRefWindowSizeINTEL: root::spv::Op = 5750; + pub const Op_OpSubgroupAvcImeAdjustRefOffsetINTEL: root::spv::Op = 5751; + pub const Op_OpSubgroupAvcImeConvertToMcePayloadINTEL: root::spv::Op = 5752; + pub const Op_OpSubgroupAvcImeSetMaxMotionVectorCountINTEL: root::spv::Op = 5753; + pub const Op_OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: root::spv::Op = 5754; + pub const Op_OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: root::spv::Op = 5755; + pub const Op_OpSubgroupAvcImeSetWeightedSadINTEL: root::spv::Op = 5756; + pub const Op_OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: root::spv::Op = 5757; + pub const Op_OpSubgroupAvcImeEvaluateWithDualReferenceINTEL: root::spv::Op = 5758; + pub const Op_OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: root::spv::Op = 5759; + pub const 
Op_OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: root::spv::Op = 5760; + pub const Op_OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: root::spv::Op = + 5761; + pub const Op_OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: root::spv::Op = 5762; + pub const Op_OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: root::spv::Op = + 5763; + pub const Op_OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: root::spv::Op = + 5764; + pub const Op_OpSubgroupAvcImeConvertToMceResultINTEL: root::spv::Op = 5765; + pub const Op_OpSubgroupAvcImeGetSingleReferenceStreaminINTEL: root::spv::Op = 5766; + pub const Op_OpSubgroupAvcImeGetDualReferenceStreaminINTEL: root::spv::Op = 5767; + pub const Op_OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: root::spv::Op = 5768; + pub const Op_OpSubgroupAvcImeStripDualReferenceStreamoutINTEL: root::spv::Op = 5769; + pub const Op_OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: + root::spv::Op = 5770; + pub const Op_OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: + root::spv::Op = 5771; + pub const Op_OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: + root::spv::Op = 5772; + pub const Op_OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: + root::spv::Op = 5773; + pub const Op_OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: + root::spv::Op = 5774; + pub const Op_OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: + root::spv::Op = 5775; + pub const Op_OpSubgroupAvcImeGetBorderReachedINTEL: root::spv::Op = 5776; + pub const Op_OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: root::spv::Op = 5777; + pub const Op_OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: root::spv::Op = + 5778; + pub const Op_OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: root::spv::Op = + 5779; + pub const Op_OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: 
root::spv::Op = + 5780; + pub const Op_OpSubgroupAvcFmeInitializeINTEL: root::spv::Op = 5781; + pub const Op_OpSubgroupAvcBmeInitializeINTEL: root::spv::Op = 5782; + pub const Op_OpSubgroupAvcRefConvertToMcePayloadINTEL: root::spv::Op = 5783; + pub const Op_OpSubgroupAvcRefSetBidirectionalMixDisableINTEL: root::spv::Op = 5784; + pub const Op_OpSubgroupAvcRefSetBilinearFilterEnableINTEL: root::spv::Op = 5785; + pub const Op_OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: root::spv::Op = 5786; + pub const Op_OpSubgroupAvcRefEvaluateWithDualReferenceINTEL: root::spv::Op = 5787; + pub const Op_OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: root::spv::Op = 5788; + pub const Op_OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: root::spv::Op = + 5789; + pub const Op_OpSubgroupAvcRefConvertToMceResultINTEL: root::spv::Op = 5790; + pub const Op_OpSubgroupAvcSicInitializeINTEL: root::spv::Op = 5791; + pub const Op_OpSubgroupAvcSicConfigureSkcINTEL: root::spv::Op = 5792; + pub const Op_OpSubgroupAvcSicConfigureIpeLumaINTEL: root::spv::Op = 5793; + pub const Op_OpSubgroupAvcSicConfigureIpeLumaChromaINTEL: root::spv::Op = 5794; + pub const Op_OpSubgroupAvcSicGetMotionVectorMaskINTEL: root::spv::Op = 5795; + pub const Op_OpSubgroupAvcSicConvertToMcePayloadINTEL: root::spv::Op = 5796; + pub const Op_OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: root::spv::Op = 5797; + pub const Op_OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: root::spv::Op = 5798; + pub const Op_OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: root::spv::Op = 5799; + pub const Op_OpSubgroupAvcSicSetBilinearFilterEnableINTEL: root::spv::Op = 5800; + pub const Op_OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: root::spv::Op = 5801; + pub const Op_OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: root::spv::Op = 5802; + pub const Op_OpSubgroupAvcSicEvaluateIpeINTEL: root::spv::Op = 5803; + pub const Op_OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: root::spv::Op = 5804; + pub const 
Op_OpSubgroupAvcSicEvaluateWithDualReferenceINTEL: root::spv::Op = 5805; + pub const Op_OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: root::spv::Op = 5806; + pub const Op_OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: root::spv::Op = + 5807; + pub const Op_OpSubgroupAvcSicConvertToMceResultINTEL: root::spv::Op = 5808; + pub const Op_OpSubgroupAvcSicGetIpeLumaShapeINTEL: root::spv::Op = 5809; + pub const Op_OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: root::spv::Op = 5810; + pub const Op_OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: root::spv::Op = 5811; + pub const Op_OpSubgroupAvcSicGetPackedIpeLumaModesINTEL: root::spv::Op = 5812; + pub const Op_OpSubgroupAvcSicGetIpeChromaModeINTEL: root::spv::Op = 5813; + pub const Op_OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: root::spv::Op = 5814; + pub const Op_OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: root::spv::Op = 5815; + pub const Op_OpSubgroupAvcSicGetInterRawSadsINTEL: root::spv::Op = 5816; + pub const Op_OpMax: root::spv::Op = 2147483647; + pub type Op = u32; } pub mod std { #[allow(unused_imports)] use self::super::super::root; - pub type string = [u64; 4usize]; } - pub mod __gnu_cxx { - #[allow(unused_imports)] - use self::super::super::root; - } - pub type __uint8_t = ::std::os::raw::c_uchar; - pub type __int32_t = ::std::os::raw::c_int; - pub type __uint32_t = ::std::os::raw::c_uint; - pub mod SPIRV_CROSS_NAMESPACE { + pub type __darwin_size_t = ::std::os::raw::c_ulong; + pub mod spirv_cross { #[allow(unused_imports)] use self::super::super::root; #[repr(u32)] @@ -1662,51 +2012,148 @@ pub mod root { ControlPointArray = 20, Char = 21, } + pub const MSLSamplerCoord_MSL_SAMPLER_COORD_NORMALIZED: root::spirv_cross::MSLSamplerCoord = + 0; + pub const MSLSamplerCoord_MSL_SAMPLER_COORD_PIXEL: root::spirv_cross::MSLSamplerCoord = 1; + pub const MSLSamplerCoord_MSL_SAMPLER_INT_MAX: root::spirv_cross::MSLSamplerCoord = + 2147483647; + pub type MSLSamplerCoord = u32; + pub const 
MSLSamplerFilter_MSL_SAMPLER_FILTER_NEAREST: root::spirv_cross::MSLSamplerFilter = + 0; + pub const MSLSamplerFilter_MSL_SAMPLER_FILTER_LINEAR: root::spirv_cross::MSLSamplerFilter = + 1; + pub const MSLSamplerFilter_MSL_SAMPLER_FILTER_INT_MAX: root::spirv_cross::MSLSamplerFilter = + 2147483647; + pub type MSLSamplerFilter = u32; + pub const MSLSamplerMipFilter_MSL_SAMPLER_MIP_FILTER_NONE: + root::spirv_cross::MSLSamplerMipFilter = 0; + pub const MSLSamplerMipFilter_MSL_SAMPLER_MIP_FILTER_NEAREST: + root::spirv_cross::MSLSamplerMipFilter = 1; + pub const MSLSamplerMipFilter_MSL_SAMPLER_MIP_FILTER_LINEAR: + root::spirv_cross::MSLSamplerMipFilter = 2; + pub const MSLSamplerMipFilter_MSL_SAMPLER_MIP_FILTER_INT_MAX: + root::spirv_cross::MSLSamplerMipFilter = 2147483647; + pub type MSLSamplerMipFilter = u32; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO: + root::spirv_cross::MSLSamplerAddress = 0; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE: + root::spirv_cross::MSLSamplerAddress = 1; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER: + root::spirv_cross::MSLSamplerAddress = 2; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_REPEAT: + root::spirv_cross::MSLSamplerAddress = 3; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT: + root::spirv_cross::MSLSamplerAddress = 4; + pub const MSLSamplerAddress_MSL_SAMPLER_ADDRESS_INT_MAX: + root::spirv_cross::MSLSamplerAddress = 2147483647; + pub type MSLSamplerAddress = u32; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_NEVER: + root::spirv_cross::MSLSamplerCompareFunc = 0; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_LESS: + root::spirv_cross::MSLSamplerCompareFunc = 1; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL: + root::spirv_cross::MSLSamplerCompareFunc = 2; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_GREATER: + root::spirv_cross::MSLSamplerCompareFunc = 3; + pub const 
MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL: + root::spirv_cross::MSLSamplerCompareFunc = 4; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_EQUAL: + root::spirv_cross::MSLSamplerCompareFunc = 5; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL: + root::spirv_cross::MSLSamplerCompareFunc = 6; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_ALWAYS: + root::spirv_cross::MSLSamplerCompareFunc = 7; + pub const MSLSamplerCompareFunc_MSL_SAMPLER_COMPARE_FUNC_INT_MAX: + root::spirv_cross::MSLSamplerCompareFunc = 2147483647; + pub type MSLSamplerCompareFunc = u32; + pub const MSLSamplerBorderColor_MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK: + root::spirv_cross::MSLSamplerBorderColor = 0; + pub const MSLSamplerBorderColor_MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK: + root::spirv_cross::MSLSamplerBorderColor = 1; + pub const MSLSamplerBorderColor_MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE: + root::spirv_cross::MSLSamplerBorderColor = 2; + pub const MSLSamplerBorderColor_MSL_SAMPLER_BORDER_COLOR_INT_MAX: + root::spirv_cross::MSLSamplerBorderColor = 2147483647; + pub type MSLSamplerBorderColor = u32; + pub const MSLFormatResolution_MSL_FORMAT_RESOLUTION_444: + root::spirv_cross::MSLFormatResolution = 0; + pub const MSLFormatResolution_MSL_FORMAT_RESOLUTION_422: + root::spirv_cross::MSLFormatResolution = 1; + pub const MSLFormatResolution_MSL_FORMAT_RESOLUTION_420: + root::spirv_cross::MSLFormatResolution = 2; + pub const MSLFormatResolution_MSL_FORMAT_RESOLUTION_INT_MAX: + root::spirv_cross::MSLFormatResolution = 2147483647; + pub type MSLFormatResolution = u32; + pub const MSLChromaLocation_MSL_CHROMA_LOCATION_COSITED_EVEN: + root::spirv_cross::MSLChromaLocation = 0; + pub const MSLChromaLocation_MSL_CHROMA_LOCATION_MIDPOINT: + root::spirv_cross::MSLChromaLocation = 1; + pub const MSLChromaLocation_MSL_CHROMA_LOCATION_INT_MAX: + root::spirv_cross::MSLChromaLocation = 2147483647; + pub type MSLChromaLocation = u32; + pub const 
MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_IDENTITY: + root::spirv_cross::MSLComponentSwizzle = 0; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_ZERO: + root::spirv_cross::MSLComponentSwizzle = 1; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_ONE: + root::spirv_cross::MSLComponentSwizzle = 2; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_R: + root::spirv_cross::MSLComponentSwizzle = 3; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_G: + root::spirv_cross::MSLComponentSwizzle = 4; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_B: + root::spirv_cross::MSLComponentSwizzle = 5; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_A: + root::spirv_cross::MSLComponentSwizzle = 6; + pub const MSLComponentSwizzle_MSL_COMPONENT_SWIZZLE_INT_MAX: + root::spirv_cross::MSLComponentSwizzle = 2147483647; + pub type MSLComponentSwizzle = u32; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY: + root::spirv_cross::MSLSamplerYCbCrModelConversion = 0; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY : root :: spirv_cross :: MSLSamplerYCbCrModelConversion = 1 ; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_709: + root::spirv_cross::MSLSamplerYCbCrModelConversion = 2; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_601: + root::spirv_cross::MSLSamplerYCbCrModelConversion = 3; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_2020 : root :: spirv_cross :: MSLSamplerYCbCrModelConversion = 4 ; + pub const MSLSamplerYCbCrModelConversion_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_INT_MAX: + root::spirv_cross::MSLSamplerYCbCrModelConversion = 2147483647; + pub type MSLSamplerYCbCrModelConversion = u32; + pub const MSLSamplerYCbCrRange_MSL_SAMPLER_YCBCR_RANGE_ITU_FULL: + root::spirv_cross::MSLSamplerYCbCrRange = 0; + pub const 
MSLSamplerYCbCrRange_MSL_SAMPLER_YCBCR_RANGE_ITU_NARROW: + root::spirv_cross::MSLSamplerYCbCrRange = 1; + pub const MSLSamplerYCbCrRange_MSL_SAMPLER_YCBCR_RANGE_INT_MAX: + root::spirv_cross::MSLSamplerYCbCrRange = 2147483647; + pub type MSLSamplerYCbCrRange = u32; #[repr(C)] - #[derive(Debug, Copy)] - pub struct Resource { - pub id: u32, - pub type_id: u32, - pub base_type_id: u32, - pub name: root::std::string, - } - impl Clone for Resource { - fn clone(&self) -> Self { *self } - } - #[repr(u32)] - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] - pub enum MSLVertexFormat { - MSL_VERTEX_FORMAT_OTHER = 0, - MSL_VERTEX_FORMAT_UINT8 = 1, - MSL_VERTEX_FORMAT_UINT16 = 2, - MSL_VERTEX_FORMAT_INT_MAX = 2147483647, - } - #[repr(C)] - #[derive(Debug, Copy)] - pub struct MSLVertexAttr { - pub location: u32, - pub msl_buffer: u32, - pub msl_offset: u32, - pub msl_stride: u32, - pub per_instance: bool, - pub format: root::SPIRV_CROSS_NAMESPACE::MSLVertexFormat, - pub builtin: root::spv::BuiltIn, - } - impl Clone for MSLVertexAttr { - fn clone(&self) -> Self { *self } - } - #[repr(C)] - #[derive(Debug, Copy)] - pub struct MSLResourceBinding { - pub stage: root::spv::ExecutionModel, - pub desc_set: u32, - pub binding: u32, - pub msl_buffer: u32, - pub msl_texture: u32, - pub msl_sampler: u32, - } - impl Clone for MSLResourceBinding { - fn clone(&self) -> Self { *self } + #[derive(Debug, Copy, Clone)] + pub struct MSLConstexprSampler { + pub coord: root::spirv_cross::MSLSamplerCoord, + pub min_filter: root::spirv_cross::MSLSamplerFilter, + pub mag_filter: root::spirv_cross::MSLSamplerFilter, + pub mip_filter: root::spirv_cross::MSLSamplerMipFilter, + pub s_address: root::spirv_cross::MSLSamplerAddress, + pub t_address: root::spirv_cross::MSLSamplerAddress, + pub r_address: root::spirv_cross::MSLSamplerAddress, + pub compare_func: root::spirv_cross::MSLSamplerCompareFunc, + pub border_color: root::spirv_cross::MSLSamplerBorderColor, + pub lod_clamp_min: f32, + pub 
lod_clamp_max: f32, + pub max_anisotropy: ::std::os::raw::c_int, + pub planes: u32, + pub resolution: root::spirv_cross::MSLFormatResolution, + pub chroma_filter: root::spirv_cross::MSLSamplerFilter, + pub x_chroma_offset: root::spirv_cross::MSLChromaLocation, + pub y_chroma_offset: root::spirv_cross::MSLChromaLocation, + pub swizzle: [root::spirv_cross::MSLComponentSwizzle; 4usize], + pub ycbcr_model: root::spirv_cross::MSLSamplerYCbCrModelConversion, + pub ycbcr_range: root::spirv_cross::MSLSamplerYCbCrRange, + pub bpc: u32, + pub compare_enable: bool, + pub lod_clamp_enable: bool, + pub anisotropy_enable: bool, + pub ycbcr_conversion_enable: bool, } } pub type ScInternalCompilerBase = ::std::os::raw::c_void; @@ -1721,7 +2168,7 @@ pub mod root { CompilationError = 2, } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScEntryPoint { pub name: *mut ::std::os::raw::c_char, pub execution_model: root::spv::ExecutionModel, @@ -1729,32 +2176,30 @@ pub mod root { pub work_group_size_y: u32, pub work_group_size_z: u32, } - impl Clone for ScEntryPoint { - fn clone(&self) -> Self { *self } + #[repr(C)] + #[derive(Debug, Copy, Clone)] + pub struct ScBufferRange { + pub index: ::std::os::raw::c_uint, + pub offset: usize, + pub range: usize, } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScCombinedImageSampler { pub combined_id: u32, pub image_id: u32, pub sampler_id: u32, } - impl Clone for ScCombinedImageSampler { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScHlslRootConstant { pub start: u32, pub end: u32, pub binding: u32, pub space: u32, } - impl Clone for ScHlslRootConstant { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScHlslCompilerOptions { pub shader_model: i32, pub point_size_compat: bool, @@ -1762,11 +2207,8 @@ pub mod root { pub 
vertex_transform_clip_space: bool, pub vertex_invert_y: bool, } - impl Clone for ScHlslCompilerOptions { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScMslCompilerOptions { pub vertex_transform_clip_space: bool, pub vertex_invert_y: bool, @@ -1786,42 +2228,30 @@ pub mod root { pub argument_buffers: bool, pub pad_fragment_output_components: bool, } - impl Clone for ScMslCompilerOptions { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScGlslCompilerOptions { pub vertex_transform_clip_space: bool, pub vertex_invert_y: bool, pub version: u32, pub es: bool, } - impl Clone for ScGlslCompilerOptions { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScResource { pub id: u32, pub type_id: u32, pub base_type_id: u32, pub name: *mut ::std::os::raw::c_char, } - impl Clone for ScResource { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScResourceArray { pub data: *mut root::ScResource, pub num: usize, } - impl Clone for ScResourceArray { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScShaderResources { pub uniform_buffers: root::ScResourceArray, pub storage_buffers: root::ScResourceArray, @@ -1835,28 +2265,26 @@ pub mod root { pub separate_images: root::ScResourceArray, pub separate_samplers: root::ScResourceArray, } - impl Clone for ScShaderResources { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScSpecializationConstant { pub id: u32, pub constant_id: u32, } - impl Clone for ScSpecializationConstant { - fn clone(&self) -> Self { *self } - } #[repr(C)] - #[derive(Debug, Copy)] + #[derive(Debug, Copy, Clone)] pub struct ScType { - pub type_: 
root::SPIRV_CROSS_NAMESPACE::SPIRType_BaseType, + pub type_: root::spirv_cross::SPIRType_BaseType, pub member_types: *mut u32, pub member_types_size: usize, pub array: *mut u32, pub array_size: usize, } - impl Clone for ScType { - fn clone(&self) -> Self { *self } + #[repr(C)] + #[derive(Debug, Copy, Clone)] + pub struct ScMslConstSamplerMapping { + pub desc_set: u32, + pub binding: u32, + pub sampler: root::spirv_cross::MSLConstexprSampler, } } diff --git a/third_party/rust/spirv-cross-internal/src/bindings_wasm_functions.rs b/third_party/rust/spirv-cross-internal/src/bindings_wasm_functions.rs index 7f8eebfa22c5..4ac754170aa1 100644 --- a/third_party/rust/spirv-cross-internal/src/bindings_wasm_functions.rs +++ b/third_party/rust/spirv-cross-internal/src/bindings_wasm_functions.rs @@ -61,6 +61,14 @@ extern "C" { #[wasm_bindgen(js_namespace = sc_internal)] fn _sc_internal_compiler_get_entry_points(compiler: u32, entry_points: u32, size: u32) -> u32; + #[wasm_bindgen(js_namespace = sc_internal)] + fn _sc_internal_compiler_get_active_buffer_ranges( + compiler: u32, + id: u32, + active_buffer_ranges: u32, + size: u32, + ) -> u32; + #[wasm_bindgen(js_namespace = sc_internal)] fn _sc_internal_compiler_get_cleansed_entry_point_name( compiler: u32, @@ -358,6 +366,35 @@ pub fn sc_internal_compiler_get_entry_points( } } +pub fn sc_internal_compiler_get_active_buffer_ranges( + compiler: *const bindings::ScInternalCompilerBase, + id: u32, + active_buffer_ranges: *mut *mut bindings::ScBufferRange, + size: *mut usize, +) -> bindings::ScInternalResult { + let module = emscripten::get_module(); + unsafe { + let active_buffer_ranges_ptr_to_ptr = module.allocate(U32_SIZE); + let size_ptr = module.allocate(U32_SIZE); + + let result = map_internal_result(_sc_internal_compiler_get_active_buffer_ranges( + compiler as u32, + id, + active_buffer_ranges_ptr_to_ptr.as_offset(), + size_ptr.as_offset(), + )); + + *active_buffer_ranges = + module.get_u32(active_buffer_ranges_ptr_to_ptr) as 
*mut bindings::ScBufferRange; + *size = module.get_u32(size_ptr) as usize; + + module.free(size_ptr); + module.free(active_buffer_ranges_ptr_to_ptr); + + result + } +} + pub fn sc_internal_compiler_get_cleansed_entry_point_name( compiler: *const bindings::ScInternalCompilerBase, original_entry_point_name: *const ::std::os::raw::c_char, diff --git a/third_party/rust/spirv-cross-internal/src/compiler.rs b/third_party/rust/spirv-cross-internal/src/compiler.rs index 5610e947c0ff..0c8f266b9b11 100644 --- a/third_party/rust/spirv-cross-internal/src/compiler.rs +++ b/third_party/rust/spirv-cross-internal/src/compiler.rs @@ -5,7 +5,7 @@ use crate::spirv::{self, Decoration, Type}; use crate::ErrorCode; use std::ffi::CString; use std::os::raw::c_void; -use std::{mem, ptr}; +use std::{mem::MaybeUninit, ptr}; impl spirv::ExecutionModel { fn from_raw(raw: br::spv::ExecutionModel) -> Result { @@ -95,11 +95,11 @@ impl spirv::Decoration { impl spirv::Type { pub(crate) fn from_raw( - ty: br::SPIRV_CROSS_NAMESPACE::SPIRType_BaseType, + ty: br::spirv_cross::SPIRType_BaseType, member_types: Vec, array: Vec, ) -> Type { - use crate::bindings::root::SPIRV_CROSS_NAMESPACE::SPIRType_BaseType as B; + use crate::bindings::root::spirv_cross::SPIRType_BaseType as B; use crate::spirv::Type::*; match ty { B::Unknown => Unknown, @@ -139,7 +139,7 @@ pub struct Compiler { } impl Compiler { - #[cfg(any(feature = "msl", feature = "glsl", feature = "hlsl"))] + #[cfg(any(feature = "glsl", feature = "hlsl"))] pub fn compile(&mut self) -> Result { unsafe { let mut shader_ptr = ptr::null(); @@ -262,14 +262,15 @@ impl Compiler { check!(br::sc_internal_free_pointer( entry_point_raw.name as *mut c_void, )); - check!(br::sc_internal_free_pointer( - entry_point_raw_ptr as *mut c_void - )); Ok(entry_point) }) .collect::, _>>(); + check!(br::sc_internal_free_pointer( + entry_points_raw as *mut c_void, + )); + Ok(entry_points?) 
} } @@ -489,11 +490,12 @@ impl Compiler { pub fn get_shader_resources(&self) -> Result { unsafe { - let mut shader_resources_raw = mem::uninitialized(); + let mut shader_resources_raw = MaybeUninit::uninit(); check!(br::sc_internal_compiler_get_shader_resources( self.sc_compiler, - &mut shader_resources_raw, + shader_resources_raw.as_mut_ptr(), )); + let shader_resources_raw = shader_resources_raw.assume_init(); let fill_resources = |array_raw: &br::ScResourceArray| { let resources = (0..array_raw.num as usize) diff --git a/third_party/rust/spirv-cross-internal/src/msl.rs b/third_party/rust/spirv-cross-internal/src/msl.rs index 038b54d5b818..5d700550b607 100644 --- a/third_party/rust/spirv-cross-internal/src/msl.rs +++ b/third_party/rust/spirv-cross-internal/src/msl.rs @@ -12,9 +12,9 @@ use std::u8; pub enum Target {} pub struct TargetData { - vertex_attribute_overrides: Vec, - resource_binding_overrides: Vec, - const_samplers: Vec, + vertex_attribute_overrides: Vec, + resource_binding_overrides: Vec, + const_samplers: Vec, } impl spirv::Target for Target { @@ -34,9 +34,9 @@ pub enum Format { } impl Format { - fn as_raw(&self) -> br::SPIRV_CROSS_NAMESPACE::MSLVertexFormat { + fn as_raw(&self) -> br::spirv_cross::MSLVertexFormat { use self::Format::*; - use crate::bindings::root::SPIRV_CROSS_NAMESPACE::MSLVertexFormat as R; + use crate::bindings::root::spirv_cross::MSLVertexFormat as R; match self { Other => R::MSL_VERTEX_FORMAT_OTHER, Uint8 => R::MSL_VERTEX_FORMAT_UINT8, @@ -140,17 +140,49 @@ impl LodBase16 { pub const ZERO: Self = LodBase16(0); pub const MAX: Self = LodBase16(!0); } + impl From for LodBase16 { fn from(v: f32) -> Self { LodBase16((v * 16.0).max(0.0).min(u8::MAX as f32) as u8) } } + impl Into for LodBase16 { fn into(self) -> f32 { self.0 as f32 / 16.0 } } +/// MSL format resolution. +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum FormatResolution { + _444 = 0, + _422 = 1, + _420 = 2, +} + +/// MSL chroma location. 
+#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum ChromaLocation { + CositedEven = 0, + LocationMidpoint = 1, +} + +/// MSL component swizzle. +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum ComponentSwizzle { + Identity = 0, + Zero = 1, + One = 2, + R = 3, + G = 4, + B = 5, + A = 6, +} + /// Data fully defining a constant sampler. #[derive(Debug, Clone, Hash, Eq, PartialEq)] pub struct SamplerData { @@ -166,6 +198,36 @@ pub struct SamplerData { pub lod_clamp_min: LodBase16, pub lod_clamp_max: LodBase16, pub max_anisotropy: i32, + // Sampler YCbCr conversion parameters + pub planes: u32, + pub resolution: FormatResolution, + pub chroma_filter: SamplerFilter, + pub x_chroma_offset: ChromaLocation, + pub y_chroma_offset: ChromaLocation, + pub swizzle: [ComponentSwizzle; 4], + pub ycbcr_conversion_enable: bool, + pub ycbcr_model: SamplerYCbCrModelConversion, + pub ycbcr_range: SamplerYCbCrRange, + pub bpc: u32, +} + +/// A MSL sampler YCbCr model conversion. +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum SamplerYCbCrModelConversion { + RgbIdentity = 0, + YCbCrIdentity = 1, + YCbCrBt709 = 2, + YCbCrBt601 = 3, + YCbCrBt2020 = 4, +} + +/// A MSL sampler YCbCr range. +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum SamplerYCbCrRange { + ItuFull = 0, + ItuNarrow = 1, } /// A MSL shader platform. 
@@ -352,7 +414,7 @@ impl spirv::Compile for spirv::Ast { self.compiler.target_data.resource_binding_overrides.clear(); self.compiler.target_data.resource_binding_overrides.extend( options.resource_binding_overrides.iter().map(|(loc, res)| { - br::SPIRV_CROSS_NAMESPACE::MSLResourceBinding { + br::spirv_cross::MSLResourceBinding { stage: loc.stage.as_raw(), desc_set: loc.desc_set, binding: loc.binding, @@ -366,7 +428,7 @@ impl spirv::Compile for spirv::Ast { self.compiler.target_data.vertex_attribute_overrides.clear(); self.compiler.target_data.vertex_attribute_overrides.extend( options.vertex_attribute_overrides.iter().map(|(loc, vat)| { - br::SPIRV_CROSS_NAMESPACE::MSLVertexAttr { + br::spirv_cross::MSLVertexAttr { location: loc.0, msl_buffer: vat.buffer_id, msl_offset: vat.offset, @@ -382,13 +444,15 @@ impl spirv::Compile for spirv::Ast { ); self.compiler.target_data.const_samplers.clear(); - self.compiler.target_data.const_samplers.extend( - options.const_samplers.iter().map(|(loc, data)| unsafe { + self.compiler + .target_data + .const_samplers + .extend(options.const_samplers.iter().map(|(loc, data)| unsafe { use std::mem::transmute; - br::MslConstSamplerMapping { + br::ScMslConstSamplerMapping { desc_set: loc.desc_set, binding: loc.binding, - sampler: br::SPIRV_CROSS_NAMESPACE::MSLConstexprSampler { + sampler: br::spirv_cross::MSLConstexprSampler { coord: transmute(data.coord), min_filter: transmute(data.min_filter), mag_filter: transmute(data.mag_filter), @@ -402,13 +466,22 @@ impl spirv::Compile for spirv::Ast { lod_clamp_max: data.lod_clamp_max.into(), max_anisotropy: data.max_anisotropy, compare_enable: data.compare_func != SamplerCompareFunc::Always, - lod_clamp_enable: data.lod_clamp_min != LodBase16::ZERO || - data.lod_clamp_max != LodBase16::MAX, + lod_clamp_enable: data.lod_clamp_min != LodBase16::ZERO + || data.lod_clamp_max != LodBase16::MAX, anisotropy_enable: data.max_anisotropy != 0, + bpc: data.bpc, + chroma_filter: 
transmute(data.chroma_filter), + planes: data.planes, + resolution: transmute(data.resolution), + swizzle: transmute(data.swizzle), + x_chroma_offset: transmute(data.x_chroma_offset), + y_chroma_offset: transmute(data.y_chroma_offset), + ycbcr_conversion_enable: data.ycbcr_conversion_enable, + ycbcr_model: transmute(data.ycbcr_model), + ycbcr_range: transmute(data.ycbcr_range), }, } - }), - ); + })); Ok(()) } diff --git a/third_party/rust/spirv-cross-internal/src/spirv.rs b/third_party/rust/spirv-cross-internal/src/spirv.rs index 0939d82b4c54..18db2af39f73 100644 --- a/third_party/rust/spirv-cross-internal/src/spirv.rs +++ b/third_party/rust/spirv-cross-internal/src/spirv.rs @@ -176,8 +176,8 @@ pub enum BuiltIn { #[cfg(feature = "msl")] pub(crate) fn built_in_as_raw(built_in: Option) -> crate::bindings::spv::BuiltIn { - use BuiltIn::*; use crate::bindings as br; + use BuiltIn::*; match built_in { None => br::spv::BuiltIn::BuiltInMax, Some(Position) => br::spv::BuiltIn::BuiltInPosition, @@ -293,11 +293,11 @@ pub struct EntryPoint { /// Description of struct member's range. #[derive(Clone, Debug, Hash, Eq, PartialEq)] pub struct BufferRange { - /// Useful for passing to get_member_name() and get_member_decoration(), testing showes. + /// An index. Useful for passing to `get_member_name` and `get_member_decoration`. pub index: u32, - /// Bytes from start of buffer not beggining of struct, testing showes. + /// Bytes from start of buffer not beginning of struct. pub offset: usize, - /// Size of field in bytes. From https://github.com/KhronosGroup/SPIRV-Cross/issues/1176#issuecomment-542563608 + /// Size of field in bytes. pub range: usize, } @@ -455,7 +455,7 @@ where self.compiler.get_decoration(id, decoration) } - /// Gets a name. If not defined, an empty string will be returned. + /// Gets a name. If not defined, an empty string will be returned. 
pub fn get_name(&mut self, id: u32) -> Result { self.compiler.get_name(id) } diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/.travis.yml b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/.travis.yml new file mode 100644 index 000000000000..575262531db0 --- /dev/null +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/.travis.yml @@ -0,0 +1,72 @@ +language: + - cpp + - python + +python: 3.7 + +matrix: + include: + - os: linux + dist: trusty + compiler: gcc + env: + - GENERATOR="Unix Makefiles" + - ARTIFACT=gcc-trusty-64bit + - os: linux + dist: trusty + compiler: clang + env: + - GENERATOR="Unix Makefiles" + - ARTIFACT=clang-trusty-64bit + - os: osx + compiler: clang + osx_image: xcode10 + env: + - GENERATOR="Unix Makefiles" + - ARTIFACT=clang-macos-64bit + - os: windows + before_install: + - choco install python3 + - export PATH="/c/Python38:/c/Python38/Scripts:$PATH" + env: + - GENERATOR="Visual Studio 15 2017" + - ARTIFACT=vs2017-32bit + - os: windows + before_install: + - choco install python3 + - export PATH="/c/Python38:/c/Python38/Scripts:$PATH" + env: + - GENERATOR="Visual Studio 15 2017 Win64" + - ARTIFACT=vs2017-64bit + +before_script: + - "./checkout_glslang_spirv_tools.sh" + +script: + - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then PYTHON3=$(which python); fi + - if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then PYTHON3=$(which python3); fi + - "./build_glslang_spirv_tools.sh Release" + - mkdir build + - cd build + - cmake .. -DSPIRV_CROSS_WERROR=ON -DSPIRV_CROSS_MISC_WARNINGS=ON -DSPIRV_CROSS_SHARED=ON -DCMAKE_INSTALL_PREFIX=output -DCMAKE_BUILD_TYPE=Release -G "${GENERATOR}" -DPYTHON_EXECUTABLE:FILEPATH="${PYTHON3}" -DSPIRV_CROSS_ENABLE_TESTS=ON + - cmake --build . --config Release + - cmake --build . --config Release --target install + - ctest --verbose -C Release + - cd .. 
+ +before_deploy: + - REV=${ARTIFACT}-$(git rev-parse --short=10 HEAD) + - cd build/output + - tar cf spirv-cross-${REV}.tar * + - gzip spirv-cross-${REV}.tar + - cd ../.. + - export FILE_TO_UPLOAD=build/output/spirv-cross-${REV}.tar.gz + +deploy: + provider: releases + api_key: + secure: c7YEOyzhE19TFo76UnbLWk/kikRQxsHsOxzkOqN6Q2aL8joNRw5kmcG84rGd+Rf6isX62cykCzA6qHkyJCv9QTIzcyXnLju17rLvgib7cXDcseaq8x4mFvet2yUxCglthDpFY2M2LB0Aqws71lPeYIrKXa6hCFEh8jO3AWxnaor7O3RYfNZylM9d33HgH6KLT3sDx/cukwBstmKeg7EG9OUnrSvairkPW0W2+jlq3SXPlq/WeVhf8hQs3Yg0BluExGbmLOwe9EaeUpeGuJMyHRxXypnToQv1/KwoScKpap5tYxdNWiwRGZ4lYcmKrjAYVvilTioh654oX5LQpn34mE/oe8Ko9AaATkSaoiisRFp6meWtnB39oFBoL5Yn15DqLQpRXPr1AJsnBXSGAac3aDBO1j4MIqTHmYlYlfRw3n2ZsBaFaTZnv++438SNQ54nkivyoDTIWjoOmYa9+K4mQc3415RDdQmjZTJM+lu+GAlMmNBTVbfNvrbU55Usu9Lo6BZJKKdUMvdBB78kJ5FHvcBlL+eMgmk1pABQY0IZROCt7NztHcv1UmAxoWNxveSFs5glydPNNjNS8bogc4dzBGYG0KMmILbBHihVbY2toA1M9CMdDHdp+LucfDMmzECmYSEmlx0h8win+Jjb74/qpOhaXuUZ0NnzVgCOyeUYuMQ= + file: "${FILE_TO_UPLOAD}" + skip_cleanup: true + on: + tags: true diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/CMakeLists.txt b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/CMakeLists.txt index c926f5c0063f..f5fcd2acba69 100644 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/CMakeLists.txt +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/CMakeLists.txt @@ -56,7 +56,7 @@ set(spirv-compiler-options "") set(spirv-compiler-defines "") set(spirv-cross-link-flags "") -message(STATUS "Finding Git version for SPIRV-Cross.") +message(STATUS "SPIRV-Cross: Finding Git version for SPIRV-Cross.") set(spirv-cross-build-version "unknown") find_package(Git) if (GIT_FOUND) @@ -67,9 +67,9 @@ if (GIT_FOUND) ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE ) - message(STATUS "Git hash: ${spirv-cross-build-version}") + message(STATUS "SPIRV-Cross: Git hash: ${spirv-cross-build-version}") else() - message(STATUS "Git not found, using unknown build 
version.") + message(STATUS "SPIRV-Cross: Git not found, using unknown build version.") endif() string(TIMESTAMP spirv-cross-timestamp) @@ -308,7 +308,7 @@ if (SPIRV_CROSS_STATIC) endif() set(spirv-cross-abi-major 0) -set(spirv-cross-abi-minor 19) +set(spirv-cross-abi-minor 21) set(spirv-cross-abi-patch 0) if (SPIRV_CROSS_SHARED) @@ -453,14 +453,14 @@ if (SPIRV_CROSS_CLI) if ((${spirv-cross-glslang} MATCHES "NOTFOUND") OR (${spirv-cross-spirv-as} MATCHES "NOTFOUND") OR (${spirv-cross-spirv-val} MATCHES "NOTFOUND") OR (${spirv-cross-spirv-opt} MATCHES "NOTFOUND")) set(SPIRV_CROSS_ENABLE_TESTS OFF) - message("Could not find glslang or SPIRV-Tools build under external/. Run ./checkout_glslang_spirv_tools.sh and ./build_glslang_spirv_tools.sh. Testing will be disabled.") + message("SPIRV-Cross: Testing will be disabled for SPIRV-Cross. Could not find glslang or SPIRV-Tools build under external/. To enable testing, run ./checkout_glslang_spirv_tools.sh and ./build_glslang_spirv_tools.sh first.") else() set(SPIRV_CROSS_ENABLE_TESTS ON) - message("Found glslang and SPIRV-Tools. Enabling test suite.") - message("Found glslangValidator in: ${spirv-cross-glslang}.") - message("Found spirv-as in: ${spirv-cross-spirv-as}.") - message("Found spirv-val in: ${spirv-cross-spirv-val}.") - message("Found spirv-opt in: ${spirv-cross-spirv-opt}.") + message("SPIRV-Cross: Found glslang and SPIRV-Tools. Enabling test suite.") + message("SPIRV-Cross: Found glslangValidator in: ${spirv-cross-glslang}.") + message("SPIRV-Cross: Found spirv-as in: ${spirv-cross-spirv-as}.") + message("SPIRV-Cross: Found spirv-val in: ${spirv-cross-spirv-val}.") + message("SPIRV-Cross: Found spirv-opt in: ${spirv-cross-spirv-opt}.") endif() set(spirv-cross-externals @@ -577,7 +577,7 @@ if (SPIRV_CROSS_CLI) WORKING_DIRECTORY $) endif() elseif(NOT ${PYTHONINTERP_FOUND}) - message(WARNING "Testing disabled. Could not find python3. 
If you have python3 installed try running " + message(WARNING "SPIRV-Cross: Testing disabled. Could not find python3. If you have python3 installed try running " "cmake with -DPYTHON_EXECUTABLE:FILEPATH=/path/to/python3 to help it find the executable") endif() endif() diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/CODE_OF_CONDUCT.md b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..a11610bd300b --- /dev/null +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +A reminder that this issue tracker is managed by the Khronos Group. Interactions here should follow the Khronos Code of Conduct (https://www.khronos.org/developers/code-of-conduct), which prohibits aggressive or derogatory language. Please keep the discussion friendly and civil. diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/README.md b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/README.md new file mode 100644 index 000000000000..831c6ff00e29 --- /dev/null +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/README.md @@ -0,0 +1,493 @@ +# SPIRV-Cross + +SPIRV-Cross is a tool designed for parsing and converting SPIR-V to other shader languages. 
+ +[![Build Status](https://travis-ci.org/KhronosGroup/SPIRV-Cross.svg?branch=master)](https://travis-ci.org/KhronosGroup/SPIRV-Cross) +[![Build Status](https://ci.appveyor.com/api/projects/status/github/KhronosGroup/SPIRV-Cross?svg=true&branch=master)](https://ci.appveyor.com/project/HansKristian-Work/SPIRV-Cross) + +## Features + + - Convert SPIR-V to readable, usable and efficient GLSL + - Convert SPIR-V to readable, usable and efficient Metal Shading Language (MSL) + - Convert SPIR-V to readable, usable and efficient HLSL + - Convert SPIR-V to debuggable C++ [DEPRECATED] + - Convert SPIR-V to a JSON reflection format [EXPERIMENTAL] + - Reflection API to simplify the creation of Vulkan pipeline layouts + - Reflection API to modify and tweak OpDecorations + - Supports "all" of vertex, fragment, tessellation, geometry and compute shaders. + +SPIRV-Cross tries hard to emit readable and clean output from the SPIR-V. +The goal is to emit GLSL or MSL that looks like it was written by a human and not awkward IR/assembly-like code. + +NOTE: Individual features are expected to be mostly complete, but it is possible that certain obscure GLSL features are not yet supported. +However, most missing features are expected to be "trivial" improvements at this stage. + +## Building + +SPIRV-Cross has been tested on Linux, iOS/OSX, Windows and Android. CMake is the main build system. + +### Linux and macOS + +Building with CMake is recommended, as it is the only build system which is tested in continuous integration. +It is also the only build system which has install commands and other useful build system features. + +However, you can just run `make` on the command line as a fallback if you only care about the CLI tool. + +A non-ancient GCC (4.8+) or Clang (3.x+) compiler is required as SPIRV-Cross uses C++11 extensively. + +### Windows + +Building with CMake is recommended, which is the only way to target MSVC. +MinGW-w64 based compilation works with `make` as a fallback. 
+ +### Android + +SPIRV-Cross is only useful as a library here. Use the CMake build to link SPIRV-Cross to your project. + +### C++ exceptions + +The make and CMake build flavors offer the option to treat exceptions as assertions. To disable exceptions for make just append `SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS=1` to the command line. For CMake append `-DSPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS=ON`. By default exceptions are enabled. + +### Static, shared and CLI + +You can use `-DSPIRV_CROSS_STATIC=ON/OFF` `-DSPIRV_CROSS_SHARED=ON/OFF` `-DSPIRV_CROSS_CLI=ON/OFF` to control which modules are built (and installed). + +## Usage + +### Using the C++ API + +The C++ API is the main API for SPIRV-Cross. For more in-depth documentation than what's provided in this README, +please have a look at the [Wiki](https://github.com/KhronosGroup/SPIRV-Cross/wiki). +**NOTE**: This API is not guaranteed to be ABI-stable, and it is highly recommended to link against this API statically. +The API is generally quite stable, but it can change over time, see the C API for more stability. + +To perform reflection and convert to other shader languages you can use the SPIRV-Cross API. +For example: + +```c++ +#include "spirv_glsl.hpp" +#include +#include + +extern std::vector load_spirv_file(); + +int main() +{ + // Read SPIR-V from disk or similar. + std::vector spirv_binary = load_spirv_file(); + + spirv_cross::CompilerGLSL glsl(std::move(spirv_binary)); + + // The SPIR-V is now parsed, and we can perform reflection on it. + spirv_cross::ShaderResources resources = glsl.get_shader_resources(); + + // Get all sampled images in the shader. + for (auto &resource : resources.sampled_images) + { + unsigned set = glsl.get_decoration(resource.id, spv::DecorationDescriptorSet); + unsigned binding = glsl.get_decoration(resource.id, spv::DecorationBinding); + printf("Image %s at set = %u, binding = %u\n", resource.name.c_str(), set, binding); + + // Modify the decoration to prepare it for GLSL. 
+ glsl.unset_decoration(resource.id, spv::DecorationDescriptorSet); + + // Some arbitrary remapping if we want. + glsl.set_decoration(resource.id, spv::DecorationBinding, set * 16 + binding); + } + + // Set some options. + spirv_cross::CompilerGLSL::Options options; + options.version = 310; + options.es = true; + glsl.set_options(options); + + // Compile to GLSL, ready to give to GL driver. + std::string source = glsl.compile(); +} +``` + +### Using the C API wrapper + +To facilitate C compatibility and compatibility with foreign programming languages, a C89-compatible API wrapper is provided. Unlike the C++ API, +the goal of this wrapper is to be fully stable, both API and ABI-wise. +This is the only interface which is supported when building SPIRV-Cross as a shared library. + +An important point of the wrapper is that all memory allocations are contained in the `spvc_context`. +This simplifies the use of the API greatly. However, you should destroy the context as soon as reasonable, +or use `spvc_context_release_allocations()` if you intend to reuse the `spvc_context` object again soon. + +Most functions return a `spvc_result`, where `SPVC_SUCCESS` is the only success code. +For brevity, the code below does not do any error checking. + +```c +#include + +const SpvId *spirv = get_spirv_data(); +size_t word_count = get_spirv_word_count(); + +spvc_context context = NULL; +spvc_parsed_ir ir = NULL; +spvc_compiler compiler_glsl = NULL; +spvc_compiler_options options = NULL; +spvc_resources resources = NULL; +const spvc_reflected_resource *list = NULL; +const char *result = NULL; +size_t count; +size_t i; + +// Create context. +spvc_context_create(&context); + +// Set debug callback. +spvc_context_set_error_callback(context, error_callback, userdata); + +// Parse the SPIR-V. +spvc_context_parse_spirv(context, spirv, word_count, &ir); + +// Hand it off to a compiler instance and give it ownership of the IR. 
+spvc_context_create_compiler(context, SPVC_BACKEND_GLSL, ir, SPVC_CAPTURE_MODE_TAKE_OWNERSHIP, &compiler_glsl); + +// Do some basic reflection. +spvc_compiler_create_shader_resources(compiler_glsl, &resources); +spvc_resources_get_resource_list_for_type(resources, SPVC_RESOURCE_TYPE_UNIFORM_BUFFER, &list, &count); + +for (i = 0; i < count; i++) +{ + printf("ID: %u, BaseTypeID: %u, TypeID: %u, Name: %s\n", list[i].id, list[i].base_type_id, list[i].type_id, + list[i].name); + printf(" Set: %u, Binding: %u\n", + spvc_compiler_get_decoration(compiler_glsl, list[i].id, SpvDecorationDescriptorSet), + spvc_compiler_get_decoration(compiler_glsl, list[i].id, SpvDecorationBinding)); +} + +// Modify options. +spvc_compiler_create_compiler_options(context, &options); +spvc_compiler_options_set_uint(options, SPVC_COMPILER_OPTION_GLSL_VERSION, 330); +spvc_compiler_options_set_bool(options, SPVC_COMPILER_OPTION_GLSL_ES, SPVC_FALSE); +spvc_compiler_install_compiler_options(compiler_glsl, options); + +spvc_compiler_compile(compiler, &result); +printf("Cross-compiled source: %s\n", result); + +// Frees all memory we allocated so far. +spvc_context_destroy(context); +``` + +### Linking + +#### CMake add_subdirectory() + +This is the recommended way if you are using CMake and want to link against SPIRV-Cross statically. + +#### Integrating SPIRV-Cross in a custom build system + +To add SPIRV-Cross to your own codebase, just copy the source and header files from root directory +and build the relevant .cpp files you need. Make sure to build with C++11 support, e.g. `-std=c++11` in GCC and Clang. +Alternatively, the Makefile generates a libspirv-cross.a static library during build that can be linked in. + +#### Linking against SPIRV-Cross as a system library + +It is possible to link against SPIRV-Cross when it is installed as a system library, +which would be mostly relevant for Unix-like platforms. 
+ +##### pkg-config + +For Unix-based systems, a pkg-config is installed for the C API, e.g.: + +``` +$ pkg-config spirv-cross-c-shared --libs --cflags +-I/usr/local/include/spirv_cross -L/usr/local/lib -lspirv-cross-c-shared +``` + +##### CMake + +If the project is installed, it can be found with `find_package()`, e.g.: + +``` +cmake_minimum_required(VERSION 3.5) +set(CMAKE_C_STANDARD 99) +project(Test LANGUAGES C) + +find_package(spirv_cross_c_shared) +if (spirv_cross_c_shared_FOUND) + message(STATUS "Found SPIRV-Cross C API! :)") +else() + message(STATUS "Could not find SPIRV-Cross C API! :(") +endif() + +add_executable(test test.c) +target_link_libraries(test spirv-cross-c-shared) +``` + +test.c: +```c +#include + +int main(void) +{ + spvc_context context; + spvc_context_create(&context); + spvc_context_destroy(context); +} +``` + +### CLI + +The CLI is suitable for basic cross-compilation tasks, but it cannot support the full flexibility that the API can. +Some examples below. + +#### Creating a SPIR-V file from GLSL with glslang + +``` +glslangValidator -H -V -o test.spv test.frag +``` + +#### Converting a SPIR-V file to GLSL ES + +``` +glslangValidator -H -V -o test.spv shaders/comp/basic.comp +./spirv-cross --version 310 --es test.spv +``` + +#### Converting to desktop GLSL + +``` +glslangValidator -H -V -o test.spv shaders/comp/basic.comp +./spirv-cross --version 330 --no-es test.spv --output test.comp +``` + +#### Disable prettifying optimizations + +``` +glslangValidator -H -V -o test.spv shaders/comp/basic.comp +./spirv-cross --version 310 --es test.spv --output test.comp --force-temporary +``` + +### Using shaders generated from C++ backend + +Please see `samples/cpp` where some GLSL shaders are compiled to SPIR-V, decompiled to C++ and run with test data. +Reading through the samples should explain how to use the C++ interface. +A simple Makefile is included to build all shaders in the directory. 
+ +### Implementation notes + +When using SPIR-V and SPIRV-Cross as an intermediate step for cross-compiling between high level languages there are some considerations to take into account, +as not all features used by one high-level language are necessarily supported natively by the target shader language. +SPIRV-Cross aims to provide the tools needed to handle these scenarios in a clean and robust way, but some manual action is required to maintain compatibility. + +#### HLSL source to GLSL + +##### HLSL entry points + +When using SPIR-V shaders compiled from HLSL, there are some extra things you need to take care of. +First make sure that the entry point is used correctly. +If you forget to set the entry point correctly in glslangValidator (-e MyFancyEntryPoint), +you will likely encounter this error message: + +``` +Cannot end a function before ending the current block. +Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid. +``` + +##### Vertex/Fragment interface linking + +HLSL relies on semantics in order to effectively link together shader stages. In the SPIR-V generated by glslang, the transformation from HLSL to GLSL ends up looking like + +```c++ +struct VSOutput { + // SV_Position is rerouted to gl_Position + float4 position : SV_Position; + float4 coord : TEXCOORD0; +}; + +VSOutput main(...) {} +``` + +```c++ +struct VSOutput { + float4 coord; +} +layout(location = 0) out VSOutput _magicNameGeneratedByGlslang; +``` + +While this works, be aware of the type of the struct which is used in the vertex stage and the fragment stage. +There may be issues if the structure type name differs in vertex stage and fragment stage. + +You can make use of the reflection interface to force the name of the struct type. + +``` +// Something like this for both vertex outputs and fragment inputs. 
+compiler.set_name(varying_resource.base_type_id, "VertexFragmentLinkage"); +``` + +Some platforms may require identical variable names for both vertex outputs and fragment inputs (for example MacOSX). +To rename variables based on location, please add +``` +--rename-interface-variable +``` + +#### HLSL source to legacy GLSL/ESSL + +HLSL tends to emit varying struct types to pass data between vertex and fragment. +This is not supported in legacy GL/GLES targets, so to support this, varying structs are flattened. +This is done automatically, but the API user might need to be aware that this is happening in order to support all cases. + +Modern GLES code like this: +```c++ +struct Output { + vec4 a; + vec2 b; +}; +out Output vout; +``` + +Is transformed into: +```c++ +struct Output { + vec4 a; + vec2 b; +}; +varying vec4 Output_a; +varying vec2 Output_b; +``` + +Note that now, both the struct name and the member names will participate in the linking interface between vertex and fragment, so +API users might want to ensure that both the struct names and member names match so that vertex outputs and fragment inputs can link properly. + + +#### Separate image samplers (HLSL/Vulkan) for backends which do not support it (GLSL) + +Another thing you need to remember is when using samplers and textures in HLSL these are separable, and not directly compatible with GLSL. If you need to use this with desktop GL/GLES, you need to call `Compiler::build_combined_image_samplers` first before calling `Compiler::compile`, or you will get an exception. + +```c++ +// From main.cpp +// Builds a mapping for all combinations of images and samplers. +compiler->build_combined_image_samplers(); + +// Give the remapped combined samplers new names. +// Here you can also set up decorations if you want (binding = #N). 
+for (auto &remap : compiler->get_combined_image_samplers()) +{ + compiler->set_name(remap.combined_id, join("SPIRV_Cross_Combined", compiler->get_name(remap.image_id), + compiler->get_name(remap.sampler_id))); +} +``` + +If your target is Vulkan GLSL, `--vulkan-semantics` will emit separate image samplers as you'd expect. +The command line client calls `Compiler::build_combined_image_samplers` automatically, but if you're calling the library, you'll need to do this yourself. + +#### Descriptor sets (Vulkan GLSL) for backends which do not support them (HLSL/GLSL/Metal) + +Descriptor sets are unique to Vulkan, so make sure that descriptor set + binding is remapped to a flat binding scheme (set always 0), so that other APIs can make sense of the bindings. +This can be done with `Compiler::set_decoration(id, spv::DecorationDescriptorSet)`. + +#### Linking by name for targets which do not support explicit locations (legacy GLSL/ESSL) + +Modern GLSL and HLSL sources (and SPIR-V) relies on explicit layout(location) qualifiers to guide the linking process between shader stages, +but older GLSL relies on symbol names to perform the linking. When emitting shaders with older versions, these layout statements will be removed, +so it is important that the API user ensures that the names of I/O variables are sanitized so that linking will work properly. +The reflection API can rename variables, struct types and struct members to deal with these scenarios using `Compiler::set_name` and friends. + +#### Clip-space conventions + +SPIRV-Cross can perform some common clip space conversions on gl_Position/SV_Position by enabling `CompilerGLSL::Options.vertex.fixup_clipspace`. +While this can be convenient, it is recommended to modify the projection matrices instead as that can achieve the same result. + +For GLSL targets, enabling this will convert a shader which assumes `[0, w]` depth range (Vulkan / D3D / Metal) into `[-w, w]` range. 
+For MSL and HLSL targets, enabling this will convert a shader in `[-w, w]` depth range (OpenGL) to `[0, w]` depth range. + +By default, the CLI will not enable `fixup_clipspace`, but in the API you might want to set an explicit value using `CompilerGLSL::set_options()`. + +Y-flipping of gl_Position and similar is also supported. +The use of this is discouraged, because relying on vertex shader Y-flipping tends to get quite messy. +To enable this, set `CompilerGLSL::Options.vertex.flip_vert_y` or `--flip-vert-y` in CLI. + +## Contributing + +Contributions to SPIRV-Cross are welcome. See Testing and Licensing sections for details. + +### Testing + +SPIRV-Cross maintains a test suite of shaders with reference output of how the output looks after going through a roundtrip through +glslangValidator/spirv-as then back through SPIRV-Cross again. +The reference files are stored inside the repository in order to be able to track regressions. + +All pull requests should ensure that test output does not change unexpectedly. This can be tested with: + +``` +./checkout_glslang_spirv_tools.sh # Checks out glslang and SPIRV-Tools at a fixed revision which matches the reference output. + # NOTE: Some users have reported problems cloning from git:// paths. To use https:// instead pass in + # $ PROTOCOL=https ./checkout_glslang_spirv_tools.sh + # instead. +./build_glslang_spirv_tools.sh # Builds glslang and SPIRV-Tools. +./test_shaders.sh # Runs over all changes and makes sure that there are no deltas compared to reference files. +``` + +`./test_shaders.sh` currently requires a Makefile setup with GCC/Clang to be set up. +However, on Windows, this can be rather inconvenient if a MinGW environment is not set up. 
+To use a spirv-cross binary you built with CMake (or otherwise), you can pass in an environment variable as such: + +``` +SPIRV_CROSS_PATH=path/to/custom/spirv-cross ./test_shaders.sh +``` + +However, when improving SPIRV-Cross there are of course legitimate cases where reference output should change. +In these cases, run: + +``` +./update_test_shaders.sh # SPIRV_CROSS_PATH also works here. +``` + +to update the reference files and include these changes as part of the pull request. +Always make sure you are running the correct version of glslangValidator as well as SPIRV-Tools when updating reference files. +See `checkout_glslang_spirv_tools.sh` for which revisions are currently expected. The revisions change regularly. + +In short, the master branch should always be able to run `./test_shaders.py shaders` and friends without failure. +SPIRV-Cross uses Travis CI to test all pull requests, so it is not strictly needed to perform testing yourself if you have problems running it locally. +A pull request which does not pass testing on Travis will not be accepted however. + +When adding support for new features to SPIRV-Cross, a new shader and reference file should be added which covers usage of the new shader features in question. +Travis CI runs the test suite with CMake, by running `ctest`. This is a more straight-forward alternative to `./test_shaders.sh`. + +### Licensing + +Contributors of new files should add a copyright header at the top of every new source code file with their copyright +along with the Apache 2.0 licensing stub. + +### Formatting + +SPIRV-Cross uses `clang-format` to automatically format code. +Please use `clang-format` with the style sheet found in `.clang-format` to automatically format code before submitting a pull request. + +To make things easy, the `format_all.sh` script can be used to format all +source files in the library. 
In this directory, run the following from the +command line: + + ./format_all.sh + +## Regression testing + +In shaders/ a collection of shaders are maintained for purposes of regression testing. +The current reference output is contained in reference/. +`./test_shaders.py shaders` can be run to perform regression testing. + +See `./test_shaders.py --help` for more. + +### Metal backend + +To test the roundtrip path GLSL -> SPIR-V -> MSL, `--msl` can be added, e.g. `./test_shaders.py --msl shaders-msl`. + +### HLSL backend + +To test the roundtrip path GLSL -> SPIR-V -> HLSL, `--hlsl` can be added, e.g. `./test_shaders.py --hlsl shaders-hlsl`. + +### Updating regression tests + +When legitimate changes are found, use `--update` flag to update regression files. +Otherwise, `./test_shaders.py` will fail with error code. + +### Mali Offline Compiler cycle counts + +To obtain a CSV of static shader cycle counts before and after going through spirv-cross, add +`--malisc` flag to `./test_shaders`. This requires the Mali Offline Compiler to be installed in PATH. + diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/appveyor.yml b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/appveyor.yml new file mode 100644 index 000000000000..2f427f18044c --- /dev/null +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/appveyor.yml @@ -0,0 +1,31 @@ + +environment: + matrix: + - GENERATOR: "Visual Studio 12 2013 Win64" + CONFIG: Debug + + - GENERATOR: "Visual Studio 12 2013 Win64" + CONFIG: Release + + - GENERATOR: "Visual Studio 14 2015 Win64" + CONFIG: Debug + + - GENERATOR: "Visual Studio 14 2015 Win64" + CONFIG: Release + + - GENERATOR: "Visual Studio 12 2013" + CONFIG: Debug + + - GENERATOR: "Visual Studio 12 2013" + CONFIG: Release + + - GENERATOR: "Visual Studio 14 2015" + CONFIG: Debug + + - GENERATOR: "Visual Studio 14 2015" + CONFIG: Release + +build_script: + - git submodule update --init + - cmake "-G%GENERATOR%" -H. 
-B_builds + - cmake --build _builds --config "%CONFIG%" diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/cmake/gitversion.in.h b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/cmake/gitversion.in.h deleted file mode 100644 index 7135e283b23d..000000000000 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/cmake/gitversion.in.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef SPIRV_CROSS_GIT_VERSION_H_ -#define SPIRV_CROSS_GIT_VERSION_H_ - -#define SPIRV_CROSS_GIT_REVISION "Git commit: @spirv-cross-build-version@ Timestamp: @spirv-cross-timestamp@" - -#endif diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/format_all.sh b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/format_all.sh deleted file mode 100755 index fcfffc57f864..000000000000 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/format_all.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -#for file in spirv_*.{cpp,hpp} include/spirv_cross/*.{hpp,h} samples/cpp/*.cpp main.cpp -for file in spirv_*.{cpp,hpp} main.cpp -do - echo "Formatting file: $file ..." - clang-format -style=file -i $file -done diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/gn/BUILD.gn b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/gn/BUILD.gn deleted file mode 100644 index 8458c1a70390..000000000000 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/gn/BUILD.gn +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2019 Google, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -config("spirv_cross_public") { - include_dirs = [ ".." ] - - defines = [ "SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS" ] -} - -source_set("spirv_cross_sources") { - public_configs = [ ":spirv_cross_public" ] - - sources = [ - "../GLSL.std.450.h", - "../spirv.hpp", - "../spirv_cfg.cpp", - "../spirv_cfg.hpp", - "../spirv_common.hpp", - "../spirv_cross.cpp", - "../spirv_cross.hpp", - "../spirv_cross_containers.hpp", - "../spirv_cross_error_handling.hpp", - "../spirv_cross_parsed_ir.cpp", - "../spirv_cross_parsed_ir.hpp", - "../spirv_cross_util.cpp", - "../spirv_cross_util.hpp", - "../spirv_glsl.cpp", - "../spirv_glsl.hpp", - "../spirv_msl.cpp", - "../spirv_msl.hpp", - "../spirv_parser.cpp", - "../spirv_parser.hpp", - "../spirv_reflect.cpp", - "../spirv_reflect.hpp", - ] - - cflags = [ "-fno-exceptions" ] - - if (is_clang) { - cflags_cc = [ - "-Wno-extra-semi", - "-Wno-ignored-qualifiers", - "-Wno-implicit-fallthrough", - "-Wno-inconsistent-missing-override", - "-Wno-missing-field-initializers", - "-Wno-newline-eof", - "-Wno-sign-compare", - "-Wno-unused-variable", - ] - } -} diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/main.cpp b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/main.cpp index 711b0ff19153..871c5f2fa9ac 100644 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/main.cpp +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/main.cpp @@ -552,6 +552,7 @@ struct CLIArguments bool hlsl = false; bool hlsl_compat = false; bool hlsl_support_nonzero_base = false; + HLSLBindingFlags hlsl_binding_flags = 0; bool vulkan_semantics = false; bool flatten_multidimensional_arrays = false; bool use_420pack_extension = true; @@ -614,6 +615,7 @@ static void print_help() "\t[--shader-model]\n" "\t[--hlsl-enable-compat]\n" "\t[--hlsl-support-nonzero-basevertex-baseinstance]\n" + "\t[--hlsl-auto-binding 
(push, cbv, srv, uav, sampler, all)]\n" "\t[--separate-shader-objects]\n" "\t[--pls-in format input-name]\n" "\t[--pls-out format output-name]\n" @@ -736,6 +738,27 @@ static ExecutionModel stage_to_execution_model(const std::string &stage) SPIRV_CROSS_THROW("Invalid stage."); } +static HLSLBindingFlags hlsl_resource_type_to_flag(const std::string &arg) +{ + if (arg == "push") + return HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT; + else if (arg == "cbv") + return HLSL_BINDING_AUTO_CBV_BIT; + else if (arg == "srv") + return HLSL_BINDING_AUTO_SRV_BIT; + else if (arg == "uav") + return HLSL_BINDING_AUTO_UAV_BIT; + else if (arg == "sampler") + return HLSL_BINDING_AUTO_SAMPLER_BIT; + else if (arg == "all") + return HLSL_BINDING_AUTO_ALL; + else + { + fprintf(stderr, "Invalid resource type for --hlsl-auto-binding: %s\n", arg.c_str()); + return 0; + } +} + static string compile_iteration(const CLIArguments &args, std::vector spirv_file) { Parser spirv_parser(move(spirv_file)); @@ -939,6 +962,7 @@ static string compile_iteration(const CLIArguments &args, std::vector hlsl_opts.support_nonzero_base_vertex_base_instance = args.hlsl_support_nonzero_base; hlsl->set_hlsl_options(hlsl_opts); + hlsl->set_resource_binding_flags(args.hlsl_binding_flags); } if (build_dummy_sampler) @@ -1089,6 +1113,9 @@ static int main_inner(int argc, char *argv[]) cbs.add("--hlsl-enable-compat", [&args](CLIParser &) { args.hlsl_compat = true; }); cbs.add("--hlsl-support-nonzero-basevertex-baseinstance", [&args](CLIParser &) { args.hlsl_support_nonzero_base = true; }); + cbs.add("--hlsl-auto-binding", [&args](CLIParser &parser) { + args.hlsl_binding_flags |= hlsl_resource_type_to_flag(parser.next_string()); + }); cbs.add("--vulkan-semantics", [&args](CLIParser &) { args.vulkan_semantics = true; }); cbs.add("--flatten-multidimensional-arrays", [&args](CLIParser &) { args.flatten_multidimensional_arrays = true; }); cbs.add("--no-420pack-extension", [&args](CLIParser &) { args.use_420pack_extension = false; }); 
diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_cross_c.cpp b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_cross_c.cpp index f6e63b4a01f9..b590fe8012d9 100644 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_cross_c.cpp +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_cross_c.cpp @@ -569,6 +569,30 @@ spvc_result spvc_compiler_options_set_uint(spvc_compiler_options options, spvc_c case SPVC_COMPILER_OPTION_MSL_DYNAMIC_OFFSETS_BUFFER_INDEX: options->msl.dynamic_offsets_buffer_index = value; break; + + case SPVC_COMPILER_OPTION_MSL_TEXTURE_1D_AS_2D: + options->msl.texture_1D_as_2D = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_ENABLE_BASE_INDEX_ZERO: + options->msl.enable_base_index_zero = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_IOS_FRAMEBUFFER_FETCH_SUBPASS: + options->msl.ios_use_framebuffer_fetch_subpasses = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_INVARIANT_FP_MATH: + options->msl.invariant_float_math = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_EMULATE_CUBEMAP_ARRAY: + options->msl.emulate_cube_array = value != 0; + break; + + case SPVC_COMPILER_OPTION_MSL_ENABLE_DECORATION_BINDING: + options->msl.enable_decoration_binding = value != 0; + break; #endif default: @@ -745,6 +769,26 @@ spvc_variable_id spvc_compiler_hlsl_remap_num_workgroups_builtin(spvc_compiler c #endif } +spvc_result spvc_compiler_hlsl_set_resource_binding_flags(spvc_compiler compiler, + spvc_hlsl_binding_flags flags) +{ +#if SPIRV_CROSS_C_API_HLSL + if (compiler->backend != SPVC_BACKEND_HLSL) + { + compiler->context->report_error("HLSL function used on a non-HLSL backend."); + return SPVC_ERROR_INVALID_ARGUMENT; + } + + auto &hlsl = *static_cast(compiler->compiler.get()); + hlsl.set_resource_binding_flags(flags); + return SPVC_SUCCESS; +#else + (void)flags; + compiler->context->report_error("HLSL function used on a non-HLSL backend."); + return 
SPVC_ERROR_INVALID_ARGUMENT; +#endif +} + spvc_bool spvc_compiler_msl_is_rasterization_disabled(spvc_compiler compiler) { #if SPIRV_CROSS_C_API_MSL @@ -1639,6 +1683,11 @@ spvc_type spvc_compiler_get_type_handle(spvc_compiler compiler, spvc_type_id id) SPVC_END_SAFE_SCOPE(compiler->context, nullptr) } +spvc_type_id spvc_type_get_base_type_id(spvc_type type) +{ + return type->self; +} + static spvc_basetype convert_basetype(SPIRType::BaseType type) { // For now the enums match up. diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_cross_c.h b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_cross_c.h index f950803ffb76..a3ad84be1993 100644 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_cross_c.h +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_cross_c.h @@ -33,7 +33,7 @@ extern "C" { /* Bumped if ABI or API breaks backwards compatibility. */ #define SPVC_C_API_VERSION_MAJOR 0 /* Bumped if APIs or enumerations are added in a backwards compatible way. */ -#define SPVC_C_API_VERSION_MINOR 19 +#define SPVC_C_API_VERSION_MINOR 21 /* Bumped if internal implementation details change. */ #define SPVC_C_API_VERSION_PATCH 0 @@ -466,6 +466,18 @@ typedef struct spvc_msl_sampler_ycbcr_conversion */ SPVC_PUBLIC_API void spvc_msl_sampler_ycbcr_conversion_init(spvc_msl_sampler_ycbcr_conversion *conv); +/* Maps to C++ API. */ +typedef enum spvc_hlsl_binding_flag_bits +{ + SPVC_HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT = 1 << 0, + SPVC_HLSL_BINDING_AUTO_CBV_BIT = 1 << 1, + SPVC_HLSL_BINDING_AUTO_SRV_BIT = 1 << 2, + SPVC_HLSL_BINDING_AUTO_UAV_BIT = 1 << 3, + SPVC_HLSL_BINDING_AUTO_SAMPLER_BIT = 1 << 4, + SPVC_HLSL_BINDING_AUTO_ALL = 0x7fffffff +} spvc_hlsl_binding_flag_bits; +typedef unsigned spvc_hlsl_binding_flags; + /* Maps to the various spirv_cross::Compiler*::Option structures. See C++ API for defaults and details. 
*/ typedef enum spvc_compiler_option { @@ -527,6 +539,12 @@ typedef enum spvc_compiler_option SPVC_COMPILER_OPTION_MSL_VIEW_INDEX_FROM_DEVICE_INDEX = 41 | SPVC_COMPILER_OPTION_MSL_BIT, SPVC_COMPILER_OPTION_MSL_DISPATCH_BASE = 42 | SPVC_COMPILER_OPTION_MSL_BIT, SPVC_COMPILER_OPTION_MSL_DYNAMIC_OFFSETS_BUFFER_INDEX = 43 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_TEXTURE_1D_AS_2D = 44 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ENABLE_BASE_INDEX_ZERO = 45 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_IOS_FRAMEBUFFER_FETCH_SUBPASS = 46 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_INVARIANT_FP_MATH = 47 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_EMULATE_CUBEMAP_ARRAY = 48 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ENABLE_DECORATION_BINDING = 49 | SPVC_COMPILER_OPTION_MSL_BIT, SPVC_COMPILER_OPTION_INT_MAX = 0x7fffffff } spvc_compiler_option; @@ -600,6 +618,9 @@ SPVC_PUBLIC_API spvc_result spvc_compiler_hlsl_add_vertex_attribute_remap(spvc_c size_t remaps); SPVC_PUBLIC_API spvc_variable_id spvc_compiler_hlsl_remap_num_workgroups_builtin(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_result spvc_compiler_hlsl_set_resource_binding_flags(spvc_compiler compiler, + spvc_hlsl_binding_flags flags); + /* * MSL specifics. * Maps to C++ API. @@ -713,6 +734,12 @@ SPVC_PUBLIC_API SpvExecutionModel spvc_compiler_get_execution_model(spvc_compile */ SPVC_PUBLIC_API spvc_type spvc_compiler_get_type_handle(spvc_compiler compiler, spvc_type_id id); +/* Pulls out SPIRType::self. This effectively gives the type ID without array or pointer qualifiers. + * This is necessary when reflecting decoration/name information on members of a struct, + * which are placed in the base type, not the qualified type. + * This is similar to spvc_reflected_resource::base_type_id. 
*/ +SPVC_PUBLIC_API spvc_type_id spvc_type_get_base_type_id(spvc_type type); + SPVC_PUBLIC_API spvc_basetype spvc_type_get_basetype(spvc_type type); SPVC_PUBLIC_API unsigned spvc_type_get_bit_width(spvc_type type); SPVC_PUBLIC_API unsigned spvc_type_get_vector_size(spvc_type type); diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_glsl.cpp b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_glsl.cpp index 97ad0ef1c348..e9925165f78d 100644 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_glsl.cpp +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_glsl.cpp @@ -1336,7 +1336,8 @@ uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &f } bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, - uint32_t start_offset, uint32_t end_offset) + uint32_t *failed_validation_index, uint32_t start_offset, + uint32_t end_offset) { // This is very tricky and error prone, but try to be exhaustive and correct here. // SPIR-V doesn't directly say if we're using std430 or std140. @@ -1417,18 +1418,28 @@ bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackin if (!packing_has_flexible_offset(packing)) { if (actual_offset != offset) // This cannot be the packing we're looking for. + { + if (failed_validation_index) + *failed_validation_index = i; return false; + } } else if ((actual_offset & (alignment - 1)) != 0) { // We still need to verify that alignment rules are observed, even if we have explicit offset. + if (failed_validation_index) + *failed_validation_index = i; return false; } // Verify array stride rules. if (!memb_type.array.empty() && type_to_packed_array_stride(memb_type, member_flags, packing) != type_struct_member_array_stride(type, i)) + { + if (failed_validation_index) + *failed_validation_index = i; return false; + } // Verify that sub-structs also follow packing rules. 
// We cannot use enhanced layouts on substructs, so they better be up to spec. @@ -1437,6 +1448,8 @@ bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackin if (!memb_type.pointer && !memb_type.member_types.empty() && !buffer_is_packing_standard(memb_type, substruct_packing)) { + if (failed_validation_index) + *failed_validation_index = i; return false; } } @@ -10762,8 +10775,10 @@ string CompilerGLSL::to_array_size(const SPIRType &type, uint32_t index) // Tessellation control and evaluation shaders must have either gl_MaxPatchVertices or unsized arrays for input arrays. // Opt for unsized as it's the more "correct" variant to use. - if (type.storage == StorageClassInput && (get_entry_point().model == ExecutionModelTessellationControl || - get_entry_point().model == ExecutionModelTessellationEvaluation)) + if (type.storage == StorageClassInput && + (get_entry_point().model == ExecutionModelTessellationControl || + get_entry_point().model == ExecutionModelTessellationEvaluation) && + index == uint32_t(type.array.size() - 1)) return ""; auto &size = type.array[index]; @@ -12765,14 +12780,14 @@ void CompilerGLSL::unroll_array_from_complex_load(uint32_t target_id, uint32_t s auto new_expr = join("_", target_id, "_unrolled"); statement(variable_decl(type, new_expr, target_id), ";"); string array_expr; - if (type.array_size_literal.front()) + if (type.array_size_literal.back()) { - array_expr = convert_to_string(type.array.front()); - if (type.array.front() == 0) + array_expr = convert_to_string(type.array.back()); + if (type.array.back() == 0) SPIRV_CROSS_THROW("Cannot unroll an array copy from unsized array."); } else - array_expr = to_expression(type.array.front()); + array_expr = to_expression(type.array.back()); // The array size might be a specialization constant, so use a for-loop instead. 
statement("for (int i = 0; i < int(", array_expr, "); i++)"); diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_glsl.hpp b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_glsl.hpp index 709a9abb6c1a..3326a2449624 100644 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_glsl.hpp +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_glsl.hpp @@ -558,7 +558,8 @@ protected: virtual void emit_block_hints(const SPIRBlock &block); virtual std::string to_initializer_expression(const SPIRVariable &var); - bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, uint32_t start_offset = 0, + bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, + uint32_t *failed_index = nullptr, uint32_t start_offset = 0, uint32_t end_offset = ~(0u)); std::string buffer_to_packing_standard(const SPIRType &type, bool support_std430_without_scalar_layout); diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_hlsl.cpp b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_hlsl.cpp index 5045d2ea8b59..ae7a4d55c730 100644 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_hlsl.cpp +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_hlsl.cpp @@ -1867,11 +1867,6 @@ void CompilerHLSL::emit_buffer_block(const SPIRVariable &var) { if (type.array.empty()) { - if (buffer_is_packing_standard(type, BufferPackingHLSLCbufferPackOffset)) - set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset); - else - SPIRV_CROSS_THROW("cbuffer cannot be expressed with either HLSL packing layout or packoffset."); - // Flatten the top-level struct so we can use packoffset, // this restriction is similar to GLSL where layout(offset) is not possible on sub-structs. 
flattened_structs.insert(var.self); @@ -1892,6 +1887,16 @@ void CompilerHLSL::emit_buffer_block(const SPIRVariable &var) if (buffer_name.empty()) buffer_name = join("_", get(var.basetype).self, "_", var.self); + uint32_t failed_index = 0; + if (buffer_is_packing_standard(type, BufferPackingHLSLCbufferPackOffset, &failed_index)) + set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset); + else + { + SPIRV_CROSS_THROW(join("cbuffer ID ", var.self, " (name: ", buffer_name, "), member index ", + failed_index, " (name: ", to_member_name(type, failed_index), + ") cannot be expressed with either HLSL packing layout or packoffset.")); + } + block_names.insert(buffer_name); // Save for post-reflection later. @@ -1927,13 +1932,18 @@ void CompilerHLSL::emit_buffer_block(const SPIRVariable &var) SPIRV_CROSS_THROW( "Need ConstantBuffer to use arrays of UBOs, but this is only supported in SM 5.1."); - // ConstantBuffer does not support packoffset, so it is unuseable unless everything aligns as we expect. - if (!buffer_is_packing_standard(type, BufferPackingHLSLCbuffer)) - SPIRV_CROSS_THROW("HLSL ConstantBuffer cannot be expressed with normal HLSL packing rules."); - add_resource_name(type.self); add_resource_name(var.self); + // ConstantBuffer does not support packoffset, so it is unuseable unless everything aligns as we expect. 
+ uint32_t failed_index = 0; + if (!buffer_is_packing_standard(type, BufferPackingHLSLCbuffer, &failed_index)) + { + SPIRV_CROSS_THROW(join("HLSL ConstantBuffer ID ", var.self, " (name: ", to_name(type.self), + "), member index ", failed_index, " (name: ", to_member_name(type, failed_index), + ") cannot be expressed with normal HLSL packing rules.")); + } + emit_struct(get(type.self)); statement("ConstantBuffer<", to_name(type.self), "> ", to_name(var.self), type_to_array_glsl(type), to_resource_binding(var), ";"); @@ -1953,11 +1963,16 @@ void CompilerHLSL::emit_push_constant_block(const SPIRVariable &var) { auto &type = get(var.basetype); - if (buffer_is_packing_standard(type, BufferPackingHLSLCbufferPackOffset, layout.start, layout.end)) + uint32_t failed_index = 0; + if (buffer_is_packing_standard(type, BufferPackingHLSLCbufferPackOffset, &failed_index, layout.start, + layout.end)) set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset); else - SPIRV_CROSS_THROW( - "root constant cbuffer cannot be expressed with either HLSL packing layout or packoffset."); + { + SPIRV_CROSS_THROW(join("Root constant cbuffer ID ", var.self, " (name: ", to_name(type.self), ")", + ", member index ", failed_index, " (name: ", to_member_name(type, failed_index), + ") cannot be expressed with either HLSL packing layout or packoffset.")); + } flattened_structs.insert(var.self); type.member_name_cache.clear(); @@ -1965,7 +1980,7 @@ void CompilerHLSL::emit_push_constant_block(const SPIRVariable &var) auto &memb = ir.meta[type.self].members; statement("cbuffer SPIRV_CROSS_RootConstant_", to_name(var.self), - to_resource_register('b', layout.binding, layout.space)); + to_resource_register(HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT, 'b', layout.binding, layout.space)); begin_scope(); // Index of the next field in the generated root constant constant buffer @@ -2928,21 +2943,31 @@ string CompilerHLSL::to_resource_binding(const SPIRVariable &var) const auto &type = get(var.basetype); 
char space = '\0'; + HLSLBindingFlags resource_flags = 0; + switch (type.basetype) { case SPIRType::SampledImage: space = 't'; // SRV + resource_flags = HLSL_BINDING_AUTO_SRV_BIT; break; case SPIRType::Image: if (type.image.sampled == 2 && type.image.dim != DimSubpassData) + { space = 'u'; // UAV + resource_flags = HLSL_BINDING_AUTO_UAV_BIT; + } else + { space = 't'; // SRV + resource_flags = HLSL_BINDING_AUTO_SRV_BIT; + } break; case SPIRType::Sampler: space = 's'; + resource_flags = HLSL_BINDING_AUTO_SAMPLER_BIT; break; case SPIRType::Struct: @@ -2955,18 +2980,26 @@ string CompilerHLSL::to_resource_binding(const SPIRVariable &var) Bitset flags = ir.get_buffer_block_flags(var); bool is_readonly = flags.get(DecorationNonWritable); space = is_readonly ? 't' : 'u'; // UAV + resource_flags = is_readonly ? HLSL_BINDING_AUTO_SRV_BIT : HLSL_BINDING_AUTO_UAV_BIT; } else if (has_decoration(type.self, DecorationBlock)) + { space = 'b'; // Constant buffers + resource_flags = HLSL_BINDING_AUTO_CBV_BIT; + } } else if (storage == StorageClassPushConstant) + { space = 'b'; // Constant buffers + resource_flags = HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT; + } else if (storage == StorageClassStorageBuffer) { // UAV or SRV depending on readonly flag. Bitset flags = ir.get_buffer_block_flags(var); bool is_readonly = flags.get(DecorationNonWritable); space = is_readonly ? 't' : 'u'; + resource_flags = is_readonly ? 
HLSL_BINDING_AUTO_SRV_BIT : HLSL_BINDING_AUTO_UAV_BIT; } break; @@ -2978,7 +3011,7 @@ string CompilerHLSL::to_resource_binding(const SPIRVariable &var) if (!space) return ""; - return to_resource_register(space, get_decoration(var.self, DecorationBinding), + return to_resource_register(resource_flags, space, get_decoration(var.self, DecorationBinding), get_decoration(var.self, DecorationDescriptorSet)); } @@ -2988,16 +3021,21 @@ string CompilerHLSL::to_resource_binding_sampler(const SPIRVariable &var) if (!has_decoration(var.self, DecorationBinding)) return ""; - return to_resource_register('s', get_decoration(var.self, DecorationBinding), + return to_resource_register(HLSL_BINDING_AUTO_SAMPLER_BIT, 's', get_decoration(var.self, DecorationBinding), get_decoration(var.self, DecorationDescriptorSet)); } -string CompilerHLSL::to_resource_register(char space, uint32_t binding, uint32_t space_set) +string CompilerHLSL::to_resource_register(uint32_t flags, char space, uint32_t binding, uint32_t space_set) { - if (hlsl_options.shader_model >= 51) - return join(" : register(", space, binding, ", space", space_set, ")"); + if ((flags & resource_binding_flags) == 0) + { + if (hlsl_options.shader_model >= 51) + return join(" : register(", space, binding, ", space", space_set, ")"); + else + return join(" : register(", space, binding, ")"); + } else - return join(" : register(", space, binding, ")"); + return ""; } void CompilerHLSL::emit_modern_uniform(const SPIRVariable &var) @@ -4878,6 +4916,11 @@ VariableID CompilerHLSL::remap_num_workgroups_builtin() return variable_id; } +void CompilerHLSL::set_resource_binding_flags(HLSLBindingFlags flags) +{ + resource_binding_flags = flags; +} + void CompilerHLSL::validate_shader_model() { // Check for nonuniform qualifier. 
diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_hlsl.hpp b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_hlsl.hpp index eb968f0034bd..b0db688b6c61 100644 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_hlsl.hpp +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_hlsl.hpp @@ -41,6 +41,32 @@ struct RootConstants uint32_t space; }; +// For finer control, decorations may be removed from specific resources instead with unset_decoration(). +enum HLSLBindingFlagBits +{ + // Push constant (root constant) resources will be declared as CBVs (b-space) without a register() declaration. + // A register will be automatically assigned by the D3D compiler, but must therefore be reflected in D3D-land. + // Push constants do not normally have a DecorationBinding set, but if they do, this can be used to ignore it. + HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT = 1 << 0, + + // cbuffer resources will be declared as CBVs (b-space) without a register() declaration. + // A register will be automatically assigned, but must be reflected in D3D-land. + HLSL_BINDING_AUTO_CBV_BIT = 1 << 1, + + // All SRVs (t-space) will be declared without a register() declaration. + HLSL_BINDING_AUTO_SRV_BIT = 1 << 2, + + // All UAVs (u-space) will be declared without a register() declaration. + HLSL_BINDING_AUTO_UAV_BIT = 1 << 3, + + // All samplers (s-space) will be declared without a register() declaration. + HLSL_BINDING_AUTO_SAMPLER_BIT = 1 << 4, + + // No resources will be declared with register(). + HLSL_BINDING_AUTO_ALL = 0x7fffffff +}; +using HLSLBindingFlags = uint32_t; + class CompilerHLSL : public CompilerGLSL { public: @@ -116,6 +142,9 @@ public: // so the calling application should declare explicit bindings on this ID before calling compile(). VariableID remap_num_workgroups_builtin(); + // Controls how resource bindings are declared in the output HLSL. 
+ void set_resource_binding_flags(HLSLBindingFlags flags); + private: std::string type_to_glsl(const SPIRType &type, uint32_t id = 0) override; std::string image_type_hlsl(const SPIRType &type, uint32_t id); @@ -149,7 +178,7 @@ private: std::string to_sampler_expression(uint32_t id); std::string to_resource_binding(const SPIRVariable &var); std::string to_resource_binding_sampler(const SPIRVariable &var); - std::string to_resource_register(char space, uint32_t binding, uint32_t set); + std::string to_resource_register(HLSLBindingFlags flags, char space, uint32_t binding, uint32_t set); void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override; void emit_access_chain(const Instruction &instruction); void emit_load(const Instruction &instruction); @@ -221,6 +250,7 @@ private: std::string to_semantic(uint32_t location, spv::ExecutionModel em, spv::StorageClass sc); uint32_t num_workgroups_builtin = 0; + HLSLBindingFlags resource_binding_flags = 0; // Custom root constant layout, which should be emitted // when translating push constant ranges. diff --git a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_msl.cpp b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_msl.cpp index bc79e55bec0e..b16e1e80ece8 100644 --- a/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_msl.cpp +++ b/third_party/rust/spirv-cross-internal/src/vendor/SPIRV-Cross/spirv_msl.cpp @@ -3193,10 +3193,17 @@ string CompilerMSL::unpack_expression_type(string expr_str, const SPIRType &type ".xyz", }; - // std140 array cases for vectors. if (physical_type && is_vector(*physical_type) && is_array(*physical_type) && physical_type->vecsize > type.vecsize && !expression_ends_with(expr_str, swizzle_lut[type.vecsize - 1])) { + // std140 array cases for vectors. 
+ assert(type.vecsize >= 1 && type.vecsize <= 3); + return enclose_expression(expr_str) + swizzle_lut[type.vecsize - 1]; + } + else if (physical_type && is_matrix(*physical_type) && is_vector(type) && + physical_type->vecsize > type.vecsize) + { + // Extract column from padded matrix. assert(type.vecsize >= 1 && type.vecsize <= 3); return enclose_expression(expr_str) + swizzle_lut[type.vecsize - 1]; } @@ -3239,12 +3246,7 @@ string CompilerMSL::unpack_expression_type(string expr_str, const SPIRType &type } else { - // Don't expose "spvUnsafeArray" when unpacking expressions, - // the input "type" will be the unpacked type and might also appear in l-value expressions - use_builtin_array = true; - string unpack_expr = join(type_to_glsl(type), "(", expr_str, ")"); - use_builtin_array = false; - return unpack_expr; + return join(type_to_glsl(type), "(", expr_str, ")"); } } diff --git a/third_party/rust/spirv-cross-internal/src/wrapper.cpp b/third_party/rust/spirv-cross-internal/src/wrapper.cpp index 242b01c82b84..d9c2ddbe28ab 100644 --- a/third_party/rust/spirv-cross-internal/src/wrapper.cpp +++ b/third_party/rust/spirv-cross-internal/src/wrapper.cpp @@ -109,7 +109,7 @@ extern "C" ScInternalResult sc_internal_compiler_msl_compile(const ScInternalCompilerBase *compiler, const char **shader, const spirv_cross::MSLVertexAttr *p_vat_overrides, const size_t vat_override_count, const spirv_cross::MSLResourceBinding *p_res_overrides, const size_t res_override_count, - const MslConstSamplerMapping *p_const_samplers, const size_t const_sampler_count) + const ScMslConstSamplerMapping *p_const_samplers, const size_t const_sampler_count) { INTERNAL_RESULT( do { @@ -247,23 +247,19 @@ extern "C" auto const &comp = *((spirv_cross::Compiler *)compiler); auto const &sc_entry_point_names_and_stages = comp.get_entry_points_and_stages(); auto const sc_size = sc_entry_point_names_and_stages.size(); - auto const &sc_entry_points = std::make_unique(sc_size); - for (uint32_t i = 0; i < 
sc_size; i++) - { - auto const &sc_entry_point = sc_entry_point_names_and_stages[i]; - sc_entry_points[i] = comp.get_entry_point(sc_entry_point.name, sc_entry_point.execution_model); - } *entry_points = (ScEntryPoint *)malloc(sc_size * sizeof(ScEntryPoint)); *size = sc_size; for (uint32_t i = 0; i < sc_size; i++) { - auto const &sc_entry_point = sc_entry_points[i]; - entry_points[i]->name = strdup(sc_entry_point.name.c_str()); - entry_points[i]->execution_model = sc_entry_point.model; - entry_points[i]->work_group_size_x = sc_entry_point.workgroup_size.x; - entry_points[i]->work_group_size_y = sc_entry_point.workgroup_size.y; - entry_points[i]->work_group_size_z = sc_entry_point.workgroup_size.z; + auto const &sc_entry_point = sc_entry_point_names_and_stages[i]; + auto const &sc_spir_entry_point = comp.get_entry_point(sc_entry_point.name, sc_entry_point.execution_model); + auto &entry_point = (*entry_points)[i]; + entry_point.name = strdup(sc_entry_point.name.c_str()); + entry_point.execution_model = sc_spir_entry_point.model; + entry_point.work_group_size_x = sc_spir_entry_point.workgroup_size.x; + entry_point.work_group_size_y = sc_spir_entry_point.workgroup_size.y; + entry_point.work_group_size_z = sc_spir_entry_point.workgroup_size.z; } } while (0);) } @@ -281,9 +277,10 @@ extern "C" for (uint32_t i = 0; i < sc_size; i++) { auto const &sc_active_buffer_range = sc_active_buffer_ranges[i]; - active_buffer_ranges[i]->index = sc_active_buffer_range.index; - active_buffer_ranges[i]->offset = sc_active_buffer_range.offset; - active_buffer_ranges[i]->range = sc_active_buffer_range.range; + auto &active_buffer_range = (*active_buffer_ranges)[i]; + active_buffer_range.index = sc_active_buffer_range.index; + active_buffer_range.offset = sc_active_buffer_range.offset; + active_buffer_range.range = sc_active_buffer_range.range; } } while (0);) } diff --git a/third_party/rust/spirv-cross-internal/src/wrapper.hpp b/third_party/rust/spirv-cross-internal/src/wrapper.hpp index 
994ec2d232eb..f8efd7a46960 100644 --- a/third_party/rust/spirv-cross-internal/src/wrapper.hpp +++ b/third_party/rust/spirv-cross-internal/src/wrapper.hpp @@ -140,11 +140,11 @@ extern "C" #endif #ifdef SPIRV_CROSS_WRAPPER_MSL - typedef struct MslConstSamplerMapping { + typedef struct ScMslConstSamplerMapping { uint32_t desc_set; uint32_t binding; spirv_cross::MSLConstexprSampler sampler; - } MslConstSamplerMapping; + } ScMslConstSamplerMapping; ScInternalResult sc_internal_compiler_msl_new(ScInternalCompilerMsl **compiler, const uint32_t *ir, const size_t size); ScInternalResult sc_internal_compiler_msl_set_options(const ScInternalCompilerMsl *compiler, const ScMslCompilerOptions *options); @@ -152,7 +152,7 @@ extern "C" ScInternalResult sc_internal_compiler_msl_compile(const ScInternalCompilerBase *compiler, const char **shader, const spirv_cross::MSLVertexAttr *p_vat_overrides, const size_t vat_override_count, const spirv_cross::MSLResourceBinding *p_res_overrides, const size_t res_override_count, - const MslConstSamplerMapping *p_const_samplers, const size_t const_sampler_count); + const ScMslConstSamplerMapping *p_const_samplers, const size_t const_sampler_count); #endif #ifdef SPIRV_CROSS_WRAPPER_GLSL diff --git a/third_party/rust/spirv-cross-internal/tests/shaders/multiple_entry_points.cl b/third_party/rust/spirv-cross-internal/tests/shaders/multiple_entry_points.cl new file mode 100644 index 000000000000..adbeb8de38a4 --- /dev/null +++ b/third_party/rust/spirv-cross-internal/tests/shaders/multiple_entry_points.cl @@ -0,0 +1,2 @@ +__kernel void entry_1() {} +__kernel void entry_2() {} diff --git a/third_party/rust/spirv-cross-internal/tests/shaders/multiple_entry_points.cl.spv b/third_party/rust/spirv-cross-internal/tests/shaders/multiple_entry_points.cl.spv new file mode 100644 index 0000000000000000000000000000000000000000..1ba9a7cf8d2e05979ccad23ae8dc1da1ddaa859e GIT binary patch literal 432 zcmYjNNeaS15G<3p#SIY=ZxLT$1QC3If)`Qn8dMT5qL4(;pL-FkiZkL2-Az?*HRC$t 
zDo_H@M~7ag3#VXwG_c$3qxEVVW%(^Voa5+tPm(zO!PDiCW$J0eRVey8DD#5dKzz;9 zXEfpLFW35UAv<(|2W<%t6-Fu15vH&LixaLH>=;~U)Q%yoXEcvEQTgwSykdSptTW&Y zk5M~i0g-fSulSH&ywzc`)nT#KF=eItq(eQbQ*L42>Kk79898Z^Ue!qBi@wSDW@b*a MHTt9fn)YAK2aYBk^Z)<= literal 0 HcmV?d00001 diff --git a/third_party/rust/spirv-cross-internal/tests/shaders/two_ubo.vert b/third_party/rust/spirv-cross-internal/tests/shaders/two_ubo.vert new file mode 100644 index 000000000000..f84c0a5ca76a --- /dev/null +++ b/third_party/rust/spirv-cross-internal/tests/shaders/two_ubo.vert @@ -0,0 +1,20 @@ +#version 310 es + +layout(std140) uniform ubo1 +{ + mat4 a; + float b; + vec4 c[2]; +}; + +layout(std140) uniform ubo2 +{ + float d; + vec3 e; + vec3 f; +}; + +void main() +{ + gl_Position = vec4(a[1][2] + b + c[1].y + d + e.x + f.z); +} diff --git a/third_party/rust/spirv-cross-internal/tests/shaders/two_ubo.vert.spv b/third_party/rust/spirv-cross-internal/tests/shaders/two_ubo.vert.spv new file mode 100644 index 0000000000000000000000000000000000000000..acd236aacea11057f684133172faa784e77cd318 GIT binary patch literal 1480 zcmY+CO;1x%5QdK}1r!Al`B1Ukg5PLGqr}8$?5ZqSG$CtbYY<@@IaZe=V~0e2bcSa$moF8Vt`q4$r2;pM;NW}K~g4$~bs~iwHzp{|cf6RI@ZJ!!>_UQB8b*$Ib9@KVem2s=w75BnB?yvBp zR&|+Yf9LstI*&Zx5gVYs9hh%>hiz1m-^uQUx5T1vo-y;HZ$aL^^2F@xd(n3VRJ;SH zS#j3z+_^y4;k$>i-%p$W-Y2kv_%gAz;%}=se}5^h5_{VMbJCpp?Drb+#->I;eVUt+C zHz}}k-P;*DRa{`R&XhOKx6Lzblk@g5gL^$h6=!&kwV%7~(^H-|QN;g%_}%qjIPV(j zPTta8yvI8#oNFC5-=jkA1}axxaBfxjZ#dT6My(T%H}2wF=Pj)D22tO}Hy`y|`1TNw zckki5vol8BeSGn#dw_4Pc#lb(_YrEHZ+fGseT;7|YMUZrf@A*ZH<^ScYc!T}}q_IAg literal 0 HcmV?d00001 diff --git a/third_party/rust/spirv-cross-internal/tests/spirv_tests.rs b/third_party/rust/spirv-cross-internal/tests/spirv_tests.rs index 216c798052ed..30930da735fc 100644 --- a/third_party/rust/spirv-cross-internal/tests/spirv_tests.rs +++ b/third_party/rust/spirv-cross-internal/tests/spirv_tests.rs @@ -4,16 +4,18 @@ mod common; use crate::common::words_from_bytes; #[test] -fn ast_gets_entry_points() { - let 
module = - spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/simple.vert.spv"))); +fn ast_gets_multiple_entry_points() { + let module = spirv::Module::from_words(words_from_bytes(include_bytes!( + "shaders/multiple_entry_points.cl.spv" + ))); let entry_points = spirv::Ast::::parse(&module) .unwrap() .get_entry_points() .unwrap(); - assert_eq!(entry_points.len(), 1); - assert_eq!(entry_points[0].name, "main"); + assert_eq!(entry_points.len(), 2); + assert!(entry_points.iter().any(|e| e.name == "entry_1")); + assert!(entry_points.iter().any(|e| e.name == "entry_2")); } #[test] @@ -229,7 +231,7 @@ fn ast_sets_member_decoration() { } #[test] -fn as_gets_specialization_constants() { +fn ast_gets_specialization_constants() { let comp = spirv::Module::from_words(words_from_bytes(include_bytes!( "shaders/specialization.comp.spv" ))); @@ -239,7 +241,7 @@ fn as_gets_specialization_constants() { } #[test] -fn as_gets_work_group_size_specialization_constants() { +fn ast_gets_work_group_size_specialization_constants() { let comp = spirv::Module::from_words(words_from_bytes(include_bytes!( "shaders/workgroup.comp.spv" ))); @@ -265,3 +267,57 @@ fn as_gets_work_group_size_specialization_constants() { } ); } + +#[test] +fn ast_gets_active_buffer_ranges() { + let module = + spirv::Module::from_words(words_from_bytes(include_bytes!("shaders/two_ubo.vert.spv"))); + let ast = spirv::Ast::::parse(&module).unwrap(); + + let uniform_buffers = ast.get_shader_resources().unwrap().uniform_buffers; + assert_eq!(uniform_buffers.len(), 2); + + let ubo1 = ast.get_active_buffer_ranges(uniform_buffers[0].id).unwrap(); + assert_eq!( + ubo1, + [ + spirv::BufferRange { + index: 0, + offset: 0, + range: 64, + }, + spirv::BufferRange { + index: 1, + offset: 64, + range: 16, + }, + spirv::BufferRange { + index: 2, + offset: 80, + range: 32, + } + ] + ); + + let ubo2 = ast.get_active_buffer_ranges(uniform_buffers[1].id).unwrap(); + assert_eq!( + ubo2, + [ + spirv::BufferRange { + index: 
0, + offset: 0, + range: 16, + }, + spirv::BufferRange { + index: 1, + offset: 16, + range: 16, + }, + spirv::BufferRange { + index: 2, + offset: 32, + range: 12, + } + ] + ); +} diff --git a/third_party/rust/spirv_cross/.cargo-checksum.json b/third_party/rust/spirv_cross/.cargo-checksum.json index 3ee85576360e..25446a7842d7 100644 --- a/third_party/rust/spirv_cross/.cargo-checksum.json +++ b/third_party/rust/spirv_cross/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"4f16705c4ddd3d88ce861283f056fa71ab6b66d5abd95f659cdef9b215ab0b11","README.md":"396d46be2d3b596a4b2ab869b2647f667100fd581e91490dc61fac4faa5d9147","lib.rs":"8d56ad9d9fffbe139fa7c206b593627dd2dac104ca7d772b4e4eaf0f8d3291fc"},"package":null} \ No newline at end of file +{"files":{"Cargo.toml":"ca73037387b7d6a3301eb8056d6177da2c8b106bb9950aa831e87fad30636781","README.md":"396d46be2d3b596a4b2ab869b2647f667100fd581e91490dc61fac4faa5d9147","lib.rs":"8d56ad9d9fffbe139fa7c206b593627dd2dac104ca7d772b4e4eaf0f8d3291fc"},"package":null} \ No newline at end of file diff --git a/third_party/rust/spirv_cross/Cargo.toml b/third_party/rust/spirv_cross/Cargo.toml index ea3590236a32..d60ea5b65706 100644 --- a/third_party/rust/spirv_cross/Cargo.toml +++ b/third_party/rust/spirv_cross/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "spirv_cross" -version = "0.16.0" +version = "0.18.0" license = "MIT/Apache-2.0" edition = "2018"