Bug 1622846 - Update wgpu and WebGPU IDL for the new copy views API r=webidl,jgilbert,smaug

Updates wgpu and the WebGPU IDL, in particular the ImageCopyXxx types and the render pass attachments.
Adds an explicit mapping for the GPUTextureFormat enum. Our old direct casting was incorrect because the two enums had diverged.
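To illustrate the casting hazard, here is a small self-contained C++ sketch; the two enums are toy stand-ins (the real dom::GPUTextureFormat and ffi::WGPUTextureFormat are much longer), but the shape of the fix matches the ConvertTextureFormat switch added below:

#include <cstdio>
#include <cstdlib>

// Toy stand-ins: imagine the DOM-side list carrying extra depth/stencil
// entries that the wgpu-side enum lacks, so every value after that point
// is shifted by the difference.
enum class DomFormat { Rgba8Unorm, Stencil8, Depth16Unorm, Depth24Plus };
enum class FfiFormat { Rgba8Unorm, Depth24Plus };

// Old approach: positional cast. DomFormat::Depth24Plus (3) is silently
// reinterpreted as whatever FfiFormat value 3 happens to be.
FfiFormat CastFormat(DomFormat f) { return static_cast<FfiFormat>(f); }

// New approach: explicit per-value mapping, mirroring ConvertTextureFormat;
// anything without a wgpu counterpart aborts instead of being misread.
FfiFormat ConvertFormat(DomFormat f) {
  switch (f) {
    case DomFormat::Rgba8Unorm:  return FfiFormat::Rgba8Unorm;
    case DomFormat::Depth24Plus: return FfiFormat::Depth24Plus;
    default:                     std::abort();
  }
}

int main() {
  std::printf("cast: %d  mapped: %d\n",
              static_cast<int>(CastFormat(DomFormat::Depth24Plus)),     // prints 3 (wrong)
              static_cast<int>(ConvertFormat(DomFormat::Depth24Plus))); // prints 1 (right)
}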

Differential Revision: https://phabricator.services.mozilla.com/D110997
Dzmitry Malyshau 2021-04-08 14:35:42 +00:00
Parent 75c83b4a52
Commit 89ced952fc
484 changed files with 49749 additions and 38710 deletions

View file

@ -2,6 +2,11 @@
# It was generated by `mach vendor rust`.
# Please do not edit.
[source."https://github.com/zakarumych/gpu-alloc.git"]
git = "https://github.com/zakarumych/gpu-alloc.git"
replace-with = "vendored-sources"
rev = "2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6"
[source."https://github.com/shravanrn/nix/"]
git = "https://github.com/shravanrn/nix/"
replace-with = "vendored-sources"
@ -100,7 +105,7 @@ rev = "fd4ed671ef495af4dcda4c4cba3ef8d426db8af1"
[source."https://github.com/gfx-rs/naga"]
git = "https://github.com/gfx-rs/naga"
replace-with = "vendored-sources"
tag = "gfx-12"
tag = "gfx-20"
[source."https://github.com/gfx-rs/metal-rs"]
git = "https://github.com/gfx-rs/metal-rs"
@ -110,7 +115,7 @@ rev = "439c986eb7a9b91e88b61def2daa66e4043fcbef"
[source."https://github.com/gfx-rs/gfx"]
git = "https://github.com/gfx-rs/gfx"
replace-with = "vendored-sources"
rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8"
rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486"
[source."https://github.com/gfx-rs/d3d12-rs"]
git = "https://github.com/gfx-rs/d3d12-rs"

89
Cargo.lock generated
View file

@ -85,11 +85,11 @@ dependencies = [
[[package]]
name = "ash"
version = "0.31.0"
version = "0.32.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c69a8137596e84c22d57f3da1b5de1d4230b1742a710091c85f4d7ce50f00f38"
checksum = "06063a002a77d2734631db74e8f4ce7148b77fe522e6bca46f2ae7774fd48112"
dependencies = [
"libloading 0.6.2",
"libloading 0.7.0",
]
[[package]]
@ -647,6 +647,16 @@ dependencies = [
"objc",
]
[[package]]
name = "codespan-reporting"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
dependencies = [
"termcolor",
"unicode-width",
]
[[package]]
name = "comedy"
version = "0.2.0"
@ -1834,7 +1844,7 @@ dependencies = [
[[package]]
name = "gfx-auxil"
version = "0.8.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"fxhash",
"gfx-hal",
@ -1844,7 +1854,7 @@ dependencies = [
[[package]]
name = "gfx-backend-dx11"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"arrayvec",
"bitflags",
@ -1865,7 +1875,7 @@ dependencies = [
[[package]]
name = "gfx-backend-dx12"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"arrayvec",
"bit-set",
@ -1879,13 +1889,14 @@ dependencies = [
"raw-window-handle",
"smallvec",
"spirv_cross",
"thunderdome",
"winapi 0.3.9",
]
[[package]]
name = "gfx-backend-empty"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"gfx-hal",
"log",
@ -1895,7 +1906,7 @@ dependencies = [
[[package]]
name = "gfx-backend-metal"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"arrayvec",
"bitflags",
@ -1920,7 +1931,7 @@ dependencies = [
[[package]]
name = "gfx-backend-vulkan"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"arrayvec",
"ash",
@ -1940,7 +1951,7 @@ dependencies = [
[[package]]
name = "gfx-hal"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"bitflags",
"naga",
@ -2171,20 +2182,17 @@ dependencies = [
[[package]]
name = "gpu-alloc"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7724b9aef57ea36d70faf54e0ee6265f86e41de16bed8333efdeab5b00e16b"
version = "0.4.2"
source = "git+https://github.com/zakarumych/gpu-alloc.git?rev=2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6#2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6"
dependencies = [
"bitflags",
"gpu-alloc-types",
"tracing",
]
[[package]]
name = "gpu-alloc-types"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54804d0d6bc9d7f26db4eaec1ad10def69b599315f487d32c334a80d1efe67a5"
version = "0.2.1"
source = "git+https://github.com/zakarumych/gpu-alloc.git?rev=2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6#2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6"
dependencies = [
"bitflags",
]
@ -2198,7 +2206,6 @@ dependencies = [
"bitflags",
"gpu-descriptor-types",
"hashbrown",
"tracing",
]
[[package]]
@ -3337,10 +3344,11 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.3.1"
source = "git+https://github.com/gfx-rs/naga?tag=gfx-12#fa7d4d8b51d4eeffe9f648d285466637f733a4a1"
source = "git+https://github.com/gfx-rs/naga?tag=gfx-20#0369ee181ed9cd315635fc0e3d99deecdbc72246"
dependencies = [
"bit-set",
"bitflags",
"codespan-reporting",
"fxhash",
"log",
"num-traits",
@ -4031,6 +4039,12 @@ dependencies = [
"uuid",
]
[[package]]
name = "profiling"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c71198452babfbba7419e716d29853c462d59da73c41485ab7dc8b4dc0c4be"
[[package]]
name = "prost"
version = "0.6.1"
@ -4169,7 +4183,7 @@ dependencies = [
[[package]]
name = "range-alloc"
version = "0.1.2"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
[[package]]
name = "raw-cpuid"
@ -5446,38 +5460,6 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860"
[[package]]
name = "tracing"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27"
dependencies = [
"cfg-if 0.1.10",
"pin-project-lite 0.1.4",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f"
dependencies = [
"lazy_static",
]
[[package]]
name = "tracy-rs"
version = "0.1.2"
@ -5937,13 +5919,14 @@ dependencies = [
"gfx-hal",
"gpu-alloc",
"gpu-descriptor",
"log",
"naga",
"parking_lot",
"profiling",
"ron",
"serde",
"smallvec",
"thiserror",
"tracing",
"wgpu-types",
]

View file

@ -22,8 +22,8 @@ GPU_IMPL_CYCLE_COLLECTION(CommandEncoder, mParent, mBridge)
GPU_IMPL_JS_WRAP(CommandEncoder)
void CommandEncoder::ConvertTextureDataLayoutToFFI(
const dom::GPUTextureDataLayout& aLayout,
ffi::WGPUTextureDataLayout* aLayoutFFI) {
const dom::GPUImageDataLayout& aLayout,
ffi::WGPUImageDataLayout* aLayoutFFI) {
*aLayoutFFI = {};
aLayoutFFI->offset = aLayout.mOffset;
aLayoutFFI->bytes_per_row = aLayout.mBytesPerRow;
@ -31,12 +31,13 @@ void CommandEncoder::ConvertTextureDataLayoutToFFI(
}
void CommandEncoder::ConvertTextureCopyViewToFFI(
const dom::GPUTextureCopyView& aView, ffi::WGPUTextureCopyView* aViewFFI) {
const dom::GPUImageCopyTexture& aCopy,
ffi::WGPUImageCopyTexture* aViewFFI) {
*aViewFFI = {};
aViewFFI->texture = aView.mTexture->mId;
aViewFFI->mip_level = aView.mMipLevel;
if (aView.mOrigin.WasPassed()) {
const auto& origin = aView.mOrigin.Value();
aViewFFI->texture = aCopy.mTexture->mId;
aViewFFI->mip_level = aCopy.mMipLevel;
if (aCopy.mOrigin.WasPassed()) {
const auto& origin = aCopy.mOrigin.Value();
if (origin.IsRangeEnforcedUnsignedLongSequence()) {
const auto& seq = origin.GetAsRangeEnforcedUnsignedLongSequence();
aViewFFI->origin.x = seq.Length() > 0 ? seq[0] : 0;
@ -71,18 +72,18 @@ void CommandEncoder::ConvertExtent3DToFFI(const dom::GPUExtent3D& aExtent,
}
}
static ffi::WGPUBufferCopyView ConvertBufferCopyView(
const dom::GPUBufferCopyView& aView) {
ffi::WGPUBufferCopyView view = {};
view.buffer = aView.mBuffer->mId;
CommandEncoder::ConvertTextureDataLayoutToFFI(aView, &view.layout);
static ffi::WGPUImageCopyBuffer ConvertBufferCopyView(
const dom::GPUImageCopyBuffer& aCopy) {
ffi::WGPUImageCopyBuffer view = {};
view.buffer = aCopy.mBuffer->mId;
CommandEncoder::ConvertTextureDataLayoutToFFI(aCopy, &view.layout);
return view;
}
static ffi::WGPUTextureCopyView ConvertTextureCopyView(
const dom::GPUTextureCopyView& aView) {
ffi::WGPUTextureCopyView view = {};
CommandEncoder::ConvertTextureCopyViewToFFI(aView, &view);
static ffi::WGPUImageCopyTexture ConvertTextureCopyView(
const dom::GPUImageCopyTexture& aCopy) {
ffi::WGPUImageCopyTexture view = {};
CommandEncoder::ConvertTextureCopyViewToFFI(aCopy, &view);
return view;
}
@ -123,8 +124,8 @@ void CommandEncoder::CopyBufferToBuffer(const Buffer& aSource,
}
void CommandEncoder::CopyBufferToTexture(
const dom::GPUBufferCopyView& aSource,
const dom::GPUTextureCopyView& aDestination,
const dom::GPUImageCopyBuffer& aSource,
const dom::GPUImageCopyTexture& aDestination,
const dom::GPUExtent3D& aCopySize) {
if (mValid) {
ipc::ByteBuf bb;
@ -135,8 +136,8 @@ void CommandEncoder::CopyBufferToTexture(
}
}
void CommandEncoder::CopyTextureToBuffer(
const dom::GPUTextureCopyView& aSource,
const dom::GPUBufferCopyView& aDestination,
const dom::GPUImageCopyTexture& aSource,
const dom::GPUImageCopyBuffer& aDestination,
const dom::GPUExtent3D& aCopySize) {
if (mValid) {
ipc::ByteBuf bb;
@ -147,8 +148,8 @@ void CommandEncoder::CopyTextureToBuffer(
}
}
void CommandEncoder::CopyTextureToTexture(
const dom::GPUTextureCopyView& aSource,
const dom::GPUTextureCopyView& aDestination,
const dom::GPUImageCopyTexture& aSource,
const dom::GPUImageCopyTexture& aDestination,
const dom::GPUExtent3D& aCopySize) {
if (mValid) {
ipc::ByteBuf bb;

View file

@ -17,25 +17,25 @@ class ErrorResult;
namespace dom {
struct GPUComputePassDescriptor;
struct GPUTextureDataLayout;
class HTMLCanvasElement;
template <typename T>
class Sequence;
struct GPUCommandBufferDescriptor;
class GPUComputePipelineOrGPURenderPipeline;
class RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
struct GPUBufferCopyView;
struct GPUCommandBufferDescriptor;
struct GPUImageCopyBuffer;
struct GPUImageCopyTexture;
struct GPUImageBitmapCopyView;
struct GPUImageDataLayout;
struct GPURenderPassDescriptor;
struct GPUTextureCopyView;
typedef RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict GPUExtent3D;
} // namespace dom
namespace webgpu {
namespace ffi {
struct WGPUComputePass;
struct WGPURenderPass;
struct WGPUTextureDataLayout;
struct WGPUTextureCopyView_TextureId;
struct WGPUImageDataLayout;
struct WGPUImageCopyTexture_TextureId;
struct WGPUExtent3d;
} // namespace ffi
@ -56,11 +56,11 @@ class CommandEncoder final : public ObjectBase, public ChildOf<Device> {
const RawId mId;
static void ConvertTextureDataLayoutToFFI(
const dom::GPUTextureDataLayout& aLayout,
ffi::WGPUTextureDataLayout* aLayoutFFI);
const dom::GPUImageDataLayout& aLayout,
ffi::WGPUImageDataLayout* aLayoutFFI);
static void ConvertTextureCopyViewToFFI(
const dom::GPUTextureCopyView& aView,
ffi::WGPUTextureCopyView_TextureId* aViewFFI);
const dom::GPUImageCopyTexture& aCopy,
ffi::WGPUImageCopyTexture_TextureId* aViewFFI);
static void ConvertExtent3DToFFI(const dom::GPUExtent3D& aExtent,
ffi::WGPUExtent3d* aExtentFFI);
@ -80,14 +80,14 @@ class CommandEncoder final : public ObjectBase, public ChildOf<Device> {
const Buffer& aDestination,
BufferAddress aDestinationOffset,
BufferAddress aSize);
void CopyBufferToTexture(const dom::GPUBufferCopyView& aSource,
const dom::GPUTextureCopyView& aDestination,
void CopyBufferToTexture(const dom::GPUImageCopyBuffer& aSource,
const dom::GPUImageCopyTexture& aDestination,
const dom::GPUExtent3D& aCopySize);
void CopyTextureToBuffer(const dom::GPUTextureCopyView& aSource,
const dom::GPUBufferCopyView& aDestination,
void CopyTextureToBuffer(const dom::GPUImageCopyTexture& aSource,
const dom::GPUImageCopyBuffer& aDestination,
const dom::GPUExtent3D& aCopySize);
void CopyTextureToTexture(const dom::GPUTextureCopyView& aSource,
const dom::GPUTextureCopyView& aDestination,
void CopyTextureToTexture(const dom::GPUImageCopyTexture& aSource,
const dom::GPUImageCopyTexture& aDestination,
const dom::GPUExtent3D& aCopySize);
already_AddRefed<ComputePassEncoder> BeginComputePass(

View file

@ -88,13 +88,13 @@ void Queue::WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset,
}
}
void Queue::WriteTexture(const dom::GPUTextureCopyView& aDestination,
void Queue::WriteTexture(const dom::GPUImageCopyTexture& aDestination,
const dom::ArrayBufferViewOrArrayBuffer& aData,
const dom::GPUTextureDataLayout& aDataLayout,
const dom::GPUImageDataLayout& aDataLayout,
const dom::GPUExtent3D& aSize, ErrorResult& aRv) {
ffi::WGPUTextureCopyView copyView = {};
ffi::WGPUImageCopyTexture copyView = {};
CommandEncoder::ConvertTextureCopyViewToFFI(aDestination, &copyView);
ffi::WGPUTextureDataLayout dataLayout = {};
ffi::WGPUImageDataLayout dataLayout = {};
CommandEncoder::ConvertTextureDataLayoutToFFI(aDataLayout, &dataLayout);
dataLayout.offset = 0; // our Shmem has the contents starting from 0.
ffi::WGPUExtent3d extent = {};

View file

@ -20,8 +20,8 @@ template <typename T>
class Optional;
template <typename T>
class Sequence;
struct GPUTextureCopyView;
struct GPUTextureDataLayout;
struct GPUImageCopyTexture;
struct GPUImageDataLayout;
struct TextureCopyView;
struct TextureDataLayout;
typedef RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict GPUExtent3D;
@ -48,9 +48,9 @@ class Queue final : public ObjectBase, public ChildOf<Device> {
uint64_t aDataOffset, const dom::Optional<uint64_t>& aSize,
ErrorResult& aRv);
void WriteTexture(const dom::GPUTextureCopyView& aDestination,
void WriteTexture(const dom::GPUImageCopyTexture& aDestination,
const dom::ArrayBufferViewOrArrayBuffer& aData,
const dom::GPUTextureDataLayout& aDataLayout,
const dom::GPUImageDataLayout& aDataLayout,
const dom::GPUExtent3D& aSize, ErrorResult& aRv);
private:

View file

@ -54,10 +54,10 @@ ffi::WGPURenderPass* BeginRenderPass(
RawId aEncoderId, const dom::GPURenderPassDescriptor& aDesc) {
ffi::WGPURenderPassDescriptor desc = {};
ffi::WGPUDepthStencilAttachmentDescriptor dsDesc = {};
ffi::WGPURenderPassDepthStencilAttachment dsDesc = {};
if (aDesc.mDepthStencilAttachment.WasPassed()) {
const auto& dsa = aDesc.mDepthStencilAttachment.Value();
dsDesc.attachment = dsa.mView->mId;
dsDesc.view = dsa.mView->mId;
if (dsa.mDepthLoadValue.IsFloat()) {
dsDesc.depth.load_op = ffi::WGPULoadOp_Clear;
@ -83,15 +83,15 @@ ffi::WGPURenderPass* BeginRenderPass(
desc.depth_stencil_attachment = &dsDesc;
}
std::array<ffi::WGPUColorAttachmentDescriptor, WGPUMAX_COLOR_TARGETS>
std::array<ffi::WGPURenderPassColorAttachment, WGPUMAX_COLOR_TARGETS>
colorDescs = {};
desc.color_attachments = colorDescs.data();
desc.color_attachments_length = aDesc.mColorAttachments.Length();
for (size_t i = 0; i < aDesc.mColorAttachments.Length(); ++i) {
const auto& ca = aDesc.mColorAttachments[i];
ffi::WGPUColorAttachmentDescriptor& cd = colorDescs[i];
cd.attachment = ca.mView->mId;
ffi::WGPURenderPassColorAttachment& cd = colorDescs[i];
cd.view = ca.mView->mId;
cd.channel.store_op = ConvertStoreOp(ca.mStoreOp);
if (ca.mResolveTarget.WasPassed()) {

View file

@ -59,10 +59,6 @@ static Maybe<uint8_t> GetBytesPerBlock(dom::GPUTextureFormat format) {
case dom::GPUTextureFormat::Rgba32sint:
case dom::GPUTextureFormat::Rgba32float:
return Some<uint8_t>(16u);
case dom::GPUTextureFormat::Stencil8:
return Some<uint8_t>(1u);
case dom::GPUTextureFormat::Depth16unorm:
return Some<uint8_t>(2u);
case dom::GPUTextureFormat::Depth32float:
return Some<uint8_t>(4u);
case dom::GPUTextureFormat::Bc1_rgba_unorm:
@ -83,8 +79,6 @@ static Maybe<uint8_t> GetBytesPerBlock(dom::GPUTextureFormat format) {
return Some<uint8_t>(16u);
case dom::GPUTextureFormat::Depth24plus:
case dom::GPUTextureFormat::Depth24plus_stencil8:
case dom::GPUTextureFormat::Depth24unorm_stencil8:
case dom::GPUTextureFormat::Depth32float_stencil8:
case dom::GPUTextureFormat::EndGuard_:
return Nothing();
}

View file

@ -24,6 +24,119 @@ static ffi::WGPUCompareFunction ConvertCompareFunction(
return ffi::WGPUCompareFunction(UnderlyingValue(aCompare) + 1);
}
static ffi::WGPUTextureFormat ConvertTextureFormat(
const dom::GPUTextureFormat& aFormat) {
switch (aFormat) {
case dom::GPUTextureFormat::R8unorm:
return ffi::WGPUTextureFormat_R8Unorm;
case dom::GPUTextureFormat::R8snorm:
return ffi::WGPUTextureFormat_R8Snorm;
case dom::GPUTextureFormat::R8uint:
return ffi::WGPUTextureFormat_R8Uint;
case dom::GPUTextureFormat::R8sint:
return ffi::WGPUTextureFormat_R8Sint;
case dom::GPUTextureFormat::R16uint:
return ffi::WGPUTextureFormat_R16Uint;
case dom::GPUTextureFormat::R16sint:
return ffi::WGPUTextureFormat_R16Sint;
case dom::GPUTextureFormat::R16float:
return ffi::WGPUTextureFormat_R16Float;
case dom::GPUTextureFormat::Rg8unorm:
return ffi::WGPUTextureFormat_Rg8Unorm;
case dom::GPUTextureFormat::Rg8snorm:
return ffi::WGPUTextureFormat_Rg8Snorm;
case dom::GPUTextureFormat::Rg8uint:
return ffi::WGPUTextureFormat_Rg8Uint;
case dom::GPUTextureFormat::Rg8sint:
return ffi::WGPUTextureFormat_Rg8Sint;
case dom::GPUTextureFormat::R32uint:
return ffi::WGPUTextureFormat_R32Uint;
case dom::GPUTextureFormat::R32sint:
return ffi::WGPUTextureFormat_R32Sint;
case dom::GPUTextureFormat::R32float:
return ffi::WGPUTextureFormat_R32Float;
case dom::GPUTextureFormat::Rg16uint:
return ffi::WGPUTextureFormat_Rg16Uint;
case dom::GPUTextureFormat::Rg16sint:
return ffi::WGPUTextureFormat_Rg16Sint;
case dom::GPUTextureFormat::Rg16float:
return ffi::WGPUTextureFormat_Rg16Float;
case dom::GPUTextureFormat::Rgba8unorm:
return ffi::WGPUTextureFormat_Rgba8Unorm;
case dom::GPUTextureFormat::Rgba8unorm_srgb:
return ffi::WGPUTextureFormat_Rgba8UnormSrgb;
case dom::GPUTextureFormat::Rgba8snorm:
return ffi::WGPUTextureFormat_Rgba8Snorm;
case dom::GPUTextureFormat::Rgba8uint:
return ffi::WGPUTextureFormat_Rgba8Uint;
case dom::GPUTextureFormat::Rgba8sint:
return ffi::WGPUTextureFormat_Rgba8Sint;
case dom::GPUTextureFormat::Bgra8unorm:
return ffi::WGPUTextureFormat_Bgra8Unorm;
case dom::GPUTextureFormat::Bgra8unorm_srgb:
return ffi::WGPUTextureFormat_Bgra8UnormSrgb;
case dom::GPUTextureFormat::Rgb10a2unorm:
return ffi::WGPUTextureFormat_Rgb10a2Unorm;
case dom::GPUTextureFormat::Rg11b10float:
return ffi::WGPUTextureFormat_Rg11b10Float;
case dom::GPUTextureFormat::Rg32uint:
return ffi::WGPUTextureFormat_Rg32Uint;
case dom::GPUTextureFormat::Rg32sint:
return ffi::WGPUTextureFormat_Rg32Sint;
case dom::GPUTextureFormat::Rg32float:
return ffi::WGPUTextureFormat_Rg32Float;
case dom::GPUTextureFormat::Rgba16uint:
return ffi::WGPUTextureFormat_Rgba16Uint;
case dom::GPUTextureFormat::Rgba16sint:
return ffi::WGPUTextureFormat_Rgba16Sint;
case dom::GPUTextureFormat::Rgba16float:
return ffi::WGPUTextureFormat_Rgba16Float;
case dom::GPUTextureFormat::Rgba32uint:
return ffi::WGPUTextureFormat_Rgba32Uint;
case dom::GPUTextureFormat::Rgba32sint:
return ffi::WGPUTextureFormat_Rgba32Sint;
case dom::GPUTextureFormat::Rgba32float:
return ffi::WGPUTextureFormat_Rgba32Float;
case dom::GPUTextureFormat::Depth32float:
return ffi::WGPUTextureFormat_Depth32Float;
case dom::GPUTextureFormat::Bc1_rgba_unorm:
return ffi::WGPUTextureFormat_Bc1RgbaUnorm;
case dom::GPUTextureFormat::Bc1_rgba_unorm_srgb:
return ffi::WGPUTextureFormat_Bc1RgbaUnormSrgb;
case dom::GPUTextureFormat::Bc4_r_unorm:
return ffi::WGPUTextureFormat_Bc4RUnorm;
case dom::GPUTextureFormat::Bc4_r_snorm:
return ffi::WGPUTextureFormat_Bc4RSnorm;
case dom::GPUTextureFormat::Bc2_rgba_unorm:
return ffi::WGPUTextureFormat_Bc2RgbaUnorm;
case dom::GPUTextureFormat::Bc2_rgba_unorm_srgb:
return ffi::WGPUTextureFormat_Bc2RgbaUnormSrgb;
case dom::GPUTextureFormat::Bc3_rgba_unorm:
return ffi::WGPUTextureFormat_Bc3RgbaUnorm;
case dom::GPUTextureFormat::Bc3_rgba_unorm_srgb:
return ffi::WGPUTextureFormat_Bc3RgbaUnormSrgb;
case dom::GPUTextureFormat::Bc5_rg_unorm:
return ffi::WGPUTextureFormat_Bc5RgUnorm;
case dom::GPUTextureFormat::Bc5_rg_snorm:
return ffi::WGPUTextureFormat_Bc5RgSnorm;
case dom::GPUTextureFormat::Bc6h_rgb_ufloat:
return ffi::WGPUTextureFormat_Bc6hRgbUfloat;
case dom::GPUTextureFormat::Bc6h_rgb_float:
return ffi::WGPUTextureFormat_Bc6hRgbSfloat;
case dom::GPUTextureFormat::Bc7_rgba_unorm:
return ffi::WGPUTextureFormat_Bc7RgbaUnorm;
case dom::GPUTextureFormat::Bc7_rgba_unorm_srgb:
return ffi::WGPUTextureFormat_Bc7RgbaUnormSrgb;
case dom::GPUTextureFormat::Depth24plus:
return ffi::WGPUTextureFormat_Depth24Plus;
case dom::GPUTextureFormat::Depth24plus_stencil8:
return ffi::WGPUTextureFormat_Depth24PlusStencil8;
case dom::GPUTextureFormat::EndGuard_:
MOZ_ASSERT_UNREACHABLE();
}
MOZ_CRASH("unexpected texture format enum");
}
static ffi::WGPUClient* initialize() {
ffi::WGPUInfrastructure infra = ffi::wgpu_client_new();
return infra.client;
@ -150,7 +263,7 @@ RawId WebGPUChild::DeviceCreateTexture(RawId aSelfId,
desc.mip_level_count = aDesc.mMipLevelCount;
desc.sample_count = aDesc.mSampleCount;
desc.dimension = ffi::WGPUTextureDimension(aDesc.mDimension);
desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
desc.format = ConvertTextureFormat(aDesc.mFormat);
desc.usage = aDesc.mUsage;
ByteBuf bb;
@ -174,7 +287,7 @@ RawId WebGPUChild::TextureCreateView(
ffi::WGPUTextureFormat format = ffi::WGPUTextureFormat_Sentinel;
if (aDesc.mFormat.WasPassed()) {
format = ffi::WGPUTextureFormat(aDesc.mFormat.Value());
format = ConvertTextureFormat(aDesc.mFormat.Value());
desc.format = &format;
}
ffi::WGPUTextureViewDimension dimension =
@ -186,7 +299,7 @@ RawId WebGPUChild::TextureCreateView(
desc.aspect = ffi::WGPUTextureAspect(aDesc.mAspect);
desc.base_mip_level = aDesc.mBaseMipLevel;
desc.level_count =
desc.mip_level_count =
aDesc.mMipLevelCount.WasPassed() ? aDesc.mMipLevelCount.Value() : 0;
desc.base_array_layer = aDesc.mBaseArrayLayer;
desc.array_layer_count =
@ -301,7 +414,7 @@ RawId WebGPUChild::DeviceCreateBindGroupLayout(
if (entry.mStorageTexture.WasPassed()) {
const auto& texture = entry.mStorageTexture.Value();
data.dim = ffi::WGPUTextureViewDimension(texture.mViewDimension);
data.format = ffi::WGPUTextureFormat(texture.mFormat);
data.format = ConvertTextureFormat(texture.mFormat);
}
optional.AppendElement(data);
}
@ -530,7 +643,7 @@ static ffi::WGPUStencilFaceState ConvertStencilFaceState(
static ffi::WGPUDepthStencilState ConvertDepthStencilState(
const dom::GPUDepthStencilState& aDesc) {
ffi::WGPUDepthStencilState desc = {};
desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
desc.format = ConvertTextureFormat(aDesc.mFormat);
desc.depth_write_enabled = aDesc.mDepthWriteEnabled;
desc.depth_compare = ConvertCompareFunction(aDesc.mDepthCompare);
desc.stencil.front = ConvertStencilFaceState(aDesc.mStencilFront);
@ -612,7 +725,7 @@ RawId WebGPUChild::DeviceCreateRenderPipeline(
// so that we can have non-stale pointers into it.
for (const auto& colorState : stage.mTargets) {
ffi::WGPUColorTargetState desc = {};
desc.format = ffi::WGPUTextureFormat(colorState.mFormat);
desc.format = ConvertTextureFormat(colorState.mFormat);
desc.write_mask = colorState.mWriteMask;
colorStates.AppendElement(desc);
ffi::WGPUBlendState bs = {};

View file

@ -579,15 +579,15 @@ ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
}
}
const ffi::WGPUTextureCopyView texView = {
const ffi::WGPUImageCopyTexture texView = {
aTextureId,
};
const ffi::WGPUTextureDataLayout bufLayout = {
const ffi::WGPUImageDataLayout bufLayout = {
0,
data->mSourcePitch,
0,
};
const ffi::WGPUBufferCopyView bufView = {
const ffi::WGPUImageCopyBuffer bufView = {
bufferId,
bufLayout,
};

View file

@ -326,8 +326,8 @@ enum GPUTextureFormat {
"rgba32float",
// Depth and stencil formats
"stencil8",
"depth16unorm",
//"stencil8", //TODO
//"depth16unorm",
"depth24plus",
"depth24plus-stencil8",
"depth32float",
@ -350,10 +350,10 @@ enum GPUTextureFormat {
"bc7-rgba-unorm-srgb",
// "depth24unorm-stencil8" feature
"depth24unorm-stencil8",
//"depth24unorm-stencil8",
// "depth32float-stencil8" feature
"depth32float-stencil8",
//"depth32float-stencil8",
};
typedef [EnforceRange] unsigned long GPUTextureUsageFlags;
@ -741,6 +741,8 @@ dictionary GPUPrimitiveState {
GPUIndexFormat stripIndexFormat;
GPUFrontFace frontFace = "ccw";
GPUCullMode cullMode = "none";
// Enable depth clamping (requires "depth-clamping" feature)
boolean clampDepth = false;
};
dictionary GPUMultisampleState {
@ -840,9 +842,6 @@ dictionary GPUDepthStencilState {
GPUDepthBias depthBias = 0;
float depthBiasSlopeScale = 0;
float depthBiasClamp = 0;
// Enable depth clamping (requires "depth-clamping" feature)
boolean clampDepth = false;
};
dictionary GPURenderPipelineDescriptor : GPUPipelineDescriptorBase {
@ -875,15 +874,15 @@ enum GPUStoreOp {
"clear"
};
dictionary GPURenderPassColorAttachmentDescriptor {
dictionary GPURenderPassColorAttachment {
required GPUTextureView view;
GPUTextureView resolveTarget;
required (GPULoadOp or GPUColor) loadValue;
GPUStoreOp storeOp = "store";
required GPUStoreOp storeOp;
};
dictionary GPURenderPassDepthStencilAttachmentDescriptor {
dictionary GPURenderPassDepthStencilAttachment {
required GPUTextureView view;
required (GPULoadOp or float) depthLoadValue;
@ -894,25 +893,26 @@ dictionary GPURenderPassDepthStencilAttachmentDescriptor {
};
dictionary GPURenderPassDescriptor : GPUObjectDescriptorBase {
required sequence<GPURenderPassColorAttachmentDescriptor> colorAttachments;
GPURenderPassDepthStencilAttachmentDescriptor depthStencilAttachment;
required sequence<GPURenderPassColorAttachment> colorAttachments;
GPURenderPassDepthStencilAttachment depthStencilAttachment;
GPUQuerySet occlusionQuerySet;
};
dictionary GPUTextureDataLayout {
dictionary GPUImageDataLayout {
GPUSize64 offset = 0;
required GPUSize32 bytesPerRow;
GPUSize32 rowsPerImage = 0;
};
dictionary GPUBufferCopyView : GPUTextureDataLayout {
dictionary GPUImageCopyBuffer : GPUImageDataLayout {
required GPUBuffer buffer;
};
dictionary GPUTextureCopyView {
dictionary GPUImageCopyTexture {
required GPUTexture texture;
GPUIntegerCoordinate mipLevel = 0;
GPUOrigin3D origin;
GPUTextureAspect aspect = "all";
};
dictionary GPUImageBitmapCopyView {
@ -939,24 +939,24 @@ interface GPUCommandEncoder {
GPUSize64 size);
void copyBufferToTexture(
GPUBufferCopyView source,
GPUTextureCopyView destination,
GPUImageCopyBuffer source,
GPUImageCopyTexture destination,
GPUExtent3D copySize);
void copyTextureToBuffer(
GPUTextureCopyView source,
GPUBufferCopyView destination,
GPUImageCopyTexture source,
GPUImageCopyBuffer destination,
GPUExtent3D copySize);
void copyTextureToTexture(
GPUTextureCopyView source,
GPUTextureCopyView destination,
GPUImageCopyTexture source,
GPUImageCopyTexture destination,
GPUExtent3D copySize);
/*
void copyImageBitmapToTexture(
GPUImageBitmapCopyView source,
GPUTextureCopyView destination,
GPUImageCopyTexture destination,
GPUExtent3D copySize);
*/
@ -1137,9 +1137,9 @@ interface GPUQueue {
[Throws]
void writeTexture(
GPUTextureCopyView destination,
GPUImageCopyTexture destination,
BufferSource data,
GPUTextureDataLayout dataLayout,
GPUImageDataLayout dataLayout,
GPUExtent3D size);
};
GPUQueue includes GPUObjectBase;

16
gfx/wgpu/.github/workflows/ci.yml vendored
View file

@ -113,3 +113,19 @@ jobs:
run: cargo clippy
- if: matrix.channel == 'nightly'
run: cargo test -- --nocapture
lint:
name: Clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- run: rustup component add clippy
- uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -D warnings

271
gfx/wgpu/Cargo.lock generated
View file

@ -36,15 +36,6 @@ dependencies = [
"xml-rs",
]
[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi 0.3.9",
]
[[package]]
name = "arrayvec"
version = "0.5.2"
@ -56,11 +47,11 @@ dependencies = [
[[package]]
name = "ash"
version = "0.31.0"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c69a8137596e84c22d57f3da1b5de1d4230b1742a710091c85f4d7ce50f00f38"
checksum = "77ea56be8250318e64923c7e65b82a35b5c29dfb6dc1c7d1c0b288c4b1bbb084"
dependencies = [
"libloading 0.6.5",
"libloading 0.7.0",
]
[[package]]
@ -162,19 +153,6 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
[[package]]
name = "chrono"
version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
dependencies = [
"libc",
"num-integer",
"num-traits",
"time",
"winapi 0.3.9",
]
[[package]]
name = "cloudabi"
version = "0.1.0"
@ -215,6 +193,16 @@ dependencies = [
"objc",
]
[[package]]
name = "codespan-reporting"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
dependencies = [
"termcolor",
"unicode-width",
]
[[package]]
name = "copyless"
version = "0.1.5"
@ -477,7 +465,7 @@ dependencies = [
[[package]]
name = "gfx-auxil"
version = "0.8.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"fxhash",
"gfx-hal",
@ -487,7 +475,7 @@ dependencies = [
[[package]]
name = "gfx-backend-dx11"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"arrayvec",
"bitflags",
@ -508,7 +496,7 @@ dependencies = [
[[package]]
name = "gfx-backend-dx12"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"arrayvec",
"bit-set",
@ -522,13 +510,14 @@ dependencies = [
"raw-window-handle",
"smallvec",
"spirv_cross",
"thunderdome",
"winapi 0.3.9",
]
[[package]]
name = "gfx-backend-empty"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"gfx-hal",
"log",
@ -538,7 +527,7 @@ dependencies = [
[[package]]
name = "gfx-backend-metal"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"arrayvec",
"bitflags",
@ -563,7 +552,7 @@ dependencies = [
[[package]]
name = "gfx-backend-vulkan"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"arrayvec",
"ash",
@ -583,7 +572,7 @@ dependencies = [
[[package]]
name = "gfx-hal"
version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
dependencies = [
"bitflags",
"naga",
@ -593,20 +582,17 @@ dependencies = [
[[package]]
name = "gpu-alloc"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7724b9aef57ea36d70faf54e0ee6265f86e41de16bed8333efdeab5b00e16b"
version = "0.4.2"
source = "git+https://github.com/zakarumych/gpu-alloc.git?rev=2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6#2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6"
dependencies = [
"bitflags",
"gpu-alloc-types",
"tracing",
]
[[package]]
name = "gpu-alloc-types"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54804d0d6bc9d7f26db4eaec1ad10def69b599315f487d32c334a80d1efe67a5"
version = "0.2.1"
source = "git+https://github.com/zakarumych/gpu-alloc.git?rev=2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6#2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6"
dependencies = [
"bitflags",
]
@ -620,7 +606,6 @@ dependencies = [
"bitflags",
"gpu-descriptor-types",
"hashbrown",
"tracing",
]
[[package]]
@ -696,12 +681,6 @@ dependencies = [
"libc",
]
[[package]]
name = "itoa"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"
[[package]]
name = "jni-sys"
version = "0.3.0"
@ -801,8 +780,6 @@ dependencies = [
"cfg-if 0.1.10",
"generator",
"scoped-tls",
"serde",
"serde_json",
]
[[package]]
@ -814,15 +791,6 @@ dependencies = [
"libc",
]
[[package]]
name = "matchers"
version = "0.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1"
dependencies = [
"regex-automata",
]
[[package]]
name = "maybe-uninit"
version = "2.0.0"
@ -903,10 +871,11 @@ dependencies = [
[[package]]
name = "naga"
version = "0.3.1"
source = "git+https://github.com/gfx-rs/naga?tag=gfx-12#fa7d4d8b51d4eeffe9f648d285466637f733a4a1"
source = "git+https://github.com/gfx-rs/naga?tag=gfx-20#0369ee181ed9cd315635fc0e3d99deecdbc72246"
dependencies = [
"bit-set",
"bitflags",
"codespan-reporting",
"fxhash",
"log",
"num-traits",
@ -993,16 +962,6 @@ dependencies = [
"version_check",
]
[[package]]
name = "num-integer"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
dependencies = [
"autocfg",
"num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.14"
@ -1110,12 +1069,6 @@ dependencies = [
"indexmap",
]
[[package]]
name = "pin-project-lite"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b"
[[package]]
name = "pkg-config"
version = "0.3.19"
@ -1133,7 +1086,6 @@ dependencies = [
"ron",
"serde",
"wgpu-core",
"wgpu-subscriber",
"wgpu-types",
"winit",
]
@ -1156,6 +1108,12 @@ dependencies = [
"unicode-xid",
]
[[package]]
name = "profiling"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c71198452babfbba7419e716d29853c462d59da73c41485ab7dc8b4dc0c4be"
[[package]]
name = "quote"
version = "1.0.7"
@ -1168,7 +1126,7 @@ dependencies = [
[[package]]
name = "range-alloc"
version = "0.1.2"
source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
source = "git+https://github.com/gfx-rs/gfx?rev=3ee1ca9ba486b166a52765024d8d149cbb28d486#3ee1ca9ba486b166a52765024d8d149cbb28d486"
[[package]]
name = "raw-window-handle"
@ -1197,16 +1155,6 @@ dependencies = [
"thread_local",
]
[[package]]
name = "regex-automata"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
dependencies = [
"byteorder",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.21"
@ -1264,12 +1212,6 @@ dependencies = [
"owned_ttf_parser",
]
[[package]]
name = "ryu"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
[[package]]
name = "same-file"
version = "1.0.6"
@ -1326,27 +1268,6 @@ dependencies = [
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "sharded-slab"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127"
dependencies = [
"lazy_static",
"loom",
]
[[package]]
name = "slab"
version = "0.4.2"
@ -1454,17 +1375,6 @@ dependencies = [
"syn",
]
[[package]]
name = "thread-id"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1"
dependencies = [
"libc",
"redox_syscall",
"winapi 0.3.9",
]
[[package]]
name = "thread_local"
version = "1.0.1"
@ -1480,17 +1390,6 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7572415bd688d401c52f6e36f4c8e805b9ae1622619303b9fa835d531db0acae"
[[package]]
name = "time"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
dependencies = [
"libc",
"wasi",
"winapi 0.3.9",
]
[[package]]
name = "toml"
version = "0.5.7"
@ -1500,87 +1399,18 @@ dependencies = [
"serde",
]
[[package]]
name = "tracing"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27"
dependencies = [
"cfg-if 0.1.10",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f"
dependencies = [
"lazy_static",
]
[[package]]
name = "tracing-log"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9"
dependencies = [
"lazy_static",
"log",
"tracing-core",
]
[[package]]
name = "tracing-serde"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b"
dependencies = [
"serde",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401"
dependencies = [
"ansi_term",
"chrono",
"lazy_static",
"matchers",
"regex",
"serde",
"serde_json",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log",
"tracing-serde",
]
[[package]]
name = "ttf-parser"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e5d7cd7ab3e47dda6e56542f4bbf3824c15234958c6e1bd6aaa347e93499fdc"
[[package]]
name = "unicode-width"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
[[package]]
name = "unicode-xid"
version = "0.2.1"
@ -1604,12 +1434,6 @@ dependencies = [
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.10.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
[[package]]
name = "wasm-bindgen"
version = "0.2.60"
@ -1754,30 +1578,19 @@ dependencies = [
"gfx-hal",
"gpu-alloc",
"gpu-descriptor",
"log",
"loom",
"naga",
"parking_lot",
"profiling",
"raw-window-handle",
"ron",
"serde",
"smallvec",
"thiserror",
"tracing",
"wgpu-types",
]
[[package]]
name = "wgpu-subscriber"
version = "0.1.0"
source = "git+https://github.com/gfx-rs/subscriber.git?rev=cdc9feb53f152f9c41905ed9efeff2c1ed214361#cdc9feb53f152f9c41905ed9efeff2c1ed214361"
dependencies = [
"parking_lot",
"thread-id",
"tracing",
"tracing-log",
"tracing-subscriber",
]
[[package]]
name = "wgpu-types"
version = "0.7.0"

View file

@ -33,15 +33,5 @@ path = "../wgpu-core"
package = "wgpu-core"
features = ["replay", "raw-window-handle"]
#[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies.gfx-backend-metal]
#git = "https://github.com/gfx-rs/gfx"
#rev = "" # insert revision here
#features = ["auto-capture"]
[dependencies.wgpu-subscriber]
git = "https://github.com/gfx-rs/subscriber.git"
rev = "cdc9feb53f152f9c41905ed9efeff2c1ed214361"
version = "0.1"
[dev-dependencies]
serde = "1"

View file

@ -17,12 +17,7 @@ fn main() {
#[cfg(feature = "winit")]
use winit::{event_loop::EventLoop, window::WindowBuilder};
wgpu_subscriber::initialize_default_subscriber(
std::env::var("WGPU_CHROME_TRACE")
.as_ref()
.map(Path::new)
.ok(),
);
env_logger::init();
#[cfg(feature = "renderdoc")]
#[cfg_attr(feature = "winit", allow(unused))]
@ -144,8 +139,12 @@ fn main() {
desc.height,
));
resize_desc = Some(desc);
break;
} else {
gfx_select!(device => global.device_create_swap_chain(device, surface, &desc)).unwrap();
let (_, error) = gfx_select!(device => global.device_create_swap_chain(device, surface, &desc));
if let Some(e) = error {
panic!("{:?}", e);
}
}
}
Some(trace::Action::PresentSwapChain(id)) => {
@ -163,7 +162,10 @@ fn main() {
Event::WindowEvent { event, .. } => match event {
WindowEvent::Resized(_) => {
if let Some(desc) = resize_desc.take() {
gfx_select!(device => global.device_create_swap_chain(device, surface, &desc)).unwrap();
let (_, error) = gfx_select!(device => global.device_create_swap_chain(device, surface, &desc));
if let Some(e) = error {
panic!("{:?}", e);
}
}
}
WindowEvent::KeyboardInput {

View file

@ -1,6 +1,3 @@
[[builtin(global_invocation_id)]]
var global_id: vec3<u32>;
[[block]]
struct InOutBuffer {
data: [[stride(4)]] array<u32>;
@ -10,6 +7,6 @@ struct InOutBuffer {
var<storage> buffer: [[access(read_write)]] InOutBuffer;
[[stage(compute), workgroup_size(1)]]
fn main() {
fn main([[builtin(global_invocation_id)]] global_id: vec3<u32>) {
buffer.data[global_id.x] = buffer.data[global_id.x] + global_id.x;
}

View file

@ -96,7 +96,7 @@
),
target_colors: [
(
attachment: Id(0, 1, Empty),
view: Id(0, 1, Empty),
resolve_target: None,
channel: (
load_op: Clear,
@ -123,8 +123,8 @@
buffer: Id(0, 1, Empty),
layout: (
offset: 0,
bytes_per_row: 256,
rows_per_image: 64,
bytes_per_row: Some(256),
rows_per_image: Some(64),
),
),
size: (

View file

@ -1,24 +1,16 @@
[[builtin(vertex_index)]]
var<in> in_vertex_index: u32;
[[builtin(position)]]
var<out> out_pos: vec4<f32>;
[[stage(vertex)]]
fn vs_main() {
fn vs_main([[builtin(vertex_index)]] vertex_index: u32) -> [[builtin(position)]] vec4<f32> {
// hacky way to draw a large triangle
var tmp1: i32 = i32(in_vertex_index) / 2;
var tmp2: i32 = i32(in_vertex_index) & 1;
var pos: vec2<f32> = vec2<f32>(
const tmp1 = i32(vertex_index) / 2;
const tmp2 = i32(vertex_index) & 1;
const pos = vec2<f32>(
f32(tmp1) * 4.0 - 1.0,
f32(tmp2) * 4.0 - 1.0
);
out_pos = vec4<f32>(pos, 0.0, 1.0);
return vec4<f32>(pos, 0.0, 1.0);
}
[[location(0)]]
var<out> out_color: vec4<f32>;
[[stage(fragment)]]
fn fs_main() {
out_color = vec4<f32>(1.0, 1.0, 1.0, 1.0);
fn fs_main() -> [[location(0)]] vec4<f32> {
return vec4<f32>(1.0, 1.0, 1.0, 1.0);
}

View file

@ -127,10 +127,10 @@ impl Test<'_> {
for expect in self.expectations {
println!("\t\t\tChecking {}", expect.name);
let buffer = wgc::id::TypedId::zip(expect.buffer.index, expect.buffer.epoch, backend);
let ptr =
wgc::gfx_select!(device => global.buffer_get_mapped_range(buffer, expect.offset, None))
let (ptr, size) =
wgc::gfx_select!(device => global.buffer_get_mapped_range(buffer, expect.offset, wgt::BufferSize::new(expect.data.len() as wgt::BufferAddress)))
.unwrap();
let contents = unsafe { slice::from_raw_parts(ptr, expect.data.len()) };
let contents = unsafe { slice::from_raw_parts(ptr, size as usize) };
let expected_data = match expect.data {
ExpectedData::Raw(vec) => vec,
ExpectedData::File(name, size) => {
@ -217,12 +217,7 @@ impl Corpus {
#[test]
fn test_api() {
wgpu_subscriber::initialize_default_subscriber(
std::env::var("WGPU_CHROME_TRACE")
.as_ref()
.map(Path::new)
.ok(),
);
env_logger::init();
Corpus::run_from(PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/data/all.ron"))
}

View file

@ -27,39 +27,41 @@ arrayvec = "0.5"
bitflags = "1.0"
copyless = "0.1"
fxhash = "0.2"
log = "0.4"
parking_lot = "0.11"
profiling = { version = "0.1.10", default-features = false } # Need 0.1.10+ to prevent compile errors with proc macros disabled
raw-window-handle = { version = "0.3", optional = true }
ron = { version = "0.6", optional = true }
serde = { version = "1.0", features = ["serde_derive"], optional = true }
smallvec = "1"
tracing = { version = "0.1", default-features = false, features = ["std"] }
thiserror = "1"
gpu-alloc = { version = "0.3", features = ["tracing"] }
gpu-descriptor = { version = "0.1", features = ["tracing"] }
# Update to 0.4 when it will be available
gpu-alloc = { git = "https://github.com/zakarumych/gpu-alloc.git", rev = "2cd1ad650cdd24d1647b6041f77ced0cbf1ff2a6" }
gpu-descriptor = { version = "0.1" }
hal = { package = "gfx-hal", git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
gfx-backend-empty = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
hal = { package = "gfx-hal", git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486" }
gfx-backend-empty = { git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486" }
[target.'cfg(all(not(target_arch = "wasm32"), all(unix, not(target_os = "ios"), not(target_os = "macos"))))'.dependencies]
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8", features = ["naga"] }
#gfx-backend-gl = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486", features = ["naga"] }
#gfx-backend-gl = { git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486" }
[target.'cfg(all(not(target_arch = "wasm32"), any(target_os = "ios", target_os = "macos")))'.dependencies]
gfx-backend-metal = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8", optional = true }
gfx-backend-metal = { git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486" }
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486", optional = true }
[target.'cfg(all(not(target_arch = "wasm32"), windows))'.dependencies]
gfx-backend-dx12 = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
gfx-backend-dx11 = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8", features = ["naga"] }
gfx-backend-dx12 = { git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486" }
gfx-backend-dx11 = { git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486" }
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486", features = ["naga"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
#gfx-backend-gl = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
#gfx-backend-gl = { git = "https://github.com/gfx-rs/gfx", rev = "3ee1ca9ba486b166a52765024d8d149cbb28d486" }
[dependencies.naga]
git = "https://github.com/gfx-rs/naga"
tag = "gfx-12"
tag = "gfx-20"
features = ["spv-in", "spv-out", "wgsl-in"]
[dependencies.wgt]

View file

@ -32,7 +32,7 @@ impl<B: hal::Backend> CommandPool<B> {
for i in (0..self.pending.len()).rev() {
if self.pending[i].1 <= last_done_index {
let (cmd_buf, index) = self.pending.swap_remove(i);
tracing::trace!(
log::trace!(
"recycling cmdbuf submitted in {} when {} is last done",
index,
last_done_index,
@ -84,11 +84,13 @@ pub struct CommandAllocator<B: hal::Backend> {
}
impl<B: GfxBackend> CommandAllocator<B> {
#[allow(clippy::too_many_arguments)]
pub(crate) fn allocate(
&self,
device_id: Stored<DeviceId>,
device: &B::Device,
limits: wgt::Limits,
downlevel: wgt::DownlevelProperties,
private_features: PrivateFeatures,
label: &crate::Label,
#[cfg(feature = "trace")] enable_tracing: bool,
@ -100,7 +102,7 @@ impl<B: GfxBackend> CommandAllocator<B> {
use std::collections::hash_map::Entry;
let pool = match inner.pools.entry(thread_id) {
Entry::Vacant(e) => {
tracing::info!("Starting on thread {:?}", thread_id);
log::info!("Starting on thread {:?}", thread_id);
let raw = unsafe {
device
.create_command_pool(
@ -131,6 +133,7 @@ impl<B: GfxBackend> CommandAllocator<B> {
used_swap_chains: Default::default(),
buffer_memory_init_actions: Default::default(),
limits,
downlevel,
private_features,
has_labels: label.is_some(),
#[cfg(feature = "trace")]
@ -151,7 +154,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
device: &B::Device,
) -> Result<Self, CommandAllocatorError> {
let internal_thread_id = thread::current().id();
tracing::info!("Starting on (internal) thread {:?}", internal_thread_id);
log::info!("Starting on (internal) thread {:?}", internal_thread_id);
let mut pools = FastHashMap::default();
pools.insert(
internal_thread_id,
@ -250,7 +253,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
}
}
for thread_id in remove_threads {
tracing::info!("Removing from thread {:?}", thread_id);
log::info!("Removing from thread {:?}", thread_id);
let pool = inner.pools.remove(&thread_id).unwrap();
pool.destroy(device);
}
@ -263,7 +266,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
pool.recycle(raw);
}
if pool.total != pool.available.len() {
tracing::error!(
log::error!(
"Some command buffers are still recorded, only tracking {} / {}",
pool.available.len(),
pool.total

View file

@ -194,7 +194,7 @@ impl Binder {
bind_group: &BindGroup<B>,
offsets: &[wgt::DynamicOffset],
) -> &'a [EntryPayload] {
tracing::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
log::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
debug_assert_eq!(B::VARIANT, bind_group_id.0.backend());
let payload = &mut self.payloads[index];

View file

@ -51,7 +51,6 @@ use crate::{
id,
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::BufferUse,
span,
track::{TrackerSet, UsageConflict},
validation::check_buffer_usage,
Label, LabelHelpers, LifeGuard, Stored, MAX_BIND_GROUPS,
@ -92,7 +91,7 @@ impl RenderBundleEncoder {
parent_id: id::DeviceId,
base: Option<BasePass<RenderCommand>>,
) -> Result<Self, CreateRenderBundleError> {
span!(_guard, INFO, "RenderBundleEncoder::new");
profiling::scope!("RenderBundleEncoder::new");
Ok(Self {
base: base.unwrap_or_else(|| BasePass::new(&desc.label)),
parent_id,
@ -512,7 +511,7 @@ impl RenderBundleEncoder {
offset: wgt::BufferAddress,
size: Option<wgt::BufferSize>,
) {
span!(_guard, DEBUG, "RenderBundle::set_index_buffer");
profiling::scope!("RenderBundle::set_index_buffer");
self.base.commands.push(RenderCommand::SetIndexBuffer {
buffer_id,
index_format,
@ -1150,7 +1149,7 @@ where
pub mod bundle_ffi {
use super::{RenderBundleEncoder, RenderCommand};
use crate::{id, span, RawString};
use crate::{id, RawString};
use std::{convert::TryInto, slice};
use wgt::{BufferAddress, BufferSize, DynamicOffset};
@ -1158,8 +1157,6 @@ pub mod bundle_ffi {
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `offset_length` elements.
// TODO: There might be other safety issues, such as using the unsafe
// `RawPass::encode` and `RawPass::encode_slice`.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_bundle_set_bind_group(
bundle: &mut RenderBundleEncoder,
@ -1168,7 +1165,6 @@ pub mod bundle_ffi {
offsets: *const DynamicOffset,
offset_length: usize,
) {
span!(_guard, DEBUG, "RenderBundle::set_bind_group");
bundle.base.commands.push(RenderCommand::SetBindGroup {
index: index.try_into().unwrap(),
num_dynamic_offsets: offset_length.try_into().unwrap(),
@ -1187,7 +1183,6 @@ pub mod bundle_ffi {
bundle: &mut RenderBundleEncoder,
pipeline_id: id::RenderPipelineId,
) {
span!(_guard, DEBUG, "RenderBundle::set_pipeline");
bundle
.base
.commands
@ -1202,7 +1197,6 @@ pub mod bundle_ffi {
offset: BufferAddress,
size: Option<BufferSize>,
) {
span!(_guard, DEBUG, "RenderBundle::set_vertex_buffer");
bundle.base.commands.push(RenderCommand::SetVertexBuffer {
slot,
buffer_id,
@ -1211,6 +1205,10 @@ pub mod bundle_ffi {
});
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `data` elements.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_bundle_set_push_constants(
pass: &mut RenderBundleEncoder,
@ -1219,7 +1217,6 @@ pub mod bundle_ffi {
size_bytes: u32,
data: *const u8,
) {
span!(_guard, DEBUG, "RenderBundle::set_push_constants");
assert_eq!(
offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
0,
@ -1257,7 +1254,6 @@ pub mod bundle_ffi {
first_vertex: u32,
first_instance: u32,
) {
span!(_guard, DEBUG, "RenderBundle::draw");
bundle.base.commands.push(RenderCommand::Draw {
vertex_count,
instance_count,
@ -1275,7 +1271,6 @@ pub mod bundle_ffi {
base_vertex: i32,
first_instance: u32,
) {
span!(_guard, DEBUG, "RenderBundle::draw_indexed");
bundle.base.commands.push(RenderCommand::DrawIndexed {
index_count,
instance_count,
@ -1291,7 +1286,6 @@ pub mod bundle_ffi {
buffer_id: id::BufferId,
offset: BufferAddress,
) {
span!(_guard, DEBUG, "RenderBundle::draw_indirect");
bundle.base.commands.push(RenderCommand::MultiDrawIndirect {
buffer_id,
offset,
@ -1306,7 +1300,6 @@ pub mod bundle_ffi {
buffer_id: id::BufferId,
offset: BufferAddress,
) {
span!(_guard, DEBUG, "RenderBundle::draw_indexed_indirect");
bundle.base.commands.push(RenderCommand::MultiDrawIndirect {
buffer_id,
offset,
@ -1315,27 +1308,32 @@ pub mod bundle_ffi {
});
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given `label`
/// is a valid null-terminated string.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_bundle_push_debug_group(
_bundle: &mut RenderBundleEncoder,
_label: RawString,
) {
span!(_guard, DEBUG, "RenderBundle::push_debug_group");
//TODO
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_bundle_pop_debug_group(_bundle: &mut RenderBundleEncoder) {
span!(_guard, DEBUG, "RenderBundle::pop_debug_group");
pub extern "C" fn wgpu_render_bundle_pop_debug_group(_bundle: &mut RenderBundleEncoder) {
//TODO
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given `label`
/// is a valid null-terminated string.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_bundle_insert_debug_marker(
_bundle: &mut RenderBundleEncoder,
_label: RawString,
) {
span!(_guard, DEBUG, "RenderBundle::insert_debug_marker");
//TODO
}
}
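The `# Safety` comments added throughout this module all describe one contract: the C caller passes a raw pointer plus an element count, and the Rust side trusts that pair when it builds a slice. A minimal sketch of that pattern, with hypothetical names rather than the actual wgpu-core entry points:

// Illustrative only: the pointer-plus-length contract the `# Safety` comments
// describe, not a real wgpu-core function.
#[no_mangle]
pub unsafe extern "C" fn example_set_offsets(offsets: *const u32, offset_length: usize) {
    // The caller promises `offsets` is valid for `offset_length` elements.
    let offsets = std::slice::from_raw_parts(offsets, offset_length);
    for &offset in offsets {
        let _ = offset; // each dynamic offset would be recorded here
    }
}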
@ -12,10 +12,9 @@ use crate::{
id,
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::{Buffer, BufferUse, Texture},
span,
track::{TrackerSet, UsageConflict},
validation::{check_buffer_usage, MissingBufferUsageError},
Label,
Label, DOWNLEVEL_ERROR_WARNING_MESSAGE,
};
use hal::command::CommandBuffer as _;
@ -154,6 +153,11 @@ pub enum ComputePassErrorInner {
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error("cannot pop debug group, because number of pushed debug groups is zero")]
InvalidPopDebugGroup,
#[error(
"Compute shaders are not supported by the underlying platform. {}",
DOWNLEVEL_ERROR_WARNING_MESSAGE
)]
ComputeShadersUnsupported,
#[error(transparent)]
Dispatch(#[from] DispatchError),
#[error(transparent)]
@ -221,7 +225,7 @@ impl State {
self.trackers.merge_extend(&bind_group_guard[id].used)?;
}
tracing::trace!("Encoding dispatch barriers");
log::trace!("Encoding dispatch barriers");
CommandBuffer::insert_barriers(
raw_cmd_buf,
@ -253,7 +257,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
encoder_id: id::CommandEncoderId,
base: BasePassRef<ComputeCommand>,
) -> Result<(), ComputePassError> {
span!(_guard, INFO, "CommandEncoder::run_compute_pass");
profiling::scope!("CommandEncoder::run_compute_pass");
let scope = PassErrorScope::Pass(encoder_id);
let hub = B::hub(self);
@ -271,6 +275,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
});
}
if !cmd_buf
.downlevel
.flags
.contains(wgt::DownlevelFlags::COMPUTE_SHADERS)
{
return Err(ComputePassError {
scope: PassErrorScope::Pass(encoder_id),
inner: ComputePassErrorInner::ComputeShadersUnsupported,
});
}
if let Some(ref label) = base.label {
unsafe {
raw.begin_debug_marker(label, 0);
@ -649,7 +664,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub mod compute_ffi {
use super::{ComputeCommand, ComputePass};
use crate::{id, span, RawString};
use crate::{id, RawString};
use std::{convert::TryInto, ffi, slice};
use wgt::{BufferAddress, DynamicOffset};
@ -657,8 +672,6 @@ pub mod compute_ffi {
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `offset_length` elements.
// TODO: There might be other safety issues, such as using the unsafe
// `RawPass::encode` and `RawPass::encode_slice`.
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_set_bind_group(
pass: &mut ComputePass,
@ -667,7 +680,6 @@ pub mod compute_ffi {
offsets: *const DynamicOffset,
offset_length: usize,
) {
span!(_guard, DEBUG, "ComputePass::set_bind_group");
pass.base.commands.push(ComputeCommand::SetBindGroup {
index: index.try_into().unwrap(),
num_dynamic_offsets: offset_length.try_into().unwrap(),
@ -685,12 +697,15 @@ pub mod compute_ffi {
pass: &mut ComputePass,
pipeline_id: id::ComputePipelineId,
) {
span!(_guard, DEBUG, "ComputePass::set_pipeline");
pass.base
.commands
.push(ComputeCommand::SetPipeline(pipeline_id));
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `size_bytes` bytes.
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_set_push_constant(
pass: &mut ComputePass,
@ -698,7 +713,6 @@ pub mod compute_ffi {
size_bytes: u32,
data: *const u8,
) {
span!(_guard, DEBUG, "ComputePass::set_push_constant");
assert_eq!(
offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
0,
@ -734,7 +748,6 @@ pub mod compute_ffi {
groups_y: u32,
groups_z: u32,
) {
span!(_guard, DEBUG, "ComputePass::dispatch");
pass.base
.commands
.push(ComputeCommand::Dispatch([groups_x, groups_y, groups_z]));
@ -746,19 +759,21 @@ pub mod compute_ffi {
buffer_id: id::BufferId,
offset: BufferAddress,
) {
span!(_guard, DEBUG, "ComputePass::dispatch_indirect");
pass.base
.commands
.push(ComputeCommand::DispatchIndirect { buffer_id, offset });
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given `label`
/// is a valid null-terminated string.
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_push_debug_group(
pass: &mut ComputePass,
label: RawString,
color: u32,
) {
span!(_guard, DEBUG, "ComputePass::push_debug_group");
let bytes = ffi::CStr::from_ptr(label).to_bytes();
pass.base.string_data.extend_from_slice(bytes);
@ -770,17 +785,19 @@ pub mod compute_ffi {
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_pop_debug_group(pass: &mut ComputePass) {
span!(_guard, DEBUG, "ComputePass::pop_debug_group");
pass.base.commands.push(ComputeCommand::PopDebugGroup);
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given `label`
/// is a valid null-terminated string.
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_insert_debug_marker(
pass: &mut ComputePass,
label: RawString,
color: u32,
) {
span!(_guard, DEBUG, "ComputePass::insert_debug_marker");
let bytes = ffi::CStr::from_ptr(label).to_bytes();
pass.base.string_data.extend_from_slice(bytes);
@ -791,13 +808,11 @@ pub mod compute_ffi {
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_write_timestamp(
pub extern "C" fn wgpu_compute_pass_write_timestamp(
pass: &mut ComputePass,
query_set_id: id::QuerySetId,
query_index: u32,
) {
span!(_guard, DEBUG, "ComputePass::write_timestamp");
pass.base.commands.push(ComputeCommand::WriteTimestamp {
query_set_id,
query_index,
@ -805,17 +820,11 @@ pub mod compute_ffi {
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_begin_pipeline_statistics_query(
pub extern "C" fn wgpu_compute_pass_begin_pipeline_statistics_query(
pass: &mut ComputePass,
query_set_id: id::QuerySetId,
query_index: u32,
) {
span!(
_guard,
DEBUG,
"ComputePass::begin_pipeline_statistics query"
);
pass.base
.commands
.push(ComputeCommand::BeginPipelineStatisticsQuery {
@ -825,11 +834,7 @@ pub mod compute_ffi {
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_end_pipeline_statistics_query(
pass: &mut ComputePass,
) {
span!(_guard, DEBUG, "ComputePass::end_pipeline_statistics_query");
pub extern "C" fn wgpu_compute_pass_end_pipeline_statistics_query(pass: &mut ComputePass) {
pass.base
.commands
.push(ComputeCommand::EndPipelineStatisticsQuery);
@ -26,7 +26,6 @@ use crate::{
id,
memory_init_tracker::MemoryInitTrackerAction,
resource::{Buffer, Texture},
span,
track::TrackerSet,
Label, PrivateFeatures, Stored,
};
@ -49,6 +48,7 @@ pub struct CommandBuffer<B: hal::Backend> {
pub(crate) used_swap_chains: SmallVec<[Stored<id::SwapChainId>; 1]>,
pub(crate) buffer_memory_init_actions: Vec<MemoryInitTrackerAction<id::BufferId>>,
limits: wgt::Limits,
downlevel: wgt::DownlevelProperties,
private_features: PrivateFeatures,
has_labels: bool,
#[cfg(feature = "trace")]
@ -175,7 +175,7 @@ impl<C: Clone> BasePass<C> {
pub fn as_ref(&self) -> BasePassRef<C> {
BasePassRef {
label: self.label.as_ref().map(String::as_str),
label: self.label.as_deref(),
commands: &self.commands,
dynamic_offsets: &self.dynamic_offsets,
string_data: &self.string_data,
@ -198,7 +198,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
encoder_id: id::CommandEncoderId,
_desc: &wgt::CommandBufferDescriptor<Label>,
) -> (id::CommandBufferId, Option<CommandEncoderError>) {
span!(_guard, INFO, "CommandEncoder::finish");
profiling::scope!("CommandEncoder::finish");
let hub = B::hub(self);
let mut token = Token::root();
@ -217,7 +217,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.expect("Used swap chain frame has already presented");
cmd_buf.trackers.views.remove(view_id.value);
}
tracing::trace!("Command buffer {:?} {:#?}", encoder_id, cmd_buf.trackers);
log::trace!("Command buffer {:?} {:#?}", encoder_id, cmd_buf.trackers);
None
}
Err(e) => Some(e),
@ -231,7 +231,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
encoder_id: id::CommandEncoderId,
label: &str,
) -> Result<(), CommandEncoderError> {
span!(_guard, DEBUG, "CommandEncoder::push_debug_group");
profiling::scope!("CommandEncoder::push_debug_group");
let hub = B::hub(self);
let mut token = Token::root();
@ -251,7 +251,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
encoder_id: id::CommandEncoderId,
label: &str,
) -> Result<(), CommandEncoderError> {
span!(_guard, DEBUG, "CommandEncoder::insert_debug_marker");
profiling::scope!("CommandEncoder::insert_debug_marker");
let hub = B::hub(self);
let mut token = Token::root();
@ -270,7 +270,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
encoder_id: id::CommandEncoderId,
) -> Result<(), CommandEncoderError> {
span!(_guard, DEBUG, "CommandEncoder::pop_debug_marker");
profiling::scope!("CommandEncoder::pop_debug_marker");
let hub = B::hub(self);
let mut token = Token::root();
@ -39,12 +39,12 @@ impl<B: hal::Backend> QueryResetMap<B> {
query: u32,
) -> bool {
let (index, epoch, _) = id.unzip();
let (vec, _) = self
let vec_pair = self
.map
.entry(index)
.or_insert_with(|| (vec![false; query_set.desc.count as usize], epoch));
std::mem::replace(&mut vec[query as usize], true)
std::mem::replace(&mut vec_pair.0[query as usize], true)
}
pub fn reset_queries(
@ -62,9 +62,8 @@ impl<B: hal::Backend> QueryResetMap<B> {
// Need to find all "runs" of values which need resets. If the state vector is:
// [false, true, true, false, true], we want to reset [1..3, 4..5]. This minimizes
// the amount of resets needed.
let mut state_iter = state.into_iter().chain(iter::once(false)).enumerate();
let mut run_start: Option<u32> = None;
while let Some((idx, value)) = state_iter.next() {
for (idx, value) in state.into_iter().chain(iter::once(false)).enumerate() {
match (run_start, value) {
// We're inside of a run, do nothing
(Some(..), true) => {}
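A self-contained sketch of the run detection described in the comment above, under the assumption that the per-query flags are just a slice of bools (illustrative, not part of the vendored code):

// Given per-query "needs reset" flags, produce the minimal set of ranges to reset.
fn find_reset_runs(state: &[bool]) -> Vec<std::ops::Range<u32>> {
    let mut runs = Vec::new();
    let mut run_start: Option<u32> = None;
    for (idx, value) in state.iter().copied().chain(std::iter::once(false)).enumerate() {
        match (run_start, value) {
            (Some(..), true) => {}                        // still inside a run
            (Some(start), false) => {                     // a run just ended
                runs.push(start..idx as u32);
                run_start = None;
            }
            (None, true) => run_start = Some(idx as u32), // a run just started
            (None, false) => {}                           // still outside a run
        }
    }
    runs
}
// find_reset_runs(&[false, true, true, false, true]) yields [1..3, 4..5].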
@ -151,7 +150,7 @@ pub enum ResolveError {
end_query: u32,
query_set_size: u32,
},
#[error("Resolving queries {start_query}..{end_query} ({stride} byte queries) will end up overruning the bounds of the destination buffer of size {buffer_size} using offsets {buffer_start_offset}..{buffer_end_offset}")]
#[error("Resolving queries {start_query}..{end_query} ({stride} byte queries) will end up overrunning the bounds of the destination buffer of size {buffer_size} using offsets {buffer_start_offset}..{buffer_end_offset}")]
BufferOverrun {
start_query: u32,
end_query: u32,
@ -174,7 +173,7 @@ impl<B: GfxBackend> QuerySet<B> {
if let Some(reset) = reset_state {
let used = reset.use_query_set(query_set_id, self, query_index);
if used {
return Err(QueryUseError::UsedTwiceInsideRenderpass { query_index }.into());
return Err(QueryUseError::UsedTwiceInsideRenderpass { query_index });
}
}
@ -183,16 +182,14 @@ impl<B: GfxBackend> QuerySet<B> {
return Err(QueryUseError::IncompatibleType {
query_type,
set_type: simple_set_type,
}
.into());
});
}
if query_index >= self.desc.count {
return Err(QueryUseError::OutOfBounds {
query_index,
query_set_size: self.desc.count,
}
.into());
});
}
let hal_query = hal::query::Query::<B> {
@ -249,8 +246,7 @@ impl<B: GfxBackend> QuerySet<B> {
return Err(QueryUseError::AlreadyStarted {
active_query_index: old_idx,
new_query_index: query_index,
}
.into());
});
}
unsafe {
@ -11,15 +11,14 @@ use crate::{
},
conv,
device::{
AttachmentData, AttachmentDataVec, Device, RenderPassCompatibilityError, RenderPassContext,
RenderPassKey, RenderPassLock, MAX_COLOR_TARGETS, MAX_VERTEX_BUFFERS,
AttachmentData, AttachmentDataVec, Device, FramebufferKey, RenderPassCompatibilityError,
RenderPassContext, RenderPassKey, RenderPassLock, MAX_COLOR_TARGETS, MAX_VERTEX_BUFFERS,
},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
id,
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
pipeline::PipelineFlags,
resource::{BufferUse, Texture, TextureUse, TextureView, TextureViewInner},
span,
track::{TextureSelector, TrackerSet, UsageConflict},
validation::{
check_buffer_usage, check_texture_usage, MissingBufferUsageError, MissingTextureUsageError,
@ -98,9 +97,9 @@ pub struct PassChannel<V> {
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(any(feature = "serial-pass", feature = "trace"), derive(Serialize))]
#[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))]
pub struct ColorAttachmentDescriptor {
pub struct RenderPassColorAttachment {
/// The view to use as an attachment.
pub attachment: id::TextureViewId,
pub view: id::TextureViewId,
/// The view that will receive the resolved output if multisampling is used.
pub resolve_target: Option<id::TextureViewId>,
/// What operations will be performed on this color attachment.
@ -112,16 +111,16 @@ pub struct ColorAttachmentDescriptor {
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(any(feature = "serial-pass", feature = "trace"), derive(Serialize))]
#[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))]
pub struct DepthStencilAttachmentDescriptor {
pub struct RenderPassDepthStencilAttachment {
/// The view to use as an attachment.
pub attachment: id::TextureViewId,
pub view: id::TextureViewId,
/// What operations will be performed on the depth part of the attachment.
pub depth: PassChannel<f32>,
/// What operations will be performed on the stencil part of the attachment.
pub stencil: PassChannel<u32>,
}
impl DepthStencilAttachmentDescriptor {
impl RenderPassDepthStencilAttachment {
fn is_read_only(&self, aspects: hal::format::Aspects) -> Result<bool, RenderPassErrorInner> {
if aspects.contains(hal::format::Aspects::DEPTH) && !self.depth.read_only {
return Ok(false);
@ -144,17 +143,17 @@ impl DepthStencilAttachmentDescriptor {
pub struct RenderPassDescriptor<'a> {
pub label: Label<'a>,
/// The color attachments of the render pass.
pub color_attachments: Cow<'a, [ColorAttachmentDescriptor]>,
pub color_attachments: Cow<'a, [RenderPassColorAttachment]>,
/// The depth and stencil attachment of the render pass, if any.
pub depth_stencil_attachment: Option<&'a DepthStencilAttachmentDescriptor>,
pub depth_stencil_attachment: Option<&'a RenderPassDepthStencilAttachment>,
}
#[cfg_attr(feature = "serial-pass", derive(Deserialize, Serialize))]
pub struct RenderPass {
base: BasePass<RenderCommand>,
parent_id: id::CommandEncoderId,
color_targets: ArrayVec<[ColorAttachmentDescriptor; MAX_COLOR_TARGETS]>,
depth_stencil_target: Option<DepthStencilAttachmentDescriptor>,
color_targets: ArrayVec<[RenderPassColorAttachment; MAX_COLOR_TARGETS]>,
depth_stencil_target: Option<RenderPassDepthStencilAttachment>,
}
impl RenderPass {
@ -187,7 +186,7 @@ impl RenderPass {
offset: BufferAddress,
size: Option<BufferSize>,
) {
span!(_guard, DEBUG, "RenderPass::set_index_buffer");
profiling::scope!("RenderPass::set_index_buffer");
self.base.commands.push(RenderCommand::SetIndexBuffer {
buffer_id,
index_format,
@ -513,8 +512,8 @@ struct RenderPassInfo<'a, B: hal::Backend> {
impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
fn start(
raw: &mut B::CommandBuffer,
color_attachments: &[ColorAttachmentDescriptor],
depth_stencil_attachment: Option<&DepthStencilAttachmentDescriptor>,
color_attachments: &[RenderPassColorAttachment],
depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>,
cmd_buf: &CommandBuffer<B>,
device: &Device<B>,
view_guard: &'a Storage<TextureView<B>, id::TextureViewId>,
@ -563,8 +562,8 @@ impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
Some(at) => {
let view = trackers
.views
.use_extend(&*view_guard, at.attachment, (), ())
.map_err(|_| RenderPassErrorInner::InvalidAttachment(at.attachment))?;
.use_extend(&*view_guard, at.view, (), ())
.map_err(|_| RenderPassErrorInner::InvalidAttachment(at.view))?;
add_view(view, "depth")?;
depth_stencil_aspects = view.aspects;
@ -621,8 +620,8 @@ impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
for at in color_attachments {
let view = trackers
.views
.use_extend(&*view_guard, at.attachment, (), ())
.map_err(|_| RenderPassErrorInner::InvalidAttachment(at.attachment))?;
.use_extend(&*view_guard, at.view, (), ())
.map_err(|_| RenderPassErrorInner::InvalidAttachment(at.view))?;
add_view(view, "color")?;
let layouts = match view.inner {
@ -804,7 +803,7 @@ impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
inputs: &[],
preserves: &[],
};
let all = entry.key().all().map(|(at, _)| at.clone());
let all = entry.key().all().map(|&(ref at, _)| at.clone());
let pass = unsafe {
device
@ -819,21 +818,21 @@ impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
let view_data = AttachmentData {
colors: color_attachments
.iter()
.map(|at| view_guard.get(at.attachment).unwrap())
.map(|at| view_guard.get(at.view).unwrap())
.collect(),
resolves: color_attachments
.iter()
.filter_map(|at| at.resolve_target)
.map(|attachment| view_guard.get(attachment).unwrap())
.collect(),
depth_stencil: depth_stencil_attachment
.map(|at| view_guard.get(at.attachment).unwrap()),
depth_stencil: depth_stencil_attachment.map(|at| view_guard.get(at.view).unwrap()),
};
let extent = extent.ok_or(RenderPassErrorInner::MissingAttachments)?;
let fb_key = (
view_data.map(|view| view.framebuffer_attachment.clone()),
let fb_key = FramebufferKey {
attachments: view_data.map(|view| view.framebuffer_attachment.clone()),
extent,
);
samples: sample_count,
};
let context = RenderPassContext {
attachments: view_data.map(|view| view.format),
sample_count,
@ -848,7 +847,7 @@ impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
.raw
.create_framebuffer(
&render_pass,
e.key().0.all().map(|fat| fat.clone()),
e.key().attachments.all().cloned(),
conv::map_extent(&extent, wgt::TextureDimension::D3),
)
.or(Err(RenderPassErrorInner::OutOfMemory))?
@ -874,7 +873,7 @@ impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
.zip(&rp_key.colors)
.zip(raw_views.colors)
.map(
|((at, (rat, _layout)), image_view)| hal::command::RenderAttachmentInfo {
|((at, &(ref rat, _layout)), image_view)| hal::command::RenderAttachmentInfo {
image_view,
clear_value: match at.channel.load_op {
LoadOp::Load => Default::default(),
@ -1015,10 +1014,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
encoder_id: id::CommandEncoderId,
base: BasePassRef<RenderCommand>,
color_attachments: &[ColorAttachmentDescriptor],
depth_stencil_attachment: Option<&DepthStencilAttachmentDescriptor>,
color_attachments: &[RenderPassColorAttachment],
depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>,
) -> Result<(), RenderPassError> {
span!(_guard, INFO, "CommandEncoder::run_render_pass");
profiling::scope!("CommandEncoder::run_render_pass");
let scope = PassErrorScope::Pass(encoder_id);
let hub = B::hub(self);
@ -1051,7 +1050,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (texture_guard, mut token) = hub.textures.read(&mut token);
let (view_guard, _) = hub.texture_views.read(&mut token);
tracing::trace!(
log::trace!(
"Encoding render pass begin in command buffer {:?}",
encoder_id
);
@ -1923,7 +1922,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
tracing::trace!("Merging {:?} with the render pass", encoder_id);
log::trace!("Merging {:?} with the render pass", encoder_id);
unsafe {
raw.end_render_pass();
}
@ -1945,7 +1944,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if let Some(ref mut list) = cmd_buf.commands {
list.push(crate::device::trace::Command::RunRenderPass {
base: BasePass::from_ref(base),
target_colors: color_attachments.iter().cloned().collect(),
target_colors: color_attachments.to_vec(),
target_depth_stencil: depth_stencil_attachment.cloned(),
});
}
@ -1982,7 +1981,7 @@ pub mod render_ffi {
super::{Rect, RenderCommand},
RenderPass,
};
use crate::{id, span, RawString};
use crate::{id, RawString};
use std::{convert::TryInto, ffi, num::NonZeroU32, slice};
use wgt::{BufferAddress, BufferSize, Color, DynamicOffset};
@ -1990,8 +1989,6 @@ pub mod render_ffi {
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `offset_length` elements.
// TODO: There might be other safety issues, such as using the unsafe
// `RawPass::encode` and `RawPass::encode_slice`.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_bind_group(
pass: &mut RenderPass,
@ -2000,7 +1997,6 @@ pub mod render_ffi {
offsets: *const DynamicOffset,
offset_length: usize,
) {
span!(_guard, DEBUG, "RenderPass::set_bind_group");
pass.base.commands.push(RenderCommand::SetBindGroup {
index: index.try_into().unwrap(),
num_dynamic_offsets: offset_length.try_into().unwrap(),
@ -2018,7 +2014,6 @@ pub mod render_ffi {
pass: &mut RenderPass,
pipeline_id: id::RenderPipelineId,
) {
span!(_guard, DEBUG, "RenderPass::set_pipeline");
pass.base
.commands
.push(RenderCommand::SetPipeline(pipeline_id));
@ -2032,7 +2027,6 @@ pub mod render_ffi {
offset: BufferAddress,
size: Option<BufferSize>,
) {
span!(_guard, DEBUG, "RenderPass::set_vertex_buffer");
pass.base.commands.push(RenderCommand::SetVertexBuffer {
slot,
buffer_id,
@ -2043,7 +2037,6 @@ pub mod render_ffi {
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_blend_color(pass: &mut RenderPass, color: &Color) {
span!(_guard, DEBUG, "RenderPass::set_blend_color");
pass.base
.commands
.push(RenderCommand::SetBlendColor(*color));
@ -2051,7 +2044,6 @@ pub mod render_ffi {
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_stencil_reference(pass: &mut RenderPass, value: u32) {
span!(_guard, DEBUG, "RenderPass::set_stencil_buffer");
pass.base
.commands
.push(RenderCommand::SetStencilReference(value));
@ -2067,7 +2059,6 @@ pub mod render_ffi {
depth_min: f32,
depth_max: f32,
) {
span!(_guard, DEBUG, "RenderPass::set_viewport");
pass.base.commands.push(RenderCommand::SetViewport {
rect: Rect { x, y, w, h },
depth_min,
@ -2083,12 +2074,15 @@ pub mod render_ffi {
w: u32,
h: u32,
) {
span!(_guard, DEBUG, "RenderPass::set_scissor_rect");
pass.base
.commands
.push(RenderCommand::SetScissor(Rect { x, y, w, h }));
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `size_bytes` bytes.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_push_constants(
pass: &mut RenderPass,
@ -2097,7 +2091,6 @@ pub mod render_ffi {
size_bytes: u32,
data: *const u8,
) {
span!(_guard, DEBUG, "RenderPass::set_push_constants");
assert_eq!(
offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
0,
@ -2135,7 +2128,6 @@ pub mod render_ffi {
first_vertex: u32,
first_instance: u32,
) {
span!(_guard, DEBUG, "RenderPass::draw");
pass.base.commands.push(RenderCommand::Draw {
vertex_count,
instance_count,
@ -2153,7 +2145,6 @@ pub mod render_ffi {
base_vertex: i32,
first_instance: u32,
) {
span!(_guard, DEBUG, "RenderPass::draw_indexed");
pass.base.commands.push(RenderCommand::DrawIndexed {
index_count,
instance_count,
@ -2169,7 +2160,6 @@ pub mod render_ffi {
buffer_id: id::BufferId,
offset: BufferAddress,
) {
span!(_guard, DEBUG, "RenderPass::draw_indirect");
pass.base.commands.push(RenderCommand::MultiDrawIndirect {
buffer_id,
offset,
@ -2184,7 +2174,6 @@ pub mod render_ffi {
buffer_id: id::BufferId,
offset: BufferAddress,
) {
span!(_guard, DEBUG, "RenderPass::draw_indexed_indirect");
pass.base.commands.push(RenderCommand::MultiDrawIndirect {
buffer_id,
offset,
@ -2200,7 +2189,6 @@ pub mod render_ffi {
offset: BufferAddress,
count: u32,
) {
span!(_guard, DEBUG, "RenderPass::multi_draw_indirect");
pass.base.commands.push(RenderCommand::MultiDrawIndirect {
buffer_id,
offset,
@ -2216,7 +2204,6 @@ pub mod render_ffi {
offset: BufferAddress,
count: u32,
) {
span!(_guard, DEBUG, "RenderPass::multi_draw_indexed_indirect");
pass.base.commands.push(RenderCommand::MultiDrawIndirect {
buffer_id,
offset,
@ -2234,7 +2221,6 @@ pub mod render_ffi {
count_buffer_offset: BufferAddress,
max_count: u32,
) {
span!(_guard, DEBUG, "RenderPass::multi_draw_indirect_count");
pass.base
.commands
.push(RenderCommand::MultiDrawIndirectCount {
@ -2256,11 +2242,6 @@ pub mod render_ffi {
count_buffer_offset: BufferAddress,
max_count: u32,
) {
span!(
_guard,
DEBUG,
"RenderPass::multi_draw_indexed_indirect_count"
);
pass.base
.commands
.push(RenderCommand::MultiDrawIndirectCount {
@ -2273,13 +2254,16 @@ pub mod render_ffi {
});
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given `label`
/// is a valid null-terminated string.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_push_debug_group(
pass: &mut RenderPass,
label: RawString,
color: u32,
) {
span!(_guard, DEBUG, "RenderPass::push_debug_group");
let bytes = ffi::CStr::from_ptr(label).to_bytes();
pass.base.string_data.extend_from_slice(bytes);
@ -2291,17 +2275,19 @@ pub mod render_ffi {
#[no_mangle]
pub extern "C" fn wgpu_render_pass_pop_debug_group(pass: &mut RenderPass) {
span!(_guard, DEBUG, "RenderPass::pop_debug_group");
pass.base.commands.push(RenderCommand::PopDebugGroup);
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given `label`
/// is a valid null-terminated string.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_insert_debug_marker(
pass: &mut RenderPass,
label: RawString,
color: u32,
) {
span!(_guard, DEBUG, "RenderPass::insert_debug_marker");
let bytes = ffi::CStr::from_ptr(label).to_bytes();
pass.base.string_data.extend_from_slice(bytes);
@ -2312,13 +2298,11 @@ pub mod render_ffi {
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_write_timestamp(
pub extern "C" fn wgpu_render_pass_write_timestamp(
pass: &mut RenderPass,
query_set_id: id::QuerySetId,
query_index: u32,
) {
span!(_guard, DEBUG, "RenderPass::write_timestamp");
pass.base.commands.push(RenderCommand::WriteTimestamp {
query_set_id,
query_index,
@ -2326,13 +2310,11 @@ pub mod render_ffi {
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_begin_pipeline_statistics_query(
pub extern "C" fn wgpu_render_pass_begin_pipeline_statistics_query(
pass: &mut RenderPass,
query_set_id: id::QuerySetId,
query_index: u32,
) {
span!(_guard, DEBUG, "RenderPass::begin_pipeline_statistics query");
pass.base
.commands
.push(RenderCommand::BeginPipelineStatisticsQuery {
@ -2342,21 +2324,22 @@ pub mod render_ffi {
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_end_pipeline_statistics_query(pass: &mut RenderPass) {
span!(_guard, DEBUG, "RenderPass::end_pipeline_statistics_query");
pub extern "C" fn wgpu_render_pass_end_pipeline_statistics_query(pass: &mut RenderPass) {
pass.base
.commands
.push(RenderCommand::EndPipelineStatisticsQuery);
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `render_bundle_ids_length` elements.
#[no_mangle]
pub unsafe fn wgpu_render_pass_execute_bundles(
pass: &mut RenderPass,
render_bundle_ids: *const id::RenderBundleId,
render_bundle_ids_length: usize,
) {
span!(_guard, DEBUG, "RenderPass::execute_bundles");
for &bundle_id in slice::from_raw_parts(render_bundle_ids, render_bundle_ids_length) {
pass.base
.commands
@ -12,7 +12,6 @@ use crate::{
id::{BufferId, CommandEncoderId, TextureId},
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::{BufferUse, Texture, TextureErrorDimension, TextureUse},
span,
track::TextureSelector,
};
@ -24,8 +23,8 @@ use std::iter;
pub(crate) const BITS_PER_BYTE: u32 = 8;
pub type BufferCopyView = wgt::BufferCopyView<BufferId>;
pub type TextureCopyView = wgt::TextureCopyView<TextureId>;
pub type ImageCopyBuffer = wgt::ImageCopyBuffer<BufferId>;
pub type ImageCopyTexture = wgt::ImageCopyTexture<TextureId>;
#[derive(Clone, Debug)]
pub enum CopySide {
@ -46,14 +45,14 @@ pub enum TransferError {
MissingCopySrcUsageFlag,
#[error("destination buffer/texture is missing the `COPY_DST` usage flag")]
MissingCopyDstUsageFlag(Option<BufferId>, Option<TextureId>),
#[error("copy of {start_offset}..{end_offset} would end up overruning the bounds of the {side:?} buffer of size {buffer_size}")]
#[error("copy of {start_offset}..{end_offset} would end up overrunning the bounds of the {side:?} buffer of size {buffer_size}")]
BufferOverrun {
start_offset: BufferAddress,
end_offset: BufferAddress,
buffer_size: BufferAddress,
side: CopySide,
},
#[error("copy of {dimension:?} {start_offset}..{end_offset} would end up overruning the bounds of the {side:?} texture of {dimension:?} size {texture_size}")]
#[error("copy of {dimension:?} {start_offset}..{end_offset} would end up overrunning the bounds of the {side:?} texture of {dimension:?} size {texture_size}")]
TextureOverrun {
start_offset: u32,
end_offset: u32,
@ -77,6 +76,10 @@ pub enum TransferError {
UnalignedBytesPerRow,
#[error("number of rows per image is not a multiple of block height")]
UnalignedRowsPerImage,
#[error("number of bytes per row needs to be specified since more than one row is copied")]
UnspecifiedBytesPerRow,
#[error("number of rows per image needs to be specified since more than one image is copied")]
UnspecifiedRowsPerImage,
#[error("number of bytes per row is less than the number of bytes in a complete row")]
InvalidBytesPerRow,
#[error("image is 1D and the copy height and depth are not both set to 1")]
@ -103,7 +106,7 @@ pub enum CopyError {
//TODO: we currently access each texture twice for a transfer,
// once only to get the aspect flags, which is unfortunate.
pub(crate) fn texture_copy_view_to_hal<B: hal::Backend>(
view: &TextureCopyView,
view: &ImageCopyTexture,
size: &Extent3d,
texture_guard: &Storage<Texture<B>, TextureId>,
) -> Result<
@ -149,15 +152,16 @@ pub(crate) fn texture_copy_view_to_hal<B: hal::Backend>(
))
}
/// Function copied with minor modifications from webgpu standard https://gpuweb.github.io/gpuweb/#valid-texture-copy-range
/// Function copied with some modifications from webgpu standard <https://gpuweb.github.io/gpuweb/#copy-between-buffer-texture>
/// If successful, returns number of buffer bytes required for this copy.
pub(crate) fn validate_linear_texture_data(
layout: &wgt::TextureDataLayout,
layout: &wgt::ImageDataLayout,
format: wgt::TextureFormat,
buffer_size: BufferAddress,
buffer_side: CopySide,
bytes_per_block: BufferAddress,
copy_size: &Extent3d,
need_copy_aligned_rows: bool,
) -> Result<BufferAddress, TransferError> {
// Convert all inputs to BufferAddress (u64) to prevent overflow issues
let copy_width = copy_size.width as BufferAddress;
@ -165,14 +169,32 @@ pub(crate) fn validate_linear_texture_data(
let copy_depth = copy_size.depth_or_array_layers as BufferAddress;
let offset = layout.offset;
let rows_per_image = layout.rows_per_image as BufferAddress;
let bytes_per_row = layout.bytes_per_row as BufferAddress;
let (block_width, block_height) = format.describe().block_dimensions;
let block_width = block_width as BufferAddress;
let block_height = block_height as BufferAddress;
let block_size = bytes_per_block;
let width_in_blocks = copy_width / block_width;
let height_in_blocks = copy_height / block_height;
let bytes_per_row = if let Some(bytes_per_row) = layout.bytes_per_row {
bytes_per_row.get() as BufferAddress
} else {
if copy_depth > 1 || height_in_blocks > 1 {
return Err(TransferError::UnspecifiedBytesPerRow);
}
bytes_per_block * width_in_blocks
};
let rows_per_image = if let Some(rows_per_image) = layout.rows_per_image {
rows_per_image.get() as BufferAddress
} else {
if copy_depth > 1 {
return Err(TransferError::UnspecifiedRowsPerImage);
}
copy_height
};
if copy_width % block_width != 0 {
return Err(TransferError::UnalignedCopyWidth);
}
@ -183,23 +205,28 @@ pub(crate) fn validate_linear_texture_data(
return Err(TransferError::UnalignedRowsPerImage);
}
let bytes_in_a_complete_row = block_size * copy_width / block_width;
if need_copy_aligned_rows {
let bytes_per_row_alignment = wgt::COPY_BYTES_PER_ROW_ALIGNMENT as BufferAddress;
if bytes_per_row_alignment % bytes_per_block != 0 {
return Err(TransferError::UnalignedBytesPerRow);
}
if bytes_per_row % bytes_per_row_alignment != 0 {
return Err(TransferError::UnalignedBytesPerRow);
}
}
let bytes_in_last_row = block_size * width_in_blocks;
let required_bytes_in_copy = if copy_width == 0 || copy_height == 0 || copy_depth == 0 {
0
} else {
let actual_rows_per_image = if rows_per_image == 0 {
copy_height
} else {
rows_per_image
};
let texel_block_rows_per_image = actual_rows_per_image / block_height;
let texel_block_rows_per_image = rows_per_image / block_height;
let bytes_per_image = bytes_per_row * texel_block_rows_per_image;
let bytes_in_last_slice =
bytes_per_row * (copy_height / block_height - 1) + bytes_in_a_complete_row;
let bytes_in_last_slice = bytes_per_row * (height_in_blocks - 1) + bytes_in_last_row;
bytes_per_image * (copy_depth - 1) + bytes_in_last_slice
};
if rows_per_image != 0 && rows_per_image < copy_height {
if rows_per_image < copy_height {
return Err(TransferError::InvalidRowsPerImage);
}
if offset + required_bytes_in_copy > buffer_size {
@ -213,18 +240,15 @@ pub(crate) fn validate_linear_texture_data(
if offset % block_size != 0 {
return Err(TransferError::UnalignedBufferOffset(offset));
}
if copy_height > 1 && bytes_per_row < bytes_in_a_complete_row {
if copy_height > 1 && bytes_per_row < bytes_in_last_row {
return Err(TransferError::InvalidBytesPerRow);
}
if copy_depth > 1 && rows_per_image == 0 {
return Err(TransferError::InvalidRowsPerImage);
}
Ok(required_bytes_in_copy)
}
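A rough restatement of the size computation above, assuming a format with 1×1 blocks so that `bytes_per_block` is just the texel size (hypothetical helper, not part of wgpu-core):

// Bytes required in the source/destination buffer for a width x height x depth copy.
fn required_bytes_in_copy(
    bytes_per_row: u64,
    rows_per_image: u64,
    bytes_per_block: u64,
    width: u64,
    height: u64,
    depth: u64,
) -> u64 {
    if width == 0 || height == 0 || depth == 0 {
        return 0;
    }
    let bytes_in_last_row = bytes_per_block * width;
    let bytes_per_image = bytes_per_row * rows_per_image;
    let bytes_in_last_slice = bytes_per_row * (height - 1) + bytes_in_last_row;
    bytes_per_image * (depth - 1) + bytes_in_last_slice
}
// e.g. required_bytes_in_copy(256, 60, 4, 100, 60, 1) == 256 * 59 + 4 * 100 == 15_504

Further down in this file the same `bytes_per_row` value (or the copy width, when it is omitted) is turned into hal's texel-based `buffer_width` as `bytes_per_row / bytes_per_block * block_width`.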
/// Function copied with minor modifications from webgpu standard <https://gpuweb.github.io/gpuweb/#valid-texture-copy-range>
pub(crate) fn validate_texture_copy_range(
texture_copy_view: &TextureCopyView,
texture_copy_view: &ImageCopyTexture,
texture_format: wgt::TextureFormat,
texture_dimension: hal::image::Kind,
texture_side: CopySide,
@ -310,10 +334,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
destination_offset: BufferAddress,
size: BufferAddress,
) -> Result<(), CopyError> {
span!(_guard, INFO, "CommandEncoder::copy_buffer_to_buffer");
profiling::scope!("CommandEncoder::copy_buffer_to_buffer");
if source == destination {
Err(TransferError::SameSourceDestinationBuffer)?
return Err(TransferError::SameSourceDestinationBuffer.into());
}
let hub = B::hub(self);
let mut token = Token::root();
@ -343,7 +367,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.as_ref()
.ok_or(TransferError::InvalidBuffer(source))?;
if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)?
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
// expecting only a single barrier
let src_barrier = src_pending
@ -360,46 +384,45 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.as_ref()
.ok_or(TransferError::InvalidBuffer(destination))?;
if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag(
Some(destination),
None,
))?
return Err(TransferError::MissingCopyDstUsageFlag(Some(destination), None).into());
}
let dst_barrier = dst_pending
.map(|pending| pending.into_hal(dst_buffer))
.next();
if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
Err(TransferError::UnalignedCopySize(size))?
return Err(TransferError::UnalignedCopySize(size).into());
}
if source_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
Err(TransferError::UnalignedBufferOffset(source_offset))?
return Err(TransferError::UnalignedBufferOffset(source_offset).into());
}
if destination_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
Err(TransferError::UnalignedBufferOffset(destination_offset))?
return Err(TransferError::UnalignedBufferOffset(destination_offset).into());
}
let source_end_offset = source_offset + size;
let destination_end_offset = destination_offset + size;
if source_end_offset > src_buffer.size {
Err(TransferError::BufferOverrun {
return Err(TransferError::BufferOverrun {
start_offset: source_offset,
end_offset: source_end_offset,
buffer_size: src_buffer.size,
side: CopySide::Source,
})?
}
.into());
}
if destination_end_offset > dst_buffer.size {
Err(TransferError::BufferOverrun {
return Err(TransferError::BufferOverrun {
start_offset: destination_offset,
end_offset: destination_end_offset,
buffer_size: dst_buffer.size,
side: CopySide::Destination,
})?
}
.into());
}
if size == 0 {
tracing::trace!("Ignoring copy_buffer_to_buffer of size 0");
log::trace!("Ignoring copy_buffer_to_buffer of size 0");
return Ok(());
}
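Throughout these copy functions, `Err(...)?` is rewritten as an explicit `return Err(...into());`. Behavior is unchanged, since both forms go through the same `From` conversion into `CopyError`; the explicit form presumably just sidesteps the `?`-on-a-freshly-built-`Err` pattern that clippy's `try_err` lint warns about.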
@ -445,11 +468,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn command_encoder_copy_buffer_to_texture<B: GfxBackend>(
&self,
command_encoder_id: CommandEncoderId,
source: &BufferCopyView,
destination: &TextureCopyView,
source: &ImageCopyBuffer,
destination: &ImageCopyTexture,
copy_size: &Extent3d,
) -> Result<(), CopyError> {
span!(_guard, INFO, "CommandEncoder::copy_buffer_to_texture");
profiling::scope!("CommandEncoder::copy_buffer_to_texture");
let hub = B::hub(self);
let mut token = Token::root();
@ -470,7 +493,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 {
tracing::trace!("Ignoring copy_buffer_to_texture of size 0");
log::trace!("Ignoring copy_buffer_to_texture of size 0");
return Ok(());
}
@ -484,7 +507,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.as_ref()
.ok_or(TransferError::InvalidBuffer(source.buffer))?;
if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)?
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
let src_barriers = src_pending.map(|pending| pending.into_hal(src_buffer));
@ -503,25 +526,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag(
None,
Some(destination.texture),
))?
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
);
}
let dst_barriers = dst_pending.map(|pending| pending.into_hal(dst_texture));
let bytes_per_row_alignment = wgt::COPY_BYTES_PER_ROW_ALIGNMENT;
let bytes_per_block = conv::map_texture_format(dst_texture.format, cmd_buf.private_features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
let src_bytes_per_row = source.layout.bytes_per_row;
if bytes_per_row_alignment % bytes_per_block != 0 {
Err(TransferError::UnalignedBytesPerRow)?
}
if src_bytes_per_row % bytes_per_row_alignment != 0 {
Err(TransferError::UnalignedBytesPerRow)?
}
validate_texture_copy_range(
destination,
dst_texture.format,
@ -536,6 +550,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
CopySide::Source,
bytes_per_block as BufferAddress,
copy_size,
true,
)?;
cmd_buf.buffer_memory_init_actions.extend(
@ -551,9 +566,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (block_width, _) = dst_texture.format.describe().block_dimensions;
if !conv::is_valid_copy_dst_texture_format(dst_texture.format) {
Err(TransferError::CopyToForbiddenTextureFormat(
dst_texture.format,
))?
return Err(TransferError::CopyToForbiddenTextureFormat(dst_texture.format).into());
}
// WebGPU uses the physical size of the texture for copies whereas vulkan uses
@ -567,11 +580,20 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
depth_or_array_layers: copy_size.depth_or_array_layers,
};
let buffer_width = (source.layout.bytes_per_row / bytes_per_block) * block_width as u32;
let buffer_width = if let Some(bytes_per_row) = source.layout.bytes_per_row {
(bytes_per_row.get() / bytes_per_block) * block_width as u32
} else {
image_extent.width
};
let buffer_height = if let Some(rows_per_image) = source.layout.rows_per_image {
rows_per_image.get()
} else {
0
};
let region = hal::command::BufferImageCopy {
buffer_offset: source.layout.offset,
buffer_width,
buffer_height: source.layout.rows_per_image,
buffer_height,
image_layers: dst_layers,
image_offset: dst_offset,
image_extent: conv::map_extent(&image_extent, dst_texture.dimension),
@ -596,11 +618,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn command_encoder_copy_texture_to_buffer<B: GfxBackend>(
&self,
command_encoder_id: CommandEncoderId,
source: &TextureCopyView,
destination: &BufferCopyView,
source: &ImageCopyTexture,
destination: &ImageCopyBuffer,
copy_size: &Extent3d,
) -> Result<(), CopyError> {
span!(_guard, INFO, "CommandEncoder::copy_texture_to_buffer");
profiling::scope!("CommandEncoder::copy_texture_to_buffer");
let hub = B::hub(self);
let mut token = Token::root();
@ -621,7 +643,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 {
tracing::trace!("Ignoring copy_texture_to_buffer of size 0");
log::trace!("Ignoring copy_texture_to_buffer of size 0");
return Ok(());
}
@ -640,7 +662,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.as_ref()
.ok_or(TransferError::InvalidTexture(source.texture))?;
if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)?
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
let src_barriers = src_pending.map(|pending| pending.into_hal(src_texture));
@ -654,25 +676,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.as_ref()
.ok_or(TransferError::InvalidBuffer(destination.buffer))?;
if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag(
Some(destination.buffer),
None,
))?
return Err(
TransferError::MissingCopyDstUsageFlag(Some(destination.buffer), None).into(),
);
}
let dst_barrier = dst_barriers.map(|pending| pending.into_hal(dst_buffer));
let bytes_per_row_alignment = wgt::COPY_BYTES_PER_ROW_ALIGNMENT;
let bytes_per_block = conv::map_texture_format(src_texture.format, cmd_buf.private_features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
let dst_bytes_per_row = destination.layout.bytes_per_row;
if bytes_per_row_alignment % bytes_per_block != 0 {
Err(TransferError::UnalignedBytesPerRow)?
}
if dst_bytes_per_row % bytes_per_row_alignment != 0 {
Err(TransferError::UnalignedBytesPerRow)?
}
validate_texture_copy_range(
source,
src_texture.format,
@ -687,13 +700,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
CopySide::Destination,
bytes_per_block as BufferAddress,
copy_size,
true,
)?;
let (block_width, _) = src_texture.format.describe().block_dimensions;
if !conv::is_valid_copy_src_texture_format(src_texture.format) {
Err(TransferError::CopyFromForbiddenTextureFormat(
src_texture.format,
))?
return Err(TransferError::CopyFromForbiddenTextureFormat(src_texture.format).into());
}
cmd_buf.buffer_memory_init_actions.extend(
@ -721,12 +733,20 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
depth_or_array_layers: copy_size.depth_or_array_layers,
};
let buffer_width =
(destination.layout.bytes_per_row / bytes_per_block) * block_width as u32;
let buffer_width = if let Some(bytes_per_row) = destination.layout.bytes_per_row {
(bytes_per_row.get() / bytes_per_block) * block_width as u32
} else {
image_extent.width
};
let buffer_height = if let Some(rows_per_image) = destination.layout.rows_per_image {
rows_per_image.get()
} else {
0
};
let region = hal::command::BufferImageCopy {
buffer_offset: destination.layout.offset,
buffer_width,
buffer_height: destination.layout.rows_per_image,
buffer_height,
image_layers: src_layers,
image_offset: src_offset,
image_extent: conv::map_extent(&image_extent, src_texture.dimension),
@ -751,11 +771,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn command_encoder_copy_texture_to_texture<B: GfxBackend>(
&self,
command_encoder_id: CommandEncoderId,
source: &TextureCopyView,
destination: &TextureCopyView,
source: &ImageCopyTexture,
destination: &ImageCopyTexture,
copy_size: &Extent3d,
) -> Result<(), CopyError> {
span!(_guard, INFO, "CommandEncoder::copy_texture_to_texture");
profiling::scope!("CommandEncoder::copy_texture_to_texture");
let hub = B::hub(self);
let mut token = Token::root();
@ -769,7 +789,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (dst_layers, dst_selector, dst_offset) =
texture_copy_view_to_hal(destination, copy_size, &*texture_guard)?;
if src_layers.aspects != dst_layers.aspects {
Err(TransferError::MismatchedAspects)?
return Err(TransferError::MismatchedAspects.into());
}
#[cfg(feature = "trace")]
@ -782,7 +802,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 {
tracing::trace!("Ignoring copy_texture_to_texture of size 0");
log::trace!("Ignoring copy_texture_to_texture of size 0");
return Ok(());
}
@ -801,7 +821,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.as_ref()
.ok_or(TransferError::InvalidTexture(source.texture))?;
if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)?
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
//TODO: try to avoid this the collection. It's needed because both
// `src_pending` and `dst_pending` try to hold `trackers.textures` mutably.
@ -824,10 +844,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag(
None,
Some(destination.texture),
))?
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
);
}
barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_texture)));
@ -154,6 +154,25 @@ pub fn map_shader_stage_flags(shader_stage_flags: wgt::ShaderStage) -> hal::pso:
value
}
pub fn map_hal_flags_to_shader_stage(
shader_stage_flags: hal::pso::ShaderStageFlags,
) -> wgt::ShaderStage {
use hal::pso::ShaderStageFlags as H;
use wgt::ShaderStage as Ss;
let mut value = Ss::empty();
if shader_stage_flags.contains(H::VERTEX) {
value |= Ss::VERTEX;
}
if shader_stage_flags.contains(H::FRAGMENT) {
value |= Ss::FRAGMENT;
}
if shader_stage_flags.contains(H::COMPUTE) {
value |= Ss::COMPUTE;
}
value
}
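The new reverse mapping only covers the three stages the forward mapping produces, so single-stage values should round-trip between the two helpers; a quick illustrative check (hypothetical test, not part of the vendored code):

// Hypothetical round-trip check for the two stage-flag conversions above.
#[test]
fn shader_stage_flags_round_trip() {
    for &stage in &[
        wgt::ShaderStage::VERTEX,
        wgt::ShaderStage::FRAGMENT,
        wgt::ShaderStage::COMPUTE,
    ] {
        assert_eq!(map_hal_flags_to_shader_stage(map_shader_stage_flags(stage)), stage);
    }
}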
pub fn map_extent(extent: &wgt::Extent3d, dim: wgt::TextureDimension) -> hal::image::Extent {
hal::image::Extent {
width: extent.width,
@ -683,7 +702,7 @@ pub(crate) fn map_texture_state(
}
pub fn map_query_type(ty: &wgt::QueryType) -> (hal::query::Type, u32) {
match ty {
match *ty {
wgt::QueryType::PipelineStatistics(pipeline_statistics) => {
let mut ps = hal::query::PipelineStatistic::empty();
ps.set(
@ -790,23 +809,16 @@ pub fn map_primitive_state_to_rasterizer(
depth_stencil: Option<&wgt::DepthStencilState>,
) -> hal::pso::Rasterizer {
use hal::pso;
let (depth_clamping, depth_bias) = match depth_stencil {
Some(dsd) => {
let bias = if dsd.bias.is_enabled() {
Some(pso::State::Static(pso::DepthBias {
const_factor: dsd.bias.constant as f32,
slope_factor: dsd.bias.slope_scale,
clamp: dsd.bias.clamp,
}))
} else {
None
};
(dsd.clamp_depth, bias)
}
None => (false, None),
let depth_bias = match depth_stencil {
Some(dsd) if dsd.bias.is_enabled() => Some(pso::State::Static(pso::DepthBias {
const_factor: dsd.bias.constant as f32,
slope_factor: dsd.bias.slope_scale,
clamp: dsd.bias.clamp,
})),
_ => None,
};
pso::Rasterizer {
depth_clamping,
depth_clamping: desc.clamp_depth,
polygon_mode: match desc.polygon_mode {
wgt::PolygonMode::Fill => pso::PolygonMode::Fill,
wgt::PolygonMode::Line => pso::PolygonMode::Line,
@ -822,7 +834,7 @@ pub fn map_primitive_state_to_rasterizer(
wgt::FrontFace::Cw => pso::FrontFace::Clockwise,
},
depth_bias,
conservative: false,
conservative: desc.conservative,
line_width: pso::State::Static(1.0),
}
}
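The rewrite keeps the depth-bias logic (a static bias only when `bias.is_enabled()`), but `depth_clamping` now comes from the primitive state's `clamp_depth` rather than from the depth-stencil state, and `conservative` is wired to `desc.conservative` instead of being hard-coded to `false`.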
@ -15,9 +15,9 @@ struct MemoryDevice<'a, B: hal::Backend>(&'a B::Device);
impl<B: hal::Backend> MemoryAllocator<B> {
pub fn new(mem_props: hal::adapter::MemoryProperties, limits: hal::Limits) -> Self {
let mem_config = gpu_alloc::Config {
dedicated_treshold: 32 << 20,
preferred_dedicated_treshold: 8 << 20,
transient_dedicated_treshold: 128 << 20,
dedicated_threshold: 32 << 20,
preferred_dedicated_threshold: 8 << 20,
transient_dedicated_threshold: 128 << 20,
linear_chunk: 128 << 20,
minimal_buddy_size: 1 << 10,
initial_buddy_dedicated_size: 8 << 20,
@ -43,7 +43,7 @@ impl<B: hal::Backend> MemoryAllocator<B> {
.collect::<Vec<_>>(),
),
max_memory_allocation_count: if limits.max_memory_allocation_count == 0 {
tracing::warn!("max_memory_allocation_count is not set by gfx-rs backend");
log::warn!("max_memory_allocation_count is not set by gfx-rs backend");
!0
} else {
limits.max_memory_allocation_count.min(!0u32 as usize) as u32
@ -169,7 +169,7 @@ impl<B: hal::Backend> MemoryBlock<B> {
) -> hal::memory::Segment {
hal::memory::Segment {
offset: self.0.offset() + inner_offset,
size: size.or(Some(self.0.size())),
size: size.or_else(|| Some(self.0.size())),
}
}
@ -205,13 +205,14 @@ impl<B: hal::Backend> MemoryBlock<B> {
}
impl<B: hal::Backend> gpu_alloc::MemoryDevice<B::Memory> for MemoryDevice<'_, B> {
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn allocate_memory(
&self,
size: u64,
memory_type: u32,
flags: gpu_alloc::AllocationFlags,
) -> Result<B::Memory, gpu_alloc::OutOfMemory> {
profiling::scope!("Allocate Memory");
assert!(flags.is_empty());
self.0
@ -219,18 +220,18 @@ impl<B: hal::Backend> gpu_alloc::MemoryDevice<B::Memory> for MemoryDevice<'_, B>
.map_err(|_| gpu_alloc::OutOfMemory::OutOfDeviceMemory)
}
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn deallocate_memory(&self, memory: B::Memory) {
profiling::scope!("Deallocate Memory");
self.0.free_memory(memory);
}
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn map_memory(
&self,
memory: &mut B::Memory,
offset: u64,
size: u64,
) -> Result<NonNull<u8>, gpu_alloc::DeviceMapError> {
profiling::scope!("Map memory");
match self.0.map_memory(
memory,
hal::memory::Segment {
@ -247,16 +248,16 @@ impl<B: hal::Backend> gpu_alloc::MemoryDevice<B::Memory> for MemoryDevice<'_, B>
}
}
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn unmap_memory(&self, memory: &mut B::Memory) {
profiling::scope!("Unmap memory");
self.0.unmap_memory(memory);
}
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn invalidate_memory_ranges(
&self,
ranges: &[gpu_alloc::MappedMemoryRange<'_, B::Memory>],
) -> Result<(), gpu_alloc::OutOfMemory> {
profiling::scope!("Invalidate memory ranges");
self.0
.invalidate_mapped_memory_ranges(ranges.iter().map(|r| {
(
@ -270,11 +271,11 @@ impl<B: hal::Backend> gpu_alloc::MemoryDevice<B::Memory> for MemoryDevice<'_, B>
.map_err(|_| gpu_alloc::OutOfMemory::OutOfHostMemory)
}
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn flush_memory_ranges(
&self,
ranges: &[gpu_alloc::MappedMemoryRange<'_, B::Memory>],
) -> Result<(), gpu_alloc::OutOfMemory> {
profiling::scope!("Flush memory ranges");
self.0
.flush_mapped_memory_ranges(ranges.iter().map(|r| {
(
@ -37,7 +37,7 @@ impl<B: hal::Backend> DescriptorAllocator<B> {
)
}
.map_err(|err| {
tracing::warn!("Descriptor set allocation failed: {}", err);
log::warn!("Descriptor set allocation failed: {}", err);
DeviceError::OutOfMemory
})
}
@ -146,12 +146,12 @@ impl<B: hal::Backend> NonReferencedResources<B> {
if !self.buffers.is_empty() || !self.images.is_empty() {
let mut allocator = memory_allocator_mutex.lock();
for (raw, memory) in self.buffers.drain(..) {
tracing::trace!("Buffer {:?} is destroyed with memory {:?}", raw, memory);
log::trace!("Buffer {:?} is destroyed with memory {:?}", raw, memory);
device.destroy_buffer(raw);
allocator.free(device, memory);
}
for (raw, memory) in self.images.drain(..) {
tracing::trace!("Image {:?} is destroyed with memory {:?}", raw, memory);
log::trace!("Image {:?} is destroyed with memory {:?}", raw, memory);
device.destroy_image(raw);
allocator.free(device, memory);
}
@ -290,7 +290,7 @@ impl<B: hal::Backend> LifetimeTracker<B> {
fn wait_idle(&self, device: &B::Device) -> Result<(), WaitIdleError> {
if !self.active.is_empty() {
tracing::debug!("Waiting for IDLE...");
log::debug!("Waiting for IDLE...");
let status = unsafe {
device
.wait_for_fences(
@ -300,9 +300,9 @@ impl<B: hal::Backend> LifetimeTracker<B> {
)
.map_err(DeviceError::from)?
};
tracing::debug!("...Done");
log::debug!("...Done");
if status == false {
if !status {
// We timed out while waiting for the fences
return Err(WaitIdleError::StuckGpu);
}
@ -332,7 +332,7 @@ impl<B: hal::Backend> LifetimeTracker<B> {
};
for a in self.active.drain(..done_count) {
tracing::trace!("Active submission {} is done", a.index);
log::trace!("Active submission {} is done", a.index);
self.free_resources.extend(a.last_resources);
self.ready_to_map.extend(a.mapped);
unsafe {
@ -389,7 +389,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
while let Some(id) = self.suspected_resources.render_bundles.pop() {
if trackers.bundles.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyRenderBundle(id.0)));
if let Some(t) = trace {
t.lock().add(trace::Action::DestroyRenderBundle(id.0));
}
if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) {
self.suspected_resources.add_trackers(&res.used);
@ -405,7 +407,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
while let Some(id) = self.suspected_resources.bind_groups.pop() {
if trackers.bind_groups.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroup(id.0)));
if let Some(t) = trace {
t.lock().add(trace::Action::DestroyBindGroup(id.0));
}
if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) {
self.suspected_resources.add_trackers(&res.used);
@ -429,7 +433,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.texture_views.drain(..) {
if trackers.views.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyTextureView(id.0)));
if let Some(t) = trace {
t.lock().add(trace::Action::DestroyTextureView(id.0));
}
if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) {
let raw = match res.inner {
@ -459,7 +465,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.textures.drain(..) {
if trackers.textures.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyTexture(id.0)));
if let Some(t) = trace {
t.lock().add(trace::Action::DestroyTexture(id.0));
}
if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
@ -481,7 +489,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.samplers.drain(..) {
if trackers.samplers.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroySampler(id.0)));
if let Some(t) = trace {
t.lock().add(trace::Action::DestroySampler(id.0));
}
if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
@ -503,8 +513,10 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.buffers.drain(..) {
if trackers.buffers.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBuffer(id.0)));
tracing::debug!("Buffer {:?} is detached", id);
if let Some(t) = trace {
t.lock().add(trace::Action::DestroyBuffer(id.0));
}
log::debug!("Buffer {:?} is detached", id);
if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
@ -536,7 +548,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.compute_pipelines.drain(..) {
if trackers.compute_pipes.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
if let Some(t) = trace {
t.lock().add(trace::Action::DestroyComputePipeline(id.0));
}
if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
@ -558,7 +572,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.render_pipelines.drain(..) {
if trackers.render_pipes.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyRenderPipeline(id.0)));
if let Some(t) = trace {
t.lock().add(trace::Action::DestroyRenderPipeline(id.0));
}
if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
@ -584,7 +600,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
//Note: this has to happen after all the suspected pipelines are destroyed
if ref_count.load() == 1 {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyPipelineLayout(id.0)));
if let Some(t) = trace {
t.lock().add(trace::Action::DestroyPipelineLayout(id.0));
}
if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) {
self.suspected_resources
@ -606,7 +624,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
// encounter could drop the refcount to 0.
if guard[id].multi_ref_count.dec_and_check_empty() {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroupLayout(id.0)));
if let Some(t) = trace {
t.lock().add(trace::Action::DestroyBindGroupLayout(id.0));
}
if let Some(lay) = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard) {
self.free_resources.descriptor_set_layouts.push(lay.raw);
}
@ -651,7 +671,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
let buf = &buffer_guard[resource_id];
let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire);
tracing::trace!(
log::trace!(
"Mapping of {:?} at submission {:?} gets assigned to active {:?}",
resource_id,
submit_index,
@ -685,7 +705,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id)
{
buffer.map_state = resource::BufferMapState::Idle;
tracing::debug!("Mapping request is dropped because the buffer is destroyed.");
log::debug!("Mapping request is dropped because the buffer is destroyed.");
if let Some(buf) = hub
.buffers
.unregister_locked(buffer_id.0, &mut *buffer_guard)
@ -709,7 +729,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
_ => panic!("No pending mapping."),
};
let status = if mapping.range.start != mapping.range.end {
tracing::debug!("Buffer {:?} map state -> Active", buffer_id);
log::debug!("Buffer {:?} map state -> Active", buffer_id);
let host = mapping.op.host;
let size = mapping.range.end - mapping.range.start;
match super::map_buffer(raw, buffer, mapping.range.start, size, host) {
@ -725,7 +745,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
resource::BufferMapAsyncStatus::Success
}
Err(e) => {
tracing::error!("Mapping failed {:?}", e);
log::error!("Mapping failed {:?}", e);
resource::BufferMapAsyncStatus::Error
}
}

File diff not shown because of its large size.

View file

@ -7,7 +7,7 @@ use crate::device::trace::Action;
use crate::{
command::{
texture_copy_view_to_hal, validate_linear_texture_data, validate_texture_copy_range,
CommandAllocator, CommandBuffer, CopySide, TextureCopyView, TransferError, BITS_PER_BYTE,
CommandAllocator, CommandBuffer, CopySide, ImageCopyTexture, TransferError, BITS_PER_BYTE,
},
conv,
device::{alloc, DeviceError, WaitIdleError},
@ -15,7 +15,7 @@ use crate::{
id,
memory_init_tracker::MemoryInitKind,
resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse},
span, FastHashMap, FastHashSet,
FastHashMap, FastHashSet,
};
use hal::{command::CommandBuffer as _, device::Device as _, queue::Queue as _};
@ -111,7 +111,11 @@ impl<B: hal::Backend> super::Device<B> {
fn prepare_stage(&mut self, size: wgt::BufferAddress) -> Result<StagingData<B>, DeviceError> {
let mut buffer = unsafe {
self.raw
.create_buffer(size, hal::buffer::Usage::TRANSFER_SRC)
.create_buffer(
size,
hal::buffer::Usage::TRANSFER_SRC,
hal::memory::SparseFlags::empty(),
)
.map_err(|err| match err {
hal::buffer::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
_ => panic!("failed to create staging buffer: {}", err),
@ -188,7 +192,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
buffer_offset: wgt::BufferAddress,
data: &[u8],
) -> Result<(), QueueWriteError> {
span!(_guard, INFO, "Queue::write_buffer");
profiling::scope!("Queue::write_buffer");
let hub = B::hub(self);
let mut token = Token::root();
@ -212,7 +216,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let data_size = data.len() as wgt::BufferAddress;
if data_size == 0 {
tracing::trace!("Ignoring write_buffer of size 0");
log::trace!("Ignoring write_buffer of size 0");
return Ok(());
}
@ -229,26 +233,24 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.as_ref()
.ok_or(TransferError::InvalidBuffer(buffer_id))?;
if !dst.usage.contains(wgt::BufferUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag(
Some(buffer_id),
None,
))?;
return Err(TransferError::MissingCopyDstUsageFlag(Some(buffer_id), None).into());
}
dst.life_guard.use_at(device.active_submission_index + 1);
if data_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
Err(TransferError::UnalignedCopySize(data_size))?
return Err(TransferError::UnalignedCopySize(data_size).into());
}
if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
Err(TransferError::UnalignedBufferOffset(buffer_offset))?
return Err(TransferError::UnalignedBufferOffset(buffer_offset).into());
}
if buffer_offset + data_size > dst.size {
Err(TransferError::BufferOverrun {
return Err(TransferError::BufferOverrun {
start_offset: buffer_offset,
end_offset: buffer_offset + data_size,
buffer_size: dst.size,
side: CopySide::Destination,
})?
}
.into());
}
let region = hal::command::BufferCopy {
@ -292,12 +294,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn queue_write_texture<B: GfxBackend>(
&self,
queue_id: id::QueueId,
destination: &TextureCopyView,
destination: &ImageCopyTexture,
data: &[u8],
data_layout: &wgt::TextureDataLayout,
data_layout: &wgt::ImageDataLayout,
size: &wgt::Extent3d,
) -> Result<(), QueueWriteError> {
span!(_guard, INFO, "Queue::write_texture");
profiling::scope!("Queue::write_texture");
let hub = B::hub(self);
let mut token = Token::root();
@ -322,7 +324,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
tracing::trace!("Ignoring write_texture of size 0");
log::trace!("Ignoring write_texture of size 0");
return Ok(());
}
@ -338,6 +340,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
CopySide::Source,
bytes_per_block as wgt::BufferAddress,
size,
false,
)?;
let (block_width, block_height) = texture_format.describe().block_dimensions;
@ -345,13 +348,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let block_height = block_height as u32;
if !conv::is_valid_copy_dst_texture_format(texture_format) {
Err(TransferError::CopyToForbiddenTextureFormat(texture_format))?
return Err(TransferError::CopyToForbiddenTextureFormat(texture_format).into());
}
let width_blocks = size.width / block_width;
let height_blocks = size.height / block_height;
let texel_rows_per_image = data_layout.rows_per_image;
let block_rows_per_image = data_layout.rows_per_image / block_height;
let texel_rows_per_image = if let Some(rows_per_image) = data_layout.rows_per_image {
rows_per_image.get()
} else {
// the value doesn't really matter: it is only needed when copying more than one layer, and that case validates that rows_per_image is not None
size.height
};
let block_rows_per_image = texel_rows_per_image / block_height;
let bytes_per_row_alignment = get_lowest_common_denom(
device.hal_limits.optimal_buffer_copy_pitch_alignment as u32,
@ -380,10 +388,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst.usage.contains(wgt::TextureUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag(
None,
Some(destination.texture),
))?
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
);
}
validate_texture_copy_range(
destination,
@ -394,23 +401,27 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
)?;
dst.life_guard.use_at(device.active_submission_index + 1);
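// Note: when `bytes_per_row` is None (allowed only for a single-row copy), the tightly
// packed size is used below, e.g. 64 blocks * 4 bytes per block = 256 bytes per row.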
let bytes_per_row = if let Some(bytes_per_row) = data_layout.bytes_per_row {
bytes_per_row.get()
} else {
width_blocks * bytes_per_block
};
let ptr = stage.memory.map(&device.raw, 0, stage_size)?;
unsafe {
//TODO: https://github.com/zakarumych/gpu-alloc/issues/13
if stage_bytes_per_row == data_layout.bytes_per_row {
if stage_bytes_per_row == bytes_per_row {
// Fast path if the data is already aligned optimally.
ptr::copy_nonoverlapping(data.as_ptr(), ptr.as_ptr(), stage_size as usize);
} else {
// Copy row by row into the optimal alignment.
let copy_bytes_per_row =
stage_bytes_per_row.min(data_layout.bytes_per_row) as usize;
let copy_bytes_per_row = stage_bytes_per_row.min(bytes_per_row) as usize;
for layer in 0..size.depth_or_array_layers {
let rows_offset = layer * block_rows_per_image;
for row in 0..height_blocks {
ptr::copy_nonoverlapping(
data.as_ptr().offset(
(rows_offset + row) as isize * data_layout.bytes_per_row as isize,
),
data.as_ptr()
.offset((rows_offset + row) as isize * bytes_per_row as isize),
ptr.as_ptr().offset(
(rows_offset + row) as isize * stage_bytes_per_row as isize,
),
@ -501,7 +512,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.get(cmb_id)
.map_err(|_| QueueSubmitError::InvalidCommandBuffer(cmb_id))?;
if cmdbuf.buffer_memory_init_actions.len() == 0 {
if cmdbuf.buffer_memory_init_actions.is_empty() {
continue;
}
@ -608,7 +619,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
queue_id: id::QueueId,
command_buffer_ids: &[id::CommandBufferId],
) -> Result<(), QueueSubmitError> {
span!(_guard, INFO, "Queue::submit");
profiling::scope!("Queue::submit");
self.initialize_used_uninitialized_memory::<B>(queue_id, command_buffer_ids)?;
@ -681,11 +692,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
for id in cmdbuf.trackers.buffers.used() {
let buffer = &mut buffer_guard[id];
if buffer.raw.is_none() {
return Err(QueueSubmitError::DestroyedBuffer(id.0))?;
return Err(QueueSubmitError::DestroyedBuffer(id.0));
}
if !buffer.life_guard.use_at(submit_index) {
if let BufferMapState::Active { .. } = buffer.map_state {
tracing::warn!("Dropped buffer has a pending mapping.");
log::warn!("Dropped buffer has a pending mapping.");
super::unmap_buffer(&device.raw, buffer)?;
}
device.temp_suspected.buffers.push(id);
@ -699,7 +710,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
for id in cmdbuf.trackers.textures.used() {
let texture = &texture_guard[id];
if texture.raw.is_none() {
return Err(QueueSubmitError::DestroyedTexture(id.0))?;
return Err(QueueSubmitError::DestroyedTexture(id.0));
}
if !texture.life_guard.use_at(submit_index) {
device.temp_suspected.textures.push(id);
@ -744,7 +755,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
transit
.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
tracing::trace!("Stitching command buffer {:?} before submission", cmb_id);
log::trace!("Stitching command buffer {:?} before submission", cmb_id);
CommandBuffer::insert_barriers(
&mut transit,
&mut *trackers,
@ -758,7 +769,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
cmdbuf.raw.insert(0, transit);
}
tracing::trace!("Device after submission {}: {:#?}", submit_index, trackers);
log::trace!("Device after submission {}: {:#?}", submit_index, trackers);
}
// now prepare the GPU submission
@ -827,7 +838,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
queue_id: id::QueueId,
) -> Result<f32, InvalidQueue> {
span!(_guard, INFO, "Queue::get_timestamp_period");
profiling::scope!("Queue::get_timestamp_period");
let hub = B::hub(self);
let mut token = Token::root();

View file

@ -26,6 +26,7 @@ pub(crate) fn new_render_bundle_encoder_descriptor<'a>(
}
}
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
@ -107,9 +108,9 @@ pub enum Action<'a> {
queued: bool,
},
WriteTexture {
to: crate::command::TextureCopyView,
to: crate::command::ImageCopyTexture,
data: FileName,
layout: wgt::TextureDataLayout,
layout: wgt::ImageDataLayout,
size: wgt::Extent3d,
},
Submit(crate::SubmissionIndex, Vec<Command>),
@ -127,18 +128,18 @@ pub enum Command {
size: wgt::BufferAddress,
},
CopyBufferToTexture {
src: crate::command::BufferCopyView,
dst: crate::command::TextureCopyView,
src: crate::command::ImageCopyBuffer,
dst: crate::command::ImageCopyTexture,
size: wgt::Extent3d,
},
CopyTextureToBuffer {
src: crate::command::TextureCopyView,
dst: crate::command::BufferCopyView,
src: crate::command::ImageCopyTexture,
dst: crate::command::ImageCopyBuffer,
size: wgt::Extent3d,
},
CopyTextureToTexture {
src: crate::command::TextureCopyView,
dst: crate::command::TextureCopyView,
src: crate::command::ImageCopyTexture,
dst: crate::command::ImageCopyTexture,
size: wgt::Extent3d,
},
WriteTimestamp {
@ -157,8 +158,8 @@ pub enum Command {
},
RunRenderPass {
base: crate::command::BasePass<crate::command::RenderCommand>,
target_colors: Vec<crate::command::ColorAttachmentDescriptor>,
target_depth_stencil: Option<crate::command::DepthStencilAttachmentDescriptor>,
target_colors: Vec<crate::command::RenderPassColorAttachment>,
target_depth_stencil: Option<crate::command::RenderPassDepthStencilAttachment>,
},
}
@ -174,7 +175,7 @@ pub struct Trace {
#[cfg(feature = "trace")]
impl Trace {
pub fn new(path: &std::path::Path) -> Result<Self, std::io::Error> {
tracing::info!("Tracing into '{:?}'", path);
log::info!("Tracing into '{:?}'", path);
let mut file = std::fs::File::create(path.join(FILE_NAME))?;
file.write_all(b"[\n")?;
Ok(Self {
@ -198,7 +199,7 @@ impl Trace {
let _ = writeln!(self.file, "{},", string);
}
Err(e) => {
tracing::warn!("RON serialization failure: {:?}", e);
log::warn!("RON serialization failure: {:?}", e);
}
}
}

View file

@ -15,7 +15,6 @@ use crate::{
instance::{Adapter, Instance, Surface},
pipeline::{ComputePipeline, RenderPipeline, ShaderModule},
resource::{Buffer, Sampler, Texture, TextureView},
span,
swap_chain::SwapChain,
Epoch, Index,
};
@ -218,7 +217,6 @@ impl<T, I: TypedId> Storage<T, I> {
}
_ => None,
})
.into_iter()
}
}
@ -586,7 +584,7 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
let mut devices = self.devices.data.write();
for element in devices.map.iter_mut() {
if let Element::Occupied(device, _) = element {
if let Element::Occupied(ref mut device, _) = *element {
device.prepare_to_die();
}
}
@ -762,7 +760,7 @@ pub struct Global<G: GlobalIdentityHandlerFactory> {
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn new(name: &str, factory: G, backends: wgt::BackendBit) -> Self {
span!(_guard, INFO, "Global::new");
profiling::scope!("Global::new");
Self {
instance: Instance::new(name, 1, backends),
surfaces: Registry::without_backend(&factory, "Surface"),
@ -781,7 +779,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
impl<G: GlobalIdentityHandlerFactory> Drop for Global<G> {
fn drop(&mut self) {
if !thread::panicking() {
tracing::info!("Dropping Global");
log::info!("Dropping Global");
let mut surface_guard = self.surfaces.data.write();
// destroy hubs

View file

@ -7,7 +7,7 @@ use crate::{
device::{Device, DeviceDescriptor},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id::{AdapterId, DeviceId, SurfaceId, Valid},
span, LabelHelpers, LifeGuard, PrivateFeatures, Stored, MAX_BIND_GROUPS,
LabelHelpers, LifeGuard, PrivateFeatures, Stored, DOWNLEVEL_WARNING_MESSAGE, MAX_BIND_GROUPS,
};
use wgt::{Backend, BackendBit, PowerPreference, BIND_BUFFER_ALIGNMENT};
@ -122,12 +122,13 @@ pub struct Adapter<B: hal::Backend> {
features: wgt::Features,
pub(crate) private_features: PrivateFeatures,
limits: wgt::Limits,
downlevel: wgt::DownlevelProperties,
life_guard: LifeGuard,
}
impl<B: GfxBackend> Adapter<B> {
fn new(raw: hal::adapter::Adapter<B>) -> Self {
span!(_guard, INFO, "Adapter::new");
profiling::scope!("Adapter::new");
let adapter_features = raw.physical_device.features();
let properties = raw.physical_device.properties();
@ -192,6 +193,10 @@ impl<B: GfxBackend> Adapter<B> {
wgt::Features::SHADER_FLOAT64,
adapter_features.contains(hal::Features::SHADER_FLOAT64),
);
features.set(
wgt::Features::CONSERVATIVE_RASTERIZATION,
adapter_features.contains(hal::Features::CONSERVATIVE_RASTERIZATION),
);
#[cfg(not(target_os = "ios"))]
//TODO: https://github.com/gfx-rs/gfx/issues/3346
features.set(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER, true);
@ -270,11 +275,47 @@ impl<B: GfxBackend> Adapter<B> {
.max(MIN_PUSH_CONSTANT_SIZE), // As an extension, the default is always 0, so define a separate minimum.
};
let mut downlevel_flags = wgt::DownlevelFlags::empty();
downlevel_flags.set(
wgt::DownlevelFlags::COMPUTE_SHADERS,
properties.downlevel.compute_shaders,
);
downlevel_flags.set(
wgt::DownlevelFlags::STORAGE_IMAGES,
properties.downlevel.storage_images,
);
downlevel_flags.set(
wgt::DownlevelFlags::READ_ONLY_DEPTH_STENCIL,
properties.downlevel.read_only_depth_stencil,
);
downlevel_flags.set(
wgt::DownlevelFlags::DEVICE_LOCAL_IMAGE_COPIES,
properties.downlevel.device_local_image_copies,
);
downlevel_flags.set(
wgt::DownlevelFlags::NON_POWER_OF_TWO_MIPMAPPED_TEXTURES,
properties.downlevel.non_power_of_two_mipmapped_textures,
);
downlevel_flags.set(
wgt::DownlevelFlags::ANISOTROPIC_FILTERING,
private_features.anisotropic_filtering,
);
let downlevel = wgt::DownlevelProperties {
flags: downlevel_flags,
shader_model: match properties.downlevel.shader_model {
hal::DownlevelShaderModel::ShaderModel2 => wgt::ShaderModel::Sm2,
hal::DownlevelShaderModel::ShaderModel4 => wgt::ShaderModel::Sm4,
hal::DownlevelShaderModel::ShaderModel5 => wgt::ShaderModel::Sm5,
},
};
Self {
raw,
features,
private_features,
limits,
downlevel,
life_guard: LifeGuard::new("<Adapter>"),
}
}
@ -283,7 +324,7 @@ impl<B: GfxBackend> Adapter<B> {
&self,
surface: &mut Surface,
) -> Result<wgt::TextureFormat, GetSwapChainPreferredFormatError> {
span!(_guard, INFO, "Adapter::get_swap_chain_preferred_format");
profiling::scope!("Adapter::get_swap_chain_preferred_format");
let formats = {
let surface = B::get_surface_mut(surface);
@ -375,13 +416,17 @@ impl<B: GfxBackend> Adapter<B> {
));
}
if !self.downlevel.is_webgpu_compliant() {
log::warn!("{}", DOWNLEVEL_WARNING_MESSAGE);
}
// Verify feature preconditions
if desc
.features
.contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS)
&& self.raw.info.device_type == hal::adapter::DeviceType::DiscreteGpu
{
tracing::warn!("Feature MAPPABLE_PRIMARY_BUFFERS enabled on a discrete gpu. This is a massive performance footgun and likely not what you wanted");
log::warn!("Feature MAPPABLE_PRIMARY_BUFFERS enabled on a discrete gpu. This is a massive performance footgun and likely not what you wanted");
}
let phd = &self.raw.physical_device;
@ -397,7 +442,7 @@ impl<B: GfxBackend> Adapter<B> {
| hal::Features::IMAGE_CUBE_ARRAY;
let mut enabled_features = available_features & wishful_features;
if enabled_features != wishful_features {
tracing::warn!(
log::warn!(
"Missing internal features: {:?}",
wishful_features - enabled_features
);
@ -464,6 +509,11 @@ impl<B: GfxBackend> Adapter<B> {
hal::Features::SHADER_FLOAT64,
desc.features.contains(wgt::Features::SHADER_FLOAT64),
);
enabled_features.set(
hal::Features::CONSERVATIVE_RASTERIZATION,
desc.features
.contains(wgt::Features::CONSERVATIVE_RASTERIZATION),
);
let family = self
.raw
@ -514,6 +564,7 @@ impl<B: GfxBackend> Adapter<B> {
mem_props,
limits,
self.private_features,
self.downlevel,
desc,
trace_path,
)
@ -599,7 +650,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
handle: &impl raw_window_handle::HasRawWindowHandle,
id_in: Input<G, SurfaceId>,
) -> SurfaceId {
span!(_guard, INFO, "Instance::create_surface");
profiling::scope!("Instance::create_surface");
let surface = unsafe {
backends_map! {
@ -607,7 +658,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
inst
.as_ref()
.and_then(|inst| inst.create_surface(handle).map_err(|e| {
tracing::warn!("Error: {:?}", e);
log::warn!("Error: {:?}", e);
}).ok())
};
@ -637,16 +688,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
layer: *mut std::ffi::c_void,
id_in: Input<G, SurfaceId>,
) -> SurfaceId {
span!(_guard, INFO, "Instance::instance_create_surface_metal");
profiling::scope!("Instance::instance_create_surface_metal");
let surface =
Surface {
#[cfg(feature = "vulkan-portability")]
vulkan: None, //TODO: create_surface_from_layer ?
metal: self.instance.metal.as_ref().map(|inst| {
inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) })
}),
};
let surface = Surface {
#[cfg(feature = "gfx-backend-vulkan")]
vulkan: None, //TODO: create_surface_from_layer ?
metal: self.instance.metal.as_ref().map(|inst| {
// we don't want to link to metal-rs for this
#[allow(clippy::transmute_ptr_to_ref)]
inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) })
}),
};
let mut token = Token::root();
let id = self.surfaces.prepare(id_in).assign(surface, &mut token);
@ -654,14 +706,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
pub fn surface_drop(&self, id: SurfaceId) {
span!(_guard, INFO, "Surface::drop");
profiling::scope!("Surface::drop");
let mut token = Token::root();
let (surface, _) = self.surfaces.unregister(id, &mut token);
self.instance.destroy_surface(surface.unwrap());
}
pub fn enumerate_adapters(&self, inputs: AdapterInputs<Input<G, AdapterId>>) -> Vec<AdapterId> {
span!(_guard, INFO, "Instance::enumerate_adapters");
profiling::scope!("Instance::enumerate_adapters");
let instance = &self.instance;
let mut token = Token::root();
@ -669,12 +721,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
backends_map! {
let map = |(instance_field, backend, backend_info, backend_hub)| {
if let Some(inst) = instance_field {
if let Some(ref inst) = *instance_field {
let hub = backend_hub(self);
if let Some(id_backend) = inputs.find(backend) {
for raw in inst.enumerate_adapters() {
let adapter = Adapter::new(raw);
tracing::info!("Adapter {} {:?}", backend_info, adapter.raw.info);
log::info!("Adapter {} {:?}", backend_info, adapter.raw.info);
let id = hub.adapters
.prepare(id_backend.clone())
.assign(adapter, &mut token);
@ -704,7 +756,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
desc: &RequestAdapterOptions,
inputs: AdapterInputs<Input<G, AdapterId>>,
) -> Result<AdapterId, RequestAdapterError> {
span!(_guard, INFO, "Instance::pick_adapter");
profiling::scope!("Instance::pick_adapter");
let instance = &self.instance;
let mut token = Token::root();
@ -727,7 +779,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
backends_map! {
let map = |(instance_backend, id_backend, surface_backend)| {
match instance_backend {
match *instance_backend {
Some(ref inst) if id_backend.is_some() => {
let mut adapters = inst.enumerate_adapters();
if let Some(surface_backend) = compatible_surface.and_then(surface_backend) {
@ -822,7 +874,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let map = |(info_adapter, id_backend, mut adapters_backend, backend_hub)| {
if selected < adapters_backend.len() {
let adapter = Adapter::new(adapters_backend.swap_remove(selected));
tracing::info!("Adapter {} {:?}", info_adapter, adapter.raw.info);
log::info!("Adapter {} {:?}", info_adapter, adapter.raw.info);
let id = backend_hub(self).adapters
.prepare(id_backend.take().unwrap())
.assign(adapter, &mut token);
@ -851,7 +903,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
id_dx11.take(),
id_gl.take(),
);
tracing::warn!("Some adapters are present, but enumerating them failed!");
log::warn!("Some adapters are present, but enumerating them failed!");
Err(RequestAdapterError::NotFound)
}
@ -859,7 +911,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
adapter_id: AdapterId,
) -> Result<wgt::AdapterInfo, InvalidAdapter> {
span!(_guard, INFO, "Adapter::get_info");
profiling::scope!("Adapter::get_info");
let hub = B::hub(self);
let mut token = Token::root();
@ -875,7 +927,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
adapter_id: AdapterId,
format: wgt::TextureFormat,
) -> Result<wgt::TextureFormatFeatures, InvalidAdapter> {
span!(_guard, INFO, "Adapter::get_texture_format_features");
profiling::scope!("Adapter::get_texture_format_features");
let hub = B::hub(self);
let mut token = Token::root();
@ -890,7 +942,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
adapter_id: AdapterId,
) -> Result<wgt::Features, InvalidAdapter> {
span!(_guard, INFO, "Adapter::features");
profiling::scope!("Adapter::features");
let hub = B::hub(self);
let mut token = Token::root();
@ -905,7 +957,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
adapter_id: AdapterId,
) -> Result<wgt::Limits, InvalidAdapter> {
span!(_guard, INFO, "Adapter::limits");
profiling::scope!("Adapter::limits");
let hub = B::hub(self);
let mut token = Token::root();
@ -916,8 +968,23 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_err(|_| InvalidAdapter)
}
pub fn adapter_downlevel_properties<B: GfxBackend>(
&self,
adapter_id: AdapterId,
) -> Result<wgt::DownlevelProperties, InvalidAdapter> {
profiling::scope!("Adapter::downlevel_properties");
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.downlevel)
.map_err(|_| InvalidAdapter)
}
pub fn adapter_drop<B: GfxBackend>(&self, adapter_id: AdapterId) {
span!(_guard, INFO, "Adapter::drop");
profiling::scope!("Adapter::drop");
let hub = B::hub(self);
let mut token = Token::root();
@ -942,7 +1009,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
trace_path: Option<&std::path::Path>,
id_in: Input<G, DeviceId>,
) -> (DeviceId, Option<RequestDeviceError>) {
span!(_guard, INFO, "Adapter::request_device");
profiling::scope!("Adapter::request_device");
let hub = B::hub(self);
let mut token = Token::root();

View file

@ -2,14 +2,26 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(
// We use loops for getting early-out of scope without closures.
clippy::never_loop,
// We don't use syntax sugar where it's not necessary.
clippy::match_like_matches_macro,
// Redundant matching is more explicit.
clippy::redundant_pattern_matching,
// Explicit lifetimes are often easier to reason about.
clippy::needless_lifetimes,
// No need for defaults in the internal types.
clippy::new_without_default,
)]
#![warn(
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_qualifications
unused_qualifications,
// We don't match on a reference, unless required.
clippy::pattern_type_mismatch,
)]
// We use loops for getting early-out of scope without closures.
#![allow(clippy::never_loop)]
#[macro_use]
mod macros;
@ -225,6 +237,17 @@ struct PrivateFeatures {
texture_d24_s8: bool,
}
const DOWNLEVEL_WARNING_MESSAGE: &str = "The underlying API or device in use does not \
support enough features to be a fully compliant implementation of WebGPU. A subset of the features can still be used. \
If you are running this program on native and not in a browser and wish to limit the features you use to the supported subset, \
call Adapter::downlevel_properties or Device::downlevel_properties to get a listing of the features the current \
platform supports.";
const DOWNLEVEL_ERROR_WARNING_MESSAGE: &str = "This is not an invalid use of WebGPU: the underlying API or device does not \
support enough features to be a fully compliant implementation. A subset of the features can still be used. \
If you are running this program on native and not in a browser and wish to work around this issue, call \
Adapter::downlevel_properties or Device::downlevel_properties to get a listing of the features the current \
platform supports.";
#[macro_export]
macro_rules! gfx_select {
($id:expr => $global:ident.$method:ident( $($param:expr),* )) => {
@ -246,18 +269,6 @@ macro_rules! gfx_select {
};
}
#[macro_export]
macro_rules! span {
($guard_name:tt, $level:ident, $name:expr, $($fields:tt)*) => {
let span = tracing::span!(tracing::Level::$level, $name, $($fields)*);
let $guard_name = span.enter();
};
($guard_name:tt, $level:ident, $name:expr) => {
let span = tracing::span!(tracing::Level::$level, $name);
let $guard_name = span.enter();
};
}
/// Fast hash map used internally.
type FastHashMap<K, V> =
std::collections::HashMap<K, V, std::hash::BuildHasherDefault<fxhash::FxHasher>>;

View file

@ -157,10 +157,10 @@ impl MemoryInitTracker {
// Drains uninitialized ranges in a query range.
#[must_use]
pub(crate) fn drain<'a>(
&'a mut self,
pub(crate) fn drain(
&mut self,
drain_range: Range<wgt::BufferAddress>,
) -> MemoryInitTrackerDrain<'a> {
) -> MemoryInitTrackerDrain {
let index = self.lower_bound(drain_range.start);
MemoryInitTrackerDrain {
drain_range,

View file

@ -7,7 +7,7 @@ use crate::{
device::{DeviceError, RenderPassContext},
hub::Resource,
id::{DeviceId, PipelineLayoutId, ShaderModuleId},
validation, Label, LifeGuard, Stored,
validation, Label, LifeGuard, Stored, DOWNLEVEL_ERROR_WARNING_MESSAGE,
};
use std::borrow::Cow;
use thiserror::Error;
@ -59,7 +59,7 @@ pub enum CreateShaderModuleError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error(transparent)]
Validation(#[from] naga::proc::ValidationError),
Validation(#[from] naga::valid::ValidationError),
#[error("missing required device features {0:?}")]
MissingFeature(wgt::Features),
}
@ -113,6 +113,13 @@ pub enum CreateComputePipelineError {
Implicit(#[from] ImplicitLayoutError),
#[error(transparent)]
Stage(validation::StageError),
#[error("Internal error: {0}")]
Internal(String),
#[error(
"Compute shaders are not supported by the underlying platform. {}",
DOWNLEVEL_ERROR_WARNING_MESSAGE
)]
ComputeShadersUnsupported,
}
#[derive(Debug)]
@ -224,6 +231,8 @@ pub enum CreateRenderPipelineError {
strip_index_format: Option<wgt::IndexFormat>,
topology: wgt::PrimitiveTopology,
},
#[error("Conservative Rasterization is only supported for wgt::PolygonMode::Fill")]
ConservativeRasterizationNonFillPolygonMode,
#[error("missing required device features {0:?}")]
MissingFeature(wgt::Features),
#[error("error in stage {flag:?}")]
@ -232,6 +241,11 @@ pub enum CreateRenderPipelineError {
#[source]
error: validation::StageError,
},
#[error("Internal error in stage {stage:?}: {error}")]
Internal {
stage: wgt::ShaderStage,
error: String,
},
}
bitflags::bitflags! {

View file

@ -121,7 +121,7 @@ unsafe impl Sync for BufferMapOperation {}
impl BufferMapOperation {
pub(crate) fn call_error(self) {
tracing::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
log::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
unsafe {
(self.callback)(BufferMapAsyncStatus::Error, self.user_data);
}
@ -144,6 +144,22 @@ pub enum BufferAccessError {
NotMapped,
#[error("buffer map range does not respect `COPY_BUFFER_ALIGNMENT`")]
UnalignedRange,
#[error("buffer offset invalid: offset {offset} must be multiple of 8")]
UnalignedOffset { offset: wgt::BufferAddress },
#[error("buffer range size invalid: range_size {range_size} must be multiple of 4")]
UnalignedRangeSize { range_size: wgt::BufferAddress },
#[error("buffer access out of bounds: index {index} would underrun the buffer (limit: {min})")]
OutOfBoundsUnderrun {
index: wgt::BufferAddress,
min: wgt::BufferAddress,
},
#[error(
"buffer access out of bounds: last index {index} would overrun the buffer (limit: {max})"
)]
OutOfBoundsOverrun {
index: wgt::BufferAddress,
max: wgt::BufferAddress,
},
}
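Together these variants spell out the mapping rules: the offset must be a multiple of 8, the range size a multiple of 4, and the range must end inside the buffer. A minimal caller-side sketch of the same checks (the helper name and messages are assumptions, not wgpu-core API):

fn validate_map_range(offset: u64, range_size: u64, buffer_size: u64) -> Result<(), String> {
    // Mirrors UnalignedOffset: offsets must be 8-byte aligned.
    if offset % 8 != 0 {
        return Err(format!("offset {} must be a multiple of 8", offset));
    }
    // Mirrors UnalignedRangeSize: sizes must be 4-byte aligned.
    if range_size % 4 != 0 {
        return Err(format!("range_size {} must be a multiple of 4", range_size));
    }
    // Mirrors OutOfBoundsOverrun: the range must end within the buffer.
    if offset + range_size > buffer_size {
        return Err(format!(
            "range ends at {} but the buffer is {} bytes",
            offset + range_size,
            buffer_size
        ));
    }
    Ok(())
}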
#[derive(Debug)]
@ -286,7 +302,7 @@ pub struct TextureViewDescriptor<'a> {
/// Mip level count.
/// If `Some(count)`, `base_mip_level + count` must be less or equal to underlying texture mip count.
/// If `None`, considered to include the rest of the mipmap levels, but at least 1 in total.
pub level_count: Option<NonZeroU32>,
pub mip_level_count: Option<NonZeroU32>,
/// Base array layer.
pub base_array_layer: u32,
/// Layer count.

View file

@ -39,7 +39,7 @@ use crate::{
device::DeviceError,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id::{DeviceId, SwapChainId, TextureViewId, Valid},
resource, span,
resource,
track::TextureSelector,
LifeGuard, PrivateFeatures, Stored, SubmissionIndex,
};
@ -139,7 +139,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
swap_chain_id: SwapChainId,
view_id_in: Input<G, TextureViewId>,
) -> Result<SwapChainOutput, SwapChainError> {
span!(_guard, INFO, "SwapChain::get_next_texture");
profiling::scope!("SwapChain::get_next_texture");
let hub = B::hub(self);
let mut token = Token::root();
@ -172,11 +172,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Err(err) => (
None,
match err {
hal::window::AcquireError::OutOfMemory(_) => Err(DeviceError::OutOfMemory)?,
hal::window::AcquireError::OutOfMemory(_) => {
return Err(DeviceError::OutOfMemory.into())
}
hal::window::AcquireError::NotReady { .. } => SwapChainStatus::Timeout,
hal::window::AcquireError::OutOfDate(_) => SwapChainStatus::Outdated,
hal::window::AcquireError::SurfaceLost(_) => SwapChainStatus::Lost,
hal::window::AcquireError::DeviceLost(_) => Err(DeviceError::Lost)?,
hal::window::AcquireError::DeviceLost(_) => {
return Err(DeviceError::Lost.into())
}
},
),
};
@ -238,7 +242,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
swap_chain_id: SwapChainId,
) -> Result<SwapChainStatus, SwapChainError> {
span!(_guard, INFO, "SwapChain::present");
profiling::scope!("SwapChain::present");
let hub = B::hub(self);
let mut token = Token::root();
@ -283,7 +287,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let queue = &mut device.queue_group.queues[0];
let result = unsafe { queue.present(B::get_surface_mut(surface), image, sem) };
tracing::debug!(trace = true, "Presented. End of Frame");
log::debug!("Presented. End of Frame");
match result {
Ok(None) => Ok(SwapChainStatus::Good),

View file

@ -130,7 +130,7 @@ impl PendingTransition<BufferState> {
self,
buf: &'a resource::Buffer<B>,
) -> hal::memory::Barrier<'a, B> {
tracing::trace!("\tbuffer -> {:?}", self);
log::trace!("\tbuffer -> {:?}", self);
let &(ref target, _) = buf.raw.as_ref().expect("Buffer is destroyed");
hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(self.usage.start)
@ -148,7 +148,7 @@ impl PendingTransition<TextureState> {
self,
tex: &'a resource::Texture<B>,
) -> hal::memory::Barrier<'a, B> {
tracing::trace!("\ttexture -> {:?}", self);
log::trace!("\ttexture -> {:?}", self);
let &(ref target, _) = tex.raw.as_ref().expect("Texture is destroyed");
let aspects = tex.aspects;
hal::memory::Barrier::Image {
@ -195,6 +195,10 @@ impl<S: ResourceState + fmt::Debug> fmt::Debug for ResourceTracker<S> {
}
}
#[allow(
// Explicit lifetimes are easier to reason about here.
clippy::needless_lifetimes,
)]
impl<S: ResourceState> ResourceTracker<S> {
/// Create a new empty tracker.
pub fn new(backend: wgt::Backend) -> Self {

View file

@ -66,6 +66,7 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
}
/// Merge the neighboring ranges together, where possible.
#[allow(clippy::suspicious_operation_groupings)]
pub fn coalesce(&mut self) {
let mut num_removed = 0;
let mut iter = self.ranges.iter_mut();
@ -204,40 +205,40 @@ impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
fn next(&mut self) -> Option<Self::Item> {
match (self.sa.peek(), self.sb.peek()) {
// we have both streams
(Some(&(ref ra, va)), Some(&(ref rb, vb))) => {
(Some(&&(ref ra, va)), Some(&&(ref rb, vb))) => {
let (range, usage) = if ra.start < self.base {
// in the middle of the left stream
let (end, end_value) = if self.base == rb.start {
// right stream is starting
debug_assert!(self.base < ra.end);
(rb.end, Some(*vb))
(rb.end, Some(vb))
} else {
// right hasn't started yet
debug_assert!(self.base < rb.start);
(rb.start, None)
};
(self.base..ra.end.min(end), Some(*va)..end_value)
(self.base..ra.end.min(end), Some(va)..end_value)
} else if rb.start < self.base {
// in the middle of the right stream
let (end, start_value) = if self.base == ra.start {
// left stream is starting
debug_assert!(self.base < rb.end);
(ra.end, Some(*va))
(ra.end, Some(va))
} else {
// left hasn't started yet
debug_assert!(self.base < ra.start);
(ra.start, None)
};
(self.base..rb.end.min(end), start_value..Some(*vb))
(self.base..rb.end.min(end), start_value..Some(vb))
} else {
// no active streams
match ra.start.cmp(&rb.start) {
// both are starting
Ordering::Equal => (ra.start..ra.end.min(rb.end), Some(*va)..Some(*vb)),
Ordering::Equal => (ra.start..ra.end.min(rb.end), Some(va)..Some(vb)),
// only left is starting
Ordering::Less => (ra.start..rb.start.min(ra.end), Some(*va)..None),
Ordering::Less => (ra.start..rb.start.min(ra.end), Some(va)..None),
// only right is starting
Ordering::Greater => (rb.start..ra.start.min(rb.end), None..Some(*vb)),
Ordering::Greater => (rb.start..ra.start.min(rb.end), None..Some(vb)),
}
};
self.base = range.end;
@ -250,18 +251,18 @@ impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
Some((range, usage))
}
// only right stream
(None, Some(&(ref rb, vb))) => {
(None, Some(&&(ref rb, vb))) => {
let range = self.base.max(rb.start)..rb.end;
self.base = rb.end;
let _ = self.sb.next();
Some((range, None..Some(*vb)))
Some((range, None..Some(vb)))
}
// only left stream
(Some(&(ref ra, va)), None) => {
(Some(&&(ref ra, va)), None) => {
let range = self.base.max(ra.start)..ra.end;
self.base = ra.end;
let _ = self.sa.next();
Some((range, Some(*va)..None))
Some((range, Some(va)..None))
}
// done
(None, None) => None,

View file

@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{binding_model::BindEntryMap, FastHashMap};
use naga::proc::analyzer::GlobalUse;
use naga::valid::GlobalUse;
use std::collections::hash_map::Entry;
use thiserror::Error;
use wgt::{BindGroupLayoutEntry, BindingType};
@ -172,71 +172,6 @@ pub enum StageError {
},
}
fn get_aligned_type_size(
module: &naga::Module,
handle: naga::Handle<naga::Type>,
allow_unbound: bool,
) -> wgt::BufferAddress {
use naga::TypeInner as Ti;
//TODO: take alignment into account!
match module.types[handle].inner {
Ti::Scalar { kind: _, width } => width as wgt::BufferAddress,
Ti::Vector {
size,
kind: _,
width,
} => size as wgt::BufferAddress * width as wgt::BufferAddress,
Ti::Matrix {
rows,
columns,
width,
} => {
rows as wgt::BufferAddress * columns as wgt::BufferAddress * width as wgt::BufferAddress
}
Ti::Pointer { .. } => 4,
Ti::Array {
base,
size: naga::ArraySize::Constant(const_handle),
stride,
} => {
let base_size = match stride {
Some(stride) => stride.get() as wgt::BufferAddress,
None => get_aligned_type_size(module, base, false),
};
let count = match module.constants[const_handle].inner {
naga::ConstantInner::Scalar {
value: naga::ScalarValue::Uint(value),
width: _,
} => value,
ref other => panic!("Invalid array size constant: {:?}", other),
};
base_size * count
}
Ti::Array {
base,
size: naga::ArraySize::Dynamic,
stride,
} if allow_unbound => match stride {
Some(stride) => stride.get() as wgt::BufferAddress,
None => get_aligned_type_size(module, base, false),
},
Ti::Struct {
block: _,
ref members,
} => {
let mut offset = 0;
for member in members {
offset += match member.span {
Some(span) => span.get() as wgt::BufferAddress,
None => get_aligned_type_size(module, member.ty, false),
}
}
offset
}
_ => panic!("Unexpected struct field"),
}
}
fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option<naga::StorageFormat> {
use naga::StorageFormat as Sf;
use wgt::TextureFormat as Tf;
@ -690,26 +625,77 @@ pub fn check_texture_format(format: wgt::TextureFormat, output: &NumericType) ->
pub type StageIo = FastHashMap<wgt::ShaderLocation, NumericType>;
impl Interface {
pub fn new(module: &naga::Module, analysis: &naga::proc::analyzer::Analysis) -> Self {
fn populate(
list: &mut Vec<Varying>,
binding: Option<&naga::Binding>,
ty: naga::Handle<naga::Type>,
arena: &naga::Arena<naga::Type>,
) {
let numeric_ty = match arena[ty].inner {
naga::TypeInner::Scalar { kind, width } => NumericType {
dim: NumericDimension::Scalar,
kind,
width,
},
naga::TypeInner::Vector { size, kind, width } => NumericType {
dim: NumericDimension::Vector(size),
kind,
width,
},
naga::TypeInner::Matrix {
columns,
rows,
width,
} => NumericType {
dim: NumericDimension::Matrix(columns, rows),
kind: naga::ScalarKind::Float,
width,
},
naga::TypeInner::Struct {
block: _,
ref members,
} => {
for member in members {
Self::populate(list, member.binding.as_ref(), member.ty, arena);
}
return;
}
ref other => {
log::error!("Unexpected varying type: {:?}", other);
return;
}
};
let varying = match binding {
Some(&naga::Binding::Location(location, _)) => Varying::Local {
location,
ty: numeric_ty,
},
Some(&naga::Binding::BuiltIn(built_in)) => Varying::BuiltIn(built_in),
None => {
log::error!("Missing binding for a varying");
return;
}
};
list.push(varying);
}
pub fn new(module: &naga::Module, info: &naga::valid::ModuleInfo) -> Self {
let mut resources = naga::Arena::new();
let mut resource_mapping = FastHashMap::default();
for (var_handle, var) in module.global_variables.iter() {
let (group, binding) = match var.binding {
Some(naga::Binding::Resource { group, binding }) => (group, binding),
Some(ref br) => (br.group, br.binding),
_ => continue,
};
let ty = match module.types[var.ty].inner {
naga::TypeInner::Struct {
block: true,
ref members,
members: _,
} => {
let mut actual_size = 0;
for (i, member) in members.iter().enumerate() {
actual_size +=
get_aligned_type_size(module, member.ty, i + 1 == members.len());
}
let actual_size = info.layouter[var.ty].size;
ResourceType::Buffer {
size: wgt::BufferSize::new(actual_size).unwrap(),
size: wgt::BufferSize::new(actual_size as u64).unwrap(),
}
}
naga::TypeInner::Image {
@ -722,7 +708,10 @@ impl Interface {
class,
},
naga::TypeInner::Sampler { comparison } => ResourceType::Sampler { comparison },
ref other => panic!("Unexpected resource type: {:?}", other),
ref other => {
log::error!("Unexpected resource type: {:?}", other);
continue;
}
};
let handle = resources.append(Resource {
group,
@ -735,58 +724,32 @@ impl Interface {
let mut entry_points = FastHashMap::default();
entry_points.reserve(module.entry_points.len());
for (&(stage, ref ep_name), _entry_point) in module.entry_points.iter() {
let info = analysis.get_entry_point(stage, ep_name);
for (index, entry_point) in (&module.entry_points).iter().enumerate() {
let info = info.get_entry_point(index);
let mut ep = EntryPoint::default();
for arg in entry_point.function.arguments.iter() {
Self::populate(&mut ep.inputs, arg.binding.as_ref(), arg.ty, &module.types);
}
if let Some(ref result) = entry_point.function.result {
Self::populate(
&mut ep.outputs,
result.binding.as_ref(),
result.ty,
&module.types,
);
}
for (var_handle, var) in module.global_variables.iter() {
let usage = info[var_handle];
if usage.is_empty() {
continue;
}
let varying = match var.binding {
Some(naga::Binding::Resource { .. }) => {
ep.resources.push((resource_mapping[&var_handle], usage));
None
}
Some(naga::Binding::Location(location)) => {
let ty = match module.types[var.ty].inner {
naga::TypeInner::Scalar { kind, width } => NumericType {
dim: NumericDimension::Scalar,
kind,
width,
},
naga::TypeInner::Vector { size, kind, width } => NumericType {
dim: NumericDimension::Vector(size),
kind,
width,
},
naga::TypeInner::Matrix {
columns,
rows,
width,
} => NumericType {
dim: NumericDimension::Matrix(columns, rows),
kind: naga::ScalarKind::Float,
width,
},
ref other => panic!("Unexpected varying type: {:?}", other),
};
Some(Varying::Local { location, ty })
}
Some(naga::Binding::BuiltIn(built_in)) => Some(Varying::BuiltIn(built_in)),
_ => None,
};
if let Some(varying) = varying {
match var.class {
naga::StorageClass::Input => ep.inputs.push(varying),
naga::StorageClass::Output => ep.outputs.push(varying),
_ => (),
}
if var.binding.is_some() {
ep.resources.push((resource_mapping[&var_handle], usage));
}
}
entry_points.insert((stage, ep_name.clone()), ep);
entry_points.insert((entry_point.stage, entry_point.name.clone()), ep);
}
Interface {
@ -837,7 +800,7 @@ impl Interface {
.ok_or(BindingError::Missing)
.and_then(|set| {
let ty = res.derive_binding_type(usage)?;
Ok(match set.entry(res.binding) {
match set.entry(res.binding) {
Entry::Occupied(e) if e.get().ty != ty => {
return Err(BindingError::InconsistentlyDerivedType)
}
@ -852,7 +815,8 @@ impl Interface {
count: None,
});
}
})
}
Ok(())
}),
};
if let Err(error) = result {

View file

@ -6,11 +6,15 @@
* This API is used for targeting both Web and Native.
*/
// The intra doc links to the wgpu crate in this crate actually succesfully link to the types in the wgpu crate, when built from the wgpu crate.
// However when building from both the wgpu crate or this crate cargo doc will claim all the links cannot be resolved
// despite the fact that it works fine when it needs to.
// So we just disable those warnings.
#![allow(broken_intra_doc_links)]
#![allow(
// The intra doc links to the wgpu crate in this crate actually successfully link to the types in the wgpu crate, when built from the wgpu crate.
// However, when building from either the wgpu crate or this crate, cargo doc will claim all the links cannot be resolved
// despite the fact that it works fine when it needs to.
// So we just disable those warnings.
broken_intra_doc_links,
// We don't use syntax sugar where it's not necessary.
clippy::match_like_matches_macro,
)]
#![warn(missing_docs)]
#[cfg(feature = "serde")]
@ -30,7 +34,7 @@ pub type DynamicOffset = u32;
///
/// This doesn't apply to [`Queue::write_texture`].
///
/// [`bytes_per_row`]: TextureDataLayout::bytes_per_row
/// [`bytes_per_row`]: ImageDataLayout::bytes_per_row
pub const COPY_BYTES_PER_ROW_ALIGNMENT: u32 = 256;
/// Bound uniform/storage buffer offsets must be aligned to this number.
pub const BIND_BUFFER_ALIGNMENT: BufferAddress = 256;
@ -403,7 +407,7 @@ bitflags::bitflags! {
/// Enables 64-bit floating point types in SPIR-V shaders.
///
/// Note: even when supported by GPU hardware, 64-bit floating point operations are
/// frequently between 16 and 64 _times_ slower than equivelent operations on 32-bit floats.
/// frequently between 16 and 64 _times_ slower than equivalent operations on 32-bit floats.
///
/// Supported Platforms:
/// - Vulkan
@ -418,6 +422,17 @@ bitflags::bitflags! {
///
/// This is a native-only feature.
const VERTEX_ATTRIBUTE_64BIT = 0x0000_0000_4000_0000;
/// Allows the user to enable overestimation-conservative rasterization via [`PrimitiveState::conservative`]
///
/// Processing of degenerate triangles/lines is hardware specific.
/// Only triangles are supported.
///
/// Supported platforms:
/// - DX12
/// - Vulkan
///
/// This is a native only feature.
const CONSERVATIVE_RASTERIZATION = 0x0000_0000_8000_0000;
/// Features which are part of the upstream WebGPU standard.
const ALL_WEBGPU = 0x0000_0000_0000_FFFF;
/// Features that are only available when targeting native (not web).
@ -523,6 +538,70 @@ impl Default for Limits {
}
}
/// Lists various ways the underlying platform does not conform to the WebGPU standard.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DownlevelProperties {
/// Combined boolean flags.
pub flags: DownlevelFlags,
/// Which collections of features shaders support. Defined in terms of D3D's shader models.
pub shader_model: ShaderModel,
}
impl Default for DownlevelProperties {
// Note, this defaults to all on, as that is the default assumption in wgpu.
// gfx-hal's equivalent structure defaults to all off.
fn default() -> Self {
Self {
flags: DownlevelFlags::COMPLIANT,
shader_model: ShaderModel::Sm5,
}
}
}
impl DownlevelProperties {
/// Returns true if the underlying platform offers complete support of the baseline WebGPU standard.
///
/// If this returns false, some parts of the API will result in validation errors where they would not normally.
/// These parts can be determined by the values in this structure.
pub fn is_webgpu_compliant(self) -> bool {
self == Self::default()
}
}
bitflags::bitflags! {
/// Binary flags listing various ways the underlying platform does not conform to the WebGPU standard.
pub struct DownlevelFlags: u32 {
/// The device supports compiling and using compute shaders.
const COMPUTE_SHADERS = 0x0000_0001;
/// Supports creating storage images.
const STORAGE_IMAGES = 0x0000_0002;
/// Supports reading from a depth/stencil buffer while it is used as a read-only depth/stencil attachment.
const READ_ONLY_DEPTH_STENCIL = 0x0000_0004;
/// Supports:
/// - copy_image_to_image
/// - copy_buffer_to_image and copy_image_to_buffer with a buffer without a MAP_* usage
const DEVICE_LOCAL_IMAGE_COPIES = 0x0000_0008;
/// Supports textures with mipmaps which have a non power of two size.
const NON_POWER_OF_TWO_MIPMAPPED_TEXTURES = 0x0000_0010;
/// Supports samplers with anisotropic filtering
const ANISOTROPIC_FILTERING = 0x0000_0020;
/// All flags are in their compliant state.
const COMPLIANT = 0x0000_003F;
}
}
/// Collections of shader features a device supports if it supports less than WebGPU normally allows.
// TODO: Fill out the differences between shader models more completely
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum ShaderModel {
/// Extremely limited shaders, including a total instruction limit.
Sm2,
/// Missing minor features and storage images.
Sm4,
/// WebGPU supports shader model 5.
Sm5,
}
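A short sketch of how an application might act on these properties (assumed caller-side code, not part of this patch):

// Assumed helper: gate a compute-based code path on the reported downlevel support.
fn can_use_compute(props: wgt::DownlevelProperties) -> bool {
    if props.is_webgpu_compliant() {
        return true;
    }
    // Not fully compliant; check the individual capability instead.
    props.flags.contains(wgt::DownlevelFlags::COMPUTE_SHADERS)
}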
/// Supported physical device types.
#[repr(u8)]
#[derive(Clone, Debug, PartialEq)]
@ -749,6 +828,13 @@ impl BlendComponent {
operation: BlendOperation::Add,
};
/// Blend state of (1 * src) + ((1 - src_alpha) * dst)
pub const OVER: Self = BlendComponent {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
};
/// Returns true if the state relies on the constant color, which is
/// set independently on a render command encoder.
pub fn uses_color(&self) -> bool {
@ -782,6 +868,30 @@ pub struct BlendState {
pub alpha: BlendComponent,
}
impl BlendState {
/// Blend mode that does no color blending, just overwrites the output with the contents of the shader.
pub const REPLACE: Self = Self {
color: BlendComponent::REPLACE,
alpha: BlendComponent::REPLACE,
};
/// Blend mode that does standard alpha blending with non-premultiplied alpha.
pub const ALPHA_BLENDING: Self = Self {
color: BlendComponent {
src_factor: BlendFactor::SrcAlpha,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha: BlendComponent::OVER,
};
/// Blend mode that does standard alpha blending with premultiplied alpha.
pub const PREMULTIPLIED_ALPHA_BLENDING: Self = Self {
color: BlendComponent::OVER,
alpha: BlendComponent::OVER,
};
}
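For example (an assumed usage sketch), a caller can now pick a preset instead of spelling out both components:

// `premultiplied` is an assumed flag describing how the source texture was authored.
fn pick_blend(premultiplied: bool) -> wgt::BlendState {
    if premultiplied {
        wgt::BlendState::PREMULTIPLIED_ALPHA_BLENDING
    } else {
        wgt::BlendState::ALPHA_BLENDING
    }
}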
/// Describes the color state of a render pipeline.
#[repr(C)]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
@ -913,11 +1023,21 @@ pub struct PrimitiveState {
/// The face culling mode.
#[cfg_attr(any(feature = "trace", feature = "replay"), serde(default))]
pub cull_mode: Option<Face>,
/// If set to true, the polygon depth is clamped to 0-1 range instead of being clipped.
///
/// Enabling this requires `Features::DEPTH_CLAMPING` to be enabled.
#[cfg_attr(any(feature = "trace", feature = "replay"), serde(default))]
pub clamp_depth: bool,
/// Controls the way each polygon is rasterized. Can be either `Fill` (default), `Line` or `Point`
///
/// Setting this to something other than `Fill` requires `Features::NON_FILL_POLYGON_MODE` to be enabled.
#[cfg_attr(any(feature = "trace", feature = "replay"), serde(default))]
pub polygon_mode: PolygonMode,
/// If set to true, the primitives are rendered with conservative overestimation, i.e. any pixel touched by the primitive is filled.
/// Only valid for PolygonMode::Fill!
///
/// Enabling this requires `Features::CONSERVATIVE_RASTERIZATION` to be enabled.
pub conservative: bool,
}
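A hedged sketch of a PrimitiveState that opts into the new field (the concrete values are assumptions):

// Conservative rasterization requires Features::CONSERVATIVE_RASTERIZATION and is
// only valid together with PolygonMode::Fill.
let primitive = wgt::PrimitiveState {
    topology: wgt::PrimitiveTopology::TriangleList,
    strip_index_format: None,
    front_face: wgt::FrontFace::Ccw,
    cull_mode: Some(wgt::Face::Back),
    clamp_depth: false,
    polygon_mode: wgt::PolygonMode::Fill,
    conservative: true,
};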
/// Describes the multi-sampling state of a render pipeline.
@ -1636,11 +1756,6 @@ pub struct DepthStencilState {
/// Depth bias state.
#[cfg_attr(any(feature = "trace", feature = "replay"), serde(default))]
pub bias: DepthBiasState,
/// If enabled polygon depth is clamped to 0-1 range instead of being clipped.
///
/// Requires `Features::DEPTH_CLAMPING` enabled.
#[cfg_attr(any(feature = "trace", feature = "replay"), serde(default))]
pub clamp_depth: bool,
}
impl DepthStencilState {
@ -2482,23 +2597,23 @@ impl<T> Default for RenderBundleDescriptor<Option<T>> {
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct TextureDataLayout {
pub struct ImageDataLayout {
/// Offset into the buffer that is the start of the texture. Must be a multiple of texture block size.
/// For non-compressed textures, this is 1.
pub offset: BufferAddress,
/// Bytes per "row" of the image. This represents one row of pixels in the x direction. Compressed
/// textures include multiple rows of pixels in each "row". May be 0 for 1D texture copies.
/// textures include multiple rows of pixels in each "row".
/// Required if there are multiple rows (i.e. height or depth is more than one pixel or pixel block for compressed textures)
///
/// Must be a multiple of 256 for [`CommandEncoder::copy_buffer_to_texture`] and [`CommandEncoder::copy_texture_to_buffer`].
/// [`Queue::write_texture`] does not have this requirement.
///
/// Must be a multiple of the texture block size. For non-compressed textures, this is 1.
pub bytes_per_row: u32,
pub bytes_per_row: Option<NonZeroU32>,
/// Rows that make up a single "image". Each "image" is one layer in the z direction of a 3D image. May be larger
/// than `copy_size.y`.
///
/// May be 0 for 2D texture copies.
pub rows_per_image: u32,
/// Required if there are multiple images (i.e. the depth is more than one)
pub rows_per_image: Option<NonZeroU32>,
}
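A minimal sketch of filling this struct for a tightly packed, single-layer copy (the texture size and format are assumptions):

use std::num::NonZeroU32;

// Assumed example: one layer of a 64x64 Rgba8Unorm texture (4 bytes per texel), tightly packed.
let layout = wgt::ImageDataLayout {
    offset: 0,
    // 64 texels * 4 bytes = 256 bytes, which also meets COPY_BYTES_PER_ROW_ALIGNMENT for encoder copies.
    bytes_per_row: NonZeroU32::new(64 * 4),
    // Only one image is copied, so this can stay None.
    rows_per_image: None,
};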
/// Specific type of a buffer binding.
@ -2745,11 +2860,11 @@ pub struct BindGroupLayoutEntry {
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct BufferCopyView<B> {
pub struct ImageCopyBuffer<B> {
/// The buffer to be copied to/from.
pub buffer: B,
/// The layout of the texture data in this buffer.
pub layout: TextureDataLayout,
pub layout: ImageDataLayout,
}
/// View of a texture which can be used to copy to/from a buffer/texture.
@ -2757,7 +2872,7 @@ pub struct BufferCopyView<B> {
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct TextureCopyView<T> {
pub struct ImageCopyTexture<T> {
/// The texture to be copied to/from.
pub texture: T,
/// The target mip level of the texture.
@ -2822,12 +2937,12 @@ bitflags::bitflags! {
///
/// The amount of values written when resolved depends
/// on the amount of flags. If 3 flags are enabled, 3
/// 64-bit values will be writen per-query.
/// 64-bit values will be written per-query.
///
/// The order they are written is the order they are declared
/// in this bitflags. If you enabled `CLIPPER_PRIMITIVES_OUT`
/// and `COMPUTE_SHADER_INVOCATIONS`, it would write 16 bytes,
/// the first 8 bytes being the primative out value, the last 8
/// the first 8 bytes being the primitive out value, the last 8
/// bytes being the compute shader invocation count.
#[repr(transparent)]
#[cfg_attr(feature = "trace", derive(Serialize))]
@ -2848,7 +2963,7 @@ bitflags::bitflags! {
/// derivatives.
const FRAGMENT_SHADER_INVOCATIONS = 0x08;
/// Amount of times a compute shader is invoked. This will
/// be equivilent to the dispatch count times the workgroup size.
/// be equivalent to the dispatch count times the workgroup size.
const COMPUTE_SHADER_INVOCATIONS = 0x10;
}
}

View file

@ -135,6 +135,7 @@ pub struct PrimitiveState<'a> {
front_face: wgt::FrontFace,
cull_mode: Option<&'a wgt::Face>,
polygon_mode: wgt::PolygonMode,
clamp_depth: bool,
}
impl PrimitiveState<'_> {
@ -145,6 +146,8 @@ impl PrimitiveState<'_> {
front_face: self.front_face.clone(),
cull_mode: self.cull_mode.cloned(),
polygon_mode: self.polygon_mode,
clamp_depth: self.clamp_depth,
conservative: false,
}
}
}
@ -248,7 +251,7 @@ pub struct TextureViewDescriptor<'a> {
dimension: Option<&'a wgt::TextureViewDimension>,
aspect: wgt::TextureAspect,
base_mip_level: u32,
level_count: Option<NonZeroU32>,
mip_level_count: Option<NonZeroU32>,
base_array_layer: u32,
array_layer_count: Option<NonZeroU32>,
}
@ -524,7 +527,7 @@ pub extern "C" fn wgpu_client_create_texture_view(
dimension: desc.dimension.cloned(),
aspect: desc.aspect,
base_mip_level: desc.base_mip_level,
level_count: desc.level_count,
mip_level_count: desc.mip_level_count,
base_array_layer: desc.base_array_layer,
array_layer_count: desc.array_layer_count,
};
@ -636,9 +639,9 @@ pub unsafe extern "C" fn wgpu_compute_pass_destroy(pass: *mut wgc::command::Comp
#[repr(C)]
pub struct RenderPassDescriptor {
pub label: RawString,
pub color_attachments: *const wgc::command::ColorAttachmentDescriptor,
pub color_attachments: *const wgc::command::RenderPassColorAttachment,
pub color_attachments_length: usize,
pub depth_stencil_attachment: *const wgc::command::DepthStencilAttachmentDescriptor,
pub depth_stencil_attachment: *const wgc::command::RenderPassDepthStencilAttachment,
}
#[no_mangle]
@ -955,8 +958,8 @@ pub unsafe extern "C" fn wgpu_command_encoder_copy_buffer_to_buffer(
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_copy_texture_to_buffer(
src: wgc::command::TextureCopyView,
dst: wgc::command::BufferCopyView,
src: wgc::command::ImageCopyTexture,
dst: wgc::command::ImageCopyBuffer,
size: wgt::Extent3d,
bb: &mut ByteBuf,
) {
@ -966,8 +969,8 @@ pub unsafe extern "C" fn wgpu_command_encoder_copy_texture_to_buffer(
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_copy_buffer_to_texture(
src: wgc::command::BufferCopyView,
dst: wgc::command::TextureCopyView,
src: wgc::command::ImageCopyBuffer,
dst: wgc::command::ImageCopyTexture,
size: wgt::Extent3d,
bb: &mut ByteBuf,
) {
@ -977,8 +980,8 @@ pub unsafe extern "C" fn wgpu_command_encoder_copy_buffer_to_texture(
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_copy_texture_to_texture(
src: wgc::command::TextureCopyView,
dst: wgc::command::TextureCopyView,
src: wgc::command::ImageCopyTexture,
dst: wgc::command::ImageCopyTexture,
size: wgt::Extent3d,
bb: &mut ByteBuf,
) {
@ -1009,8 +1012,8 @@ pub unsafe extern "C" fn wgpu_queue_write_buffer(
#[no_mangle]
pub unsafe extern "C" fn wgpu_queue_write_texture(
dst: wgt::TextureCopyView<id::TextureId>,
layout: wgt::TextureDataLayout,
dst: wgt::ImageCopyTexture<id::TextureId>,
layout: wgt::ImageDataLayout,
size: wgt::Extent3d,
bb: &mut ByteBuf,
) {

View file

@ -126,8 +126,8 @@ enum QueueWriteAction {
offset: wgt::BufferAddress,
},
Texture {
dst: wgt::TextureCopyView<id::TextureId>,
layout: wgt::TextureDataLayout,
dst: wgt::ImageCopyTexture<id::TextureId>,
layout: wgt::ImageDataLayout,
size: wgt::Extent3d,
},
}

View file

@ -215,7 +215,7 @@ pub unsafe extern "C" fn wgpu_server_buffer_get_mapped_range(
start,
size
))
.unwrap()
.unwrap().0
}
#[no_mangle]
@ -556,8 +556,8 @@ pub extern "C" fn wgpu_server_command_buffer_drop(global: &Global, self_id: id::
pub unsafe extern "C" fn wgpu_server_encoder_copy_texture_to_buffer(
global: &Global,
self_id: id::CommandEncoderId,
source: &wgc::command::TextureCopyView,
destination: &wgc::command::BufferCopyView,
source: &wgc::command::ImageCopyTexture,
destination: &wgc::command::ImageCopyBuffer,
size: &wgt::Extent3d,
) {
gfx_select!(self_id => global.command_encoder_copy_texture_to_buffer(self_id, source, destination, size)).unwrap();

2
third_party/rust/ash/.cargo-checksum.json vendored

File diff suppressed because one or more lines are too long

4
third_party/rust/ash/Cargo.toml vendored
View file

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "ash"
version = "0.31.0"
version = "0.32.1"
authors = ["maik klein <maikklein@googlemail.com>"]
description = "Vulkan bindings for Rust"
documentation = "https://docs.rs/ash"
@ -24,7 +24,7 @@ repository = "https://github.com/MaikKlein/ash"
[package.metadata.release]
no-dev-version = true
[dependencies.libloading]
version = "0.6.1"
version = "0.7"
optional = true
[features]

672
third_party/rust/ash/src/device.rs vendored
View file

@ -61,16 +61,14 @@ pub trait DeviceV1_2: DeviceV1_1 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::RenderPass> {
let mut renderpass = mem::zeroed();
let err_code = self.fp_v1_2().create_render_pass2(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut renderpass,
);
match err_code {
vk::Result::SUCCESS => Ok(renderpass),
_ => Err(err_code),
}
self.fp_v1_2()
.create_render_pass2(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut renderpass,
)
.result_with_success(renderpass)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBeginRenderPass2.html>"]
@ -122,13 +120,9 @@ pub trait DeviceV1_2: DeviceV1_1 {
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetSemaphoreCounterValue.html>"]
unsafe fn get_semaphore_counter_value(&self, semaphore: vk::Semaphore) -> VkResult<u64> {
let mut value = 0;
let err_code =
self.fp_v1_2()
.get_semaphore_counter_value(self.handle(), semaphore, &mut value);
match err_code {
vk::Result::SUCCESS => Ok(value),
_ => Err(err_code),
}
self.fp_v1_2()
.get_semaphore_counter_value(self.handle(), semaphore, &mut value)
.result_with_success(value)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkWaitSemaphores.html>"]
@ -137,22 +131,16 @@ pub trait DeviceV1_2: DeviceV1_1 {
wait_info: &vk::SemaphoreWaitInfo,
timeout: u64,
) -> VkResult<()> {
let err_code = self
.fp_v1_2()
.wait_semaphores(self.handle(), wait_info, timeout);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_2()
.wait_semaphores(self.handle(), wait_info, timeout)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkSignalSemaphore.html>"]
unsafe fn signal_semaphore(&self, signal_info: &vk::SemaphoreSignalInfo) -> VkResult<()> {
let err_code = self.fp_v1_2().signal_semaphore(self.handle(), signal_info);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_2()
.signal_semaphore(self.handle(), signal_info)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetBufferDeviceAddress.html>"]
@ -186,28 +174,16 @@ pub trait DeviceV1_1: DeviceV1_0 {
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkBindBufferMemory2.html>"]
unsafe fn bind_buffer_memory2(&self, bind_infos: &[vk::BindBufferMemoryInfo]) -> VkResult<()> {
let err_code = self.fp_v1_1().bind_buffer_memory2(
self.handle(),
bind_infos.len() as _,
bind_infos.as_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_1()
.bind_buffer_memory2(self.handle(), bind_infos.len() as _, bind_infos.as_ptr())
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkBindImageMemory2.html>"]
unsafe fn bind_image_memory2(&self, bind_infos: &[vk::BindImageMemoryInfo]) -> VkResult<()> {
let err_code = self.fp_v1_1().bind_image_memory2(
self.handle(),
bind_infos.len() as _,
bind_infos.as_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_1()
.bind_image_memory2(self.handle(), bind_infos.len() as _, bind_infos.as_ptr())
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetDeviceGroupPeerMemoryFeatures.html>"]
@ -322,16 +298,14 @@ pub trait DeviceV1_1: DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SamplerYcbcrConversion> {
let mut ycbcr_conversion = mem::zeroed();
let err_code = self.fp_v1_1().create_sampler_ycbcr_conversion(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut ycbcr_conversion,
);
match err_code {
vk::Result::SUCCESS => Ok(ycbcr_conversion),
_ => Err(err_code),
}
self.fp_v1_1()
.create_sampler_ycbcr_conversion(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut ycbcr_conversion,
)
.result_with_success(ycbcr_conversion)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDestroySamplerYcbcrConversion.html>"]
@ -354,16 +328,14 @@ pub trait DeviceV1_1: DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::DescriptorUpdateTemplate> {
let mut descriptor_update_template = mem::zeroed();
let err_code = self.fp_v1_1().create_descriptor_update_template(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut descriptor_update_template,
);
match err_code {
vk::Result::SUCCESS => Ok(descriptor_update_template),
_ => Err(err_code),
}
self.fp_v1_1()
.create_descriptor_update_template(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut descriptor_update_template,
)
.result_with_success(descriptor_update_template)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDestroyDescriptorUpdateTemplate.html>"]
@ -456,16 +428,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::Event> {
let mut event = mem::zeroed();
let err_code = self.fp_v1_0().create_event(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut event,
);
match err_code {
vk::Result::SUCCESS => Ok(event),
_ => Err(err_code),
}
self.fp_v1_0()
.create_event(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut event,
)
.result_with_success(event)
}
/// Returns true if the event was set, and false if the event was reset, otherwise it will
@ -482,20 +452,12 @@ pub trait DeviceV1_0 {
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkSetEvent.html>"]
unsafe fn set_event(&self, event: vk::Event) -> VkResult<()> {
let err_code = self.fp_v1_0().set_event(self.handle(), event);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0().set_event(self.handle(), event).into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkResetEvent.html>"]
unsafe fn reset_event(&self, event: vk::Event) -> VkResult<()> {
let err_code = self.fp_v1_0().reset_event(self.handle(), event);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0().reset_event(self.handle(), event).into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdSetEvent.html>"]
unsafe fn cmd_set_event(
@ -736,13 +698,15 @@ pub trait DeviceV1_0 {
&self,
pool: vk::DescriptorPool,
descriptor_sets: &[vk::DescriptorSet],
) {
self.fp_v1_0().free_descriptor_sets(
self.handle(),
pool,
descriptor_sets.len() as u32,
descriptor_sets.as_ptr(),
);
) -> VkResult<()> {
self.fp_v1_0()
.free_descriptor_sets(
self.handle(),
pool,
descriptor_sets.len() as u32,
descriptor_sets.as_ptr(),
)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkUpdateDescriptorSets.html>"]
@ -767,16 +731,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::Sampler> {
let mut sampler = mem::zeroed();
let err_code = self.fp_v1_0().create_sampler(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut sampler,
);
match err_code {
vk::Result::SUCCESS => Ok(sampler),
_ => Err(err_code),
}
self.fp_v1_0()
.create_sampler(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut sampler,
)
.result_with_success(sampler)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBlitImage.html>"]
@ -942,10 +904,7 @@ pub trait DeviceV1_0 {
);
desc_set.set_len(create_info.descriptor_set_count as usize);
match err_code {
vk::Result::SUCCESS => Ok(desc_set),
_ => Err(err_code),
}
err_code.result_with_success(desc_set)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateDescriptorSetLayout.html>"]
@ -955,25 +914,19 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::DescriptorSetLayout> {
let mut layout = mem::zeroed();
let err_code = self.fp_v1_0().create_descriptor_set_layout(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut layout,
);
match err_code {
vk::Result::SUCCESS => Ok(layout),
_ => Err(err_code),
}
self.fp_v1_0()
.create_descriptor_set_layout(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut layout,
)
.result_with_success(layout)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDeviceWaitIdle.html>"]
unsafe fn device_wait_idle(&self) -> VkResult<()> {
let err_code = self.fp_v1_0().device_wait_idle(self.handle());
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0().device_wait_idle(self.handle()).into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateDescriptorPool.html>"]
@ -983,16 +936,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::DescriptorPool> {
let mut pool = mem::zeroed();
let err_code = self.fp_v1_0().create_descriptor_pool(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pool,
);
match err_code {
vk::Result::SUCCESS => Ok(pool),
_ => Err(err_code),
}
self.fp_v1_0()
.create_descriptor_pool(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pool,
)
.result_with_success(pool)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkResetDescriptorPool.html>"]
@ -1001,13 +952,9 @@ pub trait DeviceV1_0 {
pool: vk::DescriptorPool,
flags: vk::DescriptorPoolResetFlags,
) -> VkResult<()> {
let err_code = self
.fp_v1_0()
.reset_descriptor_pool(self.handle(), pool, flags);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.reset_descriptor_pool(self.handle(), pool, flags)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkResetCommandPool.html>"]
@ -1016,13 +963,9 @@ pub trait DeviceV1_0 {
command_pool: vk::CommandPool,
flags: vk::CommandPoolResetFlags,
) -> VkResult<()> {
let err_code = self
.fp_v1_0()
.reset_command_pool(self.handle(), command_pool, flags);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.reset_command_pool(self.handle(), command_pool, flags)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkResetCommandBuffer.html>"]
@ -1031,22 +974,16 @@ pub trait DeviceV1_0 {
command_buffer: vk::CommandBuffer,
flags: vk::CommandBufferResetFlags,
) -> VkResult<()> {
let err_code = self.fp_v1_0().reset_command_buffer(command_buffer, flags);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.reset_command_buffer(command_buffer, flags)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkResetFences.html>"]
unsafe fn reset_fences(&self, fences: &[vk::Fence]) -> VkResult<()> {
let err_code =
self.fp_v1_0()
.reset_fences(self.handle(), fences.len() as u32, fences.as_ptr());
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.reset_fences(self.handle(), fences.len() as u32, fences.as_ptr())
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBindIndexBuffer.html>"]
@ -1451,30 +1388,23 @@ pub trait DeviceV1_0 {
flags: vk::QueryResultFlags,
) -> VkResult<()> {
let data_length = query_count as usize;
assert!(
mem::size_of::<T>() <= mem::size_of::<u64>(),
"T can not be bigger than an u64"
);
assert!(
data_length <= data.len(),
"query_count was higher than the length of the slice"
);
let data_size = mem::size_of::<T>() * data_length;
let err_code = self.fp_v1_0().get_query_pool_results(
self.handle(),
query_pool,
first_query,
query_count,
data_size,
data.as_mut_ptr() as *mut _,
mem::size_of::<T>() as _,
flags,
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.get_query_pool_results(
self.handle(),
query_pool,
first_query,
query_count,
data_size,
data.as_mut_ptr() as *mut _,
mem::size_of::<T>() as _,
flags,
)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBeginQuery.html>"]
@ -1531,16 +1461,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::Semaphore> {
let mut semaphore = mem::zeroed();
let err_code = self.fp_v1_0().create_semaphore(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut semaphore,
);
match err_code {
vk::Result::SUCCESS => Ok(semaphore),
_ => Err(err_code),
}
self.fp_v1_0()
.create_semaphore(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut semaphore,
)
.result_with_success(semaphore)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateGraphicsPipelines.html>"]
@ -1596,16 +1524,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::Buffer> {
let mut buffer = mem::zeroed();
let err_code = self.fp_v1_0().create_buffer(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut buffer,
);
match err_code {
vk::Result::SUCCESS => Ok(buffer),
_ => Err(err_code),
}
self.fp_v1_0()
.create_buffer(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut buffer,
)
.result_with_success(buffer)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreatePipelineLayout.html>"]
@ -1615,16 +1541,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::PipelineLayout> {
let mut pipeline_layout = mem::zeroed();
let err_code = self.fp_v1_0().create_pipeline_layout(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pipeline_layout,
);
match err_code {
vk::Result::SUCCESS => Ok(pipeline_layout),
_ => Err(err_code),
}
self.fp_v1_0()
.create_pipeline_layout(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pipeline_layout,
)
.result_with_success(pipeline_layout)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreatePipelineCache.html>"]
@ -1634,17 +1558,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::PipelineCache> {
let mut pipeline_cache = mem::zeroed();
let err_code = self.fp_v1_0().create_pipeline_cache(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pipeline_cache,
);
match err_code {
vk::Result::SUCCESS => Ok(pipeline_cache),
_ => Err(err_code),
}
self.fp_v1_0()
.create_pipeline_cache(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pipeline_cache,
)
.result_with_success(pipeline_cache)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPipelineCacheData.html>"]
@ -1653,15 +1574,14 @@ pub trait DeviceV1_0 {
pipeline_cache: vk::PipelineCache,
) -> VkResult<Vec<u8>> {
let mut data_size: usize = 0;
let err_code = self.fp_v1_0().get_pipeline_cache_data(
self.handle(),
pipeline_cache,
&mut data_size,
ptr::null_mut(),
);
if err_code != vk::Result::SUCCESS {
return Err(err_code);
};
self.fp_v1_0()
.get_pipeline_cache_data(
self.handle(),
pipeline_cache,
&mut data_size,
ptr::null_mut(),
)
.result()?;
let mut data: Vec<u8> = Vec::with_capacity(data_size);
let err_code = self.fp_v1_0().get_pipeline_cache_data(
self.handle(),
@ -1670,10 +1590,7 @@ pub trait DeviceV1_0 {
data.as_mut_ptr() as _,
);
data.set_len(data_size);
match err_code {
vk::Result::SUCCESS => Ok(data),
_ => Err(err_code),
}
err_code.result_with_success(data)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkMapMemory.html>"]
@ -1685,13 +1602,9 @@ pub trait DeviceV1_0 {
flags: vk::MemoryMapFlags,
) -> VkResult<*mut c_void> {
let mut data: *mut c_void = ptr::null_mut();
let err_code =
self.fp_v1_0()
.map_memory(self.handle(), memory, offset, size, flags, &mut data);
match err_code {
vk::Result::SUCCESS => Ok(data),
_ => Err(err_code),
}
self.fp_v1_0()
.map_memory(self.handle(), memory, offset, size, flags, &mut data)
.result_with_success(data)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkUnmapMemory.html>"]
@ -1704,28 +1617,16 @@ pub trait DeviceV1_0 {
&self,
ranges: &[vk::MappedMemoryRange],
) -> VkResult<()> {
let err_code = self.fp_v1_0().invalidate_mapped_memory_ranges(
self.handle(),
ranges.len() as u32,
ranges.as_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.invalidate_mapped_memory_ranges(self.handle(), ranges.len() as u32, ranges.as_ptr())
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkFlushMappedMemoryRanges.html>"]
unsafe fn flush_mapped_memory_ranges(&self, ranges: &[vk::MappedMemoryRange]) -> VkResult<()> {
let err_code = self.fp_v1_0().flush_mapped_memory_ranges(
self.handle(),
ranges.len() as u32,
ranges.as_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.flush_mapped_memory_ranges(self.handle(), ranges.len() as u32, ranges.as_ptr())
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateFramebuffer.html>"]
@ -1735,16 +1636,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::Framebuffer> {
let mut framebuffer = mem::zeroed();
let err_code = self.fp_v1_0().create_framebuffer(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut framebuffer,
);
match err_code {
vk::Result::SUCCESS => Ok(framebuffer),
_ => Err(err_code),
}
self.fp_v1_0()
.create_framebuffer(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut framebuffer,
)
.result_with_success(framebuffer)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetDeviceQueue.html>"]
@ -1787,16 +1686,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::RenderPass> {
let mut renderpass = mem::zeroed();
let err_code = self.fp_v1_0().create_render_pass(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut renderpass,
);
match err_code {
vk::Result::SUCCESS => Ok(renderpass),
_ => Err(err_code),
}
self.fp_v1_0()
.create_render_pass(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut renderpass,
)
.result_with_success(renderpass)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkBeginCommandBuffer.html>"]
@ -1805,22 +1702,14 @@ pub trait DeviceV1_0 {
command_buffer: vk::CommandBuffer,
begin_info: &vk::CommandBufferBeginInfo,
) -> VkResult<()> {
let err_code = self
.fp_v1_0()
.begin_command_buffer(command_buffer, begin_info);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.begin_command_buffer(command_buffer, begin_info)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkEndCommandBuffer.html>"]
unsafe fn end_command_buffer(&self, command_buffer: vk::CommandBuffer) -> VkResult<()> {
let err_code = self.fp_v1_0().end_command_buffer(command_buffer);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0().end_command_buffer(command_buffer).into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkWaitForFences.html>"]
@ -1830,17 +1719,15 @@ pub trait DeviceV1_0 {
wait_all: bool,
timeout: u64,
) -> VkResult<()> {
let err_code = self.fp_v1_0().wait_for_fences(
self.handle(),
fences.len() as u32,
fences.as_ptr(),
wait_all as u32,
timeout,
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.wait_for_fences(
self.handle(),
fences.len() as u32,
fences.as_ptr(),
wait_all as u32,
timeout,
)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetFenceStatus.html>"]
@ -1855,11 +1742,7 @@ pub trait DeviceV1_0 {
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkQueueWaitIdle.html>"]
unsafe fn queue_wait_idle(&self, queue: vk::Queue) -> VkResult<()> {
let err_code = self.fp_v1_0().queue_wait_idle(queue);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0().queue_wait_idle(queue).into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkQueueSubmit.html>"]
@ -1869,13 +1752,9 @@ pub trait DeviceV1_0 {
submits: &[vk::SubmitInfo],
fence: vk::Fence,
) -> VkResult<()> {
let err_code =
self.fp_v1_0()
.queue_submit(queue, submits.len() as u32, submits.as_ptr(), fence);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.queue_submit(queue, submits.len() as u32, submits.as_ptr(), fence)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateBufferView.html>"]
@ -1885,16 +1764,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::BufferView> {
let mut buffer_view = mem::zeroed();
let err_code = self.fp_v1_0().create_buffer_view(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut buffer_view,
);
match err_code {
vk::Result::SUCCESS => Ok(buffer_view),
_ => Err(err_code),
}
self.fp_v1_0()
.create_buffer_view(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut buffer_view,
)
.result_with_success(buffer_view)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDestroyBufferView.html>"]
@ -1917,16 +1794,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::ImageView> {
let mut image_view = mem::zeroed();
let err_code = self.fp_v1_0().create_image_view(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut image_view,
);
match err_code {
vk::Result::SUCCESS => Ok(image_view),
_ => Err(err_code),
}
self.fp_v1_0()
.create_image_view(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut image_view,
)
.result_with_success(image_view)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkAllocateCommandBuffers.html>"]
@ -1941,10 +1816,7 @@ pub trait DeviceV1_0 {
buffers.as_mut_ptr(),
);
buffers.set_len(create_info.command_buffer_count as usize);
match err_code {
vk::Result::SUCCESS => Ok(buffers),
_ => Err(err_code),
}
err_code.result_with_success(buffers)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateCommandPool.html>"]
@ -1954,16 +1826,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::CommandPool> {
let mut pool = mem::zeroed();
let err_code = self.fp_v1_0().create_command_pool(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pool,
);
match err_code {
vk::Result::SUCCESS => Ok(pool),
_ => Err(err_code),
}
self.fp_v1_0()
.create_command_pool(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pool,
)
.result_with_success(pool)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateQueryPool.html>"]
@ -1973,16 +1843,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::QueryPool> {
let mut pool = mem::zeroed();
let err_code = self.fp_v1_0().create_query_pool(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pool,
);
match err_code {
vk::Result::SUCCESS => Ok(pool),
_ => Err(err_code),
}
self.fp_v1_0()
.create_query_pool(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut pool,
)
.result_with_success(pool)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateImage.html>"]
@ -1992,16 +1860,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::Image> {
let mut image = mem::zeroed();
let err_code = self.fp_v1_0().create_image(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut image,
);
match err_code {
vk::Result::SUCCESS => Ok(image),
_ => Err(err_code),
}
self.fp_v1_0()
.create_image(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut image,
)
.result_with_success(image)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetImageSubresourceLayout.html>"]
@ -2043,16 +1909,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::DeviceMemory> {
let mut memory = mem::zeroed();
let err_code = self.fp_v1_0().allocate_memory(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut memory,
);
match err_code {
vk::Result::SUCCESS => Ok(memory),
_ => Err(err_code),
}
self.fp_v1_0()
.allocate_memory(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut memory,
)
.result_with_success(memory)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateShaderModule.html>"]
@ -2062,16 +1926,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::ShaderModule> {
let mut shader = mem::zeroed();
let err_code = self.fp_v1_0().create_shader_module(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut shader,
);
match err_code {
vk::Result::SUCCESS => Ok(shader),
_ => Err(err_code),
}
self.fp_v1_0()
.create_shader_module(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut shader,
)
.result_with_success(shader)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateFence.html>"]
@ -2081,16 +1943,14 @@ pub trait DeviceV1_0 {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::Fence> {
let mut fence = mem::zeroed();
let err_code = self.fp_v1_0().create_fence(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut fence,
);
match err_code {
vk::Result::SUCCESS => Ok(fence),
_ => Err(err_code),
}
self.fp_v1_0()
.create_fence(
self.handle(),
create_info,
allocation_callbacks.as_raw_ptr(),
&mut fence,
)
.result_with_success(fence)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkBindBufferMemory.html>"]
@ -2100,13 +1960,9 @@ pub trait DeviceV1_0 {
device_memory: vk::DeviceMemory,
offset: vk::DeviceSize,
) -> VkResult<()> {
let err_code =
self.fp_v1_0()
.bind_buffer_memory(self.handle(), buffer, device_memory, offset);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.bind_buffer_memory(self.handle(), buffer, device_memory, offset)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkBindImageMemory.html>"]
@ -2116,13 +1972,9 @@ pub trait DeviceV1_0 {
device_memory: vk::DeviceMemory,
offset: vk::DeviceSize,
) -> VkResult<()> {
let err_code =
self.fp_v1_0()
.bind_image_memory(self.handle(), image, device_memory, offset);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.fp_v1_0()
.bind_image_memory(self.handle(), image, device_memory, offset)
.into()
}
}

53
third_party/rust/ash/src/entry.rs vendored
View file

@ -58,17 +58,14 @@ pub trait EntryV1_0 {
unsafe {
let mut num = 0;
self.fp_v1_0()
.enumerate_instance_layer_properties(&mut num, ptr::null_mut());
.enumerate_instance_layer_properties(&mut num, ptr::null_mut())
.result()?;
let mut v = Vec::with_capacity(num as usize);
let err_code = self
.fp_v1_0()
.enumerate_instance_layer_properties(&mut num, v.as_mut_ptr());
v.set_len(num as usize);
match err_code {
vk::Result::SUCCESS => Ok(v),
_ => Err(err_code),
}
err_code.result_with_success(v)
}
}
@ -76,11 +73,9 @@ pub trait EntryV1_0 {
fn enumerate_instance_extension_properties(&self) -> VkResult<Vec<vk::ExtensionProperties>> {
unsafe {
let mut num = 0;
self.fp_v1_0().enumerate_instance_extension_properties(
ptr::null(),
&mut num,
ptr::null_mut(),
);
self.fp_v1_0()
.enumerate_instance_extension_properties(ptr::null(), &mut num, ptr::null_mut())
.result()?;
let mut data = Vec::with_capacity(num as usize);
let err_code = self.fp_v1_0().enumerate_instance_extension_properties(
ptr::null(),
@ -88,10 +83,7 @@ pub trait EntryV1_0 {
data.as_mut_ptr(),
);
data.set_len(num as usize);
match err_code {
vk::Result::SUCCESS => Ok(data),
_ => Err(err_code),
}
err_code.result_with_success(data)
}
}
@ -119,14 +111,14 @@ impl<L> EntryV1_0 for EntryCustom<L> {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> Result<Self::Instance, InstanceError> {
let mut instance: vk::Instance = mem::zeroed();
let err_code = self.fp_v1_0().create_instance(
create_info,
allocation_callbacks.as_raw_ptr(),
&mut instance,
);
if err_code != vk::Result::SUCCESS {
return Err(InstanceError::VkError(err_code));
}
self.fp_v1_0()
.create_instance(
create_info,
allocation_callbacks.as_raw_ptr(),
&mut instance,
)
.result()
.map_err(InstanceError::VkError)?;
Ok(Instance::load(&self.static_fn, instance))
}
fn fp_v1_0(&self) -> &vk::EntryFnV1_0 {
@ -146,11 +138,9 @@ pub trait EntryV1_1: EntryV1_0 {
fn enumerate_instance_version(&self) -> VkResult<u32> {
unsafe {
let mut api_version = 0;
let err_code = self.fp_v1_1().enumerate_instance_version(&mut api_version);
match err_code {
vk::Result::SUCCESS => Ok(api_version),
_ => Err(err_code),
}
self.fp_v1_1()
.enumerate_instance_version(&mut api_version)
.result_with_success(api_version)
}
}
}
@ -228,11 +218,8 @@ impl<L> EntryCustom<L> {
)
};
if let Some(enumerate_instance_version) = enumerate_instance_version {
let err_code = (enumerate_instance_version)(&mut api_version);
match err_code {
vk::Result::SUCCESS => Ok(Some(api_version)),
_ => Err(err_code),
}
(enumerate_instance_version)(&mut api_version)
.result_with_success(Some(api_version))
} else {
Ok(None)
}

24
third_party/rust/ash/src/entry_libloading.rs vendored
View file

@ -1,6 +1,7 @@
use crate::entry::EntryCustom;
use libloading::Library;
use std::error::Error;
use std::ffi::OsStr;
use std::fmt;
use std::ptr;
use std::sync::Arc;
@ -39,6 +40,12 @@ impl Error for LoadingError {
}
impl EntryCustom<Arc<Library>> {
/// Load default Vulkan library for the current platform
///
/// # Safety
/// `dlopen`ing native libraries is inherently unsafe. The safety guidelines
/// for [`Library::new`] and [`Library::get`] apply here.
///
/// ```rust,no_run
/// use ash::{vk, Entry, version::EntryV1_0};
/// # fn main() -> Result<(), Box<std::error::Error>> {
@ -54,12 +61,19 @@ impl EntryCustom<Arc<Library>> {
/// let instance = unsafe { entry.create_instance(&create_info, None)? };
/// # Ok(()) }
/// ```
pub fn new() -> Result<Entry, LoadingError> {
let lib = Library::new(&LIB_PATH)
.map_err(LoadingError)
.map(Arc::new)?;
pub unsafe fn new() -> Result<Entry, LoadingError> {
Self::with_library(&LIB_PATH)
}
Ok(Self::new_custom(lib, |vk_lib, name| unsafe {
/// Load Vulkan library at `path`
///
/// # Safety
/// `dlopen`ing native libraries is inherently unsafe. The safety guidelines
/// for [`Library::new`] and [`Library::get`] apply here.
pub unsafe fn with_library(path: &impl AsRef<OsStr>) -> Result<Entry, LoadingError> {
let lib = Library::new(path).map_err(LoadingError).map(Arc::new)?;
Ok(Self::new_custom(lib, |vk_lib, name| {
vk_lib
.get(name.to_bytes_with_nul())
.map(|symbol| *symbol)
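With the ash 0.32 update above, constructing the default `Entry` is now an `unsafe` operation because it `dlopen`s the system Vulkan library, and the new `with_library` lets callers name a specific library path. A hedged caller-side sketch (the library path is an assumption, and this is not code from the patch):

```rust
use ash::{version::EntryV1_0, Entry};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // SAFETY: loading a native library is inherently unsafe; the
    // Library::new / Library::get guidelines referenced in the new docs apply.
    let entry = unsafe { Entry::with_library(&"libvulkan.so.1")? };
    if let Ok(extensions) = entry.enumerate_instance_extension_properties() {
        println!("{} instance extensions available", extensions.len());
    }
    Ok(())
}
```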

View file

@ -28,13 +28,9 @@ impl DebugMarker {
device: vk::Device,
name_info: &vk::DebugMarkerObjectNameInfoEXT,
) -> VkResult<()> {
let err_code = self
.debug_marker_fn
.debug_marker_set_object_name_ext(device, name_info);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.debug_marker_fn
.debug_marker_set_object_name_ext(device, name_info)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdDebugMarkerBeginEXT.html>"]

View file

@ -47,16 +47,14 @@ impl DebugReport {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::DebugReportCallbackEXT> {
let mut debug_cb = mem::zeroed();
let err_code = self.debug_report_fn.create_debug_report_callback_ext(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut debug_cb,
);
match err_code {
vk::Result::SUCCESS => Ok(debug_cb),
_ => Err(err_code),
}
self.debug_report_fn
.create_debug_report_callback_ext(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut debug_cb,
)
.result_with_success(debug_cb)
}
pub fn fp(&self) -> &vk::ExtDebugReportFn {

View file

@ -32,13 +32,9 @@ impl DebugUtils {
device: vk::Device,
name_info: &vk::DebugUtilsObjectNameInfoEXT,
) -> VkResult<()> {
let err_code = self
.debug_utils_fn
.set_debug_utils_object_name_ext(device, name_info);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.debug_utils_fn
.set_debug_utils_object_name_ext(device, name_info)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkSetDebugUtilsObjectTagEXT.html>"]
@ -47,13 +43,9 @@ impl DebugUtils {
device: vk::Device,
tag_info: &vk::DebugUtilsObjectTagInfoEXT,
) -> VkResult<()> {
let err_code = self
.debug_utils_fn
.set_debug_utils_object_tag_ext(device, tag_info);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.debug_utils_fn
.set_debug_utils_object_tag_ext(device, tag_info)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBeginDebugUtilsLabelEXT.html>"]
@ -114,16 +106,14 @@ impl DebugUtils {
allocator: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::DebugUtilsMessengerEXT> {
let mut messenger = mem::zeroed();
let err_code = self.debug_utils_fn.create_debug_utils_messenger_ext(
self.handle,
create_info,
allocator.as_raw_ptr(),
&mut messenger,
);
match err_code {
vk::Result::SUCCESS => Ok(messenger),
_ => Err(err_code),
}
self.debug_utils_fn
.create_debug_utils_messenger_ext(
self.handle,
create_info,
allocator.as_raw_ptr(),
&mut messenger,
)
.result_with_success(messenger)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDestroyDebugUtilsMessengerEXT.html>"]

View file

@ -34,16 +34,14 @@ impl MetalSurface {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SurfaceKHR> {
let mut surface = mem::zeroed();
let err_code = self.metal_surface_fn.create_metal_surface_ext(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
);
match err_code {
vk::Result::SUCCESS => Ok(surface),
_ => Err(err_code),
}
self.metal_surface_fn
.create_metal_surface_ext(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
)
.result_with_success(surface)
}
pub fn fp(&self) -> &vk::ExtMetalSurfaceFn {

View file

@ -1,9 +1,15 @@
#[allow(deprecated)]
pub use self::debug_marker::DebugMarker;
#[allow(deprecated)]
pub use self::debug_report::DebugReport;
pub use self::debug_utils::DebugUtils;
pub use self::metal_surface::MetalSurface;
pub use self::tooling_info::ToolingInfo;
#[deprecated(note = "Please use the [DebugUtils](struct.DebugUtils.html) extension instead.")]
mod debug_marker;
#[deprecated(note = "Please use the [DebugUtils](struct.DebugUtils.html) extension instead.")]
mod debug_report;
mod debug_utils;
mod metal_surface;
mod tooling_info;

54
third_party/rust/ash/src/extensions/ext/tooling_info.rs vendored Normal file
View file

@ -0,0 +1,54 @@
#![allow(dead_code)]
use crate::prelude::*;
use crate::version::{EntryV1_0, InstanceV1_0};
use crate::vk;
use std::ffi::CStr;
use std::mem;
use std::ptr;
#[derive(Clone)]
pub struct ToolingInfo {
handle: vk::Instance,
tooling_info_fn: vk::ExtToolingInfoFn,
}
impl ToolingInfo {
pub fn new<E: EntryV1_0, I: InstanceV1_0>(entry: &E, instance: &I) -> ToolingInfo {
let tooling_info_fn = vk::ExtToolingInfoFn::load(|name| unsafe {
mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr()))
});
ToolingInfo {
handle: instance.handle(),
tooling_info_fn,
}
}
pub fn name() -> &'static CStr {
vk::ExtToolingInfoFn::name()
}
#[doc = "https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceToolPropertiesEXT.html"]
pub unsafe fn get_physical_device_tool_properties(
&self,
physical_device: vk::PhysicalDevice,
) -> VkResult<Vec<vk::PhysicalDeviceToolPropertiesEXT>> {
let mut count = 0;
self.tooling_info_fn
.get_physical_device_tool_properties_ext(physical_device, &mut count, ptr::null_mut())
.result()?;
let mut v = Vec::with_capacity(count as usize);
let err_code = self
.tooling_info_fn
.get_physical_device_tool_properties_ext(physical_device, &mut count, v.as_mut_ptr());
v.set_len(count as usize);
err_code.result_with_success(v)
}
pub fn fp(&self) -> &vk::ExtToolingInfoFn {
&self.tooling_info_fn
}
pub fn instance(&self) -> vk::Instance {
self.handle
}
}
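The new `ToolingInfo` wrapper above follows the same two-call enumeration idiom that the rest of this patch converts to `.result()?` / `result_with_success`: query the element count first, then fill a pre-sized `Vec`. A generic sketch of that idiom (the helper below is hypothetical, not an ash API):

```rust
// Hypothetical illustration of the count-then-fill pattern used above.
// The closure stands in for an FFI call that reports a count when given a
// null pointer and writes `count` elements when given a real buffer.
fn enumerate_with<T, E>(
    mut query: impl FnMut(&mut u32, *mut T) -> Result<(), E>,
) -> Result<Vec<T>, E> {
    let mut count = 0u32;
    query(&mut count, std::ptr::null_mut())?;
    let mut items: Vec<T> = Vec::with_capacity(count as usize);
    query(&mut count, items.as_mut_ptr())?;
    // In the real bindings this relies on the callee having initialized
    // exactly `count` elements before the length is set.
    unsafe { items.set_len(count as usize) };
    Ok(items)
}
```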

View file

@ -0,0 +1,330 @@
#![allow(dead_code)]
use crate::prelude::*;
use crate::version::{DeviceV1_0, InstanceV1_0, InstanceV1_1};
use crate::vk;
use crate::RawPtr;
use std::ffi::CStr;
use std::mem;
#[derive(Clone)]
pub struct AccelerationStructure {
handle: vk::Device,
acceleration_structure_fn: vk::KhrAccelerationStructureFn,
}
impl AccelerationStructure {
pub fn new<I: InstanceV1_0, D: DeviceV1_0>(instance: &I, device: &D) -> Self {
let acceleration_structure_fn = vk::KhrAccelerationStructureFn::load(|name| unsafe {
mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr()))
});
Self {
handle: device.handle(),
acceleration_structure_fn,
}
}
pub unsafe fn get_properties<I: InstanceV1_1>(
instance: &I,
pdevice: vk::PhysicalDevice,
) -> vk::PhysicalDeviceAccelerationStructurePropertiesKHR {
let mut props_rt = vk::PhysicalDeviceAccelerationStructurePropertiesKHR::default();
{
let mut props = vk::PhysicalDeviceProperties2::builder().push_next(&mut props_rt);
instance.get_physical_device_properties2(pdevice, &mut props);
}
props_rt
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateAccelerationStructureKHR.html>"]
pub unsafe fn create_acceleration_structure(
&self,
create_info: &vk::AccelerationStructureCreateInfoKHR,
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::AccelerationStructureKHR> {
let mut accel_struct = mem::zeroed();
self.acceleration_structure_fn
.create_acceleration_structure_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut accel_struct,
)
.result_with_success(accel_struct)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDestroyAccelerationStructureKHR.html>"]
pub unsafe fn destroy_acceleration_structure(
&self,
accel_struct: vk::AccelerationStructureKHR,
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) {
self.acceleration_structure_fn
.destroy_acceleration_structure_khr(
self.handle,
accel_struct,
allocation_callbacks.as_raw_ptr(),
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBuildAccelerationStructuresKHR.html>"]
pub unsafe fn cmd_build_acceleration_structures(
&self,
command_buffer: vk::CommandBuffer,
infos: &[vk::AccelerationStructureBuildGeometryInfoKHR],
build_range_infos: &[&[vk::AccelerationStructureBuildRangeInfoKHR]],
) {
assert_eq!(infos.len(), build_range_infos.len());
let build_range_infos = build_range_infos
.iter()
.zip(infos.iter())
.map(|(range_info, info)| {
assert_eq!(range_info.len(), info.geometry_count as usize);
range_info.as_ptr()
})
.collect::<Vec<_>>();
self.acceleration_structure_fn
.cmd_build_acceleration_structures_khr(
command_buffer,
infos.len() as _,
infos.as_ptr(),
build_range_infos.as_ptr(),
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBuildAccelerationStructuresIndirectKHR.html>"]
pub unsafe fn cmd_build_acceleration_structures_indirect(
&self,
command_buffer: vk::CommandBuffer,
infos: &[vk::AccelerationStructureBuildGeometryInfoKHR],
indirect_device_addresses: &[vk::DeviceAddress],
indirect_strides: &[u32],
max_primitive_counts: &[&[u32]],
) {
assert_eq!(infos.len(), indirect_device_addresses.len());
assert_eq!(infos.len(), indirect_strides.len());
assert_eq!(infos.len(), max_primitive_counts.len());
let max_primitive_counts = max_primitive_counts
.iter()
.zip(infos.iter())
.map(|(cnt, info)| {
assert_eq!(cnt.len(), info.geometry_count as usize);
cnt.as_ptr()
})
.collect::<Vec<_>>();
self.acceleration_structure_fn
.cmd_build_acceleration_structures_indirect_khr(
command_buffer,
infos.len() as _,
infos.as_ptr(),
indirect_device_addresses.as_ptr(),
indirect_strides.as_ptr(),
max_primitive_counts.as_ptr(),
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkBuildAccelerationStructuresKHR.html>"]
pub unsafe fn build_acceleration_structures(
&self,
deferred_operation: vk::DeferredOperationKHR,
infos: &[vk::AccelerationStructureBuildGeometryInfoKHR],
build_range_infos: &[&[vk::AccelerationStructureBuildRangeInfoKHR]],
) -> VkResult<()> {
assert_eq!(infos.len(), build_range_infos.len());
let build_range_infos = build_range_infos
.iter()
.zip(infos.iter())
.map(|(range_info, info)| {
assert_eq!(range_info.len(), info.geometry_count as usize);
range_info.as_ptr()
})
.collect::<Vec<_>>();
self.acceleration_structure_fn
.build_acceleration_structures_khr(
self.handle,
deferred_operation,
infos.len() as _,
infos.as_ptr(),
build_range_infos.as_ptr(),
)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCopyAccelerationStructureKHR.html>"]
pub unsafe fn copy_acceleration_structure(
&self,
deferred_operation: vk::DeferredOperationKHR,
info: &vk::CopyAccelerationStructureInfoKHR,
) -> VkResult<()> {
self.acceleration_structure_fn
.copy_acceleration_structure_khr(self.handle, deferred_operation, info as *const _)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCopyAccelerationStructureToMemoryKHR.html>"]
pub unsafe fn copy_acceleration_structure_to_memory(
&self,
deferred_operation: vk::DeferredOperationKHR,
info: &vk::CopyAccelerationStructureToMemoryInfoKHR,
) -> VkResult<()> {
self.acceleration_structure_fn
.copy_acceleration_structure_to_memory_khr(
self.handle,
deferred_operation,
info as *const _,
)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCopyMemoryToAccelerationStructureKHR.html>"]
pub unsafe fn copy_memory_to_acceleration_structure(
&self,
deferred_operation: vk::DeferredOperationKHR,
info: &vk::CopyMemoryToAccelerationStructureInfoKHR,
) -> VkResult<()> {
self.acceleration_structure_fn
.copy_memory_to_acceleration_structure_khr(
self.handle,
deferred_operation,
info as *const _,
)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkWriteAccelerationStructuresPropertiesKHR.html>"]
pub unsafe fn write_acceleration_structures_properties(
&self,
acceleration_structures: &[vk::AccelerationStructureKHR],
query_type: vk::QueryType,
data: &mut [u8],
stride: usize,
) -> VkResult<()> {
self.acceleration_structure_fn
.write_acceleration_structures_properties_khr(
self.handle,
acceleration_structures.len() as _,
acceleration_structures.as_ptr(),
query_type,
data.len(),
data.as_mut_ptr() as *mut std::ffi::c_void,
stride,
)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdCopyAccelerationStructureKHR.html>"]
pub unsafe fn cmd_copy_acceleration_structure(
&self,
command_buffer: vk::CommandBuffer,
info: &vk::CopyAccelerationStructureInfoKHR,
) {
self.acceleration_structure_fn
.cmd_copy_acceleration_structure_khr(command_buffer, info);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdCopyAccelerationStructureToMemoryKHR.html>"]
pub unsafe fn cmd_copy_acceleration_structure_to_memory(
&self,
command_buffer: vk::CommandBuffer,
info: &vk::CopyAccelerationStructureToMemoryInfoKHR,
) {
self.acceleration_structure_fn
.cmd_copy_acceleration_structure_to_memory_khr(command_buffer, info as *const _);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdCopyMemoryToAccelerationStructureKHR.html>"]
pub unsafe fn cmd_copy_memory_to_acceleration_structure(
&self,
command_buffer: vk::CommandBuffer,
info: &vk::CopyMemoryToAccelerationStructureInfoKHR,
) {
self.acceleration_structure_fn
.cmd_copy_memory_to_acceleration_structure_khr(command_buffer, info as *const _);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetAccelerationStructureHandleKHR.html>"]
pub unsafe fn get_acceleration_structure_device_address(
&self,
info: &vk::AccelerationStructureDeviceAddressInfoKHR,
) -> vk::DeviceAddress {
self.acceleration_structure_fn
.get_acceleration_structure_device_address_khr(self.handle, info as *const _)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdWriteAccelerationStructuresPropertiesKHR.html>"]
pub unsafe fn cmd_write_acceleration_structures_properties(
&self,
command_buffer: vk::CommandBuffer,
structures: &[vk::AccelerationStructureKHR],
query_type: vk::QueryType,
query_pool: vk::QueryPool,
first_query: u32,
) {
self.acceleration_structure_fn
.cmd_write_acceleration_structures_properties_khr(
command_buffer,
structures.len() as _,
structures.as_ptr(),
query_type,
query_pool,
first_query,
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetDeviceAccelerationStructureCompatibilityKHR.html>"]
pub unsafe fn get_device_acceleration_structure_compatibility(
&self,
version: &vk::AccelerationStructureVersionInfoKHR,
) -> vk::AccelerationStructureCompatibilityKHR {
let mut compatibility = vk::AccelerationStructureCompatibilityKHR::default();
self.acceleration_structure_fn
.get_device_acceleration_structure_compatibility_khr(
self.handle,
version,
&mut compatibility as *mut _,
);
compatibility
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetAccelerationStructureBuildSizesKHR.html>"]
pub unsafe fn get_acceleration_structure_build_sizes(
&self,
build_type: vk::AccelerationStructureBuildTypeKHR,
build_info: &vk::AccelerationStructureBuildGeometryInfoKHR,
max_primitive_counts: &[u32],
) -> vk::AccelerationStructureBuildSizesInfoKHR {
assert_eq!(max_primitive_counts.len(), build_info.geometry_count as _);
let mut size_info = vk::AccelerationStructureBuildSizesInfoKHR::default();
self.acceleration_structure_fn
.get_acceleration_structure_build_sizes_khr(
self.handle,
build_type,
build_info as *const _,
max_primitive_counts.as_ptr(),
&mut size_info as *mut _,
);
size_info
}
pub fn name() -> &'static CStr {
vk::KhrAccelerationStructureFn::name()
}
pub fn fp(&self) -> &vk::KhrAccelerationStructureFn {
&self.acceleration_structure_fn
}
pub fn device(&self) -> vk::Device {
self.handle
}
}

View file

@ -34,16 +34,14 @@ impl AndroidSurface {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SurfaceKHR> {
let mut surface = mem::zeroed();
let err_code = self.android_surface_fn.create_android_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
);
match err_code {
vk::Result::SUCCESS => Ok(surface),
_ => Err(err_code),
}
self.android_surface_fn
.create_android_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
)
.result_with_success(surface)
}
pub fn fp(&self) -> &vk::KhrAndroidSurfaceFn {

92
third_party/rust/ash/src/extensions/khr/create_render_pass2.rs vendored Normal file
View file

@ -0,0 +1,92 @@
#![allow(dead_code)]
use crate::prelude::*;
use crate::version::{DeviceV1_0, InstanceV1_0};
use crate::vk;
use crate::RawPtr;
use std::ffi::CStr;
use std::mem;
#[derive(Clone)]
pub struct CreateRenderPass2 {
handle: vk::Device,
khr_create_renderpass2_fn: vk::KhrCreateRenderpass2Fn,
}
impl CreateRenderPass2 {
pub fn new<I: InstanceV1_0, D: DeviceV1_0>(instance: &I, device: &D) -> CreateRenderPass2 {
let khr_create_renderpass2_fn = vk::KhrCreateRenderpass2Fn::load(|name| unsafe {
mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr()))
});
CreateRenderPass2 {
handle: device.handle(),
khr_create_renderpass2_fn,
}
}
pub fn name() -> &'static CStr {
vk::KhrCreateRenderpass2Fn::name()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateRenderPass2.html>"]
pub unsafe fn create_render_pass2(
&self,
create_info: &vk::RenderPassCreateInfo2,
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::RenderPass> {
let mut renderpass = mem::zeroed();
self.khr_create_renderpass2_fn
.create_render_pass2_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut renderpass,
)
.result_with_success(renderpass)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBeginRenderPass2.html>"]
pub unsafe fn cmd_begin_render_pass2(
&self,
command_buffer: vk::CommandBuffer,
render_pass_begin_info: &vk::RenderPassBeginInfo,
subpass_begin_info: &vk::SubpassBeginInfo,
) {
self.khr_create_renderpass2_fn.cmd_begin_render_pass2_khr(
command_buffer,
render_pass_begin_info,
subpass_begin_info,
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdNextSubpass2.html>"]
pub unsafe fn cmd_next_subpass2(
&self,
command_buffer: vk::CommandBuffer,
subpass_begin_info: &vk::SubpassBeginInfo,
subpass_end_info: &vk::SubpassEndInfo,
) {
self.khr_create_renderpass2_fn.cmd_next_subpass2_khr(
command_buffer,
subpass_begin_info,
subpass_end_info,
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdEndRenderPass2.html>"]
pub unsafe fn cmd_end_render_pass2(
&self,
command_buffer: vk::CommandBuffer,
subpass_end_info: &vk::SubpassEndInfo,
) {
self.khr_create_renderpass2_fn
.cmd_end_render_pass2_khr(command_buffer, subpass_end_info);
}
pub fn fp(&self) -> &vk::KhrCreateRenderpass2Fn {
&self.khr_create_renderpass2_fn
}
pub fn device(&self) -> vk::Device {
self.handle
}
}

View file

@ -0,0 +1,95 @@
#![allow(dead_code)]
use crate::prelude::*;
use crate::version::{DeviceV1_0, InstanceV1_0};
use crate::vk;
use crate::RawPtr;
use std::ffi::CStr;
use std::mem;
#[derive(Clone)]
pub struct DeferredHostOperations {
handle: vk::Device,
deferred_host_operations_fn: vk::KhrDeferredHostOperationsFn,
}
impl DeferredHostOperations {
pub fn new<I: InstanceV1_0, D: DeviceV1_0>(instance: &I, device: &D) -> Self {
let deferred_host_operations_fn = vk::KhrDeferredHostOperationsFn::load(|name| unsafe {
mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr()))
});
Self {
handle: device.handle(),
deferred_host_operations_fn,
}
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateDeferredOperationKHR.html>"]
pub unsafe fn create_deferred_operation(
&self,
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::DeferredOperationKHR> {
let mut operation = mem::zeroed();
self.deferred_host_operations_fn
.create_deferred_operation_khr(
self.handle,
allocation_callbacks.as_raw_ptr(),
&mut operation,
)
.result_with_success(operation)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDeferredOperationJoinKHR.html>"]
pub unsafe fn deferred_operation_join(
&self,
operation: vk::DeferredOperationKHR,
) -> VkResult<()> {
self.deferred_host_operations_fn
.deferred_operation_join_khr(self.handle, operation)
.result()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDestroyDeferredOperationKHR.html>"]
pub unsafe fn destroy_deferred_operation(
&self,
operation: vk::DeferredOperationKHR,
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) {
self.deferred_host_operations_fn
.destroy_deferred_operation_khr(
self.handle,
operation,
allocation_callbacks.as_raw_ptr(),
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetDeferredOperationMaxConcurrencyKHR.html>"]
pub unsafe fn get_deferred_operation_max_concurrency(
&self,
operation: vk::DeferredOperationKHR,
) -> u32 {
self.deferred_host_operations_fn
.get_deferred_operation_max_concurrency_khr(self.handle, operation)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetDeferredOperationResultKHR.html>"]
pub unsafe fn get_deferred_operation_result(
&self,
operation: vk::DeferredOperationKHR,
) -> VkResult<()> {
self.deferred_host_operations_fn
.get_deferred_operation_result_khr(self.handle, operation)
.result()
}
pub fn name() -> &'static CStr {
vk::KhrDeferredHostOperationsFn::name()
}
pub fn fp(&self) -> &vk::KhrDeferredHostOperationsFn {
&self.deferred_host_operations_fn
}
pub fn device(&self) -> vk::Device {
self.handle
}
}

View file

@ -33,11 +33,13 @@ impl Display {
physical_device: vk::PhysicalDevice,
) -> VkResult<Vec<vk::DisplayPropertiesKHR>> {
let mut count = 0;
self.display_fn.get_physical_device_display_properties_khr(
physical_device,
&mut count,
ptr::null_mut(),
);
self.display_fn
.get_physical_device_display_properties_khr(
physical_device,
&mut count,
ptr::null_mut(),
)
.result()?;
let mut v = Vec::with_capacity(count as usize);
let err_code = self.display_fn.get_physical_device_display_properties_khr(
physical_device,
@ -45,10 +47,7 @@ impl Display {
v.as_mut_ptr(),
);
v.set_len(count as usize);
match err_code {
vk::Result::SUCCESS => Ok(v),
_ => Err(err_code),
}
err_code.result_with_success(v)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceDisplayPlanePropertiesKHR.html>"]
@ -57,12 +56,16 @@ impl Display {
physical_device: vk::PhysicalDevice,
) -> VkResult<Vec<vk::DisplayPlanePropertiesKHR>> {
let mut count = 0;
self.display_fn
let err_code = self
.display_fn
.get_physical_device_display_plane_properties_khr(
physical_device,
&mut count,
ptr::null_mut(),
);
if err_code != vk::Result::SUCCESS {
return Err(err_code);
}
let mut v = Vec::with_capacity(count as usize);
let err_code = self
.display_fn
@ -72,10 +75,7 @@ impl Display {
v.as_mut_ptr(),
);
v.set_len(count as usize);
match err_code {
vk::Result::SUCCESS => Ok(v),
_ => Err(err_code),
}
err_code.result_with_success(v)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetDisplayPlaneSupportedDisplaysKHR.html>"]
@ -85,12 +85,14 @@ impl Display {
plane_index: u32,
) -> VkResult<Vec<vk::DisplayKHR>> {
let mut count = 0;
self.display_fn.get_display_plane_supported_displays_khr(
physical_device,
plane_index,
&mut count,
ptr::null_mut(),
);
self.display_fn
.get_display_plane_supported_displays_khr(
physical_device,
plane_index,
&mut count,
ptr::null_mut(),
)
.result()?;
let mut v = Vec::with_capacity(count as usize);
let err_code = self.display_fn.get_display_plane_supported_displays_khr(
physical_device,
@ -99,10 +101,7 @@ impl Display {
v.as_mut_ptr(),
);
v.set_len(count as usize);
match err_code {
vk::Result::SUCCESS => Ok(v),
_ => Err(err_code),
}
err_code.result_with_success(v)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetDisplayModePropertiesKHR.html>"]
@ -112,12 +111,9 @@ impl Display {
display: vk::DisplayKHR,
) -> VkResult<Vec<vk::DisplayModePropertiesKHR>> {
let mut count = 0;
self.display_fn.get_display_mode_properties_khr(
physical_device,
display,
&mut count,
ptr::null_mut(),
);
self.display_fn
.get_display_mode_properties_khr(physical_device, display, &mut count, ptr::null_mut())
.result()?;
let mut v = Vec::with_capacity(count as usize);
let err_code = self.display_fn.get_display_mode_properties_khr(
physical_device,
@ -126,10 +122,7 @@ impl Display {
v.as_mut_ptr(),
);
v.set_len(count as usize);
match err_code {
vk::Result::SUCCESS => Ok(v),
_ => Err(err_code),
}
err_code.result_with_success(v)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateDisplayModeKHR.html>"]
@ -141,17 +134,15 @@ impl Display {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::DisplayModeKHR> {
let mut display_mode = mem::MaybeUninit::zeroed();
let err_code = self.display_fn.create_display_mode_khr(
physical_device,
display,
create_info,
allocation_callbacks.as_raw_ptr(),
display_mode.as_mut_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(display_mode.assume_init()),
_ => Err(err_code),
}
self.display_fn
.create_display_mode_khr(
physical_device,
display,
create_info,
allocation_callbacks.as_raw_ptr(),
display_mode.as_mut_ptr(),
)
.result_with_success(display_mode.assume_init())
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetDisplayPlaneCapabilitiesKHR.html>"]
@ -162,16 +153,14 @@ impl Display {
plane_index: u32,
) -> VkResult<vk::DisplayPlaneCapabilitiesKHR> {
let mut display_plane_capabilities = mem::MaybeUninit::zeroed();
let err_code = self.display_fn.get_display_plane_capabilities_khr(
physical_device,
mode,
plane_index,
display_plane_capabilities.as_mut_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(display_plane_capabilities.assume_init()),
_ => Err(err_code),
}
self.display_fn
.get_display_plane_capabilities_khr(
physical_device,
mode,
plane_index,
display_plane_capabilities.as_mut_ptr(),
)
.result_with_success(display_plane_capabilities.assume_init())
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateDisplayPlaneSurfaceKHR.html>"]
@ -181,16 +170,14 @@ impl Display {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SurfaceKHR> {
let mut surface = mem::MaybeUninit::zeroed();
let err_code = self.display_fn.create_display_plane_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
surface.as_mut_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(surface.assume_init()),
_ => Err(err_code),
}
self.display_fn
.create_display_plane_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
surface.as_mut_ptr(),
)
.result_with_success(surface.assume_init())
}
pub fn fp(&self) -> &vk::KhrDisplayFn {

View file

@ -42,10 +42,7 @@ impl DisplaySwapchain {
swapchains.as_mut_ptr(),
);
swapchains.set_len(create_infos.len());
match err_code {
vk::Result::SUCCESS => Ok(swapchains),
_ => Err(err_code),
}
err_code.result_with_success(swapchains)
}
pub fn fp(&self) -> &vk::KhrDisplaySwapchainFn {

Просмотреть файл

@ -28,13 +28,10 @@ impl ExternalMemoryFd {
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetMemoryFdKHR.html>"]
pub unsafe fn get_memory_fd(&self, create_info: &vk::MemoryGetFdInfoKHR) -> VkResult<i32> {
let mut fd = -1;
let err_code =
self.external_memory_fd_fn
.get_memory_fd_khr(self.handle, create_info, &mut fd);
match err_code {
vk::Result::SUCCESS => Ok(fd),
_ => Err(err_code),
}
self.external_memory_fd_fn
.get_memory_fd_khr(self.handle, create_info, &mut fd)
.result_with_success(fd)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetMemoryFdPropertiesKHR.html>"]
@ -44,16 +41,9 @@ impl ExternalMemoryFd {
fd: i32,
) -> VkResult<vk::MemoryFdPropertiesKHR> {
let mut memory_fd_properties = mem::zeroed();
let err_code = self.external_memory_fd_fn.get_memory_fd_properties_khr(
self.handle,
handle_type,
fd,
&mut memory_fd_properties,
);
match err_code {
vk::Result::SUCCESS => Ok(memory_fd_properties),
_ => Err(err_code),
}
self.external_memory_fd_fn
.get_memory_fd_properties_khr(self.handle, handle_type, fd, &mut memory_fd_properties)
.result_with_success(memory_fd_properties)
}
pub fn fp(&self) -> &vk::KhrExternalMemoryFdFn {

View file

@ -1,10 +1,14 @@
pub use self::acceleration_structure::AccelerationStructure;
pub use self::android_surface::AndroidSurface;
pub use self::create_render_pass2::CreateRenderPass2;
pub use self::deferred_host_operations::DeferredHostOperations;
pub use self::display::Display;
pub use self::display_swapchain::DisplaySwapchain;
pub use self::draw_indirect_count::DrawIndirectCount;
pub use self::external_memory_fd::ExternalMemoryFd;
pub use self::pipeline_executable_properties::PipelineExecutableProperties;
pub use self::push_descriptor::PushDescriptor;
pub use self::ray_tracing::RayTracing;
pub use self::ray_tracing_pipeline::RayTracingPipeline;
pub use self::surface::Surface;
pub use self::swapchain::Swapchain;
pub use self::timeline_semaphore::TimelineSemaphore;
@ -13,13 +17,17 @@ pub use self::win32_surface::Win32Surface;
pub use self::xcb_surface::XcbSurface;
pub use self::xlib_surface::XlibSurface;
mod acceleration_structure;
mod android_surface;
mod create_render_pass2;
mod deferred_host_operations;
mod display;
mod display_swapchain;
mod draw_indirect_count;
mod external_memory_fd;
mod pipeline_executable_properties;
mod push_descriptor;
mod ray_tracing;
mod ray_tracing_pipeline;
mod surface;
mod swapchain;
mod timeline_semaphore;

View file

@ -0,0 +1,129 @@
#![allow(dead_code)]
use crate::prelude::*;
use crate::version::{EntryV1_0, InstanceV1_0};
use crate::vk;
use std::ffi::CStr;
use std::mem;
use std::ptr;
#[derive(Clone)]
pub struct PipelineExecutableProperties {
handle: vk::Instance,
pipeline_executable_properties_fn: vk::KhrPipelineExecutablePropertiesFn,
}
impl PipelineExecutableProperties {
pub fn new<E: EntryV1_0, I: InstanceV1_0>(
entry: &E,
instance: &I,
) -> PipelineExecutableProperties {
let pipeline_executable_properties_fn =
vk::KhrPipelineExecutablePropertiesFn::load(|name| unsafe {
mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr()))
});
PipelineExecutableProperties {
handle: instance.handle(),
pipeline_executable_properties_fn,
}
}
pub fn name() -> &'static CStr {
vk::KhrPipelineExecutablePropertiesFn::name()
}
#[doc = "https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPipelineExecutableInternalRepresentationsKHR.html"]
pub unsafe fn get_pipeline_executable_internal_representations(
&self,
device: vk::Device,
executable_info: &vk::PipelineExecutableInfoKHR,
) -> VkResult<Vec<vk::PipelineExecutableInternalRepresentationKHR>> {
let mut count = 0;
let err_code = self
.pipeline_executable_properties_fn
.get_pipeline_executable_internal_representations_khr(
device,
executable_info,
&mut count,
ptr::null_mut(),
);
if err_code != vk::Result::SUCCESS {
return Err(err_code);
}
let mut v: Vec<_> = vec![Default::default(); count as usize];
self.pipeline_executable_properties_fn
.get_pipeline_executable_internal_representations_khr(
device,
executable_info,
&mut count,
v.as_mut_ptr(),
)
.result_with_success(v)
}
#[doc = "https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPipelineExecutablePropertiesKHR.html"]
pub unsafe fn get_pipeline_executable_properties(
&self,
device: vk::Device,
pipeline_info: &vk::PipelineInfoKHR,
) -> VkResult<Vec<vk::PipelineExecutablePropertiesKHR>> {
let mut count = 0;
let err_code = self
.pipeline_executable_properties_fn
.get_pipeline_executable_properties_khr(
device,
pipeline_info,
&mut count,
ptr::null_mut(),
);
if err_code != vk::Result::SUCCESS {
return Err(err_code);
}
let mut v: Vec<_> = vec![Default::default(); count as usize];
self.pipeline_executable_properties_fn
.get_pipeline_executable_properties_khr(
device,
pipeline_info,
&mut count,
v.as_mut_ptr(),
)
.result_with_success(v)
}
#[doc = "https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPipelineExecutableStatisticsKHR.html"]
pub unsafe fn get_pipeline_executable_statistics(
&self,
device: vk::Device,
executable_info: &vk::PipelineExecutableInfoKHR,
) -> VkResult<Vec<vk::PipelineExecutableStatisticKHR>> {
let mut count = 0;
let err_code = self
.pipeline_executable_properties_fn
.get_pipeline_executable_statistics_khr(
device,
executable_info,
&mut count,
ptr::null_mut(),
);
if err_code != vk::Result::SUCCESS {
return Err(err_code);
}
let mut v: Vec<_> = vec![Default::default(); count as usize];
self.pipeline_executable_properties_fn
.get_pipeline_executable_statistics_khr(
device,
executable_info,
&mut count,
v.as_mut_ptr(),
)
.result_with_success(v)
}
pub fn fp(&self) -> &vk::KhrPipelineExecutablePropertiesFn {
&self.pipeline_executable_properties_fn
}
pub fn instance(&self) -> vk::Instance {
self.handle
}
}
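A hedged usage sketch for the new VK_KHR_pipeline_executable_properties wrapper (my illustration, not from the patch): `entry`, `instance`, a raw `device: vk::Device`, and a `pipeline` created while the pipelineExecutableInfo feature is enabled are all assumed to exist.

// Sketch only: every handle below is assumed to be set up elsewhere.
let exec_props = PipelineExecutableProperties::new(&entry, &instance);
unsafe {
    let info = vk::PipelineInfoKHR::builder().pipeline(pipeline).build();
    let executables = exec_props
        .get_pipeline_executable_properties(device, &info)
        .expect("query pipeline executables");
    // Each entry describes one compiled executable (roughly one shader stage).
    println!("pipeline has {} executables", executables.len());
}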

View file

@ -1,369 +0,0 @@
#![allow(dead_code)]
use crate::prelude::*;
use crate::version::{DeviceV1_0, InstanceV1_0, InstanceV1_1};
use crate::vk;
use crate::RawPtr;
use std::ffi::CStr;
use std::mem;
#[derive(Clone)]
pub struct RayTracing {
handle: vk::Device,
ray_tracing_fn: vk::KhrRayTracingFn,
}
impl RayTracing {
pub fn new<I: InstanceV1_0, D: DeviceV1_0>(instance: &I, device: &D) -> RayTracing {
let ray_tracing_fn = vk::KhrRayTracingFn::load(|name| unsafe {
mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr()))
});
RayTracing {
handle: device.handle(),
ray_tracing_fn,
}
}
pub unsafe fn get_properties<I: InstanceV1_1>(
instance: &I,
pdevice: vk::PhysicalDevice,
) -> vk::PhysicalDeviceRayTracingPropertiesKHR {
let mut props_rt = vk::PhysicalDeviceRayTracingPropertiesKHR::default();
{
let mut props = vk::PhysicalDeviceProperties2::builder().push_next(&mut props_rt);
instance.get_physical_device_properties2(pdevice, &mut props);
}
props_rt
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateAccelerationStructureKHR.html>"]
pub unsafe fn create_acceleration_structure(
&self,
create_info: &vk::AccelerationStructureCreateInfoKHR,
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::AccelerationStructureKHR> {
let mut accel_struct = mem::zeroed();
let err_code = self.ray_tracing_fn.create_acceleration_structure_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut accel_struct,
);
match err_code {
vk::Result::SUCCESS => Ok(accel_struct),
_ => Err(err_code),
}
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDestroyAccelerationStructureKHR.html>"]
pub unsafe fn destroy_acceleration_structure(
&self,
accel_struct: vk::AccelerationStructureKHR,
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) {
self.ray_tracing_fn.destroy_acceleration_structure_khr(
self.handle,
accel_struct,
allocation_callbacks.as_raw_ptr(),
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetAccelerationStructureMemoryRequirementsKHR.html>"]
pub unsafe fn get_acceleration_structure_memory_requirements(
&self,
info: &vk::AccelerationStructureMemoryRequirementsInfoKHR,
) -> vk::MemoryRequirements2KHR {
let mut requirements = Default::default();
self.ray_tracing_fn
.get_acceleration_structure_memory_requirements_khr(
self.handle,
info,
&mut requirements,
);
requirements
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkBindAccelerationStructureMemoryKHR.html>"]
pub unsafe fn bind_acceleration_structure_memory(
&self,
bind_info: &[vk::BindAccelerationStructureMemoryInfoKHR],
) -> VkResult<()> {
let err_code = self.ray_tracing_fn.bind_acceleration_structure_memory_khr(
self.handle,
bind_info.len() as u32,
bind_info.as_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBuildAccelerationStructureKHR.html>"]
pub unsafe fn cmd_build_acceleration_structure(
&self,
command_buffer: vk::CommandBuffer,
infos: &[vk::AccelerationStructureBuildGeometryInfoKHR],
offset_infos: &[&[vk::AccelerationStructureBuildOffsetInfoKHR]],
) {
let offset_info_ptr = offset_infos
.iter()
.map(|slice| slice.as_ptr())
.collect::<Vec<*const vk::AccelerationStructureBuildOffsetInfoKHR>>();
self.ray_tracing_fn.cmd_build_acceleration_structure_khr(
command_buffer,
infos.len() as u32,
infos.as_ptr(),
offset_info_ptr.as_ptr(),
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdCopyAccelerationStructureKHR.html>"]
pub unsafe fn cmd_copy_acceleration_structure(
&self,
command_buffer: vk::CommandBuffer,
info: &vk::CopyAccelerationStructureInfoKHR,
) {
self.ray_tracing_fn
.cmd_copy_acceleration_structure_khr(command_buffer, info);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdTraceRaysKHR.html>"]
pub unsafe fn cmd_trace_rays(
&self,
command_buffer: vk::CommandBuffer,
raygen_shader_binding_tables: &[vk::StridedBufferRegionKHR],
miss_shader_binding_tables: &[vk::StridedBufferRegionKHR],
hit_shader_binding_tables: &[vk::StridedBufferRegionKHR],
callable_shader_binding_tables: &[vk::StridedBufferRegionKHR],
width: u32,
height: u32,
depth: u32,
) {
self.ray_tracing_fn.cmd_trace_rays_khr(
command_buffer,
raygen_shader_binding_tables.as_ptr(),
miss_shader_binding_tables.as_ptr(),
hit_shader_binding_tables.as_ptr(),
callable_shader_binding_tables.as_ptr(),
width,
height,
depth,
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateRayTracingPipelinesKHR.html>"]
pub unsafe fn create_ray_tracing_pipelines(
&self,
pipeline_cache: vk::PipelineCache,
create_info: &[vk::RayTracingPipelineCreateInfoKHR],
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<Vec<vk::Pipeline>> {
let mut pipelines = vec![mem::zeroed(); create_info.len()];
let err_code = self.ray_tracing_fn.create_ray_tracing_pipelines_khr(
self.handle,
pipeline_cache,
create_info.len() as u32,
create_info.as_ptr(),
allocation_callbacks.as_raw_ptr(),
pipelines.as_mut_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(pipelines),
_ => Err(err_code),
}
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetRayTracingShaderGroupHandlesKHR.html>"]
pub unsafe fn get_ray_tracing_shader_group_handles(
&self,
pipeline: vk::Pipeline,
first_group: u32,
group_count: u32,
data: &mut [u8],
) -> VkResult<()> {
let err_code = self
.ray_tracing_fn
.get_ray_tracing_shader_group_handles_khr(
self.handle,
pipeline,
first_group,
group_count,
data.len(),
data.as_mut_ptr() as *mut std::ffi::c_void,
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetAccelerationStructureHandleKHR.html>"]
pub unsafe fn get_acceleration_structure_device_address(
&self,
info: &vk::AccelerationStructureDeviceAddressInfoKHR,
) -> vk::DeviceAddress {
self.ray_tracing_fn
.get_acceleration_structure_device_address_khr(self.handle, info)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdWriteAccelerationStructuresPropertiesKHR.html>"]
pub unsafe fn cmd_write_acceleration_structures_properties(
&self,
command_buffer: vk::CommandBuffer,
structures: &[vk::AccelerationStructureKHR],
query_type: vk::QueryType,
query_pool: vk::QueryPool,
first_query: u32,
) {
self.ray_tracing_fn
.cmd_write_acceleration_structures_properties_khr(
command_buffer,
structures.len() as u32,
structures.as_ptr(),
query_type,
query_pool,
first_query,
);
}
pub unsafe fn cmd_build_acceleration_structure_indirect(
&self,
command_buffer: vk::CommandBuffer,
info: &vk::AccelerationStructureBuildGeometryInfoKHR,
indirect_buffer: vk::Buffer,
indirect_offset: vk::DeviceSize,
indirect_stride: u32,
) {
self.ray_tracing_fn
.cmd_build_acceleration_structure_indirect_khr(
command_buffer,
info,
indirect_buffer,
indirect_offset,
indirect_stride,
);
}
pub unsafe fn copy_acceleration_structure_to_memory(
&self,
device: vk::Device,
info: &vk::CopyAccelerationStructureToMemoryInfoKHR,
) -> VkResult<()> {
let err_code = self
.ray_tracing_fn
.copy_acceleration_structure_to_memory_khr(device, info);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
}
pub unsafe fn copy_memory_to_acceleration_structure(
&self,
device: vk::Device,
info: &vk::CopyMemoryToAccelerationStructureInfoKHR,
) -> VkResult<()> {
let err_code = self
.ray_tracing_fn
.copy_memory_to_acceleration_structure_khr(device, info);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
}
pub unsafe fn cmd_copy_acceleration_structure_to_memory(
&self,
command_buffer: vk::CommandBuffer,
info: &vk::CopyAccelerationStructureToMemoryInfoKHR,
) {
self.ray_tracing_fn
.cmd_copy_acceleration_structure_to_memory_khr(command_buffer, info);
}
pub unsafe fn cmd_copy_memory_to_acceleration_structure(
&self,
command_buffer: vk::CommandBuffer,
info: &vk::CopyMemoryToAccelerationStructureInfoKHR,
) {
self.ray_tracing_fn
.cmd_copy_memory_to_acceleration_structure_khr(command_buffer, info);
}
pub unsafe fn get_ray_tracing_capture_replay_shader_group_handles(
&self,
device: vk::Device,
pipeline: vk::Pipeline,
first_group: u32,
group_count: u32,
data_size: usize,
) -> VkResult<Vec<u8>> {
let mut data: Vec<u8> = Vec::with_capacity(data_size);
let err_code = self
.ray_tracing_fn
.get_ray_tracing_capture_replay_shader_group_handles_khr(
device,
pipeline,
first_group,
group_count,
data_size,
data.as_mut_ptr() as *mut _,
);
match err_code {
vk::Result::SUCCESS => Ok(data),
_ => Err(err_code),
}
}
pub unsafe fn cmd_trace_rays_indirect(
&self,
command_buffer: vk::CommandBuffer,
raygen_shader_binding_table: &[vk::StridedBufferRegionKHR],
miss_shader_binding_table: &[vk::StridedBufferRegionKHR],
hit_shader_binding_table: &[vk::StridedBufferRegionKHR],
callable_shader_binding_table: &[vk::StridedBufferRegionKHR],
buffer: vk::Buffer,
offset: vk::DeviceSize,
) {
self.ray_tracing_fn.cmd_trace_rays_indirect_khr(
command_buffer,
raygen_shader_binding_table.as_ptr(),
miss_shader_binding_table.as_ptr(),
hit_shader_binding_table.as_ptr(),
callable_shader_binding_table.as_ptr(),
buffer,
offset,
);
}
pub unsafe fn get_device_acceleration_structure_compatibility(
&self,
device: vk::Device,
version: &vk::AccelerationStructureVersionKHR,
) -> VkResult<()> {
let err_code = self
.ray_tracing_fn
.get_device_acceleration_structure_compatibility_khr(device, version);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
}
pub fn name() -> &'static CStr {
vk::KhrRayTracingFn::name()
}
pub fn fp(&self) -> &vk::KhrRayTracingFn {
&self.ray_tracing_fn
}
pub fn device(&self) -> vk::Device {
self.handle
}
}

181
third_party/rust/ash/src/extensions/khr/ray_tracing_pipeline.rs vendored Normal file
View file

@ -0,0 +1,181 @@
#![allow(dead_code)]
use crate::prelude::*;
use crate::version::{DeviceV1_0, InstanceV1_0, InstanceV1_1};
use crate::vk;
use crate::RawPtr;
use std::ffi::CStr;
use std::mem;
#[derive(Clone)]
pub struct RayTracingPipeline {
handle: vk::Device,
ray_tracing_fn: vk::KhrRayTracingPipelineFn,
}
impl RayTracingPipeline {
pub fn new<I: InstanceV1_0, D: DeviceV1_0>(instance: &I, device: &D) -> Self {
let ray_tracing_fn = vk::KhrRayTracingPipelineFn::load(|name| unsafe {
mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr()))
});
Self {
handle: device.handle(),
ray_tracing_fn,
}
}
pub unsafe fn get_properties<I: InstanceV1_1>(
instance: &I,
pdevice: vk::PhysicalDevice,
) -> vk::PhysicalDeviceRayTracingPipelinePropertiesKHR {
let mut props_rt = vk::PhysicalDeviceRayTracingPipelinePropertiesKHR::default();
{
let mut props = vk::PhysicalDeviceProperties2::builder().push_next(&mut props_rt);
instance.get_physical_device_properties2(pdevice, &mut props);
}
props_rt
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdTraceRaysKHR.html>"]
pub unsafe fn cmd_trace_rays(
&self,
command_buffer: vk::CommandBuffer,
raygen_shader_binding_tables: &vk::StridedDeviceAddressRegionKHR,
miss_shader_binding_tables: &vk::StridedDeviceAddressRegionKHR,
hit_shader_binding_tables: &vk::StridedDeviceAddressRegionKHR,
callable_shader_binding_tables: &vk::StridedDeviceAddressRegionKHR,
width: u32,
height: u32,
depth: u32,
) {
self.ray_tracing_fn.cmd_trace_rays_khr(
command_buffer,
raygen_shader_binding_tables as *const _,
miss_shader_binding_tables as *const _,
hit_shader_binding_tables as *const _,
callable_shader_binding_tables as *const _,
width,
height,
depth,
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCreateRayTracingPipelinesKHR.html>"]
pub unsafe fn create_ray_tracing_pipelines(
&self,
deferred_operation: vk::DeferredOperationKHR,
pipeline_cache: vk::PipelineCache,
create_info: &[vk::RayTracingPipelineCreateInfoKHR],
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<Vec<vk::Pipeline>> {
let mut pipelines = vec![mem::zeroed(); create_info.len()];
self.ray_tracing_fn
.create_ray_tracing_pipelines_khr(
self.handle,
deferred_operation,
pipeline_cache,
create_info.len() as u32,
create_info.as_ptr(),
allocation_callbacks.as_raw_ptr(),
pipelines.as_mut_ptr(),
)
.result_with_success(pipelines)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetRayTracingShaderGroupHandlesKHR.html>"]
pub unsafe fn get_ray_tracing_shader_group_handles(
&self,
pipeline: vk::Pipeline,
first_group: u32,
group_count: u32,
data_size: usize,
) -> VkResult<Vec<u8>> {
let mut data = Vec::<u8>::with_capacity(data_size);
let err_code = self
.ray_tracing_fn
.get_ray_tracing_shader_group_handles_khr(
self.handle,
pipeline,
first_group,
group_count,
data_size,
data.as_mut_ptr() as *mut std::ffi::c_void,
);
data.set_len(data_size);
err_code.result_with_success(data)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetRayTracingCaptureReplayShaderGroupHandlesKHR.html>"]
pub unsafe fn get_ray_tracing_capture_replay_shader_group_handles(
&self,
pipeline: vk::Pipeline,
first_group: u32,
group_count: u32,
data_size: usize,
) -> VkResult<Vec<u8>> {
let mut data: Vec<u8> = Vec::with_capacity(data_size);
self.ray_tracing_fn
.get_ray_tracing_capture_replay_shader_group_handles_khr(
self.handle,
pipeline,
first_group,
group_count,
data_size,
data.as_mut_ptr() as *mut _,
)
.result_with_success(data)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdTraceRaysIndirectKHR.html>"]
pub unsafe fn cmd_trace_rays_indirect(
&self,
command_buffer: vk::CommandBuffer,
raygen_shader_binding_table: &[vk::StridedDeviceAddressRegionKHR],
miss_shader_binding_table: &[vk::StridedDeviceAddressRegionKHR],
hit_shader_binding_table: &[vk::StridedDeviceAddressRegionKHR],
callable_shader_binding_table: &[vk::StridedDeviceAddressRegionKHR],
indirect_device_address: vk::DeviceAddress,
) {
self.ray_tracing_fn.cmd_trace_rays_indirect_khr(
command_buffer,
raygen_shader_binding_table.as_ptr(),
miss_shader_binding_table.as_ptr(),
hit_shader_binding_table.as_ptr(),
callable_shader_binding_table.as_ptr(),
indirect_device_address,
);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetRayTracingShaderGroupStackSizeKHR.html>"]
pub unsafe fn get_ray_tracing_shader_group_stack_size(
&self,
pipeline: vk::Pipeline,
group: u32,
group_shader: vk::ShaderGroupShaderKHR,
) -> vk::DeviceSize {
self.ray_tracing_fn
.get_ray_tracing_shader_group_stack_size_khr(self.handle, pipeline, group, group_shader)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdSetRayTracingPipelineStackSizeKHR.html>"]
pub unsafe fn cmd_set_ray_tracing_pipeline_stack_size(
&self,
command_buffer: vk::CommandBuffer,
pipeline_stack_size: u32,
) {
self.ray_tracing_fn
.cmd_set_ray_tracing_pipeline_stack_size_khr(command_buffer, pipeline_stack_size);
}
pub fn name() -> &'static CStr {
vk::KhrRayTracingPipelineFn::name()
}
pub fn fp(&self) -> &vk::KhrRayTracingPipelineFn {
&self.ray_tracing_fn
}
pub fn device(&self) -> vk::Device {
self.handle
}
}
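A hedged dispatch sketch for the VK_KHR_ray_tracing_pipeline wrapper above (my illustration, not part of the patch): `instance`, `device`, `cmd_buf`, and the populated raygen/miss/hit shader-binding-table regions are assumed, and a zeroed default region stands in for the unused callable table.

// Sketch only: the binding-table regions must point at real SBT buffers.
let rt = RayTracingPipeline::new(&instance, &device);
let unused = vk::StridedDeviceAddressRegionKHR::default();
unsafe {
    rt.cmd_trace_rays(
        cmd_buf,
        &raygen_region,
        &miss_region,
        &hit_region,
        &unused,
        1920,
        1080,
        1,
    );
}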

View file

@ -32,21 +32,18 @@ impl Surface {
pub unsafe fn get_physical_device_surface_support(
&self,
physical_device: vk::PhysicalDevice,
queue_index: u32,
queue_family_index: u32,
surface: vk::SurfaceKHR,
) -> VkResult<bool> {
let mut b = mem::zeroed();
let err_code = self.surface_fn.get_physical_device_surface_support_khr(
physical_device,
queue_index,
surface,
&mut b,
);
match err_code {
vk::Result::SUCCESS => Ok(b > 0),
_ => Err(err_code),
}
self.surface_fn
.get_physical_device_surface_support_khr(
physical_device,
queue_family_index,
surface,
&mut b,
)
.result_with_success(b > 0)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceSurfacePresentModesKHR.html>"]
@ -62,7 +59,8 @@ impl Surface {
surface,
&mut count,
ptr::null_mut(),
);
)
.result()?;
let mut v = Vec::with_capacity(count as usize);
let err_code = self
.surface_fn
@ -73,10 +71,7 @@ impl Surface {
v.as_mut_ptr(),
);
v.set_len(count as usize);
match err_code {
vk::Result::SUCCESS => Ok(v),
_ => Err(err_code),
}
err_code.result_with_success(v)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceSurfaceCapabilitiesKHR.html>"]
@ -86,17 +81,13 @@ impl Surface {
surface: vk::SurfaceKHR,
) -> VkResult<vk::SurfaceCapabilitiesKHR> {
let mut surface_capabilities = mem::zeroed();
let err_code = self
.surface_fn
self.surface_fn
.get_physical_device_surface_capabilities_khr(
physical_device,
surface,
&mut surface_capabilities,
);
match err_code {
vk::Result::SUCCESS => Ok(surface_capabilities),
_ => Err(err_code),
}
)
.result_with_success(surface_capabilities)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceSurfaceFormatsKHR.html>"]
@ -106,12 +97,14 @@ impl Surface {
surface: vk::SurfaceKHR,
) -> VkResult<Vec<vk::SurfaceFormatKHR>> {
let mut count = 0;
self.surface_fn.get_physical_device_surface_formats_khr(
physical_device,
surface,
&mut count,
ptr::null_mut(),
);
self.surface_fn
.get_physical_device_surface_formats_khr(
physical_device,
surface,
&mut count,
ptr::null_mut(),
)
.result()?;
let mut v = Vec::with_capacity(count as usize);
let err_code = self.surface_fn.get_physical_device_surface_formats_khr(
physical_device,
@ -120,10 +113,7 @@ impl Surface {
v.as_mut_ptr(),
);
v.set_len(count as usize);
match err_code {
vk::Result::SUCCESS => Ok(v),
_ => Err(err_code),
}
err_code.result_with_success(v)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDestroySurfaceKHR.html>"]

View file

@ -73,16 +73,14 @@ impl Swapchain {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SwapchainKHR> {
let mut swapchain = mem::zeroed();
let err_code = self.swapchain_fn.create_swapchain_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut swapchain,
);
match err_code {
vk::Result::SUCCESS => Ok(swapchain),
_ => Err(err_code),
}
self.swapchain_fn
.create_swapchain_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut swapchain,
)
.result_with_success(swapchain)
}
/// On success, returns whether the swapchain is suboptimal for the surface.
@ -106,12 +104,9 @@ impl Swapchain {
swapchain: vk::SwapchainKHR,
) -> VkResult<Vec<vk::Image>> {
let mut count = 0;
self.swapchain_fn.get_swapchain_images_khr(
self.handle,
swapchain,
&mut count,
ptr::null_mut(),
);
self.swapchain_fn
.get_swapchain_images_khr(self.handle, swapchain, &mut count, ptr::null_mut())
.result()?;
let mut v = Vec::with_capacity(count as usize);
let err_code = self.swapchain_fn.get_swapchain_images_khr(
@ -121,10 +116,7 @@ impl Swapchain {
v.as_mut_ptr(),
);
v.set_len(count as usize);
match err_code {
vk::Result::SUCCESS => Ok(v),
_ => Err(err_code),
}
err_code.result_with_success(v)
}
pub fn fp(&self) -> &vk::KhrSwapchainFn {

View file

@ -34,14 +34,9 @@ impl TimelineSemaphore {
semaphore: vk::Semaphore,
) -> VkResult<u64> {
let mut value = 0;
let err_code = self
.timeline_semaphore_fn
.get_semaphore_counter_value_khr(device, semaphore, &mut value);
match err_code {
vk::Result::SUCCESS => Ok(value),
_ => Err(err_code),
}
self.timeline_semaphore_fn
.get_semaphore_counter_value_khr(device, semaphore, &mut value)
.result_with_success(value)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkWaitSemaphores.html>"]
@ -51,14 +46,9 @@ impl TimelineSemaphore {
wait_info: &vk::SemaphoreWaitInfo,
timeout: u64,
) -> VkResult<()> {
let err_code = self
.timeline_semaphore_fn
.wait_semaphores_khr(device, wait_info, timeout);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.timeline_semaphore_fn
.wait_semaphores_khr(device, wait_info, timeout)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkSignalSemaphore.html>"]
@ -67,14 +57,9 @@ impl TimelineSemaphore {
device: vk::Device,
signal_info: &vk::SemaphoreSignalInfo,
) -> VkResult<()> {
let err_code = self
.timeline_semaphore_fn
.signal_semaphore_khr(device, signal_info);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.timeline_semaphore_fn
.signal_semaphore_khr(device, signal_info)
.into()
}
pub fn fp(&self) -> &vk::KhrTimelineSemaphoreFn {

View file

@ -34,16 +34,14 @@ impl WaylandSurface {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SurfaceKHR> {
let mut surface = mem::zeroed();
let err_code = self.wayland_surface_fn.create_wayland_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
);
match err_code {
vk::Result::SUCCESS => Ok(surface),
_ => Err(err_code),
}
self.wayland_surface_fn
.create_wayland_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
)
.result_with_success(surface)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceWaylandPresentationSupportKHR.html"]

View file

@ -34,16 +34,14 @@ impl Win32Surface {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SurfaceKHR> {
let mut surface = mem::zeroed();
let err_code = self.win32_surface_fn.create_win32_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
);
match err_code {
vk::Result::SUCCESS => Ok(surface),
_ => Err(err_code),
}
self.win32_surface_fn
.create_win32_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
)
.result_with_success(surface)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceWin32PresentationSupportKHR.html"]

View file

@ -34,16 +34,14 @@ impl XcbSurface {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SurfaceKHR> {
let mut surface = mem::zeroed();
let err_code = self.xcb_surface_fn.create_xcb_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
);
match err_code {
vk::Result::SUCCESS => Ok(surface),
_ => Err(err_code),
}
self.xcb_surface_fn
.create_xcb_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
)
.result_with_success(surface)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceXcbPresentationSupportKHR.html"]

View file

@ -34,16 +34,14 @@ impl XlibSurface {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SurfaceKHR> {
let mut surface = mem::zeroed();
let err_code = self.xlib_surface_fn.create_xlib_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
);
match err_code {
vk::Result::SUCCESS => Ok(surface),
_ => Err(err_code),
}
self.xlib_surface_fn
.create_xlib_surface_khr(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
)
.result_with_success(surface)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceXlibPresentationSupportKHR.html"]

View file

@ -34,16 +34,14 @@ impl IOSSurface {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SurfaceKHR> {
let mut surface = mem::zeroed();
let err_code = self.ios_surface_fn.create_ios_surface_mvk(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
);
match err_code {
vk::Result::SUCCESS => Ok(surface),
_ => Err(err_code),
}
self.ios_surface_fn
.create_ios_surface_mvk(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
)
.result_with_success(surface)
}
pub fn fp(&self) -> &vk::MvkIosSurfaceFn {

View file

@ -34,16 +34,14 @@ impl MacOSSurface {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::SurfaceKHR> {
let mut surface = mem::zeroed();
let err_code = self.macos_surface_fn.create_mac_os_surface_mvk(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
);
match err_code {
vk::Result::SUCCESS => Ok(surface),
_ => Err(err_code),
}
self.macos_surface_fn
.create_mac_os_surface_mvk(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut surface,
)
.result_with_success(surface)
}
pub fn fp(&self) -> &vk::MvkMacosSurfaceFn {

View file

@ -0,0 +1,60 @@
#![allow(dead_code)]
use crate::version::{DeviceV1_0, InstanceV1_0};
use crate::vk;
use std::ffi::CStr;
use std::mem;
use std::os::raw::c_void;
#[derive(Clone)]
pub struct DeviceDiagnosticCheckpoints {
device_diagnostic_checkpoints_fn: vk::NvDeviceDiagnosticCheckpointsFn,
}
impl DeviceDiagnosticCheckpoints {
pub fn new<I: InstanceV1_0, D: DeviceV1_0>(
instance: &I,
device: &D,
) -> DeviceDiagnosticCheckpoints {
let device_diagnostic_checkpoints_fn =
vk::NvDeviceDiagnosticCheckpointsFn::load(|name| unsafe {
mem::transmute(instance.get_device_proc_addr(device.handle(), name.as_ptr()))
});
DeviceDiagnosticCheckpoints {
device_diagnostic_checkpoints_fn,
}
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdSetCheckpointNV.html>"]
pub unsafe fn cmd_set_checkpoint(
&self,
command_buffer: vk::CommandBuffer,
p_checkpoint_marker: *const c_void,
) {
self.device_diagnostic_checkpoints_fn
.cmd_set_checkpoint_nv(command_buffer, p_checkpoint_marker);
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetQueueCheckpointDataNV.html>"]
pub unsafe fn get_queue_checkpoint_data(&self, queue: vk::Queue) -> Vec<vk::CheckpointDataNV> {
let mut checkpoint_data_count: u32 = 0;
self.device_diagnostic_checkpoints_fn
.get_queue_checkpoint_data_nv(queue, &mut checkpoint_data_count, std::ptr::null_mut());
let mut checkpoint_data: Vec<vk::CheckpointDataNV> =
vec![vk::CheckpointDataNV::default(); checkpoint_data_count as _];
self.device_diagnostic_checkpoints_fn
.get_queue_checkpoint_data_nv(
queue,
&mut checkpoint_data_count,
checkpoint_data.as_mut_ptr(),
);
checkpoint_data
}
pub fn name() -> &'static CStr {
vk::NvDeviceDiagnosticCheckpointsFn::name()
}
pub fn fp(&self) -> &vk::NvDeviceDiagnosticCheckpointsFn {
&self.device_diagnostic_checkpoints_fn
}
}

View file

@ -1,5 +1,7 @@
pub use self::device_diagnostic_checkpoints::DeviceDiagnosticCheckpoints;
pub use self::mesh_shader::MeshShader;
pub use self::ray_tracing::RayTracing;
mod device_diagnostic_checkpoints;
mod mesh_shader;
mod ray_tracing;

View file

@ -42,16 +42,14 @@ impl RayTracing {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<vk::AccelerationStructureNV> {
let mut accel_struct = mem::zeroed();
let err_code = self.ray_tracing_fn.create_acceleration_structure_nv(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut accel_struct,
);
match err_code {
vk::Result::SUCCESS => Ok(accel_struct),
_ => Err(err_code),
}
self.ray_tracing_fn
.create_acceleration_structure_nv(
self.handle,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut accel_struct,
)
.result_with_success(accel_struct)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkDestroyAccelerationStructureNV.html>"]
@ -87,15 +85,13 @@ impl RayTracing {
&self,
bind_info: &[vk::BindAccelerationStructureMemoryInfoNV],
) -> VkResult<()> {
let err_code = self.ray_tracing_fn.bind_acceleration_structure_memory_nv(
self.handle,
bind_info.len() as u32,
bind_info.as_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.ray_tracing_fn
.bind_acceleration_structure_memory_nv(
self.handle,
bind_info.len() as u32,
bind_info.as_ptr(),
)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdBuildAccelerationStructureNV.html>"]
@ -182,18 +178,16 @@ impl RayTracing {
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> VkResult<Vec<vk::Pipeline>> {
let mut pipelines = vec![mem::zeroed(); create_info.len()];
let err_code = self.ray_tracing_fn.create_ray_tracing_pipelines_nv(
self.handle,
pipeline_cache,
create_info.len() as u32,
create_info.as_ptr(),
allocation_callbacks.as_raw_ptr(),
pipelines.as_mut_ptr(),
);
match err_code {
vk::Result::SUCCESS => Ok(pipelines),
_ => Err(err_code),
}
self.ray_tracing_fn
.create_ray_tracing_pipelines_nv(
self.handle,
pipeline_cache,
create_info.len() as u32,
create_info.as_ptr(),
allocation_callbacks.as_raw_ptr(),
pipelines.as_mut_ptr(),
)
.result_with_success(pipelines)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetRayTracingShaderGroupHandlesNV.html>"]
@ -204,18 +198,16 @@ impl RayTracing {
group_count: u32,
data: &mut [u8],
) -> VkResult<()> {
let err_code = self.ray_tracing_fn.get_ray_tracing_shader_group_handles_nv(
self.handle,
pipeline,
first_group,
group_count,
data.len(),
data.as_mut_ptr() as *mut std::ffi::c_void,
);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.ray_tracing_fn
.get_ray_tracing_shader_group_handles_nv(
self.handle,
pipeline,
first_group,
group_count,
data.len(),
data.as_mut_ptr() as *mut std::ffi::c_void,
)
.into()
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetAccelerationStructureHandleNV.html>"]
@ -225,16 +217,14 @@ impl RayTracing {
) -> VkResult<u64> {
let mut handle: u64 = 0;
let handle_ptr: *mut u64 = &mut handle;
let err_code = self.ray_tracing_fn.get_acceleration_structure_handle_nv(
self.handle,
accel_struct,
std::mem::size_of::<u64>(),
handle_ptr as *mut std::ffi::c_void,
);
match err_code {
vk::Result::SUCCESS => Ok(handle),
_ => Err(err_code),
}
self.ray_tracing_fn
.get_acceleration_structure_handle_nv(
self.handle,
accel_struct,
std::mem::size_of::<u64>(),
handle_ptr as *mut std::ffi::c_void,
)
.result_with_success(handle)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdWriteAccelerationStructuresPropertiesNV.html>"]
@ -259,13 +249,9 @@ impl RayTracing {
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCompileDeferredNV.html>"]
pub unsafe fn compile_deferred(&self, pipeline: vk::Pipeline, shader: u32) -> VkResult<()> {
let err_code = self
.ray_tracing_fn
.compile_deferred_nv(self.handle, pipeline, shader);
match err_code {
vk::Result::SUCCESS => Ok(()),
_ => Err(err_code),
}
self.ray_tracing_fn
.compile_deferred_nv(self.handle, pipeline, shader)
.into()
}
pub fn name() -> &'static CStr {

111
third_party/rust/ash/src/instance.rs vendored
View file

@ -49,17 +49,16 @@ impl InstanceV1_0 for Instance {
physical_device: vk::PhysicalDevice,
create_info: &vk::DeviceCreateInfo,
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> Result<Self::Device, vk::Result> {
) -> VkResult<Self::Device> {
let mut device: vk::Device = mem::zeroed();
let err_code = self.fp_v1_0().create_device(
physical_device,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut device,
);
if err_code != vk::Result::SUCCESS {
return Err(err_code);
}
self.fp_v1_0()
.create_device(
physical_device,
create_info,
allocation_callbacks.as_raw_ptr(),
&mut device,
)
.result()?;
Ok(Device::load(&self.instance_fn_1_0, device))
}
fn handle(&self) -> vk::Instance {
@ -92,14 +91,11 @@ pub trait InstanceV1_2: InstanceV1_1 {
pub trait InstanceV1_1: InstanceV1_0 {
fn fp_v1_1(&self) -> &vk::InstanceFnV1_1;
unsafe fn enumerate_physical_device_groups_len(&self) -> usize {
unsafe fn enumerate_physical_device_groups_len(&self) -> VkResult<usize> {
let mut group_count = mem::zeroed();
self.fp_v1_1().enumerate_physical_device_groups(
self.handle(),
&mut group_count,
ptr::null_mut(),
);
group_count as usize
self.fp_v1_1()
.enumerate_physical_device_groups(self.handle(), &mut group_count, ptr::null_mut())
.result_with_success(group_count as usize)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkEnumeratePhysicalDeviceGroups.html>"]
@ -109,16 +105,9 @@ pub trait InstanceV1_1: InstanceV1_0 {
) -> VkResult<()> {
unsafe {
let mut group_count = out.len() as u32;
let err_code = self.fp_v1_1().enumerate_physical_device_groups(
self.handle(),
&mut group_count,
out.as_mut_ptr(),
);
if err_code == vk::Result::SUCCESS {
Ok(())
} else {
Err(err_code)
}
self.fp_v1_1()
.enumerate_physical_device_groups(self.handle(), &mut group_count, out.as_mut_ptr())
.into()
}
}
@ -160,16 +149,13 @@ pub trait InstanceV1_1: InstanceV1_0 {
format_info: &vk::PhysicalDeviceImageFormatInfo2,
image_format_prop: &mut vk::ImageFormatProperties2,
) -> VkResult<()> {
let err_code = self.fp_v1_1().get_physical_device_image_format_properties2(
physical_device,
format_info,
image_format_prop,
);
if err_code == vk::Result::SUCCESS {
Ok(())
} else {
Err(err_code)
}
self.fp_v1_1()
.get_physical_device_image_format_properties2(
physical_device,
format_info,
image_format_prop,
)
.into()
}
unsafe fn get_physical_device_queue_family_properties2_len(
@ -304,7 +290,7 @@ pub trait InstanceV1_0 {
physical_device: vk::PhysicalDevice,
create_info: &vk::DeviceCreateInfo,
allocation_callbacks: Option<&vk::AllocationCallbacks>,
) -> Result<Self::Device, vk::Result>;
) -> VkResult<Self::Device>;
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetDeviceProcAddr.html>"]
unsafe fn get_device_proc_addr(
@ -347,20 +333,17 @@ pub trait InstanceV1_0 {
flags: vk::ImageCreateFlags,
) -> VkResult<vk::ImageFormatProperties> {
let mut image_format_prop = mem::zeroed();
let err_code = self.fp_v1_0().get_physical_device_image_format_properties(
physical_device,
format,
typ,
tiling,
usage,
flags,
&mut image_format_prop,
);
if err_code == vk::Result::SUCCESS {
Ok(image_format_prop)
} else {
Err(err_code)
}
self.fp_v1_0()
.get_physical_device_image_format_properties(
physical_device,
format,
typ,
tiling,
usage,
flags,
&mut image_format_prop,
)
.result_with_success(image_format_prop)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetPhysicalDeviceMemoryProperties.html>"]
@ -421,7 +404,8 @@ pub trait InstanceV1_0 {
unsafe fn enumerate_physical_devices(&self) -> VkResult<Vec<vk::PhysicalDevice>> {
let mut num = mem::zeroed();
self.fp_v1_0()
.enumerate_physical_devices(self.handle(), &mut num, ptr::null_mut());
.enumerate_physical_devices(self.handle(), &mut num, ptr::null_mut())
.result()?;
let mut physical_devices = Vec::<vk::PhysicalDevice>::with_capacity(num as usize);
let err_code = self.fp_v1_0().enumerate_physical_devices(
self.handle(),
@ -429,24 +413,18 @@ pub trait InstanceV1_0 {
physical_devices.as_mut_ptr(),
);
physical_devices.set_len(num as usize);
match err_code {
vk::Result::SUCCESS => Ok(physical_devices),
_ => Err(err_code),
}
err_code.result_with_success(physical_devices)
}
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkEnumerateDeviceExtensionProperties.html>"]
unsafe fn enumerate_device_extension_properties(
&self,
device: vk::PhysicalDevice,
) -> Result<Vec<vk::ExtensionProperties>, vk::Result> {
) -> VkResult<Vec<vk::ExtensionProperties>> {
let mut num = 0;
self.fp_v1_0().enumerate_device_extension_properties(
device,
ptr::null(),
&mut num,
ptr::null_mut(),
);
self.fp_v1_0()
.enumerate_device_extension_properties(device, ptr::null(), &mut num, ptr::null_mut())
.result()?;
let mut data = Vec::with_capacity(num as usize);
let err_code = self.fp_v1_0().enumerate_device_extension_properties(
device,
@ -455,9 +433,6 @@ pub trait InstanceV1_0 {
data.as_mut_ptr(),
);
data.set_len(num as usize);
match err_code {
vk::Result::SUCCESS => Ok(data),
_ => Err(err_code),
}
err_code.result_with_success(data)
}
}

6
third_party/rust/ash/src/lib.rs vendored
View file

@ -1,4 +1,8 @@
#![allow(clippy::too_many_arguments, clippy::missing_safety_doc)]
#![allow(
clippy::too_many_arguments,
clippy::missing_safety_doc,
clippy::upper_case_acronyms
)]
//! # Vulkan API
//!
//! <https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/index.html>

19
third_party/rust/ash/src/prelude.rs vendored
View file

@ -1,2 +1,21 @@
use crate::vk;
pub type VkResult<T> = Result<T, vk::Result>;
impl From<vk::Result> for VkResult<()> {
fn from(err_code: vk::Result) -> Self {
err_code.result()
}
}
impl vk::Result {
pub fn result(self) -> VkResult<()> {
self.result_with_success(())
}
pub fn result_with_success<T>(self, v: T) -> VkResult<T> {
match self {
vk::Result::SUCCESS => Ok(v),
_ => Err(self),
}
}
}
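These two helpers are what the rest of the patch migrates the extension wrappers to; a small self-contained illustration of their behaviour (my example, not part of the crate):

use ash::vk;

fn main() {
    // result_with_success maps SUCCESS to Ok(payload) and any other code to Err(code);
    // result() is the unit-payload shorthand used for VkResult<()>.
    assert_eq!(vk::Result::SUCCESS.result_with_success(7u32), Ok(7));
    assert_eq!(
        vk::Result::ERROR_DEVICE_LOST.result_with_success(7u32),
        Err(vk::Result::ERROR_DEVICE_LOST)
    );
    assert_eq!(vk::Result::SUCCESS.result(), Ok(()));
}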

6
third_party/rust/ash/src/vk.rs vendored
View file

@ -1,4 +1,8 @@
#![allow(clippy::too_many_arguments, clippy::cognitive_complexity, clippy::wrong_self_convention)]
#![allow(
clippy::too_many_arguments,
clippy::cognitive_complexity,
clippy::wrong_self_convention
)]
#[macro_use]
mod macros;
pub use macros::*;

5
third_party/rust/ash/src/vk/aliases.rs vendored
View file

@ -21,7 +21,6 @@ pub type DescriptorBindingFlagsEXT = DescriptorBindingFlags;
pub type ResolveModeFlagsKHR = ResolveModeFlags;
pub type DescriptorUpdateTemplateKHR = DescriptorUpdateTemplate;
pub type SamplerYcbcrConversionKHR = SamplerYcbcrConversion;
pub type AccelerationStructureNV = AccelerationStructureKHR;
pub type DescriptorUpdateTemplateTypeKHR = DescriptorUpdateTemplateType;
pub type PointClippingBehaviorKHR = PointClippingBehavior;
pub type SemaphoreTypeKHR = SemaphoreType;
@ -29,8 +28,6 @@ pub type CopyAccelerationStructureModeNV = CopyAccelerationStructureModeKHR;
pub type AccelerationStructureTypeNV = AccelerationStructureTypeKHR;
pub type GeometryTypeNV = GeometryTypeKHR;
pub type RayTracingShaderGroupTypeNV = RayTracingShaderGroupTypeKHR;
pub type AccelerationStructureMemoryRequirementsTypeNV =
AccelerationStructureMemoryRequirementsTypeKHR;
pub type TessellationDomainOriginKHR = TessellationDomainOrigin;
pub type SamplerYcbcrModelConversionKHR = SamplerYcbcrModelConversion;
pub type SamplerYcbcrRangeKHR = SamplerYcbcrRange;
@ -144,8 +141,6 @@ pub type PhysicalDeviceShaderAtomicInt64FeaturesKHR = PhysicalDeviceShaderAtomic
pub type PhysicalDeviceDepthStencilResolvePropertiesKHR =
PhysicalDeviceDepthStencilResolveProperties;
pub type SubpassDescriptionDepthStencilResolveKHR = SubpassDescriptionDepthStencilResolve;
pub type BindAccelerationStructureMemoryInfoNV = BindAccelerationStructureMemoryInfoKHR;
pub type WriteDescriptorSetAccelerationStructureNV = WriteDescriptorSetAccelerationStructureKHR;
pub type ImageStencilUsageCreateInfoEXT = ImageStencilUsageCreateInfo;
pub type PhysicalDeviceScalarBlockLayoutFeaturesEXT = PhysicalDeviceScalarBlockLayoutFeatures;
pub type PhysicalDeviceUniformBufferStandardLayoutFeaturesKHR =

102
third_party/rust/ash/src/vk/bitflags.rs vendored
View file

@ -7,17 +7,6 @@ vk_bitflags_wrapped!(PipelineCacheCreateFlags, 0b0, Flags);
impl PipelineCacheCreateFlags {}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkCullModeFlagBits.html>"]
pub struct CullModeFlags(pub(crate) Flags);
vk_bitflags_wrapped!(CullModeFlags, 0b11, Flags);
impl CullModeFlags {
pub const NONE: Self = Self(0);
pub const FRONT: Self = Self(0b1);
pub const BACK: Self = Self(0b10);
pub const FRONT_AND_BACK: Self = Self(0x0000_0003);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkQueueFlagBits.html>"]
pub struct QueueFlags(pub(crate) Flags);
vk_bitflags_wrapped!(QueueFlags, 0b1111, Flags);
@ -33,6 +22,17 @@ impl QueueFlags {
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkCullModeFlagBits.html>"]
pub struct CullModeFlags(pub(crate) Flags);
vk_bitflags_wrapped!(CullModeFlags, 0b11, Flags);
impl CullModeFlags {
pub const NONE: Self = Self(0);
pub const FRONT: Self = Self(0b1);
pub const BACK: Self = Self(0b10);
pub const FRONT_AND_BACK: Self = Self(0x0000_0003);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkRenderPassCreateFlagBits.html>"]
pub struct RenderPassCreateFlags(pub(crate) Flags);
vk_bitflags_wrapped!(RenderPassCreateFlags, 0b0, Flags);
@ -256,12 +256,6 @@ impl FenceCreateFlags {
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkSemaphoreCreateFlagBits.html>"]
pub struct SemaphoreCreateFlags(pub(crate) Flags);
vk_bitflags_wrapped!(SemaphoreCreateFlags, 0b0, Flags);
impl SemaphoreCreateFlags {}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkFormatFeatureFlagBits.html>"]
pub struct FormatFeatureFlags(pub(crate) Flags);
vk_bitflags_wrapped!(FormatFeatureFlags, 0b1_1111_1111_1111, Flags);
@ -593,10 +587,10 @@ impl DebugReportFlagsEXT {
pub struct ExternalMemoryHandleTypeFlagsNV(pub(crate) Flags);
vk_bitflags_wrapped!(ExternalMemoryHandleTypeFlagsNV, 0b1111, Flags);
impl ExternalMemoryHandleTypeFlagsNV {
pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_NV: Self = Self(0b1);
pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_NV: Self = Self(0b10);
pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_NV: Self = Self(0b100);
pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_NV: Self = Self(0b1000);
pub const OPAQUE_WIN32: Self = Self(0b1);
pub const OPAQUE_WIN32_KMT: Self = Self(0b10);
pub const D3D11_IMAGE: Self = Self(0b100);
pub const D3D11_IMAGE_KMT: Self = Self(0b1000);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -604,9 +598,9 @@ impl ExternalMemoryHandleTypeFlagsNV {
pub struct ExternalMemoryFeatureFlagsNV(pub(crate) Flags);
vk_bitflags_wrapped!(ExternalMemoryFeatureFlagsNV, 0b111, Flags);
impl ExternalMemoryFeatureFlagsNV {
pub const EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_NV: Self = Self(0b1);
pub const EXTERNAL_MEMORY_FEATURE_EXPORTABLE_NV: Self = Self(0b10);
pub const EXTERNAL_MEMORY_FEATURE_IMPORTABLE_NV: Self = Self(0b100);
pub const DEDICATED_ONLY: Self = Self(0b1);
pub const EXPORTABLE: Self = Self(0b10);
pub const IMPORTABLE: Self = Self(0b100);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -651,6 +645,12 @@ impl IndirectStateFlagsNV {
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPrivateDataSlotCreateFlagBitsEXT.html>"]
pub struct PrivateDataSlotCreateFlagsEXT(pub(crate) Flags);
vk_bitflags_wrapped!(PrivateDataSlotCreateFlagsEXT, 0b0, Flags);
impl PrivateDataSlotCreateFlagsEXT {}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkDescriptorSetLayoutCreateFlagBits.html>"]
pub struct DescriptorSetLayoutCreateFlags(pub(crate) Flags);
vk_bitflags_wrapped!(DescriptorSetLayoutCreateFlags, 0b0, Flags);
@ -661,13 +661,13 @@ impl DescriptorSetLayoutCreateFlags {}
pub struct ExternalMemoryHandleTypeFlags(pub(crate) Flags);
vk_bitflags_wrapped!(ExternalMemoryHandleTypeFlags, 0b111_1111, Flags);
impl ExternalMemoryHandleTypeFlags {
pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD: Self = Self(0b1);
pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32: Self = Self(0b10);
pub const EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT: Self = Self(0b100);
pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE: Self = Self(0b1000);
pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT: Self = Self(0b1_0000);
pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP: Self = Self(0b10_0000);
pub const EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE: Self = Self(0b100_0000);
pub const OPAQUE_FD: Self = Self(0b1);
pub const OPAQUE_WIN32: Self = Self(0b10);
pub const OPAQUE_WIN32_KMT: Self = Self(0b100);
pub const D3D11_TEXTURE: Self = Self(0b1000);
pub const D3D11_TEXTURE_KMT: Self = Self(0b1_0000);
pub const D3D12_HEAP: Self = Self(0b10_0000);
pub const D3D12_RESOURCE: Self = Self(0b100_0000);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -675,9 +675,9 @@ impl ExternalMemoryHandleTypeFlags {
pub struct ExternalMemoryFeatureFlags(pub(crate) Flags);
vk_bitflags_wrapped!(ExternalMemoryFeatureFlags, 0b111, Flags);
impl ExternalMemoryFeatureFlags {
pub const EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY: Self = Self(0b1);
pub const EXTERNAL_MEMORY_FEATURE_EXPORTABLE: Self = Self(0b10);
pub const EXTERNAL_MEMORY_FEATURE_IMPORTABLE: Self = Self(0b100);
pub const DEDICATED_ONLY: Self = Self(0b1);
pub const EXPORTABLE: Self = Self(0b10);
pub const IMPORTABLE: Self = Self(0b100);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -685,11 +685,11 @@ impl ExternalMemoryFeatureFlags {
pub struct ExternalSemaphoreHandleTypeFlags(pub(crate) Flags);
vk_bitflags_wrapped!(ExternalSemaphoreHandleTypeFlags, 0b1_1111, Flags);
impl ExternalSemaphoreHandleTypeFlags {
pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD: Self = Self(0b1);
pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32: Self = Self(0b10);
pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT: Self = Self(0b100);
pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE: Self = Self(0b1000);
pub const EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD: Self = Self(0b1_0000);
pub const OPAQUE_FD: Self = Self(0b1);
pub const OPAQUE_WIN32: Self = Self(0b10);
pub const OPAQUE_WIN32_KMT: Self = Self(0b100);
pub const D3D12_FENCE: Self = Self(0b1000);
pub const SYNC_FD: Self = Self(0b1_0000);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -697,8 +697,8 @@ impl ExternalSemaphoreHandleTypeFlags {
pub struct ExternalSemaphoreFeatureFlags(pub(crate) Flags);
vk_bitflags_wrapped!(ExternalSemaphoreFeatureFlags, 0b11, Flags);
impl ExternalSemaphoreFeatureFlags {
pub const EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE: Self = Self(0b1);
pub const EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE: Self = Self(0b10);
pub const EXPORTABLE: Self = Self(0b1);
pub const IMPORTABLE: Self = Self(0b10);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -714,10 +714,10 @@ impl SemaphoreImportFlags {
pub struct ExternalFenceHandleTypeFlags(pub(crate) Flags);
vk_bitflags_wrapped!(ExternalFenceHandleTypeFlags, 0b1111, Flags);
impl ExternalFenceHandleTypeFlags {
pub const EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD: Self = Self(0b1);
pub const EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32: Self = Self(0b10);
pub const EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT: Self = Self(0b100);
pub const EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD: Self = Self(0b1000);
pub const OPAQUE_FD: Self = Self(0b1);
pub const OPAQUE_WIN32: Self = Self(0b10);
pub const OPAQUE_WIN32_KMT: Self = Self(0b100);
pub const SYNC_FD: Self = Self(0b1000);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -725,8 +725,8 @@ impl ExternalFenceHandleTypeFlags {
pub struct ExternalFenceFeatureFlags(pub(crate) Flags);
vk_bitflags_wrapped!(ExternalFenceFeatureFlags, 0b11, Flags);
impl ExternalFenceFeatureFlags {
pub const EXTERNAL_FENCE_FEATURE_EXPORTABLE: Self = Self(0b1);
pub const EXTERNAL_FENCE_FEATURE_IMPORTABLE: Self = Self(0b10);
pub const EXPORTABLE: Self = Self(0b1);
pub const IMPORTABLE: Self = Self(0b10);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -881,6 +881,14 @@ impl BuildAccelerationStructureFlagsKHR {
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkAccelerationStructureCreateFlagBitsKHR.html>"]
pub struct AccelerationStructureCreateFlagsKHR(pub(crate) Flags);
vk_bitflags_wrapped!(AccelerationStructureCreateFlagsKHR, 0b1, Flags);
impl AccelerationStructureCreateFlagsKHR {
pub const DEVICE_ADDRESS_CAPTURE_REPLAY: Self = Self(0b1);
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkFramebufferCreateFlagBits.html>"]
pub struct FramebufferCreateFlags(pub(crate) Flags);
vk_bitflags_wrapped!(FramebufferCreateFlags, 0b0, Flags);

621 third_party/rust/ash/src/vk/const_debugs.rs vendored

Diff not shown because the file is too large.

5469 third_party/rust/ash/src/vk/definitions.rs vendored

Diff not shown because the file is too large.

187 third_party/rust/ash/src/vk/enums.rs vendored

@ -869,6 +869,7 @@ impl SubpassContents {
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkResult.html>"]
#[must_use]
pub struct Result(pub(crate) i32);
impl Result {
pub const fn from_raw(x: i32) -> Self {
@ -913,7 +914,7 @@ impl Result {
pub const ERROR_TOO_MANY_OBJECTS: Self = Self(-10);
#[doc = "Requested format is not supported on this device"]
pub const ERROR_FORMAT_NOT_SUPPORTED: Self = Self(-11);
#[doc = "A requested pool allocation has failed due to fragmentation of the pool\'s memory"]
#[doc = "A requested pool allocation has failed due to fragmentation of the pool's memory"]
pub const ERROR_FRAGMENTED_POOL: Self = Self(-12);
#[doc = "An unknown error has occurred, due to an implementation or application bug"]
pub const ERROR_UNKNOWN: Self = Self(-13);
@ -947,7 +948,7 @@ impl ::std::error::Error for Result {
Some("Requested format is not supported on this device")
}
Result::ERROR_FRAGMENTED_POOL => Some(
"A requested pool allocation has failed due to fragmentation of the pool\'s memory",
"A requested pool allocation has failed due to fragmentation of the pool's memory",
),
Result::ERROR_UNKNOWN => {
Some("An unknown error has occurred, due to an implementation or application bug")
@ -986,7 +987,7 @@ impl fmt::Display for Result {
Some("Requested format is not supported on this device")
}
Result::ERROR_FRAGMENTED_POOL => Some(
"A requested pool allocation has failed due to fragmentation of the pool\'s memory",
"A requested pool allocation has failed due to fragmentation of the pool's memory",
),
Result::ERROR_UNKNOWN => {
Some("An unknown error has occurred, due to an implementation or application bug")
@ -1053,55 +1054,30 @@ impl ObjectType {
}
impl ObjectType {
pub const UNKNOWN: Self = Self(0);
#[doc = "VkInstance"]
pub const INSTANCE: Self = Self(1);
#[doc = "VkPhysicalDevice"]
pub const PHYSICAL_DEVICE: Self = Self(2);
#[doc = "VkDevice"]
pub const DEVICE: Self = Self(3);
#[doc = "VkQueue"]
pub const QUEUE: Self = Self(4);
#[doc = "VkSemaphore"]
pub const SEMAPHORE: Self = Self(5);
#[doc = "VkCommandBuffer"]
pub const COMMAND_BUFFER: Self = Self(6);
#[doc = "VkFence"]
pub const FENCE: Self = Self(7);
#[doc = "VkDeviceMemory"]
pub const DEVICE_MEMORY: Self = Self(8);
#[doc = "VkBuffer"]
pub const BUFFER: Self = Self(9);
#[doc = "VkImage"]
pub const IMAGE: Self = Self(10);
#[doc = "VkEvent"]
pub const EVENT: Self = Self(11);
#[doc = "VkQueryPool"]
pub const QUERY_POOL: Self = Self(12);
#[doc = "VkBufferView"]
pub const BUFFER_VIEW: Self = Self(13);
#[doc = "VkImageView"]
pub const IMAGE_VIEW: Self = Self(14);
#[doc = "VkShaderModule"]
pub const SHADER_MODULE: Self = Self(15);
#[doc = "VkPipelineCache"]
pub const PIPELINE_CACHE: Self = Self(16);
#[doc = "VkPipelineLayout"]
pub const PIPELINE_LAYOUT: Self = Self(17);
#[doc = "VkRenderPass"]
pub const RENDER_PASS: Self = Self(18);
#[doc = "VkPipeline"]
pub const PIPELINE: Self = Self(19);
#[doc = "VkDescriptorSetLayout"]
pub const DESCRIPTOR_SET_LAYOUT: Self = Self(20);
#[doc = "VkSampler"]
pub const SAMPLER: Self = Self(21);
#[doc = "VkDescriptorPool"]
pub const DESCRIPTOR_POOL: Self = Self(22);
#[doc = "VkDescriptorSet"]
pub const DESCRIPTOR_SET: Self = Self(23);
#[doc = "VkFramebuffer"]
pub const FRAMEBUFFER: Self = Self(24);
#[doc = "VkCommandPool"]
pub const COMMAND_POOL: Self = Self(25);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
@ -1212,10 +1188,29 @@ impl DebugReportObjectTypeEXT {
pub const COMMAND_POOL: Self = Self(25);
pub const SURFACE_KHR: Self = Self(26);
pub const SWAPCHAIN_KHR: Self = Self(27);
pub const DEBUG_REPORT_CALLBACK: Self = Self(28);
pub const DEBUG_REPORT_CALLBACK_EXT: Self = Self(28);
pub const DISPLAY_KHR: Self = Self(29);
pub const DISPLAY_MODE_KHR: Self = Self(30);
pub const VALIDATION_CACHE: Self = Self(33);
pub const VALIDATION_CACHE_EXT: Self = Self(33);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkDeviceMemoryReportEventTypeEXT.html>"]
pub struct DeviceMemoryReportEventTypeEXT(pub(crate) i32);
impl DeviceMemoryReportEventTypeEXT {
pub const fn from_raw(x: i32) -> Self {
DeviceMemoryReportEventTypeEXT(x)
}
pub const fn as_raw(self) -> i32 {
self.0
}
}
impl DeviceMemoryReportEventTypeEXT {
pub const ALLOCATE: Self = Self(0);
pub const FREE: Self = Self(1);
pub const IMPORT: Self = Self(2);
pub const UNIMPORT: Self = Self(3);
pub const ALLOCATION_FAILED: Self = Self(4);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
@ -1266,6 +1261,7 @@ impl ValidationFeatureEnableEXT {
pub const GPU_ASSISTED_RESERVE_BINDING_SLOT: Self = Self(1);
pub const BEST_PRACTICES: Self = Self(2);
pub const DEBUG_PRINTF: Self = Self(3);
pub const SYNCHRONIZATION_VALIDATION: Self = Self(4);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
@ -1640,6 +1636,10 @@ impl VendorId {
pub const KAZAN: Self = Self(0x1_0003);
#[doc = "Codeplay Software Ltd. vendor ID"]
pub const CODEPLAY: Self = Self(0x1_0004);
#[doc = "Mesa vendor ID"]
pub const MESA: Self = Self(0x1_0005);
#[doc = "PoCL vendor ID"]
pub const POCL: Self = Self(0x1_0006);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
@ -1678,6 +1678,10 @@ impl DriverId {
pub const GGP_PROPRIETARY: Self = Self(11);
#[doc = "Broadcom Inc."]
pub const BROADCOM_PROPRIETARY: Self = Self(12);
#[doc = "Mesa"]
pub const MESA_LLVMPIPE: Self = Self(13);
#[doc = "MoltenVK"]
pub const MOLTENVK: Self = Self(14);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
@ -1743,6 +1747,22 @@ impl CopyAccelerationStructureModeKHR {
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkBuildAccelerationStructureModeKHR.html>"]
pub struct BuildAccelerationStructureModeKHR(pub(crate) i32);
impl BuildAccelerationStructureModeKHR {
pub const fn from_raw(x: i32) -> Self {
BuildAccelerationStructureModeKHR(x)
}
pub const fn as_raw(self) -> i32 {
self.0
}
}
impl BuildAccelerationStructureModeKHR {
pub const BUILD: Self = Self(0);
pub const UPDATE: Self = Self(1);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkAccelerationStructureTypeKHR.html>"]
pub struct AccelerationStructureTypeKHR(pub(crate) i32);
impl AccelerationStructureTypeKHR {
@ -1756,6 +1776,7 @@ impl AccelerationStructureTypeKHR {
impl AccelerationStructureTypeKHR {
pub const TOP_LEVEL: Self = Self(0);
pub const BOTTOM_LEVEL: Self = Self(1);
pub const GENERIC: Self = Self(2);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
@ -1772,20 +1793,21 @@ impl GeometryTypeKHR {
impl GeometryTypeKHR {
pub const TRIANGLES: Self = Self(0);
pub const AABBS: Self = Self(1);
pub const INSTANCES: Self = Self(2);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkAccelerationStructureMemoryRequirementsTypeKHR.html>"]
pub struct AccelerationStructureMemoryRequirementsTypeKHR(pub(crate) i32);
impl AccelerationStructureMemoryRequirementsTypeKHR {
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkAccelerationStructureMemoryRequirementsTypeNV.html>"]
pub struct AccelerationStructureMemoryRequirementsTypeNV(pub(crate) i32);
impl AccelerationStructureMemoryRequirementsTypeNV {
pub const fn from_raw(x: i32) -> Self {
AccelerationStructureMemoryRequirementsTypeKHR(x)
AccelerationStructureMemoryRequirementsTypeNV(x)
}
pub const fn as_raw(self) -> i32 {
self.0
}
}
impl AccelerationStructureMemoryRequirementsTypeKHR {
impl AccelerationStructureMemoryRequirementsTypeNV {
pub const OBJECT: Self = Self(0);
pub const BUILD_SCRATCH: Self = Self(1);
pub const UPDATE_SCRATCH: Self = Self(2);
@ -1826,6 +1848,40 @@ impl RayTracingShaderGroupTypeKHR {
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkAccelerationStructureCompatibilityKHR.html>"]
pub struct AccelerationStructureCompatibilityKHR(pub(crate) i32);
impl AccelerationStructureCompatibilityKHR {
pub const fn from_raw(x: i32) -> Self {
AccelerationStructureCompatibilityKHR(x)
}
pub const fn as_raw(self) -> i32 {
self.0
}
}
impl AccelerationStructureCompatibilityKHR {
pub const COMPATIBLE: Self = Self(0);
pub const INCOMPATIBLE: Self = Self(1);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkShaderGroupShaderKHR.html>"]
pub struct ShaderGroupShaderKHR(pub(crate) i32);
impl ShaderGroupShaderKHR {
pub const fn from_raw(x: i32) -> Self {
ShaderGroupShaderKHR(x)
}
pub const fn as_raw(self) -> i32 {
self.0
}
}
impl ShaderGroupShaderKHR {
pub const GENERAL: Self = Self(0);
pub const CLOSEST_HIT: Self = Self(1);
pub const ANY_HIT: Self = Self(2);
pub const INTERSECTION: Self = Self(3);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkMemoryOverallocationBehaviorAMD.html>"]
pub struct MemoryOverallocationBehaviorAMD(pub(crate) i32);
impl MemoryOverallocationBehaviorAMD {
@ -2099,3 +2155,64 @@ impl LineRasterizationModeEXT {
pub const BRESENHAM: Self = Self(2);
pub const RECTANGULAR_SMOOTH: Self = Self(3);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkFragmentShadingRateCombinerOpKHR.html>"]
pub struct FragmentShadingRateCombinerOpKHR(pub(crate) i32);
impl FragmentShadingRateCombinerOpKHR {
pub const fn from_raw(x: i32) -> Self {
FragmentShadingRateCombinerOpKHR(x)
}
pub const fn as_raw(self) -> i32 {
self.0
}
}
impl FragmentShadingRateCombinerOpKHR {
pub const KEEP: Self = Self(0);
pub const REPLACE: Self = Self(1);
pub const MIN: Self = Self(2);
pub const MAX: Self = Self(3);
pub const MUL: Self = Self(4);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkFragmentShadingRateNV.html>"]
pub struct FragmentShadingRateNV(pub(crate) i32);
impl FragmentShadingRateNV {
pub const fn from_raw(x: i32) -> Self {
FragmentShadingRateNV(x)
}
pub const fn as_raw(self) -> i32 {
self.0
}
}
impl FragmentShadingRateNV {
pub const TYPE_1_INVOCATION_PER_PIXEL: Self = Self(0);
pub const TYPE_1_INVOCATION_PER_1X2_PIXELS: Self = Self(1);
pub const TYPE_1_INVOCATION_PER_2X1_PIXELS: Self = Self(4);
pub const TYPE_1_INVOCATION_PER_2X2_PIXELS: Self = Self(5);
pub const TYPE_1_INVOCATION_PER_2X4_PIXELS: Self = Self(6);
pub const TYPE_1_INVOCATION_PER_4X2_PIXELS: Self = Self(9);
pub const TYPE_1_INVOCATION_PER_4X4_PIXELS: Self = Self(10);
pub const TYPE_2_INVOCATIONS_PER_PIXEL: Self = Self(11);
pub const TYPE_4_INVOCATIONS_PER_PIXEL: Self = Self(12);
pub const TYPE_8_INVOCATIONS_PER_PIXEL: Self = Self(13);
pub const TYPE_16_INVOCATIONS_PER_PIXEL: Self = Self(14);
pub const NO_INVOCATIONS: Self = Self(15);
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
#[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkFragmentShadingRateTypeNV.html>"]
pub struct FragmentShadingRateTypeNV(pub(crate) i32);
impl FragmentShadingRateTypeNV {
pub const fn from_raw(x: i32) -> Self {
FragmentShadingRateTypeNV(x)
}
pub const fn as_raw(self) -> i32 {
self.0
}
}
impl FragmentShadingRateTypeNV {
pub const FRAGMENT_SIZE: Self = Self(0);
pub const ENUMS: Self = Self(1);
}

9227 third_party/rust/ash/src/vk/extensions.rs vendored

Diff not shown because the file is too large.

1989 third_party/rust/ash/src/vk/features.rs vendored

Diff not shown because the file is too large.

10 third_party/rust/ash/src/vk/macros.rs vendored

@ -16,7 +16,7 @@ pub const fn version_patch(version: u32) -> u32 {
}
#[macro_export]
macro_rules! vk_bitflags_wrapped {
( $ name : ident , $ all : expr , $ flag_type : ty ) => {
($ name : ident , $ all : expr , $ flag_type : ty) => {
impl Default for $name {
fn default() -> $name {
$name(0)
@ -120,10 +120,10 @@ macro_rules! vk_bitflags_wrapped {
}
#[macro_export]
macro_rules! handle_nondispatchable {
( $ name : ident , $ ty : ident ) => {
($ name : ident , $ ty : ident) => {
handle_nondispatchable!($name, $ty, doc = "");
};
( $ name : ident , $ ty : ident , $ doc_link : meta ) => {
($ name : ident , $ ty : ident , $ doc_link : meta) => {
#[repr(transparent)]
#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Hash, Default)]
#[$doc_link]
@ -156,10 +156,10 @@ macro_rules! handle_nondispatchable {
}
#[macro_export]
macro_rules! define_handle {
( $ name : ident , $ ty : ident ) => {
($ name : ident , $ ty : ident) => {
define_handle!($name, $ty, doc = "");
};
( $ name : ident , $ ty : ident , $ doc_link : meta ) => {
($ name : ident , $ ty : ident , $ doc_link : meta) => {
#[repr(transparent)]
#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Hash)]
#[$doc_link]


@ -27,8 +27,8 @@ pub type zx_handle_t = u32;
pub type SECURITY_ATTRIBUTES = ();
pub type ANativeWindow = c_void;
pub type AHardwareBuffer = c_void;
#[doc = r" This definition is experimental and won't adhere to semver rules."]
pub type GgpStreamDescriptor = u32;
#[doc = r" This definition is experimental and won't adhere to semver rules."]
pub type GgpFrameToken = u32;
pub type CAMetalLayer = c_void;
pub type GgpStreamDescriptor = u32;
pub type GgpFrameToken = u64;
pub type IDirectFB = c_void;
pub type IDirectFBSurface = c_void;


@ -1,5 +1,3 @@
use ash;
use ash::vk::{PhysicalDeviceProperties, PipelineColorBlendStateCreateInfo};
#[test]

1 third_party/rust/codespan-reporting/.cargo-checksum.json vendored Normal file

Diff hidden because one or more lines are too long.

408 third_party/rust/codespan-reporting/CHANGELOG.md vendored Normal file

@ -0,0 +1,408 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.11.1] - 2021-01-18
### Added
- Add `Chars::{box_drawing, ascii}` functions, the latter supporting a rustc-style of
output that only uses ASCII characters (not above U+007F) for use cases that do not allow
for box drawing characters, e.g. terminals that do not support them.
### Changed
- `Diagnostic::with_labels` and `Diagnostic::with_notes` now append additional
labels rather than overwriting them, meaning that the documentation and behaviour match
more closely. The behaviour will only differ if you call the same builder methods
multiple times. If you call every builder method once only, nothing should change.
- `config::Chars::snippet_start` is now a String instead of a single `char`.
## [0.11.0] - 2020-11-30
There is now a [code of conduct](https://github.com/brendanzab/codespan/blob/master/CODE_OF_CONDUCT.md)
and a [contributing guide](https://github.com/brendanzab/codespan/blob/master/CONTRIBUTING.md).
Some versions were skipped to sync up with the `codespan-lsp` crate. The release
process has been changed so this should not happen again.
### Added
- If a label spans over multiple lines, not all lines are rendered.
The number of lines rendered at beginning and end is configurable separately.
- There is now a custom error type.
- There now is a medium rendering mode that is like the short rendering mode
but also shows notes from the diagnostic.
- `PartialEq` and `Eq` implementations for the `diagnostic::{Diagnostic, Label, Severity}` types.
### Changed
- All errors now use the error type `codespan_reporting::file::Error`.
This type also replaces the custom error type for `codespan-lsp`.
### Fixed
- Empty error codes are not rendered.
- The locus ("location of the diagnostic") is now computed so it is always at the first
primary label, or at the first secondary label if no primary labels are available.
- All `unwrap`s outside of tests and examples have been removed.
- Some internal improvements, including various code style improvements by using Clippy.
- Improved documentation, also mentioning how the ordering of labels is handled.
## [0.9.5] - 2020-06-24
### Changed
- Sections of source code that are marked with primary labels are now rendered
using the primary highlight color.
- Tab stops are now rendered properly.
We used to just render `\t` characters in source snippets with the same
number of spaces.
<details>
<summary>Example</summary>
For example, when rendering with a tab width of `3` we
would print:
```text
warning: tab test
┌─ tab_columns:1:2
1 │ hello
│ ^^^^^
2 │ ∙ hello
│ ^^^^^
3 │ ∙∙ hello
│ ^^^^^
4 │ ∙∙∙ hello
│ ^^^^^
5 │ ∙∙∙∙ hello
│ ^^^^^
6 │ ∙∙∙∙∙ hello
│ ^^^^^
7 │ ∙∙∙∙∙∙ hello
│ ^^^^^
```
Now we properly take into account the column of the tab character:
```text
warning: tab test
┌─ tab_columns:1:2
1 │ hello
│ ^^^^^
2 │ ∙ hello
│ ^^^^^
3 │ ∙∙ hello
│ ^^^^^
4 │ ∙∙∙ hello
│ ^^^^^
5 │ ∙∙∙∙ hello
│ ^^^^^
6 │ ∙∙∙∙∙ hello
│ ^^^^^
7 │ ∙∙∙∙∙∙ hello
│ ^^^^^
```
</details>
## [0.9.4] - 2020-05-18
### Changed
- We have made the caret rendering easier to read when there are multiple
labels on the same line. We also avoid printing trailing borders on the
final source snippet if no notes are present.
<details>
<summary>Example</summary>
Instead of this:
```text
┌─ one_line.rs:3:5
3 │ v.push(v.pop().unwrap());
│ - first borrow later used by call
│ ---- first mutable borrow occurs here
│ ^ second mutable borrow occurs here
```
…we now render the following:
```text
┌─ one_line.rs:3:5
3 │ v.push(v.pop().unwrap());
│ - ---- ^ second mutable borrow occurs here
│ │ │
│ │ first mutable borrow occurs here
│ first borrow later used by call
```
</details>
### Fixed
- Diagnostic rendering no longer panics if label ranges are between UTF-8
character boundaries.
## [0.9.3] - 2020-04-29
### Changed
- Some panics were fixed when invalid unicode boundaries are supplied.
- Labels that marked the same span were originally rendered in reverse order.
This was a mistake! We've now fixed this.
<details>
<summary>Example</summary>
For example, this diagnostic:
```text
┌─ same_range:1:7
1 │ ::S { }
│ - Expected '('
│ ^ Unexpected '{'
```
…will now be rendered as:
```text
┌─ same_range:1:7
1 │ ::S { }
│ ^ Unexpected '{'
│ - Expected '('
```
</details>
- We've reduced the prominence of the 'locus' on source snippets by
simplifying the border and reducing the spacing around it. This is to help
focus attention on the underlined source snippet and error messages, rather
than the location, which should be a secondary focus.
<details>
<summary>Example</summary>
For example we originally rendered this:
```text
error: unknown builtin: `NATRAL`
┌── Data/Nat.fun:7:13 ───
7 │ {-# BUILTIN NATRAL Nat #-}
│ ^^^^^^ unknown builtin
= there is a builtin with a similar name: `NATURAL`
```
…and now we render this:
```text
error: unknown builtin: `NATRAL`
┌─ Data/Nat.fun:7:13
7 │ {-# BUILTIN NATRAL Nat #-}
│ ^^^^^^ unknown builtin
= there is a builtin with a similar name: `NATURAL`
```
</details>
## [0.9.2] - 2020-03-29
### Changed
- Render overlapping multiline marks on the same lines of source code.
<details>
<summary>Example</summary>
For example:
```text
error[E0308]: match arms have incompatible types
┌── codespan/src/file.rs:1:9 ───
1 │ ╭ match line_index.compare(self.last_line_index()) {
2 │ │ Ordering::Less => Ok(self.line_starts()[line_index.to_usize()]),
3 │ │ Ordering::Equal => Ok(self.source_span().end()),
4 │ │ Ordering::Greater => LineIndexOutOfBoundsError {
5 │ │ given: line_index,
6 │ │ max: self.last_line_index(),
7 │ │ },
8 │ │ }
│ ╰─────────' `match` arms have incompatible types
·
2 │ Ordering::Less => Ok(self.line_starts()[line_index.to_usize()]),
│ --------------------------------------------- this is found to be of type `Result<ByteIndex, LineIndexOutOfBoundsError>`
3 │ Ordering::Equal => Ok(self.source_span().end()),
│ ---------------------------- this is found to be of type `Result<ByteIndex, LineIndexOutOfBoundsError>`
4 │ Ordering::Greater => LineIndexOutOfBoundsError {
│ ╭──────────────────────────────────^
5 │ │ given: line_index,
6 │ │ max: self.last_line_index(),
7 │ │ },
│ ╰─────────────^ expected enum `Result`, found struct `LineIndexOutOfBoundsError`
= expected type `Result<ByteIndex, LineIndexOutOfBoundsError>`
found type `LineIndexOutOfBoundsError`
```
…is now rendered as:
```text
error[E0308]: match arms have incompatible types
┌── codespan/src/file.rs:1:9 ───
1 │ ╭ match line_index.compare(self.last_line_index()) {
2 │ │ Ordering::Less => Ok(self.line_starts()[line_index.to_usize()]),
│ │ --------------------------------------------- this is found to be of type `Result<ByteIndex, LineIndexOutOfBoundsError>`
3 │ │ Ordering::Equal => Ok(self.source_span().end()),
│ │ ---------------------------- this is found to be of type `Result<ByteIndex, LineIndexOutOfBoundsError>`
4 │ │ Ordering::Greater => LineIndexOutOfBoundsError {
│ ╭─│──────────────────────────────────^
5 │ │ │ given: line_index,
6 │ │ │ max: self.last_line_index(),
7 │ │ │ },
│ ╰─│─────────────^ expected enum `Result`, found struct `LineIndexOutOfBoundsError`
8 │ │ }
│ ╰─────────' `match` arms have incompatible types
= expected type `Result<ByteIndex, LineIndexOutOfBoundsError>`
found type `LineIndexOutOfBoundsError`
```
</details>
## [0.9.1] - 2020-03-23
### Added
- `codespan_reporting::diagnostic::Diagnostic` now implements `Debug`.
### Changed
- Single-line labels are now rendered together, under the same source line.
<details>
<summary>Example</summary>
For example:
```text
┌── one_line.rs:3:5 ───
3 │ v.push(v.pop().unwrap());
│ - first borrow later used by call
·
3 │ v.push(v.pop().unwrap());
│ ---- first mutable borrow occurs here
·
3 │ v.push(v.pop().unwrap());
│ ^ second mutable borrow occurs here
```
…is now rendered as:
```text
┌── one_line.rs:3:5 ───
3 │ v.push(v.pop().unwrap());
│ - first borrow later used by call
│ ---- first mutable borrow occurs here
│ ^ second mutable borrow occurs here
```
</details>
## [0.9.0] - 2020-03-11
### Added
- The `codespan_reporting::files` module was added as a way to decouple
`codespan_reporting` from `codespan`.
- `codespan_reporting::files::Files` allows users to implement custom file
databases that work with `codespan_reporting`. This should make it
easier to integrate with libraries like Salsa, and also makes it less
invasive to use `codespan_reporting` on existing projects.
- `codespan_reporting::files::SimpleFile` is a simple implementation of
`codespan_reporting::files::Files` where only a single file is needed.
- `codespan_reporting::files::SimpleFiles` is a simple implementation of
`codespan_reporting::files::Files` where multiple files are needed.
### Changed
- The `codespan_reporting::diagnostic` module has been greatly revamped,
making the builder API format more nicely with rustfmt, and allowing for
multiple primary labels.
- The output of `codespan_reporting::term::emit` was improved,
with the following changes:
- labels on consecutive lines no longer render breaks between them
- source lines are rendered when there is only one line between labels
- the inner gutter of code snippets is now aligned consistently
- the outer gutter of consecutive code snippets are now aligned consistently
- `codespan_reporting::term::emit` now takes writers as a trait object (rather
than using static dispatch) in order to reduce code bloat and improve
compile times.
- The field names in `codespan_reporting::term::Chars` were tweaked for
consistency.
### Removed
- `codespan_reporting` no longer depends on `codespan`.
Note that `codespan` can _still_ be used with `codespan_reporting`,
as `codespan::Files` now implements `codespan_reporting::files::Files`.
## [0.8.0] - 2020-02-24
## [0.7.0] - 2020-01-06
## [0.6.0] - 2019-12-18
## [0.5.0] - 2019-10-02
## [0.4.1] - 2019-08-25
## [0.4.0] - 2019-08-22
## [0.3.0] - 2019-05-01
## [0.2.1] - 2019-02-26
## [0.2.0] - 2018-10-11
[Unreleased]: https://github.com/brendanzab/codespan/compare/v0.11.1...HEAD
[0.11.1]: https://github.com/brendanzab/codespan/compare/v0.11.0..v0.11.1
[0.11.0]: https://github.com/brendanzab/codespan/compare/v0.9.5...v0.11.0
[0.9.5]: https://github.com/brendanzab/codespan/compare/v0.9.4...v0.9.5
[0.9.4]: https://github.com/brendanzab/codespan/compare/v0.9.3...v0.9.4
[0.9.3]: https://github.com/brendanzab/codespan/compare/v0.9.2...v0.9.3
[0.9.2]: https://github.com/brendanzab/codespan/compare/v0.9.1...v0.9.2
[0.9.1]: https://github.com/brendanzab/codespan/compare/v0.9.0...v0.9.1
[0.9.0]: https://github.com/brendanzab/codespan/compare/v0.8.0...v0.9.0
[0.8.0]: https://github.com/brendanzab/codespan/compare/v0.7.0...v0.8.0
[0.7.0]: https://github.com/brendanzab/codespan/compare/v0.6.0...v0.7.0
[0.6.0]: https://github.com/brendanzab/codespan/compare/v0.5.0...v0.6.0
[0.5.0]: https://github.com/brendanzab/codespan/compare/v0.4.1...v0.5.0
[0.4.1]: https://github.com/brendanzab/codespan/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/brendanzab/codespan/compare/v0.3.0...v0.4.0
[0.3.0]: https://github.com/brendanzab/codespan/compare/v0.2.1...v0.3.0
[0.2.1]: https://github.com/brendanzab/codespan/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/brendanzab/codespan/releases/tag/v0.2.0

Some files were not shown because too many files changed in this diff.