Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1622846 - WebGPU new mapping API r=webidl,jgilbert,smaug
This PR updates wgpu to 64ae59072d
It contains a number of changes (API updates, correctness fixes, etc.), but the biggest part is the new mapping API.
Differential Revision: https://phabricator.services.mozilla.com/D92636
This commit is contained in:
Parent: a1acf45525
Commit: 5668f013a9
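For reference, a minimal sketch of how the new mapping API is used from script, following the updated mochitest further down in this diff (buffer sizes, usage flags, and variable names here are illustrative, not taken from the patch):

  // Write through a buffer that is mapped at creation.
  const upload = device.createBuffer({
    size: 4,
    usage: GPUBufferUsage.COPY_SRC,
    mappedAtCreation: true,
  });
  new Float32Array(upload.getMappedRange()).set([1.0]);
  upload.unmap();

  // Read back via mapAsync + getMappedRange (replaces the old mapReadAsync).
  const readback = device.createBuffer({
    size: 4,
    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
  });
  // ... copy data into `readback` with a command encoder, then:
  await readback.mapAsync(GPUMapMode.READ);
  const value = new Float32Array(readback.getMappedRange())[0];
  readback.unmap();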
@@ -1826,9 +1826,9 @@ dependencies = [

[[package]]
name = "gfx-backend-dx12"
version = "0.5.3"
version = "0.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37365e2927d55cefac0d3f78dfd1d3119fbb13a8bd7afe2409d729961fee22fc"
checksum = "cfd506627f3a7003e80f4344123184ce60ed06822c8b8ad2ae4ec674a512ca86"
dependencies = [
"bitflags",
"d3d12",
@@ -3999,9 +3999,9 @@ dependencies = [

[[package]]
name = "range-alloc"
version = "0.1.0"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd5927936723a9e8b715d37d7e4b390455087c4bdf25b9f702309460577b14f9"
checksum = "a871f1e45a3a3f0c73fb60343c811238bb5143a81642e27c2ac7aac27ff01a63"

[[package]]
name = "raw-cpuid"

@@ -1425,6 +1425,9 @@ DOMInterfaces = {
'GPULoadOp': {
'concrete': False,
},
'GPUMapMode': {
'concrete': False,
},
'GPUPrimitiveTopology': {
'concrete': False,
},

@@ -8,6 +8,7 @@
#include "mozilla/dom/ScriptSettings.h"
#include "mozilla/ipc/Shmem.h"
#include "js/RootingAPI.h"
#include "nsContentUtils.h"
#include "nsWrapperCache.h"
#include "Device.h"
@@ -30,17 +31,14 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(Buffer)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(Buffer)
NS_IMPL_CYCLE_COLLECTION_TRACE_PRESERVED_WRAPPER
if (tmp->mMapping) {
NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mMapping->mArrayBuffer)
if (tmp->mMapped) {
for (uint32_t i = 0; i < tmp->mMapped->mArrayBuffers.Length(); ++i) {
NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(
mMapped->mArrayBuffers[i])
}
}
NS_IMPL_CYCLE_COLLECTION_TRACE_END

Buffer::Mapping::Mapping(ipc::Shmem&& aShmem, JSObject* aArrayBuffer,
bool aWrite)
: mShmem(MakeUnique<ipc::Shmem>(std::move(aShmem))),
mArrayBuffer(aArrayBuffer),
mWrite(aWrite) {}

Buffer::Buffer(Device* const aParent, RawId aId, BufferAddress aSize)
: ChildOf(aParent), mId(aId), mSize(aSize) {
mozilla::HoldJSObjects(this);
@@ -52,30 +50,42 @@ Buffer::~Buffer() {
}

void Buffer::Cleanup() {
if (mParent) {
if (mValid && mParent) {
mValid = false;
auto bridge = mParent->GetBridge();
if (bridge && bridge->IsOpen()) {
bridge->SendBufferDestroy(mId);
}
if (bridge && mMapped) {
bridge->DeallocShmem(mMapped->mShmem);
}
}
mMapping.reset();
}

void Buffer::InitMapping(ipc::Shmem&& aShmem, JSObject* aArrayBuffer,
bool aWrite) {
mMapping.emplace(std::move(aShmem), aArrayBuffer, aWrite);
void Buffer::SetMapped(ipc::Shmem&& aShmem, bool aWritable) {
MOZ_ASSERT(!mMapped);
mMapped.emplace();
mMapped->mShmem = std::move(aShmem);
mMapped->mWritable = aWritable;
}

already_AddRefed<dom::Promise> Buffer::MapReadAsync(ErrorResult& aRv) {
already_AddRefed<dom::Promise> Buffer::MapAsync(
uint32_t aMode, uint64_t aOffset, const dom::Optional<uint64_t>& aSize,
ErrorResult& aRv) {
RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
if (NS_WARN_IF(aRv.Failed())) {
return nullptr;
}
if (mMapping) {
if (mMapped) {
aRv.ThrowInvalidStateError("Unable to map a buffer that is already mapped");
return nullptr;
}
const auto checked = CheckedInt<size_t>(mSize);
// Initialize with a dummy shmem, it will become real after the promise is
// resolved.
SetMapped(ipc::Shmem(), aMode == dom::GPUMapMode_Binding::WRITE);

const auto checked = aSize.WasPassed() ? CheckedInt<size_t>(aSize.Value())
: CheckedInt<size_t>(mSize) - aOffset;
if (!checked.isValid()) {
aRv.ThrowRangeError("Mapped size is too large");
return nullptr;
@@ -84,32 +94,16 @@ already_AddRefed<dom::Promise> Buffer::MapReadAsync(ErrorResult& aRv) {
const auto& size = checked.value();
RefPtr<Buffer> self(this);

auto mappingPromise = mParent->MapBufferForReadAsync(mId, size, aRv);
auto mappingPromise = mParent->MapBufferAsync(mId, aMode, aOffset, size, aRv);
if (!mappingPromise) {
return nullptr;
}

mappingPromise->Then(
GetMainThreadSerialEventTarget(), __func__,
[promise, size, self](ipc::Shmem&& aShmem) {
MOZ_ASSERT(aShmem.Size<uint8_t>() == size);
dom::AutoJSAPI jsapi;
if (!jsapi.Init(self->GetParentObject())) {
promise->MaybeRejectWithAbortError("Owning page was unloaded!");
return;
}
JS::Rooted<JSObject*> arrayBuffer(
jsapi.cx(),
Device::CreateExternalArrayBuffer(jsapi.cx(), size, aShmem));
if (!arrayBuffer) {
ErrorResult rv;
rv.StealExceptionFromJSContext(jsapi.cx());
promise->MaybeReject(std::move(rv));
return;
}
JS::Rooted<JS::Value> val(jsapi.cx(), JS::ObjectValue(*arrayBuffer));
self->mMapping.emplace(std::move(aShmem), arrayBuffer, false);
promise->MaybeResolve(val);
[promise, self](ipc::Shmem&& aShmem) {
self->mMapped->mShmem = std::move(aShmem);
promise->MaybeResolve(0);
},
[promise](const ipc::ResponseRejectReason&) {
promise->MaybeRejectWithAbortError("Internal communication error!");
@@ -118,18 +112,49 @@ already_AddRefed<dom::Promise> Buffer::MapReadAsync(ErrorResult& aRv) {
return promise.forget();
}

void Buffer::Unmap(JSContext* aCx, ErrorResult& aRv) {
if (!mMapping) {
void Buffer::GetMappedRange(JSContext* aCx, uint64_t aOffset,
const dom::Optional<uint64_t>& aSize,
JS::Rooted<JSObject*>* aObject, ErrorResult& aRv) {
const auto checkedOffset = CheckedInt<size_t>(aOffset);
const auto checkedSize = aSize.WasPassed()
? CheckedInt<size_t>(aSize.Value())
: CheckedInt<size_t>(mSize) - aOffset;
if (!checkedOffset.isValid() || !checkedSize.isValid()) {
aRv.ThrowRangeError("Invalid mapped range");
return;
}
JS::Rooted<JSObject*> rooted(aCx, mMapping->mArrayBuffer);
bool ok = JS::DetachArrayBuffer(aCx, rooted);
if (!ok) {
if (!mMapped || !mMapped->IsReady()) {
aRv.ThrowInvalidStateError("Buffer is not mapped");
return;
}

auto* const arrayBuffer = mParent->CreateExternalArrayBuffer(
aCx, checkedOffset.value(), checkedSize.value(), mMapped->mShmem);
if (!arrayBuffer) {
aRv.NoteJSContextException(aCx);
return;
}
mParent->UnmapBuffer(mId, std::move(mMapping->mShmem), mMapping->mWrite);
mMapping.reset();

aObject->set(arrayBuffer);
mMapped->mArrayBuffers.AppendElement(*aObject);
}

void Buffer::Unmap(JSContext* aCx, ErrorResult& aRv) {
if (!mMapped) {
return;
}

for (const auto& arrayBuffer : mMapped->mArrayBuffers) {
JS::Rooted<JSObject*> rooted(aCx, arrayBuffer);
bool ok = JS::DetachArrayBuffer(aCx, rooted);
if (!ok) {
aRv.NoteJSContextException(aCx);
return;
}
};

mParent->UnmapBuffer(mId, std::move(mMapped->mShmem), mMapped->mWritable);
mMapped.reset();
}

void Buffer::Destroy() {

@@ -8,6 +8,7 @@
#include "js/RootingAPI.h"
#include "mozilla/dom/Nullable.h"
#include "mozilla/ipc/Shmem.h"
#include "mozilla/webgpu/WebGPUTypes.h"
#include "ObjectModel.h"

@@ -19,21 +20,25 @@ namespace webgpu {
class Device;

struct MappedInfo {
ipc::Shmem mShmem;
// True if mapping is requested for writing.
bool mWritable = false;
// Populated by `GetMappedRange`.
nsTArray<JS::Heap<JSObject*>> mArrayBuffers;

MappedInfo() = default;
MappedInfo(const MappedInfo&) = delete;
bool IsReady() const { return mShmem.IsReadable(); }
};

class Buffer final : public ObjectBase, public ChildOf<Device> {
public:
GPU_DECL_CYCLE_COLLECTION(Buffer)
GPU_DECL_JS_WRAP(Buffer)

struct Mapping final {
UniquePtr<ipc::Shmem> mShmem;
JS::Heap<JSObject*> mArrayBuffer;
const bool mWrite;

Mapping(ipc::Shmem&& aShmem, JSObject* aArrayBuffer, bool aWrite);
};

Buffer(Device* const aParent, RawId aId, BufferAddress aSize);
void InitMapping(ipc::Shmem&& aShmem, JSObject* aArrayBuffer, bool aWrite);
void SetMapped(ipc::Shmem&& aShmem, bool aWritable);

const RawId mId;

@@ -46,10 +51,16 @@ class Buffer final : public ObjectBase, public ChildOf<Device> {
// are mapped.
const BufferAddress mSize;
nsString mLabel;
Maybe<Mapping> mMapping;
// Information about the currently active mapping.
Maybe<MappedInfo> mMapped;

public:
already_AddRefed<dom::Promise> MapReadAsync(ErrorResult& aRv);
already_AddRefed<dom::Promise> MapAsync(uint32_t aMode, uint64_t aOffset,
const dom::Optional<uint64_t>& aSize,
ErrorResult& aRv);
void GetMappedRange(JSContext* aCx, uint64_t aOffset,
const dom::Optional<uint64_t>& aSize,
JS::Rooted<JSObject*>* aObject, ErrorResult& aRv);
void Unmap(JSContext* aCx, ErrorResult& aRv);
void Destroy();
};

@@ -37,10 +37,11 @@ static void mapFreeCallback(void* aContents, void* aUserData) {
RefPtr<WebGPUChild> Device::GetBridge() { return mBridge; }

JSObject* Device::CreateExternalArrayBuffer(JSContext* aCx, size_t aSize,
ipc::Shmem& aShmem) {
MOZ_ASSERT(aShmem.Size<uint8_t>() == aSize);
return JS::NewExternalArrayBuffer(aCx, aSize, aShmem.get<uint8_t>(),
JSObject* Device::CreateExternalArrayBuffer(JSContext* aCx, size_t aOffset,
size_t aSize,
const ipc::Shmem& aShmem) {
MOZ_ASSERT(aOffset + aSize <= aShmem.Size<uint8_t>());
return JS::NewExternalArrayBuffer(aCx, aSize, aShmem.get<uint8_t>() + aOffset,
&mapFreeCallback, nullptr);
}

@@ -65,74 +66,79 @@ void Device::SetLabel(const nsAString& aLabel) { mLabel = aLabel; }
Queue* Device::DefaultQueue() const { return mQueue; }

already_AddRefed<Buffer> Device::CreateBuffer(
const dom::GPUBufferDescriptor& aDesc) {
const dom::GPUBufferDescriptor& aDesc, ErrorResult& aRv) {
ipc::Shmem shmem;
bool hasMapFlags = aDesc.mUsage & (dom::GPUBufferUsage_Binding::MAP_WRITE |
dom::GPUBufferUsage_Binding::MAP_READ);
if (hasMapFlags || aDesc.mMappedAtCreation) {
const auto checked = CheckedInt<size_t>(aDesc.mSize);
if (!checked.isValid()) {
aRv.ThrowRangeError("Mappable size is too large");
return nullptr;
}
const auto& size = checked.value();

// TODO: use `ShmemPool`?
if (!mBridge->AllocShmem(size, ipc::Shmem::SharedMemory::TYPE_BASIC,
&shmem)) {
aRv.ThrowAbortError(
nsPrintfCString("Unable to allocate shmem of size %" PRIuPTR, size));
return nullptr;
}

// zero out memory
memset(shmem.get<uint8_t>(), 0, size);
}

// If the buffer is not mapped at creation, and it has Shmem, we send it
// to the GPU process. Otherwise, we keep it.
RawId id = mBridge->DeviceCreateBuffer(mId, aDesc);
if (hasMapFlags && !aDesc.mMappedAtCreation) {
mBridge->SendBufferReturnShmem(id, std::move(shmem));
}
RefPtr<Buffer> buffer = new Buffer(this, id, aDesc.mSize);

if (aDesc.mMappedAtCreation) {
buffer->SetMapped(std::move(shmem),
!(aDesc.mUsage & dom::GPUBufferUsage_Binding::MAP_READ));
}

return buffer.forget();
}

void Device::CreateBufferMapped(JSContext* aCx,
const dom::GPUBufferDescriptor& aDesc,
nsTArray<JS::Value>& aSequence,
ErrorResult& aRv) {
const auto checked = CheckedInt<size_t>(aDesc.mSize);
if (!checked.isValid()) {
RefPtr<MappingPromise> Device::MapBufferAsync(RawId aId, uint32_t aMode,
size_t aOffset, size_t aSize,
ErrorResult& aRv) {
ffi::WGPUHostMap mode;
switch (aMode) {
case dom::GPUMapMode_Binding::READ:
mode = ffi::WGPUHostMap_Read;
break;
case dom::GPUMapMode_Binding::WRITE:
mode = ffi::WGPUHostMap_Write;
break;
default:
aRv.ThrowInvalidAccessError(
nsPrintfCString("Invalid map flag %u", aMode));
return nullptr;
}

const CheckedInt<uint64_t> offset(aOffset);
if (!offset.isValid()) {
aRv.ThrowRangeError("Mapped offset is too large");
return nullptr;
}
const CheckedInt<uint64_t> size(aSize);
if (!size.isValid()) {
aRv.ThrowRangeError("Mapped size is too large");
return;
}
const auto& size = checked.value();

// TODO: use `ShmemPool`
ipc::Shmem shmem;
if (!mBridge->AllocShmem(size, ipc::Shmem::SharedMemory::TYPE_BASIC,
&shmem)) {
aRv.ThrowAbortError(
nsPrintfCString("Unable to allocate shmem of size %" PRIuPTR, size));
return;
}

// zero out memory
memset(shmem.get<uint8_t>(), 0, size);

JS::Rooted<JSObject*> arrayBuffer(
aCx, CreateExternalArrayBuffer(aCx, size, shmem));
if (!arrayBuffer) {
aRv.NoteJSContextException(aCx);
return;
}

dom::GPUBufferDescriptor modifiedDesc(aDesc);
modifiedDesc.mUsage |= dom::GPUBufferUsage_Binding::MAP_WRITE;
RawId id = mBridge->DeviceCreateBuffer(mId, modifiedDesc);
RefPtr<Buffer> buffer = new Buffer(this, id, aDesc.mSize);

JS::Rooted<JS::Value> bufferValue(aCx);
if (!dom::ToJSValue(aCx, buffer, &bufferValue)) {
aRv.NoteJSContextException(aCx);
return;
}

aSequence.AppendElement(bufferValue);
aSequence.AppendElement(JS::ObjectValue(*arrayBuffer));

buffer->InitMapping(std::move(shmem), arrayBuffer, true);
}

RefPtr<MappingPromise> Device::MapBufferForReadAsync(RawId aId, size_t aSize,
ErrorResult& aRv) {
ipc::Shmem shmem;
if (!mBridge->AllocShmem(aSize, ipc::Shmem::SharedMemory::TYPE_BASIC,
&shmem)) {
aRv.ThrowAbortError(
nsPrintfCString("Unable to allocate shmem of size %" PRIuPTR, aSize));
return nullptr;
}

return mBridge->SendBufferMapRead(aId, std::move(shmem));
return mBridge->SendBufferMap(aId, mode, offset.value(), size.value());
}

void Device::UnmapBuffer(RawId aId, UniquePtr<ipc::Shmem> aShmem, bool aFlush) {
mBridge->SendDeviceUnmapBuffer(mId, aId, std::move(*aShmem), aFlush);
void Device::UnmapBuffer(RawId aId, ipc::Shmem&& aShmem, bool aFlush) {
mBridge->SendBufferUnmap(aId, std::move(aShmem), aFlush);
}

already_AddRefed<Texture> Device::CreateTexture(

@@ -177,6 +183,10 @@ already_AddRefed<BindGroup> Device::CreateBindGroup(
already_AddRefed<ShaderModule> Device::CreateShaderModule(
const dom::GPUShaderModuleDescriptor& aDesc) {
if (aDesc.mCode.IsString()) {
// we don't yet support WGSL
return nullptr;
}
RawId id = mBridge->DeviceCreateShaderModule(mId, aDesc);
RefPtr<ShaderModule> object = new ShaderModule(this, id);
return object.forget();

@@ -78,11 +78,13 @@ class Device final : public DOMEventTargetHelper {
explicit Device(Adapter* const aParent, RawId aId);

RefPtr<WebGPUChild> GetBridge();
static JSObject* CreateExternalArrayBuffer(JSContext* aCx, size_t aSize,
ipc::Shmem& aShmem);
RefPtr<MappingPromise> MapBufferForReadAsync(RawId aId, size_t aSize,
ErrorResult& aRv);
void UnmapBuffer(RawId aId, UniquePtr<ipc::Shmem> aShmem, bool aFlush);
static JSObject* CreateExternalArrayBuffer(JSContext* aCx, size_t aOffset,
size_t aSize,
const ipc::Shmem& aShmem);
RefPtr<MappingPromise> MapBufferAsync(RawId aId, uint32_t aMode,
size_t aOffset, size_t aSize,
ErrorResult& aRv);
void UnmapBuffer(RawId aId, ipc::Shmem&& aShmem, bool aFlush);
already_AddRefed<Texture> InitSwapChain(
const dom::GPUSwapChainDescriptor& aDesc,
const dom::GPUExtent3DDict& aExtent3D,
@@ -104,9 +106,8 @@ class Device final : public DOMEventTargetHelper {
Queue* DefaultQueue() const;

already_AddRefed<Buffer> CreateBuffer(const dom::GPUBufferDescriptor& aDesc);
void CreateBufferMapped(JSContext* aCx, const dom::GPUBufferDescriptor& aDesc,
nsTArray<JS::Value>& aSequence, ErrorResult& aRv);
already_AddRefed<Buffer> CreateBuffer(const dom::GPUBufferDescriptor& aDesc,
ErrorResult& aRv);

already_AddRefed<Texture> CreateTexture(
const dom::GPUTextureDescriptor& aDesc);

(File diff not shown because it is too large)

@@ -14,6 +14,7 @@ using SerialPipelineLayoutDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using SerialBindGroupDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using SerialComputePipelineDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using SerialRenderPipelineDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using SerialSamplerDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h";
using dom::GPUDeviceDescriptor from "mozilla/dom/WebGPUBinding.h";
using dom::GPUCommandEncoderDescriptor from "mozilla/dom/WebGPUBinding.h";
@@ -21,12 +22,12 @@ using dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
using dom::GPUPipelineLayoutDescriptor from "mozilla/dom/WebGPUBinding.h";
using webgpu::ffi::WGPUBufferDescriptor from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureDescriptor from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUSamplerDescriptor from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureViewDescriptor from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUBufferCopyView from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureDataLayout from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureCopyView from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUExtent3d from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUHostMap from "mozilla/webgpu/ffi/wgpu.h";

include "mozilla/webgpu/WebGPUSerialize.h";
include protocol PCompositorBridge;
@@ -48,16 +49,17 @@ parent:
async AdapterRequestDevice(RawId selfId, GPUDeviceDescriptor desc, RawId newId);
async AdapterDestroy(RawId selfId);
async DeviceCreateBuffer(RawId selfId, WGPUBufferDescriptor desc, nsCString label, RawId newId);
async DeviceDestroy(RawId selfId);
async DeviceUnmapBuffer(RawId selfId, RawId bufferId, Shmem shmem, bool flush);
async BufferMapRead(RawId selfId, Shmem shmem) returns (Shmem sm);
async BufferReturnShmem(RawId selfId, Shmem shmem);
async BufferMap(RawId selfId, WGPUHostMap hostMap, uint64_t offset, uint64_t size) returns (Shmem sm);
async BufferUnmap(RawId selfId, Shmem shmem, bool flush);
async BufferDestroy(RawId selfId);
async DeviceCreateTexture(RawId selfId, WGPUTextureDescriptor desc, nsCString label, RawId newId);
async TextureCreateView(RawId selfId, WGPUTextureViewDescriptor desc, nsCString label, RawId newId);
async TextureDestroy(RawId selfId);
async TextureViewDestroy(RawId selfId);
async DeviceCreateSampler(RawId selfId, WGPUSamplerDescriptor desc, nsCString label, RawId newId);
async DeviceCreateSampler(RawId selfId, SerialSamplerDescriptor desc, RawId newId);
async SamplerDestroy(RawId selfId);
async DeviceDestroy(RawId selfId);

async DeviceCreateCommandEncoder(RawId selfId, GPUCommandEncoderDescriptor desc, RawId newId);
async CommandEncoderCopyBufferToBuffer(RawId selfId, RawId sourceId, BufferAddress sourceOffset, RawId destinationId, BufferAddress destinationOffset, BufferAddress size);
@@ -79,7 +81,7 @@ parent:
async PipelineLayoutDestroy(RawId selfId);
async DeviceCreateBindGroup(RawId selfId, SerialBindGroupDescriptor desc, RawId newId);
async BindGroupDestroy(RawId selfId);
async DeviceCreateShaderModule(RawId selfId, uint32_t[] data, RawId newId);
async DeviceCreateShaderModule(RawId selfId, uint32_t[] spirv, nsCString wgsl, RawId newId);
async ShaderModuleDestroy(RawId selfId);
async DeviceCreateComputePipeline(RawId selfId, SerialComputePipelineDescriptor desc, RawId newId);
async ComputePipelineDestroy(RawId selfId);

@@ -49,11 +49,8 @@ RefPtr<RawIdPromise> WebGPUChild::InstanceRequestAdapter(
->Then(
GetCurrentSerialEventTarget(), __func__,
[](const RawId& aId) {
if (aId == 0) {
return RawIdPromise::CreateAndReject(Nothing(), __func__);
} else {
return RawIdPromise::CreateAndResolve(aId, __func__);
}
return aId == 0 ? RawIdPromise::CreateAndReject(Nothing(), __func__)
: RawIdPromise::CreateAndResolve(aId, __func__);
},
[](const ipc::ResponseRejectReason& aReason) {
return RawIdPromise::CreateAndReject(Some(aReason), __func__);
@@ -65,10 +62,9 @@ Maybe<RawId> WebGPUChild::AdapterRequestDevice(
RawId id = ffi::wgpu_client_make_device_id(mClient, aSelfId);
if (SendAdapterRequestDevice(aSelfId, aDesc, id)) {
return Some(id);
} else {
ffi::wgpu_client_kill_device_id(mClient, id);
return Nothing();
}
ffi::wgpu_client_kill_device_id(mClient, id);
return Nothing();
}

RawId WebGPUChild::DeviceCreateBuffer(RawId aSelfId,
@@ -76,6 +72,7 @@ RawId WebGPUChild::DeviceCreateBuffer(RawId aSelfId,
ffi::WGPUBufferDescriptor desc = {};
desc.size = aDesc.mSize;
desc.usage = aDesc.mUsage;
desc.mapped_at_creation = aDesc.mMappedAtCreation;

RawId id = ffi::wgpu_client_make_buffer_id(mClient, aSelfId);
if (!SendDeviceCreateBuffer(aSelfId, desc, nsCString(), id)) {
@@ -177,23 +174,21 @@ RawId WebGPUChild::TextureCreateView(

RawId WebGPUChild::DeviceCreateSampler(RawId aSelfId,
const dom::GPUSamplerDescriptor& aDesc) {
ffi::WGPUSamplerDescriptor desc = {};
desc.address_mode_u = ffi::WGPUAddressMode(aDesc.mAddressModeU);
desc.address_mode_v = ffi::WGPUAddressMode(aDesc.mAddressModeV);
desc.address_mode_w = ffi::WGPUAddressMode(aDesc.mAddressModeW);
desc.mag_filter = ffi::WGPUFilterMode(aDesc.mMagFilter);
desc.min_filter = ffi::WGPUFilterMode(aDesc.mMinFilter);
desc.mipmap_filter = ffi::WGPUFilterMode(aDesc.mMipmapFilter);
desc.lod_min_clamp = aDesc.mLodMinClamp;
desc.lod_max_clamp = aDesc.mLodMaxClamp;
ffi::WGPUCompareFunction compare;
SerialSamplerDescriptor desc = {};
desc.mAddressU = ffi::WGPUAddressMode(aDesc.mAddressModeU);
desc.mAddressV = ffi::WGPUAddressMode(aDesc.mAddressModeV);
desc.mAddressW = ffi::WGPUAddressMode(aDesc.mAddressModeW);
desc.mMagFilter = ffi::WGPUFilterMode(aDesc.mMagFilter);
desc.mMinFilter = ffi::WGPUFilterMode(aDesc.mMinFilter);
desc.mMipmapFilter = ffi::WGPUFilterMode(aDesc.mMipmapFilter);
desc.mLodMinClamp = aDesc.mLodMinClamp;
desc.mLodMaxClamp = aDesc.mLodMaxClamp;
if (aDesc.mCompare.WasPassed()) {
compare = ConvertCompareFunction(aDesc.mCompare.Value());
desc.compare = compare;
desc.mCompare = Some(ConvertCompareFunction(aDesc.mCompare.Value()));
}

RawId id = ffi::wgpu_client_make_sampler_id(mClient, aSelfId);
if (!SendDeviceCreateSampler(aSelfId, desc, nsCString(), id)) {
if (!SendDeviceCreateSampler(aSelfId, desc, id)) {
MOZ_CRASH("IPC failure");
}
return id;
@@ -228,7 +223,7 @@ RawId WebGPUChild::DeviceCreateBindGroupLayout(
ffi::WGPUBindGroupLayoutEntry e = {};
e.binding = entry.mBinding;
e.visibility = entry.mVisibility;
e.ty = ffi::WGPUBindingType(entry.mType);
e.ty = ffi::WGPURawBindingType(entry.mType);
e.multisampled = entry.mMultisampled;
e.has_dynamic_offset = entry.mHasDynamicOffset;
e.view_dimension = ffi::WGPUTextureViewDimension(entry.mViewDimension);
@@ -295,12 +290,18 @@ RawId WebGPUChild::DeviceCreateBindGroup(
RawId WebGPUChild::DeviceCreateShaderModule(
RawId aSelfId, const dom::GPUShaderModuleDescriptor& aDesc) {
RawId id = ffi::wgpu_client_make_shader_module_id(mClient, aSelfId);
MOZ_ASSERT(aDesc.mCode.IsUint32Array());
const auto& code = aDesc.mCode.GetAsUint32Array();
code.ComputeState();
nsTArray<uint32_t> data(code.Length());
data.AppendElements(code.Data(), code.Length());
if (!SendDeviceCreateShaderModule(aSelfId, data, id)) {

nsTArray<uint32_t> spirv;
nsCString wgsl;
if (aDesc.mCode.IsString()) {
CopyUTF16toUTF8(aDesc.mCode.GetAsString(), wgsl);
} else {
const auto& code = aDesc.mCode.GetAsUint32Array();
code.ComputeState();
spirv.AppendElements(code.Data(), code.Length());
}

if (!SendDeviceCreateShaderModule(aSelfId, spirv, wgsl, id)) {
MOZ_CRASH("IPC failure");
}
return id;

@@ -22,7 +22,6 @@ struct WGPUClient;
struct WGPUTextureViewDescriptor;
} // namespace ffi

struct TextureInfo;
typedef MozPromise<RawId, Maybe<ipc::ResponseRejectReason>, true> RawIdPromise;

class WebGPUChild final : public PWebGPUChild {

@@ -202,53 +202,94 @@ ipc::IPCResult WebGPUParent::RecvDeviceCreateBuffer(
desc.label = aLabel.Data();
}
ffi::wgpu_server_device_create_buffer(mContext, aSelfId, &desc, aNewId);
return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvDeviceUnmapBuffer(RawId aSelfId,
RawId aBufferId,
Shmem&& aShmem,
bool aFlush) {
if (aFlush) {
ffi::wgpu_server_device_set_buffer_sub_data(mContext, aSelfId, aBufferId, 0,
aShmem.get<uint8_t>(),
aShmem.Size<uint8_t>());
} else {
ffi::wgpu_server_buffer_unmap(mContext, aBufferId);
if (desc.usage & (WGPUBufferUsage_MAP_READ | WGPUBufferUsage_MAP_WRITE)) {
mSharedMemoryMap.insert({aNewId, Shmem()});
}
DeallocShmem(aShmem);
return IPC_OK();
}

struct MapReadRequest {
ipc::IPCResult WebGPUParent::RecvBufferReturnShmem(RawId aSelfId,
Shmem&& aShmem) {
mSharedMemoryMap[aSelfId] = aShmem;
return IPC_OK();
}

struct MapRequest {
const ffi::WGPUGlobal* const mContext;
ffi::WGPUBufferId mBufferId;
ffi::WGPUHostMap mHostMap;
uint64_t mOffset;
ipc::Shmem mShmem;
WebGPUParent::BufferMapReadResolver mResolver;
MapReadRequest(ipc::Shmem&& shmem,
WebGPUParent::BufferMapReadResolver&& resolver)
: mShmem(shmem), mResolver(resolver) {}
WebGPUParent::BufferMapResolver mResolver;
MapRequest(const ffi::WGPUGlobal* context, ffi::WGPUBufferId bufferId,
ffi::WGPUHostMap hostMap, uint64_t offset, ipc::Shmem&& shmem,
WebGPUParent::BufferMapResolver&& resolver)
: mContext(context),
mBufferId(bufferId),
mHostMap(hostMap),
mOffset(offset),
mShmem(shmem),
mResolver(resolver) {}
};

static void MapReadCallback(ffi::WGPUBufferMapAsyncStatus status,
const uint8_t* ptr, uint8_t* userdata) {
auto req = reinterpret_cast<MapReadRequest*>(userdata);
static void MapCallback(ffi::WGPUBufferMapAsyncStatus status,
uint8_t* userdata) {
auto* req = reinterpret_cast<MapRequest*>(userdata);
// TODO: better handle errors
MOZ_ASSERT(status == ffi::WGPUBufferMapAsyncStatus_Success);
memcpy(req->mShmem.get<uint8_t>(), ptr, req->mShmem.Size<uint8_t>());
if (req->mHostMap == ffi::WGPUHostMap_Read) {
const uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
req->mContext, req->mBufferId, req->mOffset,
req->mShmem.Size<uint8_t>());
memcpy(req->mShmem.get<uint8_t>(), ptr, req->mShmem.Size<uint8_t>());
}
req->mResolver(std::move(req->mShmem));
delete req;
}

ipc::IPCResult WebGPUParent::RecvBufferMapRead(
RawId aSelfId, Shmem&& aShmem, BufferMapReadResolver&& aResolver) {
auto size = aShmem.Size<uint8_t>();
auto request = new MapReadRequest(std::move(aShmem), std::move(aResolver));
ffi::wgpu_server_buffer_map_read(mContext, aSelfId, 0, size, &MapReadCallback,
reinterpret_cast<uint8_t*>(request));
ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aSelfId,
ffi::WGPUHostMap aHostMap,
uint64_t aOffset, uint64_t aSize,
BufferMapResolver&& aResolver) {
auto* request = new MapRequest(mContext, aSelfId, aHostMap, aOffset,
std::move(mSharedMemoryMap[aSelfId]),
std::move(aResolver));
ffi::WGPUBufferMapOperation mapOperation = {
aHostMap, &MapCallback, reinterpret_cast<uint8_t*>(request)};
ffi::wgpu_server_buffer_map(mContext, aSelfId, aOffset, aSize, mapOperation);
return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem,
bool aFlush) {
if (aFlush) {
// TODO: flush exact modified sub-range
uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
mContext, aSelfId, 0, aShmem.Size<uint8_t>());
MOZ_ASSERT(ptr != nullptr);
memcpy(ptr, aShmem.get<uint8_t>(), aShmem.Size<uint8_t>());
}

ffi::wgpu_server_buffer_unmap(mContext, aSelfId);

const auto iter = mSharedMemoryMap.find(aSelfId);
if (iter == mSharedMemoryMap.end()) {
DeallocShmem(aShmem);
} else {
iter->second = aShmem;
}
return IPC_OK();
}

ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aSelfId) {
ffi::wgpu_server_buffer_destroy(mContext, aSelfId);

const auto iter = mSharedMemoryMap.find(aSelfId);
if (iter != mSharedMemoryMap.end()) {
DeallocShmem(iter->second);
mSharedMemoryMap.erase(iter);
}
return IPC_OK();
}

@@ -285,11 +326,21 @@ ipc::IPCResult WebGPUParent::RecvTextureViewDestroy(RawId aSelfId) {
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateSampler(
RawId aSelfId, const ffi::WGPUSamplerDescriptor& aDesc,
const nsCString& aLabel, RawId aNewId) {
ffi::WGPUSamplerDescriptor desc = aDesc;
if (!aLabel.IsEmpty()) {
desc.label = aLabel.Data();
RawId aSelfId, const SerialSamplerDescriptor& aDesc, RawId aNewId) {
ffi::WGPUSamplerDescriptor desc = {};
desc.address_modes[0] = aDesc.mAddressU;
desc.address_modes[1] = aDesc.mAddressV;
desc.address_modes[2] = aDesc.mAddressW;
desc.mag_filter = aDesc.mMagFilter;
desc.min_filter = aDesc.mMinFilter;
desc.mipmap_filter = aDesc.mMipmapFilter;
desc.lod_min_clamp = aDesc.mLodMinClamp;
desc.lod_max_clamp = aDesc.mLodMaxClamp;
if (aDesc.mCompare) {
desc.compare = aDesc.mCompare.ptr();
}
if (!aDesc.mLabel.IsEmpty()) {
desc.label = aDesc.mLabel.Data();
}
ffi::wgpu_server_device_create_sampler(mContext, aSelfId, &desc, aNewId);
return IPC_OK();
@@ -440,27 +491,24 @@ ipc::IPCResult WebGPUParent::RecvDeviceCreateBindGroup(
RawId aSelfId, const SerialBindGroupDescriptor& aDesc, RawId aNewId) {
nsTArray<ffi::WGPUBindGroupEntry> ffiEntries(aDesc.mEntries.Length());
for (const auto& entry : aDesc.mEntries) {
ffi::WGPUBindGroupEntry bgb = {};
bgb.binding = entry.mBinding;
ffi::WGPUBindGroupEntry bge = {};
bge.binding = entry.mBinding;
switch (entry.mType) {
case SerialBindGroupEntryType::Buffer:
bgb.resource.tag = ffi::WGPUBindingResource_Buffer;
bgb.resource.buffer._0.buffer = entry.mValue;
bgb.resource.buffer._0.offset = entry.mBufferOffset;
bgb.resource.buffer._0.size = ffi::make_buffer_size(entry.mBufferSize);
bge.buffer = entry.mValue;
bge.offset = entry.mBufferOffset;
bge.size = ffi::make_buffer_size(entry.mBufferSize);
break;
case SerialBindGroupEntryType::Texture:
bgb.resource.tag = ffi::WGPUBindingResource_TextureView;
bgb.resource.texture_view._0 = entry.mValue;
bge.texture_view = entry.mValue;
break;
case SerialBindGroupEntryType::Sampler:
bgb.resource.tag = ffi::WGPUBindingResource_Sampler;
bgb.resource.sampler._0 = entry.mValue;
bge.sampler = entry.mValue;
break;
default:
MOZ_CRASH("unreachable");
}
ffiEntries.AppendElement(bgb);
ffiEntries.AppendElement(bge);
}
ffi::WGPUBindGroupDescriptor desc = {};
desc.layout = aDesc.mLayout;
@@ -476,10 +524,11 @@ ipc::IPCResult WebGPUParent::RecvBindGroupDestroy(RawId aSelfId) {
}

ipc::IPCResult WebGPUParent::RecvDeviceCreateShaderModule(
RawId aSelfId, const nsTArray<uint32_t>& aData, RawId aNewId) {
RawId aSelfId, const nsTArray<uint32_t>& aSpirv, const nsCString& aWgsl,
RawId aNewId) {
ffi::WGPUShaderModuleDescriptor desc = {};
desc.code.bytes = aData.Elements();
desc.code.length = aData.Length();
desc.code.bytes = aSpirv.Elements();
desc.code.length = aSpirv.Length();
ffi::wgpu_server_device_create_shader_module(mContext, aSelfId, &desc,
aNewId);
return IPC_OK();
@@ -578,12 +627,12 @@ ipc::IPCResult WebGPUParent::RecvDeviceCreateSwapChain(
NS_ERROR("Invalid total buffer size!");
return IPC_OK();
}
auto textureHostData = new (fallible) uint8_t[wholeBufferSize.value()];
auto* textureHostData = new (fallible) uint8_t[wholeBufferSize.value()];
if (!textureHostData) {
NS_ERROR("Unable to allocate host data!");
return IPC_OK();
}
auto textureHost = new layers::MemoryTextureHost(
RefPtr<layers::MemoryTextureHost> textureHost = new layers::MemoryTextureHost(
textureHostData, aDesc, layers::TextureFlags::NO_FLAGS);
textureHost->CreateRenderTexture(aExternalId);
nsTArray<RawId> bufferIds(aBufferIds.Clone());
@@ -603,28 +652,39 @@ ipc::IPCResult WebGPUParent::RecvDeviceCreateSwapChain(
return IPC_OK();
}

struct PresentRequest {
const ffi::WGPUGlobal* mContext;
RefPtr<PresentationData> mData;
};

static void PresentCallback(ffi::WGPUBufferMapAsyncStatus status,
const uint8_t* ptr, uint8_t* userdata) {
auto data = reinterpret_cast<PresentationData*>(userdata);
uint8_t* userdata) {
auto* req = reinterpret_cast<PresentRequest*>(userdata);
PresentationData* data = req->mData.get();
// get the buffer ID
data->mBuffersLock.Lock();
RawId bufferId = data->mQueuedBufferIds.back();
data->mQueuedBufferIds.pop_back();
data->mAvailableBufferIds.push_back(bufferId);
data->mBuffersLock.Unlock();
// copy the data
if (status == ffi::WGPUBufferMapAsyncStatus_Success) {
const auto bufferSize = data->mRowCount * data->mSourcePitch;
const uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
req->mContext, bufferId, 0, bufferSize);
uint8_t* dst = data->mTextureHost->GetBuffer();
for (uint32_t row = 0; row < data->mRowCount; ++row) {
memcpy(dst, ptr, data->mTargetPitch);
dst += data->mTargetPitch;
ptr += data->mSourcePitch;
}
wgpu_server_buffer_unmap(req->mContext, bufferId);
} else {
// TODO: better handle errors
NS_WARNING("WebGPU frame mapping failed!");
}
data->mBuffersLock.Lock();
RawId bufferId = data->mQueuedBufferIds.back();
data->mQueuedBufferIds.pop_back();
data->mAvailableBufferIds.push_back(bufferId);
data->mBuffersLock.Unlock();
// We artificially did `AddRef` before calling `wgpu_server_buffer_map_read`.
// Now we can let it go again.
data->Release();
// free yourself
delete req;
}

ipc::IPCResult WebGPUParent::RecvSwapChainPresent(

@@ -645,7 +705,6 @@ ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
data->mBuffersLock.Lock();
if (!data->mAvailableBufferIds.empty()) {
bufferId = data->mAvailableBufferIds.back();
wgpu_server_buffer_unmap(mContext, bufferId);
data->mAvailableBufferIds.pop_back();
} else if (!data->mUnassignedBufferIds.empty()) {
bufferId = data->mUnassignedBufferIds.back();
@@ -703,13 +762,15 @@ ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
// texture,
// we can just give it the contents of the last mapped buffer instead of the
// copy.
// This `AddRef` is needed for passing `data` as a raw pointer to
// `wgpu_server_buffer_map_read` to serve as `userdata`. It's released at
// the end of `PresentCallback` body.
const auto userData = do_AddRef(data).take();
ffi::wgpu_server_buffer_map_read(mContext, bufferId, 0, bufferSize,
&PresentCallback,
reinterpret_cast<uint8_t*>(userData));
auto* const presentRequest = new PresentRequest{
mContext,
data,
};

ffi::WGPUBufferMapOperation mapOperation = {
ffi::WGPUHostMap_Read, &PresentCallback,
reinterpret_cast<uint8_t*>(presentRequest)};
ffi::wgpu_server_buffer_map(mContext, bufferId, 0, bufferSize, mapOperation);

return IPC_OK();
}

@@ -33,10 +33,11 @@ class WebGPUParent final : public PWebGPUParent {
ipc::IPCResult RecvDeviceCreateBuffer(RawId aSelfId,
const ffi::WGPUBufferDescriptor& aDesc,
const nsCString& aLabel, RawId aNewId);
ipc::IPCResult RecvDeviceUnmapBuffer(RawId aSelfId, RawId aBufferId,
Shmem&& aShmem, bool aFlush);
ipc::IPCResult RecvBufferMapRead(RawId aSelfId, Shmem&& aShmem,
BufferMapReadResolver&& aResolver);
ipc::IPCResult RecvBufferReturnShmem(RawId aSelfId, Shmem&& aShmem);
ipc::IPCResult RecvBufferMap(RawId aSelfId, ffi::WGPUHostMap aHostMap,
uint64_t aOffset, uint64_t size,
BufferMapResolver&& aResolver);
ipc::IPCResult RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem, bool aFlush);
ipc::IPCResult RecvBufferDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateTexture(
RawId aSelfId, const ffi::WGPUTextureDescriptor& aDesc,
@@ -46,9 +47,9 @@ class WebGPUParent final : public PWebGPUParent {
const nsCString& aLabel, RawId aNewId);
ipc::IPCResult RecvTextureDestroy(RawId aSelfId);
ipc::IPCResult RecvTextureViewDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateSampler(
RawId aSelfId, const ffi::WGPUSamplerDescriptor& aDesc,
const nsCString& aLabel, RawId aNewId);
ipc::IPCResult RecvDeviceCreateSampler(RawId aSelfId,
const SerialSamplerDescriptor& aDesc,
RawId aNewId);
ipc::IPCResult RecvSamplerDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateCommandEncoder(
RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc,
@@ -92,7 +93,8 @@ class WebGPUParent final : public PWebGPUParent {
RawId aSelfId, const SerialBindGroupDescriptor& aDesc, RawId aNewId);
ipc::IPCResult RecvBindGroupDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateShaderModule(RawId aSelfId,
const nsTArray<uint32_t>& aData,
const nsTArray<uint32_t>& aSpirv,
const nsCString& aWgsl,
RawId aNewId);
ipc::IPCResult RecvShaderModuleDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateComputePipeline(
@@ -116,8 +118,13 @@ class WebGPUParent final : public PWebGPUParent {
virtual ~WebGPUParent();
void MaintainDevices();

const ffi::WGPUGlobal_IdentityRecyclerFactory* const mContext;
const ffi::WGPUGlobal* const mContext;
base::RepeatingTimer<WebGPUParent> mTimer;
/// Shmem associated with a mappable buffer has to be owned by one of the
/// processes. We keep it here for every mappable buffer while the buffer is
/// used by GPU.
std::unordered_map<uint64_t, Shmem> mSharedMemoryMap;
/// Associated presentation data for each swapchain.
std::unordered_map<uint64_t, RefPtr<PresentationData>> mCanvasMap;
};

@@ -48,19 +48,19 @@ DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUPowerPreference);
DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::webgpu::SerialBindGroupEntryType);

DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUAddressMode);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUBindingType);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPURawBindingType);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUBlendFactor);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUBlendOperation);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUCompareFunction);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUCullMode);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUFilterMode);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUFrontFace);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUHostMap);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUIndexFormat);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUInputStepMode);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUPrimitiveTopology);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUStencilOperation);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUTextureAspect);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUTextureComponentType);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUTextureDimension);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUTextureFormat);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUTextureViewDimension);
@@ -77,15 +77,10 @@ DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPULimits, mMaxBindGroups);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUDeviceDescriptor,
mExtensions, mLimits);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUBufferDescriptor,
label, size, usage);
label, size, usage, mapped_at_creation);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureDescriptor,
label, size, mip_level_count, sample_count,
dimension, format, usage);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUSamplerDescriptor,
label, address_mode_u, address_mode_v,
address_mode_w, mag_filter, min_filter,
mipmap_filter, lod_min_clamp, lod_max_clamp,
compare, anisotropy_clamp);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUExtent3d, width,
height, depth);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUOrigin3d, x, y, z);
@@ -146,6 +141,11 @@ DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mFragmentStage, mPrimitiveTopology, mRasterizationState, mColorStates,
mDepthStencilState, mVertexState, mSampleCount, mSampleMask,
mAlphaToCoverageEnabled);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::SerialSamplerDescriptor,
mLabel, mAddressU, mAddressV, mAddressW,
mMagFilter, mMinFilter, mMipmapFilter,
mLodMinClamp, mLodMaxClamp, mCompare,
mAnisotropyClamp);

#undef DEFINE_IPC_SERIALIZER_FFI_ENUM
#undef DEFINE_IPC_SERIALIZER_DOM_ENUM

@@ -34,52 +34,66 @@ enum class SerialBindGroupEntryType : uint8_t {
};

struct SerialBindGroupEntry {
uint32_t mBinding;
SerialBindGroupEntryType mType;
RawId mValue;
BufferAddress mBufferOffset;
BufferAddress mBufferSize;
uint32_t mBinding = 0;
SerialBindGroupEntryType mType = SerialBindGroupEntryType::EndGuard_;
RawId mValue = 0;
BufferAddress mBufferOffset = 0;
BufferAddress mBufferSize = 0;
};

struct SerialBindGroupDescriptor {
nsCString mLabel;
RawId mLayout;
RawId mLayout = 0;
nsTArray<SerialBindGroupEntry> mEntries;
};

struct SerialProgrammableStageDescriptor {
RawId mModule;
RawId mModule = 0;
nsString mEntryPoint;
};

struct SerialComputePipelineDescriptor {
RawId mLayout;
RawId mLayout = 0;
SerialProgrammableStageDescriptor mComputeStage;
};

struct SerialVertexBufferLayoutDescriptor {
ffi::WGPUBufferAddress mArrayStride;
ffi::WGPUInputStepMode mStepMode;
ffi::WGPUBufferAddress mArrayStride = 0;
ffi::WGPUInputStepMode mStepMode = ffi::WGPUInputStepMode_Sentinel;
nsTArray<ffi::WGPUVertexAttributeDescriptor> mAttributes;
};

struct SerialVertexStateDescriptor {
ffi::WGPUIndexFormat mIndexFormat;
ffi::WGPUIndexFormat mIndexFormat = ffi::WGPUIndexFormat_Sentinel;
nsTArray<SerialVertexBufferLayoutDescriptor> mVertexBuffers;
};

struct SerialRenderPipelineDescriptor {
RawId mLayout;
RawId mLayout = 0;
SerialProgrammableStageDescriptor mVertexStage;
SerialProgrammableStageDescriptor mFragmentStage;
ffi::WGPUPrimitiveTopology mPrimitiveTopology;
ffi::WGPUPrimitiveTopology mPrimitiveTopology =
ffi::WGPUPrimitiveTopology_Sentinel;
Maybe<ffi::WGPURasterizationStateDescriptor> mRasterizationState;
nsTArray<ffi::WGPUColorStateDescriptor> mColorStates;
Maybe<ffi::WGPUDepthStencilStateDescriptor> mDepthStencilState;
SerialVertexStateDescriptor mVertexState;
uint32_t mSampleCount;
uint32_t mSampleMask;
bool mAlphaToCoverageEnabled;
uint32_t mSampleCount = 0;
uint32_t mSampleMask = 0;
bool mAlphaToCoverageEnabled = false;
};

struct SerialSamplerDescriptor {
nsCString mLabel;
ffi::WGPUAddressMode mAddressU = ffi::WGPUAddressMode_Sentinel,
mAddressV = ffi::WGPUAddressMode_Sentinel,
mAddressW = ffi::WGPUAddressMode_Sentinel;
ffi::WGPUFilterMode mMagFilter = ffi::WGPUFilterMode_Sentinel,
mMinFilter = ffi::WGPUFilterMode_Sentinel,
mMipmapFilter = ffi::WGPUFilterMode_Sentinel;
float mLodMinClamp = 0.0, mLodMaxClamp = 0.0;
Maybe<ffi::WGPUCompareFunction> mCompare;
uint8_t mAnisotropyClamp = 0;
};

} // namespace webgpu

@@ -13,15 +13,21 @@ ok(SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be enabled.');
const func = async function() {
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
const [buffer, mapping] = await device.createBufferMapped({
size: 4,
usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_SRC, //TODO: remove the dummy usage
});
new Float32Array(mapping).set([1.0]);
buffer.unmap();
const data = await buffer.mapReadAsync();

const bufferRead = device.createBuffer({ size:4, usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST });
const bufferWrite = device.createBuffer({ size:4, usage: GPUBufferUsage.COPY_SRC, mappedAtCreation: true });
(new Float32Array(bufferWrite.getMappedRange())).set([1.0]);
bufferWrite.unmap();

const encoder = device.createCommandEncoder();
encoder.copyBufferToBuffer(bufferWrite, 0, bufferRead, 0, 4);
device.defaultQueue.submit([encoder.finish()]);

await bufferRead.mapAsync(GPUMapMode.READ);
const data = bufferRead.getMappedRange();
const value = new Float32Array(data)[0];
buffer.unmap();
bufferRead.unmap();

ok(value == 1.0, 'value == 1.0');
};

@@ -125,10 +125,8 @@ interface GPUDevice {

[SameObject] readonly attribute GPUQueue defaultQueue;

[NewObject]
GPUBuffer createBuffer(GPUBufferDescriptor descriptor);
[NewObject, Throws]
GPUMappedBuffer createBufferMapped(GPUBufferDescriptor descriptor);
GPUBuffer createBuffer(GPUBufferDescriptor descriptor);
[NewObject]
GPUTexture createTexture(GPUTextureDescriptor descriptor);
[NewObject]
@@ -219,14 +217,26 @@ interface GPUBufferUsage {
dictionary GPUBufferDescriptor {
required GPUSize64 size;
required GPUBufferUsageFlags usage;
boolean mappedAtCreation = false;
};

typedef unsigned long GPUMapModeFlags;

[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUMapMode
{
const GPUMapModeFlags READ = 0x0001;
const GPUMapModeFlags WRITE = 0x0002;
};

[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUBuffer {
[NewObject]
Promise<ArrayBuffer> mapReadAsync();
//Promise<ArrayBuffer> mapWriteAsync();
Promise<void> mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size);
[NewObject, Throws]
ArrayBuffer getMappedRange(optional GPUSize64 offset = 0, optional GPUSize64 size);
[Throws]
void unmap();

@@ -24,10 +24,10 @@ dependencies = [
]

[[package]]
name = "android_glue"
version = "0.2.3"
name = "android_log-sys"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "000444226fcff248f2bc4c7625be32c63caccfecc2723a2b9f78a7487a49c407"
checksum = "b8052e2d8aabbb8d556d6abbcce2a22b9590996c5f849b9c7ce4544a2e3b984e"

[[package]]
name = "approx"
@@ -46,11 +46,11 @@ checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8"
[[package]]
name = "ash"
version = "0.31.0"
version = "0.30.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c69a8137596e84c22d57f3da1b5de1d4230b1742a710091c85f4d7ce50f00f38"
checksum = "69daec0742947f33a85931fa3cb0ce5f07929159dcbd1f0cbb5b2912e2978509"
dependencies = [
"libloading 0.6.1",
"libloading 0.5.2",
]

[[package]]
@@ -113,9 +113,9 @@ checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
[[package]]
name = "bumpalo"
version = "3.2.1"
version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187"
checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820"

[[package]]
name = "byteorder"
@@ -136,9 +136,9 @@ dependencies = [
[[package]]
name = "cc"
version = "1.0.52"
version = "1.0.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d87b23d6a92cd03af510a5ade527033f6aa6fa92161e2d5863a907d4c5e31d"
checksum = "7bbb73db36c1246e9034e307d0fba23f9a2e251faa47ade70c1bd252220c8311"

[[package]]
name = "cfg-if"
@@ -157,39 +157,24 @@ dependencies = [
[[package]]
name = "cocoa"
version = "0.19.1"
version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f29f7768b2d1be17b96158e3285951d366b40211320fb30826a76cb7a0da6400"
checksum = "8f7b6f3f7f4f0b3ec5c5039aaa9e8c3cef97a7a480a400fd62944841314f293d"
dependencies = [
"bitflags",
"block",
"core-foundation 0.6.4",
"core-foundation 0.7.0",
"core-graphics",
"foreign-types",
"libc",
"objc",
]

[[package]]
name = "cocoa-foundation"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ade49b65d560ca58c403a479bb396592b155c0185eada742ee323d1d68d6318"
dependencies = [
"bitflags",
"block",
"core-foundation 0.9.0",
"core-graphics-types",
"foreign-types",
"libc",
"objc",
]

[[package]]
name = "copyless"
version = "0.1.4"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ff9c56c9fb2a49c05ef0e431485a22400af20d33226dc0764d891d09e724127"
checksum = "a2df960f5d869b2dd8532793fde43eb5427cceb126c929747a26823ab0eeb536"

[[package]]
name = "core-foundation"
@@ -203,11 +188,11 @@ dependencies = [
[[package]]
name = "core-foundation"
version = "0.9.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b5ed8e7e76c45974e15e41bfa8d5b0483cd90191639e01d8f5f1e606299d3fb"
checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171"
dependencies = [
"core-foundation-sys 0.8.0",
"core-foundation-sys 0.7.0",
"libc",
]

@@ -219,42 +204,30 @@ checksum = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b"
[[package]]
name = "core-foundation-sys"
version = "0.8.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a21fa21941700a3cd8fcb4091f361a6a712fac632f85d9f487cc892045d55c6"
checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac"

[[package]]
name = "core-graphics"
version = "0.17.3"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56790968ab1c8a1202a102e6de05fc6e1ec87da99e4e93e9a7d13efbfc1e95a9"
checksum = "59e78b2e0aaf43f08e7ae0d6bc96895ef72ff0921c7d4ff4762201b2dba376dd"
dependencies = [
"bitflags",
"core-foundation 0.6.4",
"foreign-types",
"libc",
]

[[package]]
name = "core-graphics-types"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e92f5d519093a4178296707dbaa3880eae85a5ef5386675f361a1cf25376e93c"
dependencies = [
"bitflags",
"core-foundation 0.9.0",
"core-foundation 0.7.0",
"foreign-types",
"libc",
]

[[package]]
name = "core-video-sys"
version = "0.1.3"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8dc065219542086f72d1e9f7aadbbab0989e980263695d129d502082d063a9d0"
checksum = "34ecad23610ad9757664d644e369246edde1803fcb43ed72876565098a5d3828"
dependencies = [
"cfg-if",
"core-foundation-sys 0.6.2",
"core-foundation-sys 0.7.0",
"core-graphics",
"libc",
"objc",
@@ -271,6 +244,17 @@ dependencies = [
"winapi 0.3.8",
]

[[package]]
name = "derivative"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f"
dependencies = [
"proc-macro2 1.0.18",
"quote 1.0.7",
"syn",
]

[[package]]
name = "dispatch"
version = "0.2.0"
@ -279,11 +263,11 @@ checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"
|
|||
|
||||
[[package]]
|
||||
name = "dlib"
|
||||
version = "0.4.1"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "77e51249a9d823a4cb79e3eca6dcd756153e8ed0157b6c04775d04bf1b13b76a"
|
||||
checksum = "b11f15d1e3268f140f68d390637d5e76d849782d971ae7063e0da69fe9709a76"
|
||||
dependencies = [
|
||||
"libloading 0.5.2",
|
||||
"libloading 0.6.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -367,17 +351,6 @@ dependencies = [
|
|||
"winapi 0.3.8",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gfx-auxil"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b46e6f0031330a0be08d17820f2dcaaa91cb36710a97a9500cb4f1c36e785c8"
|
||||
dependencies = [
|
||||
"fxhash",
|
||||
"gfx-hal",
|
||||
"spirv_cross 0.18.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gfx-auxil"
|
||||
version = "0.4.0"
|
||||
|
@ -386,17 +359,17 @@ checksum = "67bdbf8e8d6883c70e5a0d7379ad8ab3ac95127a3761306b36122d8f1c177a8e"
|
|||
dependencies = [
|
||||
"fxhash",
|
||||
"gfx-hal",
|
||||
"spirv_cross 0.20.0",
|
||||
"spirv_cross",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gfx-backend-dx11"
|
||||
version = "0.5.0"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b148219292624126f78245e50a9720d95ea149a415ce8ce73ab7014205301b88"
|
||||
checksum = "92de0ddc0fde1a89b2a0e92dcc6bbb554bd34af0135e53a28d5ef064611094a4"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"gfx-auxil 0.3.0",
|
||||
"gfx-auxil",
|
||||
"gfx-hal",
|
||||
"libloading 0.5.2",
|
||||
"log",
|
||||
|
@ -404,26 +377,26 @@ dependencies = [
|
|||
"range-alloc",
|
||||
"raw-window-handle",
|
||||
"smallvec",
|
||||
"spirv_cross 0.18.0",
|
||||
"spirv_cross",
|
||||
"winapi 0.3.8",
|
||||
"wio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gfx-backend-dx12"
|
||||
version = "0.5.0"
|
||||
version = "0.5.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a0e526746379e974501551b08958947e67a81b5ea8cdc717a000cdd72577da05"
|
||||
checksum = "98c598fb38d6f51db0219ac26d16ff8b78bc134987acd1940438a5adc46b694f"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"d3d12",
|
||||
"gfx-auxil 0.3.0",
|
||||
"gfx-auxil",
|
||||
"gfx-hal",
|
||||
"log",
|
||||
"range-alloc",
|
||||
"raw-window-handle",
|
||||
"smallvec",
|
||||
"spirv_cross 0.18.0",
|
||||
"spirv_cross",
|
||||
"winapi 0.3.8",
|
||||
]
|
||||
|
||||
|
@ -439,17 +412,18 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "gfx-backend-metal"
|
||||
version = "0.5.6"
|
||||
version = "0.5.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "92804d20b194de6c84cb4bec14ec6a6dcae9c51f0a9186817fb412a590131ae6"
|
||||
checksum = "7264b73ea2d8a7cff7eec3a4d08028a96cc18ff2fdf6479fb6f7febbc97dd03f"
|
||||
dependencies = [
|
||||
"arrayvec",
|
||||
"bitflags",
|
||||
"block",
|
||||
"cocoa-foundation",
|
||||
"cocoa",
|
||||
"copyless",
|
||||
"core-graphics",
|
||||
"foreign-types",
|
||||
"gfx-auxil 0.4.0",
|
||||
"gfx-auxil",
|
||||
"gfx-hal",
|
||||
"lazy_static",
|
||||
"log",
|
||||
|
@ -459,20 +433,20 @@ dependencies = [
|
|||
"range-alloc",
|
||||
"raw-window-handle",
|
||||
"smallvec",
|
||||
"spirv_cross 0.20.0",
|
||||
"spirv_cross",
|
||||
"storage-map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gfx-backend-vulkan"
|
||||
version = "0.5.11"
|
||||
version = "0.5.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aec9c919cfc236d2c36aaa38609c1906a92f2df99a3c7f53022b01936f98275a"
|
||||
checksum = "5a4614727b750d62766db20d94032833f7293f9307f1b2103d5f8833889f863f"
|
||||
dependencies = [
|
||||
"arrayvec",
|
||||
"ash",
|
||||
"byteorder",
|
||||
"core-graphics-types",
|
||||
"core-graphics",
|
||||
"gfx-hal",
|
||||
"lazy_static",
|
||||
"log",
|
||||
|
@ -496,9 +470,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "gfx-hal"
|
||||
version = "0.5.3"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a18534b23d4c262916231511309bc1f307c74cda8dcb68b93a10ca213a22814b"
|
||||
checksum = "1036da3617426192c1e8453ed2a2b6a66cf1e8c1486a921e9a8d6625234bf53c"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"raw-window-handle",
|
||||
|
@ -519,9 +493,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.1.12"
|
||||
version = "0.1.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "61565ff7aaace3525556587bd2dc31d4a07071957be715e63ce7b1eccf51a8f4"
|
||||
checksum = "91780f809e750b0a89f5544be56617ff6b1227ee485bcb06ebe10cdf89bd3b71"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
@ -546,9 +520,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "instant"
|
||||
version = "0.1.3"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f7152d2aed88aa566e7a342250f21ba2222c1ae230ad577499dbfa3c18475b80"
|
||||
checksum = "7777a24a1ce5de49fcdde84ec46efa487c3af49d5b6e6e0a50367cc5c1096182"
|
||||
|
||||
[[package]]
|
||||
name = "iovec"
|
||||
|
@ -560,10 +534,16 @@ dependencies = [
|
|||
]
|
||||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.37"
|
||||
name = "jni-sys"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055"
|
||||
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
|
||||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.40"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ce10c23ad2ea25ceca0093bd3192229da4c5b3c0f2de499c1ecac0d98d452177"
|
||||
dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
@ -592,9 +572,9 @@ checksum = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f"
|
|||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.69"
|
||||
version = "0.2.71"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005"
|
||||
checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49"
|
||||
|
||||
[[package]]
|
||||
name = "libloading"
|
||||
|
@ -608,9 +588,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "libloading"
|
||||
version = "0.6.1"
|
||||
version = "0.6.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3c4f51b790f5bdb65acb4cc94bb81d7b2ee60348a5431ac1467d390b017600b0"
|
||||
checksum = "2cadb8e769f070c45df05c78c7520eb4cd17061d4ab262e43cfc68b4d00ac71c"
|
||||
dependencies = [
|
||||
"winapi 0.3.8",
|
||||
]
|
||||
|
@ -695,13 +675,14 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "metal"
|
||||
version = "0.20.0"
|
||||
version = "0.18.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5c4e8a431536529327e28c9ba6992f2cb0c15d4222f0602a16e6d7695ff3bccf"
|
||||
checksum = "e198a0ee42bdbe9ef2c09d0b9426f3b2b47d90d93a4a9b0395c4cea605e92dc0"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"block",
|
||||
"cocoa-foundation",
|
||||
"cocoa",
|
||||
"core-graphics",
|
||||
"foreign-types",
|
||||
"log",
|
||||
"objc",
|
||||
|
@ -709,9 +690,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "0.6.21"
|
||||
version = "0.6.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f"
|
||||
checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"fuchsia-zircon",
|
||||
|
@ -763,10 +744,41 @@ dependencies = [
|
|||
]
|
||||
|
||||
[[package]]
|
||||
name = "net2"
|
||||
version = "0.2.33"
|
||||
name = "ndk"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
|
||||
checksum = "95a356cafe20aee088789830bfea3a61336e84ded9e545e00d3869ce95dcb80c"
|
||||
dependencies = [
|
||||
"jni-sys",
|
||||
"ndk-sys",
|
||||
"num_enum",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ndk-glue"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d1730ee2e3de41c3321160a6da815f008c4006d71b095880ea50e17cf52332b8"
|
||||
dependencies = [
|
||||
"android_log-sys",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"ndk",
|
||||
"ndk-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ndk-sys"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2b2820aca934aba5ed91c79acc72b6a44048ceacc5d36c035ed4e051f12d887d"
|
||||
|
||||
[[package]]
|
||||
name = "net2"
|
||||
version = "0.2.34"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
|
@ -808,6 +820,28 @@ dependencies = [
|
|||
"autocfg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_enum"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ca565a7df06f3d4b485494f25ba05da1435950f4dc263440eda7a6fa9b8e36e4"
|
||||
dependencies = [
|
||||
"derivative",
|
||||
"num_enum_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_enum_derive"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ffa5a33ddddfee04c0283a7653987d634e880347e96b5b2ed64de07efb59db9d"
|
||||
dependencies = [
|
||||
"proc-macro-crate",
|
||||
"proc-macro2 1.0.18",
|
||||
"quote 1.0.7",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "objc"
|
||||
version = "0.2.7"
|
||||
|
@ -829,9 +863,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "once_cell"
|
||||
version = "1.3.1"
|
||||
version = "1.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b"
|
||||
checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d"
|
||||
|
||||
[[package]]
|
||||
name = "ordered-float"
|
||||
|
@ -881,8 +915,8 @@ version = "0.2.1"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6fb44a25c5bba983be0fc8592dfaf3e6d0935ce8be0c6b15b2a39507af34a926"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10",
|
||||
"quote 1.0.3",
|
||||
"proc-macro2 1.0.18",
|
||||
"quote 1.0.7",
|
||||
"syn",
|
||||
"synstructure",
|
||||
"unicode-xid 0.2.0",
|
||||
|
@ -915,6 +949,15 @@ dependencies = [
|
|||
"winit",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-crate"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e10d4b51f154c8a7fb96fd6dad097cb74b863943ec010ac94b9fd1be8861fe1e"
|
||||
dependencies = [
|
||||
"toml",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "0.4.30"
|
||||
|
@ -926,9 +969,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.10"
|
||||
version = "1.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3"
|
||||
checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa"
|
||||
dependencies = [
|
||||
"unicode-xid 0.2.0",
|
||||
]
|
||||
|
@ -950,18 +993,18 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.3"
|
||||
version = "1.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f"
|
||||
checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10",
|
||||
"proc-macro2 1.0.18",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "range-alloc"
|
||||
version = "0.1.0"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dd5927936723a9e8b715d37d7e4b390455087c4bdf25b9f702309460577b14f9"
|
||||
checksum = "a871f1e45a3a3f0c73fb60343c811238bb5143a81642e27c2ac7aac27ff01a63"
|
||||
|
||||
[[package]]
|
||||
name = "raw-window-handle"
|
||||
|
@ -980,9 +1023,9 @@ checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
|
|||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.3.7"
|
||||
version = "1.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692"
|
||||
checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
|
@ -992,9 +1035,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.6.17"
|
||||
version = "0.6.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
|
||||
checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
|
||||
|
||||
[[package]]
|
||||
name = "renderdoc"
|
||||
|
@ -1004,7 +1047,7 @@ checksum = "9c9e8488c98756911664c8cc7b86284c320b6a6357d95908458136d7ebe9280c"
|
|||
dependencies = [
|
||||
"bitflags",
|
||||
"float-cmp",
|
||||
"libloading 0.6.1",
|
||||
"libloading 0.6.2",
|
||||
"once_cell",
|
||||
"renderdoc-sys",
|
||||
"winapi 0.3.8",
|
||||
|
@ -1095,21 +1138,21 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
|
|||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.106"
|
||||
version = "1.0.111"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399"
|
||||
checksum = "c9124df5b40cbd380080b2cc6ab894c040a3070d995f5c9dc77e18c34a8ae37d"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.106"
|
||||
version = "1.0.111"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c"
|
||||
checksum = "3f2c3ac8e6ca1e9c80b8be1023940162bf81ae3cffbb1809474152f2ce1eb250"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10",
|
||||
"quote 1.0.3",
|
||||
"proc-macro2 1.0.18",
|
||||
"quote 1.0.7",
|
||||
"syn",
|
||||
]
|
||||
|
||||
|
@ -1141,17 +1184,6 @@ dependencies = [
|
|||
"wayland-protocols",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spirv_cross"
|
||||
version = "0.18.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "946216f8793f7199e3ea5b995ee8dc20a0ace1fcf46293a0ef4c17e1d046dbde"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spirv_cross"
|
||||
version = "0.20.0"
|
||||
|
@ -1193,23 +1225,23 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.18"
|
||||
version = "1.0.31"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "410a7488c0a728c7ceb4ad59b9567eb4053d02e8cc7f5c0e0eeeb39518369213"
|
||||
checksum = "b5304cfdf27365b7585c25d4af91b35016ed21ef88f17ced89c7093b43dba8b6"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10",
|
||||
"quote 1.0.3",
|
||||
"proc-macro2 1.0.18",
|
||||
"quote 1.0.7",
|
||||
"unicode-xid 0.2.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "synstructure"
|
||||
version = "0.12.3"
|
||||
version = "0.12.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545"
|
||||
checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10",
|
||||
"quote 1.0.3",
|
||||
"proc-macro2 1.0.18",
|
||||
"quote 1.0.7",
|
||||
"syn",
|
||||
"unicode-xid 0.2.0",
|
||||
]
|
||||
|
@ -1232,6 +1264,15 @@ dependencies = [
|
|||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.5.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "typenum"
|
||||
version = "1.12.0"
|
||||
|
@ -1262,9 +1303,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "vec_map"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a"
|
||||
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
|
||||
|
||||
[[package]]
|
||||
name = "void"
|
||||
|
@ -1285,9 +1326,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen"
|
||||
version = "0.2.60"
|
||||
version = "0.2.63"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f"
|
||||
checksum = "4c2dc4aa152834bc334f506c1a06b866416a8b6697d5c9f75b9a689c8486def0"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"wasm-bindgen-macro",
|
||||
|
@ -1295,37 +1336,37 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-backend"
|
||||
version = "0.2.60"
|
||||
version = "0.2.63"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd"
|
||||
checksum = "ded84f06e0ed21499f6184df0e0cb3494727b0c5da89534e0fcc55c51d812101"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"lazy_static",
|
||||
"log",
|
||||
"proc-macro2 1.0.10",
|
||||
"quote 1.0.3",
|
||||
"proc-macro2 1.0.18",
|
||||
"quote 1.0.7",
|
||||
"syn",
|
||||
"wasm-bindgen-shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.60"
|
||||
version = "0.2.63"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4"
|
||||
checksum = "838e423688dac18d73e31edce74ddfac468e37b1506ad163ffaf0a46f703ffe3"
|
||||
dependencies = [
|
||||
"quote 1.0.3",
|
||||
"quote 1.0.7",
|
||||
"wasm-bindgen-macro-support",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro-support"
|
||||
version = "0.2.60"
|
||||
version = "0.2.63"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931"
|
||||
checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.10",
|
||||
"quote 1.0.3",
|
||||
"proc-macro2 1.0.18",
|
||||
"quote 1.0.7",
|
||||
"syn",
|
||||
"wasm-bindgen-backend",
|
||||
"wasm-bindgen-shared",
|
||||
|
@ -1333,9 +1374,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-shared"
|
||||
version = "0.2.60"
|
||||
version = "0.2.63"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639"
|
||||
checksum = "c9ba19973a58daf4db6f352eda73dc0e289493cd29fb2632eb172085b6521acd"
|
||||
|
||||
[[package]]
|
||||
name = "wayland-client"
|
||||
|
@ -1482,14 +1523,13 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
|||
|
||||
[[package]]
|
||||
name = "winit"
|
||||
version = "0.22.1"
|
||||
version = "0.22.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc53342d3d1a3d57f3949e0692d93d5a8adb7814d8683cef4a09c2b550e94246"
|
||||
checksum = "1e4ccbf7ddb6627828eace16cacde80fc6bf4dbb3469f88487262a02cf8e7862"
|
||||
dependencies = [
|
||||
"android_glue",
|
||||
"bitflags",
|
||||
"cocoa",
|
||||
"core-foundation 0.6.4",
|
||||
"core-foundation 0.7.0",
|
||||
"core-graphics",
|
||||
"core-video-sys",
|
||||
"dispatch",
|
||||
|
@ -1499,6 +1539,9 @@ dependencies = [
|
|||
"log",
|
||||
"mio",
|
||||
"mio-extras",
|
||||
"ndk",
|
||||
"ndk-glue",
|
||||
"ndk-sys",
|
||||
"objc",
|
||||
"parking_lot",
|
||||
"percent-encoding",

@ -4,7 +4,7 @@ This is an active GitHub mirror of the WebGPU implementation in Rust, which now

# WebGPU

[![Matrix](https://img.shields.io/badge/Matrix-%23wgpu%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu:matrix.org)
[![Matrix](https://img.shields.io/badge/Dev_Matrix-%23wgpu%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu:matrix.org) [![Matrix](https://img.shields.io/badge/User_Matrix-%23wgpu--users%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu-users:matrix.org)
[![Build Status](https://github.com/gfx-rs/wgpu/workflows/CI/badge.svg)](https://github.com/gfx-rs/wgpu/actions)

This is the core logic of an experimental [WebGPU](https://www.w3.org/community/gpu/) implementation. It's written in Rust and is based on [gfx-hal](https://github.com/gfx-rs/gfx) with help of [gfx-extras](https://github.com/gfx-rs/gfx-extras). See the upstream [WebGPU specification](https://gpuweb.github.io/gpuweb/) (work in progress).
@ -254,16 +254,15 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
|
|||
}
|
||||
}
|
||||
A::CreateBindGroupLayout { id, label, entries } => {
|
||||
let label = Label::new(&label);
|
||||
self.device_create_bind_group_layout::<B>(
|
||||
device,
|
||||
&wgc::binding_model::BindGroupLayoutDescriptor {
|
||||
label: label.as_ptr(),
|
||||
entries: entries.as_ptr(),
|
||||
entries_length: entries.len(),
|
||||
&wgt::BindGroupLayoutDescriptor {
|
||||
label: Some(&label),
|
||||
bindings: &entries,
|
||||
},
|
||||
id,
|
||||
);
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
A::DestroyBindGroupLayout(id) => {
|
||||
self.bind_group_layout_destroy::<B>(id);
|
||||
|
@ -280,7 +279,8 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
|
|||
bind_group_layouts_length: bind_group_layouts.len(),
|
||||
},
|
||||
id,
|
||||
);
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
A::DestroyPipelineLayout(id) => {
|
||||
self.pipeline_layout_destroy::<B>(id);
|
||||
|
@ -292,12 +292,11 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
|
|||
entries,
|
||||
} => {
|
||||
use wgc::binding_model as bm;
|
||||
let label = Label::new(&label);
|
||||
let entry_vec = entries
|
||||
.into_iter()
|
||||
.iter()
|
||||
.map(|(binding, res)| wgc::binding_model::BindGroupEntry {
|
||||
binding,
|
||||
resource: match res {
|
||||
binding: *binding,
|
||||
resource: match *res {
|
||||
trace::BindingResource::Buffer { id, offset, size } => {
|
||||
bm::BindingResource::Buffer(bm::BufferBinding {
|
||||
buffer: id,
|
||||
|
@ -309,6 +308,9 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
|
|||
trace::BindingResource::TextureView(id) => {
|
||||
bm::BindingResource::TextureView(id)
|
||||
}
|
||||
trace::BindingResource::TextureViewArray(ref id_array) => {
|
||||
bm::BindingResource::TextureViewArray(id_array)
|
||||
}
|
||||
},
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
@ -316,10 +318,9 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
|
|||
self.device_create_bind_group::<B>(
|
||||
device,
|
||||
&wgc::binding_model::BindGroupDescriptor {
|
||||
label: label.as_ptr(),
|
||||
label: Some(&label),
|
||||
layout: layout_id,
|
||||
entries: entry_vec.as_ptr(),
|
||||
entries_length: entry_vec.len(),
|
||||
bindings: &entry_vec,
|
||||
},
|
||||
id,
|
||||
);
|
||||
|
@ -353,7 +354,8 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
|
|||
compute_stage: cs_stage.desc,
|
||||
},
|
||||
id,
|
||||
);
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
A::DestroyComputePipeline(id) => {
|
||||
self.compute_pipeline_destroy::<B>(id);
|
||||
|
@ -400,7 +402,8 @@ impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
|
|||
alpha_to_coverage_enabled: desc.alpha_to_coverage_enabled,
|
||||
},
|
||||
id,
|
||||
);
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
A::DestroyRenderPipeline(id) => {
|
||||
self.render_pipeline_destroy::<B>(id);
|
||||
|
@ -497,6 +500,7 @@ fn main() {
|
|||
#[cfg(not(feature = "winit"))]
|
||||
compatible_surface: None,
|
||||
},
|
||||
unsafe { wgt::UnsafeExtensions::allow() },
|
||||
wgc::instance::AdapterInputs::IdSet(
|
||||
&[wgc::id::TypedId::zip(0, 0, backend)],
|
||||
|id| id.backend(),

@ -27,7 +27,7 @@ bitflags = "1.0"
copyless = "0.1"
fxhash = "0.2"
log = "0.4"
hal = { package = "gfx-hal", version = "0.5" }
hal = { package = "gfx-hal", version = "0.5.1" }
gfx-backend-empty = "0.5"
gfx-descriptor = "0.1"
gfx-memory = "0.1"

@ -51,16 +51,16 @@ version = "0.5"
features = ["peek-poke"]

[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies]
gfx-backend-metal = { version = "0.5.6" }
gfx-backend-vulkan = { version = "0.5.11", optional = true }
gfx-backend-metal = { version = "0.5.3" }
gfx-backend-vulkan = { version = "0.5.7", optional = true }

[target.'cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies]
gfx-backend-vulkan = { version = "0.5.11" }
gfx-backend-vulkan = { version = "0.5.7" }

[target.'cfg(windows)'.dependencies]
gfx-backend-dx12 = { version = "0.5" }
gfx-backend-dx12 = { version = "0.5.5" }
gfx-backend-dx11 = { version = "0.5" }
gfx-backend-vulkan = { version = "0.5.11" }
gfx-backend-vulkan = { version = "0.5.7" }

[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "dragonfly", target_os = "freebsd"))'.dependencies]
battery = { version = "0.7", optional = true }
@ -17,50 +17,24 @@ use serde::Deserialize;
use serde::Serialize;
use std::borrow::Borrow;

#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BindingType {
    UniformBuffer = 0,
    StorageBuffer = 1,
    ReadonlyStorageBuffer = 2,
    Sampler = 3,
    ComparisonSampler = 4,
    SampledTexture = 5,
    ReadonlyStorageTexture = 6,
    WriteonlyStorageTexture = 7,
#[derive(Clone, Debug)]
pub enum BindGroupLayoutError {
    ConflictBinding(u32),
    MissingExtension(wgt::Extensions),
    /// Arrays of bindings can't be 0 elements long
    ZeroCount,
    /// Arrays of bindings unsupported for this type of binding
    ArrayUnsupported,
}

#[repr(C)]
#[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BindGroupLayoutEntry {
    pub binding: u32,
    pub visibility: wgt::ShaderStage,
    pub ty: BindingType,
    pub multisampled: bool,
    pub has_dynamic_offset: bool,
    pub view_dimension: wgt::TextureViewDimension,
    pub texture_component_type: wgt::TextureComponentType,
    pub storage_texture_format: wgt::TextureFormat,
}

#[repr(C)]
#[derive(Debug)]
pub struct BindGroupLayoutDescriptor {
    pub label: *const std::os::raw::c_char,
    pub entries: *const BindGroupLayoutEntry,
    pub entries_length: usize,
}
pub(crate) type BindEntryMap = FastHashMap<u32, wgt::BindGroupLayoutEntry>;

#[derive(Debug)]
pub struct BindGroupLayout<B: hal::Backend> {
    pub(crate) raw: B::DescriptorSetLayout,
    pub(crate) device_id: Stored<DeviceId>,
    pub(crate) life_guard: LifeGuard,
    pub(crate) entries: FastHashMap<u32, BindGroupLayoutEntry>,
    pub(crate) entries: BindEntryMap,
    pub(crate) desc_counts: DescriptorCounts,
    pub(crate) dynamic_count: usize,
}

@ -72,6 +46,11 @@ pub struct PipelineLayoutDescriptor {
    pub bind_group_layouts_length: usize,
}

#[derive(Clone, Debug)]
pub enum PipelineLayoutError {
    TooManyGroups(usize),
}

#[derive(Debug)]
pub struct PipelineLayout<B: hal::Backend> {
    pub(crate) raw: B::PipelineLayout,

@ -90,32 +69,28 @@ pub struct BufferBinding {
    pub size: wgt::BufferSize,
}

#[repr(C)]
// Note: Duplicated in wgpu-rs as BindingResource
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BindingResource {
pub enum BindingResource<'a> {
    Buffer(BufferBinding),
    Sampler(SamplerId),
    TextureView(TextureViewId),
    TextureViewArray(&'a [TextureViewId]),
}

#[repr(C)]
// Note: Duplicated in wgpu-rs as Binding
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BindGroupEntry {
pub struct BindGroupEntry<'a> {
    pub binding: u32,
    pub resource: BindingResource,
    pub resource: BindingResource<'a>,
}

#[repr(C)]
// Note: Duplicated in wgpu-rs as BindGroupDescriptor
#[derive(Debug)]
pub struct BindGroupDescriptor {
    pub label: *const std::os::raw::c_char,
pub struct BindGroupDescriptor<'a> {
    pub label: Option<&'a str>,
    pub layout: BindGroupLayoutId,
    pub entries: *const BindGroupEntry,
    pub entries_length: usize,
    pub bindings: &'a [BindGroupEntry<'a>],
}

#[derive(Debug)]
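
Note: to make the new borrowed-slice descriptor concrete, here is a minimal sketch of how `BindGroupDescriptor<'a>` might be filled in; the id arguments, the buffer range, and the `example_entries` helper are illustrative placeholders and are not part of the patch.

// Sketch only: `buffer`, `offset`, `size`, and `view` stand in for values
// obtained when the corresponding resources were created.
fn example_entries(
    buffer: BufferId,
    offset: wgt::BufferAddress,
    size: wgt::BufferSize,
    view: TextureViewId,
) -> [BindGroupEntry<'static>; 2] {
    [
        BindGroupEntry {
            binding: 0,
            resource: BindingResource::Buffer(BufferBinding { buffer, offset, size }),
        },
        BindGroupEntry {
            binding: 1,
            resource: BindingResource::TextureView(view),
        },
    ]
}

A `BindGroupDescriptor` can then borrow the returned array directly, e.g. `bindings: &entries` with `label: Some("example")`, instead of passing a raw pointer plus a length.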

@ -211,6 +211,7 @@ struct PassComponent<T> {
    load_op: wgt::LoadOp,
    store_op: wgt::StoreOp,
    clear_value: T,
    read_only: bool,
}

// required for PeekPoke

@ -220,6 +221,7 @@ impl<T: Default> Default for PassComponent<T> {
            load_op: wgt::LoadOp::Clear,
            store_op: wgt::StoreOp::Clear,
            clear_value: T::default(),
            read_only: false,
        }
    }
}

@ -37,6 +37,14 @@ pub type RenderPassColorAttachmentDescriptor =
pub type RenderPassDepthStencilAttachmentDescriptor =
    RenderPassDepthStencilAttachmentDescriptorBase<id::TextureViewId>;

fn is_depth_stencil_read_only(
    desc: &RenderPassDepthStencilAttachmentDescriptor,
    aspects: hal::format::Aspects,
) -> bool {
    (desc.depth_read_only || !aspects.contains(hal::format::Aspects::DEPTH))
        && (desc.stencil_read_only || !aspects.contains(hal::format::Aspects::STENCIL))
}

#[repr(C)]
#[derive(Debug)]
pub struct RenderPassDescriptor<'a> {
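
Note: the `is_depth_stencil_read_only` helper above counts an attachment as read-only only when every aspect the view's format actually has is flagged read-only; aspects the format lacks are ignored. A standalone restatement with plain booleans (hypothetical names, for illustration only):

// Same predicate as is_depth_stencil_read_only, with the hal types replaced by bools.
fn ds_read_only(depth_ro: bool, stencil_ro: bool, has_depth: bool, has_stencil: bool) -> bool {
    (depth_ro || !has_depth) && (stencil_ro || !has_stencil)
}

fn main() {
    // A depth-only view with depth_read_only = true is read-only even though
    // stencil_read_only is false: the stencil aspect simply does not exist.
    assert!(ds_read_only(true, false, true, false));
}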
@ -130,11 +138,13 @@ impl super::RawPass {
|
|||
load_op: ds.depth_load_op,
|
||||
store_op: ds.depth_store_op,
|
||||
clear_value: ds.clear_depth,
|
||||
read_only: ds.depth_read_only,
|
||||
},
|
||||
stencil: PassComponent {
|
||||
load_op: ds.stencil_load_op,
|
||||
store_op: ds.stencil_store_op,
|
||||
clear_value: ds.clear_stencil,
|
||||
read_only: ds.stencil_read_only,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
@ -150,6 +160,7 @@ impl super::RawPass {
|
|||
load_op: at.load_op,
|
||||
store_op: at.store_op,
|
||||
clear_value: at.clear_color,
|
||||
read_only: false,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
@ -361,13 +372,19 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
attachment: id::TextureViewId::from_raw(at.attachment).unwrap(),
|
||||
depth_load_op: at.depth.load_op,
|
||||
depth_store_op: at.depth.store_op,
|
||||
depth_read_only: at.depth.read_only,
|
||||
clear_depth: at.depth.clear_value,
|
||||
stencil_load_op: at.stencil.load_op,
|
||||
stencil_store_op: at.stencil.store_op,
|
||||
clear_stencil: at.stencil.clear_value,
|
||||
stencil_read_only: at.stencil.read_only,
|
||||
};
|
||||
Some(&depth_stencil_attachment_body)
|
||||
};
|
||||
// We default to false intentionally, even if depth-stencil isn't used at all.
|
||||
// This allows us to use the primary raw pipeline in `RenderPipeline`,
|
||||
// instead of the special read-only one, which would be `None`.
|
||||
let mut is_ds_read_only = false;
|
||||
|
||||
let (context, sample_count) = {
|
||||
use hal::device::Device as _;
|
||||
|
@ -388,11 +405,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
);
|
||||
|
||||
const MAX_TOTAL_ATTACHMENTS: usize = 10;
|
||||
type OutputAttachment<'a> = (
|
||||
&'a Stored<id::TextureId>,
|
||||
&'a hal::image::SubresourceRange,
|
||||
Option<TextureUse>,
|
||||
);
|
||||
struct OutputAttachment<'a> {
|
||||
texture_id: &'a Stored<id::TextureId>,
|
||||
range: &'a hal::image::SubresourceRange,
|
||||
previous_use: Option<TextureUse>,
|
||||
new_use: TextureUse,
|
||||
}
|
||||
let mut output_attachments =
|
||||
ArrayVec::<[OutputAttachment; MAX_TOTAL_ATTACHMENTS]>::new();
|
||||
|
||||
|
@ -420,23 +438,29 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
};
|
||||
|
||||
// Using render pass for transition.
|
||||
let consistent_use = base_trackers
|
||||
let previous_use = base_trackers
|
||||
.textures
|
||||
.query(source_id.value, view.range.clone());
|
||||
output_attachments.push((source_id, &view.range, consistent_use));
|
||||
let new_use = if is_depth_stencil_read_only(at, view.range.aspects) {
|
||||
is_ds_read_only = true;
|
||||
TextureUse::ATTACHMENT_READ
|
||||
} else {
|
||||
TextureUse::ATTACHMENT_WRITE
|
||||
};
|
||||
output_attachments.push(OutputAttachment {
|
||||
texture_id: source_id,
|
||||
range: &view.range,
|
||||
previous_use,
|
||||
new_use,
|
||||
});
|
||||
|
||||
let old_layout = match consistent_use {
|
||||
Some(usage) => {
|
||||
conv::map_texture_state(
|
||||
usage,
|
||||
hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL,
|
||||
)
|
||||
.1
|
||||
}
|
||||
None => hal::image::Layout::DepthStencilAttachmentOptimal,
|
||||
let new_layout = conv::map_texture_state(new_use, view.range.aspects).1;
|
||||
let old_layout = match previous_use {
|
||||
Some(usage) => conv::map_texture_state(usage, view.range.aspects).1,
|
||||
None => new_layout,
|
||||
};
|
||||
|
||||
Some(hal::pass::Attachment {
|
||||
let ds_at = hal::pass::Attachment {
|
||||
format: Some(conv::map_texture_format(
|
||||
view.format,
|
||||
device.private_features,
|
||||
|
@ -447,8 +471,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
at.stencil_load_op,
|
||||
at.stencil_store_op,
|
||||
),
|
||||
layouts: old_layout..hal::image::Layout::DepthStencilAttachmentOptimal,
|
||||
})
|
||||
layouts: old_layout..new_layout,
|
||||
};
|
||||
Some((ds_at, new_layout))
|
||||
}
|
||||
None => None,
|
||||
};
|
||||
|
@ -473,18 +498,26 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
|
||||
let layouts = match view.inner {
|
||||
TextureViewInner::Native { ref source_id, .. } => {
|
||||
let consistent_use = base_trackers
|
||||
let previous_use = base_trackers
|
||||
.textures
|
||||
.query(source_id.value, view.range.clone());
|
||||
output_attachments.push((source_id, &view.range, consistent_use));
|
||||
let new_use = TextureUse::ATTACHMENT_WRITE;
|
||||
output_attachments.push(OutputAttachment {
|
||||
texture_id: source_id,
|
||||
range: &view.range,
|
||||
previous_use,
|
||||
new_use,
|
||||
});
|
||||
|
||||
let old_layout = match consistent_use {
|
||||
let new_layout =
|
||||
conv::map_texture_state(new_use, hal::format::Aspects::COLOR).1;
|
||||
let old_layout = match previous_use {
|
||||
Some(usage) => {
|
||||
conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
|
||||
}
|
||||
None => hal::image::Layout::ColorAttachmentOptimal,
|
||||
None => new_layout,
|
||||
};
|
||||
old_layout..hal::image::Layout::ColorAttachmentOptimal
|
||||
old_layout..new_layout
|
||||
}
|
||||
TextureViewInner::SwapChain { ref source_id, .. } => {
|
||||
if let Some((ref sc_id, _)) = cmb.used_swap_chain {
|
||||
|
@ -506,7 +539,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
}
|
||||
};
|
||||
|
||||
colors.push(hal::pass::Attachment {
|
||||
let color_at = hal::pass::Attachment {
|
||||
format: Some(conv::map_texture_format(
|
||||
view.format,
|
||||
device.private_features,
|
||||
|
@ -515,7 +548,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
ops: conv::map_load_store_ops(at.load_op, at.store_op),
|
||||
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
|
||||
layouts,
|
||||
});
|
||||
};
|
||||
colors.push((color_at, hal::image::Layout::ColorAttachmentOptimal));
|
||||
}
|
||||
|
||||
for resolve_target in color_attachments.iter().flat_map(|at| at.resolve_target) {
|
||||
|
@ -535,18 +569,26 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
|
||||
let layouts = match view.inner {
|
||||
TextureViewInner::Native { ref source_id, .. } => {
|
||||
let consistent_use = base_trackers
|
||||
let previous_use = base_trackers
|
||||
.textures
|
||||
.query(source_id.value, view.range.clone());
|
||||
output_attachments.push((source_id, &view.range, consistent_use));
|
||||
let new_use = TextureUse::ATTACHMENT_WRITE;
|
||||
output_attachments.push(OutputAttachment {
|
||||
texture_id: source_id,
|
||||
range: &view.range,
|
||||
previous_use,
|
||||
new_use,
|
||||
});
|
||||
|
||||
let old_layout = match consistent_use {
|
||||
let new_layout =
|
||||
conv::map_texture_state(new_use, hal::format::Aspects::COLOR).1;
|
||||
let old_layout = match previous_use {
|
||||
Some(usage) => {
|
||||
conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
|
||||
}
|
||||
None => hal::image::Layout::ColorAttachmentOptimal,
|
||||
None => new_layout,
|
||||
};
|
||||
old_layout..hal::image::Layout::ColorAttachmentOptimal
|
||||
old_layout..new_layout
|
||||
}
|
||||
TextureViewInner::SwapChain { ref source_id, .. } => {
|
||||
if let Some((ref sc_id, _)) = cmb.used_swap_chain {
|
||||
|
@ -562,7 +604,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
}
|
||||
};
|
||||
|
||||
resolves.push(hal::pass::Attachment {
|
||||
let resolve_at = hal::pass::Attachment {
|
||||
format: Some(conv::map_texture_format(
|
||||
view.format,
|
||||
device.private_features,
|
||||
|
@ -574,7 +616,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
),
|
||||
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
|
||||
layouts,
|
||||
});
|
||||
};
|
||||
resolves.push((resolve_at, hal::image::Layout::ColorAttachmentOptimal));
|
||||
}
|
||||
|
||||
RenderPassKey {
|
||||
|
@ -584,30 +627,29 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
}
|
||||
};
|
||||
|
||||
for (source_id, view_range, consistent_use) in output_attachments {
|
||||
let texture = &texture_guard[source_id.value];
|
||||
for ot in output_attachments {
|
||||
let texture = &texture_guard[ot.texture_id.value];
|
||||
assert!(
|
||||
texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT),
|
||||
"Texture usage {:?} must contain the usage flag OUTPUT_ATTACHMENT",
|
||||
texture.usage
|
||||
);
|
||||
|
||||
let usage = consistent_use.unwrap_or(TextureUse::OUTPUT_ATTACHMENT);
|
||||
// this is important to record the `first` state.
|
||||
let _ = trackers.textures.change_replace(
|
||||
source_id.value,
|
||||
&source_id.ref_count,
|
||||
view_range.clone(),
|
||||
usage,
|
||||
ot.texture_id.value,
|
||||
&ot.texture_id.ref_count,
|
||||
ot.range.clone(),
|
||||
ot.previous_use.unwrap_or(ot.new_use),
|
||||
);
|
||||
if consistent_use.is_some() {
|
||||
if ot.previous_use.is_some() {
|
||||
// If we expect the texture to be transited to a new state by the
|
||||
// render pass configuration, make the tracker aware of that.
|
||||
let _ = trackers.textures.change_replace(
|
||||
source_id.value,
|
||||
&source_id.ref_count,
|
||||
view_range.clone(),
|
||||
TextureUse::OUTPUT_ATTACHMENT,
|
||||
ot.texture_id.value,
|
||||
&ot.texture_id.ref_count,
|
||||
ot.range.clone(),
|
||||
ot.new_use,
|
||||
);
|
||||
};
|
||||
}
|
||||
|
@ -615,8 +657,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
let mut render_pass_cache = device.render_passes.lock();
|
||||
let render_pass = match render_pass_cache.entry(rp_key.clone()) {
|
||||
Entry::Occupied(e) => e.into_mut(),
|
||||
Entry::Vacant(e) => {
|
||||
let color_ids = [
|
||||
Entry::Vacant(entry) => {
|
||||
let color_ids: [hal::pass::AttachmentRef; MAX_COLOR_TARGETS] = [
|
||||
(0, hal::image::Layout::ColorAttachmentOptimal),
|
||||
(1, hal::image::Layout::ColorAttachmentOptimal),
|
||||
(2, hal::image::Layout::ColorAttachmentOptimal),
|
||||
|
@ -629,49 +671,55 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
.iter()
|
||||
.any(|at| at.resolve_target.is_some())
|
||||
{
|
||||
for (i, at) in color_attachments.iter().enumerate() {
|
||||
if at.resolve_target.is_none() {
|
||||
resolve_ids.push((
|
||||
hal::pass::ATTACHMENT_UNUSED,
|
||||
hal::image::Layout::ColorAttachmentOptimal,
|
||||
));
|
||||
} else {
|
||||
let sample_count_check =
|
||||
view_guard[color_attachments[i].attachment].samples;
|
||||
assert!(
|
||||
sample_count_check > 1,
|
||||
"RenderPassColorAttachmentDescriptor with a resolve_target must have an attachment with sample_count > 1, had a sample count of {}",
|
||||
sample_count_check
|
||||
);
|
||||
resolve_ids.push((
|
||||
attachment_index,
|
||||
hal::image::Layout::ColorAttachmentOptimal,
|
||||
));
|
||||
attachment_index += 1;
|
||||
}
|
||||
for ((i, at), &(_, layout)) in color_attachments
|
||||
.iter()
|
||||
.enumerate()
|
||||
.zip(entry.key().resolves.iter())
|
||||
{
|
||||
let real_attachment_index = match at.resolve_target {
|
||||
Some(resolve_attachment) => {
|
||||
assert_ne!(
|
||||
view_guard[at.attachment].samples,
|
||||
1,
|
||||
"RenderPassColorAttachmentDescriptor's attachment with a resolve_target must be multi-sampled",
|
||||
);
|
||||
assert_eq!(
|
||||
view_guard[resolve_attachment].samples,
|
||||
1,
|
||||
"RenderPassColorAttachmentDescriptor's resolve_target must not be multi-sampled",
|
||||
);
|
||||
attachment_index + i
|
||||
}
|
||||
None => hal::pass::ATTACHMENT_UNUSED,
|
||||
};
|
||||
resolve_ids.push((real_attachment_index, layout));
|
||||
}
|
||||
attachment_index += color_attachments.len();
|
||||
}
|
||||
|
||||
let depth_id = (
|
||||
attachment_index,
|
||||
hal::image::Layout::DepthStencilAttachmentOptimal,
|
||||
);
|
||||
let depth_id = depth_stencil_attachment.map(|at| {
|
||||
let aspects = view_guard[at.attachment].range.aspects;
|
||||
let usage = if is_depth_stencil_read_only(at, aspects) {
|
||||
TextureUse::ATTACHMENT_READ
|
||||
} else {
|
||||
TextureUse::ATTACHMENT_WRITE
|
||||
};
|
||||
(attachment_index, conv::map_texture_state(usage, aspects).1)
|
||||
});
|
||||
|
||||
let subpass = hal::pass::SubpassDesc {
|
||||
colors: &color_ids[..color_attachments.len()],
|
||||
resolves: &resolve_ids,
|
||||
depth_stencil: depth_stencil_attachment.map(|_| &depth_id),
|
||||
depth_stencil: depth_id.as_ref(),
|
||||
inputs: &[],
|
||||
preserves: &[],
|
||||
};
|
||||
let all = entry.key().all().map(|(at, _)| at);
|
||||
|
||||
let pass = unsafe {
|
||||
device
|
||||
.raw
|
||||
.create_render_pass(e.key().all(), &[subpass], &[])
|
||||
}
|
||||
.unwrap();
|
||||
e.insert(pass)
|
||||
let pass =
|
||||
unsafe { device.raw.create_render_pass(all, iter::once(subpass), &[]) }
|
||||
.unwrap();
|
||||
entry.insert(pass)
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -744,13 +792,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
let clear_values = color_attachments
|
||||
.iter()
|
||||
.zip(&rp_key.colors)
|
||||
.flat_map(|(at, key)| {
|
||||
.flat_map(|(at, (rat, _layout))| {
|
||||
match at.load_op {
|
||||
LoadOp::Load => None,
|
||||
LoadOp::Clear => {
|
||||
use hal::format::ChannelType;
|
||||
//TODO: validate sign/unsign and normalized ranges of the color values
|
||||
let value = match key.format.unwrap().base_format().1 {
|
||||
let value = match rat.format.unwrap().base_format().1 {
|
||||
ChannelType::Unorm
|
||||
| ChannelType::Snorm
|
||||
| ChannelType::Ufloat
|
||||
|
@ -919,12 +967,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
|
||||
assert!(
|
||||
context.compatible(&pipeline.pass_context),
|
||||
"The render pipeline is not compatible with the pass!"
|
||||
"The render pipeline output formats do not match render pass attachment formats!"
|
||||
);
|
||||
assert_eq!(
|
||||
pipeline.sample_count, sample_count,
|
||||
"The render pipeline and renderpass have mismatching sample_count"
|
||||
);
|
||||
assert!(
|
||||
!is_ds_read_only || pipeline.flags.contains(PipelineFlags::DEPTH_STENCIL_READ_ONLY),
|
||||
"Pipeline {:?} is not compatible with the depth-stencil read-only render pass",
|
||||
pipeline_id
|
||||
);
|
||||
|
||||
state
|
||||
.blend_color
|
||||
|

@ -15,6 +15,7 @@ use crate::{
use hal::command::CommandBuffer as _;
use wgt::{BufferAddress, BufferUsage, Extent3d, Origin3d, TextureDataLayout, TextureUsage};

use std::convert::TryInto as _;
use std::iter;

pub(crate) const BITS_PER_BYTE: u32 = 8;

@ -82,6 +83,179 @@ impl TextureCopyView {
    }
}

/// Function copied with minor modifications from webgpu standard https://gpuweb.github.io/gpuweb/#valid-texture-copy-range
pub(crate) fn validate_linear_texture_data(
    layout: &TextureDataLayout,
    buffer_size: BufferAddress,
    bytes_per_texel: BufferAddress,
    copy_size: &Extent3d,
) {
    // Convert all inputs to BufferAddress (u64) to prevent overflow issues
    let copy_width = copy_size.width as BufferAddress;
    let copy_height = copy_size.height as BufferAddress;
    let copy_depth = copy_size.depth as BufferAddress;

    let offset = layout.offset;
    let rows_per_image = layout.rows_per_image as BufferAddress;
    let bytes_per_row = layout.bytes_per_row as BufferAddress;

    // TODO: Once compressed textures are supported, these needs to be fixed
    let block_width: BufferAddress = 1;
    let block_height: BufferAddress = 1;
    let block_size = bytes_per_texel;

    assert_eq!(
        copy_width % block_width,
        0,
        "Copy width {} must be a multiple of texture block width {}",
        copy_size.width,
        block_width,
    );
    assert_eq!(
        copy_height % block_height,
        0,
        "Copy height {} must be a multiple of texture block height {}",
        copy_size.height,
        block_height,
    );
    assert_eq!(
        rows_per_image % block_height,
        0,
        "Rows per image {} must be a multiple of image format block height {}",
        rows_per_image,
        block_height,
    );

    let bytes_in_a_complete_row = block_size * copy_width / block_width;
    let required_bytes_in_copy = if copy_width == 0 || copy_height == 0 || copy_depth == 0 {
        0
    } else {
        let actual_rows_per_image = if rows_per_image == 0 {
            copy_height
        } else {
            rows_per_image
        };
        let texel_block_rows_per_image = actual_rows_per_image / block_height;
        let bytes_per_image = bytes_per_row * texel_block_rows_per_image;
        let bytes_in_last_slice =
            bytes_per_row * (copy_height / block_height - 1) + bytes_in_a_complete_row;
        bytes_per_image * (copy_depth - 1) + bytes_in_last_slice
    };

    if rows_per_image != 0 {
        assert!(
            rows_per_image >= copy_height,
            "Rows per image {} must be greater or equal to copy_extent.height {}",
            rows_per_image,
            copy_height
        )
    }
    assert!(
        offset + required_bytes_in_copy <= buffer_size,
        "Texture copy using buffer indices {}..{} would overrun buffer of size {}",
        offset,
        offset + required_bytes_in_copy,
        buffer_size
    );
    assert_eq!(
        offset % block_size,
        0,
        "Buffer offset {} must be a multiple of image format block size {}",
        offset,
        block_size,
    );
    if copy_height > 1 {
        assert!(
            bytes_per_row >= bytes_in_a_complete_row,
            "Bytes per row {} must be at least the size of {} {}-byte texel blocks ({})",
            bytes_per_row,
            copy_width / block_width,
            block_size,
            bytes_in_a_complete_row,
        )
    }
    if copy_depth > 1 {
        assert_ne!(
            rows_per_image, 0,
            "Rows per image {} must be set to a non zero value when copy depth > 1 ({})",
            rows_per_image, copy_depth,
        )
    }
}
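
Note: the buffer-overrun assertion above hinges on `required_bytes_in_copy`. A standalone sketch of the same arithmetic for uncompressed formats (1x1 blocks), with made-up numbers; the helper name and the values are illustrative, not part of the patch.

// Recomputes required_bytes_in_copy for uncompressed formats, mirroring the
// formula in validate_linear_texture_data with block_width = block_height = 1
// and block_size = bytes_per_texel. All values are illustrative.
fn required_bytes_in_copy(
    bytes_per_row: u64,
    rows_per_image: u64,
    bytes_per_texel: u64,
    (width, height, depth): (u64, u64, u64),
) -> u64 {
    if width == 0 || height == 0 || depth == 0 {
        return 0;
    }
    let bytes_in_a_complete_row = bytes_per_texel * width;
    let rows = if rows_per_image == 0 { height } else { rows_per_image };
    let bytes_per_image = bytes_per_row * rows;
    let bytes_in_last_slice = bytes_per_row * (height - 1) + bytes_in_a_complete_row;
    bytes_per_image * (depth - 1) + bytes_in_last_slice
}

fn main() {
    // A 100x50 RGBA8 copy with a 256-byte row pitch needs
    // 256 * 49 + 100 * 4 = 12944 bytes for its single slice.
    assert_eq!(required_bytes_in_copy(256, 0, 4, (100, 50, 1)), 12_944);
}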
/// Function copied with minor modifications from webgpu standard https://gpuweb.github.io/gpuweb/#valid-texture-copy-range
|
||||
pub(crate) fn validate_texture_copy_range(
|
||||
texture_copy_view: &TextureCopyView,
|
||||
texture_dimension: hal::image::Kind,
|
||||
copy_size: &Extent3d,
|
||||
) {
|
||||
// TODO: Once compressed textures are supported, these needs to be fixed
|
||||
let block_width: u32 = 1;
|
||||
let block_height: u32 = 1;
|
||||
|
||||
let mut extent = texture_dimension.level_extent(
|
||||
texture_copy_view
|
||||
.mip_level
|
||||
.try_into()
|
||||
.expect("Mip level must be < 256"),
|
||||
);
|
||||
match texture_dimension {
|
||||
hal::image::Kind::D1(..) => {
|
||||
assert_eq!(
|
||||
(copy_size.height, copy_size.depth),
|
||||
(1, 1),
|
||||
"Copies with 1D textures must have height and depth of 1. Currently: ({}, {})",
|
||||
copy_size.height,
|
||||
copy_size.depth,
|
||||
);
|
||||
}
|
||||
hal::image::Kind::D2(_, _, array_layers, _) => {
|
||||
extent.depth = array_layers as u32;
|
||||
}
|
||||
hal::image::Kind::D3(..) => {}
|
||||
};
|
||||
|
||||
let x_copy_max = texture_copy_view.origin.x + copy_size.width;
|
||||
assert!(
|
||||
x_copy_max <= extent.width,
|
||||
"Texture copy with X range {}..{} overruns texture width {}",
|
||||
texture_copy_view.origin.x,
|
||||
x_copy_max,
|
||||
extent.width,
|
||||
);
|
||||
let y_copy_max = texture_copy_view.origin.y + copy_size.height;
|
||||
assert!(
|
||||
y_copy_max <= extent.height,
|
||||
"Texture copy with Y range {}..{} overruns texture height {}",
|
||||
texture_copy_view.origin.y,
|
||||
y_copy_max,
|
||||
extent.height,
|
||||
);
|
||||
let z_copy_max = texture_copy_view.origin.z + copy_size.depth;
|
||||
assert!(
|
||||
z_copy_max <= extent.depth,
|
||||
"Texture copy with Z range {}..{} overruns texture depth {}",
|
||||
texture_copy_view.origin.z,
|
||||
z_copy_max,
|
||||
extent.depth,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
copy_size.width % block_width,
|
||||
0,
|
||||
"Copy width {} must be a multiple of texture block width {}",
|
||||
copy_size.width,
|
||||
block_width,
|
||||
);
|
||||
assert_eq!(
|
||||
copy_size.height % block_height,
|
||||
0,
|
||||
"Copy height {} must be a multiple of texture block height {}",
|
||||
copy_size.height,
|
||||
block_height,
|
||||
);
|
||||
}
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
||||
pub fn command_encoder_copy_buffer_to_buffer<B: GfxBackend>(
|
||||
&self,
|
||||
|
@ -114,6 +288,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
None => (),
|
||||
}
|
||||
|
||||
if size == 0 {
|
||||
log::trace!("Ignoring copy_buffer_to_buffer of size 0");
|
||||
return;
|
||||
}
|
||||
|
||||
let (src_buffer, src_pending) =
|
||||
cmb.trackers
|
||||
.buffers
|
||||
|
@ -136,6 +315,47 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
);
|
||||
barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_buffer)));
|
||||
|
||||
assert_eq!(
|
||||
size % wgt::COPY_BUFFER_ALIGNMENT,
|
||||
0,
|
||||
"Buffer copy size {} must be a multiple of {}",
|
||||
size,
|
||||
wgt::COPY_BUFFER_ALIGNMENT,
|
||||
);
|
||||
assert_eq!(
|
||||
source_offset % wgt::COPY_BUFFER_ALIGNMENT,
|
||||
0,
|
||||
"Buffer source offset {} must be a multiple of {}",
|
||||
source_offset,
|
||||
wgt::COPY_BUFFER_ALIGNMENT,
|
||||
);
|
||||
assert_eq!(
|
||||
destination_offset % wgt::COPY_BUFFER_ALIGNMENT,
|
||||
0,
|
||||
"Buffer destination offset {} must be a multiple of {}",
|
||||
destination_offset,
|
||||
wgt::COPY_BUFFER_ALIGNMENT,
|
||||
);
|
||||
|
||||
let source_start_offset = source_offset;
|
||||
let source_end_offset = source_offset + size;
|
||||
let destination_start_offset = destination_offset;
|
||||
let destination_end_offset = destination_offset + size;
|
||||
assert!(
|
||||
source_end_offset <= src_buffer.size,
|
||||
"Buffer to buffer copy with indices {}..{} overruns source buffer of size {}",
|
||||
source_start_offset,
|
||||
source_end_offset,
|
||||
src_buffer.size
|
||||
);
|
||||
assert!(
|
||||
destination_end_offset <= dst_buffer.size,
|
||||
"Buffer to buffer copy with indices {}..{} overruns destination buffer of size {}",
|
||||
destination_start_offset,
|
||||
destination_end_offset,
|
||||
dst_buffer.size
|
||||
);
|
||||
|
||||
let region = hal::command::BufferCopy {
|
||||
src: source_offset,
|
||||
dst: destination_offset,
|
||||
|
@ -177,6 +397,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
None => (),
|
||||
}
|
||||
|
||||
if copy_size.width == 0 || copy_size.height == 0 || copy_size.width == 0 {
|
||||
log::trace!("Ignoring copy_buffer_to_texture of size 0");
|
||||
return;
|
||||
}
|
||||
|
||||
let (src_buffer, src_pending) = cmb.trackers.buffers.use_replace(
|
||||
&*buffer_guard,
|
||||
source.buffer,
|
||||
|
@ -207,6 +432,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
source.layout.bytes_per_row,
|
||||
wgt::COPY_BYTES_PER_ROW_ALIGNMENT
|
||||
);
|
||||
validate_texture_copy_range(destination, dst_texture.kind, copy_size);
|
||||
validate_linear_texture_data(
|
||||
&source.layout,
|
||||
src_buffer.size,
|
||||
bytes_per_texel as BufferAddress,
|
||||
copy_size,
|
||||
);
|
||||
|
||||
let buffer_width = source.layout.bytes_per_row / bytes_per_texel;
|
||||
let region = hal::command::BufferImageCopy {
|
||||
buffer_offset: source.layout.offset,
|
||||
|
@ -257,6 +490,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
None => (),
|
||||
}
|
||||
|
||||
if copy_size.width == 0 || copy_size.height == 0 || copy_size.width == 0 {
|
||||
log::trace!("Ignoring copy_texture_to_buffer of size 0");
|
||||
return;
|
||||
}
|
||||
|
||||
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
|
||||
&*texture_guard,
|
||||
source.texture,
|
||||
|
@ -295,6 +533,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
destination.layout.bytes_per_row,
|
||||
wgt::COPY_BYTES_PER_ROW_ALIGNMENT
|
||||
);
|
||||
validate_texture_copy_range(source, src_texture.kind, copy_size);
|
||||
validate_linear_texture_data(
|
||||
&destination.layout,
|
||||
dst_buffer.size,
|
||||
bytes_per_texel as BufferAddress,
|
||||
copy_size,
|
||||
);
|
||||
|
||||
let buffer_width = destination.layout.bytes_per_row / bytes_per_texel;
|
||||
let region = hal::command::BufferImageCopy {
|
||||
buffer_offset: destination.layout.offset,
|
||||
|
@ -351,6 +597,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
            None => (),
        }

        if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth == 0 {
            log::trace!("Ignoring copy_texture_to_texture of size 0");
            return;
        }

        let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
            &*texture_guard,
            source.texture,
@ -378,6 +629,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_texture)));
|
||||
|
||||
assert_eq!(src_texture.dimension, dst_texture.dimension);
|
||||
validate_texture_copy_range(source, src_texture.kind, copy_size);
|
||||
validate_texture_copy_range(destination, dst_texture.kind, copy_size);
|
||||
|
||||
let region = hal::command::ImageCopy {
|
||||
src_subresource: src_layers,
|
||||
src_offset,
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
use crate::{binding_model, resource, PrivateFeatures};
|
||||
use crate::{resource, PrivateFeatures};
|
||||
|
||||
pub fn map_buffer_usage(usage: wgt::BufferUsage) -> (hal::buffer::Usage, hal::memory::Properties) {
|
||||
use hal::buffer::Usage as U;
|
||||
|
@ -75,40 +75,36 @@ pub fn map_texture_usage(
|
|||
value
|
||||
}
|
||||
|
||||
pub fn map_binding_type(binding: &binding_model::BindGroupLayoutEntry) -> hal::pso::DescriptorType {
|
||||
use crate::binding_model::BindingType as Bt;
|
||||
pub fn map_binding_type(binding: &wgt::BindGroupLayoutEntry) -> hal::pso::DescriptorType {
|
||||
use hal::pso;
|
||||
use wgt::BindingType as Bt;
|
||||
match binding.ty {
|
||||
Bt::UniformBuffer => pso::DescriptorType::Buffer {
|
||||
Bt::UniformBuffer { dynamic } => pso::DescriptorType::Buffer {
|
||||
ty: pso::BufferDescriptorType::Uniform,
|
||||
format: pso::BufferDescriptorFormat::Structured {
|
||||
dynamic_offset: binding.has_dynamic_offset,
|
||||
dynamic_offset: dynamic,
|
||||
},
|
||||
},
|
||||
Bt::StorageBuffer => pso::DescriptorType::Buffer {
|
||||
ty: pso::BufferDescriptorType::Storage { read_only: false },
|
||||
Bt::StorageBuffer { readonly, dynamic } => pso::DescriptorType::Buffer {
|
||||
ty: pso::BufferDescriptorType::Storage {
|
||||
read_only: readonly,
|
||||
},
|
||||
format: pso::BufferDescriptorFormat::Structured {
|
||||
dynamic_offset: binding.has_dynamic_offset,
|
||||
dynamic_offset: dynamic,
|
||||
},
|
||||
},
|
||||
Bt::ReadonlyStorageBuffer => pso::DescriptorType::Buffer {
|
||||
ty: pso::BufferDescriptorType::Storage { read_only: true },
|
||||
format: pso::BufferDescriptorFormat::Structured {
|
||||
dynamic_offset: binding.has_dynamic_offset,
|
||||
},
|
||||
},
|
||||
Bt::Sampler | Bt::ComparisonSampler => pso::DescriptorType::Sampler,
|
||||
Bt::SampledTexture => pso::DescriptorType::Image {
|
||||
Bt::Sampler { .. } => pso::DescriptorType::Sampler,
|
||||
Bt::SampledTexture { .. } => pso::DescriptorType::Image {
|
||||
ty: pso::ImageDescriptorType::Sampled {
|
||||
with_sampler: false,
|
||||
},
|
||||
},
|
||||
Bt::ReadonlyStorageTexture => pso::DescriptorType::Image {
|
||||
ty: pso::ImageDescriptorType::Storage { read_only: true },
|
||||
},
|
||||
Bt::WriteonlyStorageTexture => pso::DescriptorType::Image {
|
||||
ty: pso::ImageDescriptorType::Storage { read_only: false },
|
||||
Bt::StorageTexture { readonly, .. } => pso::DescriptorType::Image {
|
||||
ty: pso::ImageDescriptorType::Storage {
|
||||
read_only: readonly,
|
||||
},
|
||||
},
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -371,14 +367,14 @@ pub(crate) fn map_texture_format(
|
|||
// Depth and stencil formats
|
||||
Tf::Depth32Float => H::D32Sfloat,
|
||||
Tf::Depth24Plus => {
|
||||
if private_features.supports_texture_d24_s8 {
|
||||
if private_features.texture_d24_s8 {
|
||||
H::D24UnormS8Uint
|
||||
} else {
|
||||
H::D32Sfloat
|
||||
}
|
||||
}
|
||||
Tf::Depth24PlusStencil8 => {
|
||||
if private_features.supports_texture_d24_s8 {
|
||||
if private_features.texture_d24_s8 {
|
||||
H::D24UnormS8Uint
|
||||
} else {
|
||||
H::D32SfloatS8Uint
|
||||
|
@ -530,8 +526,9 @@ pub(crate) fn map_texture_state(
|
|||
W::COPY_SRC => L::TransferSrcOptimal,
|
||||
W::COPY_DST => L::TransferDstOptimal,
|
||||
W::SAMPLED => L::ShaderReadOnlyOptimal,
|
||||
W::OUTPUT_ATTACHMENT if is_color => L::ColorAttachmentOptimal,
|
||||
W::OUTPUT_ATTACHMENT => L::DepthStencilAttachmentOptimal, //TODO: read-only depth/stencil
|
||||
W::ATTACHMENT_READ | W::ATTACHMENT_WRITE if is_color => L::ColorAttachmentOptimal,
|
||||
W::ATTACHMENT_READ => L::DepthStencilReadOnlyOptimal,
|
||||
W::ATTACHMENT_WRITE => L::DepthStencilAttachmentOptimal,
|
||||
_ => L::General,
|
||||
};
|
||||
|
||||
|
@ -545,8 +542,14 @@ pub(crate) fn map_texture_state(
|
|||
if usage.contains(W::SAMPLED) {
|
||||
access |= A::SHADER_READ;
|
||||
}
|
||||
if usage.contains(W::OUTPUT_ATTACHMENT) {
|
||||
//TODO: read-only attachments
|
||||
if usage.contains(W::ATTACHMENT_READ) {
|
||||
access |= if is_color {
|
||||
A::COLOR_ATTACHMENT_READ
|
||||
} else {
|
||||
A::DEPTH_STENCIL_ATTACHMENT_READ
|
||||
};
|
||||
}
|
||||
if usage.contains(W::ATTACHMENT_WRITE) {
|
||||
access |= if is_color {
|
||||
A::COLOR_ATTACHMENT_WRITE
|
||||
} else {
|
||||
|
|
|
@ -657,19 +657,22 @@ impl<B: GfxBackend> LifetimeTracker<B> {
|
|||
_ => panic!("No pending mapping."),
|
||||
};
|
||||
log::debug!("Buffer {:?} map state -> Active", buffer_id);
|
||||
let host = match mapping.op {
|
||||
resource::BufferMapOperation::Read { .. } => super::HostMap::Read,
|
||||
resource::BufferMapOperation::Write { .. } => super::HostMap::Write,
|
||||
let host = mapping.op.host;
|
||||
let status = match super::map_buffer(raw, buffer, mapping.sub_range.clone(), host) {
|
||||
Ok(ptr) => {
|
||||
buffer.map_state = resource::BufferMapState::Active {
|
||||
ptr,
|
||||
sub_range: mapping.sub_range,
|
||||
host,
|
||||
};
|
||||
resource::BufferMapAsyncStatus::Success
|
||||
}
|
||||
Err(e) => {
|
||||
log::error!("Mapping failed {:?}", e);
|
||||
resource::BufferMapAsyncStatus::Error
|
||||
}
|
||||
};
|
||||
let result = super::map_buffer(raw, buffer, mapping.sub_range.clone(), host);
|
||||
if let Ok(ptr) = result {
|
||||
buffer.map_state = resource::BufferMapState::Active {
|
||||
ptr,
|
||||
sub_range: mapping.sub_range,
|
||||
host,
|
||||
};
|
||||
}
|
||||
pending_callbacks.push((mapping.op, result));
|
||||
pending_callbacks.push((mapping.op, status));
|
||||
}
|
||||
}
|
||||
pending_callbacks
|
||||
|
|
(The diff for this file is not shown because of its large size.)
|
@ -54,6 +54,10 @@ impl<B: hal::Backend> PendingWrites<B> {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn consume_temp(&mut self, buffer: B::Buffer, memory: MemoryBlock<B>) {
|
||||
self.temp_buffers.push((buffer, memory));
|
||||
}
|
||||
|
||||
fn consume(&mut self, stage: StagingData<B>) {
|
||||
self.temp_buffers.push((stage.buffer, stage.memory));
|
||||
self.command_buffer = Some(stage.comb);
|
||||
|
@ -61,6 +65,17 @@ impl<B: hal::Backend> PendingWrites<B> {
|
|||
}
|
||||
|
||||
impl<B: hal::Backend> super::Device<B> {
|
||||
pub fn borrow_pending_writes(&mut self) -> &mut B::CommandBuffer {
|
||||
if self.pending_writes.command_buffer.is_none() {
|
||||
let mut comb = self.com_allocator.allocate_internal();
|
||||
unsafe {
|
||||
comb.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
|
||||
}
|
||||
self.pending_writes.command_buffer = Some(comb);
|
||||
}
|
||||
self.pending_writes.command_buffer.as_mut().unwrap()
|
||||
}
|
||||
|
||||
fn prepare_stage(&mut self, size: wgt::BufferAddress) -> StagingData<B> {
|
||||
let mut buffer = unsafe {
|
||||
self.raw
|
||||
|
@ -138,7 +153,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
None => {}
|
||||
}
|
||||
|
||||
let mut stage = device.prepare_stage(data.len() as wgt::BufferAddress);
|
||||
let data_size = data.len() as wgt::BufferAddress;
|
||||
if data_size == 0 {
|
||||
log::trace!("Ignoring write_buffer of size 0");
|
||||
return;
|
||||
}
|
||||
|
||||
let mut stage = device.prepare_stage(data_size);
|
||||
{
|
||||
let mut mapped = stage
|
||||
.memory
|
||||
|
@ -163,6 +184,30 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
        let last_submit_index = device.life_guard.submission_index.load(Ordering::Relaxed);
        dst.life_guard.use_at(last_submit_index + 1);

        assert_eq!(
            data_size % wgt::COPY_BUFFER_ALIGNMENT,
            0,
            "Buffer write size {} must be a multiple of {}",
            data_size,
            wgt::COPY_BUFFER_ALIGNMENT,
        );
        assert_eq!(
            buffer_offset % wgt::COPY_BUFFER_ALIGNMENT,
            0,
            "Buffer offset {} must be a multiple of {}",
            buffer_offset,
            wgt::COPY_BUFFER_ALIGNMENT,
        );
        let destination_start_offset = buffer_offset;
        let destination_end_offset = buffer_offset + data_size;
        assert!(
            destination_end_offset <= dst.size,
            "Write buffer with indices {}..{} overruns destination buffer of size {}",
            destination_start_offset,
            destination_end_offset,
            dst.size
        );

        let region = hal::command::BufferCopy {
            src: 0,
            dst: buffer_offset,
@ -218,11 +263,22 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
            None => {}
        }

        if size.width == 0 || size.height == 0 || size.depth == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return;
        }

        let texture_format = texture_guard[destination.texture].format;
        let bytes_per_texel = conv::map_texture_format(texture_format, device.private_features)
            .surface_desc()
            .bits as u32
            / BITS_PER_BYTE;
        crate::command::validate_linear_texture_data(
            data_layout,
            data.len() as wgt::BufferAddress,
            bytes_per_texel as wgt::BufferAddress,
            size,
        );

        let bytes_per_row_alignment = get_lowest_common_denom(
            device.hal_limits.optimal_buffer_copy_pitch_alignment as u32,
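Illustrative aside, not part of the patch: validate_linear_texture_data checks that the supplied data is large enough for the requested extent. The sketch below shows the usual minimum-size rule under standard linear-layout assumptions (bytes_per_row strides rows, rows_per_image strides depth slices, and the last row of the last slice only needs the copied bytes); the function name and exact formula are assumptions, not the crate's implementation.

// Sketch only: minimum data length a linear layout must cover for a copy.
fn minimum_data_size(
    offset: u64,
    bytes_per_row: u64,
    rows_per_image: u64,
    width_bytes: u64, // copy width in texels * bytes_per_texel
    height: u64,
    depth: u64,
) -> u64 {
    if width_bytes == 0 || height == 0 || depth == 0 {
        return offset;
    }
    let full_slices = (depth - 1) * rows_per_image * bytes_per_row;
    let last_slice = (height - 1) * bytes_per_row + width_bytes;
    offset + full_slices + last_slice
}

fn main() {
    // A 4x2x1 RGBA8 copy, tightly packed: two rows of 16 bytes each.
    assert_eq!(minimum_data_size(0, 16, 2, 16, 2, 1), 32);
}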
@ -271,6 +327,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
"Write texture usage {:?} must contain flag COPY_DST",
|
||||
dst.usage
|
||||
);
|
||||
crate::command::validate_texture_copy_range(destination, dst.kind, size);
|
||||
|
||||
let last_submit_index = device.life_guard.submission_index.load(Ordering::Relaxed);
|
||||
dst.life_guard.use_at(last_submit_index + 1);
|
||||
|
|
|
@ -27,6 +27,7 @@ pub enum BindingResource {
|
|||
},
|
||||
Sampler(id::SamplerId),
|
||||
TextureView(id::TextureViewId),
|
||||
TextureViewArray(Vec<id::TextureViewId>),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
|
@ -132,7 +133,7 @@ pub enum Action {
|
|||
CreateBindGroupLayout {
|
||||
id: id::BindGroupLayoutId,
|
||||
label: String,
|
||||
entries: Vec<crate::binding_model::BindGroupLayoutEntry>,
|
||||
entries: Vec<wgt::BindGroupLayoutEntry>,
|
||||
},
|
||||
DestroyBindGroupLayout(id::BindGroupLayoutId),
|
||||
CreatePipelineLayout {
|
||||
|
|
|
@ -177,6 +177,7 @@ impl<B: hal::Backend> Access<PipelineLayout<B>> for Device<B> {}
|
|||
impl<B: hal::Backend> Access<PipelineLayout<B>> for CommandBuffer<B> {}
|
||||
impl<B: hal::Backend> Access<BindGroupLayout<B>> for Root {}
|
||||
impl<B: hal::Backend> Access<BindGroupLayout<B>> for Device<B> {}
|
||||
impl<B: hal::Backend> Access<BindGroupLayout<B>> for PipelineLayout<B> {}
|
||||
impl<B: hal::Backend> Access<BindGroup<B>> for Root {}
|
||||
impl<B: hal::Backend> Access<BindGroup<B>> for Device<B> {}
|
||||
impl<B: hal::Backend> Access<BindGroup<B>> for BindGroupLayout<B> {}
|
||||
|
@ -191,7 +192,7 @@ impl<B: hal::Backend> Access<RenderPipeline<B>> for Device<B> {}
|
|||
impl<B: hal::Backend> Access<RenderPipeline<B>> for BindGroup<B> {}
|
||||
impl<B: hal::Backend> Access<RenderPipeline<B>> for ComputePipeline<B> {}
|
||||
impl<B: hal::Backend> Access<ShaderModule<B>> for Device<B> {}
|
||||
impl<B: hal::Backend> Access<ShaderModule<B>> for PipelineLayout<B> {}
|
||||
impl<B: hal::Backend> Access<ShaderModule<B>> for BindGroupLayout<B> {}
|
||||
impl<B: hal::Backend> Access<Buffer<B>> for Root {}
|
||||
impl<B: hal::Backend> Access<Buffer<B>> for Device<B> {}
|
||||
impl<B: hal::Backend> Access<Buffer<B>> for BindGroupLayout<B> {}
|
||||
|
|
|
@ -7,7 +7,7 @@ use crate::{
|
|||
device::Device,
|
||||
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
|
||||
id::{AdapterId, DeviceId, SurfaceId},
|
||||
power, LifeGuard, Stored, MAX_BIND_GROUPS,
|
||||
power, LifeGuard, PrivateFeatures, Stored, MAX_BIND_GROUPS,
|
||||
};
|
||||
|
||||
use wgt::{Backend, BackendBit, DeviceDescriptor, PowerPreference, BIND_BUFFER_ALIGNMENT};
|
||||
|
@ -118,13 +118,42 @@ pub struct Surface {
|
|||
#[derive(Debug)]
|
||||
pub struct Adapter<B: hal::Backend> {
|
||||
pub(crate) raw: hal::adapter::Adapter<B>,
|
||||
extensions: wgt::Extensions,
|
||||
limits: wgt::Limits,
|
||||
unsafe_extensions: wgt::UnsafeExtensions,
|
||||
life_guard: LifeGuard,
|
||||
}
|
||||
|
||||
impl<B: hal::Backend> Adapter<B> {
|
||||
fn new(raw: hal::adapter::Adapter<B>) -> Self {
|
||||
fn new(raw: hal::adapter::Adapter<B>, unsafe_extensions: wgt::UnsafeExtensions) -> Self {
|
||||
let adapter_features = raw.physical_device.features();
|
||||
|
||||
let mut extensions = wgt::Extensions::default() | wgt::Extensions::MAPPABLE_PRIMARY_BUFFERS;
|
||||
extensions.set(
|
||||
wgt::Extensions::ANISOTROPIC_FILTERING,
|
||||
adapter_features.contains(hal::Features::SAMPLER_ANISOTROPY),
|
||||
);
|
||||
extensions.set(
|
||||
wgt::Extensions::TEXTURE_BINDING_ARRAY,
|
||||
adapter_features.contains(hal::Features::TEXTURE_DESCRIPTOR_ARRAY),
|
||||
);
|
||||
if unsafe_extensions.allowed() {
|
||||
// Unsafe extensions go here
|
||||
}
|
||||
|
||||
let adapter_limits = raw.physical_device.limits();
|
||||
|
||||
let limits = wgt::Limits {
|
||||
max_bind_groups: (adapter_limits.max_bound_descriptor_sets as u32)
|
||||
.min(MAX_BIND_GROUPS as u32),
|
||||
_non_exhaustive: unsafe { wgt::NonExhaustive::new() },
|
||||
};
|
||||
|
||||
Adapter {
|
||||
raw,
|
||||
extensions,
|
||||
limits,
|
||||
unsafe_extensions,
|
||||
life_guard: LifeGuard::new(),
|
||||
}
|
||||
}
|
||||
|
@ -251,7 +280,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
self.surfaces.register_identity(id_in, surface, &mut token)
|
||||
}
|
||||
|
||||
pub fn enumerate_adapters(&self, inputs: AdapterInputs<Input<G, AdapterId>>) -> Vec<AdapterId> {
|
||||
pub fn enumerate_adapters(
|
||||
&self,
|
||||
unsafe_extensions: wgt::UnsafeExtensions,
|
||||
inputs: AdapterInputs<Input<G, AdapterId>>,
|
||||
) -> Vec<AdapterId> {
|
||||
let instance = &self.instance;
|
||||
let mut token = Token::root();
|
||||
let mut adapters = Vec::new();
|
||||
|
@ -264,7 +297,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
if let Some(ref inst) = instance.vulkan {
|
||||
if let Some(id_vulkan) = inputs.find(Backend::Vulkan) {
|
||||
for raw in inst.enumerate_adapters() {
|
||||
let adapter = Adapter::new(raw);
|
||||
let adapter = Adapter::new(raw, unsafe_extensions);
|
||||
log::info!("Adapter Vulkan {:?}", adapter.raw.info);
|
||||
adapters.push(backend::Vulkan::hub(self).adapters.register_identity(
|
||||
id_vulkan.clone(),
|
||||
|
@ -279,7 +312,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
{
|
||||
if let Some(id_metal) = inputs.find(Backend::Metal) {
|
||||
for raw in instance.metal.enumerate_adapters() {
|
||||
let adapter = Adapter::new(raw);
|
||||
let adapter = Adapter::new(raw, unsafe_extensions);
|
||||
log::info!("Adapter Metal {:?}", adapter.raw.info);
|
||||
adapters.push(backend::Metal::hub(self).adapters.register_identity(
|
||||
id_metal.clone(),
|
||||
|
@ -294,7 +327,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
if let Some(ref inst) = instance.dx12 {
|
||||
if let Some(id_dx12) = inputs.find(Backend::Dx12) {
|
||||
for raw in inst.enumerate_adapters() {
|
||||
let adapter = Adapter::new(raw);
|
||||
let adapter = Adapter::new(raw, unsafe_extensions);
|
||||
log::info!("Adapter Dx12 {:?}", adapter.raw.info);
|
||||
adapters.push(backend::Dx12::hub(self).adapters.register_identity(
|
||||
id_dx12.clone(),
|
||||
|
@ -307,7 +340,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
|
||||
if let Some(id_dx11) = inputs.find(Backend::Dx11) {
|
||||
for raw in instance.dx11.enumerate_adapters() {
|
||||
let adapter = Adapter::new(raw);
|
||||
let adapter = Adapter::new(raw, unsafe_extensions);
|
||||
log::info!("Adapter Dx11 {:?}", adapter.raw.info);
|
||||
adapters.push(backend::Dx11::hub(self).adapters.register_identity(
|
||||
id_dx11.clone(),
|
||||
|
@ -324,6 +357,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
pub fn pick_adapter(
|
||||
&self,
|
||||
desc: &RequestAdapterOptions,
|
||||
unsafe_extensions: wgt::UnsafeExtensions,
|
||||
inputs: AdapterInputs<Input<G, AdapterId>>,
|
||||
) -> Option<AdapterId> {
|
||||
let instance = &self.instance;
|
||||
|
@ -462,7 +496,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
))]
|
||||
{
|
||||
if selected < adapters_vk.len() {
|
||||
let adapter = Adapter::new(adapters_vk.swap_remove(selected));
|
||||
let adapter = Adapter::new(adapters_vk.swap_remove(selected), unsafe_extensions);
|
||||
log::info!("Adapter Vulkan {:?}", adapter.raw.info);
|
||||
let id = backend::Vulkan::hub(self).adapters.register_identity(
|
||||
id_vulkan.unwrap(),
|
||||
|
@ -476,7 +510,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
#[cfg(any(target_os = "ios", target_os = "macos"))]
|
||||
{
|
||||
if selected < adapters_mtl.len() {
|
||||
let adapter = Adapter::new(adapters_mtl.swap_remove(selected));
|
||||
let adapter = Adapter::new(adapters_mtl.swap_remove(selected), unsafe_extensions);
|
||||
log::info!("Adapter Metal {:?}", adapter.raw.info);
|
||||
let id = backend::Metal::hub(self).adapters.register_identity(
|
||||
id_metal.unwrap(),
|
||||
|
@ -490,7 +524,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
#[cfg(windows)]
|
||||
{
|
||||
if selected < adapters_dx12.len() {
|
||||
let adapter = Adapter::new(adapters_dx12.swap_remove(selected));
|
||||
let adapter = Adapter::new(adapters_dx12.swap_remove(selected), unsafe_extensions);
|
||||
log::info!("Adapter Dx12 {:?}", adapter.raw.info);
|
||||
let id = backend::Dx12::hub(self).adapters.register_identity(
|
||||
id_dx12.unwrap(),
|
||||
|
@ -501,7 +535,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
}
|
||||
selected -= adapters_dx12.len();
|
||||
if selected < adapters_dx11.len() {
|
||||
let adapter = Adapter::new(adapters_dx11.swap_remove(selected));
|
||||
let adapter = Adapter::new(adapters_dx11.swap_remove(selected), unsafe_extensions);
|
||||
log::info!("Adapter Dx11 {:?}", adapter.raw.info);
|
||||
let id = backend::Dx11::hub(self).adapters.register_identity(
|
||||
id_dx11.unwrap(),
|
||||
|
@ -532,11 +566,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
let (adapter_guard, _) = hub.adapters.read(&mut token);
|
||||
let adapter = &adapter_guard[adapter_id];
|
||||
|
||||
let features = adapter.raw.physical_device.features();
|
||||
|
||||
wgt::Extensions {
|
||||
anisotropic_filtering: features.contains(hal::Features::SAMPLER_ANISOTROPY),
|
||||
}
|
||||
adapter.extensions
|
||||
}
|
||||
|
||||
pub fn adapter_limits<B: GfxBackend>(&self, adapter_id: AdapterId) -> wgt::Limits {
|
||||
|
@ -545,11 +575,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
let (adapter_guard, _) = hub.adapters.read(&mut token);
|
||||
let adapter = &adapter_guard[adapter_id];
|
||||
|
||||
let limits = adapter.raw.physical_device.limits();
|
||||
|
||||
wgt::Limits {
|
||||
max_bind_groups: (limits.max_bound_descriptor_sets as u32).min(MAX_BIND_GROUPS as u32),
|
||||
}
|
||||
adapter.limits.clone()
|
||||
}
|
||||
|
||||
pub fn adapter_destroy<B: GfxBackend>(&self, adapter_id: AdapterId) {
|
||||
|
@ -600,14 +626,48 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
);
|
||||
}
|
||||
|
||||
// Verify all extensions were exposed by the adapter
|
||||
if !adapter.unsafe_extensions.allowed() {
|
||||
assert!(
|
||||
!desc.extensions.intersects(wgt::Extensions::ALL_UNSAFE),
|
||||
"Cannot enable unsafe extensions without passing UnsafeExtensions::allow() when getting an adapter. Enabled unsafe extensions: {:?}",
|
||||
desc.extensions & wgt::Extensions::ALL_UNSAFE
|
||||
)
|
||||
}
|
||||
assert!(
|
||||
adapter.extensions.contains(desc.extensions),
|
||||
"Cannot enable extensions that adapter doesn't support. Unsupported extensions: {:?}",
|
||||
desc.extensions - adapter.extensions
|
||||
);
|
||||
|
||||
// Check features needed by extensions
|
||||
if desc.extensions.anisotropic_filtering {
|
||||
if desc
|
||||
.extensions
|
||||
.contains(wgt::Extensions::ANISOTROPIC_FILTERING)
|
||||
{
|
||||
assert!(
|
||||
available_features.contains(hal::Features::SAMPLER_ANISOTROPY),
|
||||
"Missing feature SAMPLER_ANISOTROPY for anisotropic filtering extension"
|
||||
);
|
||||
enabled_features |= hal::Features::SAMPLER_ANISOTROPY;
|
||||
}
|
||||
if desc
|
||||
.extensions
|
||||
.contains(wgt::Extensions::MAPPABLE_PRIMARY_BUFFERS)
|
||||
&& adapter.raw.info.device_type == hal::adapter::DeviceType::DiscreteGpu
|
||||
{
|
||||
log::warn!("Extension MAPPABLE_PRIMARY_BUFFERS enabled on a discrete gpu. This is a massive performance footgun and likely not what you wanted");
|
||||
}
|
||||
if desc
|
||||
.extensions
|
||||
.contains(wgt::Extensions::TEXTURE_BINDING_ARRAY)
|
||||
{
|
||||
assert!(
|
||||
available_features.contains(hal::Features::TEXTURE_DESCRIPTOR_ARRAY),
|
||||
"Missing feature TEXTURE_DESCRIPTOR_ARRAY for texture binding array extension"
|
||||
);
|
||||
enabled_features |= hal::Features::TEXTURE_DESCRIPTOR_ARRAY;
|
||||
}
|
||||
|
||||
let family = adapter
|
||||
.raw
|
||||
|
@ -638,10 +698,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
}
|
||||
|
||||
let mem_props = phd.memory_properties();
|
||||
let supports_texture_d24_s8 = phd
|
||||
.format_properties(Some(hal::format::Format::D24UnormS8Uint))
|
||||
.optimal_tiling
|
||||
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT);
|
||||
if !desc.shader_validation {
|
||||
log::warn!("Shader validation is disabled");
|
||||
}
|
||||
let private_features = PrivateFeatures {
|
||||
shader_validation: desc.shader_validation,
|
||||
texture_d24_s8: phd
|
||||
.format_properties(Some(hal::format::Format::D24UnormS8Uint))
|
||||
.optimal_tiling
|
||||
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT),
|
||||
};
|
||||
|
||||
Device::new(
|
||||
gpu.device,
|
||||
|
@ -652,7 +718,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
|||
gpu.queue_groups.swap_remove(0),
|
||||
mem_props,
|
||||
limits,
|
||||
supports_texture_d24_s8,
|
||||
private_features,
|
||||
desc,
|
||||
trace_path,
|
||||
)
|
||||
|
|
|
@ -36,6 +36,7 @@ pub mod power;
|
|||
pub mod resource;
|
||||
pub mod swap_chain;
|
||||
mod track;
|
||||
mod validation;
|
||||
|
||||
pub use hal::pso::read_spirv;
|
||||
|
||||
|
@ -171,7 +172,8 @@ pub struct U32Array {
|
|||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
struct PrivateFeatures {
|
||||
pub supports_texture_d24_s8: bool,
|
||||
shader_validation: bool,
|
||||
texture_d24_s8: bool,
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
use crate::{
|
||||
device::RenderPassContext,
|
||||
id::{DeviceId, PipelineLayoutId, ShaderModuleId},
|
||||
validation::StageError,
|
||||
LifeGuard, RawString, RefCount, Stored, U32Array,
|
||||
};
|
||||
use std::borrow::Borrow;
|
||||
|
@ -57,6 +58,11 @@ pub struct ComputePipelineDescriptor {
|
|||
pub compute_stage: ProgrammableStageDescriptor,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ComputePipelineError {
|
||||
Stage(StageError),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ComputePipeline<B: hal::Backend> {
|
||||
pub(crate) raw: B::ComputePipeline,
|
||||
|
@ -88,11 +94,28 @@ pub struct RenderPipelineDescriptor {
|
|||
pub alpha_to_coverage_enabled: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum RenderPipelineError {
|
||||
InvalidVertexAttributeOffset {
|
||||
location: wgt::ShaderLocation,
|
||||
offset: BufferAddress,
|
||||
},
|
||||
Stage {
|
||||
flag: wgt::ShaderStage,
|
||||
error: StageError,
|
||||
},
|
||||
IncompatibleOutputFormat {
|
||||
index: u8,
|
||||
},
|
||||
InvalidSampleCount(u32),
|
||||
}
|
||||
|
||||
bitflags::bitflags! {
|
||||
#[repr(transparent)]
|
||||
pub struct PipelineFlags: u32 {
|
||||
const BLEND_COLOR = 1;
|
||||
const STENCIL_REFERENCE = 2;
|
||||
const DEPTH_STENCIL_READ_ONLY = 4;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ use crate::{
|
|||
use gfx_memory::MemoryBlock;
|
||||
use wgt::{BufferAddress, BufferUsage, TextureFormat, TextureUsage};
|
||||
|
||||
use std::{borrow::Borrow, fmt};
|
||||
use std::{borrow::Borrow, ptr::NonNull};
|
||||
|
||||
bitflags::bitflags! {
|
||||
/// The internal enum mirrored from `BufferUsage`. The values don't have to match!
|
||||
|
@ -47,17 +47,18 @@ bitflags::bitflags! {
|
|||
const COPY_SRC = 1;
|
||||
const COPY_DST = 2;
|
||||
const SAMPLED = 4;
|
||||
const OUTPUT_ATTACHMENT = 8;
|
||||
const STORAGE_LOAD = 16;
|
||||
const STORAGE_STORE = 32;
|
||||
const ATTACHMENT_READ = 8;
|
||||
const ATTACHMENT_WRITE = 16;
|
||||
const STORAGE_LOAD = 32;
|
||||
const STORAGE_STORE = 48;
|
||||
/// The combination of all read-only usages.
|
||||
const READ_ALL = Self::COPY_SRC.bits | Self::SAMPLED.bits | Self::STORAGE_LOAD.bits;
|
||||
const READ_ALL = Self::COPY_SRC.bits | Self::SAMPLED.bits | Self::ATTACHMENT_READ.bits | Self::STORAGE_LOAD.bits;
|
||||
/// The combination of all write-only and read-write usages.
|
||||
const WRITE_ALL = Self::COPY_DST.bits | Self::OUTPUT_ATTACHMENT.bits | Self::STORAGE_STORE.bits;
|
||||
const WRITE_ALL = Self::COPY_DST.bits | Self::ATTACHMENT_WRITE.bits | Self::STORAGE_STORE.bits;
|
||||
/// The combination of all usages that the are guaranteed to be be ordered by the hardware.
|
||||
/// If a usage is not ordered, then even if it doesn't change between draw calls, there
|
||||
/// still need to be pipeline barriers inserted for synchronization.
|
||||
const ORDERED = Self::READ_ALL.bits | Self::COPY_DST.bits | Self::OUTPUT_ATTACHMENT.bits;
|
||||
const ORDERED = Self::READ_ALL.bits | Self::COPY_DST.bits | Self::ATTACHMENT_WRITE.bits;
|
||||
const UNINITIALIZED = 0xFFFF;
|
||||
}
|
||||
}
|
||||
|
@ -72,12 +73,18 @@ pub enum BufferMapAsyncStatus {
}

#[derive(Debug)]
pub enum BufferMapState {
pub enum BufferMapState<B: hal::Backend> {
    /// Mapped at creation.
    Init {
        ptr: NonNull<u8>,
        stage_buffer: B::Buffer,
        stage_memory: MemoryBlock<B>,
    },
    /// Waiting for GPU to be done before mapping
    Waiting(BufferPendingMapping),
    /// Mapped
    Active {
        ptr: *mut u8,
        ptr: NonNull<u8>,
        sub_range: hal::buffer::SubRange,
        host: crate::device::HostMap,
    },
@ -85,49 +92,28 @@
    Idle,
}

unsafe impl Send for BufferMapState {}
unsafe impl Sync for BufferMapState {}
unsafe impl<B: hal::Backend> Send for BufferMapState<B> {}
unsafe impl<B: hal::Backend> Sync for BufferMapState<B> {}

pub enum BufferMapOperation {
    Read {
        callback: crate::device::BufferMapReadCallback,
        userdata: *mut u8,
    },
    Write {
        callback: crate::device::BufferMapWriteCallback,
        userdata: *mut u8,
    },
pub type BufferMapCallback = unsafe extern "C" fn(status: BufferMapAsyncStatus, userdata: *mut u8);

#[repr(C)]
#[derive(Debug)]
pub struct BufferMapOperation {
    pub host: crate::device::HostMap,
    pub callback: BufferMapCallback,
    pub user_data: *mut u8,
}

//TODO: clarify if/why this is needed here
unsafe impl Send for BufferMapOperation {}
unsafe impl Sync for BufferMapOperation {}

impl fmt::Debug for BufferMapOperation {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let op = match *self {
            BufferMapOperation::Read { .. } => "read",
            BufferMapOperation::Write { .. } => "write",
        };
        write!(fmt, "BufferMapOperation <{}>", op)
    }
}

impl BufferMapOperation {
    pub(crate) fn call_error(self) {
        match self {
            BufferMapOperation::Read { callback, userdata } => {
                log::error!("wgpu_buffer_map_read_async failed: buffer mapping is pending");
                unsafe {
                    callback(BufferMapAsyncStatus::Error, std::ptr::null(), userdata);
                }
            }
            BufferMapOperation::Write { callback, userdata } => {
                log::error!("wgpu_buffer_map_write_async failed: buffer mapping is pending");
                unsafe {
                    callback(BufferMapAsyncStatus::Error, std::ptr::null_mut(), userdata);
                }
            }
        log::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
        unsafe {
            (self.callback)(BufferMapAsyncStatus::Error, self.user_data);
        }
    }
}
@ -150,7 +136,7 @@ pub struct Buffer<B: hal::Backend> {
    pub(crate) full_range: (),
    pub(crate) sync_mapped_writes: Option<hal::memory::Segment>,
    pub(crate) life_guard: LifeGuard,
    pub(crate) map_state: BufferMapState,
    pub(crate) map_state: BufferMapState<B>,
}

impl<B: hal::Backend> Borrow<RefCount> for Buffer<B> {
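Illustrative aside, not part of the patch: the new unified BufferMapOperation carries a single C-style BufferMapCallback instead of separate read and write callbacks. Below is a self-contained sketch of what a caller-side callback could look like; the enum and alias are local stand-ins that mirror the declarations above, and MapContext is a hypothetical user type.

// Sketch only: a callback matching the BufferMapCallback signature above.
#[repr(C)]
#[allow(dead_code)]
enum BufferMapAsyncStatus {
    Success,
    Error,
}

type BufferMapCallback = unsafe extern "C" fn(status: BufferMapAsyncStatus, userdata: *mut u8);

struct MapContext {
    mapped_ok: bool,
}

unsafe extern "C" fn on_map(status: BufferMapAsyncStatus, userdata: *mut u8) {
    // Recover the pointer that was stored in BufferMapOperation::user_data.
    let ctx = &mut *(userdata as *mut MapContext);
    ctx.mapped_ok = matches!(status, BufferMapAsyncStatus::Success);
}

fn main() {
    let mut ctx = MapContext { mapped_ok: false };
    let callback: BufferMapCallback = on_map;
    // In real code the callback and pointer travel through BufferMapOperation.
    unsafe { callback(BufferMapAsyncStatus::Success, &mut ctx as *mut MapContext as *mut u8) };
    assert!(ctx.mapped_ok);
}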
|
|
@ -342,7 +342,7 @@ mod test {
|
|||
2..3,
|
||||
Unit {
|
||||
first: Some(TextureUse::COPY_SRC),
|
||||
last: TextureUse::OUTPUT_ATTACHMENT,
|
||||
last: TextureUse::ATTACHMENT_WRITE,
|
||||
},
|
||||
),
|
||||
]);
|
||||
|
@ -385,7 +385,7 @@ mod test {
|
|||
ts1.mips[0].query(&(2..3), |&v| v),
|
||||
Some(Ok(Unit {
|
||||
first: Some(TextureUse::SAMPLED),
|
||||
last: TextureUse::OUTPUT_ATTACHMENT,
|
||||
last: TextureUse::ATTACHMENT_WRITE,
|
||||
})),
|
||||
"wrong final layer 2 state"
|
||||
);
|
||||
|
@ -394,7 +394,7 @@ mod test {
|
|||
ts2.mips[0] = PlaneStates::from_slice(&[(
|
||||
2..3,
|
||||
Unit {
|
||||
first: Some(TextureUse::OUTPUT_ATTACHMENT),
|
||||
first: Some(TextureUse::ATTACHMENT_WRITE),
|
||||
last: TextureUse::COPY_SRC,
|
||||
},
|
||||
)]);
|
||||
|
|
|
@ -0,0 +1,614 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
use crate::{binding_model::BindEntryMap, FastHashMap};
|
||||
use spirv_headers as spirv;
|
||||
use wgt::{BindGroupLayoutEntry, BindingType};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum BindingError {
|
||||
/// The binding is missing from the pipeline layout.
|
||||
Missing,
|
||||
/// The visibility flags don't include the shader stage.
|
||||
Invisible,
|
||||
/// The load/store access flags don't match the shader.
|
||||
WrongUsage(naga::GlobalUse),
|
||||
/// The type on the shader side does not match the pipeline binding.
|
||||
WrongType,
|
||||
/// The view dimension doesn't match the shader.
|
||||
WrongTextureViewDimension { dim: spirv::Dim, is_array: bool },
|
||||
/// The component type of a sampled texture doesn't match the shader.
|
||||
WrongTextureComponentType(Option<naga::ScalarKind>),
|
||||
/// Texture sampling capability doesn't match with the shader.
|
||||
WrongTextureSampled,
|
||||
/// The multisampled flag doesn't match.
|
||||
WrongTextureMultisampled,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum InputError {
|
||||
/// The input is not provided by the earlier stage in the pipeline.
|
||||
Missing,
|
||||
/// The input type is not compatible with the provided.
|
||||
WrongType,
|
||||
}
|
||||
|
||||
/// Errors produced when validating a programmable stage of a pipeline.
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum StageError {
|
||||
/// Unable to find an entry point matching the specified execution model.
|
||||
MissingEntryPoint(spirv::ExecutionModel),
|
||||
/// Error matching a global binding against the pipeline layout.
|
||||
Binding {
|
||||
set: u32,
|
||||
binding: u32,
|
||||
error: BindingError,
|
||||
},
|
||||
/// Error matching the stage input against the previous stage outputs.
|
||||
Input {
|
||||
location: wgt::ShaderLocation,
|
||||
error: InputError,
|
||||
},
|
||||
}
|
||||
|
||||
fn check_binding(
|
||||
module: &naga::Module,
|
||||
var: &naga::GlobalVariable,
|
||||
entry: &BindGroupLayoutEntry,
|
||||
usage: naga::GlobalUse,
|
||||
) -> Result<(), BindingError> {
|
||||
let mut ty_inner = &module.types[var.ty].inner;
|
||||
//TODO: change naga's IR to avoid a pointer here
|
||||
if let naga::TypeInner::Pointer { base, class: _ } = *ty_inner {
|
||||
ty_inner = &module.types[base].inner;
|
||||
}
|
||||
let allowed_usage = match *ty_inner {
|
||||
naga::TypeInner::Struct { .. } => match entry.ty {
|
||||
BindingType::UniformBuffer { .. } => naga::GlobalUse::LOAD,
|
||||
BindingType::StorageBuffer { readonly, .. } => {
|
||||
if readonly {
|
||||
naga::GlobalUse::LOAD
|
||||
} else {
|
||||
naga::GlobalUse::all()
|
||||
}
|
||||
}
|
||||
_ => return Err(BindingError::WrongType),
|
||||
},
|
||||
naga::TypeInner::Sampler => match entry.ty {
|
||||
BindingType::Sampler { .. } => naga::GlobalUse::empty(),
|
||||
_ => return Err(BindingError::WrongType),
|
||||
},
|
||||
naga::TypeInner::Image { base, dim, flags } => {
|
||||
if flags.contains(naga::ImageFlags::MULTISAMPLED) {
|
||||
match entry.ty {
|
||||
BindingType::SampledTexture {
|
||||
multisampled: true, ..
|
||||
} => {}
|
||||
_ => return Err(BindingError::WrongTextureMultisampled),
|
||||
}
|
||||
}
|
||||
let view_dimension = match entry.ty {
|
||||
BindingType::SampledTexture { dimension, .. }
|
||||
| BindingType::StorageTexture { dimension, .. } => dimension,
|
||||
_ => {
|
||||
return Err(BindingError::WrongTextureViewDimension {
|
||||
dim,
|
||||
is_array: true,
|
||||
})
|
||||
}
|
||||
};
|
||||
if flags.contains(naga::ImageFlags::ARRAYED) {
|
||||
match (dim, view_dimension) {
|
||||
(spirv::Dim::Dim2D, wgt::TextureViewDimension::D2Array) => (),
|
||||
(spirv::Dim::DimCube, wgt::TextureViewDimension::CubeArray) => (),
|
||||
_ => {
|
||||
return Err(BindingError::WrongTextureViewDimension {
|
||||
dim,
|
||||
is_array: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match (dim, view_dimension) {
|
||||
(spirv::Dim::Dim1D, wgt::TextureViewDimension::D1) => (),
|
||||
(spirv::Dim::Dim2D, wgt::TextureViewDimension::D2) => (),
|
||||
(spirv::Dim::Dim3D, wgt::TextureViewDimension::D3) => (),
|
||||
(spirv::Dim::DimCube, wgt::TextureViewDimension::Cube) => (),
|
||||
_ => {
|
||||
return Err(BindingError::WrongTextureViewDimension {
|
||||
dim,
|
||||
is_array: false,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
let (allowed_usage, is_sampled) = match entry.ty {
|
||||
BindingType::SampledTexture { component_type, .. } => {
|
||||
let expected_scalar_kind = match component_type {
|
||||
wgt::TextureComponentType::Float => naga::ScalarKind::Float,
|
||||
wgt::TextureComponentType::Sint => naga::ScalarKind::Sint,
|
||||
wgt::TextureComponentType::Uint => naga::ScalarKind::Uint,
|
||||
};
|
||||
match module.types[base].inner {
|
||||
naga::TypeInner::Scalar { kind, .. }
|
||||
| naga::TypeInner::Vector { kind, .. }
|
||||
if kind == expected_scalar_kind => {}
|
||||
naga::TypeInner::Scalar { kind, .. }
|
||||
| naga::TypeInner::Vector { kind, .. } => {
|
||||
return Err(BindingError::WrongTextureComponentType(Some(kind)))
|
||||
}
|
||||
_ => return Err(BindingError::WrongTextureComponentType(None)),
|
||||
};
|
||||
(naga::GlobalUse::LOAD, true)
|
||||
}
|
||||
BindingType::StorageTexture { readonly, .. } => {
|
||||
if readonly {
|
||||
//TODO: check entry.storage_texture_format
|
||||
(naga::GlobalUse::LOAD, false)
|
||||
} else {
|
||||
(naga::GlobalUse::STORE, false)
|
||||
}
|
||||
}
|
||||
_ => return Err(BindingError::WrongType),
|
||||
};
|
||||
if is_sampled != flags.contains(naga::ImageFlags::SAMPLED) {
|
||||
return Err(BindingError::WrongTextureSampled);
|
||||
}
|
||||
allowed_usage
|
||||
}
|
||||
_ => return Err(BindingError::WrongType),
|
||||
};
|
||||
if allowed_usage.contains(usage) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(BindingError::WrongUsage(usage))
|
||||
}
|
||||
}
|
||||
|
||||
fn is_sub_type(sub: &naga::TypeInner, provided: &naga::TypeInner) -> bool {
|
||||
use naga::TypeInner as Ti;
|
||||
|
||||
match (sub, provided) {
|
||||
(
|
||||
&Ti::Scalar {
|
||||
kind: k0,
|
||||
width: w0,
|
||||
},
|
||||
&Ti::Scalar {
|
||||
kind: k1,
|
||||
width: w1,
|
||||
},
|
||||
) => k0 == k1 && w0 <= w1,
|
||||
(
|
||||
&Ti::Scalar {
|
||||
kind: k0,
|
||||
width: w0,
|
||||
},
|
||||
&Ti::Vector {
|
||||
size: _,
|
||||
kind: k1,
|
||||
width: w1,
|
||||
},
|
||||
) => k0 == k1 && w0 <= w1,
|
||||
(
|
||||
&Ti::Vector {
|
||||
size: s0,
|
||||
kind: k0,
|
||||
width: w0,
|
||||
},
|
||||
&Ti::Vector {
|
||||
size: s1,
|
||||
kind: k1,
|
||||
width: w1,
|
||||
},
|
||||
) => s0 as u8 <= s1 as u8 && k0 == k1 && w0 <= w1,
|
||||
(
|
||||
&Ti::Matrix {
|
||||
columns: c0,
|
||||
rows: r0,
|
||||
kind: k0,
|
||||
width: w0,
|
||||
},
|
||||
&Ti::Matrix {
|
||||
columns: c1,
|
||||
rows: r1,
|
||||
kind: k1,
|
||||
width: w1,
|
||||
},
|
||||
) => c0 == c1 && r0 == r1 && k0 == k1 && w0 <= w1,
|
||||
(&Ti::Struct { members: ref m0 }, &Ti::Struct { members: ref m1 }) => m0 == m1,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub enum MaybeOwned<'a, T> {
|
||||
Owned(T),
|
||||
Borrowed(&'a T),
|
||||
}
|
||||
|
||||
impl<'a, T> std::ops::Deref for MaybeOwned<'a, T> {
|
||||
type Target = T;
|
||||
fn deref(&self) -> &T {
|
||||
match *self {
|
||||
MaybeOwned::Owned(ref value) => value,
|
||||
MaybeOwned::Borrowed(value) => value,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_vertex_format(format: wgt::VertexFormat) -> naga::TypeInner {
|
||||
use naga::TypeInner as Ti;
|
||||
use wgt::VertexFormat as Vf;
|
||||
match format {
|
||||
Vf::Uchar2 => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Uint,
|
||||
width: 8,
|
||||
},
|
||||
Vf::Uchar4 => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Uint,
|
||||
width: 8,
|
||||
},
|
||||
Vf::Char2 => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Sint,
|
||||
width: 8,
|
||||
},
|
||||
Vf::Char4 => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Sint,
|
||||
width: 8,
|
||||
},
|
||||
Vf::Uchar2Norm => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 8,
|
||||
},
|
||||
Vf::Uchar4Norm => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 8,
|
||||
},
|
||||
Vf::Char2Norm => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 8,
|
||||
},
|
||||
Vf::Char4Norm => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 8,
|
||||
},
|
||||
Vf::Ushort2 => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Uint,
|
||||
width: 16,
|
||||
},
|
||||
Vf::Ushort4 => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Uint,
|
||||
width: 16,
|
||||
},
|
||||
Vf::Short2 => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Sint,
|
||||
width: 16,
|
||||
},
|
||||
Vf::Short4 => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Sint,
|
||||
width: 16,
|
||||
},
|
||||
Vf::Ushort2Norm | Vf::Short2Norm | Vf::Half2 => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 16,
|
||||
},
|
||||
Vf::Ushort4Norm | Vf::Short4Norm | Vf::Half4 => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 16,
|
||||
},
|
||||
Vf::Float => Ti::Scalar {
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Float2 => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Float3 => Ti::Vector {
|
||||
size: naga::VectorSize::Tri,
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Float4 => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Float,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Uint => Ti::Scalar {
|
||||
kind: naga::ScalarKind::Uint,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Uint2 => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Uint,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Uint3 => Ti::Vector {
|
||||
size: naga::VectorSize::Tri,
|
||||
kind: naga::ScalarKind::Uint,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Uint4 => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Uint,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Int => Ti::Scalar {
|
||||
kind: naga::ScalarKind::Sint,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Int2 => Ti::Vector {
|
||||
size: naga::VectorSize::Bi,
|
||||
kind: naga::ScalarKind::Sint,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Int3 => Ti::Vector {
|
||||
size: naga::VectorSize::Tri,
|
||||
kind: naga::ScalarKind::Sint,
|
||||
width: 32,
|
||||
},
|
||||
Vf::Int4 => Ti::Vector {
|
||||
size: naga::VectorSize::Quad,
|
||||
kind: naga::ScalarKind::Sint,
|
||||
width: 32,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn map_texture_format(format: wgt::TextureFormat) -> naga::TypeInner {
|
||||
use naga::{ScalarKind as Sk, TypeInner as Ti, VectorSize as Vs};
|
||||
use wgt::TextureFormat as Tf;
|
||||
|
||||
match format {
|
||||
Tf::R8Unorm | Tf::R8Snorm => Ti::Scalar {
|
||||
kind: Sk::Float,
|
||||
width: 8,
|
||||
},
|
||||
Tf::R8Uint => Ti::Scalar {
|
||||
kind: Sk::Uint,
|
||||
width: 8,
|
||||
},
|
||||
Tf::R8Sint => Ti::Scalar {
|
||||
kind: Sk::Sint,
|
||||
width: 8,
|
||||
},
|
||||
Tf::R16Uint => Ti::Scalar {
|
||||
kind: Sk::Uint,
|
||||
width: 16,
|
||||
},
|
||||
Tf::R16Sint => Ti::Scalar {
|
||||
kind: Sk::Sint,
|
||||
width: 16,
|
||||
},
|
||||
Tf::R16Float => Ti::Scalar {
|
||||
kind: Sk::Float,
|
||||
width: 16,
|
||||
},
|
||||
Tf::Rg8Unorm | Tf::Rg8Snorm => Ti::Vector {
|
||||
size: Vs::Bi,
|
||||
kind: Sk::Float,
|
||||
width: 8,
|
||||
},
|
||||
Tf::Rg8Uint => Ti::Vector {
|
||||
size: Vs::Bi,
|
||||
kind: Sk::Uint,
|
||||
width: 8,
|
||||
},
|
||||
Tf::Rg8Sint => Ti::Vector {
|
||||
size: Vs::Bi,
|
||||
kind: Sk::Sint,
|
||||
width: 8,
|
||||
},
|
||||
Tf::R32Uint => Ti::Scalar {
|
||||
kind: Sk::Uint,
|
||||
width: 32,
|
||||
},
|
||||
Tf::R32Sint => Ti::Scalar {
|
||||
kind: Sk::Sint,
|
||||
width: 32,
|
||||
},
|
||||
Tf::R32Float => Ti::Scalar {
|
||||
kind: Sk::Float,
|
||||
width: 32,
|
||||
},
|
||||
Tf::Rg16Uint => Ti::Vector {
|
||||
size: Vs::Bi,
|
||||
kind: Sk::Uint,
|
||||
width: 16,
|
||||
},
|
||||
Tf::Rg16Sint => Ti::Vector {
|
||||
size: Vs::Bi,
|
||||
kind: Sk::Sint,
|
||||
width: 16,
|
||||
},
|
||||
Tf::Rg16Float => Ti::Vector {
|
||||
size: Vs::Bi,
|
||||
kind: Sk::Float,
|
||||
width: 16,
|
||||
},
|
||||
Tf::Rgba8Unorm
|
||||
| Tf::Rgba8UnormSrgb
|
||||
| Tf::Rgba8Snorm
|
||||
| Tf::Bgra8Unorm
|
||||
| Tf::Bgra8UnormSrgb => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Float,
|
||||
width: 8,
|
||||
},
|
||||
Tf::Rgba8Uint => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Uint,
|
||||
width: 8,
|
||||
},
|
||||
Tf::Rgba8Sint => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Sint,
|
||||
width: 8,
|
||||
},
|
||||
Tf::Rgb10a2Unorm => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Float,
|
||||
width: 10,
|
||||
},
|
||||
Tf::Rg11b10Float => Ti::Vector {
|
||||
size: Vs::Tri,
|
||||
kind: Sk::Float,
|
||||
width: 11,
|
||||
},
|
||||
Tf::Rg32Uint => Ti::Vector {
|
||||
size: Vs::Bi,
|
||||
kind: Sk::Uint,
|
||||
width: 32,
|
||||
},
|
||||
Tf::Rg32Sint => Ti::Vector {
|
||||
size: Vs::Bi,
|
||||
kind: Sk::Sint,
|
||||
width: 32,
|
||||
},
|
||||
Tf::Rg32Float => Ti::Vector {
|
||||
size: Vs::Bi,
|
||||
kind: Sk::Float,
|
||||
width: 32,
|
||||
},
|
||||
Tf::Rgba16Uint => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Uint,
|
||||
width: 16,
|
||||
},
|
||||
Tf::Rgba16Sint => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Sint,
|
||||
width: 16,
|
||||
},
|
||||
Tf::Rgba16Float => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Float,
|
||||
width: 16,
|
||||
},
|
||||
Tf::Rgba32Uint => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Uint,
|
||||
width: 32,
|
||||
},
|
||||
Tf::Rgba32Sint => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Sint,
|
||||
width: 32,
|
||||
},
|
||||
Tf::Rgba32Float => Ti::Vector {
|
||||
size: Vs::Quad,
|
||||
kind: Sk::Float,
|
||||
width: 32,
|
||||
},
|
||||
Tf::Depth32Float | Tf::Depth24Plus | Tf::Depth24PlusStencil8 => {
|
||||
panic!("Unexpected depth format")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return true if the fragment `format` is covered by the provided `output`.
|
||||
pub fn check_texture_format(format: wgt::TextureFormat, output: &naga::TypeInner) -> bool {
|
||||
let required = map_texture_format(format);
|
||||
is_sub_type(&required, output)
|
||||
}
|
||||
|
||||
pub type StageInterface<'a> = FastHashMap<wgt::ShaderLocation, MaybeOwned<'a, naga::TypeInner>>;
|
||||
|
||||
pub fn check_stage<'a>(
|
||||
module: &'a naga::Module,
|
||||
group_layouts: &[&BindEntryMap],
|
||||
entry_point_name: &str,
|
||||
execution_model: spirv::ExecutionModel,
|
||||
inputs: StageInterface<'a>,
|
||||
) -> Result<StageInterface<'a>, StageError> {
|
||||
// Since a shader module can have multiple entry points with the same name,
|
||||
// we need to look for one with the right execution model.
|
||||
let entry_point = module
|
||||
.entry_points
|
||||
.iter()
|
||||
.find(|entry_point| {
|
||||
entry_point.name == entry_point_name && entry_point.exec_model == execution_model
|
||||
})
|
||||
.ok_or(StageError::MissingEntryPoint(execution_model))?;
|
||||
let stage_bit = match execution_model {
|
||||
spirv::ExecutionModel::Vertex => wgt::ShaderStage::VERTEX,
|
||||
spirv::ExecutionModel::Fragment => wgt::ShaderStage::FRAGMENT,
|
||||
spirv::ExecutionModel::GLCompute => wgt::ShaderStage::COMPUTE,
|
||||
// the entry point wouldn't match otherwise
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let function = &module.functions[entry_point.function];
|
||||
let mut outputs = StageInterface::default();
|
||||
for ((_, var), &usage) in module.global_variables.iter().zip(&function.global_usage) {
|
||||
if usage.is_empty() {
|
||||
continue;
|
||||
}
|
||||
match var.binding {
|
||||
Some(naga::Binding::Descriptor { set, binding }) => {
|
||||
let result = group_layouts
|
||||
.get(set as usize)
|
||||
.and_then(|map| map.get(&binding))
|
||||
.ok_or(BindingError::Missing)
|
||||
.and_then(|entry| {
|
||||
if entry.visibility.contains(stage_bit) {
|
||||
Ok(entry)
|
||||
} else {
|
||||
Err(BindingError::Invisible)
|
||||
}
|
||||
})
|
||||
.and_then(|entry| check_binding(module, var, entry, usage));
|
||||
if let Err(error) = result {
|
||||
return Err(StageError::Binding {
|
||||
set,
|
||||
binding,
|
||||
error,
|
||||
});
|
||||
}
|
||||
}
|
||||
Some(naga::Binding::Location(location)) => {
|
||||
let mut ty = &module.types[var.ty].inner;
|
||||
//TODO: change naga's IR to not have pointer for varyings
|
||||
if let naga::TypeInner::Pointer { base, class: _ } = *ty {
|
||||
ty = &module.types[base].inner;
|
||||
}
|
||||
if usage.contains(naga::GlobalUse::STORE) {
|
||||
outputs.insert(location, MaybeOwned::Borrowed(ty));
|
||||
} else {
|
||||
let result =
|
||||
inputs
|
||||
.get(&location)
|
||||
.ok_or(InputError::Missing)
|
||||
.and_then(|provided| {
|
||||
if is_sub_type(ty, provided) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(InputError::WrongType)
|
||||
}
|
||||
});
|
||||
if let Err(error) = result {
|
||||
return Err(StageError::Input { location, error });
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
Ok(outputs)
|
||||
}
|
|
@ -17,9 +17,11 @@ use std::{io, ptr, slice};
pub const COPY_BYTES_PER_ROW_ALIGNMENT: u32 = 256;
/// Bound uniform/storage buffer offsets must be aligned to this number.
pub const BIND_BUFFER_ALIGNMENT: u64 = 256;
/// Buffer to buffer copy offsets and sizes must be aligned to this number
pub const COPY_BUFFER_ALIGNMENT: u64 = 4;

#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default, PartialEq)]
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "peek-poke", derive(PeekPoke))]
#[cfg_attr(
    feature = "trace",
@ -34,7 +36,13 @@ pub const BIND_BUFFER_ALIGNMENT: u64 = 256;
pub struct BufferSize(pub u64);

impl BufferSize {
    pub const WHOLE: BufferSize = BufferSize(!0u64);
    pub const WHOLE: BufferSize = BufferSize(!0);
}

impl Default for BufferSize {
    fn default() -> Self {
        BufferSize::WHOLE
    }
}

#[repr(u8)]
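Illustrative aside, not part of the patch: BufferSize::WHOLE is the "rest of the buffer" sentinel and is now the Default. A small sketch, assuming this crate is consumed as a dependency under its library name wgt.

fn main() {
    assert_eq!(wgt::BufferSize::default(), wgt::BufferSize::WHOLE);
    assert_eq!(wgt::BufferSize::WHOLE.0, !0u64);
    // A concrete binding size is just the wrapped byte count.
    let first_256_bytes = wgt::BufferSize(256);
    assert_eq!(first_256_bytes.0, 256);
}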
@ -61,6 +69,12 @@ pub enum PowerPreference {
|
|||
HighPerformance = 2,
|
||||
}
|
||||
|
||||
impl Default for PowerPreference {
|
||||
fn default() -> PowerPreference {
|
||||
PowerPreference::Default
|
||||
}
|
||||
}
|
||||
|
||||
bitflags::bitflags! {
|
||||
#[repr(transparent)]
|
||||
#[cfg_attr(feature = "trace", derive(Serialize))]
|
||||
|
@ -88,16 +102,102 @@ impl From<Backend> for BackendBit {
    }
}

#[repr(C)]
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
/// This type is not to be constructed by any users of wgpu. If you construct this type, any semver
/// guarantees made by wgpu are invalidated and a non-breaking change may break your code.
///
/// If you are here trying to construct it, the solution is to use partial construction with the
/// default:
///
/// ```ignore
/// let limits = Limits {
///     max_bind_groups: 2,
///     ..Limits::default()
/// }
/// ```
#[doc(hidden)]
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct Extensions {
    /// This is a native only extension. Support is planned to be added to webgpu,
    /// but it is not yet implemented.
    ///
    /// https://github.com/gpuweb/gpuweb/issues/696
    pub anisotropic_filtering: bool,
pub struct NonExhaustive(());

impl NonExhaustive {
    pub unsafe fn new() -> Self {
        Self(())
    }
}

bitflags::bitflags! {
    #[repr(transparent)]
    #[derive(Default)]
    #[cfg_attr(feature = "trace", derive(Serialize))]
    #[cfg_attr(feature = "replay", derive(Deserialize))]
    pub struct Extensions: u64 {
        /// Allow anisotropic filtering in samplers.
        ///
        /// Supported platforms:
        /// - OpenGL 4.6+ (or 1.2+ with widespread GL_EXT_texture_filter_anisotropic)
        /// - DX11/12
        /// - Metal
        /// - Vulkan
        ///
        /// This is a native only extension. Support is planned to be added to webgpu,
        /// but it is not yet implemented.
        ///
        /// https://github.com/gpuweb/gpuweb/issues/696
        const ANISOTROPIC_FILTERING = 0x0000_0000_0001_0000;
        /// Webgpu only allows the MAP_READ and MAP_WRITE buffer usage to be matched with
        /// COPY_DST and COPY_SRC respectively. This removes this requirement.
        ///
        /// This is only beneficial on systems that share memory between CPU and GPU. If enabled
        /// on a system that doesn't, this can severely hinder performance. Only use if you understand
        /// the consequences.
        ///
        /// Supported platforms:
        /// - All
        ///
        /// This is a native only extension.
        const MAPPABLE_PRIMARY_BUFFERS = 0x0000_0000_0002_0000;
        /// Allows the user to create uniform arrays of textures in shaders:
        ///
        /// eg. `uniform texture2D textures[10]`.
        ///
        /// This extension only allows them to exist and to be indexed by compile time constant
        /// values.
        ///
        /// Supported platforms:
        /// - DX12
        /// - Metal (with MSL 2.0+ on macOS 10.13+)
        /// - Vulkan
        ///
        /// This is a native only extension.
        const TEXTURE_BINDING_ARRAY = 0x0000_0000_0004_0000;
        /// Extensions which are part of the upstream webgpu standard
        const ALL_WEBGPU = 0x0000_0000_0000_FFFF;
        /// Extensions that require activating the unsafe extension flag
        const ALL_UNSAFE = 0xFFFF_0000_0000_0000;
        /// Extensions that are only available when targeting native (not web)
        const ALL_NATIVE = 0xFFFF_FFFF_FFFF_0000;
    }
}

#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct UnsafeExtensions {
    allow_unsafe: bool,
}
impl UnsafeExtensions {
    pub unsafe fn allow() -> Self {
        Self { allow_unsafe: true }
    }
    pub fn disallow() -> Self {
        Self {
            allow_unsafe: false,
        }
    }
    pub fn allowed(self) -> bool {
        self.allow_unsafe
    }
}

#[repr(C)]
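Illustrative aside, not part of the patch: a sketch of how a caller opts in or out of unsafe extensions with the new UnsafeExtensions token, and how the bitflags-based Extensions combine. It assumes this crate is consumed as a dependency under its library name wgt.

fn main() {
    let safe_only = wgt::UnsafeExtensions::disallow();
    assert!(!safe_only.allowed());

    // The caller must write `unsafe` explicitly to obtain a token that lets
    // adapters expose ALL_UNSAFE extensions.
    let allow_unsafe = unsafe { wgt::UnsafeExtensions::allow() };
    assert!(allow_unsafe.allowed());

    // Extension sets are plain bitflags now, so they combine with `|`.
    let wanted = wgt::Extensions::ANISOTROPIC_FILTERING | wgt::Extensions::MAPPABLE_PRIMARY_BUFFERS;
    assert!(wgt::Extensions::ALL_NATIVE.contains(wanted));
}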
@ -106,11 +206,15 @@ pub struct Extensions {
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct Limits {
    pub max_bind_groups: u32,
    pub _non_exhaustive: NonExhaustive,
}

impl Default for Limits {
    fn default() -> Self {
        Limits { max_bind_groups: 4 }
        Limits {
            max_bind_groups: 4,
            _non_exhaustive: unsafe { NonExhaustive::new() },
        }
    }
}

@ -121,6 +225,9 @@ impl Default for Limits {
pub struct DeviceDescriptor {
    pub extensions: Extensions,
    pub limits: Limits,
    /// Switch shader validation on/off. This is a temporary field
    /// that will be removed once our validation logic is complete.
    pub shader_validation: bool,
}

// TODO: This is copy/pasted from gfx-hal, so we need to find a new place to put
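Illustrative aside, not part of the patch: constructing the new DeviceDescriptor shape, assuming it has only the fields shown in this hunk and that the crate is consumed as a dependency under its library name wgt.

fn main() {
    let desc = wgt::DeviceDescriptor {
        extensions: wgt::Extensions::empty(),
        limits: wgt::Limits {
            max_bind_groups: 2,
            // Limits cannot be built exhaustively; fall back to the default
            // for the hidden field.
            ..wgt::Limits::default()
        },
        shader_validation: true,
    };
    assert_eq!(desc.limits.max_bind_groups, 2);
}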
|
||||
|
@ -166,7 +273,7 @@ pub fn read_spirv<R: io::Read + io::Seek>(mut x: R) -> io::Result<Vec<u32>> {
|
|||
bitflags::bitflags! {
|
||||
#[repr(transparent)]
|
||||
#[cfg_attr(feature = "trace", derive(Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(Deserialize))]
|
||||
#[cfg_attr(feature = "replay", derive(Deserialize))]
|
||||
pub struct ShaderStage: u32 {
|
||||
const NONE = 0;
|
||||
const VERTEX = 1;
|
||||
|
@ -389,7 +496,7 @@ pub enum TextureFormat {
|
|||
bitflags::bitflags! {
|
||||
#[repr(transparent)]
|
||||
#[cfg_attr(feature = "trace", derive(Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(Deserialize))]
|
||||
#[cfg_attr(feature = "replay", derive(Deserialize))]
|
||||
pub struct ColorWrite: u32 {
|
||||
const RED = 1;
|
||||
const GREEN = 2;
|
||||
|
@ -424,6 +531,9 @@ impl DepthStencilStateDescriptor {
|
|||
pub fn needs_stencil_reference(&self) -> bool {
|
||||
!self.stencil_front.compare.is_trivial() || !self.stencil_back.compare.is_trivial()
|
||||
}
|
||||
pub fn is_read_only(&self) -> bool {
|
||||
!self.depth_write_enabled && self.stencil_write_mask == 0
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
|
@ -590,6 +700,7 @@ pub struct BufferDescriptor<L> {
|
|||
pub label: L,
|
||||
pub size: BufferAddress,
|
||||
pub usage: BufferUsage,
|
||||
pub mapped_at_creation: bool,
|
||||
}
|
||||
|
||||
impl<L> BufferDescriptor<L> {
|
||||
|
@ -598,6 +709,7 @@ impl<L> BufferDescriptor<L> {
|
|||
label: fun(&self.label),
|
||||
size: self.size,
|
||||
usage: self.usage,
|
||||
mapped_at_creation: self.mapped_at_creation,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -719,9 +831,11 @@ pub struct RenderPassDepthStencilAttachmentDescriptorBase<T> {
|
|||
pub depth_load_op: LoadOp,
|
||||
pub depth_store_op: StoreOp,
|
||||
pub clear_depth: f32,
|
||||
pub depth_read_only: bool,
|
||||
pub stencil_load_op: LoadOp,
|
||||
pub stencil_store_op: StoreOp,
|
||||
pub clear_stencil: u32,
|
||||
pub stencil_read_only: bool,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
|
@ -920,8 +1034,7 @@ impl Default for FilterMode {
|
|||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Default, Clone, Debug, PartialEq)]
|
||||
#[cfg_attr(feature = "trace", derive(Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(Deserialize))]
|
||||
pub struct SamplerDescriptor<L> {
|
||||
|
@ -934,12 +1047,13 @@ pub struct SamplerDescriptor<L> {
|
|||
pub mipmap_filter: FilterMode,
|
||||
pub lod_min_clamp: f32,
|
||||
pub lod_max_clamp: f32,
|
||||
pub compare: CompareFunction,
|
||||
pub compare: Option<CompareFunction>,
|
||||
/// Anisotropic filtering extension must be enabled if this value is
|
||||
/// anything other than 0 and 1.
|
||||
/// anything other than 0 or 1.
|
||||
///
|
||||
/// Valid values are 0, 1, 2, 4, 8, and 16.
|
||||
pub anisotropy_clamp: u8,
|
||||
/// Valid values: 1, 2, 4, 8, and 16.
|
||||
pub anisotropy_clamp: Option<u8>,
|
||||
pub _non_exhaustive: NonExhaustive,
|
||||
}
|
||||
|
||||
impl<L> SamplerDescriptor<L> {
|
||||
|
@ -956,6 +1070,7 @@ impl<L> SamplerDescriptor<L> {
|
|||
lod_max_clamp: self.lod_max_clamp,
|
||||
compare: self.compare,
|
||||
anisotropy_clamp: self.anisotropy_clamp,
|
||||
_non_exhaustive: self._non_exhaustive,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1035,6 +1150,155 @@ pub struct TextureDataLayout {
|
|||
pub rows_per_image: u32,
|
||||
}
|
||||
|
||||
/// Specific type of a binding.
|
||||
/// WebGPU spec: https://gpuweb.github.io/gpuweb/#dictdef-gpubindgrouplayoutentry
|
||||
#[non_exhaustive]
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
#[cfg_attr(feature = "trace", derive(Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(Deserialize))]
|
||||
pub enum BindingType {
|
||||
/// A buffer for uniform values.
|
||||
///
|
||||
/// Example GLSL syntax:
|
||||
/// ```cpp,ignore
|
||||
/// layout(std140, binding = 0)
|
||||
/// uniform Globals {
|
||||
/// vec2 aUniform;
|
||||
/// vec2 anotherUniform;
|
||||
/// };
|
||||
/// ```
|
||||
UniformBuffer {
|
||||
/// Indicates that the binding has a dynamic offset.
|
||||
/// One offset must be passed to [RenderPass::set_bind_group] for each dynamic binding in increasing order of binding number.
|
||||
dynamic: bool,
|
||||
},
|
||||
/// A storage buffer.
|
||||
///
|
||||
/// Example GLSL syntax:
|
||||
/// ```cpp,ignore
|
||||
/// layout (set=0, binding=0) buffer myStorageBuffer {
|
||||
/// vec4 myElement[];
|
||||
/// };
|
||||
/// ```
|
||||
StorageBuffer {
|
||||
/// Indicates that the binding has a dynamic offset.
|
||||
/// One offset must be passed to [RenderPass::set_bind_group] for each dynamic binding in increasing order of binding number.
|
||||
dynamic: bool,
|
||||
/// The buffer can only be read in the shader and it must be annotated with `readonly`.
|
||||
///
|
||||
/// Example GLSL syntax:
|
||||
/// ```cpp,ignore
|
||||
/// layout (set=0, binding=0) readonly buffer myStorageBuffer {
|
||||
/// vec4 myElement[];
|
||||
/// };
|
||||
/// ```
|
||||
readonly: bool,
|
||||
},
|
||||
/// A sampler that can be used to sample a texture.
|
||||
///
|
||||
/// Example GLSL syntax:
|
||||
/// ```cpp,ignore
|
||||
/// layout(binding = 0)
|
||||
/// uniform sampler s;
|
||||
/// ```
|
||||
Sampler {
|
||||
/// Use as a comparison sampler instead of a normal sampler.
|
||||
/// For more info take a look at the analogous functionality in OpenGL: https://www.khronos.org/opengl/wiki/Sampler_Object#Comparison_mode.
|
||||
comparison: bool,
|
||||
},
|
||||
/// A texture.
|
||||
///
|
||||
/// Example GLSL syntax:
|
||||
/// ```cpp,ignore
|
||||
/// layout(binding = 0)
|
||||
/// uniform texture2D t;
|
||||
/// ```
|
||||
SampledTexture {
|
||||
/// Dimension of the texture view that is going to be sampled.
|
||||
dimension: TextureViewDimension,
|
||||
/// Component type of the texture.
|
||||
/// This must be compatible with the format of the texture.
|
||||
component_type: TextureComponentType,
|
||||
/// True if the texture has a sample count greater than 1.
|
||||
multisampled: bool,
|
||||
},
|
||||
/// A storage texture.
|
||||
///
|
||||
/// Example GLSL syntax:
|
||||
/// ```cpp,ignore
|
||||
/// layout(set=0, binding=0, r32f) uniform image2D myStorageImage;
|
||||
/// ```
|
||||
/// Note that the texture format must be specified in the shader as well.
|
||||
/// A list of valid formats can be found in the specification here: https://www.khronos.org/registry/OpenGL/specs/gl/GLSLangSpec.4.60.html#layout-qualifiers
|
||||
StorageTexture {
|
||||
/// Dimension of the texture view that is going to be sampled.
|
||||
dimension: TextureViewDimension,
|
||||
/// Component type of the texture.
|
||||
/// This must be compatible with the format of the texture.
|
||||
component_type: TextureComponentType,
|
||||
/// Format of the texture.
|
||||
format: TextureFormat,
|
||||
/// The texture can only be read in the shader and it must be annotated with `readonly`.
|
||||
///
|
||||
/// Example GLSL syntax:
|
||||
/// ```cpp,ignore
|
||||
/// layout(set=0, binding=0, r32f) readonly uniform image2D myStorageImage;
|
||||
/// ```
|
||||
readonly: bool,
|
||||
},
|
||||
}
|
||||
|
||||
/// A description of a single binding inside a bind group.
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "trace", derive(Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(Deserialize))]
|
||||
pub struct BindGroupLayoutEntry {
|
||||
pub binding: u32,
|
||||
pub visibility: ShaderStage,
|
||||
pub ty: BindingType,
|
||||
/// If this value is Some, indicates this entry is an array. Array size must be 1 or greater.
|
||||
///
|
||||
/// If this value is Some and `ty` is `BindingType::SampledTexture`, the TEXTURE_BINDING_ARRAY extension must be enabled.
|
||||
///
|
||||
/// If this value is Some and `ty` is any other variant, bind group creation will fail.
|
||||
pub count: Option<u32>,
|
||||
/// This struct should be partially initialized using the default method, but binding, visibility,
|
||||
/// and ty should be set.
|
||||
pub _non_exhaustive: NonExhaustive,
|
||||
}
|
||||
|
||||
impl Default for BindGroupLayoutEntry {
    fn default() -> Self {
        Self {
            binding: 0,
            visibility: ShaderStage::NONE,
            ty: BindingType::UniformBuffer { dynamic: false },
            count: None,
            _non_exhaustive: unsafe { NonExhaustive::new() },
        }
    }
}

impl BindGroupLayoutEntry {
    pub fn has_dynamic_offset(&self) -> bool {
        match self.ty {
            BindingType::UniformBuffer { dynamic, .. }
            | BindingType::StorageBuffer { dynamic, .. } => dynamic,
            _ => false,
        }
    }
}
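// Illustrative sketch (not part of the patch) of the construction pattern the doc
// comment above describes: set `binding`, `visibility` and `ty` explicitly and let
// `Default` fill in the rest. `ShaderStage::FRAGMENT` is assumed to exist alongside
// the variants shown in this diff; the function name is hypothetical.
fn sampler_layout_entry(binding: u32) -> BindGroupLayoutEntry {
    BindGroupLayoutEntry {
        binding,
        visibility: ShaderStage::FRAGMENT,
        ty: BindingType::Sampler { comparison: false },
        ..Default::default()
    }
}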
|
||||
|
||||
/// A description of a bind group layout.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct BindGroupLayoutDescriptor<'a> {
|
||||
/// An optional label to apply to the bind group layout.
|
||||
/// This can be useful for debugging and performance analysis.
|
||||
pub label: Option<&'a str>,
|
||||
|
||||
pub bindings: &'a [BindGroupLayoutEntry],
|
||||
}
|
||||
|
||||
/// This type allows us to make the serialized representation of a BufferSize more human-readable
|
||||
#[allow(dead_code)]
|
||||
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
|
||||
|
|
|
@ -10,8 +10,11 @@ autogen_warning = """/* DO NOT MODIFY THIS MANUALLY! This file was generated usi
|
|||
|
||||
typedef uint64_t WGPUNonZeroU64;
|
||||
typedef uint64_t WGPUOption_AdapterId;
|
||||
typedef uint64_t WGPUOption_BufferId;
|
||||
typedef uint64_t WGPUOption_SamplerId;
|
||||
typedef uint64_t WGPUOption_SurfaceId;
|
||||
typedef uint64_t WGPUOption_TextureViewId;
|
||||
typedef char WGPUNonExhaustive[0];
|
||||
"""
|
||||
include_version = true
|
||||
braces = "SameLine"
|
||||
|
@ -22,7 +25,8 @@ style = "tag"
|
|||
|
||||
[export]
|
||||
prefix = "WGPU"
|
||||
exclude = ["Option_AdapterId", "Option_SurfaceId", "Option_TextureViewId"]
|
||||
include = ["TextureComponentType", "TextureViewDimension"]
|
||||
exclude = ["NonExhaustive", "Option_AdapterId", "Option_BufferId", "Option_SamplerId", "Option_SurfaceId", "Option_TextureViewId"]
|
||||
|
||||
[export.rename]
|
||||
"BufferDescriptor_RawString" = "BufferDescriptor"
|
||||
|
|
|
@ -12,7 +12,7 @@ UNIFIED_SOURCES += [
|
|||
]
|
||||
|
||||
if CONFIG['COMPILE_ENVIRONMENT']:
|
||||
CbindgenHeader('wgpu_ffi_generated.h', inputs=['/gfx/wgpu_bindings'])
|
||||
CbindgenHeader('wgpu_ffi_generated.h', inputs=['/gfx/wgpu_bindings', '/gfx/wgpu'])
|
||||
|
||||
EXPORTS.mozilla.webgpu.ffi += [
|
||||
'!wgpu_ffi_generated.h',
|
||||
|
|
|
@ -6,15 +6,109 @@ use crate::identity::IdentityRecyclerFactory;
|
|||
|
||||
use wgc::{gfx_select, id};
|
||||
|
||||
use std::slice;
|
||||
use std::{marker::PhantomData, mem, slice};
|
||||
|
||||
pub type Global = wgc::hub::Global<IdentityRecyclerFactory>;
|
||||
// hide wgc's global in private
|
||||
pub struct Global(wgc::hub::Global<IdentityRecyclerFactory>);
|
||||
pub type RawString = *const std::os::raw::c_char;
|
||||
|
||||
impl std::ops::Deref for Global {
|
||||
type Target = wgc::hub::Global<IdentityRecyclerFactory>;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
pub enum RawBindingType {
|
||||
UniformBuffer,
|
||||
StorageBuffer,
|
||||
ReadonlyStorageBuffer,
|
||||
Sampler,
|
||||
ComparisonSampler,
|
||||
SampledTexture,
|
||||
ReadonlyStorageTexture,
|
||||
WriteonlyStorageTexture,
|
||||
}
|
||||
|
||||
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct RawEnumOption<T>(u32, PhantomData<T>);

impl<T: Copy> From<Option<T>> for RawEnumOption<T> {
    fn from(option: Option<T>) -> Self {
        debug_assert_eq!(mem::size_of::<T>(), 4);
        let value = match option {
            Some(ref v) => unsafe { *mem::transmute::<*const T, *const u32>(v) },
            None => !0,
        };
        RawEnumOption(value, PhantomData)
    }
}

impl<T: Copy> RawEnumOption<T> {
    fn unwrap(self) -> T {
        assert_ne!(self.0, !0);
        unsafe { *mem::transmute::<*const u32, *const T>(&self.0) }
    }
}
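// Illustrative sketch (not part of the patch): the FFI layer round-trips a 4-byte
// enum through `RawEnumOption`, using `!0` as the niche for `None`. The enum below
// is hypothetical; any `Copy` enum with a 4-byte representation would do.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum DemoDimension {
    D1 = 0,
    D2 = 1,
}

fn demo_round_trip() {
    let raw: RawEnumOption<DemoDimension> = Some(DemoDimension::D2).into();
    assert_eq!(raw.unwrap(), DemoDimension::D2);
}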
|
||||
|
||||
#[repr(C)]
|
||||
pub struct BindGroupLayoutEntry {
|
||||
pub binding: u32,
|
||||
pub visibility: wgt::ShaderStage,
|
||||
pub ty: RawBindingType,
|
||||
pub has_dynamic_offset: bool,
|
||||
pub view_dimension: RawEnumOption<wgt::TextureViewDimension>,
|
||||
pub texture_component_type: RawEnumOption<wgt::TextureComponentType>,
|
||||
pub multisampled: bool,
|
||||
pub storage_texture_format: RawEnumOption<wgt::TextureFormat>,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
pub struct BindGroupLayoutDescriptor {
|
||||
pub label: RawString,
|
||||
pub entries: *const BindGroupLayoutEntry,
|
||||
pub entries_length: usize,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct BindGroupEntry {
|
||||
pub binding: u32,
|
||||
pub buffer: Option<id::BufferId>,
|
||||
pub offset: wgt::BufferAddress,
|
||||
pub size: wgt::BufferSize,
|
||||
pub sampler: Option<id::SamplerId>,
|
||||
pub texture_view: Option<id::TextureViewId>,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
pub struct BindGroupDescriptor {
|
||||
pub label: RawString,
|
||||
pub layout: id::BindGroupLayoutId,
|
||||
pub entries: *const BindGroupEntry,
|
||||
pub entries_length: usize,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
pub struct SamplerDescriptor<'a> {
|
||||
pub label: RawString,
|
||||
pub address_modes: [wgt::AddressMode; 3],
|
||||
pub mag_filter: wgt::FilterMode,
|
||||
pub min_filter: wgt::FilterMode,
|
||||
pub mipmap_filter: wgt::FilterMode,
|
||||
pub lod_min_clamp: f32,
|
||||
pub lod_max_clamp: f32,
|
||||
pub compare: Option<&'a wgt::CompareFunction>,
|
||||
pub anisotropy_clamp: u8,
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_server_new(factory: IdentityRecyclerFactory) -> *mut Global {
|
||||
log::info!("Initializing WGPU server");
|
||||
Box::into_raw(Box::new(Global::new("wgpu", factory)))
|
||||
let global = Global(wgc::hub::Global::new("wgpu", factory));
|
||||
Box::into_raw(Box::new(global))
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
|
@ -52,6 +146,7 @@ pub unsafe extern "C" fn wgpu_server_instance_request_adapter(
|
|||
let ids = slice::from_raw_parts(ids, id_length);
|
||||
match global.pick_adapter(
|
||||
desc,
|
||||
wgt::UnsafeExtensions::disallow(),
|
||||
wgc::instance::AdapterInputs::IdSet(ids, |i| i.backend()),
|
||||
) {
|
||||
Some(id) => ids.iter().position(|&i| i == id).unwrap() as i8,
|
||||
|
@ -93,21 +188,19 @@ pub extern "C" fn wgpu_server_device_create_buffer(
|
|||
gfx_select!(self_id => global.device_create_buffer(self_id, desc, new_id));
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function is unsafe as there is no guarantee that the given pointer is
|
||||
/// valid for `size` elements.
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn wgpu_server_device_set_buffer_sub_data(
|
||||
pub extern "C" fn wgpu_server_buffer_map(
|
||||
global: &Global,
|
||||
self_id: id::DeviceId,
|
||||
buffer_id: id::BufferId,
|
||||
offset: wgt::BufferAddress,
|
||||
data: *const u8,
|
||||
start: wgt::BufferAddress,
|
||||
size: wgt::BufferAddress,
|
||||
operation: wgc::resource::BufferMapOperation,
|
||||
) {
|
||||
let slice = slice::from_raw_parts(data, size as usize);
|
||||
gfx_select!(self_id => global.device_set_buffer_sub_data(self_id, buffer_id, offset, slice));
|
||||
gfx_select!(buffer_id => global.buffer_map_async(
|
||||
buffer_id,
|
||||
start .. start + size,
|
||||
operation
|
||||
));
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
|
@ -115,21 +208,17 @@ pub unsafe extern "C" fn wgpu_server_device_set_buffer_sub_data(
|
|||
/// This function is unsafe as there is no guarantee that the given pointer is
|
||||
/// valid for `size` elements.
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_server_buffer_map_read(
|
||||
pub unsafe extern "C" fn wgpu_server_buffer_get_mapped_range(
|
||||
global: &Global,
|
||||
buffer_id: id::BufferId,
|
||||
start: wgt::BufferAddress,
|
||||
size: wgt::BufferAddress,
|
||||
callback: wgc::device::BufferMapReadCallback,
|
||||
userdata: *mut u8,
|
||||
) {
|
||||
let operation = wgc::resource::BufferMapOperation::Read { callback, userdata };
|
||||
|
||||
gfx_select!(buffer_id => global.buffer_map_async(
|
||||
) -> *mut u8 {
|
||||
gfx_select!(buffer_id => global.buffer_get_mapped_range(
|
||||
buffer_id,
|
||||
start .. start + size,
|
||||
operation
|
||||
));
|
||||
start,
|
||||
wgt::BufferSize(size)
|
||||
))
|
||||
}
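// Illustrative sketch (not part of the patch): the intended call order for the new
// mapping API, combining the two entry points above. The caller builds the
// `BufferMapOperation` (callback plus user data) and must wait for the map callback
// to fire before dereferencing the returned pointer. The helper name is hypothetical.
fn map_then_get_range(
    global: &Global,
    buffer_id: id::BufferId,
    start: wgt::BufferAddress,
    size: wgt::BufferAddress,
    operation: wgc::resource::BufferMapOperation,
) -> *mut u8 {
    // Queue the asynchronous map of `start .. start + size`.
    gfx_select!(buffer_id => global.buffer_map_async(
        buffer_id,
        start .. start + size,
        operation
    ));
    // After the map resolves, expose the mapped bytes as a raw pointer.
    gfx_select!(buffer_id => global.buffer_get_mapped_range(
        buffer_id,
        start,
        wgt::BufferSize(size)
    ))
}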
|
||||
|
||||
#[no_mangle]
|
||||
|
@ -306,14 +395,63 @@ pub unsafe extern "C" fn wgpu_server_queue_write_texture(
|
|||
gfx_select!(self_id => global.queue_write_texture(self_id, destination, data, layout, extent));
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function is unsafe as there is no guarantee that the given pointer is
|
||||
/// valid for `entries_length` elements.
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_server_device_create_bind_group_layout(
|
||||
pub unsafe extern "C" fn wgpu_server_device_create_bind_group_layout(
|
||||
global: &Global,
|
||||
self_id: id::DeviceId,
|
||||
desc: &wgc::binding_model::BindGroupLayoutDescriptor,
|
||||
desc: &BindGroupLayoutDescriptor,
|
||||
new_id: id::BindGroupLayoutId,
|
||||
) {
|
||||
gfx_select!(self_id => global.device_create_bind_group_layout(self_id, desc, new_id));
|
||||
let entries = slice::from_raw_parts(desc.entries, desc.entries_length);
|
||||
let bindings = entries
|
||||
.iter()
|
||||
.map(|entry| wgt::BindGroupLayoutEntry {
|
||||
binding: entry.binding,
|
||||
visibility: entry.visibility,
|
||||
ty: match entry.ty {
|
||||
RawBindingType::UniformBuffer => wgt::BindingType::UniformBuffer {
|
||||
dynamic: entry.has_dynamic_offset,
|
||||
},
|
||||
RawBindingType::StorageBuffer => wgt::BindingType::StorageBuffer {
|
||||
dynamic: entry.has_dynamic_offset,
|
||||
readonly: false,
|
||||
},
|
||||
RawBindingType::ReadonlyStorageBuffer => wgt::BindingType::StorageBuffer {
|
||||
dynamic: entry.has_dynamic_offset,
|
||||
readonly: true,
|
||||
},
|
||||
RawBindingType::Sampler => wgt::BindingType::Sampler { comparison: false },
|
||||
RawBindingType::ComparisonSampler => wgt::BindingType::Sampler { comparison: true },
|
||||
RawBindingType::SampledTexture => wgt::BindingType::SampledTexture {
|
||||
dimension: entry.view_dimension.unwrap(),
|
||||
component_type: entry.texture_component_type.unwrap(),
|
||||
multisampled: entry.multisampled,
|
||||
},
|
||||
RawBindingType::ReadonlyStorageTexture => wgt::BindingType::StorageTexture {
|
||||
dimension: entry.view_dimension.unwrap(),
|
||||
component_type: entry.texture_component_type.unwrap(),
|
||||
format: entry.storage_texture_format.unwrap(),
|
||||
readonly: true,
|
||||
},
|
||||
RawBindingType::WriteonlyStorageTexture => wgt::BindingType::StorageTexture {
|
||||
dimension: entry.view_dimension.unwrap(),
|
||||
component_type: entry.texture_component_type.unwrap(),
|
||||
format: entry.storage_texture_format.unwrap(),
|
||||
readonly: false,
|
||||
},
|
||||
},
|
||||
..Default::default()
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let desc = wgt::BindGroupLayoutDescriptor {
|
||||
label: None,
|
||||
bindings: &bindings,
|
||||
};
|
||||
gfx_select!(self_id => global.device_create_bind_group_layout(self_id, &desc, new_id)).unwrap();
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
|
@ -331,7 +469,7 @@ pub extern "C" fn wgpu_server_device_create_pipeline_layout(
|
|||
desc: &wgc::binding_model::PipelineLayoutDescriptor,
|
||||
new_id: id::PipelineLayoutId,
|
||||
) {
|
||||
gfx_select!(self_id => global.device_create_pipeline_layout(self_id, desc, new_id));
|
||||
gfx_select!(self_id => global.device_create_pipeline_layout(self_id, desc, new_id)).unwrap();
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
|
@ -342,14 +480,43 @@ pub extern "C" fn wgpu_server_pipeline_layout_destroy(
|
|||
gfx_select!(self_id => global.pipeline_layout_destroy(self_id));
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function is unsafe as there is no guarantee that the given pointer is
|
||||
/// valid for `entries_length` elements.
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_server_device_create_bind_group(
|
||||
pub unsafe extern "C" fn wgpu_server_device_create_bind_group(
|
||||
global: &Global,
|
||||
self_id: id::DeviceId,
|
||||
desc: &wgc::binding_model::BindGroupDescriptor,
|
||||
desc: &BindGroupDescriptor,
|
||||
new_id: id::BindGroupId,
|
||||
) {
|
||||
gfx_select!(self_id => global.device_create_bind_group(self_id, desc, new_id));
|
||||
let entries = slice::from_raw_parts(desc.entries, desc.entries_length);
|
||||
let bindings = entries
|
||||
.iter()
|
||||
.map(|entry| wgc::binding_model::BindGroupEntry {
|
||||
binding: entry.binding,
|
||||
resource: if let Some(id) = entry.buffer {
|
||||
wgc::binding_model::BindingResource::Buffer(wgc::binding_model::BufferBinding {
|
||||
buffer: id,
|
||||
offset: entry.offset,
|
||||
size: entry.size,
|
||||
})
|
||||
} else if let Some(id) = entry.sampler {
|
||||
wgc::binding_model::BindingResource::Sampler(id)
|
||||
} else if let Some(id) = entry.texture_view {
|
||||
wgc::binding_model::BindingResource::TextureView(id)
|
||||
} else {
|
||||
panic!("Unrecognized binding entry: {:?}", entry);
|
||||
},
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let desc = wgc::binding_model::BindGroupDescriptor {
|
||||
label: None,
|
||||
layout: desc.layout,
|
||||
bindings: &bindings,
|
||||
};
|
||||
gfx_select!(self_id => global.device_create_bind_group(self_id, &desc, new_id));
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
|
@ -379,7 +546,7 @@ pub extern "C" fn wgpu_server_device_create_compute_pipeline(
|
|||
desc: &wgc::pipeline::ComputePipelineDescriptor,
|
||||
new_id: id::ComputePipelineId,
|
||||
) {
|
||||
gfx_select!(self_id => global.device_create_compute_pipeline(self_id, desc, new_id));
|
||||
gfx_select!(self_id => global.device_create_compute_pipeline(self_id, desc, new_id)).unwrap();
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
|
@ -397,7 +564,7 @@ pub extern "C" fn wgpu_server_device_create_render_pipeline(
|
|||
desc: &wgc::pipeline::RenderPipelineDescriptor,
|
||||
new_id: id::RenderPipelineId,
|
||||
) {
|
||||
gfx_select!(self_id => global.device_create_render_pipeline(self_id, desc, new_id));
|
||||
gfx_select!(self_id => global.device_create_render_pipeline(self_id, desc, new_id)).unwrap();
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
|
@ -442,10 +609,28 @@ pub extern "C" fn wgpu_server_texture_view_destroy(global: &Global, self_id: id:
|
|||
pub extern "C" fn wgpu_server_device_create_sampler(
|
||||
global: &Global,
|
||||
self_id: id::DeviceId,
|
||||
desc: &wgt::SamplerDescriptor<RawString>,
|
||||
desc: &SamplerDescriptor,
|
||||
new_id: id::SamplerId,
|
||||
) {
|
||||
gfx_select!(self_id => global.device_create_sampler(self_id, desc, new_id));
|
||||
let desc = wgt::SamplerDescriptor {
|
||||
label: desc.label,
|
||||
address_mode_u: desc.address_modes[0],
|
||||
address_mode_v: desc.address_modes[1],
|
||||
address_mode_w: desc.address_modes[2],
|
||||
mag_filter: desc.mag_filter,
|
||||
min_filter: desc.min_filter,
|
||||
mipmap_filter: desc.mipmap_filter,
|
||||
lod_min_clamp: desc.lod_min_clamp,
|
||||
lod_max_clamp: desc.lod_max_clamp,
|
||||
compare: desc.compare.cloned(),
|
||||
anisotropy_clamp: if desc.anisotropy_clamp > 1 {
|
||||
Some(desc.anisotropy_clamp)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
_non_exhaustive: unsafe { wgt::NonExhaustive::new() },
|
||||
};
|
||||
gfx_select!(self_id => global.device_create_sampler(self_id, &desc, new_id));
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
|
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"b8f48d969b060df6ec59623515b15e37c368cd3eb3db6c8e7c9a21711a02569b","README.md":"53ad6efa9975b59f9ab830c26c940e18c3e41efd107cd34d670771a8ba53ae1a","shaders/blit.hlsl":"1f8819f3a91acf71a69bfd14ccd245180a92a9efb0ab76bf6b2e89aae10f3952","src/command.rs":"608edf7c3d4dd2ced812bbb44513c0e38ae7274d516645d17296ac8a5a32f1a6","src/conv.rs":"157753744baa9fc74e2d185d0595bb23fbfbff702b8e04caaec4dfd45ec58d34","src/descriptors_cpu.rs":"2d8434fa23b71db6e51f4b6e68a63c4ce46159d74f027da2a309c4c0323956a7","src/device.rs":"800ae8823c20544007d7ed64d6018ba542c6efc93cc0bede7f689d9ef77e11d0","src/internal.rs":"374bf4f7fa58e687a29e3e3a5d78aa8965e39b8a89b49d591b827894f735b386","src/lib.rs":"2c8360f85165c8fe5a847ae4157554d7ebd6f91a34518b43541d88035f9bc419","src/pool.rs":"cc370b53a6843bcdbc507af013650505a60ab8b617747a3cb38c773e4ec1c6d7","src/resource.rs":"043b12a1cebdd87707815c4e6a1d42877ea5cbfc2f7d2e0458bf8e20970a86b4","src/root_constants.rs":"fce7f096f6a06c6796c937be0e1e9ae12c8183177a69be3387b33e33cf1d1b67","src/window.rs":"2dd210b0de1fb0ea1748110a5df35135c5231748a0d865b928ec7471bf0eaf9e"},"package":"37365e2927d55cefac0d3f78dfd1d3119fbb13a8bd7afe2409d729961fee22fc"}
|
||||
{"files":{"Cargo.toml":"841e48557a38e04ee3cc9102743830be1c62c6ba615ca193184362a5624dd2b9","README.md":"53ad6efa9975b59f9ab830c26c940e18c3e41efd107cd34d670771a8ba53ae1a","shaders/blit.hlsl":"1f8819f3a91acf71a69bfd14ccd245180a92a9efb0ab76bf6b2e89aae10f3952","src/command.rs":"1e844d2ce2756283d33895ea4fdbbc1dab7ff1a83de5ea5c9e265a8936ec099d","src/conv.rs":"2134527ccc69cc92deebe84c0695842b96aff97610d69e1661cf7e404973b445","src/descriptors_cpu.rs":"2d8434fa23b71db6e51f4b6e68a63c4ce46159d74f027da2a309c4c0323956a7","src/device.rs":"0fbb5f1802fd5c479365f21b8a67d16c314744f37f3670de9f6a27015990111f","src/internal.rs":"374bf4f7fa58e687a29e3e3a5d78aa8965e39b8a89b49d591b827894f735b386","src/lib.rs":"2e28935099bbd3c8a6a65d1b1eb8dc013f96004723008c42b3738f2389170191","src/pool.rs":"cc370b53a6843bcdbc507af013650505a60ab8b617747a3cb38c773e4ec1c6d7","src/resource.rs":"01b0b06896d9ea79d0f254bfb520ec020e0c31ea5febc5170f29400739ae43b3","src/root_constants.rs":"fce7f096f6a06c6796c937be0e1e9ae12c8183177a69be3387b33e33cf1d1b67","src/window.rs":"42a63638cb1a758ebdc29dfd5e825ae576d406a6e4f4dcbce41142983b934da8"},"package":"cfd506627f3a7003e80f4344123184ce60ed06822c8b8ad2ae4ec674a512ca86"}
|
|
@ -13,7 +13,7 @@
|
|||
[package]
|
||||
edition = "2018"
|
||||
name = "gfx-backend-dx12"
|
||||
version = "0.5.3"
|
||||
version = "0.5.9"
|
||||
authors = ["The Gfx-rs Developers"]
|
||||
description = "DirectX-12 API backend for gfx-rs"
|
||||
homepage = "https://github.com/gfx-rs/gfx"
|
||||
|
@ -36,7 +36,7 @@ package = "gfx-auxil"
|
|||
version = "1"
|
||||
|
||||
[dependencies.hal]
|
||||
version = "0.5"
|
||||
version = "0.5.3"
|
||||
package = "gfx-hal"
|
||||
|
||||
[dependencies.log]
|
||||
|
@ -48,7 +48,7 @@ features = ["libloading"]
|
|||
package = "d3d12"
|
||||
|
||||
[dependencies.range-alloc]
|
||||
version = "0.1"
|
||||
version = "0.1.1"
|
||||
|
||||
[dependencies.raw-window-handle]
|
||||
version = "0.3"
|
||||
|
|
|
@ -342,6 +342,7 @@ pub struct CommandBuffer {
|
|||
raw: native::GraphicsCommandList,
|
||||
allocator: native::CommandAllocator,
|
||||
shared: Arc<Shared>,
|
||||
is_active: bool,
|
||||
|
||||
// Cache renderpasses for graphics operations
|
||||
pass_cache: Option<RenderPassCache>,
|
||||
|
@ -431,6 +432,7 @@ impl CommandBuffer {
|
|||
raw,
|
||||
allocator,
|
||||
shared,
|
||||
is_active: false,
|
||||
pass_cache: None,
|
||||
cur_subpass: !0,
|
||||
gr_pipeline: PipelineCache::new(),
|
||||
|
@ -470,8 +472,22 @@ impl CommandBuffer {
|
|||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
if self
|
||||
.pool_create_flags
|
||||
.contains(pool::CommandPoolCreateFlags::RESET_INDIVIDUAL)
|
||||
{
|
||||
// Command buffer has reset semantics now and doesn't require to be in `Initial` state.
|
||||
if self.is_active {
|
||||
self.raw.close();
|
||||
}
|
||||
unsafe {
|
||||
self.allocator.Reset()
|
||||
};
|
||||
}
|
||||
self.raw
|
||||
.reset(self.allocator, native::PipelineState::null());
|
||||
self.is_active = true;
|
||||
|
||||
self.pass_cache = None;
|
||||
self.cur_subpass = !0;
|
||||
self.gr_pipeline = PipelineCache::new();
|
||||
|
@ -1162,29 +1178,15 @@ impl com::CommandBuffer<Backend> for CommandBuffer {
|
|||
_info: com::CommandBufferInheritanceInfo<Backend>,
|
||||
) {
|
||||
// TODO: Implement flags and secondary command buffers (bundles).
|
||||
if self
|
||||
.pool_create_flags
|
||||
.contains(pool::CommandPoolCreateFlags::RESET_INDIVIDUAL)
|
||||
{
|
||||
// Command buffer has reset semantics now and doesn't require to be in `Initial` state.
|
||||
self.allocator.Reset();
|
||||
}
|
||||
self.reset();
|
||||
}
|
||||
|
||||
unsafe fn finish(&mut self) {
|
||||
self.raw.Close();
|
||||
self.raw.close();
|
||||
self.is_active = false;
|
||||
}
|
||||
|
||||
unsafe fn reset(&mut self, _release_resources: bool) {
|
||||
// Ensure that we have a bijective relation between list and allocator.
|
||||
// This allows to modify the allocator here. Using `reset` requires this by specification.
|
||||
assert!(self
|
||||
.pool_create_flags
|
||||
.contains(pool::CommandPoolCreateFlags::RESET_INDIVIDUAL));
|
||||
|
||||
// TODO: `release_resources` should recreate the allocator to give back all memory.
|
||||
self.allocator.Reset();
|
||||
self.reset();
|
||||
}
|
||||
|
||||
|
@ -1510,7 +1512,7 @@ impl com::CommandBuffer<Backend> for CommandBuffer {
|
|||
},
|
||||
};
|
||||
let rtv = rtv_pool.alloc_handle();
|
||||
Device::view_image_as_render_target_impl(device, rtv, view_info).unwrap();
|
||||
Device::view_image_as_render_target_impl(device, rtv, &view_info).unwrap();
|
||||
self.clear_render_target_view(rtv, value.into(), &rect);
|
||||
}
|
||||
|
||||
|
@ -1555,7 +1557,7 @@ impl com::CommandBuffer<Backend> for CommandBuffer {
|
|||
},
|
||||
};
|
||||
let dsv = dsv_pool.alloc_handle();
|
||||
Device::view_image_as_depth_stencil_impl(device, dsv, view_info).unwrap();
|
||||
Device::view_image_as_depth_stencil_impl(device, dsv, &view_info).unwrap();
|
||||
self.clear_depth_stencil_view(dsv, depth, stencil, &rect);
|
||||
}
|
||||
|
||||
|
@ -2477,7 +2479,9 @@ impl com::CommandBuffer<Backend> for CommandBuffer {
|
|||
draw_count: DrawCount,
|
||||
stride: u32,
|
||||
) {
|
||||
assert_eq!(stride, 16);
|
||||
if stride != 0 {
|
||||
assert_eq!(stride, 16);
|
||||
}
|
||||
let buffer = buffer.expect_bound();
|
||||
self.set_graphics_bind_point();
|
||||
self.raw.ExecuteIndirect(
|
||||
|
@ -2497,7 +2501,9 @@ impl com::CommandBuffer<Backend> for CommandBuffer {
|
|||
draw_count: DrawCount,
|
||||
stride: u32,
|
||||
) {
|
||||
assert_eq!(stride, 20);
|
||||
if stride != 0 {
|
||||
assert_eq!(stride, 20);
|
||||
}
|
||||
let buffer = buffer.expect_bound();
|
||||
self.set_graphics_bind_point();
|
||||
self.raw.ExecuteIndirect(
|
||||
|
@ -2510,6 +2516,56 @@ impl com::CommandBuffer<Backend> for CommandBuffer {
|
|||
);
|
||||
}
|
||||
|
||||
unsafe fn draw_indirect_count(
|
||||
&mut self,
|
||||
buffer: &r::Buffer,
|
||||
offset: buffer::Offset,
|
||||
count_buffer: &r::Buffer,
|
||||
count_buffer_offset: buffer::Offset,
|
||||
max_draw_count: DrawCount,
|
||||
stride: u32
|
||||
) {
|
||||
if stride != 0 {
|
||||
assert_eq!(stride, 16);
|
||||
}
|
||||
let buffer = buffer.expect_bound();
|
||||
let count_buffer = count_buffer.expect_bound();
|
||||
self.set_graphics_bind_point();
|
||||
self.raw.ExecuteIndirect(
|
||||
self.shared.signatures.draw.as_mut_ptr(),
|
||||
max_draw_count,
|
||||
buffer.resource.as_mut_ptr(),
|
||||
offset,
|
||||
count_buffer.resource.as_mut_ptr(),
|
||||
count_buffer_offset,
|
||||
);
|
||||
}
|
||||
|
||||
unsafe fn draw_indexed_indirect_count(
|
||||
&mut self,
|
||||
buffer: &r::Buffer,
|
||||
offset: buffer::Offset,
|
||||
count_buffer: &r::Buffer,
|
||||
count_buffer_offset: buffer::Offset,
|
||||
max_draw_count: DrawCount,
|
||||
stride: u32
|
||||
) {
|
||||
if stride != 0 {
|
||||
assert_eq!(stride, 20);
|
||||
}
|
||||
let buffer = buffer.expect_bound();
|
||||
let count_buffer = count_buffer.expect_bound();
|
||||
self.set_graphics_bind_point();
|
||||
self.raw.ExecuteIndirect(
|
||||
self.shared.signatures.draw_indexed.as_mut_ptr(),
|
||||
max_draw_count,
|
||||
buffer.resource.as_mut_ptr(),
|
||||
offset,
|
||||
count_buffer.resource.as_mut_ptr(),
|
||||
count_buffer_offset,
|
||||
);
|
||||
}
|
||||
|
||||
unsafe fn set_event(&mut self, _: &(), _: pso::PipelineStage) {
|
||||
unimplemented!()
|
||||
}
|
||||
|
|
|
@ -21,11 +21,15 @@ use hal::{
|
|||
|
||||
use native::ShaderVisibility;
|
||||
|
||||
fn is_little_endian() -> bool {
|
||||
unsafe { 1 == *(&1u32 as *const _ as *const u8) }
|
||||
}
|
||||
|
||||
pub fn map_format(format: Format) -> Option<DXGI_FORMAT> {
|
||||
use hal::format::Format::*;
|
||||
|
||||
// Handling packed formats according to the platform endianness.
|
||||
let reverse = unsafe { 1 == *(&1u32 as *const _ as *const u8) };
|
||||
let reverse = is_little_endian();
|
||||
let format = match format {
|
||||
Bgra4Unorm if !reverse => DXGI_FORMAT_B4G4R4A4_UNORM,
|
||||
R5g6b5Unorm if reverse => DXGI_FORMAT_B5G6R5_UNORM,
|
||||
|
@ -109,6 +113,34 @@ pub fn map_format(format: Format) -> Option<DXGI_FORMAT> {
|
|||
Some(format)
|
||||
}
|
||||
|
||||
pub fn map_format_shader_depth(surface: SurfaceType) -> Option<DXGI_FORMAT> {
|
||||
match surface {
|
||||
SurfaceType::D16 => Some(DXGI_FORMAT_R16_UNORM),
|
||||
SurfaceType::X8D24 | SurfaceType::D24_S8 => Some(DXGI_FORMAT_R24_UNORM_X8_TYPELESS),
|
||||
SurfaceType::D32 => Some(DXGI_FORMAT_R32_FLOAT),
|
||||
SurfaceType::D32_S8 => Some(DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS),
|
||||
_ => return None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_format_shader_stencil(surface: SurfaceType) -> Option<DXGI_FORMAT> {
|
||||
match surface {
|
||||
SurfaceType::D24_S8 => Some(DXGI_FORMAT_X24_TYPELESS_G8_UINT),
|
||||
SurfaceType::D32_S8 => Some(DXGI_FORMAT_X32_TYPELESS_G8X24_UINT),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_format_dsv(surface: SurfaceType) -> Option<DXGI_FORMAT> {
|
||||
match surface {
|
||||
SurfaceType::D16 => Some(DXGI_FORMAT_D16_UNORM),
|
||||
SurfaceType::X8D24 | SurfaceType::D24_S8 => Some(DXGI_FORMAT_D24_UNORM_S8_UINT),
|
||||
SurfaceType::D32 => Some(DXGI_FORMAT_D32_FLOAT),
|
||||
SurfaceType::D32_S8 => Some(DXGI_FORMAT_D32_FLOAT_S8X24_UINT),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_format_nosrgb(format: Format) -> Option<DXGI_FORMAT> {
|
||||
// NOTE: DXGI doesn't allow sRGB format on the swapchain, but
|
||||
// creating RTV of swapchain buffers with sRGB works
|
||||
|
@ -141,6 +173,23 @@ pub fn map_swizzle(swizzle: Swizzle) -> UINT {
|
|||
)
|
||||
}
|
||||
|
||||
pub fn swizzle_rg(swizzle: Swizzle) -> Swizzle {
|
||||
use hal::format::Component as C;
|
||||
fn map_component(c: C) -> C {
|
||||
match c {
|
||||
C::R => C::G,
|
||||
C::G => C::R,
|
||||
x => x,
|
||||
}
|
||||
}
|
||||
Swizzle(
|
||||
map_component(swizzle.0),
|
||||
map_component(swizzle.1),
|
||||
map_component(swizzle.2),
|
||||
map_component(swizzle.3),
|
||||
)
|
||||
}
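// Illustrative sketch (not part of the patch): the helper above swaps the R and G
// components so stencil, which DX12 exposes in G, is read from R as Vulkan/gfx
// expect. Assumes `Swizzle` implements `PartialEq`, as it does in gfx-hal.
#[test]
fn swizzle_rg_swaps_red_and_green() {
    use hal::format::{Component as C, Swizzle};
    assert_eq!(
        swizzle_rg(Swizzle(C::R, C::G, C::B, C::A)),
        Swizzle(C::G, C::R, C::B, C::A)
    );
}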
|
||||
|
||||
pub fn map_surface_type(st: SurfaceType) -> Option<DXGI_FORMAT> {
|
||||
use hal::format::SurfaceType::*;
|
||||
|
||||
|
@ -164,20 +213,10 @@ pub fn map_surface_type(st: SurfaceType) -> Option<DXGI_FORMAT> {
|
|||
B10_G11_R11 => DXGI_FORMAT_R11G11B10_FLOAT,
|
||||
E5_B9_G9_R9 => DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
|
||||
D16 => DXGI_FORMAT_R16_TYPELESS,
|
||||
X8D24 => DXGI_FORMAT_D24_UNORM_S8_UINT,
|
||||
X8D24 => DXGI_FORMAT_R24G8_TYPELESS,
|
||||
D32 => DXGI_FORMAT_R32_TYPELESS,
|
||||
D24_S8 => DXGI_FORMAT_D24_UNORM_S8_UINT,
|
||||
D32_S8 => DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
|
||||
_ => return None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn map_format_dsv(surface: SurfaceType) -> Option<DXGI_FORMAT> {
|
||||
Some(match surface {
|
||||
SurfaceType::D16 => DXGI_FORMAT_D16_UNORM,
|
||||
SurfaceType::X8D24 | SurfaceType::D24_S8 => DXGI_FORMAT_D24_UNORM_S8_UINT,
|
||||
SurfaceType::D32 => DXGI_FORMAT_D32_FLOAT,
|
||||
SurfaceType::D32_S8 => DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
|
||||
D24_S8 => DXGI_FORMAT_R24G8_TYPELESS,
|
||||
D32_S8 => DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
|
||||
_ => return None,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -117,30 +117,34 @@ pub(crate) enum CommandSignature {
|
|||
pub(crate) fn compile_shader(
|
||||
stage: pso::Stage,
|
||||
shader_model: hlsl::ShaderModel,
|
||||
features: &hal::Features,
|
||||
entry: &str,
|
||||
code: &[u8],
|
||||
) -> Result<native::Blob, d::ShaderError> {
|
||||
let stage_to_str = |stage, shader_model| {
|
||||
let stage = match stage {
|
||||
pso::Stage::Vertex => "vs",
|
||||
pso::Stage::Fragment => "ps",
|
||||
pso::Stage::Compute => "cs",
|
||||
_ => unimplemented!(),
|
||||
};
|
||||
|
||||
let model = match shader_model {
|
||||
hlsl::ShaderModel::V5_0 => "5_0",
|
||||
hlsl::ShaderModel::V5_1 => "5_1",
|
||||
hlsl::ShaderModel::V6_0 => "6_0",
|
||||
_ => unimplemented!(),
|
||||
};
|
||||
|
||||
format!("{}_{}\0", stage, model)
|
||||
let stage_str = match stage {
|
||||
pso::Stage::Vertex => "vs",
|
||||
pso::Stage::Fragment => "ps",
|
||||
pso::Stage::Compute => "cs",
|
||||
_ => unimplemented!(),
|
||||
};
|
||||
let model_str = match shader_model {
|
||||
hlsl::ShaderModel::V5_0 => "5_0",
|
||||
hlsl::ShaderModel::V5_1 => "5_1",
|
||||
hlsl::ShaderModel::V6_0 => "6_0",
|
||||
_ => unimplemented!(),
|
||||
};
|
||||
let full_stage = format!("{}_{}\0", stage_str, model_str);
|
||||
|
||||
let mut shader_data = native::Blob::null();
|
||||
let mut error = native::Blob::null();
|
||||
let entry = ffi::CString::new(entry).unwrap();
|
||||
let mut compile_flags = d3dcompiler::D3DCOMPILE_ENABLE_STRICTNESS;
|
||||
if cfg!(debug_assertions) {
|
||||
compile_flags |= d3dcompiler::D3DCOMPILE_DEBUG;
|
||||
}
|
||||
if features.contains(hal::Features::UNSIZED_DESCRIPTOR_ARRAY) {
|
||||
compile_flags |= d3dcompiler::D3DCOMPILE_ENABLE_UNBOUNDED_DESCRIPTOR_TABLES;
|
||||
}
|
||||
let hr = unsafe {
|
||||
d3dcompiler::D3DCompile(
|
||||
code.as_ptr() as *const _,
|
||||
|
@ -149,8 +153,8 @@ pub(crate) fn compile_shader(
|
|||
ptr::null(),
|
||||
ptr::null_mut(),
|
||||
entry.as_ptr() as *const _,
|
||||
stage_to_str(stage, shader_model).as_ptr() as *const i8,
|
||||
1,
|
||||
full_stage.as_ptr() as *const i8,
|
||||
compile_flags,
|
||||
0,
|
||||
shader_data.mut_void() as *mut *mut _,
|
||||
error.mut_void() as *mut *mut _,
|
||||
|
@ -491,6 +495,7 @@ impl Device {
|
|||
let shader = compile_shader(
|
||||
stage,
|
||||
shader_model,
|
||||
features,
|
||||
&entry_point.name,
|
||||
shader_code.as_bytes(),
|
||||
)?;
|
||||
|
@ -509,7 +514,7 @@ impl Device {
|
|||
code: &[u8],
|
||||
) -> Result<r::ShaderModule, d::ShaderError> {
|
||||
let mut shader_map = BTreeMap::new();
|
||||
let blob = compile_shader(stage, hlsl::ShaderModel::V5_1, hlsl_entry, code)?;
|
||||
let blob = compile_shader(stage, hlsl::ShaderModel::V5_1, &self.features, hlsl_entry, code)?;
|
||||
shader_map.insert(entry_point.into(), blob);
|
||||
Ok(r::ShaderModule::Compiled(shader_map))
|
||||
}
|
||||
|
@ -574,7 +579,7 @@ impl Device {
|
|||
pub(crate) fn view_image_as_render_target_impl(
|
||||
device: native::Device,
|
||||
handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE,
|
||||
info: ViewInfo,
|
||||
info: &ViewInfo,
|
||||
) -> Result<(), image::ViewCreationError> {
|
||||
#![allow(non_snake_case)]
|
||||
|
||||
|
@ -593,7 +598,7 @@ impl Device {
|
|||
}
|
||||
if info.range.layers.end > info.kind.num_layers() {
|
||||
return Err(image::ViewCreationError::Layer(
|
||||
image::LayerError::OutOfBounds(info.range.layers),
|
||||
image::LayerError::OutOfBounds(info.range.layers.clone()),
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -672,7 +677,7 @@ impl Device {
|
|||
|
||||
fn view_image_as_render_target(
|
||||
&self,
|
||||
info: ViewInfo,
|
||||
info: &ViewInfo,
|
||||
) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewCreationError> {
|
||||
let handle = self.rtv_pool.lock().unwrap().alloc_handle();
|
||||
Self::view_image_as_render_target_impl(self.raw, handle, info).map(|_| handle)
|
||||
|
@ -681,7 +686,7 @@ impl Device {
|
|||
pub(crate) fn view_image_as_depth_stencil_impl(
|
||||
device: native::Device,
|
||||
handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE,
|
||||
info: ViewInfo,
|
||||
info: &ViewInfo,
|
||||
) -> Result<(), image::ViewCreationError> {
|
||||
#![allow(non_snake_case)]
|
||||
|
||||
|
@ -701,7 +706,7 @@ impl Device {
|
|||
}
|
||||
if info.range.layers.end > info.kind.num_layers() {
|
||||
return Err(image::ViewCreationError::Layer(
|
||||
image::LayerError::OutOfBounds(info.range.layers),
|
||||
image::LayerError::OutOfBounds(info.range.layers.clone()),
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -760,7 +765,7 @@ impl Device {
|
|||
|
||||
fn view_image_as_depth_stencil(
|
||||
&self,
|
||||
info: ViewInfo,
|
||||
info: &ViewInfo,
|
||||
) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewCreationError> {
|
||||
let handle = self.dsv_pool.lock().unwrap().alloc_handle();
|
||||
Self::view_image_as_depth_stencil_impl(self.raw, handle, info).map(|_| handle)
|
||||
|
@ -888,17 +893,8 @@ impl Device {
|
|||
|
||||
fn view_image_as_shader_resource(
|
||||
&self,
|
||||
mut info: ViewInfo,
|
||||
info: &ViewInfo,
|
||||
) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewCreationError> {
|
||||
#![allow(non_snake_case)]
|
||||
|
||||
// Depth-stencil formats can't be used for SRVs.
|
||||
info.format = match info.format {
|
||||
dxgiformat::DXGI_FORMAT_D16_UNORM => dxgiformat::DXGI_FORMAT_R16_UNORM,
|
||||
dxgiformat::DXGI_FORMAT_D32_FLOAT => dxgiformat::DXGI_FORMAT_R32_FLOAT,
|
||||
format => format,
|
||||
};
|
||||
|
||||
let desc = Self::build_image_as_shader_resource_desc(&info)?;
|
||||
let handle = self.srv_uav_pool.lock().unwrap().alloc_handle();
|
||||
unsafe {
|
||||
|
@ -911,7 +907,7 @@ impl Device {
|
|||
|
||||
fn view_image_as_storage(
|
||||
&self,
|
||||
info: ViewInfo,
|
||||
info: &ViewInfo,
|
||||
) -> Result<d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, image::ViewCreationError> {
|
||||
#![allow(non_snake_case)]
|
||||
assert_eq!(info.range.levels.start + 1, info.range.levels.end);
|
||||
|
@ -928,7 +924,7 @@ impl Device {
|
|||
|
||||
if info.range.layers.end > info.kind.num_layers() {
|
||||
return Err(image::ViewCreationError::Layer(
|
||||
image::LayerError::OutOfBounds(info.range.layers),
|
||||
image::LayerError::OutOfBounds(info.range.layers.clone()),
|
||||
));
|
||||
}
|
||||
if info.kind.num_samples() > 1 {
|
||||
|
@ -2050,6 +2046,7 @@ impl d::Device<B> for Device {
|
|||
baked_states,
|
||||
})
|
||||
} else {
|
||||
error!("Failed to build shader: {:x}", hr);
|
||||
Err(pso::CreationError::Other)
|
||||
}
|
||||
}
|
||||
|
@ -2552,7 +2549,7 @@ impl d::Device<B> for Device {
|
|||
let format = image_unbound.view_format.unwrap();
|
||||
(0 .. num_layers)
|
||||
.map(|layer| {
|
||||
self.view_image_as_render_target(ViewInfo {
|
||||
self.view_image_as_render_target(&ViewInfo {
|
||||
format,
|
||||
range: image::SubresourceRange {
|
||||
aspects: Aspects::COLOR,
|
||||
|
@ -2571,7 +2568,7 @@ impl d::Device<B> for Device {
|
|||
let format = image_unbound.dsv_format.unwrap();
|
||||
(0 .. num_layers)
|
||||
.map(|layer| {
|
||||
self.view_image_as_depth_stencil(ViewInfo {
|
||||
self.view_image_as_depth_stencil(&ViewInfo {
|
||||
format,
|
||||
range: image::SubresourceRange {
|
||||
aspects: Aspects::DEPTH,
|
||||
|
@ -2590,7 +2587,7 @@ impl d::Device<B> for Device {
|
|||
let format = image_unbound.dsv_format.unwrap();
|
||||
(0 .. num_layers)
|
||||
.map(|layer| {
|
||||
self.view_image_as_depth_stencil(ViewInfo {
|
||||
self.view_image_as_depth_stencil(&ViewInfo {
|
||||
format,
|
||||
range: image::SubresourceRange {
|
||||
aspects: Aspects::STENCIL,
|
||||
|
@ -2623,6 +2620,7 @@ impl d::Device<B> for Device {
|
|||
let is_array = image.kind.num_layers() > 1;
|
||||
let mip_levels = (range.levels.start, range.levels.end);
|
||||
let layers = (range.layers.start, range.layers.end);
|
||||
let surface_format = format.base_format().0;
|
||||
|
||||
let info = ViewInfo {
|
||||
resource: image.resource,
|
||||
|
@ -2650,24 +2648,47 @@ impl d::Device<B> for Device {
|
|||
.usage
|
||||
.intersects(image::Usage::SAMPLED | image::Usage::INPUT_ATTACHMENT)
|
||||
{
|
||||
self.view_image_as_shader_resource(info.clone()).ok()
|
||||
let info = if info.range.aspects.contains(format::Aspects::DEPTH) {
|
||||
conv::map_format_shader_depth(surface_format)
|
||||
.map(|format| ViewInfo {
|
||||
format,
|
||||
.. info.clone()
|
||||
})
|
||||
} else if info.range.aspects.contains(format::Aspects::STENCIL) {
|
||||
// Vulkan/gfx expects stencil to be read from the R channel,
|
||||
// while DX12 exposes it in "G" always.
|
||||
let new_swizzle = conv::swizzle_rg(swizzle);
|
||||
conv::map_format_shader_stencil(surface_format)
|
||||
.map(|format| ViewInfo {
|
||||
format,
|
||||
component_mapping: conv::map_swizzle(new_swizzle),
|
||||
.. info.clone()
|
||||
})
|
||||
} else {
|
||||
Some(info.clone())
|
||||
};
|
||||
if let Some(ref info) = info {
|
||||
self.view_image_as_shader_resource(&info).ok()
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
},
|
||||
handle_rtv: if image.usage.contains(image::Usage::COLOR_ATTACHMENT) {
|
||||
self.view_image_as_render_target(info.clone()).ok()
|
||||
self.view_image_as_render_target(&info).ok()
|
||||
} else {
|
||||
None
|
||||
},
|
||||
handle_uav: if image.usage.contains(image::Usage::STORAGE) {
|
||||
self.view_image_as_storage(info.clone()).ok()
|
||||
self.view_image_as_storage(&info).ok()
|
||||
} else {
|
||||
None
|
||||
},
|
||||
handle_dsv: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT) {
|
||||
match conv::map_format_dsv(format.base_format().0) {
|
||||
match conv::map_format_dsv(surface_format) {
|
||||
Some(dsv_format) => self
|
||||
.view_image_as_depth_stencil(ViewInfo {
|
||||
.view_image_as_depth_stencil(&ViewInfo {
|
||||
format: dsv_format,
|
||||
..info
|
||||
})
|
||||
|
@ -3432,9 +3453,23 @@ impl d::Device<B> for Device {
|
|||
// Just drop
|
||||
}
|
||||
|
||||
unsafe fn destroy_descriptor_pool(&self, _pool: r::DescriptorPool) {
|
||||
// Just drop
|
||||
// Allocated descriptor sets don't need to be freed beforehand.
|
||||
unsafe fn destroy_descriptor_pool(&self, pool: r::DescriptorPool) {
|
||||
let view_range = pool.heap_srv_cbv_uav.range_allocator.initial_range();
|
||||
if view_range.start < view_range.end {
|
||||
self.heap_srv_cbv_uav
|
||||
.lock()
|
||||
.unwrap()
|
||||
.range_allocator
|
||||
.free_range(view_range.clone());
|
||||
}
|
||||
let sampler_range = pool.heap_sampler.range_allocator.initial_range();
|
||||
if sampler_range.start < sampler_range.end {
|
||||
self.heap_sampler
|
||||
.lock()
|
||||
.unwrap()
|
||||
.range_allocator
|
||||
.free_range(sampler_range.clone());
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn destroy_descriptor_set_layout(&self, _layout: r::DescriptorSetLayout) {
|
||||
|
|
|
@ -37,6 +37,7 @@ use std::{
|
|||
fmt,
|
||||
mem,
|
||||
os::windows::ffi::OsStringExt,
|
||||
//TODO: use parking_lot
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
|
||||
|
@ -869,12 +870,24 @@ impl hal::Instance<Backend> for Instance {
|
|||
name.to_string_lossy().into_owned()
|
||||
};
|
||||
|
||||
let mut features_architecture: d3d12::D3D12_FEATURE_DATA_ARCHITECTURE =
|
||||
unsafe { mem::zeroed() };
|
||||
assert_eq!(winerror::S_OK, unsafe {
|
||||
device.CheckFeatureSupport(
|
||||
d3d12::D3D12_FEATURE_ARCHITECTURE,
|
||||
&mut features_architecture as *mut _ as *mut _,
|
||||
mem::size_of::<d3d12::D3D12_FEATURE_DATA_ARCHITECTURE>() as _,
|
||||
)
|
||||
});
|
||||
|
||||
let info = adapter::AdapterInfo {
|
||||
name: device_name,
|
||||
vendor: desc.VendorId as usize,
|
||||
device: desc.DeviceId as usize,
|
||||
device_type: if (desc.Flags & dxgi::DXGI_ADAPTER_FLAG_SOFTWARE) != 0 {
|
||||
adapter::DeviceType::VirtualGpu
|
||||
} else if features_architecture.CacheCoherentUMA == TRUE {
|
||||
adapter::DeviceType::IntegratedGpu
|
||||
} else {
|
||||
adapter::DeviceType::DiscreteGpu
|
||||
},
|
||||
|
@ -889,16 +902,6 @@ impl hal::Instance<Backend> for Instance {
|
|||
)
|
||||
});
|
||||
|
||||
let mut features_architecture: d3d12::D3D12_FEATURE_DATA_ARCHITECTURE =
|
||||
unsafe { mem::zeroed() };
|
||||
assert_eq!(winerror::S_OK, unsafe {
|
||||
device.CheckFeatureSupport(
|
||||
d3d12::D3D12_FEATURE_ARCHITECTURE,
|
||||
&mut features_architecture as *mut _ as *mut _,
|
||||
mem::size_of::<d3d12::D3D12_FEATURE_DATA_ARCHITECTURE>() as _,
|
||||
)
|
||||
});
|
||||
|
||||
let depth_bounds_test_supported = {
|
||||
let mut features2: d3d12::D3D12_FEATURE_DATA_D3D12_OPTIONS2 =
|
||||
unsafe { mem::zeroed() };
|
||||
|
@ -1088,8 +1091,13 @@ impl hal::Instance<Backend> for Instance {
|
|||
Features::INSTANCE_RATE |
|
||||
Features::SAMPLER_MIP_LOD_BIAS |
|
||||
Features::SAMPLER_ANISOTROPY |
|
||||
Features::TEXTURE_DESCRIPTOR_ARRAY |
|
||||
Features::SAMPLER_MIRROR_CLAMP_EDGE |
|
||||
Features::NDC_Y_UP,
|
||||
Features::NDC_Y_UP |
|
||||
Features::SAMPLED_TEXTURE_DESCRIPTOR_INDEXING |
|
||||
Features::STORAGE_TEXTURE_DESCRIPTOR_INDEXING |
|
||||
Features::UNSIZED_DESCRIPTOR_ARRAY |
|
||||
Features::DRAW_INDIRECT_COUNT,
|
||||
hints:
|
||||
Hints::BASE_VERTEX_INSTANCE_DRAWING,
|
||||
limits: Limits { // TODO
|
||||
|
|
|
@ -643,13 +643,15 @@ impl DescriptorHeapSlice {
|
|||
})
|
||||
}
|
||||
|
||||
/// Free handles previously given out by this `DescriptorHeapSlice`. Do not use this with handles not given out by this `DescriptorHeapSlice`.
|
||||
/// Free handles previously given out by this `DescriptorHeapSlice`.
|
||||
/// Do not use this with handles not given out by this `DescriptorHeapSlice`.
|
||||
pub(crate) fn free_handles(&mut self, handle: DualHandle) {
|
||||
let start = (handle.gpu.ptr - self.start.gpu.ptr) / self.handle_size;
|
||||
let handle_range = start .. start + handle.size as u64;
|
||||
self.range_allocator.free_range(handle_range);
|
||||
}
|
||||
|
||||
/// Clear the allocator.
|
||||
pub(crate) fn clear(&mut self) {
|
||||
self.range_allocator.reset();
|
||||
}
|
||||
|
|
|
@ -159,7 +159,10 @@ impl w::PresentationSurface<Backend> for Surface {
|
|||
}
|
||||
|
||||
unsafe fn unconfigure_swapchain(&mut self, device: &Device) {
|
||||
if let Some(present) = self.presentation.take() {
|
||||
if let Some(mut present) = self.presentation.take() {
|
||||
let _ = present.swapchain.wait(winbase::INFINITE);
|
||||
let _ = device.wait_idle(); //TODO: this shouldn't be needed,
|
||||
// but it complains that the queue is still used otherwise
|
||||
device.destroy_swapchain(present.swapchain);
|
||||
}
|
||||
}
|
||||
|
@ -171,16 +174,7 @@ impl w::PresentationSurface<Backend> for Surface {
|
|||
let present = self.presentation.as_mut().unwrap();
|
||||
let sc = &mut present.swapchain;
|
||||
|
||||
match synchapi::WaitForSingleObject(
|
||||
sc.waitable,
|
||||
(timeout_ns / 1_000_000) as u32,
|
||||
) {
|
||||
winbase::WAIT_ABANDONED |
|
||||
winbase::WAIT_FAILED => return Err(w::AcquireError::DeviceLost(hal::device::DeviceLost)),
|
||||
winbase::WAIT_OBJECT_0 => (),
|
||||
winerror::WAIT_TIMEOUT => return Err(w::AcquireError::Timeout),
|
||||
hr => panic!("Unexpected wait status 0x{:X}", hr),
|
||||
}
|
||||
sc.wait((timeout_ns / 1_000_000) as u32)?;
|
||||
|
||||
let index = sc.inner.GetCurrentBackBufferIndex();
|
||||
let view = r::ImageView {
|
||||
|
@ -220,6 +214,18 @@ impl Swapchain {
|
|||
self.rtv_heap.destroy();
|
||||
self.inner
|
||||
}
|
||||
|
||||
pub(crate) fn wait(&mut self, timeout_ms: u32) -> Result<(), w::AcquireError> {
|
||||
match unsafe {
|
||||
synchapi::WaitForSingleObject(self.waitable, timeout_ms)
|
||||
} {
|
||||
winbase::WAIT_ABANDONED |
|
||||
winbase::WAIT_FAILED => Err(w::AcquireError::DeviceLost(hal::device::DeviceLost)),
|
||||
winbase::WAIT_OBJECT_0 => Ok(()),
|
||||
winerror::WAIT_TIMEOUT => Err(w::AcquireError::Timeout),
|
||||
hr => panic!("Unexpected wait status 0x{:X}", hr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl w::Swapchain<Backend> for Swapchain {
|
||||
|
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"787aa9611486551e3d92cb2dabbe367f9048ceba1fbd6525006853315338b0c5","src/lib.rs":"f3997ef41c4079ee6bfa55ed08cdfa11230ddc432c2ea26db750ac5eeb29fecf"},"package":"dd5927936723a9e8b715d37d7e4b390455087c4bdf25b9f702309460577b14f9"}
|
||||
{"files":{"Cargo.toml":"016db0c76a026079e34eaf12145e616af8b63ae28a8d56f055253520bffbc421","src/lib.rs":"18a5e1a1e28e4f0986b7f60e8ab7349b41aabbf4b15bdcbd163059cedb5cea2e"},"package":"a871f1e45a3a3f0c73fb60343c811238bb5143a81642e27c2ac7aac27ff01a63"}
|
|
@ -3,7 +3,7 @@
|
|||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g. crates.io) dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
|
@ -11,8 +11,9 @@
|
|||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "range-alloc"
|
||||
version = "0.1.0"
|
||||
version = "0.1.1"
|
||||
authors = ["The Gfx-rs Developers"]
|
||||
description = "Generic range allocator used by gfx-rs backends"
|
||||
homepage = "https://github.com/gfx-rs/gfx"
|
||||
|
|
|
@ -2,7 +2,6 @@ use std::fmt::Debug;
|
|||
use std::iter::Sum;
|
||||
use std::ops::{Add, AddAssign, Range, Sub};
|
||||
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct RangeAllocator<T> {
|
||||
/// The range this allocator covers.
|
||||
|
@ -29,6 +28,10 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
pub fn initial_range(&self) -> &Range<T> {
|
||||
&self.initial_range
|
||||
}
|
||||
|
||||
pub fn allocate_range(&mut self, length: T) -> Result<Range<T>, RangeAllocationError<T>> {
|
||||
assert_ne!(length + length, length);
|
||||
let mut best_fit: Option<(usize, Range<T>)> = None;
|
||||
|
@ -52,9 +55,7 @@ where
|
|||
(best_index, best_range.clone())
|
||||
}
|
||||
}
|
||||
None => {
|
||||
(index, range)
|
||||
}
|
||||
None => (index, range),
|
||||
});
|
||||
}
|
||||
match best_fit {
|
||||
|
@ -64,11 +65,11 @@ where
|
|||
} else {
|
||||
self.free_ranges[index].start += length;
|
||||
}
|
||||
Ok(range.start..(range.start + length))
|
||||
Ok(range.start .. (range.start + length))
|
||||
}
|
||||
None => Err(RangeAllocationError {
|
||||
fragmented_free_length,
|
||||
})
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -77,7 +78,9 @@ where
|
|||
assert!(range.start < range.end);
|
||||
|
||||
// Get insertion position.
|
||||
let i = self.free_ranges.iter()
|
||||
let i = self
|
||||
.free_ranges
|
||||
.iter()
|
||||
.position(|r| r.start > range.start)
|
||||
.unwrap_or(self.free_ranges.len());
|
||||
|
||||
|
@ -97,22 +100,21 @@ where
|
|||
return;
|
||||
} else if i < self.free_ranges.len() && range.end == self.free_ranges[i].start {
|
||||
// Merge with |right|.
|
||||
self.free_ranges[i].start =
|
||||
if i > 0 && range.start == self.free_ranges[i - 1].end {
|
||||
// Check for possible merge with |left| and |right|.
|
||||
let left = self.free_ranges.remove(i - 1);
|
||||
left.start
|
||||
} else {
|
||||
range.start
|
||||
};
|
||||
self.free_ranges[i].start = if i > 0 && range.start == self.free_ranges[i - 1].end {
|
||||
// Check for possible merge with |left| and |right|.
|
||||
let left = self.free_ranges.remove(i - 1);
|
||||
left.start
|
||||
} else {
|
||||
range.start
|
||||
};
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// Debug checks
|
||||
assert!(
|
||||
(i == 0 || self.free_ranges[i - 1].end < range.start) &&
|
||||
(i >= self.free_ranges.len() || range.end < self.free_ranges[i].start)
|
||||
(i == 0 || self.free_ranges[i - 1].end < range.start)
|
||||
&& (i >= self.free_ranges.len() || range.end < self.free_ranges[i].start)
|
||||
);
|
||||
|
||||
self.free_ranges.insert(i, range);
|
||||
|
@ -121,24 +123,26 @@ where
|
|||
/// Returns an iterator over allocated non-empty ranges
|
||||
pub fn allocated_ranges<'a>(&'a self) -> impl 'a + Iterator<Item = Range<T>> {
|
||||
let first = match self.free_ranges.first() {
|
||||
Some(Range { ref start, .. }) if *start > self.initial_range.start => Some(self.initial_range.start .. *start),
|
||||
Some(Range { ref start, .. }) if *start > self.initial_range.start => {
|
||||
Some(self.initial_range.start .. *start)
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
|
||||
let last = match self.free_ranges.last() {
|
||||
Some(Range { end, .. }) if *end < self.initial_range.end => Some(*end .. self.initial_range.end),
|
||||
Some(Range { end, .. }) if *end < self.initial_range.end => {
|
||||
Some(*end .. self.initial_range.end)
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
|
||||
let mid = self.free_ranges
|
||||
let mid = self
|
||||
.free_ranges
|
||||
.iter()
|
||||
.zip(self.free_ranges.iter().skip(1))
|
||||
.map(|(ra, rb)| ra.end .. rb.start);
|
||||
|
||||
first
|
||||
.into_iter()
|
||||
.chain(mid)
|
||||
.chain(last)
|
||||
first.into_iter().chain(mid).chain(last)
|
||||
}
|
||||
|
||||
pub fn reset(&mut self) {
|
||||
|
@@ -160,108 +164,113 @@ impl<T: Copy + Sub<Output = T> + Sum> RangeAllocator<T> {
     }
 }

 #[cfg(test)]
 mod tests {
     use super::*;

     #[test]
     fn test_basic_allocation() {
-        let mut alloc = RangeAllocator::new(0..10);
+        let mut alloc = RangeAllocator::new(0 .. 10);
         // Test if an allocation works
-        assert_eq!(alloc.allocate_range(4), Ok(0..4));
+        assert_eq!(alloc.allocate_range(4), Ok(0 .. 4));
         // Free the prior allocation
-        alloc.free_range(0..4);
+        alloc.free_range(0 .. 4);
         // Make sure the free actually worked
-        assert_eq!(alloc.free_ranges, vec![0..10]);
+        assert_eq!(alloc.free_ranges, vec![0 .. 10]);
     }

     #[test]
     fn test_out_of_space() {
-        let mut alloc = RangeAllocator::new(0..10);
+        let mut alloc = RangeAllocator::new(0 .. 10);
         // Test if the allocator runs out of space correctly
-        assert_eq!(alloc.allocate_range(10), Ok(0..10));
+        assert_eq!(alloc.allocate_range(10), Ok(0 .. 10));
         assert!(alloc.allocate_range(4).is_err());
-        alloc.free_range(0..10);
+        alloc.free_range(0 .. 10);
     }

     #[test]
     fn test_dont_use_block_that_is_too_small() {
-        let mut alloc = RangeAllocator::new(0..10);
+        let mut alloc = RangeAllocator::new(0 .. 10);
         // Allocate three blocks then free the middle one and check for correct state
-        assert_eq!(alloc.allocate_range(3), Ok(0..3));
-        assert_eq!(alloc.allocate_range(3), Ok(3..6));
-        assert_eq!(alloc.allocate_range(3), Ok(6..9));
-        alloc.free_range(3..6);
-        assert_eq!(alloc.free_ranges, vec![3..6, 9..10]);
+        assert_eq!(alloc.allocate_range(3), Ok(0 .. 3));
+        assert_eq!(alloc.allocate_range(3), Ok(3 .. 6));
+        assert_eq!(alloc.allocate_range(3), Ok(6 .. 9));
+        alloc.free_range(3 .. 6);
+        assert_eq!(alloc.free_ranges, vec![3 .. 6, 9 .. 10]);
         // Now request space that the middle block can fill, but the end one can't.
-        assert_eq!(alloc.allocate_range(3), Ok(3..6));
+        assert_eq!(alloc.allocate_range(3), Ok(3 .. 6));
     }

     #[test]
     fn test_free_blocks_in_middle() {
-        let mut alloc = RangeAllocator::new(0..100);
+        let mut alloc = RangeAllocator::new(0 .. 100);
         // Allocate many blocks then free every other block.
-        assert_eq!(alloc.allocate_range(10), Ok(0..10));
-        assert_eq!(alloc.allocate_range(10), Ok(10..20));
-        assert_eq!(alloc.allocate_range(10), Ok(20..30));
-        assert_eq!(alloc.allocate_range(10), Ok(30..40));
-        assert_eq!(alloc.allocate_range(10), Ok(40..50));
-        assert_eq!(alloc.allocate_range(10), Ok(50..60));
-        assert_eq!(alloc.allocate_range(10), Ok(60..70));
-        assert_eq!(alloc.allocate_range(10), Ok(70..80));
-        assert_eq!(alloc.allocate_range(10), Ok(80..90));
-        assert_eq!(alloc.allocate_range(10), Ok(90..100));
+        assert_eq!(alloc.allocate_range(10), Ok(0 .. 10));
+        assert_eq!(alloc.allocate_range(10), Ok(10 .. 20));
+        assert_eq!(alloc.allocate_range(10), Ok(20 .. 30));
+        assert_eq!(alloc.allocate_range(10), Ok(30 .. 40));
+        assert_eq!(alloc.allocate_range(10), Ok(40 .. 50));
+        assert_eq!(alloc.allocate_range(10), Ok(50 .. 60));
+        assert_eq!(alloc.allocate_range(10), Ok(60 .. 70));
+        assert_eq!(alloc.allocate_range(10), Ok(70 .. 80));
+        assert_eq!(alloc.allocate_range(10), Ok(80 .. 90));
+        assert_eq!(alloc.allocate_range(10), Ok(90 .. 100));
         assert_eq!(alloc.free_ranges, vec![]);
-        alloc.free_range(10..20);
-        alloc.free_range(30..40);
-        alloc.free_range(50..60);
-        alloc.free_range(70..80);
-        alloc.free_range(90..100);
+        alloc.free_range(10 .. 20);
+        alloc.free_range(30 .. 40);
+        alloc.free_range(50 .. 60);
+        alloc.free_range(70 .. 80);
+        alloc.free_range(90 .. 100);
         // Check that the right blocks were freed.
-        assert_eq!(alloc.free_ranges, vec![10..20, 30..40, 50..60, 70..80, 90..100]);
+        assert_eq!(
+            alloc.free_ranges,
+            vec![10 .. 20, 30 .. 40, 50 .. 60, 70 .. 80, 90 .. 100]
+        );
         // Fragment the memory on purpose a bit.
-        assert_eq!(alloc.allocate_range(6), Ok(10..16));
-        assert_eq!(alloc.allocate_range(6), Ok(30..36));
-        assert_eq!(alloc.allocate_range(6), Ok(50..56));
-        assert_eq!(alloc.allocate_range(6), Ok(70..76));
-        assert_eq!(alloc.allocate_range(6), Ok(90..96));
+        assert_eq!(alloc.allocate_range(6), Ok(10 .. 16));
+        assert_eq!(alloc.allocate_range(6), Ok(30 .. 36));
+        assert_eq!(alloc.allocate_range(6), Ok(50 .. 56));
+        assert_eq!(alloc.allocate_range(6), Ok(70 .. 76));
+        assert_eq!(alloc.allocate_range(6), Ok(90 .. 96));
         // Check for fragmentation.
-        assert_eq!(alloc.free_ranges, vec![16..20, 36..40, 56..60, 76..80, 96..100]);
+        assert_eq!(
+            alloc.free_ranges,
+            vec![16 .. 20, 36 .. 40, 56 .. 60, 76 .. 80, 96 .. 100]
+        );
         // Fill up the fragmentation
-        assert_eq!(alloc.allocate_range(4), Ok(16..20));
-        assert_eq!(alloc.allocate_range(4), Ok(36..40));
-        assert_eq!(alloc.allocate_range(4), Ok(56..60));
-        assert_eq!(alloc.allocate_range(4), Ok(76..80));
-        assert_eq!(alloc.allocate_range(4), Ok(96..100));
+        assert_eq!(alloc.allocate_range(4), Ok(16 .. 20));
+        assert_eq!(alloc.allocate_range(4), Ok(36 .. 40));
+        assert_eq!(alloc.allocate_range(4), Ok(56 .. 60));
+        assert_eq!(alloc.allocate_range(4), Ok(76 .. 80));
+        assert_eq!(alloc.allocate_range(4), Ok(96 .. 100));
         // Check that nothing is free.
         assert_eq!(alloc.free_ranges, vec![]);
     }

     #[test]
     fn test_ignore_block_if_another_fits_better() {
-        let mut alloc = RangeAllocator::new(0..10);
+        let mut alloc = RangeAllocator::new(0 .. 10);
         // Allocate blocks such that the only free spaces available are 3..6 and 9..10
         // in order to prepare for the next test.
-        assert_eq!(alloc.allocate_range(3), Ok(0..3));
-        assert_eq!(alloc.allocate_range(3), Ok(3..6));
-        assert_eq!(alloc.allocate_range(3), Ok(6..9));
-        alloc.free_range(3..6);
-        assert_eq!(alloc.free_ranges, vec![3..6, 9..10]);
+        assert_eq!(alloc.allocate_range(3), Ok(0 .. 3));
+        assert_eq!(alloc.allocate_range(3), Ok(3 .. 6));
+        assert_eq!(alloc.allocate_range(3), Ok(6 .. 9));
+        alloc.free_range(3 .. 6);
+        assert_eq!(alloc.free_ranges, vec![3 .. 6, 9 .. 10]);
         // Now request space that can be filled by 3..6 but should be filled by 9..10
         // because 9..10 is a perfect fit.
-        assert_eq!(alloc.allocate_range(1), Ok(9..10));
+        assert_eq!(alloc.allocate_range(1), Ok(9 .. 10));
     }
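The test above leans on the allocator preferring a free block that fits the request exactly over a larger one, i.e. a best-fit style choice. As an illustration (not part of the diff, and not the crate's actual `allocate_range`), a minimal best-fit pick over a plain free list:

    use std::ops::Range;

    // Illustrative only: index of the smallest free range that can still hold `size`.
    fn best_fit(free_ranges: &[Range<usize>], size: usize) -> Option<usize> {
        free_ranges
            .iter()
            .enumerate()
            .filter(|(_, r)| r.end - r.start >= size)
            .min_by_key(|(_, r)| r.end - r.start)
            .map(|(i, _)| i)
    }

    // Example matching the test: with free blocks [3..6, 9..10], a request of 1 picks 9..10.
    // assert_eq!(best_fit(&[3 .. 6, 9 .. 10], 1), Some(1));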
     #[test]
     fn test_merge_neighbors() {
-        let mut alloc = RangeAllocator::new(0..9);
-        assert_eq!(alloc.allocate_range(3), Ok(0..3));
-        assert_eq!(alloc.allocate_range(3), Ok(3..6));
-        assert_eq!(alloc.allocate_range(3), Ok(6..9));
-        alloc.free_range(0..3);
-        alloc.free_range(6..9);
-        alloc.free_range(3..6);
-        assert_eq!(alloc.free_ranges, vec![0..9]);
+        let mut alloc = RangeAllocator::new(0 .. 9);
+        assert_eq!(alloc.allocate_range(3), Ok(0 .. 3));
+        assert_eq!(alloc.allocate_range(3), Ok(3 .. 6));
+        assert_eq!(alloc.allocate_range(3), Ok(6 .. 9));
+        alloc.free_range(0 .. 3);
+        alloc.free_range(6 .. 9);
+        alloc.free_range(3 .. 6);
+        assert_eq!(alloc.free_ranges, vec![0 .. 9]);
     }
 }
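Read together, the tests double as usage documentation: a `RangeAllocator` is built over an initial range, `allocate_range` carves sub-ranges out of it and fails when nothing large enough is free, and `free_range` returns a block and merges it with free neighbours. A hedged usage sketch against that API (the `0 .. 64` range, the `u32` element type, and the variable names are illustrative):

    use range_alloc::RangeAllocator;

    fn main() {
        // Manage the interval 0..64, e.g. slots in a descriptor heap.
        let mut alloc = RangeAllocator::new(0u32 .. 64);

        // Carve out two blocks; allocate_range is fallible (see test_out_of_space).
        let a = alloc.allocate_range(16).ok().expect("space for `a`");
        let b = alloc.allocate_range(8).ok().expect("space for `b`");
        println!("a = {:?}, b = {:?}", a, b);

        // Return `a`; the allocator merges it back into its free list.
        alloc.free_range(a);

        // The freed space can be handed out again.
        let c = alloc.allocate_range(16).ok().expect("space for `c`");
        println!("c = {:?}", c);
    }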