Bug 1596128 - Update wgpu to the version that has a separate wgpu-core

https://github.com/gfx-rs/wgpu rev d08118b535a95755c29984e256cba205172acd95

Differential Revision: https://phabricator.services.mozilla.com/D54014

--HG--
rename : gfx/wgpu/wgpu-native/Cargo.toml => gfx/wgpu/wgpu-core/Cargo.toml
rename : gfx/wgpu/wgpu-native/src/binding_model.rs => gfx/wgpu/wgpu-core/src/binding_model.rs
rename : gfx/wgpu/wgpu-native/src/command/allocator.rs => gfx/wgpu/wgpu-core/src/command/allocator.rs
rename : gfx/wgpu/wgpu-native/src/command/bind.rs => gfx/wgpu/wgpu-core/src/command/bind.rs
rename : gfx/wgpu/wgpu-native/src/conv.rs => gfx/wgpu/wgpu-core/src/conv.rs
rename : gfx/wgpu/wgpu-native/src/hub.rs => gfx/wgpu/wgpu-core/src/hub.rs
rename : gfx/wgpu/wgpu-native/src/id.rs => gfx/wgpu/wgpu-core/src/id.rs
rename : gfx/wgpu/wgpu-native/src/lib.rs => gfx/wgpu/wgpu-core/src/lib.rs
rename : gfx/wgpu/wgpu-native/src/pipeline.rs => gfx/wgpu/wgpu-core/src/pipeline.rs
rename : gfx/wgpu/wgpu-native/src/resource.rs => gfx/wgpu/wgpu-core/src/resource.rs
rename : gfx/wgpu/wgpu-native/src/track/buffer.rs => gfx/wgpu/wgpu-core/src/track/buffer.rs
rename : gfx/wgpu/wgpu-native/src/track/mod.rs => gfx/wgpu/wgpu-core/src/track/mod.rs
rename : gfx/wgpu/wgpu-native/src/track/range.rs => gfx/wgpu/wgpu-core/src/track/range.rs
rename : gfx/wgpu/wgpu-native/src/track/texture.rs => gfx/wgpu/wgpu-core/src/track/texture.rs
extra : moz-landing-system : lando
Dzmitry Malyshau 2019-11-21 21:54:26 +00:00
Parent abb35dc646
Commit e662646742
47 changed files, 6036 additions and 6146 deletions

Cargo.lock (generated)

@@ -4333,8 +4333,8 @@ dependencies = [
 ]

 [[package]]
-name = "wgpu-native"
-version = "0.4.0"
+name = "wgpu-core"
+version = "0.1.0"
 dependencies = [
  "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -4360,7 +4360,7 @@ version = "0.1.0"
 dependencies = [
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wgpu-native 0.4.0",
+ "wgpu-core 0.1.0",
 ]

 [[package]]

.travis.yml

@@ -58,8 +58,8 @@ before_install:
 script:
   - cargo test
   # TODO: enable GL backend
-  - (cd wgpu-native && cargo check --all-features)
-  - if [[ $TRAVIS_OS_NAME == "osx" ]]; then (cd wgpu-native && cargo check --features gfx-backend-vulkan); fi
+  - (cd wgpu-core && cargo check --all-features)
+  - if [[ $TRAVIS_OS_NAME == "osx" ]]; then (cd wgpu-native && cargo check --features vulkan-portability); fi
   - if [[ $TRAVIS_OS_NAME == "linux" ]]; then cargo check --release; fi
   - if [[ $TRAVIS_RUST_VERSION == "nightly" ]]; then cargo +nightly install cbindgen; fi
   - if [[ $TRAVIS_RUST_VERSION == "nightly" ]] && [[ $TRAVIS_OS_NAME == "windows" ]]; then

gfx/wgpu/Cargo.lock (generated)

@@ -619,8 +619,8 @@ version = "0.2.48"
 source = "registry+https://github.com/rust-lang/crates.io-index"

 [[package]]
-name = "wgpu-native"
-version = "0.4.0"
+name = "wgpu-core"
+version = "0.1.0"
 dependencies = [
  "arrayvec 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -632,10 +632,8 @@ dependencies = [
  "gfx-backend-metal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "gfx-backend-vulkan 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "gfx-hal 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rendy-descriptor 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rendy-memory 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.91 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -643,13 +641,23 @@ dependencies = [
  "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "wgpu-native"
+version = "0.4.0"
+dependencies = [
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "raw-window-handle 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wgpu-core 0.1.0",
+]
+
 [[package]]
 name = "wgpu-remote"
 version = "0.1.0"
 dependencies = [
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wgpu-native 0.4.0",
+ "wgpu-core 0.1.0",
 ]

 [[package]]

gfx/wgpu/Cargo.toml

@@ -1,5 +1,6 @@
 [workspace]
 members = [
+    "wgpu-core",
     "wgpu-native",
     "wgpu-remote",
 ]

gfx/wgpu/Makefile

@@ -8,8 +8,8 @@ BUILD_DIR:=build
 CLEAN_FFI_DIR:=
 CREATE_BUILD_DIR:=

-WILDCARD_WGPU_NATIVE:=$(wildcard wgpu-native/**/*.rs)
-WILDCARD_WGPU_NATIVE_AND_REMOTE:=$(wildcard wgpu-native/**/*.rs wgpu-remote/**/*.rs)
+WILDCARD_WGPU_NATIVE:=$(wildcard wgpu-native/**/*.rs wgpu-core/**/*.rs)
+WILDCARD_WGPU_REMOTE:=$(wildcard wgpu-remote/**/*.rs wgpu-core/**/*.rs)

 ifeq (,$(TARGET))
 	CHECK_TARGET_FLAG=
@@ -30,7 +30,8 @@ endif
 	example-compute example-triangle example-remote \
 	run-example-compute run-example-triangle run-example-remote

-all: example-compute example-triangle example-remote
+#TODO: example-remote
+all: example-compute example-triangle

 check:
 	cargo check --all
@@ -46,15 +47,15 @@ clear:
 	$(CLEAN_FFI_DIR)

 lib-native: Cargo.lock wgpu-native/Cargo.toml $(WILDCARD_WGPU_NATIVE)
-	cargo build --manifest-path wgpu-native/Cargo.toml --features local
+	cargo build --manifest-path wgpu-native/Cargo.toml

-lib-remote: Cargo.lock wgpu-remote/Cargo.toml $(WILDCARD_WGPU_NATIVE_AND_REMOTE)
+lib-remote: Cargo.lock wgpu-remote/Cargo.toml $(WILDCARD_WGPU_REMOTE)
 	cargo build --manifest-path wgpu-remote/Cargo.toml

 $(FFI_DIR)/wgpu.h: wgpu-native/cbindgen.toml $(WILDCARD_WGPU_NATIVE)
 	rustup run nightly cbindgen -o $(FFI_DIR)/wgpu.h wgpu-native

-$(FFI_DIR)/wgpu-remote.h: wgpu-remote/cbindgen.toml $(WILDCARD_WGPU_NATIVE_AND_REMOTE)
+$(FFI_DIR)/wgpu-remote.h: wgpu-remote/cbindgen.toml $(WILDCARD_WGPU_REMOTE)
 	rustup run nightly cbindgen -o $(FFI_DIR)/wgpu-remote.h wgpu-remote

 example-compute: lib-native $(FFI_DIR)/wgpu.h examples/compute/main.c

gfx/wgpu/README.md

@@ -9,9 +9,10 @@ This is an active GitHub mirror of the WebGPU native implementation in Rust, whi
 This is an experimental [WebGPU](https://www.w3.org/community/gpu/) implementation as a native static library. It's written in Rust and is based on [gfx-hal](https://github.com/gfx-rs/gfx) and [Rendy](https://github.com/amethyst/rendy) libraries. The corresponding WebIDL specification can be found at [gpuweb project](https://github.com/gpuweb/gpuweb/blob/master/spec/index.bs).

 The implementation consists of the following parts:
-1. `wgpu-native` - the native implementation of WebGPU as a C API library
-2. `wgpu-remote` - remoting layer to work with WebGPU across the process boundary
-3. `ffi` - the C headers generated by [cbindgen](https://github.com/eqrion/cbindgen) for both of the libraries
+1. `wgpu-core` - internal Rust API for WebGPU implementations to use
+2. `wgpu-native` - the native implementation of WebGPU as a C API library
+3. `wgpu-remote` - remoting layer to work with WebGPU across the process boundary
+4. `ffi` - the C headers generated by [cbindgen](https://github.com/eqrion/cbindgen) for both of the libraries

 ## Supported Platforms
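
To make the new layering concrete: wgpu-core owns the implementation, and wgpu-native is reduced to a C-ABI veneer over it (wgpu-remote plays the same role across a process boundary). A minimal, self-contained Rust sketch of that shape, with toy types rather than the real wgpu-core API:

```rust
// Toy model of the crate split; none of these names are the real API.
mod core {
    // Stand-in for wgpu-core: plain Rust, no FFI concerns.
    pub type AdapterId = u64;

    pub fn request_adapter(_power_preference: u32) -> AdapterId {
        1 // placeholder; the real crate routes this through its hub
    }
}

// Stand-in for wgpu-native: nothing but an extern "C" veneer that
// cbindgen could turn into a C header.
#[no_mangle]
pub extern "C" fn wgpu_request_adapter(power_preference: u32) -> u64 {
    core::request_adapter(power_preference)
}
```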

gfx/wgpu/examples/compute/main.c

@@ -16,9 +16,8 @@
 #define BINDINGS_LENGTH (1)
 #define BIND_GROUP_LAYOUTS_LENGTH (1)

-void request_adapter_callback(WGPUAdapterId const *received, void *userdata) {
-    WGPUAdapterId *id = (WGPUAdapterId*) userdata;
-    *id = *received;
+void request_adapter_callback(WGPUAdapterId received, void *userdata) {
+    *(WGPUAdapterId*)userdata = received;
 }

 void read_buffer_map(
@@ -59,6 +58,7 @@ int main(
     WGPUAdapterId adapter = { 0 };
     wgpu_request_adapter_async(
         NULL,
+        2 | 4 | 8,
         request_adapter_callback,
         (void *) &adapter
     );
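
Two API changes are visible in this hunk: the callback now receives the adapter id by value rather than through a pointer, and the backend mask (the `2 | 4 | 8` literal, which appears to select the Vulkan, Metal, and DX12 bits) moves out of the options struct into its own parameter of `wgpu_request_adapter_async`. A hedged Rust-side mirror of the new callback shape, sketched from the header's `typedef uint64_t` id type:

```rust
use std::ffi::c_void;

type WGPUAdapterId = u64; // ids are opaque 64-bit handles per ffi/wgpu.h

// By-value callback: a single store into userdata replaces the old
// dereference of a `const WGPUAdapterId *received` argument.
extern "C" fn request_adapter_callback(received: WGPUAdapterId, userdata: *mut c_void) {
    unsafe { *(userdata as *mut WGPUAdapterId) = received };
}
```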

gfx/wgpu/examples/remote/main.c

@@ -31,7 +31,6 @@ int main() {
     WGPURequestAdapterOptions options = {
         .power_preference = WGPUPowerPreference_LowPower,
-        .backends = 2 | 4 | 8,
     };
     char index = wgpu_server_instance_request_adapter(server, &options, ids, count);
     if (index < 0) {

gfx/wgpu/examples/triangle/main.c

@@ -36,9 +36,8 @@
 #define RENDER_PASS_ATTACHMENTS_LENGTH (1)
 #define BIND_GROUP_LAYOUTS_LENGTH (1)

-void request_adapter_callback(WGPUAdapterId const *received, void *userdata) {
-    WGPUAdapterId *id = (WGPUAdapterId*) userdata;
-    *id = *received;
+void request_adapter_callback(WGPUAdapterId received, void *userdata) {
+    *(WGPUAdapterId*)userdata = received;
 }

 int main() {
@@ -46,8 +45,8 @@ int main() {
     wgpu_request_adapter_async(
         &(WGPURequestAdapterOptions){
             .power_preference = WGPUPowerPreference_LowPower,
-            .backends = 2 | 4 | 8,
         },
+        2 | 4 | 8,
         request_adapter_callback,
         (void *) &adapter
     );

gfx/wgpu/ffi/wgpu-remote.h

@@ -27,8 +27,6 @@ typedef enum {
 typedef struct WGPUClient WGPUClient;

-typedef struct WGPUGlobal WGPUGlobal;
-
 typedef uint64_t WGPUId_Adapter_Dummy;

 typedef WGPUId_Adapter_Dummy WGPUAdapterId;
@@ -55,11 +53,8 @@ typedef struct {
   WGPULimits limits;
 } WGPUDeviceDescriptor;

-typedef uint32_t WGPUBackendBit;
-
 typedef struct {
   WGPUPowerPreference power_preference;
-  WGPUBackendBit backends;
 } WGPURequestAdapterOptions;

 WGPU_INLINE

gfx/wgpu/ffi/wgpu.h

@@ -19,18 +19,6 @@
 #include <stdint.h>
 #include <stdlib.h>

-#define WGPUDEFAULT_BIND_GROUPS 4
-
-#define WGPUDESIRED_NUM_FRAMES 3
-
-#define WGPUMAX_BIND_GROUPS 4
-
-#define WGPUMAX_COLOR_TARGETS 4
-
-#define WGPUMAX_MIP_LEVELS 16
-
-#define WGPUMAX_VERTEX_BUFFERS 8
-
 typedef enum {
   WGPUAddressMode_ClampToEdge = 0,
   WGPUAddressMode_Repeat = 1,
@@ -655,14 +643,13 @@ typedef uint64_t WGPUId_RenderBundle_Dummy;
 typedef WGPUId_RenderBundle_Dummy WGPURenderBundleId;

-typedef uint32_t WGPUBackendBit;
-
 typedef struct {
   WGPUPowerPreference power_preference;
-  WGPUBackendBit backends;
 } WGPURequestAdapterOptions;

-typedef void (*WGPURequestAdapterCallback)(const WGPUAdapterId *adapter, void *userdata);
+typedef uint32_t WGPUBackendBit;
+
+typedef void (*WGPURequestAdapterCallback)(WGPUAdapterId id, void *userdata);

 typedef struct {
   WGPUTextureViewId view_id;
@@ -678,97 +665,65 @@ typedef struct {
   uint32_t array_layer_count;
 } WGPUTextureViewDescriptor;

-#if defined(WGPU_LOCAL)
 WGPUDeviceId wgpu_adapter_request_device(WGPUAdapterId adapter_id,
                                          const WGPUDeviceDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_bind_group_destroy(WGPUBindGroupId bind_group_id);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_buffer_destroy(WGPUBufferId buffer_id);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_buffer_map_read_async(WGPUBufferId buffer_id,
                                 WGPUBufferAddress start,
                                 WGPUBufferAddress size,
                                 WGPUBufferMapReadCallback callback,
                                 uint8_t *userdata);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_buffer_map_write_async(WGPUBufferId buffer_id,
                                  WGPUBufferAddress start,
                                  WGPUBufferAddress size,
                                  WGPUBufferMapWriteCallback callback,
                                  uint8_t *userdata);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_buffer_unmap(WGPUBufferId buffer_id);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUComputePassId wgpu_command_encoder_begin_compute_pass(WGPUCommandEncoderId encoder_id,
                                                           const WGPUComputePassDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPURenderPassId wgpu_command_encoder_begin_render_pass(WGPUCommandEncoderId encoder_id,
                                                         const WGPURenderPassDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_command_encoder_copy_buffer_to_buffer(WGPUCommandEncoderId command_encoder_id,
                                                 WGPUBufferId source,
                                                 WGPUBufferAddress source_offset,
                                                 WGPUBufferId destination,
                                                 WGPUBufferAddress destination_offset,
                                                 WGPUBufferAddress size);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_command_encoder_copy_buffer_to_texture(WGPUCommandEncoderId command_encoder_id,
                                                  const WGPUBufferCopyView *source,
                                                  const WGPUTextureCopyView *destination,
                                                  WGPUExtent3d copy_size);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_command_encoder_copy_texture_to_buffer(WGPUCommandEncoderId command_encoder_id,
                                                  const WGPUTextureCopyView *source,
                                                  const WGPUBufferCopyView *destination,
                                                  WGPUExtent3d copy_size);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_command_encoder_copy_texture_to_texture(WGPUCommandEncoderId command_encoder_id,
                                                   const WGPUTextureCopyView *source,
                                                   const WGPUTextureCopyView *destination,
                                                   WGPUExtent3d copy_size);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUCommandBufferId wgpu_command_encoder_finish(WGPUCommandEncoderId encoder_id,
                                                 const WGPUCommandBufferDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_compute_pass_dispatch(WGPUComputePassId pass_id, uint32_t x, uint32_t y, uint32_t z);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_compute_pass_dispatch_indirect(WGPUComputePassId pass_id,
                                          WGPUBufferId indirect_buffer_id,
                                          WGPUBufferAddress indirect_offset);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_compute_pass_end_pass(WGPUComputePassId pass_id);
-#endif

 void wgpu_compute_pass_insert_debug_marker(WGPUComputePassId _pass_id, WGPURawString _label);
@@ -776,205 +731,128 @@ void wgpu_compute_pass_pop_debug_group(WGPUComputePassId _pass_id);
 void wgpu_compute_pass_push_debug_group(WGPUComputePassId _pass_id, WGPURawString _label);

-#if defined(WGPU_LOCAL)
 void wgpu_compute_pass_set_bind_group(WGPUComputePassId pass_id,
                                       uint32_t index,
                                       WGPUBindGroupId bind_group_id,
                                       const WGPUBufferAddress *offsets,
                                       uintptr_t offsets_length);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_compute_pass_set_pipeline(WGPUComputePassId pass_id, WGPUComputePipelineId pipeline_id);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUSurfaceId wgpu_create_surface_from_metal_layer(void *layer);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUSurfaceId wgpu_create_surface_from_windows_hwnd(void *_hinstance, void *hwnd);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUSurfaceId wgpu_create_surface_from_xlib(const void **display, uint64_t window);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUBindGroupId wgpu_device_create_bind_group(WGPUDeviceId device_id,
                                               const WGPUBindGroupDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUBindGroupLayoutId wgpu_device_create_bind_group_layout(WGPUDeviceId device_id,
                                                            const WGPUBindGroupLayoutDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUBufferId wgpu_device_create_buffer(WGPUDeviceId device_id, const WGPUBufferDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUBufferId wgpu_device_create_buffer_mapped(WGPUDeviceId device_id,
                                               const WGPUBufferDescriptor *desc,
                                               uint8_t **mapped_ptr_out);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUCommandEncoderId wgpu_device_create_command_encoder(WGPUDeviceId device_id,
                                                         const WGPUCommandEncoderDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUComputePipelineId wgpu_device_create_compute_pipeline(WGPUDeviceId device_id,
                                                           const WGPUComputePipelineDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUPipelineLayoutId wgpu_device_create_pipeline_layout(WGPUDeviceId device_id,
                                                         const WGPUPipelineLayoutDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPURenderPipelineId wgpu_device_create_render_pipeline(WGPUDeviceId device_id,
                                                         const WGPURenderPipelineDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUSamplerId wgpu_device_create_sampler(WGPUDeviceId device_id, const WGPUSamplerDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUShaderModuleId wgpu_device_create_shader_module(WGPUDeviceId device_id,
                                                     const WGPUShaderModuleDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUSwapChainId wgpu_device_create_swap_chain(WGPUDeviceId device_id,
                                               WGPUSurfaceId surface_id,
                                               const WGPUSwapChainDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUTextureId wgpu_device_create_texture(WGPUDeviceId device_id, const WGPUTextureDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_device_destroy(WGPUDeviceId device_id);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_device_get_limits(WGPUDeviceId _device_id, WGPULimits *limits);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUQueueId wgpu_device_get_queue(WGPUDeviceId device_id);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_device_poll(WGPUDeviceId device_id, bool force_wait);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_queue_submit(WGPUQueueId queue_id,
                        const WGPUCommandBufferId *command_buffers,
                        uintptr_t command_buffers_length);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_draw(WGPURenderPassId pass_id,
                            uint32_t vertex_count,
                            uint32_t instance_count,
                            uint32_t first_vertex,
                            uint32_t first_instance);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_draw_indexed(WGPURenderPassId pass_id,
                                    uint32_t index_count,
                                    uint32_t instance_count,
                                    uint32_t first_index,
                                    int32_t base_vertex,
                                    uint32_t first_instance);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_draw_indexed_indirect(WGPURenderPassId pass_id,
                                             WGPUBufferId indirect_buffer_id,
                                             WGPUBufferAddress indirect_offset);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_draw_indirect(WGPURenderPassId pass_id,
                                     WGPUBufferId indirect_buffer_id,
                                     WGPUBufferAddress indirect_offset);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_end_pass(WGPURenderPassId pass_id);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_execute_bundles(WGPURenderPassId _pass_id,
                                       const WGPURenderBundleId *_bundles,
                                       uintptr_t _bundles_length);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_insert_debug_marker(WGPURenderPassId _pass_id, WGPURawString _label);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_pop_debug_group(WGPURenderPassId _pass_id);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_push_debug_group(WGPURenderPassId _pass_id, WGPURawString _label);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_set_bind_group(WGPURenderPassId pass_id,
                                      uint32_t index,
                                      WGPUBindGroupId bind_group_id,
                                      const WGPUBufferAddress *offsets,
                                      uintptr_t offsets_length);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_set_blend_color(WGPURenderPassId pass_id, const WGPUColor *color);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_set_index_buffer(WGPURenderPassId pass_id,
                                        WGPUBufferId buffer_id,
                                        WGPUBufferAddress offset);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_set_pipeline(WGPURenderPassId pass_id, WGPURenderPipelineId pipeline_id);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_set_scissor_rect(WGPURenderPassId pass_id,
                                        uint32_t x,
                                        uint32_t y,
                                        uint32_t w,
                                        uint32_t h);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_set_stencil_reference(WGPURenderPassId pass_id, uint32_t value);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_set_vertex_buffers(WGPURenderPassId pass_id,
                                          uint32_t start_slot,
                                          const WGPUBufferId *buffers,
                                          const WGPUBufferAddress *offsets,
                                          uintptr_t length);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_render_pass_set_viewport(WGPURenderPassId pass_id,
                                    float x,
                                    float y,
@@ -982,35 +860,21 @@ void wgpu_render_pass_set_viewport(WGPURenderPassId pass_id,
                                    float h,
                                    float min_depth,
                                    float max_depth);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_request_adapter_async(const WGPURequestAdapterOptions *desc,
+                                WGPUBackendBit mask,
                                 WGPURequestAdapterCallback callback,
                                 void *userdata);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_sampler_destroy(WGPUSamplerId sampler_id);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUSwapChainOutput wgpu_swap_chain_get_next_texture(WGPUSwapChainId swap_chain_id);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_swap_chain_present(WGPUSwapChainId swap_chain_id);
-#endif

-#if defined(WGPU_LOCAL)
 WGPUTextureViewId wgpu_texture_create_view(WGPUTextureId texture_id,
                                            const WGPUTextureViewDescriptor *desc);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_texture_destroy(WGPUTextureId texture_id);
-#endif

-#if defined(WGPU_LOCAL)
 void wgpu_texture_view_destroy(WGPUTextureViewId texture_view_id);
-#endif

gfx/wgpu/wgpu-core/Cargo.toml (new file)

@@ -0,0 +1,48 @@
[package]
name = "wgpu-core"
version = "0.1.0"
authors = [
"Dzmitry Malyshau <kvark@mozilla.com>",
"Joshua Groves <josh@joshgroves.com>",
]
edition = "2018"
description = "WebGPU core logic on gfx-hal/rendy"
homepage = "https://github.com/gfx-rs/wgpu"
repository = "https://github.com/gfx-rs/wgpu"
keywords = ["graphics"]
license = "MPL-2.0"
[lib]
[features]
default = []
metal-auto-capture = ["gfx-backend-metal/auto-capture"]
#NOTE: glutin feature is not stable, use at your own risk
#glutin = ["gfx-backend-gl/glutin"]
[dependencies]
arrayvec = "0.5"
bitflags = "1.0"
copyless = "0.1"
fxhash = "0.2"
log = "0.4"
hal = { package = "gfx-hal", version = "0.4" }
gfx-backend-empty = { version = "0.4" }
parking_lot = "0.9"
rendy-memory = "0.5"
rendy-descriptor = "0.5"
serde = { version = "1.0", features = ["serde_derive"], optional = true }
smallvec = "0.6"
vec_map = "0.8"
[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies]
gfx-backend-metal = { version = "0.4" }
gfx-backend-vulkan = { version = "0.4", optional = true }
[target.'cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies]
gfx-backend-vulkan = { version = "0.4", features = ["x11"] }
[target.'cfg(windows)'.dependencies]
gfx-backend-dx12 = { version = "0.4.1" }
gfx-backend-dx11 = { version = "0.4" }
gfx-backend-vulkan = { version = "0.4" }
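
The per-target `[target.'cfg(...)'.dependencies]` tables above are how wgpu-core compiles exactly the gfx-hal backends that make sense for each OS. A hedged sketch of the same cfg predicates on the code side (illustrative only; the real crate dispatches through its backend hubs, not a function like this):

```rust
// Each cfg below mirrors one [target.'cfg(...)'.dependencies] table.
#[cfg(any(target_os = "ios", target_os = "macos"))]
fn default_backend() -> &'static str {
    "gfx-backend-metal" // Vulkan is an optional extra on Apple platforms
}

#[cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))]
fn default_backend() -> &'static str {
    "gfx-backend-vulkan" // built with the "x11" feature
}

#[cfg(windows)]
fn default_backend() -> &'static str {
    "gfx-backend-dx12" // dx11 and vulkan are compiled in as well
}

fn main() {
    println!("default backend for this target: {}", default_backend());
}
```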

gfx/wgpu/wgpu-core/src/binding_model.rs

@@ -3,28 +3,23 @@
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

 use crate::{
+    id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId},
     resource::TextureViewDimension,
-    track::TrackerSet,
+    track::{DUMMY_SELECTOR, TrackerSet},
-    BindGroupLayoutId,
     BufferAddress,
-    BufferId,
-    DeviceId,
     LifeGuard,
     RefCount,
-    SamplerId,
     Stored,
-    TextureViewId,
 };

 use arrayvec::ArrayVec;
-use bitflags::bitflags;
 use rendy_descriptor::{DescriptorRanges, DescriptorSet};

 use std::borrow::Borrow;

 pub const MAX_BIND_GROUPS: usize = 4;

-bitflags! {
+bitflags::bitflags! {
     #[repr(transparent)]
     pub struct ShaderStage: u32 {
         const NONE = 0;
@@ -130,3 +125,9 @@ impl<B: hal::Backend> Borrow<RefCount> for BindGroup<B> {
         &self.life_guard.ref_count
     }
 }
+
+impl<B: hal::Backend> Borrow<()> for BindGroup<B> {
+    fn borrow(&self) -> &() {
+        &DUMMY_SELECTOR
+    }
+}
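
The new `Borrow<()>` impl exists for the resource tracker: tracking is generic over a "selector" type describing a sub-resource range, and resources with no sub-ranges (like bind groups) use the unit type, borrowing a shared `DUMMY_SELECTOR`. A self-contained model of the trick with toy types:

```rust
use std::borrow::Borrow;

// Stand-in for wgpu-core's DUMMY_SELECTOR: a static unit value that a
// range-less resource can hand out by reference.
static DUMMY_SELECTOR: () = ();

struct BindGroup; // toy resource without sub-resource ranges

impl Borrow<()> for BindGroup {
    fn borrow(&self) -> &() {
        &DUMMY_SELECTOR
    }
}

// A tracker-like function that only asks for "some selector".
fn selector_of<R: Borrow<S>, S>(res: &R) -> &S {
    res.borrow()
}

fn main() {
    let bg = BindGroup;
    let _: &() = selector_of(&bg); // unit selector = track the whole resource
}
```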

gfx/wgpu/wgpu-core/src/command/allocator.rs

@@ -5,8 +5,8 @@
 use super::CommandBuffer;
 use crate::{
     hub::GfxBackend,
+    id::DeviceId,
     track::TrackerSet,
-    DeviceId,
     Features,
     LifeGuard,
     Stored,
@@ -27,9 +27,7 @@ struct CommandPool<B: hal::Backend> {
 impl<B: hal::Backend> CommandPool<B> {
     fn allocate(&mut self) -> B::CommandBuffer {
         if self.available.is_empty() {
-            let extra = unsafe {
-                self.raw.allocate_vec(20, hal::command::Level::Primary)
-            };
+            let extra = unsafe { self.raw.allocate_vec(20, hal::command::Level::Primary) };
             self.available.extend(extra);
         }
@@ -113,9 +111,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
         let pool = inner.pools.get_mut(&cmd_buf.recorded_thread_id).unwrap();

         if pool.available.is_empty() {
-            let extra = unsafe {
-                pool.raw.allocate_vec(20, hal::command::Level::Primary)
-            };
+            let extra = unsafe { pool.raw.allocate_vec(20, hal::command::Level::Primary) };
             pool.available.extend(extra);
         }
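
Behavior is unchanged by the reformatting above: when a pool's free list is empty, command buffers are allocated in batches of 20 instead of one by one, amortizing the allocation calls. A self-contained model of that batching, with `u32` handles standing in for `B::CommandBuffer`:

```rust
struct Pool {
    available: Vec<u32>, // free list; u32 stands in for B::CommandBuffer
    next: u32,
}

impl Pool {
    fn allocate(&mut self) -> u32 {
        if self.available.is_empty() {
            // Refill in a batch of 20, mirroring allocate_vec(20, ..) above.
            let extra = self.next .. self.next + 20;
            self.next += 20;
            self.available.extend(extra);
        }
        self.available.pop().unwrap()
    }
}

fn main() {
    let mut pool = Pool { available: Vec::new(), next: 0 };
    let first = pool.allocate();
    assert_eq!(pool.available.len(), 19); // one batch minus the returned buffer
    let _ = first;
}
```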

gfx/wgpu/wgpu-core/src/command/bind.rs

@@ -3,12 +3,10 @@
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

 use crate::{
+    binding_model::BindGroup,
     hub::GfxBackend,
-    BindGroup,
-    BindGroupId,
-    BindGroupLayoutId,
+    id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId},
     BufferAddress,
-    PipelineLayoutId,
     Stored,
 };

gfx/wgpu/wgpu-core/src/command/compute.rs (new file)

@@ -0,0 +1,246 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
command::{
bind::{Binder, LayoutChange},
CommandBuffer,
},
device::{all_buffer_stages, BIND_BUFFER_ALIGNMENT},
hub::{GfxBackend, Global, IdentityFilter, Token},
id::{BindGroupId, BufferId, CommandBufferId, ComputePassId, ComputePipelineId},
resource::BufferUsage,
track::{Stitch, TrackerSet},
BufferAddress,
Stored,
};
use hal::{self, command::CommandBuffer as _};
use std::iter;
#[derive(Debug)]
pub struct ComputePass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
binder: Binder,
trackers: TrackerSet,
}
impl<B: hal::Backend> ComputePass<B> {
pub(crate) fn new(
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
trackers: TrackerSet,
max_bind_groups: u32,
) -> Self {
ComputePass {
raw,
cmb_id,
binder: Binder::new(max_bind_groups),
trackers,
}
}
}
// Common routines between render/compute
impl<F: IdentityFilter<ComputePassId>> Global<F> {
pub fn compute_pass_end_pass<B: GfxBackend>(&self, pass_id: ComputePassId) {
let mut token = Token::root();
let hub = B::hub(self);
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let (pass, _) = hub.compute_passes.unregister(pass_id, &mut token);
let cmb = &mut cmb_guard[pass.cmb_id.value];
// There are no transitions to be made: we've already been inserting barriers
// into the parent command buffer while recording this compute pass.
cmb.trackers = pass.trackers;
cmb.raw.push(pass.raw);
}
}
impl<F> Global<F> {
pub fn compute_pass_set_bind_group<B: GfxBackend>(
&self,
pass_id: ComputePassId,
index: u32,
bind_group_id: BindGroupId,
offsets: &[BufferAddress],
) {
let hub = B::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = hub.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let bind_group = pass
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
assert_eq!(bind_group.dynamic_count, offsets.len());
if cfg!(debug_assertions) {
for off in offsets {
assert_eq!(
*off % BIND_BUFFER_ALIGNMENT,
0,
"Misaligned dynamic buffer offset: {} does not align with {}",
off,
BIND_BUFFER_ALIGNMENT
);
}
}
//Note: currently, WebGPU compute passes have synchronization defined
// at a dispatch granularity, so we insert the necessary barriers here.
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
log::trace!(
"Encoding barriers on binding of {:?} in pass {:?}",
bind_group_id,
pass_id
);
CommandBuffer::insert_barriers(
&mut pass.raw,
&mut pass.trackers,
&bind_group.used,
Stitch::Last,
&*buffer_guard,
&*texture_guard,
);
if let Some((pipeline_layout_id, follow_up_sets, follow_up_offsets)) = pass
.binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let bind_groups = iter::once(bind_group.raw.raw())
.chain(follow_up_sets.map(|bg_id| bind_group_guard[bg_id].raw.raw()));
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout_guard[pipeline_layout_id].raw,
index as usize,
bind_groups,
offsets
.iter()
.chain(follow_up_offsets)
.map(|&off| off as hal::command::DescriptorSetOffset),
);
}
};
}
// Compute-specific routines
pub fn compute_pass_dispatch<B: GfxBackend>(
&self,
pass_id: ComputePassId,
x: u32,
y: u32,
z: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.compute_passes.write(&mut token);
unsafe {
pass_guard[pass_id].raw.dispatch([x, y, z]);
}
}
pub fn compute_pass_dispatch_indirect<B: GfxBackend>(
&self,
pass_id: ComputePassId,
indirect_buffer_id: BufferId,
indirect_offset: BufferAddress,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (buffer_guard, _) = hub.buffers.read(&mut token);
let (mut pass_guard, _) = hub.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let (src_buffer, src_pending) = pass.trackers.buffers.use_replace(
&*buffer_guard,
indirect_buffer_id,
(),
BufferUsage::INDIRECT,
);
assert!(src_buffer.usage.contains(BufferUsage::INDIRECT));
let barriers = src_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &src_buffer.raw,
families: None,
range: None .. None,
});
unsafe {
pass.raw.pipeline_barrier(
all_buffer_stages() .. all_buffer_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
pass.raw.dispatch_indirect(&src_buffer.raw, indirect_offset);
}
}
pub fn compute_pass_set_pipeline<B: GfxBackend>(
&self,
pass_id: ComputePassId,
pipeline_id: ComputePipelineId,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = hub.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token);
let pipeline = &pipeline_guard[pipeline_id];
unsafe {
pass.raw.bind_compute_pipeline(&pipeline.raw);
}
// Rebind resources
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
let mut is_compatible = true;
for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
match entry.expect_layout(bgl_id) {
LayoutChange::Match(bg_id, offsets) if is_compatible => {
let desc_set = bind_group_guard[bg_id].raw.raw();
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
offsets.iter().map(|offset| *offset as u32),
);
}
}
LayoutChange::Match(..) | LayoutChange::Unchanged => {}
LayoutChange::Mismatch => {
is_compatible = false;
}
}
}
}
}
}
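
Every method in this file follows the same locking discipline: a `Token::root()` is created once per entry point, and each `hub.<storage>.read/write` call consumes the current token and yields the next one, so storages are always locked in one global order. A self-contained model of the hand-off (toy types; the real `Token` is type-parameterized per storage level, which is what actually makes out-of-order locking fail to compile):

```rust
use std::sync::{Mutex, MutexGuard};

struct Token(()); // toy token; wgpu-core's Token<'a, T> carries a level type

impl Token {
    fn root() -> Token {
        Token(())
    }
}

struct Registry<T>(Mutex<T>);

impl<T> Registry<T> {
    // Takes the current-level token, returns the guard plus the token for
    // the next level down, modeling hub.<storage>.write(&mut token).
    fn write<'a>(&'a self, _token: &mut Token) -> (MutexGuard<'a, T>, Token) {
        (self.0.lock().unwrap(), Token(()))
    }
}

fn main() {
    let devices = Registry(Mutex::new(0u32));
    let buffers = Registry(Mutex::new(0u64));

    let mut token = Token::root();
    let (_devices, mut token) = devices.write(&mut token);
    let (_buffers, _token) = buffers.write(&mut token); // devices before buffers
}
```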

gfx/wgpu/wgpu-core/src/command/mod.rs (new file)

@@ -0,0 +1,756 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
mod allocator;
mod bind;
mod compute;
mod render;
mod transfer;
pub(crate) use self::allocator::CommandAllocator;
pub use self::compute::*;
pub use self::render::*;
pub use self::transfer::*;
use crate::{
conv,
device::{
all_buffer_stages,
all_image_stages,
FramebufferKey,
RenderPassContext,
RenderPassKey,
},
hub::{GfxBackend, Global, IdentityFilter, Storage, Token},
id::{
BufferId,
CommandBufferId,
CommandEncoderId,
ComputePassId,
DeviceId,
RenderPassId,
TextureId,
TextureViewId,
},
resource::{Buffer, Texture, TextureUsage, TextureViewInner},
track::{Stitch, TrackerSet},
Color,
Features,
LifeGuard,
Stored,
};
use arrayvec::ArrayVec;
use hal::{adapter::PhysicalDevice as _, command::CommandBuffer as _, device::Device as _};
use std::{borrow::Borrow, collections::hash_map::Entry, iter, mem, ptr, slice, thread::ThreadId};
pub struct RenderBundle<B: hal::Backend> {
_raw: B::CommandBuffer,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub enum LoadOp {
Clear = 0,
Load = 1,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub enum StoreOp {
Clear = 0,
Store = 1,
}
#[repr(C)]
#[derive(Debug)]
pub struct RenderPassColorAttachmentDescriptor {
pub attachment: TextureViewId,
pub resolve_target: *const TextureViewId,
pub load_op: LoadOp,
pub store_op: StoreOp,
pub clear_color: Color,
}
#[repr(C)]
#[derive(Debug)]
pub struct RenderPassDepthStencilAttachmentDescriptor<T> {
pub attachment: T,
pub depth_load_op: LoadOp,
pub depth_store_op: StoreOp,
pub clear_depth: f32,
pub stencil_load_op: LoadOp,
pub stencil_store_op: StoreOp,
pub clear_stencil: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct RenderPassDescriptor {
pub color_attachments: *const RenderPassColorAttachmentDescriptor,
pub color_attachments_length: usize,
pub depth_stencil_attachment: *const RenderPassDepthStencilAttachmentDescriptor<TextureViewId>,
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
pub struct ComputePassDescriptor {
pub todo: u32,
}
#[derive(Debug)]
pub struct CommandBuffer<B: hal::Backend> {
pub(crate) raw: Vec<B::CommandBuffer>,
is_recording: bool,
recorded_thread_id: ThreadId,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) life_guard: LifeGuard,
pub(crate) trackers: TrackerSet,
pub(crate) used_swap_chain: Option<(Stored<TextureViewId>, B::Framebuffer)>,
pub(crate) features: Features,
}
impl<B: GfxBackend> CommandBuffer<B> {
pub(crate) fn insert_barriers(
raw: &mut B::CommandBuffer,
base: &mut TrackerSet,
head: &TrackerSet,
stitch: Stitch,
buffer_guard: &Storage<Buffer<B>, BufferId>,
texture_guard: &Storage<Texture<B>, TextureId>,
) {
log::trace!("\tstitch {:?}", stitch);
debug_assert_eq!(B::VARIANT, base.backend());
debug_assert_eq!(B::VARIANT, head.backend());
let buffer_barriers = base
.buffers
.merge_replace(&head.buffers, stitch)
.map(|pending| {
log::trace!("\tbuffer -> {:?}", pending);
hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &buffer_guard[pending.id].raw,
range: None .. None,
families: None,
}
});
let texture_barriers = base
.textures
.merge_replace(&head.textures, stitch)
.map(|pending| {
log::trace!("\ttexture -> {:?}", pending);
hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture_guard[pending.id].raw,
range: pending.selector,
families: None,
}
});
base.views.merge_extend(&head.views).unwrap();
base.bind_groups.merge_extend(&head.bind_groups).unwrap();
base.samplers.merge_extend(&head.samplers).unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
raw.pipeline_barrier(
stages .. stages,
hal::memory::Dependencies::empty(),
buffer_barriers.chain(texture_barriers),
);
}
}
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
pub struct CommandEncoderDescriptor {
// MSVC doesn't allow zero-sized structs
// We can remove this when we actually have a field
pub todo: u32,
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
pub struct CommandBufferDescriptor {
pub todo: u32,
}
impl<F> Global<F> {
pub fn command_encoder_finish<B: GfxBackend>(
&self,
encoder_id: CommandEncoderId,
_desc: &CommandBufferDescriptor,
) -> CommandBufferId {
let hub = B::hub(self);
let mut token = Token::root();
//TODO: actually close the last recorded command buffer
let (mut comb_guard, _) = hub.command_buffers.write(&mut token);
let comb = &mut comb_guard[encoder_id];
assert!(comb.is_recording);
comb.is_recording = false;
// stop tracking the swapchain image, if used
if let Some((ref view_id, _)) = comb.used_swap_chain {
comb.trackers.views.remove(view_id.value);
}
encoder_id
}
}
impl<F: IdentityFilter<RenderPassId>> Global<F> {
pub fn command_encoder_begin_render_pass<B: GfxBackend>(
&self,
encoder_id: CommandEncoderId,
desc: &RenderPassDescriptor,
id_in: F::Input,
) -> RenderPassId {
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let device = &device_guard[cmb.device_id.value];
let limits = adapter_guard[device.adapter_id]
.raw
.physical_device
.limits();
let samples_count_limit = limits.framebuffer_color_sample_counts;
let mut current_comb = device.com_allocator.extend(cmb);
unsafe {
current_comb.begin(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
hal::command::CommandBufferInheritanceInfo::default(),
);
}
let pass = {
let (_, mut token) = hub.buffers.read(&mut token); //skip token
let (texture_guard, mut token) = hub.textures.read(&mut token);
let (view_guard, _) = hub.texture_views.read(&mut token);
let mut extent = None;
let mut barriers = Vec::new();
let mut used_swap_chain_image = None::<Stored<TextureViewId>>;
let color_attachments = unsafe {
slice::from_raw_parts(desc.color_attachments, desc.color_attachments_length)
};
let depth_stencil_attachment = unsafe { desc.depth_stencil_attachment.as_ref() };
let sample_count = color_attachments
.get(0)
.map(|at| view_guard[at.attachment].samples)
.unwrap_or(1);
assert!(
sample_count & samples_count_limit != 0,
"Attachment sample_count must be supported by physical device limits"
);
log::trace!(
"Encoding render pass begin in command buffer {:?}",
encoder_id
);
let rp_key = {
let trackers = &mut cmb.trackers;
let depth_stencil = depth_stencil_attachment.map(|at| {
let view = trackers
.views
.use_extend(&*view_guard, at.attachment, (), ())
.unwrap();
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
let texture_id = match view.inner {
TextureViewInner::Native { ref source_id, .. } => source_id.value,
TextureViewInner::SwapChain { .. } => {
panic!("Unexpected depth/stencil use of swapchain image!")
}
};
let texture = &texture_guard[texture_id];
assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT));
let old_layout = match trackers.textures.query(texture_id, view.range.clone()) {
Some(usage) => {
conv::map_texture_state(
usage,
hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL,
)
.1
}
None => {
// Required sub-resources have inconsistent states, we need to
// issue individual barriers instead of relying on the render pass.
let pending = trackers.textures.change_replace(
texture_id,
&texture.life_guard.ref_count,
view.range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
&texture.full_range,
);
barriers.extend(pending.map(|pending| {
log::trace!("\tdepth-stencil {:?}", pending);
hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture.raw,
families: None,
range: pending.selector,
}
}));
hal::image::Layout::DepthStencilAttachmentOptimal
}
};
hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format, device.features)),
samples: view.samples,
ops: conv::map_load_store_ops(at.depth_load_op, at.depth_store_op),
stencil_ops: conv::map_load_store_ops(
at.stencil_load_op,
at.stencil_store_op,
),
layouts: old_layout .. hal::image::Layout::DepthStencilAttachmentOptimal,
}
});
let mut colors = ArrayVec::new();
let mut resolves = ArrayVec::new();
for at in color_attachments {
let view = &view_guard[at.attachment];
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
assert_eq!(
view.samples, sample_count,
"All attachments must have the same sample_count"
);
let first_use = trackers.views.init(
at.attachment,
view.life_guard.ref_count.clone(),
(),
(),
);
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
let texture = &texture_guard[source_id.value];
assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT));
let old_layout = match trackers
.textures
.query(source_id.value, view.range.clone())
{
Some(usage) => {
conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
}
None => {
// Required sub-resources have inconsistent states, we need to
// issue individual barriers instead of relying on the render pass.
let pending = trackers.textures.change_replace(
source_id.value,
&texture.life_guard.ref_count,
view.range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
&texture.full_range,
);
barriers.extend(pending.map(|pending| {
log::trace!("\tcolor {:?}", pending);
hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture.raw,
families: None,
range: pending.selector,
}
}));
hal::image::Layout::ColorAttachmentOptimal
}
};
old_layout .. hal::image::Layout::ColorAttachmentOptimal
}
TextureViewInner::SwapChain { .. } => {
if let Some((ref view_id, _)) = cmb.used_swap_chain {
assert_eq!(view_id.value, at.attachment);
} else {
assert!(used_swap_chain_image.is_none());
used_swap_chain_image = Some(Stored {
value: at.attachment,
ref_count: view.life_guard.ref_count.clone(),
});
}
let end = hal::image::Layout::Present;
let start = if first_use {
hal::image::Layout::Undefined
} else {
end
};
start .. end
}
};
colors.push(hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format, device.features)),
samples: view.samples,
ops: conv::map_load_store_ops(at.load_op, at.store_op),
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
layouts,
});
}
for &resolve_target in color_attachments
.iter()
.flat_map(|at| unsafe { at.resolve_target.as_ref() })
{
let view = &view_guard[resolve_target];
assert_eq!(extent, Some(view.extent));
assert_eq!(
view.samples, 1,
"All resolve_targets must have a sample_count of 1"
);
let first_use = trackers.views.init(
resolve_target,
view.life_guard.ref_count.clone(),
(),
(),
);
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
let texture = &texture_guard[source_id.value];
assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT));
let old_layout = match trackers
.textures
.query(source_id.value, view.range.clone())
{
Some(usage) => {
conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
}
None => {
// Required sub-resources have inconsistent states, we need to
// issue individual barriers instead of relying on the render pass.
let pending = trackers.textures.change_replace(
source_id.value,
&texture.life_guard.ref_count,
view.range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
&texture.full_range,
);
barriers.extend(pending.map(|pending| {
log::trace!("\tresolve {:?}", pending);
hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture.raw,
families: None,
range: pending.selector,
}
}));
hal::image::Layout::ColorAttachmentOptimal
}
};
old_layout .. hal::image::Layout::ColorAttachmentOptimal
}
TextureViewInner::SwapChain { .. } => {
if let Some((ref view_id, _)) = cmb.used_swap_chain {
assert_eq!(view_id.value, resolve_target);
} else {
assert!(used_swap_chain_image.is_none());
used_swap_chain_image = Some(Stored {
value: resolve_target,
ref_count: view.life_guard.ref_count.clone(),
});
}
let end = hal::image::Layout::Present;
let start = if first_use {
hal::image::Layout::Undefined
} else {
end
};
start .. end
}
};
resolves.push(hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format, device.features)),
samples: view.samples,
ops: hal::pass::AttachmentOps::new(
hal::pass::AttachmentLoadOp::DontCare,
hal::pass::AttachmentStoreOp::Store,
),
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
layouts,
});
}
RenderPassKey {
colors,
resolves,
depth_stencil,
}
};
if !barriers.is_empty() {
unsafe {
current_comb.pipeline_barrier(
all_image_stages() .. all_image_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
}
}
let mut render_pass_cache = device.render_passes.lock();
let render_pass = match render_pass_cache.entry(rp_key.clone()) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
let color_ids = [
(0, hal::image::Layout::ColorAttachmentOptimal),
(1, hal::image::Layout::ColorAttachmentOptimal),
(2, hal::image::Layout::ColorAttachmentOptimal),
(3, hal::image::Layout::ColorAttachmentOptimal),
];
let mut resolve_ids = ArrayVec::<[_; crate::device::MAX_COLOR_TARGETS]>::new();
let mut attachment_index = color_attachments.len();
if color_attachments
.iter()
.any(|at| at.resolve_target != ptr::null())
{
for (i, at) in color_attachments.iter().enumerate() {
if at.resolve_target == ptr::null() {
resolve_ids.push((
hal::pass::ATTACHMENT_UNUSED,
hal::image::Layout::ColorAttachmentOptimal,
));
} else {
let sample_count_check =
view_guard[color_attachments[i].attachment].samples;
assert!(sample_count_check > 1, "RenderPassColorAttachmentDescriptor with a resolve_target must have an attachment with sample_count > 1");
resolve_ids.push((
attachment_index,
hal::image::Layout::ColorAttachmentOptimal,
));
attachment_index += 1;
}
}
}
let depth_id = (
attachment_index,
hal::image::Layout::DepthStencilAttachmentOptimal,
);
let subpass = hal::pass::SubpassDesc {
colors: &color_ids[.. color_attachments.len()],
resolves: &resolve_ids,
depth_stencil: depth_stencil_attachment.map(|_| &depth_id),
inputs: &[],
preserves: &[],
};
let pass = unsafe {
device
.raw
.create_render_pass(e.key().all(), &[subpass], &[])
}
.unwrap();
e.insert(pass)
}
};
let mut framebuffer_cache;
let fb_key = FramebufferKey {
colors: color_attachments.iter().map(|at| at.attachment).collect(),
resolves: color_attachments
.iter()
.filter_map(|at| unsafe { at.resolve_target.as_ref() }.cloned())
.collect(),
depth_stencil: depth_stencil_attachment.map(|at| at.attachment),
};
let framebuffer = match used_swap_chain_image.take() {
Some(view_id) => {
assert!(cmb.used_swap_chain.is_none());
// Always create a new framebuffer and delete it after presentation.
let attachments = fb_key.all().map(|&id| match view_guard[id].inner {
TextureViewInner::Native { ref raw, .. } => raw,
TextureViewInner::SwapChain { ref image, .. } => Borrow::borrow(image),
});
let framebuffer = unsafe {
device
.raw
.create_framebuffer(&render_pass, attachments, extent.unwrap())
.unwrap()
};
cmb.used_swap_chain = Some((view_id, framebuffer));
&mut cmb.used_swap_chain.as_mut().unwrap().1
}
None => {
// Cache framebuffers by the device.
framebuffer_cache = device.framebuffers.lock();
match framebuffer_cache.entry(fb_key) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
let fb = {
let attachments =
e.key().all().map(|&id| match view_guard[id].inner {
TextureViewInner::Native { ref raw, .. } => raw,
TextureViewInner::SwapChain { ref image, .. } => {
Borrow::borrow(image)
}
});
unsafe {
device.raw.create_framebuffer(
&render_pass,
attachments,
extent.unwrap(),
)
}
.unwrap()
};
e.insert(fb)
}
}
}
};
let rect = {
let ex = extent.unwrap();
hal::pso::Rect {
x: 0,
y: 0,
w: ex.width as _,
h: ex.height as _,
}
};
let clear_values = color_attachments
.iter()
.zip(&rp_key.colors)
.flat_map(|(at, key)| {
match at.load_op {
LoadOp::Load => None,
LoadOp::Clear => {
use hal::format::ChannelType;
//TODO: validate sign/unsign and normalized ranges of the color values
let value = match key.format.unwrap().base_format().1 {
ChannelType::Unorm
| ChannelType::Snorm
| ChannelType::Ufloat
| ChannelType::Sfloat
| ChannelType::Uscaled
| ChannelType::Sscaled
| ChannelType::Srgb => hal::command::ClearColor {
float32: conv::map_color_f32(&at.clear_color),
},
ChannelType::Sint => hal::command::ClearColor {
sint32: conv::map_color_i32(&at.clear_color),
},
ChannelType::Uint => hal::command::ClearColor {
uint32: conv::map_color_u32(&at.clear_color),
},
};
Some(hal::command::ClearValue { color: value })
}
}
})
.chain(depth_stencil_attachment.and_then(|at| {
match (at.depth_load_op, at.stencil_load_op) {
(LoadOp::Load, LoadOp::Load) => None,
(LoadOp::Clear, _) | (_, LoadOp::Clear) => {
let value = hal::command::ClearDepthStencil {
depth: at.clear_depth,
stencil: at.clear_stencil,
};
Some(hal::command::ClearValue {
depth_stencil: value,
})
}
}
}));
unsafe {
current_comb.begin_render_pass(
render_pass,
framebuffer,
rect,
clear_values,
hal::command::SubpassContents::Inline,
);
current_comb.set_scissors(0, iter::once(&rect));
current_comb.set_viewports(
0,
iter::once(hal::pso::Viewport {
rect,
depth: 0.0 .. 1.0,
}),
);
}
let context = RenderPassContext {
colors: color_attachments
.iter()
.map(|at| view_guard[at.attachment].format)
.collect(),
resolves: color_attachments
.iter()
.filter_map(|at| unsafe { at.resolve_target.as_ref() })
.map(|resolve| view_guard[*resolve].format)
.collect(),
depth_stencil: depth_stencil_attachment.map(|at| view_guard[at.attachment].format),
};
RenderPass::new(
current_comb,
Stored {
value: encoder_id,
ref_count: cmb.life_guard.ref_count.clone(),
},
context,
sample_count,
cmb.features.max_bind_groups,
)
};
hub.render_passes.register_identity(id_in, pass, &mut token)
}
}
impl<F: IdentityFilter<ComputePassId>> Global<F> {
pub fn command_encoder_begin_compute_pass<B: GfxBackend>(
&self,
encoder_id: CommandEncoderId,
_desc: &ComputePassDescriptor,
id_in: F::Input,
) -> ComputePassId {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let raw = cmb.raw.pop().unwrap();
let trackers = mem::replace(&mut cmb.trackers, TrackerSet::new(encoder_id.backend()));
let stored = Stored {
value: encoder_id,
ref_count: cmb.life_guard.ref_count.clone(),
};
let pass = ComputePass::new(raw, stored, trackers, cmb.features.max_bind_groups);
hub.compute_passes
.register_identity(id_in, pass, &mut token)
}
}
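
Aside: every pass-begin path above follows the same token discipline — start from `Token::root()`, then take registry locks in a fixed order, each acquisition yielding a token for the next level down. A minimal standalone sketch of the idea, using simplified stand-in types rather than the actual wgpu-core definitions:

use std::marker::PhantomData;
use std::sync::{RwLock, RwLockReadGuard};

struct Root;

// A token proves which level of the lock hierarchy the caller currently holds.
struct Token<'a, T> {
    _level: PhantomData<&'a mut T>,
}

impl<'a, T> Token<'a, T> {
    fn new() -> Self {
        Token { _level: PhantomData }
    }
}

struct Registry<T> {
    data: RwLock<Vec<T>>,
}

impl<T> Registry<T> {
    // Reading consumes a mutable borrow of the parent token and returns a
    // token for this registry, so locks can only be taken top-down.
    fn read<'a, A>(
        &'a self,
        _parent: &'a mut Token<'_, A>,
    ) -> (RwLockReadGuard<'a, Vec<T>>, Token<'a, T>) {
        (self.data.read().unwrap(), Token::new())
    }
}

struct Device(&'static str);
struct Buffer(u64);

fn main() {
    let devices = Registry { data: RwLock::new(vec![Device("gpu0")]) };
    let buffers = Registry { data: RwLock::new(vec![Buffer(256)]) };

    let mut root = Token::<Root>::new();
    let (device_guard, mut device_token) = devices.read(&mut root);
    // The buffer registry can only be locked while holding the device token,
    // which rules out lock-order inversions at compile time.
    let (buffer_guard, _buffer_token) = buffers.read(&mut device_token);
    println!("{} has a buffer of {} bytes", device_guard[0].0, buffer_guard[0].0);
}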


@ -0,0 +1,673 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
command::{
bind::{Binder, LayoutChange},
CommandBuffer,
},
conv,
device::{RenderPassContext, BIND_BUFFER_ALIGNMENT, MAX_VERTEX_BUFFERS},
hub::{GfxBackend, Global, IdentityFilter, Token},
id::{BindGroupId, BufferId, CommandBufferId, RenderPassId, RenderPipelineId},
pipeline::{IndexFormat, InputStepMode, PipelineFlags},
resource::BufferUsage,
track::{Stitch, TrackerSet},
BufferAddress,
Color,
Stored,
};
use hal::command::CommandBuffer as _;
use std::{iter, ops::Range};
#[derive(Debug, PartialEq)]
enum OptionalState {
Unused,
Required,
Set,
}
impl OptionalState {
fn require(&mut self, require: bool) {
if require && *self == OptionalState::Unused {
*self = OptionalState::Required;
}
}
}
#[derive(Debug, PartialEq)]
enum DrawError {
MissingBlendColor,
MissingStencilReference,
IncompatibleBindGroup {
index: u32,
//expected: BindGroupLayoutId,
//provided: Option<(BindGroupLayoutId, BindGroupId)>,
},
}
#[derive(Debug)]
pub struct IndexState {
bound_buffer_view: Option<(BufferId, Range<BufferAddress>)>,
format: IndexFormat,
limit: u32,
}
impl IndexState {
fn update_limit(&mut self) {
self.limit = match self.bound_buffer_view {
Some((_, ref range)) => {
let shift = match self.format {
IndexFormat::Uint16 => 1,
IndexFormat::Uint32 => 2,
};
((range.end - range.start) >> shift) as u32
}
None => 0,
}
}
}
#[derive(Clone, Copy, Debug)]
pub struct VertexBufferState {
total_size: BufferAddress,
stride: BufferAddress,
rate: InputStepMode,
}
impl VertexBufferState {
const EMPTY: Self = VertexBufferState {
total_size: 0,
stride: 0,
rate: InputStepMode::Vertex,
};
}
#[derive(Debug)]
pub struct VertexState {
inputs: [VertexBufferState; MAX_VERTEX_BUFFERS],
vertex_limit: u32,
instance_limit: u32,
}
impl VertexState {
fn update_limits(&mut self) {
self.vertex_limit = !0;
self.instance_limit = !0;
for vbs in &self.inputs {
if vbs.stride == 0 {
continue;
}
let limit = (vbs.total_size / vbs.stride) as u32;
match vbs.rate {
InputStepMode::Vertex => self.vertex_limit = self.vertex_limit.min(limit),
InputStepMode::Instance => self.instance_limit = self.instance_limit.min(limit),
}
}
}
}
#[derive(Debug)]
pub struct RenderPass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
context: RenderPassContext,
binder: Binder,
trackers: TrackerSet,
blend_color_status: OptionalState,
stencil_reference_status: OptionalState,
index_state: IndexState,
vertex_state: VertexState,
sample_count: u8,
}
impl<B: GfxBackend> RenderPass<B> {
pub(crate) fn new(
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
context: RenderPassContext,
sample_count: u8,
max_bind_groups: u32,
) -> Self {
RenderPass {
raw,
cmb_id,
context,
binder: Binder::new(max_bind_groups),
trackers: TrackerSet::new(B::VARIANT),
blend_color_status: OptionalState::Unused,
stencil_reference_status: OptionalState::Unused,
index_state: IndexState {
bound_buffer_view: None,
format: IndexFormat::Uint16,
limit: 0,
},
vertex_state: VertexState {
inputs: [VertexBufferState::EMPTY; MAX_VERTEX_BUFFERS],
vertex_limit: 0,
instance_limit: 0,
},
sample_count,
}
}
fn is_ready(&self) -> Result<(), DrawError> {
//TODO: vertex buffers
let bind_mask = self.binder.invalid_mask();
if bind_mask != 0 {
//let (expected, provided) = self.binder.entries[index as usize].info();
return Err(DrawError::IncompatibleBindGroup {
index: bind_mask.trailing_zeros() as u32,
});
}
if self.blend_color_status == OptionalState::Required {
return Err(DrawError::MissingBlendColor);
}
if self.stencil_reference_status == OptionalState::Required {
return Err(DrawError::MissingStencilReference);
}
Ok(())
}
}
// Common routines between render/compute
impl<F: IdentityFilter<RenderPassId>> Global<F> {
pub fn render_pass_end_pass<B: GfxBackend>(&self, pass_id: RenderPassId) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let (mut pass, mut token) = hub.render_passes.unregister(pass_id, &mut token);
unsafe {
pass.raw.end_render_pass();
}
pass.trackers.optimize();
let cmb = &mut cmb_guard[pass.cmb_id.value];
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
match cmb.raw.last_mut() {
Some(last) => {
log::trace!("Encoding barriers before pass {:?}", pass_id);
CommandBuffer::insert_barriers(
last,
&mut cmb.trackers,
&pass.trackers,
Stitch::Last,
&*buffer_guard,
&*texture_guard,
);
unsafe { last.finish() };
}
None => {
cmb.trackers.merge_extend(&pass.trackers);
}
}
cmb.raw.push(pass.raw);
}
pub fn render_pass_set_bind_group<B: GfxBackend>(
&self,
pass_id: RenderPassId,
index: u32,
bind_group_id: BindGroupId,
offsets: &[BufferAddress],
) {
let hub = B::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let bind_group = pass
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
assert_eq!(bind_group.dynamic_count, offsets.len());
if cfg!(debug_assertions) {
for off in offsets {
assert_eq!(
*off % BIND_BUFFER_ALIGNMENT,
0,
"Misaligned dynamic buffer offset: {} does not align with {}",
off,
BIND_BUFFER_ALIGNMENT
);
}
}
pass.trackers.merge_extend(&bind_group.used);
if let Some((pipeline_layout_id, follow_up_sets, follow_up_offsets)) = pass
.binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let bind_groups = iter::once(bind_group.raw.raw())
.chain(follow_up_sets.map(|bg_id| bind_group_guard[bg_id].raw.raw()));
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&&pipeline_layout_guard[pipeline_layout_id].raw,
index as usize,
bind_groups,
offsets
.iter()
.chain(follow_up_offsets)
.map(|&off| off as hal::command::DescriptorSetOffset),
);
}
};
}
// Render-specific routines
pub fn render_pass_set_index_buffer<B: GfxBackend>(
&self,
pass_id: RenderPassId,
buffer_id: BufferId,
offset: BufferAddress,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
let buffer = pass
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDEX));
let range = offset .. buffer.size;
pass.index_state.bound_buffer_view = Some((buffer_id, range));
pass.index_state.update_limit();
let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
offset,
index_type: conv::map_index_format(pass.index_state.format),
};
unsafe {
pass.raw.bind_index_buffer(view);
}
}
pub fn render_pass_set_vertex_buffers<B: GfxBackend>(
&self,
pass_id: RenderPassId,
start_slot: u32,
buffers: &[BufferId],
offsets: &[BufferAddress],
) {
let hub = B::hub(self);
let mut token = Token::root();
assert_eq!(buffers.len(), offsets.len());
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
for (vbs, (&id, &offset)) in pass.vertex_state.inputs[start_slot as usize ..]
.iter_mut()
.zip(buffers.iter().zip(offsets))
{
let buffer = pass
.trackers
.buffers
.use_extend(&*buffer_guard, id, (), BufferUsage::VERTEX)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::VERTEX));
vbs.total_size = buffer.size - offset;
}
pass.vertex_state.update_limits();
let buffers = buffers
.iter()
.map(|&id| &buffer_guard[id].raw)
.zip(offsets.iter().cloned());
unsafe {
pass.raw.bind_vertex_buffers(start_slot, buffers);
}
}
pub fn render_pass_draw<B: GfxBackend>(
&self,
pass_id: RenderPassId,
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
assert!(
first_vertex + vertex_count <= pass.vertex_state.vertex_limit,
"Vertex out of range!"
);
assert!(
first_instance + instance_count <= pass.vertex_state.instance_limit,
"Instance out of range!"
);
unsafe {
pass.raw.draw(
first_vertex .. first_vertex + vertex_count,
first_instance .. first_instance + instance_count,
);
}
}
pub fn render_pass_draw_indirect<B: GfxBackend>(
&self,
pass_id: RenderPassId,
indirect_buffer_id: BufferId,
indirect_offset: BufferAddress,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
let buffer = pass
.trackers
.buffers
.use_extend(
&*buffer_guard,
indirect_buffer_id,
(),
BufferUsage::INDIRECT,
)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDIRECT));
unsafe {
pass.raw.draw_indirect(&buffer.raw, indirect_offset, 1, 0);
}
}
pub fn render_pass_draw_indexed<B: GfxBackend>(
&self,
pass_id: RenderPassId,
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
//TODO: validate that base_vertex + max_index() is within the provided range
assert!(
first_index + index_count <= pass.index_state.limit,
"Index out of range!"
);
assert!(
first_instance + instance_count <= pass.vertex_state.instance_limit,
"Instance out of range!"
);
unsafe {
pass.raw.draw_indexed(
first_index .. first_index + index_count,
base_vertex,
first_instance .. first_instance + instance_count,
);
}
}
pub fn render_pass_draw_indexed_indirect<B: GfxBackend>(
&self,
pass_id: RenderPassId,
indirect_buffer_id: BufferId,
indirect_offset: BufferAddress,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
let buffer = pass
.trackers
.buffers
.use_extend(
&*buffer_guard,
indirect_buffer_id,
(),
BufferUsage::INDIRECT,
)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDIRECT));
unsafe {
pass.raw
.draw_indexed_indirect(&buffer.raw, indirect_offset, 1, 0);
}
}
pub fn render_pass_set_pipeline<B: GfxBackend>(
&self,
pass_id: RenderPassId,
pipeline_id: RenderPipelineId,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
let pipeline = &pipeline_guard[pipeline_id];
assert!(
pass.context.compatible(&pipeline.pass_context),
"The render pipeline is not compatible with the pass!"
);
assert_eq!(
pipeline.sample_count, pass.sample_count,
"The render pipeline and renderpass have mismatching sample_count"
);
pass.blend_color_status
.require(pipeline.flags.contains(PipelineFlags::BLEND_COLOR));
pass.stencil_reference_status
.require(pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE));
unsafe {
pass.raw.bind_graphics_pipeline(&pipeline.raw);
}
// Rebind resource
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
let mut is_compatible = true;
for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
match entry.expect_layout(bgl_id) {
LayoutChange::Match(bg_id, offsets) if is_compatible => {
let desc_set = bind_group_guard[bg_id].raw.raw();
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
offsets.iter().map(|offset| *offset as u32),
);
}
}
LayoutChange::Match(..) | LayoutChange::Unchanged => {}
LayoutChange::Mismatch => {
is_compatible = false;
}
}
}
}
// Rebind index buffer if the index format has changed with the pipeline switch
if pass.index_state.format != pipeline.index_format {
pass.index_state.format = pipeline.index_format;
pass.index_state.update_limit();
if let Some((buffer_id, ref range)) = pass.index_state.bound_buffer_view {
let (buffer_guard, _) = hub.buffers.read(&mut token);
let buffer = pass
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX)
.unwrap();
let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
offset: range.start,
index_type: conv::map_index_format(pass.index_state.format),
};
unsafe {
pass.raw.bind_index_buffer(view);
}
}
}
// Update vertex buffer limits
for (vbs, &(stride, rate)) in pass
.vertex_state
.inputs
.iter_mut()
.zip(&pipeline.vertex_strides)
{
vbs.stride = stride;
vbs.rate = rate;
}
for vbs in pass.vertex_state.inputs[pipeline.vertex_strides.len() ..].iter_mut() {
vbs.stride = 0;
vbs.rate = InputStepMode::Vertex;
}
pass.vertex_state.update_limits();
}
pub fn render_pass_set_blend_color<B: GfxBackend>(&self, pass_id: RenderPassId, color: &Color) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.blend_color_status = OptionalState::Set;
unsafe {
pass.raw.set_blend_constants(conv::map_color_f32(color));
}
}
pub fn render_pass_set_stencil_reference<B: GfxBackend>(
&self,
pass_id: RenderPassId,
value: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.stencil_reference_status = OptionalState::Set;
unsafe {
pass.raw.set_stencil_reference(hal::pso::Face::all(), value);
}
}
pub fn render_pass_set_viewport<B: GfxBackend>(
&self,
pass_id: RenderPassId,
x: f32,
y: f32,
w: f32,
h: f32,
min_depth: f32,
max_depth: f32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
unsafe {
use std::convert::TryFrom;
use std::i16;
pass.raw.set_viewports(
0,
&[hal::pso::Viewport {
rect: hal::pso::Rect {
x: i16::try_from(x.round() as i64).unwrap_or(0),
y: i16::try_from(y.round() as i64).unwrap_or(0),
w: i16::try_from(w.round() as i64).unwrap_or(i16::MAX),
h: i16::try_from(h.round() as i64).unwrap_or(i16::MAX),
},
depth: min_depth .. max_depth,
}],
);
}
}
pub fn render_pass_set_scissor_rect<B: GfxBackend>(
&self,
pass_id: RenderPassId,
x: u32,
y: u32,
w: u32,
h: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
unsafe {
use std::convert::TryFrom;
use std::i16;
pass.raw.set_scissors(
0,
&[hal::pso::Rect {
x: i16::try_from(x).unwrap_or(0),
y: i16::try_from(y).unwrap_or(0),
w: i16::try_from(w).unwrap_or(i16::MAX),
h: i16::try_from(h).unwrap_or(i16::MAX),
}],
);
}
}
}
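
The draw entry points above validate every draw against vertex and instance limits derived from the bound buffers. A standalone sketch of the same bounds arithmetic, with illustrative sizes and without the wgpu-core types:

// (total_size, stride, per_instance) for each bound vertex buffer.
const INPUTS: [(u64, u64, bool); 2] = [
    (256, 16, false), // per-vertex data: 256 / 16 = 16 vertices
    (64, 8, true),    // per-instance data: 64 / 8 = 8 instances
];

fn main() {
    let mut vertex_limit = u32::MAX;
    let mut instance_limit = u32::MAX;
    for &(total_size, stride, per_instance) in &INPUTS {
        if stride == 0 {
            continue; // a zero stride places no limit on the draw
        }
        let limit = (total_size / stride) as u32;
        if per_instance {
            instance_limit = instance_limit.min(limit);
        } else {
            vertex_limit = vertex_limit.min(limit);
        }
    }
    assert_eq!((vertex_limit, instance_limit), (16, 8));

    // The same check the pass performs before recording a draw:
    let (first_vertex, vertex_count) = (4u32, 12u32);
    assert!(first_vertex + vertex_count <= vertex_limit, "Vertex out of range!");
}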


@ -0,0 +1,355 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
conv,
device::{all_buffer_stages, all_image_stages},
hub::{GfxBackend, Global, Token},
id::{BufferId, CommandEncoderId, TextureId},
resource::{BufferUsage, TextureUsage},
BufferAddress,
Extent3d,
Origin3d,
};
use hal::command::CommandBuffer as _;
use std::iter;
const BITS_PER_BYTE: u32 = 8;
#[repr(C)]
#[derive(Debug)]
pub struct BufferCopyView {
pub buffer: BufferId,
pub offset: BufferAddress,
pub row_pitch: u32,
pub image_height: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct TextureCopyView {
pub texture: TextureId,
pub mip_level: u32,
pub array_layer: u32,
pub origin: Origin3d,
}
impl TextureCopyView {
//TODO: we currently access each texture twice for a transfer,
// once only to get the aspect flags, which is unfortunate.
fn to_selector(&self, aspects: hal::format::Aspects) -> hal::image::SubresourceRange {
let level = self.mip_level as hal::image::Level;
let layer = self.array_layer as hal::image::Layer;
hal::image::SubresourceRange {
aspects,
levels: level .. level + 1,
layers: layer .. layer + 1,
}
}
fn to_sub_layers(&self, aspects: hal::format::Aspects) -> hal::image::SubresourceLayers {
let layer = self.array_layer as hal::image::Layer;
hal::image::SubresourceLayers {
aspects,
level: self.mip_level as hal::image::Level,
layers: layer .. layer + 1,
}
}
}
impl<F> Global<F> {
pub fn command_encoder_copy_buffer_to_buffer<B: GfxBackend>(
&self,
command_encoder_id: CommandEncoderId,
source: BufferId,
source_offset: BufferAddress,
destination: BufferId,
destination_offset: BufferAddress,
size: BufferAddress,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (buffer_guard, _) = hub.buffers.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the buffer tracker mutably...
let mut barriers = Vec::new();
let (src_buffer, src_pending) =
cmb.trackers
.buffers
.use_replace(&*buffer_guard, source, (), BufferUsage::COPY_SRC);
assert!(src_buffer.usage.contains(BufferUsage::COPY_SRC));
barriers.extend(src_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &src_buffer.raw,
families: None,
range: None .. None,
}));
let (dst_buffer, dst_pending) = cmb.trackers.buffers.use_replace(
&*buffer_guard,
destination,
(),
BufferUsage::COPY_DST,
);
assert!(dst_buffer.usage.contains(BufferUsage::COPY_DST));
barriers.extend(dst_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &dst_buffer.raw,
families: None,
range: None .. None,
}));
let region = hal::command::BufferCopy {
src: source_offset,
dst: destination_offset,
size,
};
let cmb_raw = cmb.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_buffer_stages() .. all_buffer_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
cmb_raw.copy_buffer(&src_buffer.raw, &dst_buffer.raw, iter::once(region));
}
}
pub fn command_encoder_copy_buffer_to_texture<B: GfxBackend>(
&self,
command_encoder_id: CommandEncoderId,
source: &BufferCopyView,
destination: &TextureCopyView,
copy_size: Extent3d,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
let aspects = texture_guard[destination.texture].full_range.aspects;
let (src_buffer, src_pending) = cmb.trackers.buffers.use_replace(
&*buffer_guard,
source.buffer,
(),
BufferUsage::COPY_SRC,
);
assert!(src_buffer.usage.contains(BufferUsage::COPY_SRC));
let src_barriers = src_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &src_buffer.raw,
families: None,
range: None .. None,
});
let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
destination.texture,
destination.to_selector(aspects),
TextureUsage::COPY_DST,
);
assert!(dst_texture.usage.contains(TextureUsage::COPY_DST));
let dst_barriers = dst_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &dst_texture.raw,
families: None,
range: pending.selector,
});
let aspects = dst_texture.full_range.aspects;
let bytes_per_texel = conv::map_texture_format(dst_texture.format, cmb.features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
let buffer_width = source.row_pitch / bytes_per_texel;
assert_eq!(source.row_pitch % bytes_per_texel, 0);
let region = hal::command::BufferImageCopy {
buffer_offset: source.offset,
buffer_width,
buffer_height: source.image_height,
image_layers: destination.to_sub_layers(aspects),
image_offset: conv::map_origin(destination.origin),
image_extent: conv::map_extent(copy_size),
};
let cmb_raw = cmb.raw.last_mut().unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
cmb_raw.pipeline_barrier(
stages .. stages,
hal::memory::Dependencies::empty(),
src_barriers.chain(dst_barriers),
);
cmb_raw.copy_buffer_to_image(
&src_buffer.raw,
&dst_texture.raw,
hal::image::Layout::TransferDstOptimal,
iter::once(region),
);
}
}
pub fn command_encoder_copy_texture_to_buffer<B: GfxBackend>(
&self,
command_encoder_id: CommandEncoderId,
source: &TextureCopyView,
destination: &BufferCopyView,
copy_size: Extent3d,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
let aspects = texture_guard[source.texture].full_range.aspects;
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
source.texture,
source.to_selector(aspects),
TextureUsage::COPY_SRC,
);
assert!(src_texture.usage.contains(TextureUsage::COPY_SRC));
let src_barriers = src_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &src_texture.raw,
families: None,
range: pending.selector,
});
let (dst_buffer, dst_pending) = cmb.trackers.buffers.use_replace(
&*buffer_guard,
destination.buffer,
(),
BufferUsage::COPY_DST,
);
assert!(dst_buffer.usage.contains(BufferUsage::COPY_DST));
let dst_barrier = dst_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &dst_buffer.raw,
families: None,
range: None .. None,
});
let aspects = src_texture.full_range.aspects;
let bytes_per_texel = conv::map_texture_format(src_texture.format, cmb.features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
let buffer_width = destination.row_pitch / bytes_per_texel;
assert_eq!(destination.row_pitch % bytes_per_texel, 0);
let region = hal::command::BufferImageCopy {
buffer_offset: destination.offset,
buffer_width,
buffer_height: destination.image_height,
image_layers: source.to_sub_layers(aspects),
image_offset: conv::map_origin(source.origin),
image_extent: conv::map_extent(copy_size),
};
let cmb_raw = cmb.raw.last_mut().unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
cmb_raw.pipeline_barrier(
stages .. stages,
hal::memory::Dependencies::empty(),
src_barriers.chain(dst_barrier),
);
cmb_raw.copy_image_to_buffer(
&src_texture.raw,
hal::image::Layout::TransferSrcOptimal,
&dst_buffer.raw,
iter::once(region),
);
}
}
pub fn command_encoder_copy_texture_to_texture<B: GfxBackend>(
&self,
command_encoder_id: CommandEncoderId,
source: &TextureCopyView,
destination: &TextureCopyView,
copy_size: Extent3d,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (_, mut token) = hub.buffers.read(&mut token); // skip token
let (texture_guard, _) = hub.textures.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the texture tracker mutably...
let mut barriers = Vec::new();
let aspects = texture_guard[source.texture].full_range.aspects
& texture_guard[destination.texture].full_range.aspects;
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
source.texture,
source.to_selector(aspects),
TextureUsage::COPY_SRC,
);
assert!(src_texture.usage.contains(TextureUsage::COPY_SRC));
barriers.extend(src_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &src_texture.raw,
families: None,
range: pending.selector,
}));
let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
destination.texture,
destination.to_selector(aspects),
TextureUsage::COPY_DST,
);
assert!(dst_texture.usage.contains(TextureUsage::COPY_DST));
barriers.extend(dst_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &dst_texture.raw,
families: None,
range: pending.selector,
}));
let aspects = src_texture.full_range.aspects & dst_texture.full_range.aspects;
let region = hal::command::ImageCopy {
src_subresource: source.to_sub_layers(aspects),
src_offset: conv::map_origin(source.origin),
dst_subresource: destination.to_sub_layers(aspects),
dst_offset: conv::map_origin(destination.origin),
extent: conv::map_extent(copy_size),
};
let cmb_raw = cmb.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_image_stages() .. all_image_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
cmb_raw.copy_image(
&src_texture.raw,
hal::image::Layout::TransferSrcOptimal,
&dst_texture.raw,
hal::image::Layout::TransferDstOptimal,
iter::once(region),
);
}
}
}
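
Both buffer↔texture copies above convert the byte `row_pitch` into a `buffer_width` in texels and require the pitch to be an exact multiple of the texel size. A standalone sketch of that arithmetic (the format sizes are illustrative):

const BITS_PER_BYTE: u32 = 8;

// Width of one buffer row in texels, given the format's size in bits.
fn buffer_width_in_texels(row_pitch: u32, texel_bits: u32) -> u32 {
    let bytes_per_texel = texel_bits / BITS_PER_BYTE;
    assert_eq!(
        row_pitch % bytes_per_texel,
        0,
        "row_pitch must be a multiple of the texel size"
    );
    row_pitch / bytes_per_texel
}

fn main() {
    // A 32-bit format (e.g. Rgba8Unorm): a 1024-byte row holds 256 texels.
    assert_eq!(buffer_width_in_texels(1024, 32), 256);
    // A 64-bit format (e.g. Rgba16Float): the same row holds 128 texels.
    assert_eq!(buffer_width_in_texels(1024, 64), 128);
}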


@ -138,7 +138,9 @@ pub fn map_extent(extent: Extent3d) -> hal::image::Extent {
    }
}

-pub fn map_primitive_topology(primitive_topology: pipeline::PrimitiveTopology) -> hal::pso::Primitive {
+pub fn map_primitive_topology(
+    primitive_topology: pipeline::PrimitiveTopology,
+) -> hal::pso::Primitive {
    use crate::pipeline::PrimitiveTopology as Pt;
    use hal::pso::Primitive as H;
    match primitive_topology {

The diff for this file is not shown because of its large size.


@ -4,96 +4,77 @@
use crate::{
    backend,
-    id::{Input, Output},
-    Adapter,
-    AdapterId,
+    binding_model::{BindGroup, BindGroupLayout, PipelineLayout},
+    command::{CommandBuffer, ComputePass, RenderPass},
+    device::{Device, ShaderModule},
+    id::{
+        AdapterId,
+        BindGroupId,
+        BindGroupLayoutId,
+        BufferId,
+        CommandBufferId,
+        ComputePassId,
+        ComputePipelineId,
+        DeviceId,
+        PipelineLayoutId,
+        RenderPassId,
+        RenderPipelineId,
+        SamplerId,
+        ShaderModuleId,
+        SurfaceId,
+        SwapChainId,
+        TextureId,
+        TextureViewId,
+        TypedId,
+    },
+    instance::{Adapter, Instance, Surface},
+    pipeline::{ComputePipeline, RenderPipeline},
+    resource::{Buffer, Sampler, Texture, TextureView},
+    swap_chain::SwapChain,
    Backend,
-    BindGroup,
-    BindGroupId,
-    BindGroupLayout,
-    BindGroupLayoutId,
-    Buffer,
-    BufferId,
-    CommandBuffer,
-    CommandBufferId,
-    ComputePass,
-    ComputePassId,
-    ComputePipeline,
-    ComputePipelineId,
-    Device,
-    DeviceId,
    Epoch,
    Index,
-    Instance,
-    PipelineLayout,
-    PipelineLayoutId,
-    RenderPass,
-    RenderPassId,
-    RenderPipeline,
-    RenderPipelineId,
-    Sampler,
-    SamplerId,
-    ShaderModule,
-    ShaderModuleId,
-    Surface,
-    SurfaceId,
-    SwapChain,
-    SwapChainId,
-    Texture,
-    TextureId,
-    TextureView,
-    TextureViewId,
-    TypedId,
};

-#[cfg(feature = "local")]
-use parking_lot::Mutex;
-use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use vec_map::VecMap;

#[cfg(debug_assertions)]
use std::cell::Cell;
-#[cfg(feature = "local")]
-use std::sync::Arc;
use std::{marker::PhantomData, ops};

/// A simple structure to manage identities of objects.
#[derive(Debug)]
-pub struct IdentityManager<I: TypedId> {
+pub struct IdentityManager {
    free: Vec<Index>,
    epochs: Vec<Epoch>,
-    backend: Backend,
-    phantom: PhantomData<I>,
}

-impl<I: TypedId> IdentityManager<I> {
-    pub fn new(backend: Backend) -> Self {
+impl Default for IdentityManager {
+    fn default() -> Self {
        IdentityManager {
            free: Default::default(),
            epochs: Default::default(),
-            backend,
-            phantom: PhantomData,
        }
    }
}

-impl<I: TypedId> IdentityManager<I> {
-    pub fn alloc(&mut self) -> I {
+impl IdentityManager {
+    pub fn alloc<I: TypedId>(&mut self, backend: Backend) -> I {
        match self.free.pop() {
-            Some(index) => I::zip(index, self.epochs[index as usize], self.backend),
+            Some(index) => I::zip(index, self.epochs[index as usize], backend),
            None => {
                let epoch = 1;
-                let id = I::zip(self.epochs.len() as Index, epoch, self.backend);
+                let id = I::zip(self.epochs.len() as Index, epoch, backend);
                self.epochs.push(epoch);
                id
            }
        }
    }

-    pub fn free(&mut self, id: I) {
-        let (index, epoch, backend) = id.unzip();
-        debug_assert_eq!(backend, self.backend);
+    pub fn free<I: TypedId>(&mut self, id: I) {
+        let (index, epoch, _backend) = id.unzip();
        // avoid doing this check in release
        if cfg!(debug_assertions) {
            assert!(!self.free.contains(&index));
@ -271,19 +252,57 @@ impl<'a, T> Drop for Token<'a, T> {
}
pub trait IdentityFilter<I> {
type Input: Clone;
fn process(&self, id: Self::Input, backend: Backend) -> I;
fn free(&self, id: I);
}
impl<I: TypedId + Clone> IdentityFilter<I> for () {
type Input = I;
fn process(&self, id: I, _backend: Backend) -> I {
//debug_assert_eq!(id.unzip().2, backend);
id
}
fn free(&self, _id: I) {}
}
impl<I: TypedId> IdentityFilter<I> for Mutex<IdentityManager> {
type Input = PhantomData<I>;
fn process(&self, _id: Self::Input, backend: Backend) -> I {
self.lock().alloc(backend)
}
fn free(&self, id: I) {
self.lock().free(id)
}
}
/// Compound trait for all the things a device cares about
/// when it comes to destruction/cleanup.
pub trait AllIdentityFilter:
IdentityFilter<BufferId>
+ IdentityFilter<TextureId>
+ IdentityFilter<TextureViewId>
+ IdentityFilter<BindGroupId>
+ IdentityFilter<SamplerId>
{
}
impl AllIdentityFilter for Mutex<IdentityManager> {}
impl AllIdentityFilter for () {}
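
A standalone sketch of how the two filters divide the work, using a simplified `(index, epoch)` pair instead of the real typed ids (the actual trait is generic over id types and also threads the backend through):

use std::sync::Mutex;

type Id = (u32, u32); // (index, epoch)

#[derive(Default)]
struct IdentityManager {
    free: Vec<u32>,
    epochs: Vec<u32>,
}

impl IdentityManager {
    fn alloc(&mut self) -> Id {
        match self.free.pop() {
            // Reuse a freed index under its bumped epoch.
            Some(index) => (index, self.epochs[index as usize]),
            None => {
                self.epochs.push(1);
                (self.epochs.len() as u32 - 1, 1)
            }
        }
    }
    fn free(&mut self, (index, epoch): Id) {
        // Bump the epoch so stale copies of this id can be detected.
        assert_eq!(self.epochs[index as usize], epoch);
        self.epochs[index as usize] += 1;
        self.free.push(index);
    }
}

trait IdentityFilter {
    type Input;
    fn process(&self, id: Self::Input) -> Id;
}

impl IdentityFilter for () {
    type Input = Id; // the caller (e.g. a remote process) supplies the id
    fn process(&self, id: Id) -> Id {
        id
    }
}

impl IdentityFilter for Mutex<IdentityManager> {
    type Input = (); // ids are allocated locally
    fn process(&self, _: ()) -> Id {
        self.lock().unwrap().alloc()
    }
}

fn main() {
    let local = Mutex::new(IdentityManager::default());
    let a: Id = local.process(());
    local.lock().unwrap().free(a);
    let b: Id = local.process(());
    assert_eq!(b, (0, 2)); // same slot, newer epoch
}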
#[derive(Debug)]
-pub struct Registry<T, I: TypedId> {
-    #[cfg(feature = "local")]
-    pub identity: Mutex<IdentityManager<I>>,
+pub struct Registry<T, I: TypedId, F> {
+    pub(crate) identity: F,
    data: RwLock<Storage<T, I>>,
    backend: Backend,
}

-impl<T, I: TypedId> Registry<T, I> {
+impl<T, I: TypedId, F: Default> Registry<T, I, F> {
    fn new(backend: Backend) -> Self {
        Registry {
-            #[cfg(feature = "local")]
-            identity: Mutex::new(IdentityManager::new(backend)),
+            identity: F::default(),
            data: RwLock::new(Storage {
                map: VecMap::new(),
                _phantom: PhantomData,
@ -293,44 +312,13 @@ impl<T, I: TypedId> Registry<T, I> {
    }
}

-impl<T, I: TypedId + Copy> Registry<T, I> {
+impl<T, I: TypedId + Copy, F> Registry<T, I, F> {
    pub fn register<A: Access<T>>(&self, id: I, value: T, _token: &mut Token<A>) {
        debug_assert_eq!(id.unzip().2, self.backend);
        let old = self.data.write().insert(id, value);
        assert!(old.is_none());
    }

-    #[cfg(feature = "local")]
-    pub fn new_identity(&self, _id_in: Input<I>) -> (I, Output<I>) {
-        let id = self.identity.lock().alloc();
-        (id, id)
-    }
-
-    #[cfg(not(feature = "local"))]
-    pub fn new_identity(&self, id_in: Input<I>) -> (I, Output<I>) {
-        //TODO: debug_assert_eq!(self.backend, id_in.backend());
-        (id_in, PhantomData)
-    }
-
-    pub fn register_identity<A: Access<T>>(
-        &self,
-        id_in: Input<I>,
-        value: T,
-        token: &mut Token<A>,
-    ) -> Output<I> {
-        let (id, output) = self.new_identity(id_in);
-        self.register(id, value, token);
-        output
-    }
-
-    pub fn unregister<A: Access<T>>(&self, id: I, _token: &mut Token<A>) -> (T, Token<T>) {
-        let value = self.data.write().remove(id).unwrap();
-        //Note: careful about the order here!
-        #[cfg(feature = "local")]
-        self.identity.lock().free(id);
-        (value, Token::new())
-    }
-
    pub fn read<A: Access<T>>(
        &self,
        _token: &mut Token<A>,
@ -346,27 +334,47 @@ impl<T, I: TypedId + Copy> Registry<T, I> {
    }
}

+impl<T, I: TypedId + Copy, F: IdentityFilter<I>> Registry<T, I, F> {
+    pub fn register_identity<A: Access<T>>(
+        &self,
+        id_in: F::Input,
+        value: T,
+        token: &mut Token<A>,
+    ) -> I {
+        let id = self.identity.process(id_in, self.backend);
+        self.register(id, value, token);
+        id
+    }
+
+    pub fn unregister<A: Access<T>>(&self, id: I, _token: &mut Token<A>) -> (T, Token<T>) {
+        let value = self.data.write().remove(id).unwrap();
+        //Note: careful about the order here!
+        self.identity.free(id);
+        (value, Token::new())
+    }
+}
+
#[derive(Debug)]
-pub struct Hub<B: hal::Backend> {
-    pub adapters: Registry<Adapter<B>, AdapterId>,
-    pub devices: Registry<Device<B>, DeviceId>,
-    pub swap_chains: Registry<SwapChain<B>, SwapChainId>,
-    pub pipeline_layouts: Registry<PipelineLayout<B>, PipelineLayoutId>,
-    pub shader_modules: Registry<ShaderModule<B>, ShaderModuleId>,
-    pub bind_group_layouts: Registry<BindGroupLayout<B>, BindGroupLayoutId>,
-    pub bind_groups: Registry<BindGroup<B>, BindGroupId>,
-    pub command_buffers: Registry<CommandBuffer<B>, CommandBufferId>,
-    pub render_passes: Registry<RenderPass<B>, RenderPassId>,
-    pub render_pipelines: Registry<RenderPipeline<B>, RenderPipelineId>,
-    pub compute_passes: Registry<ComputePass<B>, ComputePassId>,
-    pub compute_pipelines: Registry<ComputePipeline<B>, ComputePipelineId>,
-    pub buffers: Registry<Buffer<B>, BufferId>,
-    pub textures: Registry<Texture<B>, TextureId>,
-    pub texture_views: Registry<TextureView<B>, TextureViewId>,
-    pub samplers: Registry<Sampler<B>, SamplerId>,
-}
+pub struct Hub<B: hal::Backend, F> {
+    pub adapters: Registry<Adapter<B>, AdapterId, F>,
+    pub devices: Registry<Device<B>, DeviceId, F>,
+    pub swap_chains: Registry<SwapChain<B>, SwapChainId, F>,
+    pub pipeline_layouts: Registry<PipelineLayout<B>, PipelineLayoutId, F>,
+    pub shader_modules: Registry<ShaderModule<B>, ShaderModuleId, F>,
+    pub bind_group_layouts: Registry<BindGroupLayout<B>, BindGroupLayoutId, F>,
+    pub bind_groups: Registry<BindGroup<B>, BindGroupId, F>,
+    pub command_buffers: Registry<CommandBuffer<B>, CommandBufferId, F>,
+    pub render_passes: Registry<RenderPass<B>, RenderPassId, F>,
+    pub render_pipelines: Registry<RenderPipeline<B>, RenderPipelineId, F>,
+    pub compute_passes: Registry<ComputePass<B>, ComputePassId, F>,
+    pub compute_pipelines: Registry<ComputePipeline<B>, ComputePipelineId, F>,
+    pub buffers: Registry<Buffer<B>, BufferId, F>,
+    pub textures: Registry<Texture<B>, TextureId, F>,
+    pub texture_views: Registry<TextureView<B>, TextureViewId, F>,
+    pub samplers: Registry<Sampler<B>, SamplerId, F>,
+}

-impl<B: GfxBackend> Default for Hub<B> {
+impl<B: GfxBackend, F: Default> Default for Hub<B, F> {
    fn default() -> Self {
        Hub {
            adapters: Registry::new(B::VARIANT),
@ -389,7 +397,7 @@ impl<B: GfxBackend> Default for Hub<B> {
    }
}

-impl<B: hal::Backend> Drop for Hub<B> {
+impl<B: hal::Backend, F> Drop for Hub<B, F> {
    fn drop(&mut self) {
        use crate::resource::TextureViewInner;
        use hal::device::Device as _;
@ -398,7 +406,9 @@ impl<B: hal::Backend> Drop for Hub<B> {
        for (_, (sampler, _)) in self.samplers.data.write().map.drain() {
            unsafe {
-                devices[sampler.device_id.value].raw.destroy_sampler(sampler.raw);
+                devices[sampler.device_id.value]
+                    .raw
+                    .destroy_sampler(sampler.raw);
            }
        }
        {
@ -417,16 +427,22 @@ impl<B: hal::Backend> Drop for Hub<B> {
        }
        for (_, (texture, _)) in self.textures.data.write().map.drain() {
            unsafe {
-                devices[texture.device_id.value].raw.destroy_image(texture.raw);
+                devices[texture.device_id.value]
+                    .raw
+                    .destroy_image(texture.raw);
            }
        }
        for (_, (buffer, _)) in self.buffers.data.write().map.drain() {
            unsafe {
-                devices[buffer.device_id.value].raw.destroy_buffer(buffer.raw);
+                devices[buffer.device_id.value]
+                    .raw
+                    .destroy_buffer(buffer.raw);
            }
        }
        for (_, (command_buffer, _)) in self.command_buffers.data.write().map.drain() {
-            devices[command_buffer.device_id.value].com_allocator.after_submit(command_buffer, 0);
+            devices[command_buffer.device_id.value]
+                .com_allocator
+                .after_submit(command_buffer, 0);
        }
        for (_, (bind_group, _)) in self.bind_groups.data.write().map.drain() {
            let device = &devices[bind_group.device_id.value];
@ -451,29 +467,29 @@ impl<B: hal::Backend> Drop for Hub<B> {
}

#[derive(Debug, Default)]
-pub struct Hubs {
+pub struct Hubs<F> {
    #[cfg(any(
        not(any(target_os = "ios", target_os = "macos")),
        feature = "gfx-backend-vulkan"
    ))]
-    vulkan: Hub<backend::Vulkan>,
+    vulkan: Hub<backend::Vulkan, F>,
    #[cfg(any(target_os = "ios", target_os = "macos"))]
-    metal: Hub<backend::Metal>,
+    metal: Hub<backend::Metal, F>,
    #[cfg(windows)]
-    dx12: Hub<backend::Dx12>,
+    dx12: Hub<backend::Dx12, F>,
    #[cfg(windows)]
-    dx11: Hub<backend::Dx11>,
+    dx11: Hub<backend::Dx11, F>,
}

#[derive(Debug)]
-pub struct Global {
+pub struct Global<F> {
    pub instance: Instance,
-    pub surfaces: Registry<Surface, SurfaceId>,
-    hubs: Hubs,
+    pub surfaces: Registry<Surface, SurfaceId, F>,
+    hubs: Hubs<F>,
}

-impl Global {
-    fn new_impl(name: &str) -> Self {
+impl<F: Default> Global<F> {
+    pub fn new(name: &str) -> Self {
        Global {
            instance: Instance::new(name, 1),
            surfaces: Registry::new(Backend::Empty),
@ -481,14 +497,12 @@ impl Global {
        }
    }

-    #[cfg(not(feature = "local"))]
-    pub fn new(name: &str) -> Self {
-        Self::new_impl(name)
-    }
-
-    #[cfg(not(feature = "local"))]
    pub fn delete(self) {
-        let Global { mut instance, surfaces, hubs } = self;
+        let Global {
+            mut instance,
+            surfaces,
+            hubs,
+        } = self;
        drop(hubs);
        // destroy surfaces
        for (_, (surface, _)) in surfaces.data.write().map.drain() {
@ -497,14 +511,9 @@ impl Global {
        }
    }

-#[cfg(feature = "local")]
-lazy_static::lazy_static! {
-    pub static ref GLOBAL: Arc<Global> = Arc::new(Global::new_impl("wgpu"));
-}
-
pub trait GfxBackend: hal::Backend {
    const VARIANT: Backend;
-    fn hub(global: &Global) -> &Hub<Self>;
+    fn hub<F>(global: &Global<F>) -> &Hub<Self, F>;
    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface;
}
@ -514,7 +523,7 @@ pub trait GfxBackend: hal::Backend {
))]
impl GfxBackend for backend::Vulkan {
    const VARIANT: Backend = Backend::Vulkan;
-    fn hub(global: &Global) -> &Hub<Self> {
+    fn hub<F>(global: &Global<F>) -> &Hub<Self, F> {
        &global.hubs.vulkan
    }
    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
@ -525,7 +534,7 @@ impl GfxBackend for backend::Vulkan {
#[cfg(any(target_os = "ios", target_os = "macos"))]
impl GfxBackend for backend::Metal {
    const VARIANT: Backend = Backend::Metal;
-    fn hub(global: &Global) -> &Hub<Self> {
+    fn hub<F>(global: &Global<F>) -> &Hub<Self, F> {
        &global.hubs.metal
    }
    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
@ -536,7 +545,7 @@ impl GfxBackend for backend::Metal {
#[cfg(windows)]
impl GfxBackend for backend::Dx12 {
    const VARIANT: Backend = Backend::Dx12;
-    fn hub(global: &Global) -> &Hub<Self> {
+    fn hub<F>(global: &Global<F>) -> &Hub<Self, F> {
        &global.hubs.dx12
    }
    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
@ -547,7 +556,7 @@ impl GfxBackend for backend::Dx12 {
#[cfg(windows)]
impl GfxBackend for backend::Dx11 {
    const VARIANT: Backend = Backend::Dx11;
-    fn hub(global: &Global) -> &Hub<Self> {
+    fn hub<F>(global: &Global<F>) -> &Hub<Self, F> {
        &global.hubs.dx11
    }
    fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {


@ -78,42 +78,33 @@ impl<T> TypedId for Id<T> {
    }
}

-#[cfg(not(feature = "local"))]
-pub type Input<T> = T;
-#[cfg(feature = "local")]
-pub type Input<T> = PhantomData<T>;
-#[cfg(feature = "local")]
-pub type Output<T> = T;
-#[cfg(not(feature = "local"))]
-pub type Output<T> = PhantomData<T>;
-
-pub type AdapterId = Id<crate::Adapter<Dummy>>;
-pub type DeviceId = Id<crate::Device<Dummy>>;
+pub type AdapterId = Id<crate::instance::Adapter<Dummy>>;
+pub type SurfaceId = Id<crate::instance::Surface>;
+
+// Device
+pub type DeviceId = Id<crate::device::Device<Dummy>>;
pub type QueueId = DeviceId;
+pub type ShaderModuleId = Id<crate::device::ShaderModule<Dummy>>;

// Resource
-pub type BufferId = Id<crate::Buffer<Dummy>>;
-pub type TextureViewId = Id<crate::TextureView<Dummy>>;
-pub type TextureId = Id<crate::Texture<Dummy>>;
-pub type SamplerId = Id<crate::Sampler<Dummy>>;
+pub type BufferId = Id<crate::resource::Buffer<Dummy>>;
+pub type TextureViewId = Id<crate::resource::TextureView<Dummy>>;
+pub type TextureId = Id<crate::resource::Texture<Dummy>>;
+pub type SamplerId = Id<crate::resource::Sampler<Dummy>>;

// Binding model
-pub type BindGroupLayoutId = Id<crate::BindGroupLayout<Dummy>>;
-pub type PipelineLayoutId = Id<crate::PipelineLayout<Dummy>>;
-pub type BindGroupId = Id<crate::BindGroup<Dummy>>;
+pub type BindGroupLayoutId = Id<crate::binding_model::BindGroupLayout<Dummy>>;
+pub type PipelineLayoutId = Id<crate::binding_model::PipelineLayout<Dummy>>;
+pub type BindGroupId = Id<crate::binding_model::BindGroup<Dummy>>;

// Pipeline
-pub type InputStateId = Id<crate::InputState>;
-pub type ShaderModuleId = Id<crate::ShaderModule<Dummy>>;
-pub type RenderPipelineId = Id<crate::RenderPipeline<Dummy>>;
-pub type ComputePipelineId = Id<crate::ComputePipeline<Dummy>>;
+pub type RenderPipelineId = Id<crate::pipeline::RenderPipeline<Dummy>>;
+pub type ComputePipelineId = Id<crate::pipeline::ComputePipeline<Dummy>>;

// Command
-pub type CommandBufferId = Id<crate::CommandBuffer<Dummy>>;
+pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
pub type CommandEncoderId = CommandBufferId;
-pub type RenderBundleId = Id<crate::RenderBundle<Dummy>>;
-pub type RenderPassId = Id<crate::RenderPass<Dummy>>;
-pub type ComputePassId = Id<crate::ComputePass<Dummy>>;
+pub type RenderBundleId = Id<crate::command::RenderBundle<Dummy>>;
+pub type RenderPassId = Id<crate::command::RenderPass<Dummy>>;
+pub type ComputePassId = Id<crate::command::ComputePass<Dummy>>;

// Swap chain
-pub type SurfaceId = Id<crate::Surface>;
-pub type SwapChainId = Id<crate::SwapChain<Dummy>>;
+pub type SwapChainId = Id<crate::swap_chain::SwapChain<Dummy>>;

impl SurfaceId {
    pub(crate) fn to_swap_chain_id(&self, backend: Backend) -> SwapChainId {
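
Each `Id` zips an index, an epoch, and a backend into a single value (see `IdentityManager::alloc` above). A standalone sketch of one possible packing; the field widths here are illustrative assumptions, not the actual wgpu-core layout:

// Pack index (low 32 bits), epoch (next 29 bits), backend (top 3 bits).
fn zip(index: u32, epoch: u32, backend: u8) -> u64 {
    assert!(epoch < 1 << 29 && backend < 1 << 3);
    (index as u64) | ((epoch as u64) << 32) | ((backend as u64) << 61)
}

fn unzip(id: u64) -> (u32, u32, u8) {
    (
        id as u32,
        ((id >> 32) & ((1u64 << 29) - 1)) as u32,
        (id >> 61) as u8,
    )
}

fn main() {
    let id = zip(42, 7, 1); // index 42, epoch 7, backend 1 (Vulkan)
    assert_eq!(unzip(id), (42, 7, 1));
}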


@ -0,0 +1,424 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
backend,
binding_model::MAX_BIND_GROUPS,
device::{Device, BIND_BUFFER_ALIGNMENT},
hub::{GfxBackend, Global, IdentityFilter, Token},
id::{AdapterId, DeviceId},
Backend,
};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub use hal::adapter::AdapterInfo;
use hal::{self, adapter::PhysicalDevice as _, queue::QueueFamily as _, Instance as _};
#[derive(Debug)]
pub struct Instance {
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
pub vulkan: Option<gfx_backend_vulkan::Instance>,
#[cfg(any(target_os = "ios", target_os = "macos"))]
pub metal: gfx_backend_metal::Instance,
#[cfg(windows)]
pub dx12: Option<gfx_backend_dx12::Instance>,
#[cfg(windows)]
pub dx11: gfx_backend_dx11::Instance,
}
impl Instance {
pub fn new(name: &str, version: u32) -> Self {
Instance {
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
vulkan: gfx_backend_vulkan::Instance::create(name, version).ok(),
#[cfg(any(target_os = "ios", target_os = "macos"))]
metal: gfx_backend_metal::Instance::create(name, version).unwrap(),
#[cfg(windows)]
dx12: gfx_backend_dx12::Instance::create(name, version).ok(),
#[cfg(windows)]
dx11: gfx_backend_dx11::Instance::create(name, version).unwrap(),
}
}
pub(crate) fn destroy_surface(&mut self, surface: Surface) {
//TODO: fill out the proper destruction once we are on gfx-0.4
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
{
if let Some(_suf) = surface.vulkan {
//self.vulkan.as_mut().unwrap().destroy_surface(suf);
}
}
#[cfg(any(target_os = "ios", target_os = "macos"))]
{
let _ = surface;
//self.metal.destroy_surface(surface.metal);
}
#[cfg(windows)]
{
if let Some(_suf) = surface.dx12 {
//self.dx12.as_mut().unwrap().destroy_surface(suf);
}
//self.dx11.destroy_surface(surface.dx11);
}
}
}
type GfxSurface<B> = <B as hal::Backend>::Surface;
#[derive(Debug)]
pub struct Surface {
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
pub vulkan: Option<GfxSurface<backend::Vulkan>>,
#[cfg(any(target_os = "ios", target_os = "macos"))]
pub metal: GfxSurface<backend::Metal>,
#[cfg(windows)]
pub dx12: Option<GfxSurface<backend::Dx12>>,
#[cfg(windows)]
pub dx11: GfxSurface<backend::Dx11>,
}
#[derive(Debug)]
pub struct Adapter<B: hal::Backend> {
pub(crate) raw: hal::adapter::Adapter<B>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PowerPreference {
Default = 0,
LowPower = 1,
HighPerformance = 2,
}
#[repr(C)]
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct RequestAdapterOptions {
pub power_preference: PowerPreference,
}
impl Default for RequestAdapterOptions {
fn default() -> Self {
RequestAdapterOptions {
power_preference: PowerPreference::Default,
}
}
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Extensions {
pub anisotropic_filtering: bool,
}
#[repr(C)]
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Limits {
pub max_bind_groups: u32,
}
impl Default for Limits {
fn default() -> Self {
Limits {
max_bind_groups: MAX_BIND_GROUPS as u32,
}
}
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct DeviceDescriptor {
pub extensions: Extensions,
pub limits: Limits,
}
bitflags::bitflags! {
#[repr(transparent)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BackendBit: u32 {
const VULKAN = 1 << Backend::Vulkan as u32;
const GL = 1 << Backend::Gl as u32;
const METAL = 1 << Backend::Metal as u32;
const DX12 = 1 << Backend::Dx12 as u32;
const DX11 = 1 << Backend::Dx11 as u32;
/// Vulkan + METAL + DX12
const PRIMARY = Self::VULKAN.bits | Self::METAL.bits | Self::DX12.bits;
/// OpenGL + DX11
const SECONDARY = Self::GL.bits | Self::DX11.bits;
}
}
impl From<Backend> for BackendBit {
fn from(backend: Backend) -> Self {
BackendBit::from_bits(1 << backend as u32).unwrap()
}
}
pub enum AdapterInputs<'a, I> {
IdSet(&'a [I], fn(&I) -> Backend),
Mask(BackendBit, fn() -> I),
}
impl<I: Clone> AdapterInputs<'_, I> {
fn find(&self, b: Backend) -> Option<I> {
match *self {
AdapterInputs::IdSet(ids, ref fun) => ids.iter().find(|id| fun(id) == b).cloned(),
AdapterInputs::Mask(bits, ref fun) => {
if bits.contains(b.into()) {
Some(fun())
} else {
None
}
}
}
}
}
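
`AdapterInputs::find` only probes a backend when either an id for it was supplied or its bit is set in the mask. A standalone sketch of that masking, with plain integers standing in for the `bitflags` type:

#[derive(Clone, Copy)]
enum Backend {
    Vulkan = 1,
    Metal = 2,
}

// Probe a backend only if its bit is present in the requested mask.
fn find(mask: u32, b: Backend) -> Option<&'static str> {
    if mask & (1 << b as u32) != 0 {
        Some(match b {
            Backend::Vulkan => "enumerate Vulkan adapters",
            Backend::Metal => "enumerate Metal adapters",
        })
    } else {
        None
    }
}

fn main() {
    let primary = (1 << Backend::Vulkan as u32) | (1 << Backend::Metal as u32);
    assert!(find(primary, Backend::Vulkan).is_some());
    assert!(find(1 << Backend::Metal as u32, Backend::Vulkan).is_none());
}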
impl<F: IdentityFilter<AdapterId>> Global<F> {
pub fn pick_adapter(
&self,
desc: &RequestAdapterOptions,
inputs: AdapterInputs<F::Input>,
) -> Option<AdapterId> {
let instance = &self.instance;
let mut device_types = Vec::new();
let id_vulkan = inputs.find(Backend::Vulkan);
let id_metal = inputs.find(Backend::Metal);
let id_dx12 = inputs.find(Backend::Dx12);
let id_dx11 = inputs.find(Backend::Dx11);
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
let mut adapters_vk = match instance.vulkan {
Some(ref inst) if id_vulkan.is_some() => {
let adapters = inst.enumerate_adapters();
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
}
_ => Vec::new(),
};
#[cfg(any(target_os = "ios", target_os = "macos"))]
let mut adapters_mtl = if id_metal.is_some() {
let adapters = instance.metal.enumerate_adapters();
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
} else {
Vec::new()
};
#[cfg(windows)]
let mut adapters_dx12 = match instance.dx12 {
Some(ref inst) if id_dx12.is_some() => {
let adapters = inst.enumerate_adapters();
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
}
_ => Vec::new(),
};
#[cfg(windows)]
let mut adapters_dx11 = if id_dx11.is_some() {
let adapters = instance.dx11.enumerate_adapters();
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
} else {
Vec::new()
};
if device_types.is_empty() {
log::warn!("No adapters are available!");
return None;
}
let (mut integrated, mut discrete, mut virt, mut other) = (None, None, None, None);
for (i, ty) in device_types.into_iter().enumerate() {
match ty {
hal::adapter::DeviceType::IntegratedGpu => {
integrated = integrated.or(Some(i));
}
hal::adapter::DeviceType::DiscreteGpu => {
discrete = discrete.or(Some(i));
}
hal::adapter::DeviceType::VirtualGpu => {
virt = virt.or(Some(i));
}
_ => {
other = other.or(Some(i));
}
}
}
let preferred_gpu = match desc.power_preference {
PowerPreference::Default => integrated.or(discrete).or(other).or(virt),
PowerPreference::LowPower => integrated.or(other).or(discrete).or(virt),
PowerPreference::HighPerformance => discrete.or(other).or(integrated).or(virt),
};
let mut token = Token::root();
let mut selected = preferred_gpu.unwrap_or(0);
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
{
if selected < adapters_vk.len() {
let adapter = Adapter {
raw: adapters_vk.swap_remove(selected),
};
log::info!("Adapter Vulkan {:?}", adapter.raw.info);
let id = backend::Vulkan::hub(self).adapters.register_identity(
id_vulkan.unwrap(),
adapter,
&mut token,
);
return Some(id);
}
selected -= adapters_vk.len();
}
#[cfg(any(target_os = "ios", target_os = "macos"))]
{
if selected < adapters_mtl.len() {
let adapter = Adapter {
raw: adapters_mtl.swap_remove(selected),
};
log::info!("Adapter Metal {:?}", adapter.raw.info);
let id = backend::Metal::hub(self).adapters.register_identity(
id_metal.unwrap(),
adapter,
&mut token,
);
return Some(id);
}
selected -= adapters_mtl.len();
}
#[cfg(windows)]
{
if selected < adapters_dx12.len() {
let adapter = Adapter {
raw: adapters_dx12.swap_remove(selected),
};
log::info!("Adapter Dx12 {:?}", adapter.raw.info);
let id = backend::Dx12::hub(self).adapters.register_identity(
id_dx12.unwrap(),
adapter,
&mut token,
);
return Some(id);
}
selected -= adapters_dx12.len();
if selected < adapters_dx11.len() {
let adapter = Adapter {
raw: adapters_dx11.swap_remove(selected),
};
log::info!("Adapter Dx11 {:?}", adapter.raw.info);
let id = backend::Dx11::hub(self).adapters.register_identity(
id_dx11.unwrap(),
adapter,
&mut token,
);
return Some(id);
}
selected -= adapters_dx11.len();
}
let _ = (selected, id_vulkan, id_metal, id_dx12, id_dx11);
unreachable!()
}
pub fn adapter_get_info<B: GfxBackend>(&self, adapter_id: AdapterId) -> AdapterInfo {
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id];
adapter.raw.info.clone()
}
}
impl<F: IdentityFilter<DeviceId>> Global<F> {
pub fn adapter_request_device<B: GfxBackend>(
&self,
adapter_id: AdapterId,
desc: &DeviceDescriptor,
id_in: F::Input,
) -> DeviceId {
let hub = B::hub(self);
let mut token = Token::root();
let device = {
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id].raw;
let family = adapter
.queue_families
.iter()
.find(|family| family.queue_type().supports_graphics())
.unwrap();
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(family, &[1.0])], hal::Features::empty())
.unwrap()
};
let limits = adapter.physical_device.limits();
assert_eq!(
0,
BIND_BUFFER_ALIGNMENT % limits.min_storage_buffer_offset_alignment,
"Adapter storage buffer offset alignment not compatible with WGPU"
);
assert_eq!(
0,
BIND_BUFFER_ALIGNMENT % limits.min_uniform_buffer_offset_alignment,
"Adapter uniform buffer offset alignment not compatible with WGPU"
);
if limits.max_bound_descriptor_sets == 0 {
log::warn!("max_bind_groups limit is missing");
} else {
assert!(
u32::from(limits.max_bound_descriptor_sets) >= desc.limits.max_bind_groups,
"Adapter does not support the requested max_bind_groups"
);
}
let mem_props = adapter.physical_device.memory_properties();
let supports_texture_d24_s8 = adapter
.physical_device
.format_properties(Some(hal::format::Format::D24UnormS8Uint))
.optimal_tiling
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT);
Device::new(
gpu.device,
adapter_id,
gpu.queue_groups.swap_remove(0),
mem_props,
supports_texture_d24_s8,
desc.limits.max_bind_groups,
)
};
hub.devices.register_identity(id_in, device, &mut token)
}
}
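
The two alignment assertions above encode a simple invariant: wgpu's fixed binding offset alignment must be a whole multiple of whatever minimum the adapter reports. A worked sketch of that check (the constant is assumed here to be 256, its value elsewhere in wgpu-core; the limit values are made up):

// Illustration only, not part of the commit.
const BIND_BUFFER_ALIGNMENT: u64 = 256; // assumed value

fn offsets_compatible(min_storage_align: u64, min_uniform_align: u64) -> bool {
    // Mirrors the two assert_eq! checks above: both minimums must divide 256 evenly.
    BIND_BUFFER_ALIGNMENT % min_storage_align == 0
        && BIND_BUFFER_ALIGNMENT % min_uniform_align == 0
}

fn main() {
    assert!(offsets_compatible(64, 256));  // typical adapter: passes
    assert!(!offsets_compatible(512, 64)); // 256 % 512 != 0: would panic above
}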

View file

@@ -0,0 +1,221 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
pub mod backend {
#[cfg(windows)]
pub use gfx_backend_dx11::Backend as Dx11;
#[cfg(windows)]
pub use gfx_backend_dx12::Backend as Dx12;
pub use gfx_backend_empty::Backend as Empty;
#[cfg(any(target_os = "ios", target_os = "macos"))]
pub use gfx_backend_metal::Backend as Metal;
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
pub use gfx_backend_vulkan::Backend as Vulkan;
}
pub mod binding_model;
pub mod command;
pub mod conv;
pub mod device;
pub mod hub;
pub mod id;
pub mod instance;
pub mod pipeline;
pub mod resource;
pub mod swap_chain;
pub mod track;
pub use hal::pso::read_spirv;
use std::{
os::raw::c_char,
ptr,
sync::atomic::{AtomicUsize, Ordering},
};
type SubmissionIndex = usize;
type Index = u32;
type Epoch = u32;
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Backend {
Empty = 0,
Vulkan = 1,
Metal = 2,
Dx12 = 3,
Dx11 = 4,
Gl = 5,
}
pub type BufferAddress = u64;
pub type RawString = *const c_char;
//TODO: make it private. Currently used for swapchain creation impl.
#[derive(Debug)]
pub struct RefCount(ptr::NonNull<AtomicUsize>);
unsafe impl Send for RefCount {}
unsafe impl Sync for RefCount {}
impl RefCount {
const MAX: usize = 1 << 24;
fn load(&self) -> usize {
unsafe { self.0.as_ref() }.load(Ordering::Acquire)
}
}
impl Clone for RefCount {
fn clone(&self) -> Self {
let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::Relaxed);
assert!(old_size < Self::MAX);
RefCount(self.0)
}
}
impl Drop for RefCount {
fn drop(&mut self) {
if unsafe { self.0.as_ref() }.fetch_sub(1, Ordering::Relaxed) == 1 {
let _ = unsafe { Box::from_raw(self.0.as_ptr()) };
}
}
}
#[derive(Debug)]
struct LifeGuard {
ref_count: RefCount,
submission_index: AtomicUsize,
}
impl LifeGuard {
fn new() -> Self {
let bx = Box::new(AtomicUsize::new(1));
LifeGuard {
ref_count: RefCount(ptr::NonNull::new(Box::into_raw(bx)).unwrap()),
submission_index: AtomicUsize::new(0),
}
}
}
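
`RefCount` and `LifeGuard` hand-roll an `Arc`-like count over a leaked `AtomicUsize`. A test-style sketch of the intended semantics (`load` is the private helper above, so this only makes sense inside this module):

// Sketch, not part of the commit: clone bumps the count, drop decrements it,
// and dropping the last clone frees the boxed AtomicUsize.
let guard = LifeGuard::new();
assert_eq!(guard.ref_count.load(), 1);
let extra = guard.ref_count.clone();
assert_eq!(guard.ref_count.load(), 2);
drop(extra); // count returns to 1; dropping `guard` releases the allocation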
#[derive(Clone, Debug)]
struct Stored<T> {
value: T,
ref_count: RefCount,
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct Color {
pub r: f64,
pub g: f64,
pub b: f64,
pub a: f64,
}
impl Color {
pub const TRANSPARENT: Self = Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 0.0,
};
pub const BLACK: Self = Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
};
pub const WHITE: Self = Color {
r: 1.0,
g: 1.0,
b: 1.0,
a: 1.0,
};
pub const RED: Self = Color {
r: 1.0,
g: 0.0,
b: 0.0,
a: 1.0,
};
pub const GREEN: Self = Color {
r: 0.0,
g: 1.0,
b: 0.0,
a: 1.0,
};
pub const BLUE: Self = Color {
r: 0.0,
g: 0.0,
b: 1.0,
a: 1.0,
};
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct Origin3d {
pub x: f32,
pub y: f32,
pub z: f32,
}
impl Origin3d {
pub const ZERO: Self = Origin3d {
x: 0.0,
y: 0.0,
z: 0.0,
};
}
impl Default for Origin3d {
fn default() -> Self {
Origin3d::ZERO
}
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct Extent3d {
pub width: u32,
pub height: u32,
pub depth: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct U32Array {
pub bytes: *const u32,
pub length: usize,
}
#[derive(Clone, Copy, Debug)]
pub(crate) struct Features {
pub max_bind_groups: u32,
pub supports_texture_d24_s8: bool,
}
#[macro_export]
macro_rules! gfx_select {
($id:expr => $global:ident.$method:ident( $($param:expr),+ )) => {
match $id.backend() {
#[cfg(any(not(any(target_os = "ios", target_os = "macos")), feature = "gfx-backend-vulkan"))]
$crate::Backend::Vulkan => $global.$method::<$crate::backend::Vulkan>( $($param),+ ),
#[cfg(any(target_os = "ios", target_os = "macos"))]
$crate::Backend::Metal => $global.$method::<$crate::backend::Metal>( $($param),+ ),
#[cfg(windows)]
$crate::Backend::Dx12 => $global.$method::<$crate::backend::Dx12>( $($param),+ ),
#[cfg(windows)]
$crate::Backend::Dx11 => $global.$method::<$crate::backend::Dx11>( $($param),+ ),
_ => unreachable!()
}
};
}
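
A minimal sketch of how `gfx_select!` is meant to be driven: the backend is decoded from the id itself, and the call is dispatched to the matching monomorphization (the `global` and `adapter_id` values here are assumptions for illustration):

// Illustration only: dispatches to Vulkan/Metal/DX12/DX11 based on
// the backend bits carried inside `adapter_id`.
let info = gfx_select!(adapter_id => global.adapter_get_info(adapter_id));
log::info!("running on adapter: {:?}", info);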
/// Fast hash map used internally.
type FastHashMap<K, V> =
std::collections::HashMap<K, V, std::hash::BuildHasherDefault<fxhash::FxHasher>>;

View file

@@ -4,16 +4,13 @@
 use crate::{
     device::RenderPassContext,
+    id::{PipelineLayoutId, ShaderModuleId},
     resource,
     BufferAddress,
-    PipelineLayoutId,
     RawString,
-    ShaderModuleId,
     U32Array,
 };
-use bitflags::bitflags;
 
 pub type ShaderLocation = u32;
 
 #[repr(C)]
@@ -50,7 +47,7 @@ impl Default for BlendOperation {
     }
 }
 
-bitflags! {
+bitflags::bitflags! {
     #[repr(transparent)]
     pub struct ColorWrite: u32 {
         const RED = 1;
@@ -334,7 +331,7 @@ pub struct RenderPipelineDescriptor {
     pub alpha_to_coverage_enabled: bool,
 }
 
-bitflags! {
+bitflags::bitflags! {
     #[repr(transparent)]
     pub struct PipelineFlags: u32 {
         const BLEND_COLOR = 1;

View file

@@ -3,26 +3,22 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use crate::{
+    id::{DeviceId, SwapChainId, TextureId},
+    track::DUMMY_SELECTOR,
     BufferAddress,
-    BufferMapReadCallback,
-    BufferMapWriteCallback,
-    DeviceId,
     Extent3d,
     LifeGuard,
     RefCount,
     Stored,
-    SwapChainId,
-    TextureId,
 };
-use bitflags::bitflags;
 use hal;
 use rendy_memory::MemoryBlock;
 use smallvec::SmallVec;
-use std::borrow::Borrow;
+use std::{borrow::Borrow, fmt};
 
-bitflags! {
+bitflags::bitflags! {
     #[repr(transparent)]
     pub struct BufferUsage: u32 {
         const MAP_READ = 1;
@@ -65,25 +61,35 @@ pub enum BufferMapAsyncStatus {
     ContextLost,
 }
 
-#[derive(Clone, Debug)]
 pub enum BufferMapOperation {
-    Read(std::ops::Range<u64>, BufferMapReadCallback, *mut u8),
-    Write(std::ops::Range<u64>, BufferMapWriteCallback, *mut u8),
+    Read(std::ops::Range<u64>, Box<dyn FnOnce(BufferMapAsyncStatus, *const u8)>),
+    Write(std::ops::Range<u64>, Box<dyn FnOnce(BufferMapAsyncStatus, *mut u8)>),
 }
 
+//TODO: clarify if/why this is needed here
 unsafe impl Send for BufferMapOperation {}
 unsafe impl Sync for BufferMapOperation {}
 
+impl fmt::Debug for BufferMapOperation {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        let (op, range) = match *self {
+            BufferMapOperation::Read(ref range, _) => ("read", range),
+            BufferMapOperation::Write(ref range, _) => ("write", range),
+        };
+        write!(fmt, "BufferMapOperation <{}> of range {:?}", op, range)
+    }
+}
+
 impl BufferMapOperation {
     pub(crate) fn call_error(self) {
         match self {
-            BufferMapOperation::Read(_, callback, userdata) => {
+            BufferMapOperation::Read(_, callback) => {
                 log::error!("wgpu_buffer_map_read_async failed: buffer mapping is pending");
-                callback(BufferMapAsyncStatus::Error, std::ptr::null_mut(), userdata);
+                callback(BufferMapAsyncStatus::Error, std::ptr::null());
             }
-            BufferMapOperation::Write(_, callback, userdata) => {
+            BufferMapOperation::Write(_, callback) => {
                 log::error!("wgpu_buffer_map_write_async failed: buffer mapping is pending");
-                callback(BufferMapAsyncStatus::Error, std::ptr::null_mut(), userdata);
+                callback(BufferMapAsyncStatus::Error, std::ptr::null_mut());
             }
         }
     }
@@ -96,6 +102,7 @@ pub struct Buffer<B: hal::Backend> {
     pub(crate) usage: BufferUsage,
     pub(crate) memory: MemoryBlock<B>,
     pub(crate) size: BufferAddress,
+    pub(crate) full_range: (),
     pub(crate) mapped_write_ranges: Vec<std::ops::Range<u64>>,
     pub(crate) pending_map_operation: Option<BufferMapOperation>,
     pub(crate) life_guard: LifeGuard,
@@ -107,6 +114,12 @@ impl<B: hal::Backend> Borrow<RefCount> for Buffer<B> {
     }
 }
 
+impl<B: hal::Backend> Borrow<()> for Buffer<B> {
+    fn borrow(&self) -> &() {
+        &DUMMY_SELECTOR
+    }
+}
+
 #[repr(C)]
 #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
 pub enum TextureDimension {
@@ -178,7 +191,7 @@ pub enum TextureFormat {
     Depth24PlusStencil8 = 43,
 }
 
-bitflags! {
+bitflags::bitflags! {
     #[repr(transparent)]
     pub struct TextureUsage: u32 {
         const COPY_SRC = 1;
@@ -229,6 +242,12 @@ impl<B: hal::Backend> Borrow<RefCount> for Texture<B> {
     }
 }
 
+impl<B: hal::Backend> Borrow<hal::image::SubresourceRange> for Texture<B> {
+    fn borrow(&self) -> &hal::image::SubresourceRange {
+        &self.full_range
+    }
+}
+
 #[repr(C)]
 #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
 pub enum TextureAspect {
@@ -296,6 +315,12 @@ impl<B: hal::Backend> Borrow<RefCount> for TextureView<B> {
     }
 }
 
+impl<B: hal::Backend> Borrow<()> for TextureView<B> {
+    fn borrow(&self) -> &() {
+        &DUMMY_SELECTOR
+    }
+}
+
 #[repr(C)]
 #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
 pub enum AddressMode {
@@ -371,3 +396,9 @@ impl<B: hal::Backend> Borrow<RefCount> for Sampler<B> {
         &self.life_guard.ref_count
     }
 }
+
+impl<B: hal::Backend> Borrow<()> for Sampler<B> {
+    fn borrow(&self) -> &() {
+        &DUMMY_SELECTOR
+    }
+}
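
The main behavioral change in this file is the switch from C-style callback-plus-userdata pairs to boxed closures in `BufferMapOperation`. A sketch of constructing the new `Read` variant (the range and closure body are illustrative):

// Sketch: the boxed FnOnce replaces the old (callback, *mut u8 userdata)
// pair and is invoked once with the map status and the mapped pointer.
let op = BufferMapOperation::Read(
    0 .. 256,
    Box::new(|status: BufferMapAsyncStatus, _data: *const u8| {
        log::info!("buffer map finished: {:?}", status);
    }),
);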

View file

@@ -0,0 +1,244 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! Swap chain management.
## Lifecycle
At the lowest level, the swap chain uses the new simplified model of gfx-rs.
A swap chain is a separate object that is backend-dependent but shares its index with
the parent surface, which is backend-independent. This ensures a 1:1 correspondence
between them.
`get_next_image()` requests a new image from the surface. The image becomes part of the
`TextureViewInner::SwapChain` of the resulting view. The view is registered in the HUB
but not in the device tracker.
The only operation allowed on the view is to serve as either a color or a resolve attachment.
It can only be used in one command buffer, which needs to be submitted before presenting.
The command buffer tracker knows about the view, but only for the duration of recording.
The view ID is erased from it at the end, so that it's not merged into the device tracker.
When a swapchain view is used in `begin_render_pass()`, we assume the start and end image
layouts purely based on whether or not this view was used in this command buffer before.
It always starts with `Uninitialized` and ends with `Present`, so that no barriers are
needed when we actually present the image.
In `queue_submit()` we make sure to signal the semaphore whenever we render to a swap
chain view.
In `present()` we return the swap chain image back and wait on the semaphore.
!*/
use crate::{
conv,
hub::{GfxBackend, Global, IdentityFilter, Token},
id::{DeviceId, SwapChainId, TextureViewId},
resource,
Extent3d,
Features,
LifeGuard,
Stored,
};
use hal::{self, device::Device as _, queue::CommandQueue as _, window::PresentationSurface as _};
use smallvec::SmallVec;
const FRAME_TIMEOUT_MS: u64 = 1000;
pub const DESIRED_NUM_FRAMES: u32 = 3;
#[derive(Debug)]
pub struct SwapChain<B: hal::Backend> {
pub(crate) life_guard: LifeGuard,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) desc: SwapChainDescriptor,
pub(crate) num_frames: hal::window::SwapImageIndex,
pub(crate) semaphore: B::Semaphore,
pub(crate) acquired_view_id: Option<Stored<TextureViewId>>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum PresentMode {
NoVsync = 0,
Vsync = 1,
}
#[repr(C)]
#[derive(Clone, Debug)]
pub struct SwapChainDescriptor {
pub usage: resource::TextureUsage,
pub format: resource::TextureFormat,
pub width: u32,
pub height: u32,
pub present_mode: PresentMode,
}
impl SwapChainDescriptor {
pub(crate) fn to_hal(
&self,
num_frames: u32,
features: &Features,
) -> hal::window::SwapchainConfig {
let mut config = hal::window::SwapchainConfig::new(
self.width,
self.height,
conv::map_texture_format(self.format, *features),
num_frames,
);
//TODO: check for supported
config.image_usage = conv::map_texture_usage(self.usage, hal::format::Aspects::COLOR);
config.composite_alpha_mode = hal::window::CompositeAlphaMode::OPAQUE;
config.present_mode = match self.present_mode {
PresentMode::NoVsync => hal::window::PresentMode::IMMEDIATE,
PresentMode::Vsync => hal::window::PresentMode::FIFO,
};
config
}
pub fn to_texture_desc(&self) -> resource::TextureDescriptor {
resource::TextureDescriptor {
size: Extent3d {
width: self.width,
height: self.height,
depth: 1,
},
mip_level_count: 1,
array_layer_count: 1,
sample_count: 1,
dimension: resource::TextureDimension::D2,
format: self.format,
usage: self.usage,
}
}
}
#[repr(C)]
#[derive(Debug)]
pub struct SwapChainOutput {
pub view_id: TextureViewId,
}
#[derive(Debug)]
pub enum SwapChainGetNextTextureError {
GpuProcessingTimeout,
}
impl<F: IdentityFilter<TextureViewId>> Global<F> {
pub fn swap_chain_get_next_texture<B: GfxBackend>(
&self,
swap_chain_id: SwapChainId,
view_id_in: F::Input,
) -> Result<SwapChainOutput, SwapChainGetNextTextureError> {
let hub = B::hub(self);
let mut token = Token::root();
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = &mut swap_chain_guard[swap_chain_id];
let device = &device_guard[sc.device_id.value];
let (image, _) = {
let suf = B::get_surface_mut(surface);
match unsafe { suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000) } {
Ok(surface_image) => surface_image,
Err(hal::window::AcquireError::Timeout) => {
return Err(SwapChainGetNextTextureError::GpuProcessingTimeout);
}
Err(e) => {
log::warn!("acquire_image() failed ({:?}), reconfiguring swapchain", e);
let desc = sc.desc.to_hal(sc.num_frames, &device.features);
unsafe {
suf.configure_swapchain(&device.raw, desc).unwrap();
suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000).unwrap()
}
}
}
};
let view = resource::TextureView {
inner: resource::TextureViewInner::SwapChain {
image,
source_id: Stored {
value: swap_chain_id,
ref_count: sc.life_guard.ref_count.clone(),
},
framebuffers: SmallVec::new(),
},
format: sc.desc.format,
extent: hal::image::Extent {
width: sc.desc.width,
height: sc.desc.height,
depth: 1,
},
samples: 1,
range: hal::image::SubresourceRange {
aspects: hal::format::Aspects::COLOR,
layers: 0 .. 1,
levels: 0 .. 1,
},
life_guard: LifeGuard::new(),
};
let ref_count = view.life_guard.ref_count.clone();
let view_id = hub
.texture_views
.register_identity(view_id_in, view, &mut token);
assert!(
sc.acquired_view_id.is_none(),
"Swap chain image is already acquired"
);
sc.acquired_view_id = Some(Stored {
value: view_id,
ref_count,
});
Ok(SwapChainOutput { view_id })
}
pub fn swap_chain_present<B: GfxBackend>(&self, swap_chain_id: SwapChainId) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = &mut swap_chain_guard[swap_chain_id];
let device = &mut device_guard[sc.device_id.value];
let view_id = sc
.acquired_view_id
.take()
.expect("Swap chain image is not acquired");
let (view, _) = hub.texture_views.unregister(view_id.value, &mut token);
let (image, framebuffers) = match view.inner {
resource::TextureViewInner::Native { .. } => unreachable!(),
resource::TextureViewInner::SwapChain {
image,
framebuffers,
..
} => (image, framebuffers),
};
let err = unsafe {
let queue = &mut device.queue_group.queues[0];
queue.present_surface(B::get_surface_mut(surface), image, Some(&sc.semaphore))
};
if let Err(e) = err {
log::warn!("present failed: {:?}", e);
}
for fbo in framebuffers {
unsafe {
device.raw.destroy_framebuffer(fbo);
}
}
}
}
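
Together, the two entry points above form the per-frame loop described by the module docs. A hedged sketch of a caller (the `global` and `swap_chain_id` values are assumptions, the `PhantomData` id input presumes the identity filter used on the Rust side, and all rendering and error handling is elided):

// Illustrative frame loop, not part of the commit.
loop {
    let output = global
        .swap_chain_get_next_texture::<backend::Vulkan>(swap_chain_id, PhantomData)
        .expect("frame acquire timed out");
    // ... record and submit a command buffer rendering to output.view_id ...
    global.swap_chain_present::<backend::Vulkan>(swap_chain_id);
}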

View file

@@ -3,7 +3,7 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use super::{PendingTransition, ResourceState, Stitch, Unit};
-use crate::{conv, resource::BufferUsage, BufferId};
+use crate::{conv, id::BufferId, resource::BufferUsage};
 
 use std::ops::Range;
 
 //TODO: store `hal::buffer::State` here to avoid extra conversions
@@ -16,19 +16,17 @@ impl PendingTransition<BufferState> {
     }
 }
 
-impl Default for BufferState {
-    fn default() -> Self {
+impl ResourceState for BufferState {
+    type Id = BufferId;
+    type Selector = ();
+    type Usage = BufferUsage;
+
+    fn new(_full_selector: &Self::Selector) -> Self {
         BufferState {
             init: BufferUsage::empty(),
             last: BufferUsage::empty(),
         }
     }
-}
-
-impl ResourceState for BufferState {
-    type Id = BufferId;
-    type Selector = ();
-    type Usage = BufferUsage;
 
     fn query(&self, _selector: Self::Selector) -> Option<Self::Usage> {
         Some(self.last)
@@ -106,7 +104,7 @@ impl ResourceState for BufferState {
 #[cfg(test)]
 mod test {
     use super::*;
-    use crate::{Backend, TypedId};
+    use crate::{id::TypedId, Backend};
 
     #[test]
     fn change() {
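
The net effect of the rewrite above is that state construction now goes through `ResourceState::new` with the resource's full selector instead of `Default`. For buffers the selector is `()`, so the call is trivial (sketch):

// Sketch: a fresh BufferState starts with empty usage on both ends.
let state = BufferState::new(&());
assert_eq!(state.query(()), Some(BufferUsage::empty()));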

View file

@@ -8,15 +8,12 @@ mod texture;
 
 use crate::{
     hub::Storage,
+    id::{BindGroupId, SamplerId, TextureViewId, TypedId},
     Backend,
-    BindGroupId,
     Epoch,
     FastHashMap,
     Index,
     RefCount,
-    SamplerId,
-    TextureViewId,
-    TypedId,
 };
 
 use std::{
@@ -32,6 +29,8 @@ use buffer::BufferState;
 use texture::TextureState;
 
+pub const SEPARATE_DEPTH_STENCIL_STATES: bool = false;
+
 /// A single unit of state tracking. It keeps an initial
 /// usage as well as the last/current one, similar to `Range`.
 #[derive(Clone, Copy, Debug, PartialEq)]
@@ -74,7 +73,7 @@ pub enum Stitch {
 
 /// The main trait that abstracts away the tracking logic of
 /// a particular resource type, like a buffer or a texture.
-pub trait ResourceState: Clone + Default {
+pub trait ResourceState: Clone {
     /// Corresponding `HUB` identifier.
     type Id: Copy + Debug + TypedId;
     /// A type specifying the sub-resources.
@@ -82,6 +81,9 @@ pub trait ResourceState: Clone + Default {
     /// Usage type for a `Unit` of a sub-resource.
     type Usage: Debug;
 
+    /// Create a new resource state to track the specified subresources.
+    fn new(full_selector: &Self::Selector) -> Self;
+
     /// Check if all the selected sub-resources have the same
     /// usage, and return it.
     ///
@@ -212,11 +214,11 @@ impl<S: ResourceState> ResourceTracker<S> {
     pub fn init(
         &mut self,
         id: S::Id,
-        ref_count: &RefCount,
+        ref_count: RefCount,
         selector: S::Selector,
         default: S::Usage,
     ) -> bool {
-        let mut state = S::default();
+        let mut state = S::new(&selector);
         match state.change(id, selector, default, None) {
             Ok(()) => (),
             Err(_) => unreachable!(),
@@ -228,7 +230,7 @@ impl<S: ResourceState> ResourceTracker<S> {
             .insert(
                 index,
                 Resource {
-                    ref_count: ref_count.clone(),
+                    ref_count,
                     state,
                     epoch,
                 },
@@ -255,13 +257,14 @@ impl<S: ResourceState> ResourceTracker<S> {
         map: &'a mut FastHashMap<Index, Resource<S>>,
         id: S::Id,
         ref_count: &RefCount,
+        full_selector: &S::Selector,
     ) -> &'a mut Resource<S> {
         let (index, epoch, backend) = id.unzip();
         debug_assert_eq!(self_backend, backend);
         match map.entry(index) {
             Entry::Vacant(e) => e.insert(Resource {
                 ref_count: ref_count.clone(),
-                state: S::default(),
+                state: S::new(full_selector),
                 epoch,
             }),
             Entry::Occupied(e) => {
@@ -280,8 +283,9 @@ impl<S: ResourceState> ResourceTracker<S> {
         ref_count: &RefCount,
         selector: S::Selector,
         usage: S::Usage,
+        full_selector: &S::Selector,
     ) -> Result<(), PendingTransition<S>> {
-        Self::get_or_insert(self.backend, &mut self.map, id, ref_count)
+        Self::get_or_insert(self.backend, &mut self.map, id, ref_count, full_selector)
             .state
             .change(id, selector, usage, None)
     }
@@ -293,8 +297,9 @@ impl<S: ResourceState> ResourceTracker<S> {
         ref_count: &RefCount,
         selector: S::Selector,
         usage: S::Usage,
+        full_selector: &S::Selector,
     ) -> Drain<PendingTransition<S>> {
-        let res = Self::get_or_insert(self.backend, &mut self.map, id, ref_count);
+        let res = Self::get_or_insert(self.backend, &mut self.map, id, ref_count, full_selector);
         res.state
             .change(id, selector, usage, Some(&mut self.temp))
             .ok(); //TODO: unwrap?
@@ -352,7 +357,7 @@ impl<S: ResourceState> ResourceTracker<S> {
     /// the last read-only usage, if possible.
     ///
     /// Returns the old usage as an error if there is a conflict.
-    pub fn use_extend<'a, T: 'a + Borrow<RefCount>>(
+    pub fn use_extend<'a, T: 'a + Borrow<RefCount> + Borrow<S::Selector>>(
         &mut self,
         storage: &'a Storage<T, S::Id>,
         id: S::Id,
@@ -360,7 +365,7 @@ impl<S: ResourceState> ResourceTracker<S> {
         usage: S::Usage,
     ) -> Result<&'a T, S::Usage> {
         let item = &storage[id];
-        self.change_extend(id, item.borrow(), selector, usage)
+        self.change_extend(id, item.borrow(), selector, usage, item.borrow())
             .map(|()| item)
             .map_err(|pending| pending.usage.start)
     }
@@ -369,7 +374,7 @@ impl<S: ResourceState> ResourceTracker<S> {
     /// Combines storage access by 'Id' with the transition that replaces
     /// the last usage with a new one, returning an iterator over these
     /// transitions.
-    pub fn use_replace<'a, T: 'a + Borrow<RefCount>>(
+    pub fn use_replace<'a, T: 'a + Borrow<RefCount> + Borrow<S::Selector>>(
         &mut self,
         storage: &'a Storage<T, S::Id>,
         id: S::Id,
@@ -377,7 +382,7 @@ impl<S: ResourceState> ResourceTracker<S> {
         usage: S::Usage,
     ) -> (&'a T, Drain<PendingTransition<S>>) {
         let item = &storage[id];
-        let drain = self.change_replace(id, item.borrow(), selector, usage);
+        let drain = self.change_replace(id, item.borrow(), selector, usage, item.borrow());
         (item, drain)
     }
 }
@@ -388,6 +393,10 @@ impl<I: Copy + Debug + TypedId> ResourceState for PhantomData<I> {
     type Selector = ();
     type Usage = ();
 
+    fn new(_full_selector: &Self::Selector) -> Self {
+        PhantomData
+    }
+
     fn query(&self, _selector: Self::Selector) -> Option<Self::Usage> {
         Some(())
     }
@@ -415,6 +424,8 @@ impl<I: Copy + Debug + TypedId> ResourceState for PhantomData<I> {
     fn optimize(&mut self) {}
 }
 
+pub const DUMMY_SELECTOR: () = ();
+
 /// A set of trackers for all relevant resources.
 #[derive(Debug)]
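
With the extra `Borrow<S::Selector>` bound, `use_extend` and `use_replace` can pull the resource's full selector straight out of storage instead of defaulting the state. A sketch of the resulting call shape (the tracker, guard, and id values are assumptions):

// Sketch: extend a buffer's tracked usage; both borrow() calls resolve
// through the Borrow impls added to resource.rs in this commit.
let buffer = trackers
    .buffers
    .use_extend(&*buffer_guard, buffer_id, (), BufferUsage::COPY_SRC)
    .expect("conflicting buffer usage");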

View file

@@ -2,26 +2,19 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-use super::{range::RangedStates, PendingTransition, ResourceState, Stitch, Unit};
-use crate::{conv, device::MAX_MIP_LEVELS, resource::TextureUsage, TextureId};
+use super::{SEPARATE_DEPTH_STENCIL_STATES, range::RangedStates, PendingTransition, ResourceState, Stitch, Unit};
+use crate::{conv, device::MAX_MIP_LEVELS, id::TextureId, resource::TextureUsage};
 
 use arrayvec::ArrayVec;
 
 use std::ops::Range;
 
-type PlaneStates = RangedStates<hal::image::Layer, Unit<TextureUsage>>;
-
 //TODO: store `hal::image::State` here to avoid extra conversions
+type PlaneStates = RangedStates<hal::image::Layer, Unit<TextureUsage>>;
+type MipState = ArrayVec<[(hal::format::Aspects, PlaneStates); 2]>;
 
-#[derive(Clone, Debug, Default)]
-struct MipState {
-    color: PlaneStates,
-    depth: PlaneStates,
-    stencil: PlaneStates,
-}
-
-#[derive(Clone, Debug, Default)]
+#[derive(Clone, Debug)]
 pub struct TextureState {
     mips: ArrayVec<[MipState; MAX_MIP_LEVELS]>,
 }
@@ -70,18 +63,32 @@ impl ResourceState for TextureState {
     type Selector = hal::image::SubresourceRange;
     type Usage = TextureUsage;
 
+    fn new(full_selector: &Self::Selector) -> Self {
+        TextureState {
+            mips: (0 .. full_selector.levels.end)
+                .map(|_| {
+                    let mut slices = ArrayVec::new();
+                    let aspects_without_stencil = full_selector.aspects & !hal::format::Aspects::STENCIL;
+                    if SEPARATE_DEPTH_STENCIL_STATES && full_selector.aspects != aspects_without_stencil {
+                        slices.push((aspects_without_stencil, PlaneStates::default()));
+                        slices.push((hal::format::Aspects::STENCIL, PlaneStates::default()));
+                    } else {
+                        slices.push((full_selector.aspects, PlaneStates::default()))
+                    }
+                    slices
+                })
+                .collect()
+        }
+    }
+
     fn query(&self, selector: Self::Selector) -> Option<Self::Usage> {
         let mut result = None;
         let num_levels = self.mips.len();
         let mip_start = num_levels.min(selector.levels.start as usize);
         let mip_end = num_levels.min(selector.levels.end as usize);
         for mip in self.mips[mip_start .. mip_end].iter() {
-            for &(aspect, plane_states) in &[
-                (hal::format::Aspects::COLOR, &mip.color),
-                (hal::format::Aspects::DEPTH, &mip.depth),
-                (hal::format::Aspects::STENCIL, &mip.stencil),
-            ] {
-                if !selector.aspects.contains(aspect) {
+            for &(aspects, ref plane_states) in mip {
+                if !selector.aspects.intersects(aspects) {
                     continue;
                 }
                 match plane_states.query(&selector.layers, |unit| unit.last) {
@@ -104,23 +111,18 @@ impl ResourceState for TextureState {
         usage: Self::Usage,
         mut output: Option<&mut Vec<PendingTransition<Self>>>,
     ) -> Result<(), PendingTransition<Self>> {
-        while self.mips.len() < selector.levels.end as usize {
-            self.mips.push(MipState::default());
-        }
         for (mip_id, mip) in self.mips
             [selector.levels.start as usize .. selector.levels.end as usize]
             .iter_mut()
             .enumerate()
         {
             let level = selector.levels.start + mip_id as hal::image::Level;
-            for &mut (aspect, ref mut plane_states) in &mut [
-                (hal::format::Aspects::COLOR, &mut mip.color),
-                (hal::format::Aspects::DEPTH, &mut mip.depth),
-                (hal::format::Aspects::STENCIL, &mut mip.stencil),
-            ] {
-                if !selector.aspects.contains(aspect) {
+            for &mut (mip_aspects, ref mut plane_states) in mip {
+                let aspects = selector.aspects & mip_aspects;
+                if aspects.is_empty() {
                     continue;
                 }
+                debug_assert_eq!(aspects, mip_aspects);
                 let layers = plane_states.isolate(&selector.layers, Unit::new(usage));
                 for &mut (ref range, ref mut unit) in layers {
                     if unit.last == usage && TextureUsage::ORDERED.contains(usage) {
@@ -129,7 +131,7 @@ impl ResourceState for TextureState {
                     let pending = PendingTransition {
                         id,
                         selector: hal::image::SubresourceRange {
-                            aspects: hal::format::Aspects::COLOR,
+                            aspects,
                             levels: level .. level + 1,
                             layers: range.clone(),
                         },
@@ -158,23 +160,8 @@ impl ResourceState for TextureState {
         for (mip_id, (mip_self, mip_other)) in self.mips.iter_mut().zip(&other.mips).enumerate() {
             let level = mip_id as hal::image::Level;
-            for &mut (aspects, ref mut planes_self, planes_other) in &mut [
-                (
-                    hal::format::Aspects::COLOR,
-                    &mut mip_self.color,
-                    &mip_other.color,
-                ),
-                (
-                    hal::format::Aspects::DEPTH,
-                    &mut mip_self.depth,
-                    &mip_other.depth,
-                ),
-                (
-                    hal::format::Aspects::STENCIL,
-                    &mut mip_self.stencil,
-                    &mip_other.stencil,
-                ),
-            ] {
+            for (&mut (aspects, ref mut planes_self), &(aspects_other, ref planes_other)) in mip_self.iter_mut().zip(mip_other) {
+                debug_assert_eq!(aspects, aspects_other);
                 temp.extend(planes_self.merge(planes_other, 0));
                 planes_self.clear();
@@ -227,9 +214,9 @@ impl ResourceState for TextureState {
     fn optimize(&mut self) {
         for mip in self.mips.iter_mut() {
-            mip.color.coalesce();
-            mip.depth.coalesce();
-            mip.stencil.coalesce();
+            for &mut (_, ref mut planes) in mip.iter_mut() {
+                planes.coalesce();
+            }
         }
     }
 }
@@ -244,10 +231,12 @@ mod test {
     #[test]
     fn query() {
-        let mut ts = TextureState::default();
-        ts.mips.push(MipState::default());
-        ts.mips.push(MipState::default());
-        ts.mips[1].color = PlaneStates::new(&[
+        let mut ts = TextureState::new(&SubresourceRange {
+            aspects: Aspects::COLOR,
+            levels: 0 .. 2,
+            layers: 0 .. 10,
+        });
+        ts.mips[1][0].1 = PlaneStates::new(&[
             (1 .. 3, Unit::new(TextureUsage::SAMPLED)),
             (3 .. 5, Unit::new(TextureUsage::SAMPLED)),
             (5 .. 6, Unit::new(TextureUsage::STORAGE)),
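
A sketch of what the new constructor yields for a typical color texture, mirroring the updated unit test above: with `SEPARATE_DEPTH_STENCIL_STATES` false, each mip gets a single `(COLOR, PlaneStates)` slice (test-style, since `mips` is private to this module):

// Sketch: two mip levels, one aspect slice per level.
let ts = TextureState::new(&hal::image::SubresourceRange {
    aspects: hal::format::Aspects::COLOR,
    levels: 0 .. 2,
    layers: 0 .. 10,
});
assert_eq!(ts.mips.len(), 2);
assert_eq!(ts.mips[0].len(), 1);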

View file

@@ -13,41 +13,19 @@ keywords = ["graphics"]
 license = "MPL-2.0"
 
 [lib]
-# Enabling these targets makes our CI bots try to build them and fail atm
-#crate-type = ["lib", "cdylib", "staticlib"]
+crate-type = ["lib", "cdylib", "staticlib"]
 
 [features]
 default = []
-local = ["lazy_static", "raw-window-handle"]
-metal-auto-capture = ["gfx-backend-metal/auto-capture"]
-#NOTE: glutin feature is not stable, use at your own risk
-#glutin = ["gfx-backend-gl/glutin"]
+#metal-auto-capture = ["gfx-backend-metal/auto-capture"]
+vulkan-portability = ["core/gfx-backend-vulkan"]
+
+[dependencies.core]
+path = "../wgpu-core"
+package = "wgpu-core"
+version = "0.1"
 
 [dependencies]
-arrayvec = "0.5"
-bitflags = "1.0"
-copyless = "0.1"
-fxhash = "0.2"
-lazy_static = { version = "1.1.0", optional = true }
-log = "0.4"
-hal = { package = "gfx-hal", version = "0.4" }
-gfx-backend-empty = { version = "0.4" }
+lazy_static = "1.1"
 parking_lot = "0.9"
-raw-window-handle = { version = "0.3", optional = true }
-rendy-memory = "0.5"
-rendy-descriptor = "0.5"
-serde = { version = "1.0", features = ["serde_derive"], optional = true }
-smallvec = "0.6"
-vec_map = "0.8"
-
-[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies]
-gfx-backend-metal = { version = "0.4" }
-gfx-backend-vulkan = { version = "0.4", optional = true }
-
-[target.'cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies]
-gfx-backend-vulkan = { version = "0.4", features = ["x11"] }
-
-[target.'cfg(windows)'.dependencies]
-gfx-backend-dx12 = { version = "0.4.1" }
-gfx-backend-dx11 = { version = "0.4" }
-gfx-backend-vulkan = { version = "0.4" }
+raw-window-handle = "0.3"

View file

@@ -22,10 +22,8 @@ prefix = "WGPU"
 exclude = ["BufferMapResult"]
 
 [parse]
-parse_deps = false
-
-[parse.expand]
-features = ["local"]
+parse_deps = true
+include = ["wgpu-core"]
 
 [fn]
@@ -40,7 +38,3 @@ derive_helper_methods = true
 bitflags = true
 
 [defines]
-"feature = local" = "WGPU_LOCAL"
-"feature = gfx-backend-gl" = "WGPU_BACKEND_GL"
-"feature = winit" = "WGPU_WINIT"
-"feature = glutin" = "WGPU_GLUTIN"

View file

@@ -0,0 +1,317 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::GLOBAL;
use core::{gfx_select, id};
use std::{marker::PhantomData, slice};
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_finish(
encoder_id: id::CommandEncoderId,
desc: Option<&core::command::CommandBufferDescriptor>,
) -> id::CommandBufferId {
let desc = &desc.cloned().unwrap_or_default();
gfx_select!(encoder_id => GLOBAL.command_encoder_finish(encoder_id, desc))
}
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_copy_buffer_to_buffer(
command_encoder_id: id::CommandEncoderId,
source: id::BufferId,
source_offset: core::BufferAddress,
destination: id::BufferId,
destination_offset: core::BufferAddress,
size: core::BufferAddress,
) {
gfx_select!(command_encoder_id => GLOBAL.command_encoder_copy_buffer_to_buffer(
command_encoder_id,
source, source_offset,
destination,
destination_offset,
size))
}
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_copy_buffer_to_texture(
command_encoder_id: id::CommandEncoderId,
source: &core::command::BufferCopyView,
destination: &core::command::TextureCopyView,
copy_size: core::Extent3d,
) {
gfx_select!(command_encoder_id => GLOBAL.command_encoder_copy_buffer_to_texture(
command_encoder_id,
source,
destination,
copy_size))
}
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_copy_texture_to_buffer(
command_encoder_id: id::CommandEncoderId,
source: &core::command::TextureCopyView,
destination: &core::command::BufferCopyView,
copy_size: core::Extent3d,
) {
gfx_select!(command_encoder_id => GLOBAL.command_encoder_copy_texture_to_buffer(
command_encoder_id,
source,
destination,
copy_size))
}
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_copy_texture_to_texture(
command_encoder_id: id::CommandEncoderId,
source: &core::command::TextureCopyView,
destination: &core::command::TextureCopyView,
copy_size: core::Extent3d,
) {
gfx_select!(command_encoder_id => GLOBAL.command_encoder_copy_texture_to_texture(
command_encoder_id,
source,
destination,
copy_size))
}
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_begin_render_pass(
encoder_id: id::CommandEncoderId,
desc: &core::command::RenderPassDescriptor,
) -> id::RenderPassId {
gfx_select!(encoder_id => GLOBAL.command_encoder_begin_render_pass(encoder_id, desc, PhantomData))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_end_pass(pass_id: id::RenderPassId) {
gfx_select!(pass_id => GLOBAL.render_pass_end_pass(pass_id))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_bind_group(
pass_id: id::RenderPassId,
index: u32,
bind_group_id: id::BindGroupId,
offsets: *const core::BufferAddress,
offsets_length: usize,
) {
let offsets = if offsets_length != 0 {
unsafe { slice::from_raw_parts(offsets, offsets_length) }
} else {
&[]
};
gfx_select!(pass_id => GLOBAL.render_pass_set_bind_group(pass_id, index, bind_group_id, offsets))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_push_debug_group(
_pass_id: id::RenderPassId,
_label: core::RawString,
) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_pop_debug_group(_pass_id: id::RenderPassId) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_insert_debug_marker(
_pass_id: id::RenderPassId,
_label: core::RawString,
) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_index_buffer(
pass_id: id::RenderPassId,
buffer_id: id::BufferId,
offset: core::BufferAddress,
) {
gfx_select!(pass_id => GLOBAL.render_pass_set_index_buffer(pass_id, buffer_id, offset))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_vertex_buffers(
pass_id: id::RenderPassId,
start_slot: u32,
buffers: *const id::BufferId,
offsets: *const core::BufferAddress,
length: usize,
) {
let buffers = unsafe { slice::from_raw_parts(buffers, length) };
let offsets = unsafe { slice::from_raw_parts(offsets, length) };
gfx_select!(pass_id => GLOBAL.render_pass_set_vertex_buffers(pass_id, start_slot, buffers, offsets))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw(
pass_id: id::RenderPassId,
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
) {
gfx_select!(pass_id => GLOBAL.render_pass_draw(pass_id, vertex_count, instance_count, first_vertex, first_instance))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw_indirect(
pass_id: id::RenderPassId,
indirect_buffer_id: id::BufferId,
indirect_offset: core::BufferAddress,
) {
gfx_select!(pass_id => GLOBAL.render_pass_draw_indirect(pass_id, indirect_buffer_id, indirect_offset))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw_indexed(
pass_id: id::RenderPassId,
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
) {
gfx_select!(pass_id => GLOBAL.render_pass_draw_indexed(pass_id, index_count, instance_count, first_index, base_vertex, first_instance))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw_indexed_indirect(
pass_id: id::RenderPassId,
indirect_buffer_id: id::BufferId,
indirect_offset: core::BufferAddress,
) {
gfx_select!(pass_id => GLOBAL.render_pass_draw_indexed_indirect(pass_id, indirect_buffer_id, indirect_offset))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_pipeline(
pass_id: id::RenderPassId,
pipeline_id: id::RenderPipelineId,
) {
gfx_select!(pass_id => GLOBAL.render_pass_set_pipeline(pass_id, pipeline_id))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_blend_color(pass_id: id::RenderPassId, color: &core::Color) {
gfx_select!(pass_id => GLOBAL.render_pass_set_blend_color(pass_id, color))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_stencil_reference(pass_id: id::RenderPassId, value: u32) {
gfx_select!(pass_id => GLOBAL.render_pass_set_stencil_reference(pass_id, value))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_viewport(
pass_id: id::RenderPassId,
x: f32,
y: f32,
w: f32,
h: f32,
min_depth: f32,
max_depth: f32,
) {
gfx_select!(pass_id => GLOBAL.render_pass_set_viewport(pass_id, x, y, w, h, min_depth, max_depth))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_scissor_rect(
pass_id: id::RenderPassId,
x: u32,
y: u32,
w: u32,
h: u32,
) {
gfx_select!(pass_id => GLOBAL.render_pass_set_scissor_rect(pass_id, x, y, w, h))
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_execute_bundles(
_pass_id: id::RenderPassId,
_bundles: *const id::RenderBundleId,
_bundles_length: usize,
) {
unimplemented!()
}
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_begin_compute_pass(
encoder_id: id::CommandEncoderId,
desc: Option<&core::command::ComputePassDescriptor>,
) -> id::ComputePassId {
let desc = &desc.cloned().unwrap_or_default();
gfx_select!(encoder_id => GLOBAL.command_encoder_begin_compute_pass(encoder_id, desc, PhantomData))
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_end_pass(pass_id: id::ComputePassId) {
gfx_select!(pass_id => GLOBAL.compute_pass_end_pass(pass_id))
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_set_bind_group(
pass_id: id::ComputePassId,
index: u32,
bind_group_id: id::BindGroupId,
offsets: *const core::BufferAddress,
offsets_length: usize,
) {
let offsets = if offsets_length != 0 {
unsafe { slice::from_raw_parts(offsets, offsets_length) }
} else {
&[]
};
gfx_select!(pass_id => GLOBAL.compute_pass_set_bind_group(pass_id, index, bind_group_id, offsets))
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_push_debug_group(
_pass_id: id::ComputePassId,
_label: core::RawString,
) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_pop_debug_group(_pass_id: id::ComputePassId) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_insert_debug_marker(
_pass_id: id::ComputePassId,
_label: core::RawString,
) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_dispatch(pass_id: id::ComputePassId, x: u32, y: u32, z: u32) {
gfx_select!(pass_id => GLOBAL.compute_pass_dispatch(pass_id, x, y, z))
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_dispatch_indirect(
pass_id: id::ComputePassId,
indirect_buffer_id: id::BufferId,
indirect_offset: core::BufferAddress,
) {
gfx_select!(pass_id => GLOBAL.compute_pass_dispatch_indirect(pass_id, indirect_buffer_id, indirect_offset))
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_set_pipeline(
pass_id: id::ComputePassId,
pipeline_id: id::ComputePipelineId,
) {
gfx_select!(pass_id => GLOBAL.compute_pass_set_pipeline(pass_id, pipeline_id))
}
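
These `extern "C"` shims are thin: each decodes the backend from the id via `gfx_select!` and forwards into `wgpu-core`. A sketch of calling one of the pointer-and-length entry points from Rust, the way a C caller would (the id values are assumptions):

// Sketch: dynamic offsets travel as a raw (pointer, length) pair;
// a length of 0 is treated as an empty slice by the shim above.
let offsets: [core::BufferAddress; 2] = [0, 256];
wgpu_render_pass_set_bind_group(pass_id, 0, bind_group_id, offsets.as_ptr(), offsets.len());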

View file

@@ -1,316 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
command::bind::{Binder, LayoutChange},
device::all_buffer_stages,
hub::{GfxBackend, Global, Token},
track::{Stitch, TrackerSet},
BindGroupId,
BufferAddress,
BufferId,
BufferUsage,
CommandBuffer,
CommandBufferId,
ComputePassId,
ComputePipelineId,
RawString,
Stored,
BIND_BUFFER_ALIGNMENT,
};
#[cfg(feature = "local")]
use crate::{gfx_select, hub::GLOBAL};
use hal::{self, command::CommandBuffer as _};
use std::iter;
#[cfg(feature = "local")]
use std::slice;
#[derive(Debug)]
pub struct ComputePass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
binder: Binder,
trackers: TrackerSet,
}
impl<B: hal::Backend> ComputePass<B> {
pub(crate) fn new(
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
trackers: TrackerSet,
max_bind_groups: u32,
) -> Self {
ComputePass {
raw,
cmb_id,
binder: Binder::new(max_bind_groups),
trackers,
}
}
}
// Common routines between render/compute
pub fn compute_pass_end_pass<B: GfxBackend>(global: &Global, pass_id: ComputePassId) {
let mut token = Token::root();
let hub = B::hub(global);
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let (pass, _) = hub.compute_passes.unregister(pass_id, &mut token);
let cmb = &mut cmb_guard[pass.cmb_id.value];
// There are no transitions to be made: we've already been inserting barriers
// into the parent command buffer while recording this compute pass.
cmb.trackers = pass.trackers;
cmb.raw.push(pass.raw);
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_end_pass(pass_id: ComputePassId) {
gfx_select!(pass_id => compute_pass_end_pass(&*GLOBAL, pass_id))
}
pub fn compute_pass_set_bind_group<B: GfxBackend>(
global: &Global,
pass_id: ComputePassId,
index: u32,
bind_group_id: BindGroupId,
offsets: &[BufferAddress],
) {
let hub = B::hub(global);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = hub.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let bind_group = pass
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
assert_eq!(bind_group.dynamic_count, offsets.len());
if cfg!(debug_assertions) {
for off in offsets {
assert_eq!(
*off % BIND_BUFFER_ALIGNMENT,
0,
"Misaligned dynamic buffer offset: {} does not align with {}",
off,
BIND_BUFFER_ALIGNMENT
);
}
}
//Note: currently, WebGPU compute passes have synchronization defined
// at a dispatch granularity, so we insert the necessary barriers here.
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
log::trace!(
"Encoding barriers on binding of {:?} in pass {:?}",
bind_group_id,
pass_id
);
CommandBuffer::insert_barriers(
&mut pass.raw,
&mut pass.trackers,
&bind_group.used,
Stitch::Last,
&*buffer_guard,
&*texture_guard,
);
if let Some((pipeline_layout_id, follow_up_sets, follow_up_offsets)) = pass
.binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let bind_groups = iter::once(bind_group.raw.raw())
.chain(follow_up_sets.map(|bg_id| bind_group_guard[bg_id].raw.raw()));
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout_guard[pipeline_layout_id].raw,
index as usize,
bind_groups,
offsets
.iter()
.chain(follow_up_offsets)
.map(|&off| off as hal::command::DescriptorSetOffset),
);
}
};
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_set_bind_group(
pass_id: ComputePassId,
index: u32,
bind_group_id: BindGroupId,
offsets: *const BufferAddress,
offsets_length: usize,
) {
let offsets = if offsets_length != 0 {
unsafe { slice::from_raw_parts(offsets, offsets_length) }
} else {
&[]
};
gfx_select!(pass_id => compute_pass_set_bind_group(&*GLOBAL, pass_id, index, bind_group_id, offsets))
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_push_debug_group(_pass_id: ComputePassId, _label: RawString) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_pop_debug_group(_pass_id: ComputePassId) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_insert_debug_marker(
_pass_id: ComputePassId,
_label: RawString,
) {
//TODO
}
// Compute-specific routines
pub fn compute_pass_dispatch<B: GfxBackend>(
global: &Global,
pass_id: ComputePassId,
x: u32,
y: u32,
z: u32,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, _) = hub.compute_passes.write(&mut token);
unsafe {
pass_guard[pass_id].raw.dispatch([x, y, z]);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_dispatch(pass_id: ComputePassId, x: u32, y: u32, z: u32) {
gfx_select!(pass_id => compute_pass_dispatch(&*GLOBAL, pass_id, x, y, z))
}
pub fn compute_pass_dispatch_indirect<B: GfxBackend>(
global: &Global,
pass_id: ComputePassId,
indirect_buffer_id: BufferId,
indirect_offset: BufferAddress,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (buffer_guard, _) = hub.buffers.read(&mut token);
let (mut pass_guard, _) = hub.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let (src_buffer, src_pending) = pass.trackers.buffers.use_replace(
&*buffer_guard,
indirect_buffer_id,
(),
BufferUsage::INDIRECT,
);
assert!(src_buffer.usage.contains(BufferUsage::INDIRECT));
let barriers = src_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &src_buffer.raw,
families: None,
range: None .. None,
});
unsafe {
pass.raw.pipeline_barrier(
all_buffer_stages() .. all_buffer_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
pass.raw.dispatch_indirect(&src_buffer.raw, indirect_offset);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_dispatch_indirect(
pass_id: ComputePassId,
indirect_buffer_id: BufferId,
indirect_offset: BufferAddress,
) {
gfx_select!(pass_id => compute_pass_dispatch_indirect(&*GLOBAL, pass_id, indirect_buffer_id, indirect_offset))
}
pub fn compute_pass_set_pipeline<B: GfxBackend>(
global: &Global,
pass_id: ComputePassId,
pipeline_id: ComputePipelineId,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = hub.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token);
let pipeline = &pipeline_guard[pipeline_id];
unsafe {
pass.raw.bind_compute_pipeline(&pipeline.raw);
}
// Rebind resources
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
let mut is_compatible = true;
for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
match entry.expect_layout(bgl_id) {
LayoutChange::Match(bg_id, offsets) if is_compatible => {
let desc_set = bind_group_guard[bg_id].raw.raw();
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
offsets.iter().map(|offset| *offset as u32),
);
}
}
LayoutChange::Match(..) | LayoutChange::Unchanged => {}
LayoutChange::Mismatch => {
is_compatible = false;
}
}
}
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_set_pipeline(
pass_id: ComputePassId,
pipeline_id: ComputePipelineId,
) {
gfx_select!(pass_id => compute_pass_set_pipeline(&*GLOBAL, pass_id, pipeline_id))
}

View file

@@ -1,769 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
mod allocator;
mod bind;
mod compute;
mod render;
mod transfer;
pub(crate) use self::allocator::CommandAllocator;
pub use self::compute::*;
pub use self::render::*;
pub use self::transfer::*;
use crate::{
conv,
device::{
all_buffer_stages,
all_image_stages,
FramebufferKey,
RenderPassContext,
RenderPassKey,
},
hub::{GfxBackend, Global, Storage, Token},
id::{Input, Output},
resource::TextureViewInner,
track::{Stitch, TrackerSet},
Buffer,
BufferId,
Color,
CommandBufferId,
CommandEncoderId,
ComputePassId,
DeviceId,
Features,
LifeGuard,
RenderPassId,
Stored,
Texture,
TextureId,
TextureUsage,
TextureViewId,
};
#[cfg(feature = "local")]
use crate::{gfx_select, hub::GLOBAL};
use arrayvec::ArrayVec;
use hal::{adapter::PhysicalDevice as _, command::CommandBuffer as _, device::Device as _};
#[cfg(feature = "local")]
use std::marker::PhantomData;
use std::{borrow::Borrow, collections::hash_map::Entry, iter, mem, ptr, slice, thread::ThreadId};
pub struct RenderBundle<B: hal::Backend> {
_raw: B::CommandBuffer,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub enum LoadOp {
Clear = 0,
Load = 1,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub enum StoreOp {
Clear = 0,
Store = 1,
}
#[repr(C)]
#[derive(Debug)]
pub struct RenderPassColorAttachmentDescriptor {
pub attachment: TextureViewId,
pub resolve_target: *const TextureViewId,
pub load_op: LoadOp,
pub store_op: StoreOp,
pub clear_color: Color,
}
#[repr(C)]
#[derive(Debug)]
pub struct RenderPassDepthStencilAttachmentDescriptor<T> {
pub attachment: T,
pub depth_load_op: LoadOp,
pub depth_store_op: StoreOp,
pub clear_depth: f32,
pub stencil_load_op: LoadOp,
pub stencil_store_op: StoreOp,
pub clear_stencil: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct RenderPassDescriptor {
pub color_attachments: *const RenderPassColorAttachmentDescriptor,
pub color_attachments_length: usize,
pub depth_stencil_attachment: *const RenderPassDepthStencilAttachmentDescriptor<TextureViewId>,
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
pub struct ComputePassDescriptor {
pub todo: u32,
}
#[derive(Debug)]
pub struct CommandBuffer<B: hal::Backend> {
pub(crate) raw: Vec<B::CommandBuffer>,
is_recording: bool,
recorded_thread_id: ThreadId,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) life_guard: LifeGuard,
pub(crate) trackers: TrackerSet,
pub(crate) used_swap_chain: Option<(Stored<TextureViewId>, B::Framebuffer)>,
pub(crate) features: Features,
}
impl<B: GfxBackend> CommandBuffer<B> {
pub(crate) fn insert_barriers(
raw: &mut B::CommandBuffer,
base: &mut TrackerSet,
head: &TrackerSet,
stitch: Stitch,
buffer_guard: &Storage<Buffer<B>, BufferId>,
texture_guard: &Storage<Texture<B>, TextureId>,
) {
log::trace!("\tstitch {:?}", stitch);
debug_assert_eq!(B::VARIANT, base.backend());
debug_assert_eq!(B::VARIANT, head.backend());
let buffer_barriers = base
.buffers
.merge_replace(&head.buffers, stitch)
.map(|pending| {
log::trace!("\tbuffer -> {:?}", pending);
hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &buffer_guard[pending.id].raw,
range: None .. None,
families: None,
}
});
let texture_barriers = base
.textures
.merge_replace(&head.textures, stitch)
.map(|pending| {
log::trace!("\ttexture -> {:?}", pending);
hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture_guard[pending.id].raw,
range: pending.selector,
families: None,
}
});
base.views.merge_extend(&head.views).unwrap();
base.bind_groups.merge_extend(&head.bind_groups).unwrap();
base.samplers.merge_extend(&head.samplers).unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
raw.pipeline_barrier(
stages .. stages,
hal::memory::Dependencies::empty(),
buffer_barriers.chain(texture_barriers),
);
}
}
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
pub struct CommandEncoderDescriptor {
// MSVC doesn't allow zero-sized structs
// We can remove this when we actually have a field
pub todo: u32,
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
pub struct CommandBufferDescriptor {
pub todo: u32,
}
pub fn command_encoder_finish<B: GfxBackend>(
global: &Global,
encoder_id: CommandEncoderId,
_desc: &CommandBufferDescriptor,
) -> CommandBufferId {
let hub = B::hub(global);
let mut token = Token::root();
//TODO: actually close the last recorded command buffer
let (mut comb_guard, _) = hub.command_buffers.write(&mut token);
let comb = &mut comb_guard[encoder_id];
assert!(comb.is_recording);
comb.is_recording = false;
// stop tracking the swapchain image, if used
if let Some((ref view_id, _)) = comb.used_swap_chain {
comb.trackers.views.remove(view_id.value);
}
encoder_id
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_finish(
encoder_id: CommandEncoderId,
desc: Option<&CommandBufferDescriptor>,
) -> CommandBufferId {
let desc = &desc.cloned().unwrap_or_default();
gfx_select!(encoder_id => command_encoder_finish(&*GLOBAL, encoder_id, desc))
}
pub fn command_encoder_begin_render_pass<B: GfxBackend>(
global: &Global,
encoder_id: CommandEncoderId,
desc: &RenderPassDescriptor,
id_in: Input<RenderPassId>,
) -> Output<RenderPassId> {
let hub = B::hub(global);
let mut token = Token::root();
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let device = &device_guard[cmb.device_id.value];
let limits = adapter_guard[device.adapter_id]
.raw
.physical_device
.limits();
let samples_count_limit = limits.framebuffer_color_sample_counts;
let mut current_comb = device.com_allocator.extend(cmb);
unsafe {
current_comb.begin(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
hal::command::CommandBufferInheritanceInfo::default(),
);
}
let pass = {
let (_, mut token) = hub.buffers.read(&mut token); //skip token
let (texture_guard, mut token) = hub.textures.read(&mut token);
let (view_guard, _) = hub.texture_views.read(&mut token);
let mut extent = None;
let mut barriers = Vec::new();
let mut used_swap_chain_image = None::<Stored<TextureViewId>>;
let color_attachments =
unsafe { slice::from_raw_parts(desc.color_attachments, desc.color_attachments_length) };
let depth_stencil_attachment = unsafe { desc.depth_stencil_attachment.as_ref() };
let sample_count = color_attachments
.get(0)
.map(|at| view_guard[at.attachment].samples)
.unwrap_or(1);
assert!(
sample_count & samples_count_limit != 0,
"Attachment sample_count must be supported by physical device limits"
);
log::trace!(
"Encoding render pass begin in command buffer {:?}",
encoder_id
);
let rp_key = {
let trackers = &mut cmb.trackers;
let depth_stencil = depth_stencil_attachment.map(|at| {
let view = trackers
.views
.use_extend(&*view_guard, at.attachment, (), ())
.unwrap();
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
let texture_id = match view.inner {
TextureViewInner::Native { ref source_id, .. } => source_id.value,
TextureViewInner::SwapChain { .. } => {
panic!("Unexpected depth/stencil use of swapchain image!")
}
};
let texture = &texture_guard[texture_id];
assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT));
let old_layout = match trackers.textures.query(texture_id, view.range.clone()) {
Some(usage) => {
conv::map_texture_state(
usage,
hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL,
)
.1
}
None => {
// Required sub-resources have inconsistent states, we need to
// issue individual barriers instead of relying on the render pass.
let pending = trackers.textures.change_replace(
texture_id,
&texture.life_guard.ref_count,
view.range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
);
barriers.extend(pending.map(|pending| {
log::trace!("\tdepth-stencil {:?}", pending);
hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture.raw,
families: None,
range: pending.selector,
}
}));
hal::image::Layout::DepthStencilAttachmentOptimal
}
};
hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format, device.features)),
samples: view.samples,
ops: conv::map_load_store_ops(at.depth_load_op, at.depth_store_op),
stencil_ops: conv::map_load_store_ops(at.stencil_load_op, at.stencil_store_op),
layouts: old_layout .. hal::image::Layout::DepthStencilAttachmentOptimal,
}
});
let mut colors = ArrayVec::new();
let mut resolves = ArrayVec::new();
for at in color_attachments {
let view = &view_guard[at.attachment];
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
assert_eq!(
view.samples, sample_count,
"All attachments must have the same sample_count"
);
let first_use =
trackers
.views
.init(at.attachment, &view.life_guard.ref_count, (), ());
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
let texture = &texture_guard[source_id.value];
assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT));
let old_layout =
match trackers.textures.query(source_id.value, view.range.clone()) {
Some(usage) => {
conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
}
None => {
// Required sub-resources have inconsistent states, we need to
// issue individual barriers instead of relying on the render pass.
let pending = trackers.textures.change_replace(
source_id.value,
&texture.life_guard.ref_count,
view.range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
);
barriers.extend(pending.map(|pending| {
log::trace!("\tcolor {:?}", pending);
hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture.raw,
families: None,
range: pending.selector,
}
}));
hal::image::Layout::ColorAttachmentOptimal
}
};
old_layout .. hal::image::Layout::ColorAttachmentOptimal
}
TextureViewInner::SwapChain { .. } => {
if let Some((ref view_id, _)) = cmb.used_swap_chain {
assert_eq!(view_id.value, at.attachment);
} else {
assert!(used_swap_chain_image.is_none());
used_swap_chain_image = Some(Stored {
value: at.attachment,
ref_count: view.life_guard.ref_count.clone(),
});
}
let end = hal::image::Layout::Present;
let start = if first_use {
hal::image::Layout::Undefined
} else {
end
};
start .. end
}
};
colors.push(hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format, device.features)),
samples: view.samples,
ops: conv::map_load_store_ops(at.load_op, at.store_op),
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
layouts,
});
}
for &resolve_target in color_attachments
.iter()
.flat_map(|at| unsafe { at.resolve_target.as_ref() })
{
let view = &view_guard[resolve_target];
assert_eq!(extent, Some(view.extent));
assert_eq!(
view.samples, 1,
"All resolve_targets must have a sample_count of 1"
);
let first_use =
trackers
.views
.init(resolve_target, &view.life_guard.ref_count, (), ());
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
let texture = &texture_guard[source_id.value];
assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT));
let old_layout =
match trackers.textures.query(source_id.value, view.range.clone()) {
Some(usage) => {
conv::map_texture_state(usage, hal::format::Aspects::COLOR).1
}
None => {
// Required sub-resources have inconsistent states, we need to
// issue individual barriers instead of relying on the render pass.
let pending = trackers.textures.change_replace(
source_id.value,
&texture.life_guard.ref_count,
view.range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
);
barriers.extend(pending.map(|pending| {
log::trace!("\tresolve {:?}", pending);
hal::memory::Barrier::Image {
states: pending.to_states(),
target: &texture.raw,
families: None,
range: pending.selector,
}
}));
hal::image::Layout::ColorAttachmentOptimal
}
};
old_layout .. hal::image::Layout::ColorAttachmentOptimal
}
TextureViewInner::SwapChain { .. } => {
if let Some((ref view_id, _)) = cmb.used_swap_chain {
assert_eq!(view_id.value, resolve_target);
} else {
assert!(used_swap_chain_image.is_none());
used_swap_chain_image = Some(Stored {
value: resolve_target,
ref_count: view.life_guard.ref_count.clone(),
});
}
let end = hal::image::Layout::Present;
let start = if first_use {
hal::image::Layout::Undefined
} else {
end
};
start .. end
}
};
resolves.push(hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format, device.features)),
samples: view.samples,
ops: hal::pass::AttachmentOps::new(
hal::pass::AttachmentLoadOp::DontCare,
hal::pass::AttachmentStoreOp::Store,
),
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
layouts,
});
}
RenderPassKey {
colors,
resolves,
depth_stencil,
}
};
if !barriers.is_empty() {
unsafe {
current_comb.pipeline_barrier(
all_image_stages() .. all_image_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
}
}
let mut render_pass_cache = device.render_passes.lock();
let render_pass = match render_pass_cache.entry(rp_key.clone()) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
let color_ids = [
(0, hal::image::Layout::ColorAttachmentOptimal),
(1, hal::image::Layout::ColorAttachmentOptimal),
(2, hal::image::Layout::ColorAttachmentOptimal),
(3, hal::image::Layout::ColorAttachmentOptimal),
];
let mut resolve_ids = ArrayVec::<[_; crate::device::MAX_COLOR_TARGETS]>::new();
let mut attachment_index = color_attachments.len();
if color_attachments
.iter()
.any(|at| at.resolve_target != ptr::null())
{
for (i, at) in color_attachments.iter().enumerate() {
if at.resolve_target == ptr::null() {
resolve_ids.push((
hal::pass::ATTACHMENT_UNUSED,
hal::image::Layout::ColorAttachmentOptimal,
));
} else {
let sample_count_check =
view_guard[color_attachments[i].attachment].samples;
assert!(sample_count_check > 1, "RenderPassColorAttachmentDescriptor with a resolve_target must have an attachment with sample_count > 1");
resolve_ids.push((
attachment_index,
hal::image::Layout::ColorAttachmentOptimal,
));
attachment_index += 1;
}
}
}
let depth_id = (
attachment_index,
hal::image::Layout::DepthStencilAttachmentOptimal,
);
let subpass = hal::pass::SubpassDesc {
colors: &color_ids[.. color_attachments.len()],
resolves: &resolve_ids,
depth_stencil: depth_stencil_attachment.map(|_| &depth_id),
inputs: &[],
preserves: &[],
};
let pass = unsafe {
device
.raw
.create_render_pass(e.key().all(), &[subpass], &[])
}
.unwrap();
e.insert(pass)
}
};
let mut framebuffer_cache;
let fb_key = FramebufferKey {
colors: color_attachments.iter().map(|at| at.attachment).collect(),
resolves: color_attachments
.iter()
.filter_map(|at| unsafe { at.resolve_target.as_ref() }.cloned())
.collect(),
depth_stencil: depth_stencil_attachment.map(|at| at.attachment),
};
let framebuffer = match used_swap_chain_image.take() {
Some(view_id) => {
assert!(cmb.used_swap_chain.is_none());
// Always create a new framebuffer and delete it after presentation.
let attachments = fb_key.all().map(|&id| match view_guard[id].inner {
TextureViewInner::Native { ref raw, .. } => raw,
TextureViewInner::SwapChain { ref image, .. } => Borrow::borrow(image),
});
let framebuffer = unsafe {
device
.raw
.create_framebuffer(&render_pass, attachments, extent.unwrap())
.unwrap()
};
cmb.used_swap_chain = Some((view_id, framebuffer));
&mut cmb.used_swap_chain.as_mut().unwrap().1
}
None => {
// Cache framebuffers by the device.
framebuffer_cache = device.framebuffers.lock();
match framebuffer_cache.entry(fb_key) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
let fb = {
let attachments = e.key().all().map(|&id| match view_guard[id].inner {
TextureViewInner::Native { ref raw, .. } => raw,
TextureViewInner::SwapChain { ref image, .. } => {
Borrow::borrow(image)
}
});
unsafe {
device.raw.create_framebuffer(
&render_pass,
attachments,
extent.unwrap(),
)
}
.unwrap()
};
e.insert(fb)
}
}
}
};
let rect = {
let ex = extent.unwrap();
hal::pso::Rect {
x: 0,
y: 0,
w: ex.width as _,
h: ex.height as _,
}
};
let clear_values = color_attachments
.iter()
.zip(&rp_key.colors)
.flat_map(|(at, key)| {
match at.load_op {
LoadOp::Load => None,
LoadOp::Clear => {
use hal::format::ChannelType;
//TODO: validate signed/unsigned and normalized ranges of the color values
let value = match key.format.unwrap().base_format().1 {
ChannelType::Unorm
| ChannelType::Snorm
| ChannelType::Ufloat
| ChannelType::Sfloat
| ChannelType::Uscaled
| ChannelType::Sscaled
| ChannelType::Srgb => hal::command::ClearColor {
float32: conv::map_color_f32(&at.clear_color),
},
ChannelType::Sint => hal::command::ClearColor {
sint32: conv::map_color_i32(&at.clear_color),
},
ChannelType::Uint => hal::command::ClearColor {
uint32: conv::map_color_u32(&at.clear_color),
},
};
Some(hal::command::ClearValue { color: value })
}
}
})
.chain(depth_stencil_attachment.and_then(|at| {
match (at.depth_load_op, at.stencil_load_op) {
(LoadOp::Load, LoadOp::Load) => None,
(LoadOp::Clear, _) | (_, LoadOp::Clear) => {
let value = hal::command::ClearDepthStencil {
depth: at.clear_depth,
stencil: at.clear_stencil,
};
Some(hal::command::ClearValue {
depth_stencil: value,
})
}
}
}));
unsafe {
current_comb.begin_render_pass(
render_pass,
framebuffer,
rect,
clear_values,
hal::command::SubpassContents::Inline,
);
current_comb.set_scissors(0, iter::once(&rect));
current_comb.set_viewports(
0,
iter::once(hal::pso::Viewport {
rect,
depth: 0.0 .. 1.0,
}),
);
}
let context = RenderPassContext {
colors: color_attachments
.iter()
.map(|at| view_guard[at.attachment].format)
.collect(),
resolves: color_attachments
.iter()
.filter_map(|at| unsafe { at.resolve_target.as_ref() })
.map(|resolve| view_guard[*resolve].format)
.collect(),
depth_stencil: depth_stencil_attachment.map(|at| view_guard[at.attachment].format),
};
RenderPass::new(
current_comb,
Stored {
value: encoder_id,
ref_count: cmb.life_guard.ref_count.clone(),
},
context,
sample_count,
cmb.features.max_bind_groups,
)
};
hub.render_passes.register_identity(id_in, pass, &mut token)
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_begin_render_pass(
encoder_id: CommandEncoderId,
desc: &RenderPassDescriptor,
) -> RenderPassId {
gfx_select!(encoder_id => command_encoder_begin_render_pass(&*GLOBAL, encoder_id, desc, PhantomData))
}
pub fn command_encoder_begin_compute_pass<B: GfxBackend>(
global: &Global,
encoder_id: CommandEncoderId,
_desc: &ComputePassDescriptor,
id_in: Input<ComputePassId>,
) -> Output<ComputePassId> {
let hub = B::hub(global);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let raw = cmb.raw.pop().unwrap();
let trackers = mem::replace(&mut cmb.trackers, TrackerSet::new(encoder_id.backend()));
let stored = Stored {
value: encoder_id,
ref_count: cmb.life_guard.ref_count.clone(),
};
let pass = ComputePass::new(raw, stored, trackers, cmb.features.max_bind_groups);
hub.compute_passes
.register_identity(id_in, pass, &mut token)
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_begin_compute_pass(
encoder_id: CommandEncoderId,
desc: Option<&ComputePassDescriptor>,
) -> ComputePassId {
let desc = &desc.cloned().unwrap_or_default();
gfx_select!(encoder_id => command_encoder_begin_compute_pass(&*GLOBAL, encoder_id, desc, PhantomData))
}


@ -1,848 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
command::bind::{Binder, LayoutChange},
conv,
device::{RenderPassContext, BIND_BUFFER_ALIGNMENT, MAX_VERTEX_BUFFERS},
hub::{GfxBackend, Global, Token},
pipeline::{IndexFormat, InputStepMode, PipelineFlags},
resource::BufferUsage,
track::{Stitch, TrackerSet},
BindGroupId,
BufferAddress,
BufferId,
Color,
CommandBuffer,
CommandBufferId,
RenderPassId,
RenderPipelineId,
Stored,
};
#[cfg(feature = "local")]
use crate::{gfx_select, hub::GLOBAL, RawString, RenderBundleId};
use hal::command::CommandBuffer as _;
#[cfg(feature = "local")]
use std::slice;
use std::{iter, ops::Range};
#[derive(Debug, PartialEq)]
enum OptionalState {
Unused,
Required,
Set,
}
impl OptionalState {
fn require(&mut self, require: bool) {
if require && *self == OptionalState::Unused {
*self = OptionalState::Required;
}
}
}
#[derive(Debug, PartialEq)]
enum DrawError {
MissingBlendColor,
MissingStencilReference,
IncompatibleBindGroup {
index: u32,
//expected: BindGroupLayoutId,
//provided: Option<(BindGroupLayoutId, BindGroupId)>,
},
}
#[derive(Debug)]
pub struct IndexState {
bound_buffer_view: Option<(BufferId, Range<BufferAddress>)>,
format: IndexFormat,
limit: u32,
}
impl IndexState {
fn update_limit(&mut self) {
self.limit = match self.bound_buffer_view {
Some((_, ref range)) => {
let shift = match self.format {
IndexFormat::Uint16 => 1,
IndexFormat::Uint32 => 2,
};
((range.end - range.start) >> shift) as u32
}
None => 0,
}
}
}
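// Worked example: a bound view over bytes 0 .. 1024 with
// `IndexFormat::Uint32` gives `limit = (1024 - 0) >> 2 = 256` indices;
// the same range with `Uint16` would give 512.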
#[derive(Clone, Copy, Debug)]
pub struct VertexBufferState {
total_size: BufferAddress,
stride: BufferAddress,
rate: InputStepMode,
}
impl VertexBufferState {
const EMPTY: Self = VertexBufferState {
total_size: 0,
stride: 0,
rate: InputStepMode::Vertex,
};
}
#[derive(Debug)]
pub struct VertexState {
inputs: [VertexBufferState; MAX_VERTEX_BUFFERS],
vertex_limit: u32,
instance_limit: u32,
}
impl VertexState {
fn update_limits(&mut self) {
self.vertex_limit = !0;
self.instance_limit = !0;
for vbs in &self.inputs {
if vbs.stride == 0 {
continue;
}
let limit = (vbs.total_size / vbs.stride) as u32;
match vbs.rate {
InputStepMode::Vertex => self.vertex_limit = self.vertex_limit.min(limit),
InputStepMode::Instance => self.instance_limit = self.instance_limit.min(limit),
}
}
}
}
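// Worked example: an input with `total_size = 160` and `stride = 16`
// contributes a limit of 10. The final `vertex_limit`/`instance_limit` are
// the minima over all inputs of the matching step mode, so draws are
// bounded by the smallest bound buffer.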
#[derive(Debug)]
pub struct RenderPass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
context: RenderPassContext,
binder: Binder,
trackers: TrackerSet,
blend_color_status: OptionalState,
stencil_reference_status: OptionalState,
index_state: IndexState,
vertex_state: VertexState,
sample_count: u8,
}
impl<B: GfxBackend> RenderPass<B> {
pub(crate) fn new(
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
context: RenderPassContext,
sample_count: u8,
max_bind_groups: u32,
) -> Self {
RenderPass {
raw,
cmb_id,
context,
binder: Binder::new(max_bind_groups),
trackers: TrackerSet::new(B::VARIANT),
blend_color_status: OptionalState::Unused,
stencil_reference_status: OptionalState::Unused,
index_state: IndexState {
bound_buffer_view: None,
format: IndexFormat::Uint16,
limit: 0,
},
vertex_state: VertexState {
inputs: [VertexBufferState::EMPTY; MAX_VERTEX_BUFFERS],
vertex_limit: 0,
instance_limit: 0,
},
sample_count,
}
}
fn is_ready(&self) -> Result<(), DrawError> {
//TODO: vertex buffers
let bind_mask = self.binder.invalid_mask();
if bind_mask != 0 {
//let (expected, provided) = self.binder.entries[index as usize].info();
return Err(DrawError::IncompatibleBindGroup {
index: bind_mask.trailing_zeros() as u32,
});
}
if self.blend_color_status == OptionalState::Required {
return Err(DrawError::MissingBlendColor);
}
if self.stencil_reference_status == OptionalState::Required {
return Err(DrawError::MissingStencilReference);
}
Ok(())
}
}
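// Note (illustrative): `invalid_mask` is a bitmask over bind group slots,
// so a mask of 0b100 reports `IncompatibleBindGroup { index: 2 }` via
// `trailing_zeros`.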
// Common routines between render/compute
pub fn render_pass_end_pass<B: GfxBackend>(global: &Global, pass_id: RenderPassId) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let (mut pass, mut token) = hub.render_passes.unregister(pass_id, &mut token);
unsafe {
pass.raw.end_render_pass();
}
pass.trackers.optimize();
let cmb = &mut cmb_guard[pass.cmb_id.value];
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
match cmb.raw.last_mut() {
Some(last) => {
log::trace!("Encoding barriers before pass {:?}", pass_id);
CommandBuffer::insert_barriers(
last,
&mut cmb.trackers,
&pass.trackers,
Stitch::Last,
&*buffer_guard,
&*texture_guard,
);
unsafe { last.finish() };
}
None => {
cmb.trackers.merge_extend(&pass.trackers);
}
}
cmb.raw.push(pass.raw);
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) {
gfx_select!(pass_id => render_pass_end_pass(&*GLOBAL, pass_id))
}
pub fn render_pass_set_bind_group<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
index: u32,
bind_group_id: BindGroupId,
offsets: &[BufferAddress],
) {
let hub = B::hub(global);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let bind_group = pass
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
assert_eq!(bind_group.dynamic_count, offsets.len());
if cfg!(debug_assertions) {
for off in offsets {
assert_eq!(
*off % BIND_BUFFER_ALIGNMENT,
0,
"Misaligned dynamic buffer offset: {} does not align with {}",
off,
BIND_BUFFER_ALIGNMENT
);
}
}
pass.trackers.merge_extend(&bind_group.used);
if let Some((pipeline_layout_id, follow_up_sets, follow_up_offsets)) = pass
.binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let bind_groups = iter::once(bind_group.raw.raw())
.chain(follow_up_sets.map(|bg_id| bind_group_guard[bg_id].raw.raw()));
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout_guard[pipeline_layout_id].raw,
index as usize,
bind_groups,
offsets
.iter()
.chain(follow_up_offsets)
.map(|&off| off as hal::command::DescriptorSetOffset),
);
}
};
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_bind_group(
pass_id: RenderPassId,
index: u32,
bind_group_id: BindGroupId,
offsets: *const BufferAddress,
offsets_length: usize,
) {
let offsets = if offsets_length != 0 {
unsafe { slice::from_raw_parts(offsets, offsets_length) }
} else {
&[]
};
gfx_select!(pass_id => render_pass_set_bind_group(&*GLOBAL, pass_id, index, bind_group_id, offsets))
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_push_debug_group(_pass_id: RenderPassId, _label: RawString) {
//TODO
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_pop_debug_group(_pass_id: RenderPassId) {
//TODO
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_insert_debug_marker(_pass_id: RenderPassId, _label: RawString) {
//TODO
}
// Render-specific routines
pub fn render_pass_set_index_buffer<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
buffer_id: BufferId,
offset: BufferAddress,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
let buffer = pass
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDEX));
let range = offset .. buffer.size;
pass.index_state.bound_buffer_view = Some((buffer_id, range));
pass.index_state.update_limit();
let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
offset,
index_type: conv::map_index_format(pass.index_state.format),
};
unsafe {
pass.raw.bind_index_buffer(view);
}
}
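// Worked example: binding a 1280-byte buffer at `offset = 256` with the
// current `Uint16` format leaves a 1024-byte view, so
// `index_state.limit = 1024 >> 1 = 512` indices.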
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_index_buffer(
pass_id: RenderPassId,
buffer_id: BufferId,
offset: BufferAddress,
) {
gfx_select!(pass_id => render_pass_set_index_buffer(&*GLOBAL, pass_id, buffer_id, offset))
}
pub fn render_pass_set_vertex_buffers<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
start_slot: u32,
buffers: &[BufferId],
offsets: &[BufferAddress],
) {
let hub = B::hub(global);
let mut token = Token::root();
assert_eq!(buffers.len(), offsets.len());
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
for (vbs, (&id, &offset)) in pass.vertex_state.inputs[start_slot as usize ..]
.iter_mut()
.zip(buffers.iter().zip(offsets))
{
let buffer = pass
.trackers
.buffers
.use_extend(&*buffer_guard, id, (), BufferUsage::VERTEX)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::VERTEX));
vbs.total_size = buffer.size - offset;
}
pass.vertex_state.update_limits();
let buffers = buffers
.iter()
.map(|&id| &buffer_guard[id].raw)
.zip(offsets.iter().cloned());
unsafe {
pass.raw.bind_vertex_buffers(start_slot, buffers);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_vertex_buffers(
pass_id: RenderPassId,
start_slot: u32,
buffers: *const BufferId,
offsets: *const BufferAddress,
length: usize,
) {
let buffers = unsafe { slice::from_raw_parts(buffers, length) };
let offsets = unsafe { slice::from_raw_parts(offsets, length) };
gfx_select!(pass_id => render_pass_set_vertex_buffers(&*GLOBAL, pass_id, start_slot, buffers, offsets))
}
pub fn render_pass_draw<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
assert!(
first_vertex + vertex_count <= pass.vertex_state.vertex_limit,
"Vertex out of range!"
);
assert!(
first_instance + instance_count <= pass.vertex_state.instance_limit,
"Instance out of range!"
);
unsafe {
pass.raw.draw(
first_vertex .. first_vertex + vertex_count,
first_instance .. first_instance + instance_count,
);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw(
pass_id: RenderPassId,
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
) {
gfx_select!(pass_id => render_pass_draw(&*GLOBAL, pass_id, vertex_count, instance_count, first_vertex, first_instance))
}
pub fn render_pass_draw_indirect<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
indirect_buffer_id: BufferId,
indirect_offset: BufferAddress,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
let buffer = pass
.trackers
.buffers
.use_extend(
&*buffer_guard,
indirect_buffer_id,
(),
BufferUsage::INDIRECT,
)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDIRECT));
unsafe {
pass.raw.draw_indirect(&buffer.raw, indirect_offset, 1, 0);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw_indirect(
pass_id: RenderPassId,
indirect_buffer_id: BufferId,
indirect_offset: BufferAddress,
) {
gfx_select!(pass_id => render_pass_draw_indirect(&*GLOBAL, pass_id, indirect_buffer_id, indirect_offset))
}
pub fn render_pass_draw_indexed<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
//TODO: validate that base_vertex + max_index() is within the provided range
assert!(
first_index + index_count <= pass.index_state.limit,
"Index out of range!"
);
assert!(
first_instance + instance_count <= pass.vertex_state.instance_limit,
"Instance out of range!"
);
unsafe {
pass.raw.draw_indexed(
first_index .. first_index + index_count,
base_vertex,
first_instance .. first_instance + instance_count,
);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw_indexed(
pass_id: RenderPassId,
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
) {
gfx_select!(pass_id => render_pass_draw_indexed(&*GLOBAL, pass_id, index_count, instance_count, first_index, base_vertex, first_instance))
}
pub fn render_pass_draw_indexed_indirect<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
indirect_buffer_id: BufferId,
indirect_offset: BufferAddress,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
let buffer = pass
.trackers
.buffers
.use_extend(
&*buffer_guard,
indirect_buffer_id,
(),
BufferUsage::INDIRECT,
)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDIRECT));
unsafe {
pass.raw
.draw_indexed_indirect(&buffer.raw, indirect_offset, 1, 0);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw_indexed_indirect(
pass_id: RenderPassId,
indirect_buffer_id: BufferId,
indirect_offset: BufferAddress,
) {
gfx_select!(pass_id => render_pass_draw_indexed_indirect(&*GLOBAL, pass_id, indirect_buffer_id, indirect_offset))
}
pub fn render_pass_set_pipeline<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
pipeline_id: RenderPipelineId,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
let pipeline = &pipeline_guard[pipeline_id];
assert!(
pass.context.compatible(&pipeline.pass_context),
"The render pipeline is not compatible with the pass!"
);
assert_eq!(
pipeline.sample_count, pass.sample_count,
"The render pipeline and renderpass have mismatching sample_count"
);
pass.blend_color_status
.require(pipeline.flags.contains(PipelineFlags::BLEND_COLOR));
pass.stencil_reference_status
.require(pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE));
unsafe {
pass.raw.bind_graphics_pipeline(&pipeline.raw);
}
// Rebind resources if the pipeline layout changed
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
let mut is_compatible = true;
for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
match entry.expect_layout(bgl_id) {
LayoutChange::Match(bg_id, offsets) if is_compatible => {
let desc_set = bind_group_guard[bg_id].raw.raw();
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
offsets.iter().map(|offset| *offset as u32),
);
}
}
LayoutChange::Match(..) | LayoutChange::Unchanged => {}
LayoutChange::Mismatch => {
is_compatible = false;
}
}
}
}
// Rebind index buffer if the index format has changed with the pipeline switch
if pass.index_state.format != pipeline.index_format {
pass.index_state.format = pipeline.index_format;
pass.index_state.update_limit();
if let Some((buffer_id, ref range)) = pass.index_state.bound_buffer_view {
let (buffer_guard, _) = hub.buffers.read(&mut token);
let buffer = pass
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX)
.unwrap();
let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
offset: range.start,
index_type: conv::map_index_format(pass.index_state.format),
};
unsafe {
pass.raw.bind_index_buffer(view);
}
}
}
// Update vertex buffer limits
for (vbs, &(stride, rate)) in pass
.vertex_state
.inputs
.iter_mut()
.zip(&pipeline.vertex_strides)
{
vbs.stride = stride;
vbs.rate = rate;
}
for vbs in pass.vertex_state.inputs[pipeline.vertex_strides.len() ..].iter_mut() {
vbs.stride = 0;
vbs.rate = InputStepMode::Vertex;
}
pass.vertex_state.update_limits();
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_pipeline(
pass_id: RenderPassId,
pipeline_id: RenderPipelineId,
) {
gfx_select!(pass_id => render_pass_set_pipeline(&*GLOBAL, pass_id, pipeline_id))
}
pub fn render_pass_set_blend_color<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
color: &Color,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.blend_color_status = OptionalState::Set;
unsafe {
pass.raw.set_blend_constants(conv::map_color_f32(color));
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_blend_color(pass_id: RenderPassId, color: &Color) {
gfx_select!(pass_id => render_pass_set_blend_color(&*GLOBAL, pass_id, color))
}
pub fn render_pass_set_stencil_reference<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
value: u32,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.stencil_reference_status = OptionalState::Set;
unsafe {
pass.raw.set_stencil_reference(hal::pso::Face::all(), value);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_stencil_reference(pass_id: RenderPassId, value: u32) {
gfx_select!(pass_id => render_pass_set_stencil_reference(&*GLOBAL, pass_id, value))
}
pub fn render_pass_set_viewport<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
x: f32,
y: f32,
w: f32,
h: f32,
min_depth: f32,
max_depth: f32,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
unsafe {
use std::convert::TryFrom;
use std::i16;
pass.raw.set_viewports(
0,
&[hal::pso::Viewport {
rect: hal::pso::Rect {
x: i16::try_from(x.round() as i64).unwrap_or(0),
y: i16::try_from(y.round() as i64).unwrap_or(0),
w: i16::try_from(w.round() as i64).unwrap_or(i16::MAX),
h: i16::try_from(h.round() as i64).unwrap_or(i16::MAX),
},
depth: min_depth .. max_depth,
}],
);
}
}
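// Note (illustrative): the `try_from(..).unwrap_or(..)` conversions above
// clamp rather than panic: a width of 70000.0 does not fit an `i16`, so
// `w` falls back to `i16::MAX`, while an unrepresentable `x`/`y` falls
// back to 0.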
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_viewport(
pass_id: RenderPassId,
x: f32,
y: f32,
w: f32,
h: f32,
min_depth: f32,
max_depth: f32,
) {
gfx_select!(pass_id => render_pass_set_viewport(&*GLOBAL, pass_id, x, y, w, h, min_depth, max_depth))
}
pub fn render_pass_set_scissor_rect<B: GfxBackend>(
global: &Global,
pass_id: RenderPassId,
x: u32,
y: u32,
w: u32,
h: u32,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
unsafe {
use std::convert::TryFrom;
use std::i16;
pass.raw.set_scissors(
0,
&[hal::pso::Rect {
x: i16::try_from(x).unwrap_or(0),
y: i16::try_from(y).unwrap_or(0),
w: i16::try_from(w).unwrap_or(i16::MAX),
h: i16::try_from(h).unwrap_or(i16::MAX),
}],
);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_scissor_rect(
pass_id: RenderPassId,
x: u32,
y: u32,
w: u32,
h: u32,
) {
gfx_select!(pass_id => render_pass_set_scissor_rect(&*GLOBAL, pass_id, x, y, w, h))
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_render_pass_execute_bundles(
_pass_id: RenderPassId,
_bundles: *const RenderBundleId,
_bundles_length: usize,
) {
unimplemented!()
}


@ -1,421 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
conv,
device::{all_buffer_stages, all_image_stages},
hub::{GfxBackend, Global, Token},
BufferAddress,
BufferId,
BufferUsage,
CommandEncoderId,
Extent3d,
Origin3d,
TextureId,
TextureUsage,
};
#[cfg(feature = "local")]
use crate::{gfx_select, hub::GLOBAL};
use hal::command::CommandBuffer as _;
use std::iter;
const BITS_PER_BYTE: u32 = 8;
#[repr(C)]
#[derive(Debug)]
pub struct BufferCopyView {
pub buffer: BufferId,
pub offset: BufferAddress,
pub row_pitch: u32,
pub image_height: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct TextureCopyView {
pub texture: TextureId,
pub mip_level: u32,
pub array_layer: u32,
pub origin: Origin3d,
}
impl TextureCopyView {
//TODO: we currently access each texture twice for a transfer,
// once only to get the aspect flags, which is unfortunate.
fn to_selector(&self, aspects: hal::format::Aspects) -> hal::image::SubresourceRange {
let level = self.mip_level as hal::image::Level;
let layer = self.array_layer as hal::image::Layer;
hal::image::SubresourceRange {
aspects,
levels: level .. level + 1,
layers: layer .. layer + 1,
}
}
fn to_sub_layers(&self, aspects: hal::format::Aspects) -> hal::image::SubresourceLayers {
let layer = self.array_layer as hal::image::Layer;
hal::image::SubresourceLayers {
aspects,
level: self.mip_level as hal::image::Level,
layers: layer .. layer + 1,
}
}
}
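// Worked example: `mip_level = 2`, `array_layer = 3` selects the half-open
// ranges `levels: 2 .. 3` and `layers: 3 .. 4`, i.e. exactly one mip of
// one array layer, with the aspect flags supplied by the caller.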
pub fn command_encoder_copy_buffer_to_buffer<B: GfxBackend>(
global: &Global,
command_encoder_id: CommandEncoderId,
source: BufferId,
source_offset: BufferAddress,
destination: BufferId,
destination_offset: BufferAddress,
size: BufferAddress,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (buffer_guard, _) = hub.buffers.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the buffer tracker mutably...
let mut barriers = Vec::new();
let (src_buffer, src_pending) =
cmb.trackers
.buffers
.use_replace(&*buffer_guard, source, (), BufferUsage::COPY_SRC);
assert!(src_buffer.usage.contains(BufferUsage::COPY_SRC));
barriers.extend(src_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &src_buffer.raw,
families: None,
range: None .. None,
}));
let (dst_buffer, dst_pending) =
cmb.trackers
.buffers
.use_replace(&*buffer_guard, destination, (), BufferUsage::COPY_DST);
assert!(dst_buffer.usage.contains(BufferUsage::COPY_DST));
barriers.extend(dst_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &dst_buffer.raw,
families: None,
range: None .. None,
}));
let region = hal::command::BufferCopy {
src: source_offset,
dst: destination_offset,
size,
};
let cmb_raw = cmb.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_buffer_stages() .. all_buffer_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
cmb_raw.copy_buffer(&src_buffer.raw, &dst_buffer.raw, iter::once(region));
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_copy_buffer_to_buffer(
command_encoder_id: CommandEncoderId,
source: BufferId,
source_offset: BufferAddress,
destination: BufferId,
destination_offset: BufferAddress,
size: BufferAddress,
) {
gfx_select!(command_encoder_id => command_encoder_copy_buffer_to_buffer(
&*GLOBAL,
command_encoder_id,
source, source_offset,
destination,
destination_offset,
size))
}
pub fn command_encoder_copy_buffer_to_texture<B: GfxBackend>(
global: &Global,
command_encoder_id: CommandEncoderId,
source: &BufferCopyView,
destination: &TextureCopyView,
copy_size: Extent3d,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
let aspects = texture_guard[destination.texture].full_range.aspects;
let (src_buffer, src_pending) =
cmb.trackers
.buffers
.use_replace(&*buffer_guard, source.buffer, (), BufferUsage::COPY_SRC);
assert!(src_buffer.usage.contains(BufferUsage::COPY_SRC));
let src_barriers = src_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &src_buffer.raw,
families: None,
range: None .. None,
});
let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
destination.texture,
destination.to_selector(aspects),
TextureUsage::COPY_DST,
);
assert!(dst_texture.usage.contains(TextureUsage::COPY_DST));
let dst_barriers = dst_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &dst_texture.raw,
families: None,
range: pending.selector,
});
let aspects = dst_texture.full_range.aspects;
let bytes_per_texel = conv::map_texture_format(dst_texture.format, cmb.features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
let buffer_width = source.row_pitch / bytes_per_texel;
assert_eq!(source.row_pitch % bytes_per_texel, 0);
let region = hal::command::BufferImageCopy {
buffer_offset: source.offset,
buffer_width,
buffer_height: source.image_height,
image_layers: destination.to_sub_layers(aspects),
image_offset: conv::map_origin(destination.origin),
image_extent: conv::map_extent(copy_size),
};
let cmb_raw = cmb.raw.last_mut().unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
cmb_raw.pipeline_barrier(
stages .. stages,
hal::memory::Dependencies::empty(),
src_barriers.chain(dst_barriers),
);
cmb_raw.copy_buffer_to_image(
&src_buffer.raw,
&dst_texture.raw,
hal::image::Layout::TransferDstOptimal,
iter::once(region),
);
}
}
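// Worked example for the pitch math above (illustrative): an RGBA8 texture
// has a 32-bit texel, so `bytes_per_texel = 32 / 8 = 4`; a `row_pitch` of
// 1024 bytes then gives `buffer_width = 256` texels, and any pitch that is
// not a multiple of 4 trips the `assert_eq!`.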
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_copy_buffer_to_texture(
command_encoder_id: CommandEncoderId,
source: &BufferCopyView,
destination: &TextureCopyView,
copy_size: Extent3d,
) {
gfx_select!(command_encoder_id => command_encoder_copy_buffer_to_texture(
&*GLOBAL,
command_encoder_id,
source,
destination,
copy_size))
}
pub fn command_encoder_copy_texture_to_buffer<B: GfxBackend>(
global: &Global,
command_encoder_id: CommandEncoderId,
source: &TextureCopyView,
destination: &BufferCopyView,
copy_size: Extent3d,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
let aspects = texture_guard[source.texture].full_range.aspects;
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
source.texture,
source.to_selector(aspects),
TextureUsage::COPY_SRC,
);
assert!(src_texture.usage.contains(TextureUsage::COPY_SRC));
let src_barriers = src_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &src_texture.raw,
families: None,
range: pending.selector,
});
let (dst_buffer, dst_pending) = cmb.trackers.buffers.use_replace(
&*buffer_guard,
destination.buffer,
(),
BufferUsage::COPY_DST,
);
assert!(dst_buffer.usage.contains(BufferUsage::COPY_DST));
let dst_barrier = dst_pending.map(|pending| hal::memory::Barrier::Buffer {
states: pending.to_states(),
target: &dst_buffer.raw,
families: None,
range: None .. None,
});
let aspects = src_texture.full_range.aspects;
let bytes_per_texel = conv::map_texture_format(src_texture.format, cmb.features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
let buffer_width = destination.row_pitch / bytes_per_texel;
assert_eq!(destination.row_pitch % bytes_per_texel, 0);
let region = hal::command::BufferImageCopy {
buffer_offset: destination.offset,
buffer_width,
buffer_height: destination.image_height,
image_layers: source.to_sub_layers(aspects),
image_offset: conv::map_origin(source.origin),
image_extent: conv::map_extent(copy_size),
};
let cmb_raw = cmb.raw.last_mut().unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
cmb_raw.pipeline_barrier(
stages .. stages,
hal::memory::Dependencies::empty(),
src_barriers.chain(dst_barrier),
);
cmb_raw.copy_image_to_buffer(
&src_texture.raw,
hal::image::Layout::TransferSrcOptimal,
&dst_buffer.raw,
iter::once(region),
);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_copy_texture_to_buffer(
command_encoder_id: CommandEncoderId,
source: &TextureCopyView,
destination: &BufferCopyView,
copy_size: Extent3d,
) {
gfx_select!(command_encoder_id => command_encoder_copy_texture_to_buffer(
&*GLOBAL,
command_encoder_id,
source,
destination,
copy_size))
}
pub fn command_encoder_copy_texture_to_texture<B: GfxBackend>(
global: &Global,
command_encoder_id: CommandEncoderId,
source: &TextureCopyView,
destination: &TextureCopyView,
copy_size: Extent3d,
) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (_, mut token) = hub.buffers.read(&mut token); // skip token
let (texture_guard, _) = hub.textures.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the texture tracker mutably...
let mut barriers = Vec::new();
let aspects = texture_guard[source.texture].full_range.aspects
& texture_guard[destination.texture].full_range.aspects;
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
source.texture,
source.to_selector(aspects),
TextureUsage::COPY_SRC,
);
assert!(src_texture.usage.contains(TextureUsage::COPY_SRC));
barriers.extend(src_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &src_texture.raw,
families: None,
range: pending.selector,
}));
let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
destination.texture,
destination.to_selector(aspects),
TextureUsage::COPY_DST,
);
assert!(dst_texture.usage.contains(TextureUsage::COPY_DST));
barriers.extend(dst_pending.map(|pending| hal::memory::Barrier::Image {
states: pending.to_states(),
target: &dst_texture.raw,
families: None,
range: pending.selector,
}));
let aspects = src_texture.full_range.aspects & dst_texture.full_range.aspects;
let region = hal::command::ImageCopy {
src_subresource: source.to_sub_layers(aspects),
src_offset: conv::map_origin(source.origin),
dst_subresource: destination.to_sub_layers(aspects),
dst_offset: conv::map_origin(destination.origin),
extent: conv::map_extent(copy_size),
};
let cmb_raw = cmb.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_image_stages() .. all_image_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
cmb_raw.copy_image(
&src_texture.raw,
hal::image::Layout::TransferSrcOptimal,
&dst_texture.raw,
hal::image::Layout::TransferDstOptimal,
iter::once(region),
);
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_copy_texture_to_texture(
command_encoder_id: CommandEncoderId,
source: &TextureCopyView,
destination: &TextureCopyView,
copy_size: Extent3d,
) {
gfx_select!(command_encoder_id => command_encoder_copy_texture_to_texture(
&*GLOBAL,
command_encoder_id,
source,
destination,
copy_size))
}

The diff for one file is not shown because of its size.


@ -1,589 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
backend,
binding_model::MAX_BIND_GROUPS,
device::BIND_BUFFER_ALIGNMENT,
hub::{GfxBackend, Global, Token},
id::{Input, Output},
AdapterId,
AdapterInfo,
Backend,
Device,
DeviceId,
};
#[cfg(feature = "local")]
use crate::{gfx_select, hub::GLOBAL, SurfaceId};
#[cfg(feature = "local")]
use bitflags::bitflags;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use hal::{self, adapter::PhysicalDevice as _, queue::QueueFamily as _, Instance as _};
#[cfg(feature = "local")]
use std::marker::PhantomData;
use std::ffi::c_void;
#[derive(Debug)]
pub struct Instance {
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
vulkan: Option<gfx_backend_vulkan::Instance>,
#[cfg(any(target_os = "ios", target_os = "macos"))]
metal: gfx_backend_metal::Instance,
#[cfg(windows)]
dx12: Option<gfx_backend_dx12::Instance>,
#[cfg(windows)]
dx11: gfx_backend_dx11::Instance,
}
impl Instance {
pub fn new(name: &str, version: u32) -> Self {
Instance {
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
vulkan: gfx_backend_vulkan::Instance::create(name, version).ok(),
#[cfg(any(target_os = "ios", target_os = "macos"))]
metal: gfx_backend_metal::Instance::create(name, version).unwrap(),
#[cfg(windows)]
dx12: gfx_backend_dx12::Instance::create(name, version).ok(),
#[cfg(windows)]
dx11: gfx_backend_dx11::Instance::create(name, version).unwrap(),
}
}
#[cfg(not(feature = "local"))]
pub(crate) fn destroy_surface(&mut self, surface: Surface) {
//TODO: fill out the proper destruction once we are on gfx-0.4
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
{
if let Some(_suf) = surface.vulkan {
//self.vulkan.as_mut().unwrap().destroy_surface(suf);
}
}
#[cfg(any(target_os = "ios", target_os = "macos"))]
{
let _ = surface;
//self.metal.destroy_surface(surface.metal);
}
#[cfg(windows)]
{
if let Some(_suf) = surface.dx12 {
//self.dx12.as_mut().unwrap().destroy_surface(suf);
}
//self.dx11.destroy_surface(surface.dx11);
}
}
}
type GfxSurface<B> = <B as hal::Backend>::Surface;
#[derive(Debug)]
pub struct Surface {
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
pub(crate) vulkan: Option<GfxSurface<backend::Vulkan>>,
#[cfg(any(target_os = "ios", target_os = "macos"))]
pub(crate) metal: GfxSurface<backend::Metal>,
#[cfg(windows)]
pub(crate) dx12: Option<GfxSurface<backend::Dx12>>,
#[cfg(windows)]
pub(crate) dx11: GfxSurface<backend::Dx11>,
}
#[derive(Debug)]
pub struct Adapter<B: hal::Backend> {
pub(crate) raw: hal::adapter::Adapter<B>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PowerPreference {
Default = 0,
LowPower = 1,
HighPerformance = 2,
}
#[cfg(feature = "local")]
bitflags! {
#[repr(transparent)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BackendBit: u32 {
const VULKAN = 1 << Backend::Vulkan as u32;
const GL = 1 << Backend::Gl as u32;
const METAL = 1 << Backend::Metal as u32;
const DX12 = 1 << Backend::Dx12 as u32;
const DX11 = 1 << Backend::Dx11 as u32;
/// Vulkan + METAL + DX12
const PRIMARY = Self::VULKAN.bits | Self::METAL.bits | Self::DX12.bits;
/// OpenGL + DX11
const SECONDARY = Self::GL.bits | Self::DX11.bits;
}
}
#[cfg(feature = "local")]
impl From<Backend> for BackendBit {
fn from(backend: Backend) -> Self {
BackendBit::from_bits(1 << backend as u32).unwrap()
}
}
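// Illustrative: `BackendBit::from(Backend::Vulkan)` equals
// `BackendBit::VULKAN`, since both are `1 << Backend::Vulkan as u32`.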
#[repr(C)]
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct RequestAdapterOptions {
pub power_preference: PowerPreference,
#[cfg(feature = "local")]
pub backends: BackendBit,
}
impl Default for RequestAdapterOptions {
fn default() -> Self {
RequestAdapterOptions {
power_preference: PowerPreference::Default,
#[cfg(feature = "local")]
backends: BackendBit::PRIMARY,
}
}
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Extensions {
pub anisotropic_filtering: bool,
}
#[repr(C)]
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Limits {
pub max_bind_groups: u32,
}
impl Default for Limits {
fn default() -> Self {
Limits {
max_bind_groups: MAX_BIND_GROUPS as u32,
}
}
}
#[repr(C)]
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct DeviceDescriptor {
pub extensions: Extensions,
pub limits: Limits,
}
#[cfg(feature = "local")]
pub fn wgpu_create_surface(raw_handle: raw_window_handle::RawWindowHandle) -> SurfaceId {
use raw_window_handle::RawWindowHandle as Rwh;
let instance = &GLOBAL.instance;
let surface = match raw_handle {
#[cfg(target_os = "ios")]
Rwh::IOS(h) => Surface {
#[cfg(feature = "gfx-backend-vulkan")]
vulkan: None,
metal: instance
.metal
.create_surface_from_uiview(h.ui_view, cfg!(debug_assertions)),
},
#[cfg(target_os = "macos")]
Rwh::MacOS(h) => Surface {
#[cfg(feature = "gfx-backend-vulkan")]
vulkan: instance
.vulkan
.as_ref()
.map(|inst| inst.create_surface_from_ns_view(h.ns_view)),
metal: instance
.metal
.create_surface_from_nsview(h.ns_view, cfg!(debug_assertions)),
},
#[cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))]
Rwh::Xlib(h) => Surface {
vulkan: instance
.vulkan
.as_ref()
.map(|inst| inst.create_surface_from_xlib(h.display as _, h.window as _)),
},
#[cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))]
Rwh::Wayland(h) => Surface {
vulkan: instance
.vulkan
.as_ref()
.map(|inst| inst.create_surface_from_wayland(h.display, h.surface)),
},
#[cfg(windows)]
Rwh::Windows(h) => Surface {
vulkan: instance
.vulkan
.as_ref()
.map(|inst| inst.create_surface_from_hwnd(std::ptr::null_mut(), h.hwnd)),
dx12: instance
.dx12
.as_ref()
.map(|inst| inst.create_surface_from_hwnd(h.hwnd)),
dx11: instance.dx11.create_surface_from_hwnd(h.hwnd),
},
_ => panic!("Unsupported window handle"),
};
let mut token = Token::root();
GLOBAL
.surfaces
.register_identity(PhantomData, surface, &mut token)
}
#[cfg(all(
feature = "local",
unix,
not(target_os = "ios"),
not(target_os = "macos")
))]
#[no_mangle]
pub extern "C" fn wgpu_create_surface_from_xlib(
display: *mut *const std::ffi::c_void,
window: u64,
) -> SurfaceId {
use raw_window_handle::unix::XlibHandle;
wgpu_create_surface(raw_window_handle::RawWindowHandle::Xlib(XlibHandle {
window,
display: display as *mut _,
..XlibHandle::empty()
}))
}
#[cfg(all(feature = "local", any(target_os = "ios", target_os = "macos")))]
#[no_mangle]
pub extern "C" fn wgpu_create_surface_from_metal_layer(layer: *mut std::ffi::c_void) -> SurfaceId {
let surface = Surface {
#[cfg(feature = "gfx-backend-vulkan")]
vulkan: None, //TODO: currently requires `NSView`
metal: GLOBAL
.instance
.metal
.create_surface_from_layer(layer as *mut _, cfg!(debug_assertions)),
};
GLOBAL
.surfaces
.register_identity(PhantomData, surface, &mut Token::root())
}
#[cfg(all(feature = "local", windows))]
#[no_mangle]
pub extern "C" fn wgpu_create_surface_from_windows_hwnd(
_hinstance: *mut std::ffi::c_void,
hwnd: *mut std::ffi::c_void,
) -> SurfaceId {
use raw_window_handle::windows::WindowsHandle;
wgpu_create_surface(raw_window_handle::RawWindowHandle::Windows(
raw_window_handle::windows::WindowsHandle {
hwnd,
..WindowsHandle::empty()
},
))
}
pub type RequestAdapterCallback =
extern "C" fn(adapter: *const AdapterId, userdata: *mut c_void);
pub fn request_adapter_async(
global: &Global,
desc: &RequestAdapterOptions,
input_ids: &[Input<AdapterId>],
callback: RequestAdapterCallback,
userdata: *mut c_void,
) {
let adapter = pick_adapter(global, desc, input_ids);
callback(adapter.as_ref().map_or(&AdapterId::ERROR, |x| x as *const _), userdata);
}
fn pick_adapter(
global: &Global,
desc: &RequestAdapterOptions,
input_ids: &[Input<AdapterId>],
) -> Option<AdapterId> {
let instance = &global.instance;
let mut device_types = Vec::new();
#[cfg(not(feature = "local"))]
let find_input = |b: Backend| input_ids.iter().find(|id| id.backend() == b).cloned();
#[cfg(feature = "local")]
let find_input = |b: Backend| {
let _ = input_ids;
if desc.backends.contains(b.into()) {
Some(PhantomData)
} else {
None
}
};
let id_vulkan = find_input(Backend::Vulkan);
let id_metal = find_input(Backend::Metal);
let id_dx12 = find_input(Backend::Dx12);
let id_dx11 = find_input(Backend::Dx11);
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
let mut adapters_vk = match instance.vulkan {
Some(ref inst) if id_vulkan.is_some() => {
let adapters = inst.enumerate_adapters();
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
}
_ => Vec::new(),
};
#[cfg(any(target_os = "ios", target_os = "macos"))]
let mut adapters_mtl = if id_metal.is_some() {
let adapters = instance.metal.enumerate_adapters();
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
} else {
Vec::new()
};
#[cfg(windows)]
let mut adapters_dx12 = match instance.dx12 {
Some(ref inst) if id_dx12.is_some() => {
let adapters = inst.enumerate_adapters();
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
}
_ => Vec::new(),
};
#[cfg(windows)]
let mut adapters_dx11 = if id_dx11.is_some() {
let adapters = instance.dx11.enumerate_adapters();
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
} else {
Vec::new()
};
if device_types.is_empty() {
log::warn!("No adapters are available!");
return None;
}
let (mut integrated, mut discrete, mut virt, mut other) = (None, None, None, None);
for (i, ty) in device_types.into_iter().enumerate() {
match ty {
hal::adapter::DeviceType::IntegratedGpu => {
integrated = integrated.or(Some(i));
}
hal::adapter::DeviceType::DiscreteGpu => {
discrete = discrete.or(Some(i));
}
hal::adapter::DeviceType::VirtualGpu => {
virt = virt.or(Some(i));
}
_ => {
other = other.or(Some(i));
}
}
}
let preferred_gpu = match desc.power_preference {
PowerPreference::Default => integrated.or(discrete).or(other).or(virt),
PowerPreference::LowPower => integrated.or(other).or(discrete).or(virt),
PowerPreference::HighPerformance => discrete.or(other).or(integrated).or(virt),
};
#[allow(unused_variables)]
let local_or_remote_id = |local_id, remote_id| {
#[cfg(not(feature = "local"))]
let id = remote_id;
#[cfg(feature = "local")]
let id = Some(local_id);
id
};
let mut token = Token::root();
let mut selected = preferred_gpu.unwrap_or(0);
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
{
if selected < adapters_vk.len() {
let adapter = Adapter {
raw: adapters_vk.swap_remove(selected),
};
log::info!("Adapter Vulkan {:?}", adapter.raw.info);
let id_out = backend::Vulkan::hub(global).adapters.register_identity(
id_vulkan.unwrap(),
adapter,
&mut token,
);
return local_or_remote_id(id_out, id_vulkan);
}
selected -= adapters_vk.len();
}
#[cfg(any(target_os = "ios", target_os = "macos"))]
{
if selected < adapters_mtl.len() {
let adapter = Adapter {
raw: adapters_mtl.swap_remove(selected),
};
log::info!("Adapter Metal {:?}", adapter.raw.info);
let id_out = backend::Metal::hub(global).adapters.register_identity(
id_metal.unwrap(),
adapter,
&mut token,
);
return local_or_remote_id(id_out, id_metal);
}
selected -= adapters_mtl.len();
}
#[cfg(windows)]
{
if selected < adapters_dx12.len() {
let adapter = Adapter {
raw: adapters_dx12.swap_remove(selected),
};
log::info!("Adapter Dx12 {:?}", adapter.raw.info);
let id_out = backend::Dx12::hub(global).adapters.register_identity(
id_dx12.unwrap(),
adapter,
&mut token,
);
return local_or_remote_id(id_out, id_dx12);
}
selected -= adapters_dx12.len();
if selected < adapters_dx11.len() {
let adapter = Adapter {
raw: adapters_dx11.swap_remove(selected),
};
log::info!("Adapter Dx11 {:?}", adapter.raw.info);
let id_out = backend::Dx11::hub(global).adapters.register_identity(
id_dx11.unwrap(),
adapter,
&mut token,
);
return local_or_remote_id(id_out, id_dx11);
}
selected -= adapters_dx11.len();
}
let _ = (selected, id_vulkan, id_metal, id_dx12, id_dx11);
unreachable!()
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_request_adapter_async(
desc: Option<&RequestAdapterOptions>,
callback: RequestAdapterCallback,
userdata: *mut c_void,
) {
request_adapter_async(&*GLOBAL, &desc.cloned().unwrap_or_default(), &[], callback, userdata);
}
pub fn adapter_request_device<B: GfxBackend>(
global: &Global,
adapter_id: AdapterId,
desc: &DeviceDescriptor,
id_in: Input<DeviceId>,
) -> Output<DeviceId> {
let hub = B::hub(global);
let mut token = Token::root();
let device = {
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id].raw;
let family = adapter
.queue_families
.iter()
.find(|family| family.queue_type().supports_graphics())
.unwrap();
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(family, &[1.0])], hal::Features::empty())
.unwrap()
};
let limits = adapter.physical_device.limits();
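// `BIND_BUFFER_ALIGNMENT` is the fixed alignment wgpu promises for dynamic
// buffer offsets; it must be a multiple of what the adapter requires for
// both storage and uniform buffers.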
assert_eq!(
0,
BIND_BUFFER_ALIGNMENT % limits.min_storage_buffer_offset_alignment,
"Adapter storage buffer offset alignment not compatible with WGPU"
);
assert_eq!(
0,
BIND_BUFFER_ALIGNMENT % limits.min_uniform_buffer_offset_alignment,
"Adapter uniform buffer offset alignment not compatible with WGPU"
);
if limits.max_bound_descriptor_sets == 0 {
log::warn!("max_bind_groups limit is missing");
} else {
assert!(
u32::from(limits.max_bound_descriptor_sets) >= desc.limits.max_bind_groups,
"Adapter does not support the requested max_bind_groups"
);
}
let mem_props = adapter.physical_device.memory_properties();
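// Probe whether packed D24S8 is usable as a depth-stencil attachment with
// optimal tiling; texture format mapping can substitute a wider format otherwise.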
let supports_texture_d24_s8 = adapter
.physical_device
.format_properties(Some(hal::format::Format::D24UnormS8Uint))
.optimal_tiling
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT);
Device::new(
gpu.device,
adapter_id,
gpu.queue_groups.swap_remove(0),
mem_props,
supports_texture_d24_s8,
desc.limits.max_bind_groups,
)
};
hub.devices.register_identity(id_in, device, &mut token)
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_adapter_request_device(
adapter_id: AdapterId,
desc: Option<&DeviceDescriptor>,
) -> DeviceId {
let desc = &desc.cloned().unwrap_or_default();
gfx_select!(adapter_id => adapter_request_device(&*GLOBAL, adapter_id, desc, PhantomData))
}
pub fn adapter_get_info<B: GfxBackend>(global: &Global, adapter_id: AdapterId) -> AdapterInfo {
let hub = B::hub(global);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id];
adapter.raw.info.clone()
}
#[cfg(feature = "local")]
pub fn wgpu_adapter_get_info(adapter_id: AdapterId) -> AdapterInfo {
gfx_select!(adapter_id => adapter_get_info(&*GLOBAL, adapter_id))
}


@ -2,233 +2,16 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
pub mod backend { use std::sync::Arc;
#[cfg(windows)]
pub use gfx_backend_dx11::Backend as Dx11;
#[cfg(windows)]
pub use gfx_backend_dx12::Backend as Dx12;
pub use gfx_backend_empty::Backend as Empty;
#[cfg(any(target_os = "ios", target_os = "macos"))]
pub use gfx_backend_metal::Backend as Metal;
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
pub use gfx_backend_vulkan::Backend as Vulkan;
}
mod binding_model;
mod command; mod command;
mod conv;
mod device; mod device;
mod hub;
mod id;
mod instance;
mod pipeline;
mod resource;
mod swap_chain;
mod track;
pub use self::binding_model::*;
pub use self::command::*; pub use self::command::*;
pub use self::device::*; pub use self::device::*;
#[cfg(not(feature = "local"))]
pub use self::hub::{Access, Global, IdentityManager, Registry, Token};
pub use self::id::*;
pub use self::instance::*;
pub use self::pipeline::*;
pub use self::resource::*;
pub use self::swap_chain::*;
pub use hal::adapter::AdapterInfo;
pub use hal::pso::read_spirv;
use std::{ type Global = core::hub::Global<parking_lot::Mutex<core::hub::IdentityManager>>;
os::raw::c_char,
ptr,
sync::atomic::{AtomicUsize, Ordering},
};
type SubmissionIndex = usize; lazy_static::lazy_static! {
type Index = u32; static ref GLOBAL: Arc<Global> = Arc::new(Global::new("wgpu"));
type Epoch = u32;
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Backend {
Empty = 0,
Vulkan = 1,
Metal = 2,
Dx12 = 3,
Dx11 = 4,
Gl = 5,
} }
pub type BufferAddress = u64;
pub type RawString = *const c_char;
//TODO: make it private. Currently used for swapchain creation impl.
#[derive(Debug)]
pub struct RefCount(ptr::NonNull<AtomicUsize>);
unsafe impl Send for RefCount {}
unsafe impl Sync for RefCount {}
impl RefCount {
const MAX: usize = 1 << 24;
fn load(&self) -> usize {
unsafe { self.0.as_ref() }.load(Ordering::Acquire)
}
}
impl Clone for RefCount {
fn clone(&self) -> Self {
let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::Relaxed);
assert!(old_size < Self::MAX);
RefCount(self.0)
}
}
impl Drop for RefCount {
fn drop(&mut self) {
if unsafe { self.0.as_ref() }.fetch_sub(1, Ordering::Relaxed) == 1 {
let _ = unsafe { Box::from_raw(self.0.as_ptr()) };
}
}
}
#[derive(Debug)]
struct LifeGuard {
ref_count: RefCount,
submission_index: AtomicUsize,
}
impl LifeGuard {
fn new() -> Self {
let bx = Box::new(AtomicUsize::new(1));
LifeGuard {
ref_count: RefCount(ptr::NonNull::new(Box::into_raw(bx)).unwrap()),
submission_index: AtomicUsize::new(0),
}
}
}
#[derive(Clone, Debug)]
struct Stored<T> {
value: T,
ref_count: RefCount,
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct Color {
pub r: f64,
pub g: f64,
pub b: f64,
pub a: f64,
}
impl Color {
pub const TRANSPARENT: Self = Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 0.0,
};
pub const BLACK: Self = Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
};
pub const WHITE: Self = Color {
r: 1.0,
g: 1.0,
b: 1.0,
a: 1.0,
};
pub const RED: Self = Color {
r: 1.0,
g: 0.0,
b: 0.0,
a: 1.0,
};
pub const GREEN: Self = Color {
r: 0.0,
g: 1.0,
b: 0.0,
a: 1.0,
};
pub const BLUE: Self = Color {
r: 0.0,
g: 0.0,
b: 1.0,
a: 1.0,
};
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct Origin3d {
pub x: f32,
pub y: f32,
pub z: f32,
}
impl Origin3d {
pub const ZERO: Self = Origin3d {
x: 0.0,
y: 0.0,
z: 0.0,
};
}
impl Default for Origin3d {
fn default() -> Self {
Origin3d::ZERO
}
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct Extent3d {
pub width: u32,
pub height: u32,
pub depth: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct U32Array {
pub bytes: *const u32,
pub length: usize,
}
#[derive(Debug)]
pub enum InputState {}
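// `gfx_select!` dispatches a generic function to the concrete gfx-hal backend
// encoded in the id, expanding to one match arm per backend compiled in.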
#[macro_export]
macro_rules! gfx_select {
($id:expr => $function:ident( $($param:expr),+ )) => {
match $id.backend() {
#[cfg(any(not(any(target_os = "ios", target_os = "macos")), feature = "gfx-backend-vulkan"))]
$crate::Backend::Vulkan => $function::<$crate::backend::Vulkan>( $($param),+ ),
#[cfg(any(target_os = "ios", target_os = "macos"))]
$crate::Backend::Metal => $function::<$crate::backend::Metal>( $($param),+ ),
#[cfg(windows)]
$crate::Backend::Dx12 => $function::<$crate::backend::Dx12>( $($param),+ ),
#[cfg(windows)]
$crate::Backend::Dx11 => $function::<$crate::backend::Dx11>( $($param),+ ),
_ => unreachable!()
}
};
}
#[derive(Clone, Copy, Debug)]
pub(crate) struct Features {
pub max_bind_groups: u32,
pub supports_texture_d24_s8: bool,
}
/// Fast hash map used internally.
type FastHashMap<K, V> = std::collections::HashMap<K, V, std::hash::BuildHasherDefault<fxhash::FxHasher>>;


@ -1,261 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! Swap chain management.
## Lifecycle
At the low level, the swap chain uses the new simplified model of gfx-rs.
A swap chain is a separate, backend-dependent object that shares its index with
the parent surface, which is backend-independent. This ensures a 1:1 correspondence
between them.
`get_next_image()` requests a new image from the surface. The image becomes part of
the `TextureViewInner::SwapChain` of the resulting view. The view is registered in
the hub but not in the device tracker.
The only operation allowed on the view is to be used as either a color or a resolve
attachment. It can only be used in one command buffer, which needs to be submitted
before presenting.
The command buffer tracker knows about the view, but only for the duration of recording.
The view ID is erased from it at the end, so that it's not merged into the device tracker.
When a swap chain view is used in `begin_render_pass()`, we assume the start and end
image layouts purely based on whether or not this view was used in this command buffer
before. It always starts with `Uninitialized` and ends with `Present`, so that no
barriers are needed when we actually present the image.
In `queue_submit()` we make sure to signal the semaphore whenever we render to a swap
chain view.
In `present()` we return the swap chain image back and wait on the semaphore.
!*/
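// A minimal sketch of the per-frame sequence through the C API below (assuming
// the "local" feature; error handling omitted):
//
//     let output = wgpu_swap_chain_get_next_texture(swap_chain_id);
//     // ... record and submit one command buffer rendering to output.view_id ...
//     wgpu_swap_chain_present(swap_chain_id);
//
// The acquired view must be used as an attachment in exactly one submitted
// command buffer before `present()`, per the lifecycle notes above.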
use crate::{
conv,
hub::{GfxBackend, Global, Token},
resource,
DeviceId,
Extent3d,
Features,
Input,
LifeGuard,
Stored,
SwapChainId,
TextureViewId,
};
#[cfg(feature = "local")]
use crate::{gfx_select, hub::GLOBAL};
use hal::{self, device::Device as _, queue::CommandQueue as _, window::PresentationSurface as _};
use smallvec::SmallVec;
#[cfg(feature = "local")]
use std::marker::PhantomData;
const FRAME_TIMEOUT_MS: u64 = 1000;
pub const DESIRED_NUM_FRAMES: u32 = 3;
#[derive(Debug)]
pub struct SwapChain<B: hal::Backend> {
pub(crate) life_guard: LifeGuard,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) desc: SwapChainDescriptor,
pub(crate) num_frames: hal::window::SwapImageIndex,
pub(crate) semaphore: B::Semaphore,
pub(crate) acquired_view_id: Option<Stored<TextureViewId>>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum PresentMode {
NoVsync = 0,
Vsync = 1,
}
#[repr(C)]
#[derive(Clone, Debug)]
pub struct SwapChainDescriptor {
pub usage: resource::TextureUsage,
pub format: resource::TextureFormat,
pub width: u32,
pub height: u32,
pub present_mode: PresentMode,
}
impl SwapChainDescriptor {
pub(crate) fn to_hal(
&self,
num_frames: u32,
features: &Features,
) -> hal::window::SwapchainConfig {
let mut config = hal::window::SwapchainConfig::new(
self.width,
self.height,
conv::map_texture_format(self.format, *features),
num_frames,
);
//TODO: check for supported
config.image_usage = conv::map_texture_usage(self.usage, hal::format::Aspects::COLOR);
config.composite_alpha_mode = hal::window::CompositeAlphaMode::OPAQUE;
config.present_mode = match self.present_mode {
PresentMode::NoVsync => hal::window::PresentMode::IMMEDIATE,
PresentMode::Vsync => hal::window::PresentMode::FIFO,
};
config
}
pub fn to_texture_desc(&self) -> resource::TextureDescriptor {
resource::TextureDescriptor {
size: Extent3d {
width: self.width,
height: self.height,
depth: 1,
},
mip_level_count: 1,
array_layer_count: 1,
sample_count: 1,
dimension: resource::TextureDimension::D2,
format: self.format,
usage: self.usage,
}
}
}
#[repr(C)]
#[derive(Debug)]
pub struct SwapChainOutput {
pub view_id: TextureViewId,
}
#[derive(Debug)]
pub enum SwapChainGetNextTextureError {
GpuProcessingTimeout,
}
pub fn swap_chain_get_next_texture<B: GfxBackend>(
global: &Global,
swap_chain_id: SwapChainId,
view_id_in: Input<TextureViewId>,
) -> Result<SwapChainOutput, SwapChainGetNextTextureError> {
let hub = B::hub(global);
let mut token = Token::root();
let (mut surface_guard, mut token) = global.surfaces.write(&mut token);
let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = &mut swap_chain_guard[swap_chain_id];
let device = &device_guard[sc.device_id.value];
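// Try to acquire the next image; on timeout we bail out, on any other
// error we reconfigure the swap chain once and retry.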
let (image, _) = {
let suf = B::get_surface_mut(surface);
match unsafe { suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000) } {
Ok(surface_image) => surface_image,
Err(hal::window::AcquireError::Timeout) => {
return Err(SwapChainGetNextTextureError::GpuProcessingTimeout);
}
Err(e) => {
log::warn!("acquire_image() failed ({:?}), reconfiguring swapchain", e);
let desc = sc.desc.to_hal(sc.num_frames, &device.features);
unsafe {
suf.configure_swapchain(&device.raw, desc).unwrap();
suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000).unwrap()
}
}
}
};
let view = resource::TextureView {
inner: resource::TextureViewInner::SwapChain {
image,
source_id: Stored {
value: swap_chain_id,
ref_count: sc.life_guard.ref_count.clone(),
},
framebuffers: SmallVec::new(),
},
format: sc.desc.format,
extent: hal::image::Extent {
width: sc.desc.width,
height: sc.desc.height,
depth: 1,
},
samples: 1,
range: hal::image::SubresourceRange {
aspects: hal::format::Aspects::COLOR,
layers: 0 .. 1,
levels: 0 .. 1,
},
life_guard: LifeGuard::new(),
};
let ref_count = view.life_guard.ref_count.clone();
let (view_id, _) = hub.texture_views.new_identity(view_id_in);
hub.texture_views.register(view_id, view, &mut token);
assert!(
sc.acquired_view_id.is_none(),
"Swap chain image is already acquired"
);
sc.acquired_view_id = Some(Stored {
value: view_id,
ref_count,
});
Ok(SwapChainOutput { view_id })
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_swap_chain_get_next_texture(swap_chain_id: SwapChainId) -> SwapChainOutput {
gfx_select!(swap_chain_id => swap_chain_get_next_texture(&*GLOBAL, swap_chain_id, PhantomData)).unwrap_or(SwapChainOutput {
view_id: TextureViewId::ERROR,
})
}
pub fn swap_chain_present<B: GfxBackend>(global: &Global, swap_chain_id: SwapChainId) {
let hub = B::hub(global);
let mut token = Token::root();
let (mut surface_guard, mut token) = global.surfaces.write(&mut token);
let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = &mut swap_chain_guard[swap_chain_id];
let device = &mut device_guard[sc.device_id.value];
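// Presenting consumes the acquired view: unregister it, hand the image back
// to the surface, and destroy any framebuffers built against it.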
let view_id = sc
.acquired_view_id
.take()
.expect("Swap chain image is not acquired");
let (view, _) = hub.texture_views.unregister(view_id.value, &mut token);
let (image, framebuffers) = match view.inner {
resource::TextureViewInner::Native { .. } => unreachable!(),
resource::TextureViewInner::SwapChain {
image, framebuffers, ..
} => (image, framebuffers),
};
let err = unsafe {
let queue = &mut device.queue_group.queues[0];
queue.present_surface(B::get_surface_mut(surface), image, Some(&sc.semaphore))
};
if let Err(e) = err {
log::warn!("present failed: {:?}", e);
}
for fbo in framebuffers {
unsafe {
device.raw.destroy_framebuffer(fbo);
}
}
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_swap_chain_present(swap_chain_id: SwapChainId) {
gfx_select!(swap_chain_id => swap_chain_present(&*GLOBAL, swap_chain_id))
}


@ -14,7 +14,11 @@ edition = "2018"
[features] [features]
default = [] default = []
[dependencies.core]
path = "../wgpu-core"
package = "wgpu-core"
version = "0.1"
[dependencies] [dependencies]
wgn = { path = "../wgpu-native", package = "wgpu-native", version = "0.4" }
log = "0.4" log = "0.4"
parking_lot = { version = "0.9" } parking_lot = { version = "0.9" }


@ -9,6 +9,7 @@ autogen_warning = """/* DO NOT MODIFY THIS MANUALLY! This file was generated usi
*/ */
typedef void WGPUEmpty; typedef void WGPUEmpty;
typedef struct WGPUGlobal WGPUGlobal;
""" """
include_version = true include_version = true
braces = "SameLine" braces = "SameLine"
@ -22,7 +23,7 @@ exclude = ["BufferMapResult"]
[parse] [parse]
parse_deps = true parse_deps = true
include = ["wgpu-native"] include = ["wgpu-core"]
[fn] [fn]
prefix = "WGPU_INLINE" prefix = "WGPU_INLINE"


@ -2,7 +2,11 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use wgn::{AdapterId, Backend, DeviceId, IdentityManager, SurfaceId}; use core::{
hub::IdentityManager,
id::{AdapterId, DeviceId},
Backend,
};
use parking_lot::Mutex; use parking_lot::Mutex;
@ -10,24 +14,16 @@ use std::{ptr, slice};
pub mod server; pub mod server;
#[derive(Debug)]
#[derive(Debug, Default)]
struct IdentityHub { struct IdentityHub {
adapters: IdentityManager<AdapterId>, adapters: IdentityManager,
devices: IdentityManager<DeviceId>, devices: IdentityManager,
} }
impl IdentityHub { #[derive(Debug, Default)]
fn new(backend: Backend) -> Self {
IdentityHub {
adapters: IdentityManager::new(backend),
devices: IdentityManager::new(backend),
}
}
}
#[derive(Debug)]
struct Identities { struct Identities {
surfaces: IdentityManager<SurfaceId>, surfaces: IdentityManager,
vulkan: IdentityHub, vulkan: IdentityHub,
#[cfg(any(target_os = "ios", target_os = "macos"))] #[cfg(any(target_os = "ios", target_os = "macos"))]
metal: IdentityHub, metal: IdentityHub,
@ -36,17 +32,6 @@ struct Identities {
} }
impl Identities { impl Identities {
fn new() -> Self {
Identities {
surfaces: IdentityManager::new(Backend::Empty),
vulkan: IdentityHub::new(Backend::Vulkan),
#[cfg(any(target_os = "ios", target_os = "macos"))]
metal: IdentityHub::new(Backend::Metal),
#[cfg(windows)]
dx12: IdentityHub::new(Backend::Dx12),
}
}
fn select(&mut self, backend: Backend) -> &mut IdentityHub { fn select(&mut self, backend: Backend) -> &mut IdentityHub {
match backend { match backend {
Backend::Vulkan => &mut self.vulkan, Backend::Vulkan => &mut self.vulkan,
@ -75,7 +60,7 @@ pub struct Infrastructure {
pub extern "C" fn wgpu_client_new() -> Infrastructure { pub extern "C" fn wgpu_client_new() -> Infrastructure {
log::info!("Initializing WGPU client"); log::info!("Initializing WGPU client");
let client = Box::new(Client { let client = Box::new(Client {
identities: Mutex::new(Identities::new()), identities: Mutex::new(Identities::default()),
}); });
Infrastructure { Infrastructure {
client: Box::into_raw(client), client: Box::into_raw(client),
@ -92,22 +77,22 @@ pub extern "C" fn wgpu_client_delete(client: *mut Client) {
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_client_make_adapter_ids( pub extern "C" fn wgpu_client_make_adapter_ids(
client: &Client, client: &Client,
ids: *mut wgn::AdapterId, ids: *mut AdapterId,
id_length: usize, id_length: usize,
) -> usize { ) -> usize {
let mut identities = client.identities.lock(); let mut identities = client.identities.lock();
assert_ne!(id_length, 0); assert_ne!(id_length, 0);
let mut ids = unsafe { slice::from_raw_parts_mut(ids, id_length) }.iter_mut(); let mut ids = unsafe { slice::from_raw_parts_mut(ids, id_length) }.iter_mut();
*ids.next().unwrap() = identities.vulkan.adapters.alloc(); *ids.next().unwrap() = identities.vulkan.adapters.alloc(Backend::Vulkan);
#[cfg(any(target_os = "ios", target_os = "macos"))] #[cfg(any(target_os = "ios", target_os = "macos"))]
{ {
*ids.next().unwrap() = identities.metal.adapters.alloc(); *ids.next().unwrap() = identities.metal.adapters.alloc(Backend::Metal);
} }
#[cfg(windows)] #[cfg(windows)]
{ {
*ids.next().unwrap() = identities.dx12.adapters.alloc(); *ids.next().unwrap() = identities.dx12.adapters.alloc(Backend::Dx12);
} }
id_length - ids.len() id_length - ids.len()
@ -116,7 +101,7 @@ pub extern "C" fn wgpu_client_make_adapter_ids(
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_client_kill_adapter_ids( pub extern "C" fn wgpu_client_kill_adapter_ids(
client: &Client, client: &Client,
ids: *const wgn::AdapterId, ids: *const AdapterId,
id_length: usize, id_length: usize,
) { ) {
let mut identity = client.identities.lock(); let mut identity = client.identities.lock();
@ -127,20 +112,18 @@ pub extern "C" fn wgpu_client_kill_adapter_ids(
} }
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_client_make_device_id( pub extern "C" fn wgpu_client_make_device_id(client: &Client, adapter_id: AdapterId) -> DeviceId {
client: &Client, let backend = adapter_id.backend();
adapter_id: wgn::AdapterId,
) -> wgn::DeviceId {
client client
.identities .identities
.lock() .lock()
.select(adapter_id.backend()) .select(backend)
.devices .devices
.alloc() .alloc(backend)
} }
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_client_kill_device_id(client: &Client, id: wgn::DeviceId) { pub extern "C" fn wgpu_client_kill_device_id(client: &Client, id: DeviceId) {
client client
.identities .identities
.lock() .lock()


@ -2,16 +2,18 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use core::{gfx_select, hub::Global, id};
use std::slice; use std::slice;
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_server_new() -> *mut wgn::Global { pub extern "C" fn wgpu_server_new() -> *mut Global<()> {
log::info!("Initializing WGPU server"); log::info!("Initializing WGPU server");
Box::into_raw(Box::new(wgn::Global::new("wgpu"))) Box::into_raw(Box::new(Global::new("wgpu")))
} }
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_server_delete(global: *mut wgn::Global) { pub extern "C" fn wgpu_server_delete(global: *mut Global<()>) {
log::info!("Terminating WGPU server"); log::info!("Terminating WGPU server");
unsafe { Box::from_raw(global) }.delete(); unsafe { Box::from_raw(global) }.delete();
log::info!("\t...done"); log::info!("\t...done");
@ -23,44 +25,32 @@ pub extern "C" fn wgpu_server_delete(global: *mut wgn::Global) {
/// Returns the index in this list, or -1 if unable to pick. /// Returns the index in this list, or -1 if unable to pick.
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_server_instance_request_adapter( pub extern "C" fn wgpu_server_instance_request_adapter(
global: &wgn::Global, global: &Global<()>,
desc: &wgn::RequestAdapterOptions, desc: &core::instance::RequestAdapterOptions,
ids: *const wgn::AdapterId, ids: *const id::AdapterId,
id_length: usize, id_length: usize,
) -> i8 { ) -> i8 {
extern "C" fn request_adapter_callback(
data: *const wgn::AdapterId,
user_data: *mut std::ffi::c_void,
) {
unsafe {
*(user_data as *mut wgn::AdapterId) = *data;
}
}
let ids = unsafe { slice::from_raw_parts(ids, id_length) }; let ids = unsafe { slice::from_raw_parts(ids, id_length) };
let mut adapter_id: wgn::AdapterId = wgn::AdapterId::ERROR; match global.pick_adapter(
let adapter_id_ref = &mut adapter_id; desc,
wgn::request_adapter_async(global, desc, ids, request_adapter_callback, adapter_id_ref as *mut _ as *mut std::ffi::c_void); core::instance::AdapterInputs::IdSet(ids, |i| i.backend()),
if adapter_id == wgn::AdapterId::ERROR { ) {
-1 Some(id) => ids.iter().position(|&i| i == id).unwrap() as i8,
} else { None => -1,
ids.iter().position(|&i| i == adapter_id).unwrap() as i8
} }
} }
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_server_adapter_request_device( pub extern "C" fn wgpu_server_adapter_request_device(
global: &wgn::Global, global: &Global<()>,
self_id: wgn::AdapterId, self_id: id::AdapterId,
desc: &wgn::DeviceDescriptor, desc: &core::instance::DeviceDescriptor,
new_id: wgn::DeviceId, new_id: id::DeviceId,
) { ) {
use wgn::adapter_request_device as func; gfx_select!(self_id => global.adapter_request_device(self_id, desc, new_id));
wgn::gfx_select!(self_id => func(global, self_id, desc, new_id));
} }
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_server_device_destroy(global: &wgn::Global, self_id: wgn::DeviceId) { pub extern "C" fn wgpu_server_device_destroy(global: &Global<()>, self_id: id::DeviceId) {
use wgn::device_destroy as func; gfx_select!(self_id => global.device_destroy(self_id))
wgn::gfx_select!(self_id => func(global, self_id))
} }