From 099f30712351e16b638b5e465065f172593e5653 Mon Sep 17 00:00:00 2001
From: Hans-Kristian Arntzen
Date: Mon, 6 Mar 2017 15:21:00 +0100
Subject: [PATCH] Add traversal for active builtin variables.

Refactor some ugly type-copying for access chains.
---
 spirv_cpp.cpp   |   2 +
 spirv_cross.cpp | 118 +++++++++++++++++++++++++++++++++++++++++++++++-
 spirv_cross.hpp |  15 ++++++
 spirv_glsl.cpp  |  35 +++++++-------
 spirv_hlsl.cpp  |   2 +
 spirv_msl.cpp   |   2 +
 6 files changed, 155 insertions(+), 19 deletions(-)

diff --git a/spirv_cpp.cpp b/spirv_cpp.cpp
index 73c8198..515c701 100644
--- a/spirv_cpp.cpp
+++ b/spirv_cpp.cpp
@@ -297,6 +297,8 @@ string CompilerCPP::compile()
     backend.explicit_struct_type = true;
     backend.use_initializer_list = true;
 
+    update_active_builtins();
+
     uint32_t pass_count = 0;
     do
     {
diff --git a/spirv_cross.cpp b/spirv_cross.cpp
index 8e36156..5be2837 100644
--- a/spirv_cross.cpp
+++ b/spirv_cross.cpp
@@ -491,7 +491,7 @@ bool Compiler::InterfaceVariableAccessHandler::handle(Op opcode, const uint32_t
 
     case OpCopyMemory:
     {
-        if (length < 3)
+        if (length < 2)
             return false;
 
         auto *var = compiler.maybe_get<SPIRVariable>(args[0]);
@@ -2865,7 +2865,7 @@ void Compiler::analyze_variable_scope(SPIRFunction &entry)
 
     case OpCopyMemory:
     {
-        if (length < 3)
+        if (length < 2)
             return false;
 
         uint32_t lhs = args[0];
@@ -3116,3 +3116,117 @@ bool Compiler::get_common_basic_type(const SPIRType &type, SPIRType::BaseType &b
         return true;
     }
 }
+
+bool Compiler::ActiveBuiltinHandler::handle(spv::Op opcode, const uint32_t *args, uint32_t length)
+{
+    const auto add_if_builtin = [&](uint32_t id) {
+        // Only handles variables here.
+        // Builtins which are part of a block are handled in AccessChain.
+        auto *var = compiler.maybe_get<SPIRVariable>(id);
+        if (var && compiler.meta[id].decoration.builtin)
+            compiler.active_builtins |= 1ull << compiler.meta[id].decoration.builtin_type;
+    };
+
+    switch (opcode)
+    {
+    case OpStore:
+        if (length < 1)
+            return false;
+
+        add_if_builtin(args[0]);
+        break;
+
+    case OpCopyMemory:
+        if (length < 2)
+            return false;
+
+        add_if_builtin(args[0]);
+        add_if_builtin(args[1]);
+        break;
+
+    case OpCopyObject:
+    case OpLoad:
+        if (length < 3)
+            return false;
+
+        add_if_builtin(args[2]);
+        break;
+
+    case OpFunctionCall:
+    {
+        if (length < 3)
+            return false;
+
+        uint32_t count = length - 3;
+        args += 3;
+        for (uint32_t i = 0; i < count; i++)
+            add_if_builtin(args[i]);
+        break;
+    }
+
+    case OpAccessChain:
+    case OpInBoundsAccessChain:
+    {
+        if (length < 4)
+            return false;
+
+        // Only consider global variables, cannot consider variables in functions yet, or other
+        // access chains as they have not been created yet.
+        auto *var = compiler.maybe_get<SPIRVariable>(args[2]);
+        if (!var)
+            break;
+
+        auto *type = &compiler.get<SPIRType>(var->basetype);
+
+        // Start traversing type hierarchy at the proper non-pointer types.
+        while (type->pointer)
+        {
+            assert(type->parent_type);
+            type = &compiler.get<SPIRType>(type->parent_type);
+        }
+
+        uint32_t count = length - 3;
+        args += 3;
+        for (uint32_t i = 0; i < count; i++)
+        {
+            // Arrays
+            if (!type->array.empty())
+            {
+                type = &compiler.get<SPIRType>(type->parent_type);
+            }
+            // Structs
+            else if (type->basetype == SPIRType::Struct)
+            {
+                uint32_t index = compiler.get<SPIRConstant>(args[i]).scalar();
+
+                if (index < uint32_t(compiler.meta[type->self].members.size()))
+                {
+                    auto &decorations = compiler.meta[type->self].members[index];
+                    if (decorations.builtin)
+                        compiler.active_builtins |= 1ull << decorations.builtin_type;
+                }
+
+                type = &compiler.get<SPIRType>(type->member_types[index]);
+            }
+            else
+            {
+                // No point in traversing further. We won't find any extra builtins.
+                break;
+            }
+        }
+        break;
+    }
+
+    default:
+        break;
+    }
+
+    return true;
+}
+
+void Compiler::update_active_builtins()
+{
+    active_builtins = 0;
+    ActiveBuiltinHandler handler(*this);
+    traverse_all_reachable_opcodes(get<SPIRFunction>(entry_point), handler);
+}
diff --git a/spirv_cross.hpp b/spirv_cross.hpp
index c415456..5f78243 100644
--- a/spirv_cross.hpp
+++ b/spirv_cross.hpp
@@ -577,6 +577,17 @@ protected:
         void register_combined_image_sampler(SPIRFunction &caller, uint32_t texture_id, uint32_t sampler_id);
     };
 
+    struct ActiveBuiltinHandler : OpcodeHandler
+    {
+        ActiveBuiltinHandler(Compiler &compiler_)
+            : compiler(compiler_)
+        {
+        }
+
+        bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
+        Compiler &compiler;
+    };
+
     bool traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHandler &handler) const;
     bool traverse_all_reachable_opcodes(const SPIRFunction &block, OpcodeHandler &handler) const;
     // This must be an ordered data structure so we always pick the same type aliases.
@@ -591,6 +602,10 @@ protected:
 
     std::unordered_set<uint32_t> forced_temporaries;
     std::unordered_set<uint32_t> forwarded_temporaries;
+
+    uint64_t active_builtins = 0;
+    // Traverses all reachable opcodes and sets active_builtins to a bitmask of all builtin variables which are accessed in the shader.
+    void update_active_builtins();
 };
 }
 
diff --git a/spirv_glsl.cpp b/spirv_glsl.cpp
index 15e6862..d53ccd5 100644
--- a/spirv_glsl.cpp
+++ b/spirv_glsl.cpp
@@ -310,6 +310,7 @@ string CompilerGLSL::compile()
     // Scan the SPIR-V to find trivial uses of extensions.
     find_static_extensions();
     fixup_image_load_store_access();
+    update_active_builtins();
 
     uint32_t pass_count = 0;
     do
@@ -3331,8 +3332,12 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
 
     const auto *type = &expression_type(base);
 
-    // For resolving array accesses, etc, keep a local copy for poking.
-    SPIRType temp;
+    // Start traversing type hierarchy at the proper non-pointer types.
+    while (type->pointer)
+    {
+        assert(type->parent_type);
+        type = &get<SPIRType>(type->parent_type);
+    }
 
     bool access_chain_is_arrayed = false;
     bool row_major_matrix_needs_conversion = is_non_native_row_major_matrix(base);
@@ -3351,11 +3356,8 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
             expr += to_expression(index);
             expr += "]";
 
-            // We have to modify the type, so keep a local copy.
-            if (&temp != type)
-                temp = *type;
-            type = &temp;
-            temp.array.pop_back();
+            assert(type->parent_type);
+            type = &get<SPIRType>(type->parent_type);
 
             access_chain_is_arrayed = true;
         }
@@ -3414,11 +3416,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
             expr += to_expression(index);
             expr += "]";
 
-            // We have to modify the type, so keep a local copy.
-            if (&temp != type)
-                temp = *type;
-            type = &temp;
-            temp.columns = 1;
+            type = &get<SPIRType>(type->parent_type);
         }
         // Vector -> Scalar
         else if (type->vecsize > 1)
@@ -3441,11 +3439,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
                 expr += "]";
             }
 
-            // We have to modify the type, so keep a local copy.
-            if (&temp != type)
-                temp = *type;
-            type = &temp;
-            temp.vecsize = 1;
+            type = &get<SPIRType>(type->parent_type);
         }
         else
             SPIRV_CROSS_THROW("Cannot subdivide a scalar value!");
@@ -3686,6 +3680,13 @@ std::pair<std::string, uint32_t> CompilerGLSL::flattened_access_chain_offset(uin
 {
     const auto *type = &expression_type(base);
 
+    // Start traversing type hierarchy at the proper non-pointer types.
+    while (type->pointer)
+    {
+        assert(type->parent_type);
+        type = &get<SPIRType>(type->parent_type);
+    }
+
     // This holds the type of the current pointer which we are traversing through.
     // We always start out from a struct type which is the block.
     // This is primarily used to reflect the array strides and matrix strides later.
diff --git a/spirv_hlsl.cpp b/spirv_hlsl.cpp
index bef4de6..f360f8c 100644
--- a/spirv_hlsl.cpp
+++ b/spirv_hlsl.cpp
@@ -1006,6 +1006,8 @@ string CompilerHLSL::compile()
     backend.use_initializer_list = true;
     backend.use_constructor_splatting = false;
 
+    update_active_builtins();
+
     uint32_t pass_count = 0;
     do
     {
diff --git a/spirv_msl.cpp b/spirv_msl.cpp
index a78cdb5..502141f 100644
--- a/spirv_msl.cpp
+++ b/spirv_msl.cpp
@@ -68,6 +68,8 @@ string CompilerMSL::compile()
     non_stage_in_input_var_ids.clear();
     struct_member_padding.clear();
 
+    update_active_builtins();
+
     // Preprocess OpCodes to extract the need to output additional header content
     set_enabled_interface_variables(get_active_interface_variables());
     preprocess_op_codes();
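For context, update_active_builtins() walks every opcode reachable from the shader entry point and ORs 1ull << builtin into the 64-bit active_builtins mask for each builtin variable or block member that is loaded, stored, copied, passed to a function call, or indexed through an access chain. A minimal sketch of how a Compiler subclass could consume that mask follows; the class name MyBackend and the helper builtin_is_active() are hypothetical and not part of this patch, only the bit layout (one bit per spv::BuiltIn value) comes from the handler above.

// Sketch only: a hypothetical Compiler subclass querying the active_builtins
// bitmask that update_active_builtins() fills in at the start of compile().
#include "spirv_cross.hpp"

class MyBackend : public spirv_cross::Compiler
{
public:
    using spirv_cross::Compiler::Compiler;

    // True if the given builtin is read or written anywhere in the shader.
    // Only meaningful after update_active_builtins() has run.
    bool builtin_is_active(spv::BuiltIn builtin) const
    {
        return (active_builtins & (1ull << builtin)) != 0;
    }
};

A check such as builtin_is_active(spv::BuiltInFragDepth) would let a backend skip emitting declarations or wrapper code for builtins the shader never touches, which is presumably the point of computing the mask up front in each compile() implementation.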