Merge pull request #82 from zeux/flatten-buffer-block

Implement buffer block flattening
This commit is contained in:
Hans-Kristian Arntzen 2017-01-18 18:48:13 +01:00 committed by GitHub
Parent 6198e37f3f 62b27f1d81
Commit 250ae11f74
21 changed files: 640 additions and 29 deletions

Просмотреть файл

@ -672,7 +672,7 @@ int main(int argc, char *argv[])
if (args.flatten_ubo)
for (auto &ubo : res.uniform_buffers)
compiler->flatten_interface_block(ubo.id);
compiler->flatten_buffer_block(ubo.id);
auto pls_inputs = remap_pls(args.pls_in, res.stage_inputs, &res.subpass_inputs);
auto pls_outputs = remap_pls(args.pls_out, res.stage_outputs, nullptr);

Просмотреть файл

@ -0,0 +1,10 @@
#version 310 es

// Expected flattened output: the UBO is emitted as a raw vec4 array.
// UBO[0..3] are reassembled into a mat4; UBO[13] is a trailing vec4 member.
uniform vec4 UBO[14];

in vec4 aVertex;

void main()
{
    gl_Position = (mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex) + UBO[13];
}

Просмотреть файл

@ -0,0 +1,13 @@
#version 310 es

// Expected flattened output: a single mat4 UBO becomes four vec4 slots.
uniform vec4 UBO[4];

in vec4 aVertex;
out vec3 vNormal;
in vec3 aNormal;

void main()
{
    // mat4 rebuilt column-by-column from the flattened array.
    gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex;
    vNormal = aNormal;
}

Просмотреть файл

@ -0,0 +1,29 @@
#version 310 es

struct Light
{
    vec3 Position;
    float Radius;
    vec4 Color;
};

// Expected flattened output: mat4 in UBO[0..3], then four Light structs,
// each occupying two vec4 slots (Position.xyz + Radius.w, then Color).
uniform vec4 UBO[12];

in vec4 aVertex;
out vec4 vColor;
in vec3 aNormal;

void main()
{
    gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex;
    vColor = vec4(0.0);
    for (int i = 0; i < 4; i++)
    {
        // Each member load reconstructs the whole Light struct from its two
        // vec4 slots and then extracts the wanted field.
        Light light;
        light.Position = Light(UBO[i * 2 + 4].xyz, UBO[i * 2 + 4].w, UBO[i * 2 + 5]).Position;
        light.Radius = Light(UBO[i * 2 + 4].xyz, UBO[i * 2 + 4].w, UBO[i * 2 + 5]).Radius;
        light.Color = Light(UBO[i * 2 + 4].xyz, UBO[i * 2 + 4].w, UBO[i * 2 + 5]).Color;
        vec3 L = aVertex.xyz - light.Position;
        vColor += (((UBO[i * 2 + 5]) * clamp(1.0 - (length(L) / light.Radius), 0.0, 1.0)) * dot(aNormal, normalize(L)));
    }
}

Просмотреть файл

@ -0,0 +1,25 @@
#version 310 es

struct Light
{
    vec3 Position;
    float Radius;
    vec4 Color;
};

// Expected flattened output: mat4 in UBO[0..3], then four Light structs at
// two vec4 slots each; individual fields are read via swizzles directly.
uniform vec4 UBO[12];

in vec4 aVertex;
out vec4 vColor;
in vec3 aNormal;

void main()
{
    gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex;
    vColor = vec4(0.0);
    for (int i = 0; i < 4; i++)
    {
        // Position = slot .xyz, Radius = slot .w, Color = next whole slot.
        vec3 L = aVertex.xyz - (UBO[i * 2 + 4].xyz);
        vColor += (((UBO[i * 2 + 5]) * clamp(1.0 - (length(L) / (UBO[i * 2 + 4].w)), 0.0, 1.0)) * dot(aNormal, normalize(L)));
    }
}

Просмотреть файл

@ -0,0 +1,10 @@
#version 310 es

// Expected flattened output for a 2D array UBO member: dynamic indices are
// folded into a linear "row * stride + col * stride" vec4-array index.
uniform vec4 UBO[15];

in ivec2 aIndex;

void main()
{
    gl_Position = UBO[aIndex.x * 5 + aIndex.y * 1 + 0];
}

Просмотреть файл

@ -0,0 +1,10 @@
#version 310 es

// Expected flattened output for two mat4 members (column-major and row-major):
// UBO[0..3] and UBO[4..7]. The row-major matrix is applied as vector * matrix
// instead of being transposed.
uniform vec4 UBO[8];

in vec4 aVertex;

void main()
{
    gl_Position = (mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex) + (aVertex * mat4(UBO[4], UBO[5], UBO[6], UBO[7]));
}

Просмотреть файл

@ -0,0 +1,22 @@
#version 310 es

struct Light
{
    vec3 Position;
    float Radius;
    vec4 Color;
};

// Expected flattened output: mat4 in UBO[0..3], one Light in UBO[4..5]
// (Position.xyz + Radius.w in slot 4, Color in slot 5).
uniform vec4 UBO[6];

in vec4 aVertex;
out vec4 vColor;
in vec3 aNormal;

void main()
{
    gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex;
    vColor = vec4(0.0);
    vec3 L = aVertex.xyz - UBO[4].xyz;
    vColor += ((UBO[5] * clamp(1.0 - (length(L) / UBO[4].w), 0.0, 1.0)) * dot(aNormal, normalize(L)));
}

Просмотреть файл

@ -0,0 +1,21 @@
#version 310 es

// Expected flattened output exercising std140 packing: mixed scalar/vec2/vec3
// members map onto vec4 slots via swizzles rather than whole-slot reads.
uniform vec4 UBO[8];

out vec4 oA;
out vec4 oB;
out vec4 oC;
out vec4 oD;
out vec4 oE;
out vec4 oF;

void main()
{
    gl_Position = vec4(0.0);
    oA = UBO[0];
    oB = vec4(UBO[1].xy, UBO[1].zw);
    oC = vec4(UBO[2].x, UBO[3].xyz);
    oD = vec4(UBO[4].xyz, UBO[4].w);
    oE = vec4(UBO[5].x, UBO[5].y, UBO[5].z, UBO[5].w);
    // F straddles a slot boundary: F0 in .x, F1 in .zw, F2 in the next slot.
    oF = vec4(UBO[6].x, UBO[6].zw, UBO[7].x);
}

Просмотреть файл

@ -0,0 +1,16 @@
#version 310 es

// Input shader: std140 UBO with array members of varying element types,
// used to test flattening of arrays inside the block.
layout(std140) uniform UBO
{
    uniform mat4 uMVP;
    vec4 A1[2];
    vec4 A2[2][2];
    float A3[3];
    vec4 Offset;
};

in vec4 aVertex;

void main()
{
    gl_Position = uMVP * aVertex + Offset;
}

Просмотреть файл

@ -0,0 +1,15 @@
#version 310 es

// Input shader: minimal std140 UBO with a single mat4, the simplest
// flattening case.
layout(std140) uniform UBO
{
    uniform mat4 uMVP;
};

in vec4 aVertex;
in vec3 aNormal;
out vec3 vNormal;

void main()
{
    gl_Position = uMVP * aVertex;
    vNormal = aNormal;
}

Просмотреть файл

@ -0,0 +1,34 @@
#version 310 es

struct Light
{
    vec3 Position;
    float Radius;
    vec4 Color;
};

// Input shader: UBO containing an array of structs; the whole-struct copy
// below forces flattening to reconstruct a full struct value.
layout(std140) uniform UBO
{
    mat4 uMVP;
    Light lights[4];
};

in vec4 aVertex;
in vec3 aNormal;
out vec4 vColor;

void main()
{
    gl_Position = uMVP * aVertex;
    vColor = vec4(0.0);
    for (int i = 0; i < 4; ++i)
    {
        // Whole-struct load (not member-wise) — the interesting case here.
        Light light = lights[i];
        vec3 L = aVertex.xyz - light.Position;
        vColor += dot(aNormal, normalize(L)) * (clamp(1.0 - length(L) / light.Radius, 0.0, 1.0) * lights[i].Color);
    }
}

Просмотреть файл

@ -0,0 +1,33 @@
#version 310 es
struct Light
{
vec3 Position;
float Radius;
vec4 Color;
};
layout(std140) uniform UBO
{
mat4 uMVP;
Light lights[4];
};
in vec4 aVertex;
in vec3 aNormal;
out vec4 vColor;
void main()
{
gl_Position = uMVP * aVertex;
vColor = vec4(0.0);
for (int i = 0; i < 4; ++i)
{
vec3 L = aVertex.xyz - lights[i].Position;
vColor += dot(aNormal, normalize(L)) * (clamp(1.0 - length(L) / lights[i].Radius, 0.0, 1.0) * lights[i].Color);
}
}

Просмотреть файл

@ -0,0 +1,13 @@
#version 310 es
layout(std140) uniform UBO
{
vec4 Data[3][5];
};
in ivec2 aIndex;
void main()
{
gl_Position = Data[aIndex.x][aIndex.y];
}

Просмотреть файл

@ -0,0 +1,14 @@
#version 310 es
layout(std140) uniform UBO
{
layout(column_major) mat4 uMVPR;
layout(row_major) mat4 uMVPC;
};
in vec4 aVertex;
void main()
{
gl_Position = uMVPR * aVertex + uMVPC * aVertex;
}

Просмотреть файл

@ -0,0 +1,30 @@
#version 310 es
struct Light
{
vec3 Position;
float Radius;
vec4 Color;
};
layout(std140) uniform UBO
{
mat4 uMVP;
Light light;
};
in vec4 aVertex;
in vec3 aNormal;
out vec4 vColor;
void main()
{
gl_Position = uMVP * aVertex;
vColor = vec4(0.0);
vec3 L = aVertex.xyz - light.Position;
vColor += dot(aNormal, normalize(L)) * (clamp(1.0 - length(L) / light.Radius, 0.0, 1.0) * light.Color);
}

Просмотреть файл

@ -0,0 +1,42 @@
#version 310 es
// comments note the 16b alignment boundaries (see GL spec 7.6.2.2 Standard Uniform Block Layout)
layout(std140) uniform UBO
{
// 16b boundary
uniform vec4 A;
// 16b boundary
uniform vec2 B0;
uniform vec2 B1;
// 16b boundary
uniform float C0;
// 16b boundary (vec3 is aligned to 16b)
uniform vec3 C1;
// 16b boundary
uniform vec3 D0;
uniform float D1;
// 16b boundary
uniform float E0;
uniform float E1;
uniform float E2;
uniform float E3;
// 16b boundary
uniform float F0;
uniform vec2 F1;
// 16b boundary (vec2 before us is aligned to 8b)
uniform float F2;
};
out vec4 oA, oB, oC, oD, oE, oF;
void main()
{
gl_Position = vec4(0.0);
oA = A;
oB = vec4(B0, B1);
oC = vec4(C0, C1);
oD = vec4(D0, D1);
oE = vec4(E0, E1, E2, E3);
oF = vec4(F0, F1, F2);
}

Просмотреть файл

@ -201,11 +201,7 @@ public:
// Returns the effective size of a buffer block struct member.
virtual size_t get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const;
// Legacy GLSL compatibility method.
// Takes a variable with a block interface and flattens it into a T array[N]; array instead.
// For this to work, all types in the block must not themselves be composites
// (except vectors and matrices), and all types must be the same.
// The name of the uniform will be the same as the interface block name.
// Legacy GLSL compatibility method. Deprecated in favor of CompilerGLSL::flatten_buffer_block
void flatten_interface_block(uint32_t id);
// Returns a set of all global variables which are statically accessed

Просмотреть файл

@ -129,6 +129,23 @@ static uint32_t pls_format_to_components(PlsFormat format)
}
}
// Maps a (vector size, starting component) pair to the swizzle suffix used
// when reading from a flattened vec4 array, e.g. (3, 1) -> ".yzw".
// A full 4-component read needs no swizzle and yields "".
static const char *vector_swizzle(int vecsize, int index)
{
	// Rows are indexed by vecsize - 1, columns by the starting component.
	// Entries left out of a row are null and are rejected by the asserts
	// below, since such combinations would read past component .w.
	static const char *table[4][4] = {
		{ ".x", ".y", ".z", ".w" },
		{ ".xy", ".yz", ".zw" },
		{ ".xyz", ".yzw" },
		{ "" },
	};

	assert(vecsize >= 1 && vecsize <= 4);
	assert(index >= 0 && index < 4);

	const char *swiz = table[vecsize - 1][index];
	assert(swiz);
	return swiz;
}
void CompilerGLSL::reset()
{
// We do some speculative optimizations which should pretty much always work out,
@ -1000,9 +1017,28 @@ void CompilerGLSL::emit_push_constant_block_glsl(const SPIRVariable &var)
statement("");
}
// Dispatches buffer block emission: flattened vec4 arrays take priority,
// then the legacy struct-uniform fallback, then native interface blocks.
void CompilerGLSL::emit_buffer_block(const SPIRVariable &var)
{
	if (flattened_buffer_blocks.count(var.self))
	{
		emit_buffer_block_flattened(var);
		return;
	}

	if (is_legacy())
		emit_buffer_block_legacy(var);
	else
		emit_buffer_block_native(var);
}
void CompilerGLSL::emit_buffer_block_legacy(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
bool ssbo = (meta[type.self].decoration.decoration_flags & (1ull << DecorationBufferBlock)) != 0;
if (ssbo)
SPIRV_CROSS_THROW("SSBOs not supported in legacy targets.");
// We're emitting the push constant block as a regular struct, so disable the block qualifier temporarily.
// Otherwise, we will end up emitting layout() qualifiers on naked structs which is not allowed.
@ -1015,21 +1051,12 @@ void CompilerGLSL::emit_buffer_block_legacy(const SPIRVariable &var)
statement("");
}
void CompilerGLSL::emit_buffer_block(const SPIRVariable &var)
void CompilerGLSL::emit_buffer_block_native(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
bool ssbo = (meta[type.self].decoration.decoration_flags & (1ull << DecorationBufferBlock)) != 0;
bool is_restrict = (meta[var.self].decoration.decoration_flags & (1ull << DecorationRestrict)) != 0;
// By default, for legacy targets, fall back to declaring a uniform struct.
if (is_legacy())
{
if (ssbo)
SPIRV_CROSS_THROW("SSBOs not supported in legacy targets.");
emit_buffer_block_legacy(var);
return;
}
add_resource_name(var.self);
// Block names should never alias.
@ -1061,6 +1088,17 @@ void CompilerGLSL::emit_buffer_block(const SPIRVariable &var)
statement("");
}
void CompilerGLSL::emit_buffer_block_flattened(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
// Block names should never alias.
auto buffer_name = to_name(type.self, false);
size_t buffer_size = (get_declared_struct_size(type) + 15) / 16;
statement("uniform vec4 ", buffer_name, "[", buffer_size, "];");
}
void CompilerGLSL::emit_interface_block(const SPIRVariable &var)
{
auto &execution = get_entry_point();
@ -3252,6 +3290,208 @@ string CompilerGLSL::access_chain(uint32_t base, const uint32_t *indices, uint32
return expr;
}
// Builds an access chain expression, routing through the flattening path when
// the base variable belongs to a flattened buffer block; otherwise defers to
// the regular access_chain overload.
string CompilerGLSL::access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, bool *need_transpose)
{
	if (!flattened_buffer_blocks.count(base))
		return access_chain(base, indices, count, false, false, need_transpose);

	// For flattened blocks the offset walk is only needed here to discover
	// whether the chain lands on a row-major matrix needing a transpose.
	if (need_transpose)
		flattened_access_chain_offset(base, indices, count, 0, need_transpose);

	return flattened_access_chain(base, indices, count, target_type, 0);
}
// Dispatches on the access chain's result type, building an expression which
// reconstructs that value from the flattened vec4 array. Arrays cannot be
// materialized as a single expression and are rejected.
std::string CompilerGLSL::flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, uint32_t offset)
{
	if (!target_type.array.empty())
		SPIRV_CROSS_THROW("Access chains that result in an array can not be flattened");

	if (target_type.basetype == SPIRType::Struct)
		return flattened_access_chain_struct(base, indices, count, target_type, offset);

	if (target_type.columns > 1)
		return flattened_access_chain_matrix(base, indices, count, target_type, offset);

	return flattened_access_chain_vector_scalar(base, indices, count, target_type, offset);
}
// Reconstructs a struct value from a flattened block by emitting a
// "StructType(member0, member1, ...)" constructor call, recursing into each
// member at its declared offset within the block.
std::string CompilerGLSL::flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, uint32_t offset)
{
	std::string expr = type_to_glsl(target_type);
	expr += "(";

	size_t member_count = target_type.member_types.size();
	for (size_t i = 0; i < member_count; ++i)
	{
		if (i != 0)
			expr += ", ";

		const SPIRType &member_type = get<SPIRType>(target_type.member_types[i]);
		uint32_t member_offset = type_struct_member_offset(target_type, uint32_t(i));
		expr += flattened_access_chain(base, indices, count, member_type, offset + member_offset);
	}

	expr += ")";
	return expr;
}
// Reconstructs a matrix from a flattened block as "matN(col0, col1, ...)",
// emitting one vec4-array read per column.
std::string CompilerGLSL::flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, uint32_t offset)
{
	std::string expr = type_to_glsl(target_type);
	expr += "(";

	for (uint32_t col = 0; col < target_type.columns; ++col)
	{
		if (col != 0)
			expr += ", ";

		// NOTE(review): a fixed 16-byte column stride is assumed here —
		// verify against the declared matrix stride for non-vec4 columns.
		expr += flattened_access_chain_vector_scalar(base, indices, count, target_type, offset + col * 16);
	}

	expr += ")";
	return expr;
}
// Emits a single swizzled read ("Buffer[dyn + k].xyz") for a vector or scalar
// at the given byte offset within the flattened vec4 array. Only float-based
// types can be flattened, since the array is declared as vec4.
std::string CompilerGLSL::flattened_access_chain_vector_scalar(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, uint32_t offset)
{
	if (target_type.basetype != SPIRType::Float)
		SPIRV_CROSS_THROW("Access chains that use non-floating-point base types can not be flattened");

	auto chain = flattened_access_chain_offset(base, indices, count, offset);

	// The final byte offset must land on a 4-byte component boundary.
	assert(chain.second % 4 == 0);
	uint32_t component = chain.second / 4;

	auto buffer_name = to_name(expression_type(base).self);

	// chain.first is a series of N1*k1+N2*k2+... that is either empty or ends with a +
	std::string expr = buffer_name;
	expr += "[";
	expr += chain.first;
	expr += convert_to_string(component / 4);
	expr += "]";
	expr += vector_swizzle(target_type.vecsize, component % 4);

	return expr;
}
// Walks an access chain over a flattened buffer block and returns:
//  - first:  the dynamic part of the index, a series of "expr * stride + "
//            terms in vec4 units, which is either empty or ends with "+ "
//  - second: the statically-known byte offset accumulated so far.
// When need_transpose is non-null, reports whether the chain traversed a
// row-major matrix member (which callers must transpose on load).
std::pair<std::string, uint32_t> CompilerGLSL::flattened_access_chain_offset(uint32_t base, const uint32_t *indices, uint32_t count, uint32_t offset, bool *need_transpose)
{
	const auto *type = &expression_type(base);
	// Size in bytes of the composite most recently descended into;
	// needed to derive array strides.
	uint32_t type_size = 0;

	// For resolving array accesses, etc, keep a local copy for poking.
	SPIRType temp;

	std::string expr;
	bool row_major_matrix_needs_conversion = false;

	for (uint32_t i = 0; i < count; i++)
	{
		uint32_t index = indices[i];

		// Arrays
		if (!type->array.empty())
		{
			// We have to modify the type, so keep a local copy.
			if (&temp != type)
				temp = *type;
			type = &temp;

			uint32_t array_size = temp.array.back();
			temp.array.pop_back();

			// type_size was set while resolving the enclosing struct member,
			// so it covers the whole array; derive the per-element stride.
			assert(type_size > 0);
			assert(type_size % array_size == 0);

			uint32_t array_stride = type_size / array_size;
			// The stride must be vec4-aligned so the dynamic index can be
			// expressed in whole vec4 elements.
			assert(array_stride % 16 == 0);

			expr += to_expression(index);
			expr += " * ";
			expr += convert_to_string(array_stride / 16);
			expr += " + ";

			type_size = array_stride;
		}
		// For structs, the index refers to a constant, which indexes into the members.
		// We also check if this member is a builtin, since we then replace the entire expression with the builtin one.
		else if (type->basetype == SPIRType::Struct)
		{
			index = get<SPIRConstant>(index).scalar();

			if (index >= type->member_types.size())
				SPIRV_CROSS_THROW("Member index is out of bounds!");

			offset += type_struct_member_offset(*type, index);
			type_size = uint32_t(get_declared_struct_member_size(*type, index));

			row_major_matrix_needs_conversion = (combined_decoration_for_member(*type, index) & (1ull << DecorationRowMajor)) != 0;

			type = &get<SPIRType>(type->member_types[index]);
		}
		// Matrix -> Vector
		else if (type->columns > 1)
		{
			if (row_major_matrix_needs_conversion)
				SPIRV_CROSS_THROW("Matrix indexing is not supported for flattened row major matrices!");

			// Only a compile-time constant column index can be folded into
			// the static byte offset.
			if (ids[index].get_type() != TypeConstant)
				SPIRV_CROSS_THROW("Cannot flatten dynamic matrix indexing!");

			index = get<SPIRConstant>(index).scalar();
			// NOTE(review): assumes a 16-byte column stride, matching
			// flattened_access_chain_matrix.
			offset += index * 16;

			// We have to modify the type, so keep a local copy.
			if (&temp != type)
				temp = *type;
			type = &temp;
			temp.columns = 1;
		}
		// Vector -> Scalar
		else if (type->vecsize > 1)
		{
			if (ids[index].get_type() != TypeConstant)
				SPIRV_CROSS_THROW("Cannot flatten dynamic vector indexing!");

			index = get<SPIRConstant>(index).scalar();
			// Components are 4 bytes each.
			offset += index * 4;

			// We have to modify the type, so keep a local copy.
			if (&temp != type)
				temp = *type;
			type = &temp;
			temp.vecsize = 1;
		}
		else
			SPIRV_CROSS_THROW("Cannot subdivide a scalar value!");
	}

	if (need_transpose)
		*need_transpose = row_major_matrix_needs_conversion;
	return std::make_pair(expr, offset);
}
bool CompilerGLSL::should_forward(uint32_t id)
{
// Immutable expression can always be forwarded.
@ -3621,7 +3861,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
// If the base is immutable, the access chain pointer must also be.
// If an expression is mutable and forwardable, we speculate that it is immutable.
bool need_transpose;
auto e = access_chain(ops[2], &ops[3], length - 3, false, false, &need_transpose);
auto e = access_chain(ops[2], &ops[3], length - 3, get<SPIRType>(ops[0]), &need_transpose);
auto &expr = set<SPIRExpression>(ops[1], move(e), ops[0], should_forward(ops[2]));
expr.loaded_from = ops[2];
expr.need_transpose = need_transpose;
@ -5394,6 +5634,25 @@ void CompilerGLSL::require_extension(const string &ext)
}
}
// Marks a uniform buffer variable for flattening into a plain vec4 array.
// Validates that the variable is a non-array, non-empty struct carrying the
// Block decoration; throws with the block's name otherwise. The actual
// emission happens later via emit_buffer_block_flattened.
void CompilerGLSL::flatten_buffer_block(uint32_t id)
{
	auto &var = get<SPIRVariable>(id);
	auto &type = get<SPIRType>(var.basetype);
	auto name = to_name(type.self, false);
	auto flags = meta.at(type.self).decoration.decoration_flags;

	if (!type.array.empty())
		SPIRV_CROSS_THROW(name + " is an array of UBOs.");
	if (type.basetype != SPIRType::Struct)
		SPIRV_CROSS_THROW(name + " is not a struct.");
	if ((flags & (1ull << DecorationBlock)) == 0)
		SPIRV_CROSS_THROW(name + " is not a block.");
	if (type.member_types.empty())
		SPIRV_CROSS_THROW(name + " is an empty struct.");

	flattened_buffer_blocks.insert(id);
}
bool CompilerGLSL::check_atomic_image(uint32_t id)
{
auto &type = expression_type(id);

Просмотреть файл

@ -138,6 +138,12 @@ public:
// require_extension("GL_KHR_my_extension");
void require_extension(const std::string &ext);
// Legacy GLSL compatibility method.
// Takes a uniform or storage buffer variable and flattens it into a vec4 array[N]; array instead.
// For this to work, all types in the block must not be integers or vector of integers.
// The name of the uniform array will be the same as the interface block name.
void flatten_buffer_block(uint32_t id);
protected:
void reset();
void emit_function(SPIRFunction &func, uint64_t return_flags);
@ -264,7 +270,9 @@ protected:
void emit_struct(SPIRType &type);
void emit_resources();
void emit_buffer_block(const SPIRVariable &type);
void emit_buffer_block_native(const SPIRVariable &var);
void emit_buffer_block_legacy(const SPIRVariable &var);
void emit_buffer_block_flattened(const SPIRVariable &type);
void emit_push_constant_block(const SPIRVariable &var);
void emit_push_constant_block_vulkan(const SPIRVariable &var);
void emit_push_constant_block_glsl(const SPIRVariable &var);
@ -307,6 +315,13 @@ protected:
bool suppress_usage_tracking = false);
std::string access_chain(uint32_t base, const uint32_t *indices, uint32_t count, bool index_is_literal,
bool chain_only = false, bool *need_transpose = nullptr);
std::string access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, bool *need_transpose = nullptr);
std::string flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, uint32_t offset);
std::string flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, uint32_t offset);
std::string flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, uint32_t offset);
std::string flattened_access_chain_vector_scalar(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, uint32_t offset);
std::pair<std::string, uint32_t> flattened_access_chain_offset(uint32_t base, const uint32_t *indices, uint32_t count, uint32_t offset, bool *need_transpose = nullptr);
const char *index_to_swizzle(uint32_t index);
std::string remap_swizzle(uint32_t result_type, uint32_t input_components, uint32_t expr);
@ -355,6 +370,8 @@ protected:
std::unordered_set<uint32_t> emitted_functions;
std::unordered_set<uint32_t> flattened_buffer_blocks;
// Usage tracking. If a temporary is used more than once, use the temporary instead to
// avoid AST explosion when SPIRV is generated with pure SSA and doesn't write stuff to variables.
std::unordered_map<uint32_t, uint32_t> expression_usage_counts;

Просмотреть файл

@ -66,7 +66,7 @@ def validate_shader(shader, vulkan):
else:
subprocess.check_call(['glslangValidator', shader])
def cross_compile(shader, vulkan, spirv, eliminate, invalid_spirv, is_legacy):
def cross_compile(shader, vulkan, spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo):
spirv_f, spirv_path = tempfile.mkstemp()
glsl_f, glsl_path = tempfile.mkstemp(suffix = os.path.basename(shader))
os.close(spirv_f)
@ -84,25 +84,23 @@ def cross_compile(shader, vulkan, spirv, eliminate, invalid_spirv, is_legacy):
if not invalid_spirv:
subprocess.check_call(['spirv-val', spirv_path])
legacy_cmd = []
extra_args = []
if eliminate:
extra_args += ['--remove-unused-variables']
if is_legacy:
legacy_cmd = ['--version', '100', '--es']
extra_args += ['--version', '100', '--es']
if flatten_ubo:
extra_args += ['--flatten-ubo']
spirv_cross_path = './spirv-cross'
if eliminate:
subprocess.check_call([spirv_cross_path, '--remove-unused-variables', '--entry', 'main', '--output', glsl_path, spirv_path] + legacy_cmd)
else:
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', glsl_path, spirv_path] + legacy_cmd)
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', glsl_path, spirv_path] + extra_args)
# A shader might not be possible to make valid GLSL from, skip validation for this case.
if (not ('nocompat' in glsl_path)) and (not spirv):
validate_shader(glsl_path, False)
if vulkan or spirv:
if eliminate:
subprocess.check_call([spirv_cross_path, '--remove-unused-variables', '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path])
else:
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path])
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path] + extra_args)
validate_shader(vulkan_glsl_path, vulkan)
return (spirv_path, glsl_path, vulkan_glsl_path if vulkan else None)
@ -178,6 +176,9 @@ def shader_is_invalid_spirv(shader):
def shader_is_legacy(shader):
return '.legacy.' in shader
def shader_is_flatten_ubo(shader):
    """Return True when the shader filename opts into UBO flattening."""
    return shader.find('.flatten.') >= 0
def test_shader(stats, shader, update, keep):
joined_path = os.path.join(shader[0], shader[1])
vulkan = shader_is_vulkan(shader[1])
@ -186,9 +187,10 @@ def test_shader(stats, shader, update, keep):
is_spirv = shader_is_spirv(shader[1])
invalid_spirv = shader_is_invalid_spirv(shader[1])
is_legacy = shader_is_legacy(shader[1])
flatten_ubo = shader_is_flatten_ubo(shader[1])
print('Testing shader:', joined_path)
spirv, glsl, vulkan_glsl = cross_compile(joined_path, vulkan, is_spirv, eliminate, invalid_spirv, is_legacy)
spirv, glsl, vulkan_glsl = cross_compile(joined_path, vulkan, is_spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo)
# Only test GLSL stats if we have a shader following GL semantics.
if stats and (not vulkan) and (not is_spirv) and (not desktop):