Merge pull request #537 from KhronosGroup/fix-535

Unsigned integers are disallowed on legacy targets.
This commit is contained in:
Hans-Kristian Arntzen 2018-04-17 15:30:03 +02:00 committed by GitHub
Parents 2684054bbd b9cd3dcd7f
Commit 3a8335eee0
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 78 additions and 10 deletions

View file

@ -26,6 +26,45 @@ using namespace spv;
using namespace spirv_cross;
using namespace std;
// Reports whether a SPIR-V opcode operates on unsigned integers.
// Not an exhaustive classification — it only needs to cover the opcodes
// we check for when validating legacy (pre-uint) GLSL targets.
static bool is_unsigned_opcode(Op op)
{
	return op == OpShiftRightLogical || op == OpUGreaterThan || op == OpUGreaterThanEqual ||
	       op == OpULessThan || op == OpULessThanEqual || op == OpUConvert || op == OpUDiv ||
	       op == OpUMod || op == OpUMulExtended || op == OpConvertUToF || op == OpConvertFToU;
}
// Reports whether a GLSL.std.450 extended instruction operates on unsigned
// integers. Not exhaustive — only the ops relevant to legacy target checking.
static bool is_unsigned_glsl_opcode(GLSLstd450 op)
{
	return op == GLSLstd450UClamp || op == GLSLstd450UMin || op == GLSLstd450UMax ||
	       op == GLSLstd450FindUMsb;
}
static bool packing_is_vec4_padded(BufferPackingStandard packing)
{
switch (packing)
@ -2442,6 +2481,9 @@ string CompilerGLSL::constant_op_expression(const SPIRConstantOp &cop)
bool unary = false;
string op;
if (is_legacy() && is_unsigned_opcode(cop.opcode))
SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy targets.");
// TODO: Find a clean way to reuse emit_instruction.
switch (cop.opcode)
{
@ -3013,7 +3055,14 @@ string CompilerGLSL::constant_expression_vector(const SPIRConstant &c, uint32_t
if (splat)
{
res += convert_to_string(c.scalar(vector, 0));
if (backend.uint32_t_literal_suffix)
if (is_legacy())
{
// Fake unsigned constant literals with signed ones if possible.
// Things like array sizes, etc, tend to be unsigned even though they could just as easily be signed.
if (c.scalar_i32(vector, 0) < 0)
SPIRV_CROSS_THROW("Tried to convert uint literal into int, but this made the literal negative.");
}
else if (backend.uint32_t_literal_suffix)
res += "u";
}
else
@ -3025,7 +3074,15 @@ string CompilerGLSL::constant_expression_vector(const SPIRConstant &c, uint32_t
else
{
res += convert_to_string(c.scalar(vector, i));
if (backend.uint32_t_literal_suffix)
if (is_legacy())
{
// Fake unsigned constant literals with signed ones if possible.
// Things like array sizes, etc, tend to be unsigned even though they could just as easily be signed.
if (c.scalar_i32(vector, i) < 0)
SPIRV_CROSS_THROW(
"Tried to convert uint literal into int, but this made the literal negative.");
}
else if (backend.uint32_t_literal_suffix)
res += "u";
}
@ -4044,7 +4101,10 @@ string CompilerGLSL::to_function_args(uint32_t img, const SPIRType &imgtype, boo
void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, uint32_t)
{
GLSLstd450 op = static_cast<GLSLstd450>(eop);
auto op = static_cast<GLSLstd450>(eop);
if (is_legacy() && is_unsigned_glsl_opcode(op))
SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy GLSL targets.");
switch (op)
{
@ -4109,8 +4169,8 @@ void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop,
}
// Minmax
case GLSLstd450FMin:
case GLSLstd450UMin:
case GLSLstd450FMin:
case GLSLstd450SMin:
emit_binary_func_op(result_type, id, args[0], args[1], "min");
break;
@ -4530,8 +4590,7 @@ void CompilerGLSL::emit_subgroup_op(const Instruction &i)
{
require_extension_internal("GL_KHR_shader_subgroup_clustered");
}
else if (operation == GroupOperationExclusiveScan ||
operation == GroupOperationInclusiveScan ||
else if (operation == GroupOperationExclusiveScan || operation == GroupOperationInclusiveScan ||
operation == GroupOperationReduce)
{
require_extension_internal("GL_KHR_shader_subgroup_arithmetic");
@ -4633,6 +4692,7 @@ void CompilerGLSL::emit_subgroup_op(const Instruction &i)
emit_unary_func_op(result_type, id, ops[3], "subgroupAllEqual");
break;
// clang-format off
#define GROUP_OP(op, glsl_op) \
case OpGroupNonUniform##op: \
{ \
@ -4663,6 +4723,7 @@ case OpGroupNonUniform##op: \
GROUP_OP(BitwiseOr, Or)
GROUP_OP(BitwiseXor, Xor)
#undef GROUP_OP
// clang-format on
case OpGroupNonUniformQuadSwap:
{
@ -7518,8 +7579,8 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
}
else if (memory == ScopeSubgroup)
{
const uint32_t all_barriers = MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask |
MemorySemanticsImageMemoryMask;
const uint32_t all_barriers =
MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask | MemorySemanticsImageMemoryMask;
if (semantics & (MemorySemanticsCrossWorkgroupMemoryMask | MemorySemanticsSubgroupMemoryMask))
{
@ -8318,6 +8379,9 @@ string CompilerGLSL::type_to_glsl(const SPIRType &type, uint32_t id)
break;
}
if (type.basetype == SPIRType::UInt && is_legacy())
SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy targets.");
if (type.vecsize == 1 && type.columns == 1) // Scalar builtin
{
switch (type.basetype)

View file

@ -3696,8 +3696,10 @@ void CompilerHLSL::emit_subgroup_op(const Instruction &i)
if (operation == GroupOperationReduce)
{
bool forward = should_forward(ops[4]);
auto left = join("countbits(", to_enclosed_expression(ops[4]), ".x) + countbits(", to_enclosed_expression(ops[4]), ".y)");
auto right = join("countbits(", to_enclosed_expression(ops[4]), ".z) + countbits(", to_enclosed_expression(ops[4]), ".w)");
auto left = join("countbits(", to_enclosed_expression(ops[4]), ".x) + countbits(",
to_enclosed_expression(ops[4]), ".y)");
auto right = join("countbits(", to_enclosed_expression(ops[4]), ".z) + countbits(",
to_enclosed_expression(ops[4]), ".w)");
emit_op(result_type, id, join(left, " + ", right), forward);
inherit_expression_dependencies(id, ops[4]);
}
@ -3735,6 +3737,7 @@ void CompilerHLSL::emit_subgroup_op(const Instruction &i)
break;
}
// clang-format off
#define GROUP_OP(op, hlsl_op, supports_scan) \
case OpGroupNonUniform##op: \
{ \
@ -3769,6 +3772,7 @@ case OpGroupNonUniform##op: \
GROUP_OP(BitwiseOr, BitOr, false)
GROUP_OP(BitwiseXor, BitXor, false)
#undef GROUP_OP
// clang-format on
case OpGroupNonUniformQuadSwap:
{