Rewrite everything to use Bitset rather than uint64_t.

Hans-Kristian Arntzen 2018-03-12 13:09:25 +01:00
Parent 29315f3b3f
Commit e8e58844d4
27 changed files with 628 additions and 357 deletions
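
The change boils down to one recurring pattern; the following is a minimal sketch of it (hypothetical helper functions, not code from this commit, assuming the spirv_cross::Compiler API shown in the spirv_cross.hpp diff below):

#include "spirv_cross.hpp" // assumed include path

// Pre-commit style: raw 64-bit masks, which cannot represent decorations >= 64.
static bool is_block_old(const spirv_cross::Compiler &compiler, uint32_t id)
{
    return (compiler.get_decoration_mask(id) & (1ull << spv::DecorationBlock)) != 0;
}

// Post-commit style: the Bitset wrapper handles arbitrary decoration values.
static bool is_block_new(const spirv_cross::Compiler &compiler, uint32_t id)
{
    return compiler.get_decoration_bitset(id).get(spv::DecorationBlock);
}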

View file

@ -1,7 +1,7 @@
#!/bin/bash
GLSLANG_REV=9c6f8cc29ba303b43ccf36deea6bb38a304f9b92
SPIRV_TOOLS_REV=e28edd458b729da7bbfd51e375feb33103709e6f
GLSLANG_REV=845860d56513d95e15fe4820df7272f9687d076e
SPIRV_TOOLS_REV=340370eddbb9f0e7d26b6a4f7e22c1b98150e5e1
if [ -d external/glslang ]; then
echo "Updating glslang to revision $GLSLANG_REV."
@ -37,15 +37,16 @@ else
git clone git://github.com/KhronosGroup/SPIRV-Tools.git spirv-tools
cd spirv-tools
git checkout $SPIRV_TOOLS_REV
if [ -d external/spirv-headers ]; then
cd external/spirv-headers
git pull origin master
cd ../..
else
git clone git://github.com/KhronosGroup/SPIRV-Headers.git external/spirv-headers
fi
fi
if [ -d external/spirv-headers ]; then
cd external/spirv-headers
git pull origin master
cd ../..
else
git clone git://github.com/KhronosGroup/SPIRV-Headers.git external/spirv-headers
fi
cd ../..
echo "Building SPIRV-Tools."

View file

@ -212,7 +212,7 @@ static void print_resources(const Compiler &compiler, const char *tag, const vec
for (auto &res : resources)
{
auto &type = compiler.get_type(res.type_id);
auto mask = compiler.get_decoration_mask(res.id);
auto &mask = compiler.get_decoration_bitset(res.id);
if (print_ssbo && compiler.buffer_is_hlsl_counter_buffer(res.id))
continue;
@ -221,8 +221,8 @@ static void print_resources(const Compiler &compiler, const char *tag, const vec
// for SSBOs and UBOs since those are the only meaningful names to use externally.
// Push constant blocks are still accessed by name and not block name, even though they are technically Blocks.
bool is_push_constant = compiler.get_storage_class(res.id) == StorageClassPushConstant;
bool is_block = (compiler.get_decoration_mask(type.self) &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) != 0;
bool is_block = compiler.get_decoration_bitset(type.self).get(DecorationBlock) ||
compiler.get_decoration_bitset(type.self).get(DecorationBufferBlock);
bool is_sized_block = is_block && (compiler.get_storage_class(res.id) == StorageClassUniform ||
compiler.get_storage_class(res.id) == StorageClassUniformConstant);
uint32_t fallback_id = !is_push_constant && is_block ? res.base_type_id : res.id;
@ -238,17 +238,17 @@ static void print_resources(const Compiler &compiler, const char *tag, const vec
fprintf(stderr, " ID %03u : %s%s", res.id,
!res.name.empty() ? res.name.c_str() : compiler.get_fallback_name(fallback_id).c_str(), array.c_str());
if (mask & (1ull << DecorationLocation))
if (mask.get(DecorationLocation))
fprintf(stderr, " (Location : %u)", compiler.get_decoration(res.id, DecorationLocation));
if (mask & (1ull << DecorationDescriptorSet))
if (mask.get(DecorationDescriptorSet))
fprintf(stderr, " (Set : %u)", compiler.get_decoration(res.id, DecorationDescriptorSet));
if (mask & (1ull << DecorationBinding))
if (mask.get(DecorationBinding))
fprintf(stderr, " (Binding : %u)", compiler.get_decoration(res.id, DecorationBinding));
if (mask & (1ull << DecorationInputAttachmentIndex))
if (mask.get(DecorationInputAttachmentIndex))
fprintf(stderr, " (Attachment : %u)", compiler.get_decoration(res.id, DecorationInputAttachmentIndex));
if (mask & (1ull << DecorationNonReadable))
if (mask.get(DecorationNonReadable))
fprintf(stderr, " writeonly");
if (mask & (1ull << DecorationNonWritable))
if (mask.get(DecorationNonWritable))
fprintf(stderr, " readonly");
if (is_sized_block)
fprintf(stderr, " (BlockSize : %u bytes)", block_size);
@ -284,7 +284,7 @@ static const char *execution_model_to_str(spv::ExecutionModel model)
static void print_resources(const Compiler &compiler, const ShaderResources &res)
{
uint64_t modes = compiler.get_execution_mode_mask();
auto &modes = compiler.get_execution_mode_bitset();
fprintf(stderr, "Entry points:\n");
auto entry_points = compiler.get_entry_points_and_stages();
@ -293,11 +293,7 @@ static void print_resources(const Compiler &compiler, const ShaderResources &res
fprintf(stderr, "\n");
fprintf(stderr, "Execution modes:\n");
for (unsigned i = 0; i < 64; i++)
{
if (!(modes & (1ull << i)))
continue;
modes.for_each_bit([&](uint32_t i) {
auto mode = static_cast<ExecutionMode>(i);
uint32_t arg0 = compiler.get_execution_mode_argument(mode, 0);
uint32_t arg1 = compiler.get_execution_mode_argument(mode, 1);
@ -353,7 +349,7 @@ static void print_resources(const Compiler &compiler, const ShaderResources &res
default:
break;
}
}
});
fprintf(stderr, "\n");
print_resources(compiler, "subpass inputs", res.subpass_inputs);
@ -898,7 +894,15 @@ static int main_inner(int argc, char *argv[])
}
if (build_dummy_sampler)
compiler->build_dummy_sampler_for_combined_images();
{
uint32_t sampler = compiler->build_dummy_sampler_for_combined_images();
if (sampler != 0)
{
// Set some defaults to make validation happy.
compiler->set_decoration(sampler, DecorationDescriptorSet, 0);
compiler->set_decoration(sampler, DecorationBinding, 0);
}
}
ShaderResources res;
if (args.remove_unused)

View file

@ -2,7 +2,7 @@
layout(set = 0, binding = 0) uniform sampler Sampler;
layout(set = 0, binding = 0) uniform texture2D SampledImage;
uniform sampler SPIRV_Cross_DummySampler;
layout(set = 0, binding = 0) uniform sampler SPIRV_Cross_DummySampler;
layout(location = 0) out vec4 _entryPointOutput;

View file

@ -1,7 +1,7 @@
#version 450
layout(rgba32f) uniform writeonly imageBuffer RWTex;
uniform samplerBuffer Tex;
layout(binding = 0, rgba32f) uniform writeonly imageBuffer RWTex;
layout(binding = 1) uniform samplerBuffer Tex;
layout(location = 0) out vec4 _entryPointOutput;

View file

@ -4,40 +4,40 @@ layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
layout(binding = 0, std430) readonly buffer Distribution
{
vec2 distribution[];
} _136;
} _137;
layout(binding = 2, std140) uniform UBO
{
vec4 uModTime;
} _165;
} _166;
layout(binding = 1, std430) writeonly buffer HeightmapFFT
{
uint heights[];
} _224;
} _225;
uvec2 workaround_mix(uvec2 a, uvec2 b, bvec2 sel)
{
uint _83;
uint _86;
if (sel.x)
{
_83 = b.x;
_86 = b.x;
}
else
{
_83 = a.x;
_86 = a.x;
}
uint _93 = _83;
uint _94;
uint _94 = _86;
uint _97;
if (sel.y)
{
_94 = b.y;
_97 = b.y;
}
else
{
_94 = a.y;
_97 = a.y;
}
return uvec2(_93, _94);
return uvec2(_94, _97);
}
vec2 alias(vec2 i, vec2 N)
@ -68,13 +68,13 @@ void generate_heightmap()
uvec2 param_1 = uvec2(0u);
bvec2 param_2 = equal(i, uvec2(0u));
uvec2 wi = workaround_mix(param, param_1, param_2);
vec2 a = _136.distribution[(i.y * N.x) + i.x];
vec2 b = _136.distribution[(wi.y * N.x) + wi.x];
vec2 a = _137.distribution[(i.y * N.x) + i.x];
vec2 b = _137.distribution[(wi.y * N.x) + wi.x];
vec2 param_3 = vec2(i);
vec2 param_4 = vec2(N);
vec2 k = _165.uModTime.xy * alias(param_3, param_4);
vec2 k = _166.uModTime.xy * alias(param_3, param_4);
float k_len = length(k);
float w = sqrt(9.81000041961669921875 * k_len) * _165.uModTime.z;
float w = sqrt(9.81000041961669921875 * k_len) * _166.uModTime.z;
float cw = cos(w);
float sw = sin(w);
vec2 param_5 = a;
@ -86,7 +86,7 @@ void generate_heightmap()
b = vec2(b.x, -b.y);
vec2 res = a + b;
vec2 param_9 = res;
_224.heights[(i.y * N.x) + i.x] = pack2(param_9);
_225.heights[(i.y * N.x) + i.x] = pack2(param_9);
}
void main()

View file

@ -2,7 +2,7 @@
precision mediump float;
precision highp int;
layout(location = 0) uniform mediump sampler2D samp;
layout(binding = 0) uniform mediump sampler2D samp;
layout(location = 0) out vec4 FragColor;
layout(location = 2) in vec2 vUV;

View file

@ -58,26 +58,26 @@ vec2 warp_position()
uint ufloor_lod = uint(floor_lod);
uvec2 uPosition = uvec2(Position);
uvec2 mask = (uvec2(1u) << uvec2(ufloor_lod, ufloor_lod + 1u)) - uvec2(1u);
uint _106;
uint _110;
if (uPosition.x < 32u)
{
_106 = mask.x;
_110 = mask.x;
}
else
{
_106 = 0u;
_110 = 0u;
}
uint _116 = _106;
uint _117;
uint _116 = _110;
uint _120;
if (uPosition.y < 32u)
{
_117 = mask.y;
_120 = mask.y;
}
else
{
_117 = 0u;
_120 = 0u;
}
uvec2 rounding = uvec2(_116, _117);
uvec2 rounding = uvec2(_116, _120);
vec4 lower_upper_snapped = vec4((uPosition + rounding).xyxy & (~mask).xxyy);
return mix(lower_upper_snapped.xy, lower_upper_snapped.zw, vec2(fract_lod));
}

View file

@ -59,47 +59,47 @@ vec2 warp_position()
uint ufloor_lod = uint(floor_lod);
uvec4 uPosition = uvec4(Position);
uvec2 mask = (uvec2(1u) << uvec2(ufloor_lod, ufloor_lod + 1u)) - uvec2(1u);
uint _107;
uint _111;
if (uPosition.x < 32u)
{
_107 = mask.x;
_111 = mask.x;
}
else
{
_107 = 0u;
_111 = 0u;
}
uvec4 rounding;
rounding.x = _107;
uint _119;
rounding.x = _111;
uint _122;
if (uPosition.y < 32u)
{
_119 = mask.x;
_122 = mask.x;
}
else
{
_119 = 0u;
_122 = 0u;
}
rounding.y = _119;
uint _130;
rounding.y = _122;
uint _133;
if (uPosition.x < 32u)
{
_130 = mask.y;
_133 = mask.y;
}
else
{
_130 = 0u;
_133 = 0u;
}
rounding.z = _130;
uint _142;
rounding.z = _133;
uint _145;
if (uPosition.y < 32u)
{
_142 = mask.y;
_145 = mask.y;
}
else
{
_142 = 0u;
_145 = 0u;
}
rounding.w = _142;
rounding.w = _145;
vec4 lower_upper_snapped = vec4((uPosition.xyxy + rounding) & (~mask).xxyy);
return mix(lower_upper_snapped.xy, lower_upper_snapped.zw, vec2(fract_lod));
}

View file

@ -2,8 +2,8 @@
#extension GL_AMD_shader_fragment_mask : require
#extension GL_AMD_shader_explicit_vertex_parameter : require
uniform sampler2DMS texture1;
layout(location = 0) in vec4 vary;
layout(binding = 0) uniform sampler2DMS texture1;
layout(location = 0) __explicitInterpAMD in vec4 vary;
void main()
{

View file

@ -16,6 +16,8 @@
OpName %_entryPointOutput "@entryPointOutput"
OpDecorate %pointLightShadowMap DescriptorSet 0
OpDecorate %shadowSamplerPCF DescriptorSet 0
OpDecorate %pointLightShadowMap Binding 0
OpDecorate %shadowSamplerPCF Binding 1
OpDecorate %_entryPointOutput Location 0
%void = OpTypeVoid
%3 = OpTypeFunction %void

View file

@ -29,6 +29,8 @@
OpName %param_1 "param"
OpDecorate %ShadowMap DescriptorSet 0
OpDecorate %ShadowSamplerPCF DescriptorSet 0
OpDecorate %ShadowMap Binding 0
OpDecorate %ShadowSamplerPCF Binding 1
OpDecorate %texCoords_1 Location 0
OpDecorate %cascadeIndex_1 Location 1
OpDecorate %fragDepth_1 Location 2

View file

@ -5,6 +5,7 @@
; Schema: 0
OpCapability Shader
OpCapability SampledBuffer
OpCapability ImageBuffer
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint Fragment %main "main" %_entryPointOutput
@ -17,6 +18,8 @@
OpName %_entryPointOutput "@entryPointOutput"
OpDecorate %RWTex DescriptorSet 0
OpDecorate %Tex DescriptorSet 0
OpDecorate %RWTex Binding 0
OpDecorate %Tex Binding 1
OpDecorate %_entryPointOutput Location 0
%void = OpTypeVoid
%3 = OpTypeFunction %void

View file

@ -1,7 +1,7 @@
#version 310 es
precision mediump float;
layout(location = 0) uniform sampler2D samp;
layout(binding = 0) uniform sampler2D samp;
layout(location = 0) out vec4 FragColor;
layout(location = 1) in vec3 vNormal;
layout(location = 2) in vec2 vUV;

View file

@ -2,7 +2,7 @@
precision mediump float;
uniform sampler2D tex;
layout(binding = 0) uniform sampler2D tex;
layout(location = 0) out vec4 FragColor;

View file

@ -1,6 +1,6 @@
#version 310 es
uniform sampler2D tex;
layout(binding = 0) uniform sampler2D tex;
void main()
{

spirv.hpp
View file

@ -1,4 +1,4 @@
// Copyright (c) 2014-2017 The Khronos Group Inc.
// Copyright (c) 2014-2018 The Khronos Group Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and/or associated documentation files (the "Materials"),
@ -46,12 +46,12 @@ namespace spv {
typedef unsigned int Id;
#define SPV_VERSION 0x10000
#define SPV_REVISION 10
#define SPV_VERSION 0x10300
#define SPV_REVISION 1
static const unsigned int MagicNumber = 0x07230203;
static const unsigned int Version = 0x00010000;
static const unsigned int Revision = 10;
static const unsigned int Version = 0x00010300;
static const unsigned int Revision = 1;
static const unsigned int OpCodeMask = 0xffff;
static const unsigned int WordCountShift = 16;
@ -122,6 +122,15 @@ enum ExecutionMode {
ExecutionModeOutputTriangleStrip = 29,
ExecutionModeVecTypeHint = 30,
ExecutionModeContractionOff = 31,
ExecutionModeInitializer = 33,
ExecutionModeFinalizer = 34,
ExecutionModeSubgroupSize = 35,
ExecutionModeSubgroupsPerWorkgroup = 36,
ExecutionModeSubgroupsPerWorkgroupId = 37,
ExecutionModeLocalSizeId = 38,
ExecutionModeLocalSizeHintId = 39,
ExecutionModePostDepthCoverage = 4446,
ExecutionModeStencilRefReplacingEXT = 5027,
ExecutionModeMax = 0x7fffffff,
};
@ -376,10 +385,16 @@ enum Decoration {
DecorationNoContraction = 42,
DecorationInputAttachmentIndex = 43,
DecorationAlignment = 44,
DecorationMaxByteOffset = 45,
DecorationAlignmentId = 46,
DecorationMaxByteOffsetId = 47,
DecorationExplicitInterpAMD = 4999,
DecorationOverrideCoverageNV = 5248,
DecorationPassthroughNV = 5250,
DecorationViewportRelativeNV = 5252,
DecorationSecondaryViewportRelativeNV = 5256,
DecorationHlslCounterBufferGOOGLE = 5634,
DecorationHlslSemanticGOOGLE = 5635,
DecorationMax = 0x7fffffff,
};
@ -425,21 +440,35 @@ enum BuiltIn {
BuiltInSubgroupLocalInvocationId = 41,
BuiltInVertexIndex = 42,
BuiltInInstanceIndex = 43,
BuiltInSubgroupEqMask = 4416,
BuiltInSubgroupEqMaskKHR = 4416,
BuiltInSubgroupGeMask = 4417,
BuiltInSubgroupGeMaskKHR = 4417,
BuiltInSubgroupGtMask = 4418,
BuiltInSubgroupGtMaskKHR = 4418,
BuiltInSubgroupLeMask = 4419,
BuiltInSubgroupLeMaskKHR = 4419,
BuiltInSubgroupLtMask = 4420,
BuiltInSubgroupLtMaskKHR = 4420,
BuiltInBaseVertex = 4424,
BuiltInBaseInstance = 4425,
BuiltInDrawIndex = 4426,
BuiltInDeviceIndex = 4438,
BuiltInViewIndex = 4440,
BuiltInBaryCoordNoPerspAMD = 4992,
BuiltInBaryCoordNoPerspCentroidAMD = 4993,
BuiltInBaryCoordNoPerspSampleAMD = 4994,
BuiltInBaryCoordSmoothAMD = 4995,
BuiltInBaryCoordSmoothCentroidAMD = 4996,
BuiltInBaryCoordSmoothSampleAMD = 4997,
BuiltInBaryCoordPullModelAMD = 4998,
BuiltInFragStencilRefEXT = 5014,
BuiltInViewportMaskNV = 5253,
BuiltInSecondaryPositionNV = 5257,
BuiltInSecondaryViewportMaskNV = 5258,
BuiltInPositionPerViewNV = 5261,
BuiltInViewportMaskPerViewNV = 5262,
BuiltInFullyCoveredEXT = 5264,
BuiltInMax = 0x7fffffff,
};
@ -458,6 +487,8 @@ enum SelectionControlMask {
enum LoopControlShift {
LoopControlUnrollShift = 0,
LoopControlDontUnrollShift = 1,
LoopControlDependencyInfiniteShift = 2,
LoopControlDependencyLengthShift = 3,
LoopControlMax = 0x7fffffff,
};
@ -465,6 +496,8 @@ enum LoopControlMask {
LoopControlMaskNone = 0,
LoopControlUnrollMask = 0x00000001,
LoopControlDontUnrollMask = 0x00000002,
LoopControlDependencyInfiniteMask = 0x00000004,
LoopControlDependencyLengthMask = 0x00000008,
};
enum FunctionControlShift {
@ -538,6 +571,7 @@ enum GroupOperation {
GroupOperationReduce = 0,
GroupOperationInclusiveScan = 1,
GroupOperationExclusiveScan = 2,
GroupOperationClusteredReduce = 3,
GroupOperationMax = 0x7fffffff,
};
@ -615,6 +649,17 @@ enum Capability {
CapabilityStorageImageReadWithoutFormat = 55,
CapabilityStorageImageWriteWithoutFormat = 56,
CapabilityMultiViewport = 57,
CapabilitySubgroupDispatch = 58,
CapabilityNamedBarrier = 59,
CapabilityPipeStorage = 60,
CapabilityGroupNonUniform = 61,
CapabilityGroupNonUniformVote = 62,
CapabilityGroupNonUniformArithmetic = 63,
CapabilityGroupNonUniformBallot = 64,
CapabilityGroupNonUniformShuffle = 65,
CapabilityGroupNonUniformShuffleRelative = 66,
CapabilityGroupNonUniformClustered = 67,
CapabilityGroupNonUniformQuad = 68,
CapabilitySubgroupBallotKHR = 4423,
CapabilityDrawParameters = 4427,
CapabilitySubgroupVoteKHR = 4431,
@ -628,12 +673,24 @@ enum Capability {
CapabilityMultiView = 4439,
CapabilityVariablePointersStorageBuffer = 4441,
CapabilityVariablePointers = 4442,
CapabilityAtomicStorageOps = 4445,
CapabilitySampleMaskPostDepthCoverage = 4447,
CapabilityFloat16ImageAMD = 5008,
CapabilityImageGatherBiasLodAMD = 5009,
CapabilityFragmentMaskAMD = 5010,
CapabilityStencilExportEXT = 5013,
CapabilityImageReadWriteLodAMD = 5015,
CapabilitySampleMaskOverrideCoverageNV = 5249,
CapabilityGeometryShaderPassthroughNV = 5251,
CapabilityShaderViewportIndexLayerEXT = 5254,
CapabilityShaderViewportIndexLayerNV = 5254,
CapabilityShaderViewportMaskNV = 5255,
CapabilityShaderStereoViewNV = 5259,
CapabilityPerViewAttributesNV = 5260,
CapabilityFragmentFullyCoveredEXT = 5265,
CapabilitySubgroupShuffleINTEL = 5568,
CapabilitySubgroupBufferBlockIOINTEL = 5569,
CapabilitySubgroupImageBlockIOINTEL = 5570,
CapabilityMax = 0x7fffffff,
};
@ -932,6 +989,52 @@ enum Op {
OpAtomicFlagTestAndSet = 318,
OpAtomicFlagClear = 319,
OpImageSparseRead = 320,
OpSizeOf = 321,
OpTypePipeStorage = 322,
OpConstantPipeStorage = 323,
OpCreatePipeFromPipeStorage = 324,
OpGetKernelLocalSizeForSubgroupCount = 325,
OpGetKernelMaxNumSubgroups = 326,
OpTypeNamedBarrier = 327,
OpNamedBarrierInitialize = 328,
OpMemoryNamedBarrier = 329,
OpModuleProcessed = 330,
OpExecutionModeId = 331,
OpDecorateId = 332,
OpGroupNonUniformElect = 333,
OpGroupNonUniformAll = 334,
OpGroupNonUniformAny = 335,
OpGroupNonUniformAllEqual = 336,
OpGroupNonUniformBroadcast = 337,
OpGroupNonUniformBroadcastFirst = 338,
OpGroupNonUniformBallot = 339,
OpGroupNonUniformInverseBallot = 340,
OpGroupNonUniformBallotBitExtract = 341,
OpGroupNonUniformBallotBitCount = 342,
OpGroupNonUniformBallotFindLSB = 343,
OpGroupNonUniformBallotFindMSB = 344,
OpGroupNonUniformShuffle = 345,
OpGroupNonUniformShuffleXor = 346,
OpGroupNonUniformShuffleUp = 347,
OpGroupNonUniformShuffleDown = 348,
OpGroupNonUniformIAdd = 349,
OpGroupNonUniformFAdd = 350,
OpGroupNonUniformIMul = 351,
OpGroupNonUniformFMul = 352,
OpGroupNonUniformSMin = 353,
OpGroupNonUniformUMin = 354,
OpGroupNonUniformFMin = 355,
OpGroupNonUniformSMax = 356,
OpGroupNonUniformUMax = 357,
OpGroupNonUniformFMax = 358,
OpGroupNonUniformBitwiseAnd = 359,
OpGroupNonUniformBitwiseOr = 360,
OpGroupNonUniformBitwiseXor = 361,
OpGroupNonUniformLogicalAnd = 362,
OpGroupNonUniformLogicalOr = 363,
OpGroupNonUniformLogicalXor = 364,
OpGroupNonUniformQuadBroadcast = 365,
OpGroupNonUniformQuadSwap = 366,
OpSubgroupBallotKHR = 4421,
OpSubgroupFirstInvocationKHR = 4422,
OpSubgroupAllKHR = 4428,
@ -948,6 +1051,16 @@ enum Op {
OpGroupSMaxNonUniformAMD = 5007,
OpFragmentMaskFetchAMD = 5011,
OpFragmentFetchAMD = 5012,
OpSubgroupShuffleINTEL = 5571,
OpSubgroupShuffleDownINTEL = 5572,
OpSubgroupShuffleUpINTEL = 5573,
OpSubgroupShuffleXorINTEL = 5574,
OpSubgroupBlockReadINTEL = 5575,
OpSubgroupBlockWriteINTEL = 5576,
OpSubgroupImageBlockReadINTEL = 5577,
OpSubgroupImageBlockWriteINTEL = 5578,
OpDecorateStringGOOGLE = 5632,
OpMemberDecorateStringGOOGLE = 5633,
OpMax = 0x7fffffff,
};

View file

@ -19,6 +19,7 @@
#include "spirv.hpp"
#include <algorithm>
#include <cstdio>
#include <cstring>
#include <functional>
@ -27,6 +28,7 @@
#include <sstream>
#include <stack>
#include <stdexcept>
#include <stdint.h>
#include <string>
#include <unordered_map>
#include <unordered_set>
@ -92,6 +94,125 @@ void join_helper(std::ostringstream &stream, T &&t, Ts &&... ts)
}
}
class Bitset
{
public:
Bitset() = default;
explicit inline Bitset(uint64_t lower_)
: lower(lower_)
{
}
inline bool get(uint32_t bit) const
{
if (bit < 64)
return (lower & (1ull << bit)) != 0;
else
return higher.count(bit) != 0;
}
inline void set(uint32_t bit)
{
if (bit < 64)
lower |= 1ull << bit;
else
higher.insert(bit);
}
inline void clear(uint32_t bit)
{
if (bit < 64)
lower &= ~(1ull << bit);
else
higher.erase(bit);
}
inline uint64_t get_lower() const
{
return lower;
}
inline void reset()
{
lower = 0;
higher.clear();
}
inline void merge_and(const Bitset &other)
{
lower &= other.lower;
std::unordered_set<uint32_t> tmp_set;
for (auto &v : higher)
if (other.higher.count(v) != 0)
tmp_set.insert(v);
higher = std::move(tmp_set);
}
inline void merge_or(const Bitset &other)
{
lower |= other.lower;
for (auto &v : other.higher)
higher.insert(v);
}
inline bool operator==(const Bitset &other) const
{
if (lower != other.lower)
return false;
if (higher.size() != other.higher.size())
return false;
for (auto &v : higher)
if (other.higher.count(v) == 0)
return false;
return true;
}
inline bool operator!=(const Bitset &other) const
{
return !(*this == other);
}
template <typename Op>
void for_each_bit(const Op &op) const
{
// TODO: Add ctz-based iteration.
for (uint32_t i = 0; i < 64; i++)
{
if (lower & (1ull << i))
op(i);
}
if (higher.empty())
return;
// Need to enforce an order here for reproducible results,
// but hitting this path should happen extremely rarely, so having this slow path is fine.
std::vector<uint32_t> bits;
bits.reserve(higher.size());
for (auto &v : higher)
bits.push_back(v);
std::sort(std::begin(bits), std::end(bits));
for (auto &v : bits)
op(v);
}
inline bool empty() const
{
return lower == 0 && higher.empty();
}
private:
// The most common bits to set are all lower than 64,
// so optimize for this case. Bits spilling outside 64 go into a slower data structure.
// In almost all cases, the higher data structure will not be used.
uint64_t lower = 0;
std::unordered_set<uint32_t> higher;
};
// Helper template to avoid lots of nasty string temporary munging.
template <typename... Ts>
std::string join(Ts &&... ts)
@ -362,7 +483,7 @@ struct SPIREntryPoint
std::string orig_name;
std::vector<uint32_t> interface_variables;
uint64_t flags = 0;
Bitset flags;
struct
{
uint32_t x = 0, y = 0, z = 0;
@ -1051,7 +1172,7 @@ struct Meta
{
std::string alias;
std::string qualified_alias;
uint64_t decoration_flags = 0;
Bitset decoration_flags;
spv::BuiltIn builtin_type;
uint32_t location = 0;
uint32_t set = 0;
@ -1122,6 +1243,7 @@ static inline bool type_is_floating_point(const SPIRType &type)
{
return type.basetype == SPIRType::Half || type.basetype == SPIRType::Float || type.basetype == SPIRType::Double;
}
}
#endif
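
A minimal usage sketch of the Bitset helper added above (illustrative only; assumes spirv_common.hpp is includable and that Bitset sits in namespace spirv_cross like the rest of the header):

#include <cassert>
#include "spirv_common.hpp" // assumed include path

int main()
{
    spirv_cross::Bitset flags;
    flags.set(spv::DecorationBlock);                   // bit < 64: stored in the packed uint64_t
    flags.set(spv::DecorationHlslCounterBufferGOOGLE); // value 5634: spills into the unordered_set
    assert(flags.get(spv::DecorationBlock));

    spirv_cross::Bitset only_block(1ull << spv::DecorationBlock);
    flags.merge_and(only_block);                       // intersection drops the GOOGLE decoration
    assert(!flags.get(spv::DecorationHlslCounterBufferGOOGLE));

    flags.for_each_bit([](uint32_t bit) {              // visits set bits in increasing order
        (void)bit;
    });
    return 0;
}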

View file

@ -53,7 +53,7 @@ void CompilerCPP::emit_interface_block(const SPIRVariable &var)
string buffer_name;
auto flags = meta[type.self].decoration.decoration_flags;
if (flags & (1ull << DecorationBlock))
if (flags.get(DecorationBlock))
{
emit_block_struct(type);
buffer_name = to_name(type.self);
@ -115,7 +115,7 @@ void CompilerCPP::emit_push_constant_block(const SPIRVariable &var)
auto &type = get<SPIRType>(var.basetype);
auto &flags = meta[var.self].decoration.decoration_flags;
if ((flags & (1ull << DecorationBinding)) || (flags & (1ull << DecorationDescriptorSet)))
if (flags.get(DecorationBinding) || flags.get(DecorationDescriptorSet))
SPIRV_CROSS_THROW("Push constant blocks cannot be compiled to GLSL with Binding or Set syntax. "
"Remap to location with reflection API first or disable these decorations.");
@ -151,8 +151,8 @@ void CompilerCPP::emit_resources()
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) == 0)
(!meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_struct(type);
}
@ -172,8 +172,8 @@ void CompilerCPP::emit_resources()
if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassUniform &&
!is_hidden_variable(var) &&
(meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))))
(meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_buffer_block(var);
}
@ -317,7 +317,7 @@ string CompilerCPP::compile()
emit_header();
emit_resources();
emit_function(get<SPIRFunction>(entry_point), 0);
emit_function(get<SPIRFunction>(entry_point), Bitset());
pass_count++;
} while (force_recompile);
@ -373,7 +373,7 @@ void CompilerCPP::emit_c_linkage()
end_scope();
}
void CompilerCPP::emit_function_prototype(SPIRFunction &func, uint64_t)
void CompilerCPP::emit_function_prototype(SPIRFunction &func, const Bitset &)
{
if (func.self != entry_point)
add_function_overload(func);

View file

@ -51,7 +51,7 @@ public:
private:
void emit_header() override;
void emit_c_linkage();
void emit_function_prototype(SPIRFunction &func, uint64_t return_flags) override;
void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override;
void emit_resources();
void emit_buffer_block(const SPIRVariable &type) override;

View file

@ -101,10 +101,10 @@ bool Compiler::variable_storage_is_aliased(const SPIRVariable &v)
{
auto &type = get<SPIRType>(v.basetype);
bool ssbo = v.storage == StorageClassStorageBuffer ||
((meta[type.self].decoration.decoration_flags & (1ull << DecorationBufferBlock)) != 0);
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool image = type.basetype == SPIRType::Image;
bool counter = type.basetype == SPIRType::AtomicCounter;
bool is_restrict = (meta[v.self].decoration.decoration_flags & (1ull << DecorationRestrict)) != 0;
bool is_restrict = meta[v.self].decoration.decoration_flags.get(DecorationRestrict);
return !is_restrict && (ssbo || image || counter);
}
@ -688,7 +688,7 @@ ShaderResources Compiler::get_shader_resources(const unordered_set<uint32_t> *ac
// Input
if (var.storage == StorageClassInput && interface_variable_exists_in_entry_point(var.self))
{
if (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock))
if (meta[type.self].decoration.decoration_flags.get(DecorationBlock))
res.stage_inputs.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) });
else
@ -702,7 +702,7 @@ ShaderResources Compiler::get_shader_resources(const unordered_set<uint32_t> *ac
// Outputs
else if (var.storage == StorageClassOutput && interface_variable_exists_in_entry_point(var.self))
{
if (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock))
if (meta[type.self].decoration.decoration_flags.get(DecorationBlock))
res.stage_outputs.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) });
else
@ -710,14 +710,14 @@ ShaderResources Compiler::get_shader_resources(const unordered_set<uint32_t> *ac
}
// UBOs
else if (type.storage == StorageClassUniform &&
(meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock)))
(meta[type.self].decoration.decoration_flags.get(DecorationBlock)))
{
res.uniform_buffers.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) });
}
// Old way to declare SSBOs.
else if (type.storage == StorageClassUniform &&
(meta[type.self].decoration.decoration_flags & (1ull << DecorationBufferBlock)))
(meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
res.storage_buffers.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) });
@ -800,6 +800,7 @@ static bool is_valid_spirv_version(uint32_t version)
case 0x10000: // SPIR-V 1.0
case 0x10100: // SPIR-V 1.1
case 0x10200: // SPIR-V 1.2
case 0x10300: // SPIR-V 1.3
return true;
default:
@ -930,13 +931,13 @@ void Compiler::flatten_interface_block(uint32_t id)
{
auto &var = get<SPIRVariable>(id);
auto &type = get<SPIRType>(var.basetype);
auto flags = meta.at(type.self).decoration.decoration_flags;
auto &flags = meta.at(type.self).decoration.decoration_flags;
if (!type.array.empty())
SPIRV_CROSS_THROW("Type is array of UBOs.");
if (type.basetype != SPIRType::Struct)
SPIRV_CROSS_THROW("Type is not a struct.");
if ((flags & (1ull << DecorationBlock)) == 0)
if (!flags.get(DecorationBlock))
SPIRV_CROSS_THROW("Type is not a block.");
if (type.member_types.empty())
SPIRV_CROSS_THROW("Member list of struct is empty.");
@ -1036,7 +1037,7 @@ void Compiler::set_member_decoration(uint32_t id, uint32_t index, Decoration dec
{
meta.at(id).members.resize(max(meta[id].members.size(), size_t(index) + 1));
auto &dec = meta.at(id).members[index];
dec.decoration_flags |= 1ull << decoration;
dec.decoration_flags.set(decoration);
switch (decoration)
{
@ -1122,7 +1123,7 @@ uint32_t Compiler::get_member_decoration(uint32_t id, uint32_t index, Decoration
return 0;
auto &dec = m.members[index];
if (!(dec.decoration_flags & (1ull << decoration)))
if (!dec.decoration_flags.get(decoration))
return 0;
switch (decoration)
@ -1143,17 +1144,25 @@ uint32_t Compiler::get_member_decoration(uint32_t id, uint32_t index, Decoration
}
uint64_t Compiler::get_member_decoration_mask(uint32_t id, uint32_t index) const
{
return get_member_decoration_bitset(id, index).get_lower();
}
const Bitset &Compiler::get_member_decoration_bitset(uint32_t id, uint32_t index) const
{
auto &m = meta.at(id);
if (index >= m.members.size())
return 0;
{
static const Bitset cleared;
return cleared;
}
return m.members[index].decoration_flags;
}
bool Compiler::has_member_decoration(uint32_t id, uint32_t index, Decoration decoration) const
{
return get_member_decoration_mask(id, index) & (1ull << decoration);
return get_member_decoration_bitset(id, index).get(decoration);
}
void Compiler::unset_member_decoration(uint32_t id, uint32_t index, Decoration decoration)
@ -1164,7 +1173,7 @@ void Compiler::unset_member_decoration(uint32_t id, uint32_t index, Decoration d
auto &dec = m.members[index];
dec.decoration_flags &= ~(1ull << decoration);
dec.decoration_flags.clear(decoration);
switch (decoration)
{
case DecorationBuiltIn:
@ -1191,7 +1200,7 @@ void Compiler::unset_member_decoration(uint32_t id, uint32_t index, Decoration d
void Compiler::set_decoration(uint32_t id, Decoration decoration, uint32_t argument)
{
auto &dec = meta.at(id).decoration;
dec.decoration_flags |= 1ull << decoration;
dec.decoration_flags.set(decoration);
switch (decoration)
{
@ -1262,6 +1271,11 @@ const std::string Compiler::get_block_fallback_name(uint32_t id) const
}
uint64_t Compiler::get_decoration_mask(uint32_t id) const
{
return get_decoration_bitset(id).get_lower();
}
const Bitset &Compiler::get_decoration_bitset(uint32_t id) const
{
auto &dec = meta.at(id).decoration;
return dec.decoration_flags;
@ -1269,13 +1283,13 @@ uint64_t Compiler::get_decoration_mask(uint32_t id) const
bool Compiler::has_decoration(uint32_t id, Decoration decoration) const
{
return get_decoration_mask(id) & (1ull << decoration);
return get_decoration_bitset(id).get(decoration);
}
uint32_t Compiler::get_decoration(uint32_t id, Decoration decoration) const
{
auto &dec = meta.at(id).decoration;
if (!(dec.decoration_flags & (1ull << decoration)))
if (!dec.decoration_flags.get(decoration))
return 0;
switch (decoration)
@ -1306,7 +1320,7 @@ uint32_t Compiler::get_decoration(uint32_t id, Decoration decoration) const
void Compiler::unset_decoration(uint32_t id, Decoration decoration)
{
auto &dec = meta.at(id).decoration;
dec.decoration_flags &= ~(1ull << decoration);
dec.decoration_flags.clear(decoration);
switch (decoration)
{
case DecorationBuiltIn:
@ -1474,7 +1488,7 @@ void Compiler::parse(const Instruction &instruction)
{
auto &execution = entry_points[ops[0]];
auto mode = static_cast<ExecutionMode>(ops[1]);
execution.flags |= 1ull << mode;
execution.flags.set(mode);
switch (mode)
{
@ -2318,7 +2332,7 @@ uint32_t Compiler::type_struct_member_offset(const SPIRType &type, uint32_t inde
{
// Decoration must be set in valid SPIR-V, otherwise throw.
auto &dec = meta[type.self].members.at(index);
if (dec.decoration_flags & (1ull << DecorationOffset))
if (dec.decoration_flags.get(DecorationOffset))
return dec.offset;
else
SPIRV_CROSS_THROW("Struct member does not have Offset set.");
@ -2329,7 +2343,7 @@ uint32_t Compiler::type_struct_member_array_stride(const SPIRType &type, uint32_
// Decoration must be set in valid SPIR-V, otherwise throw.
// ArrayStride is part of the array type not OpMemberDecorate.
auto &dec = meta[type.member_types[index]].decoration;
if (dec.decoration_flags & (1ull << DecorationArrayStride))
if (dec.decoration_flags.get(DecorationArrayStride))
return dec.array_stride;
else
SPIRV_CROSS_THROW("Struct member does not have ArrayStride set.");
@ -2340,7 +2354,7 @@ uint32_t Compiler::type_struct_member_matrix_stride(const SPIRType &type, uint32
// Decoration must be set in valid SPIR-V, otherwise throw.
// MatrixStride is part of OpMemberDecorate.
auto &dec = meta[type.self].members[index];
if (dec.decoration_flags & (1ull << DecorationMatrixStride))
if (dec.decoration_flags.get(DecorationMatrixStride))
return dec.matrix_stride;
else
SPIRV_CROSS_THROW("Struct member does not have MatrixStride set.");
@ -2356,7 +2370,7 @@ size_t Compiler::get_declared_struct_size(const SPIRType &type) const
size_t Compiler::get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const
{
auto flags = get_member_decoration_mask(struct_type.self, index);
auto &flags = get_member_decoration_bitset(struct_type.self, index);
auto &type = get<SPIRType>(struct_type.member_types[index]);
switch (type.basetype)
@ -2401,9 +2415,9 @@ size_t Compiler::get_declared_struct_member_size(const SPIRType &struct_type, ui
uint32_t matrix_stride = type_struct_member_matrix_stride(struct_type, index);
// Per SPIR-V spec, matrices must be tightly packed and aligned up for vec3 accesses.
if (flags & (1ull << DecorationRowMajor))
if (flags.get(DecorationRowMajor))
return matrix_stride * vecsize;
else if (flags & (1ull << DecorationColMajor))
else if (flags.get(DecorationColMajor))
return matrix_stride * columns;
else
SPIRV_CROSS_THROW("Either row-major or column-major must be declared for matrices.");
@ -2511,6 +2525,11 @@ bool Compiler::types_are_logically_equivalent(const SPIRType &a, const SPIRType
}
uint64_t Compiler::get_execution_mode_mask() const
{
return get_entry_point().flags.get_lower();
}
const Bitset &Compiler::get_execution_mode_bitset() const
{
return get_entry_point().flags;
}
@ -2519,7 +2538,7 @@ void Compiler::set_execution_mode(ExecutionMode mode, uint32_t arg0, uint32_t ar
{
auto &execution = get_entry_point();
execution.flags |= 1ull << mode;
execution.flags.set(mode);
switch (mode)
{
case ExecutionModeLocalSize:
@ -2544,7 +2563,7 @@ void Compiler::set_execution_mode(ExecutionMode mode, uint32_t arg0, uint32_t ar
void Compiler::unset_execution_mode(ExecutionMode mode)
{
auto &execution = get_entry_point();
execution.flags &= ~(1ull << mode);
execution.flags.clear(mode);
}
uint32_t Compiler::get_work_group_size_specialization_constants(SpecializationConstant &x, SpecializationConstant &y,
@ -2969,8 +2988,10 @@ void Compiler::CombinedImageSamplerHandler::register_combined_image_sampler(SPIR
// Inherit RelaxedPrecision (and potentially other useful flags if deemed relevant).
auto &new_flags = compiler.meta[combined_id].decoration.decoration_flags;
auto old_flags = compiler.meta[sampler_id].decoration.decoration_flags;
new_flags = old_flags & (1ull << DecorationRelaxedPrecision);
auto &old_flags = compiler.meta[sampler_id].decoration.decoration_flags;
new_flags.reset();
if (old_flags.get(DecorationRelaxedPrecision))
new_flags.set(DecorationRelaxedPrecision);
param.id = combined_id;
@ -3207,8 +3228,10 @@ bool Compiler::CombinedImageSamplerHandler::handle(Op opcode, const uint32_t *ar
// Inherit RelaxedPrecision (and potentially other useful flags if deemed relevant).
auto &new_flags = compiler.meta[combined_id].decoration.decoration_flags;
// Fetch inherits precision from the image, not sampler (there is no sampler).
auto old_flags = compiler.meta[is_fetch ? image_id : sampler_id].decoration.decoration_flags;
new_flags = old_flags & (1ull << DecorationRelaxedPrecision);
auto &old_flags = compiler.meta[is_fetch ? image_id : sampler_id].decoration.decoration_flags;
new_flags.reset();
if (old_flags.get(DecorationRelaxedPrecision))
new_flags.set(DecorationRelaxedPrecision);
compiler.combined_image_samplers.push_back({ combined_id, image_id, sampler_id });
}
@ -3859,7 +3882,7 @@ void Compiler::analyze_variable_scope(SPIRFunction &entry)
}
}
uint64_t Compiler::get_buffer_block_flags(const SPIRVariable &var)
Bitset Compiler::get_buffer_block_flags(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
assert(type.basetype == SPIRType::Struct);
@ -3867,16 +3890,17 @@ uint64_t Compiler::get_buffer_block_flags(const SPIRVariable &var)
// Some flags like non-writable, non-readable are actually found
// as member decorations. If all members have a decoration set, propagate
// the decoration up as a regular variable decoration.
uint64_t base_flags = meta[var.self].decoration.decoration_flags;
Bitset base_flags = meta[var.self].decoration.decoration_flags;
if (type.member_types.empty())
return base_flags;
uint64_t all_members_flag_mask = ~(0ull);
for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++)
all_members_flag_mask &= get_member_decoration_mask(type.self, i);
Bitset all_members_flags = get_member_decoration_bitset(type.self, 0);
for (uint32_t i = 1; i < uint32_t(type.member_types.size()); i++)
all_members_flags.merge_and(get_member_decoration_bitset(type.self, i));
return base_flags | all_members_flag_mask;
base_flags.merge_or(all_members_flags);
return base_flags;
}
bool Compiler::get_common_basic_type(const SPIRType &type, SPIRType::BaseType &base_type)
@ -3904,7 +3928,7 @@ bool Compiler::get_common_basic_type(const SPIRType &type, SPIRType::BaseType &b
}
}
void Compiler::ActiveBuiltinHandler::handle_builtin(const SPIRType &type, BuiltIn builtin, uint64_t decoration_flags)
void Compiler::ActiveBuiltinHandler::handle_builtin(const SPIRType &type, BuiltIn builtin, const Bitset &decoration_flags)
{
// If used, we will need to explicitly declare a new array size for these builtins.
@ -3928,7 +3952,7 @@ void Compiler::ActiveBuiltinHandler::handle_builtin(const SPIRType &type, BuiltI
}
else if (builtin == BuiltInPosition)
{
if (decoration_flags & (1ull << DecorationInvariant))
if (decoration_flags.get(DecorationInvariant))
compiler.position_invariant = true;
}
}
@ -3945,7 +3969,7 @@ bool Compiler::ActiveBuiltinHandler::handle(spv::Op opcode, const uint32_t *args
auto &type = compiler.get<SPIRType>(var->basetype);
auto &flags =
type.storage == StorageClassInput ? compiler.active_input_builtins : compiler.active_output_builtins;
flags |= 1ull << decorations.builtin_type;
flags.set(decorations.builtin_type);
handle_builtin(type, decorations.builtin_type, decorations.decoration_flags);
}
};
@ -4033,7 +4057,7 @@ bool Compiler::ActiveBuiltinHandler::handle(spv::Op opcode, const uint32_t *args
auto &decorations = compiler.meta[type->self].members[index];
if (decorations.builtin)
{
flags |= 1ull << decorations.builtin_type;
flags.set(decorations.builtin_type);
handle_builtin(compiler.get<SPIRType>(type->member_types[index]), decorations.builtin_type,
decorations.decoration_flags);
}
@ -4059,8 +4083,8 @@ bool Compiler::ActiveBuiltinHandler::handle(spv::Op opcode, const uint32_t *args
void Compiler::update_active_builtins()
{
active_input_builtins = 0;
active_output_builtins = 0;
active_input_builtins.reset();
active_output_builtins.reset();
cull_distance_count = 0;
clip_distance_count = 0;
ActiveBuiltinHandler handler(*this);
@ -4070,20 +4094,20 @@ void Compiler::update_active_builtins()
// Returns whether this shader uses a builtin of the storage class
bool Compiler::has_active_builtin(BuiltIn builtin, StorageClass storage)
{
uint64_t flags;
const Bitset *flags;
switch (storage)
{
case StorageClassInput:
flags = active_input_builtins;
flags = &active_input_builtins;
break;
case StorageClassOutput:
flags = active_output_builtins;
flags = &active_output_builtins;
break;
default:
return false;
}
return flags & (1ull << builtin);
return flags->get(builtin);
}
void Compiler::analyze_image_and_sampler_usage()
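
As a hedged illustration of the rewritten get_buffer_block_flags() above, this standalone sketch (not part of the commit) shows how merge_and AND-reduces per-member decorations before merge_or folds them into the variable's own flags:

#include <cassert>
#include "spirv_common.hpp" // assumed include path

int main()
{
    using spirv_cross::Bitset;

    Bitset member0, member1;
    member0.set(spv::DecorationNonWritable);
    member1.set(spv::DecorationNonWritable);
    member1.set(spv::DecorationRelaxedPrecision); // present on only one member

    // AND-reduce member decorations, mirroring the loop in get_buffer_block_flags().
    Bitset all_members = member0;
    all_members.merge_and(member1);

    Bitset block_flags;               // decorations on the variable itself (empty here)
    block_flags.merge_or(all_members);

    assert(block_flags.get(spv::DecorationNonWritable));       // common to all members, propagated
    assert(!block_flags.get(spv::DecorationRelaxedPrecision)); // not common, dropped
    return 0;
}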

View file

@ -144,7 +144,9 @@ public:
// Gets a bitmask for the decorations which are applied to ID.
// I.e. (1ull << spv::DecorationFoo) | (1ull << spv::DecorationBar)
SPIRV_CROSS_DEPRECATED("Please use get_decoration_bitset instead.")
uint64_t get_decoration_mask(uint32_t id) const;
const Bitset &get_decoration_bitset(uint32_t id) const;
// Returns whether the decoration has been applied to the ID.
bool has_decoration(uint32_t id, spv::Decoration decoration) const;
@ -195,7 +197,9 @@ public:
void set_member_qualified_name(uint32_t type_id, uint32_t index, const std::string &name);
// Gets the decoration mask for a member of a struct, similar to get_decoration_mask.
SPIRV_CROSS_DEPRECATED("Please use get_member_decoration_bitset instead.")
uint64_t get_member_decoration_mask(uint32_t id, uint32_t index) const;
const Bitset &get_member_decoration_bitset(uint32_t id, uint32_t index) const;
// Returns whether the decoration has been applied to a member of a struct.
bool has_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration) const;
@ -311,7 +315,10 @@ public:
spv::ExecutionModel execution_model) const;
// Query and modify OpExecutionMode.
SPIRV_CROSS_DEPRECATED("Please use get_execution_mode_bitset instead.")
uint64_t get_execution_mode_mask() const;
const Bitset &get_execution_mode_bitset() const;
void unset_execution_mode(spv::ExecutionMode mode);
void set_execution_mode(spv::ExecutionMode mode, uint32_t arg0 = 0, uint32_t arg1 = 0, uint32_t arg2 = 0);
@ -747,7 +754,7 @@ protected:
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
Compiler &compiler;
void handle_builtin(const SPIRType &type, spv::BuiltIn builtin, uint64_t decoration_flags);
void handle_builtin(const SPIRType &type, spv::BuiltIn builtin, const Bitset &decoration_flags);
};
bool traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHandler &handler) const;
@ -759,15 +766,15 @@ protected:
VariableTypeRemapCallback variable_remap_callback;
uint64_t get_buffer_block_flags(const SPIRVariable &var);
Bitset get_buffer_block_flags(const SPIRVariable &var);
bool get_common_basic_type(const SPIRType &type, SPIRType::BaseType &base_type);
std::unordered_set<uint32_t> forced_temporaries;
std::unordered_set<uint32_t> forwarded_temporaries;
std::unordered_set<uint32_t> hoisted_temporaries;
uint64_t active_input_builtins = 0;
uint64_t active_output_builtins = 0;
Bitset active_input_builtins;
Bitset active_output_builtins;
uint32_t clip_distance_count = 0;
uint32_t cull_distance_count = 0;
bool position_invariant = false;

View file

@ -341,7 +341,7 @@ void CompilerGLSL::find_static_extensions()
if (!options.es && options.version < 150)
require_extension("GL_ARB_geometry_shader4");
if ((execution.flags & (1ull << ExecutionModeInvocations)) && execution.invocations != 1)
if (execution.flags.get(ExecutionModeInvocations) && execution.invocations != 1)
{
// Instanced GS is part of 400 core or this extension.
if (!options.es && options.version < 400)
@ -398,7 +398,7 @@ string CompilerGLSL::compile()
emit_header();
emit_resources();
emit_function(get<SPIRFunction>(entry_point), 0);
emit_function(get<SPIRFunction>(entry_point), Bitset());
pass_count++;
} while (force_recompile);
@ -429,7 +429,7 @@ void CompilerGLSL::emit_header()
statement("#endif");
}
// Needed for: layout(early_fragment_tests) in;
if (execution.flags & (1ull << ExecutionModeEarlyFragmentTests))
if (execution.flags.get(ExecutionModeEarlyFragmentTests))
require_extension("GL_ARB_shader_image_load_store");
}
@ -446,54 +446,54 @@ void CompilerGLSL::emit_header()
{
case ExecutionModelGeometry:
outputs.push_back(join("max_vertices = ", execution.output_vertices));
if ((execution.flags & (1ull << ExecutionModeInvocations)) && execution.invocations != 1)
if ((execution.flags.get(ExecutionModeInvocations)) && execution.invocations != 1)
inputs.push_back(join("invocations = ", execution.invocations));
if (execution.flags & (1ull << ExecutionModeInputPoints))
if (execution.flags.get(ExecutionModeInputPoints))
inputs.push_back("points");
if (execution.flags & (1ull << ExecutionModeInputLines))
if (execution.flags.get(ExecutionModeInputLines))
inputs.push_back("lines");
if (execution.flags & (1ull << ExecutionModeInputLinesAdjacency))
if (execution.flags.get(ExecutionModeInputLinesAdjacency))
inputs.push_back("lines_adjacency");
if (execution.flags & (1ull << ExecutionModeTriangles))
if (execution.flags.get(ExecutionModeTriangles))
inputs.push_back("triangles");
if (execution.flags & (1ull << ExecutionModeInputTrianglesAdjacency))
if (execution.flags.get(ExecutionModeInputTrianglesAdjacency))
inputs.push_back("triangles_adjacency");
if (execution.flags & (1ull << ExecutionModeOutputTriangleStrip))
if (execution.flags.get(ExecutionModeOutputTriangleStrip))
outputs.push_back("triangle_strip");
if (execution.flags & (1ull << ExecutionModeOutputPoints))
if (execution.flags.get(ExecutionModeOutputPoints))
outputs.push_back("points");
if (execution.flags & (1ull << ExecutionModeOutputLineStrip))
if (execution.flags.get(ExecutionModeOutputLineStrip))
outputs.push_back("line_strip");
break;
case ExecutionModelTessellationControl:
if (execution.flags & (1ull << ExecutionModeOutputVertices))
if (execution.flags.get(ExecutionModeOutputVertices))
outputs.push_back(join("vertices = ", execution.output_vertices));
break;
case ExecutionModelTessellationEvaluation:
if (execution.flags & (1ull << ExecutionModeQuads))
if (execution.flags.get(ExecutionModeQuads))
inputs.push_back("quads");
if (execution.flags & (1ull << ExecutionModeTriangles))
if (execution.flags.get(ExecutionModeTriangles))
inputs.push_back("triangles");
if (execution.flags & (1ull << ExecutionModeIsolines))
if (execution.flags.get(ExecutionModeIsolines))
inputs.push_back("isolines");
if (execution.flags & (1ull << ExecutionModePointMode))
if (execution.flags.get(ExecutionModePointMode))
inputs.push_back("point_mode");
if ((execution.flags & (1ull << ExecutionModeIsolines)) == 0)
if (!execution.flags.get(ExecutionModeIsolines))
{
if (execution.flags & (1ull << ExecutionModeVertexOrderCw))
if (execution.flags.get(ExecutionModeVertexOrderCw))
inputs.push_back("cw");
if (execution.flags & (1ull << ExecutionModeVertexOrderCcw))
if (execution.flags.get(ExecutionModeVertexOrderCcw))
inputs.push_back("ccw");
}
if (execution.flags & (1ull << ExecutionModeSpacingFractionalEven))
if (execution.flags.get(ExecutionModeSpacingFractionalEven))
inputs.push_back("fractional_even_spacing");
if (execution.flags & (1ull << ExecutionModeSpacingFractionalOdd))
if (execution.flags.get(ExecutionModeSpacingFractionalOdd))
inputs.push_back("fractional_odd_spacing");
if (execution.flags & (1ull << ExecutionModeSpacingEqual))
if (execution.flags.get(ExecutionModeSpacingEqual))
inputs.push_back("equal_spacing");
break;
@ -583,11 +583,11 @@ void CompilerGLSL::emit_header()
}
}
if (execution.flags & (1ull << ExecutionModeEarlyFragmentTests))
if (execution.flags.get(ExecutionModeEarlyFragmentTests))
inputs.push_back("early_fragment_tests");
if (execution.flags & (1ull << ExecutionModeDepthGreater))
if (execution.flags.get(ExecutionModeDepthGreater))
inputs.push_back("depth_greater");
if (execution.flags & (1ull << ExecutionModeDepthLess))
if (execution.flags.get(ExecutionModeDepthLess))
inputs.push_back("depth_less");
break;
@ -648,39 +648,41 @@ void CompilerGLSL::emit_struct(SPIRType &type)
statement("");
}
uint64_t CompilerGLSL::combined_decoration_for_member(const SPIRType &type, uint32_t index)
Bitset CompilerGLSL::combined_decoration_for_member(const SPIRType &type, uint32_t index)
{
uint64_t flags = 0;
Bitset flags;
auto &memb = meta[type.self].members;
if (index >= memb.size())
return 0;
return flags;
auto &dec = memb[index];
// If our type is a struct, traverse all the members as well recursively.
flags |= dec.decoration_flags;
flags.merge_or(dec.decoration_flags);
for (uint32_t i = 0; i < type.member_types.size(); i++)
flags |= combined_decoration_for_member(get<SPIRType>(type.member_types[i]), i);
flags.merge_or(combined_decoration_for_member(get<SPIRType>(type.member_types[i]), i));
return flags;
}
string CompilerGLSL::to_interpolation_qualifiers(uint64_t flags)
string CompilerGLSL::to_interpolation_qualifiers(const Bitset &flags)
{
string res;
//if (flags & (1ull << DecorationSmooth))
// res += "smooth ";
if (flags & (1ull << DecorationFlat))
if (flags.get(DecorationFlat))
res += "flat ";
if (flags & (1ull << DecorationNoPerspective))
if (flags.get(DecorationNoPerspective))
res += "noperspective ";
if (flags & (1ull << DecorationCentroid))
if (flags.get(DecorationCentroid))
res += "centroid ";
if (flags & (1ull << DecorationPatch))
if (flags.get(DecorationPatch))
res += "patch ";
if (flags & (1ull << DecorationSample))
if (flags.get(DecorationSample))
res += "sample ";
if (flags & (1ull << DecorationInvariant))
if (flags.get(DecorationInvariant))
res += "invariant ";
if (flags.get(DecorationExplicitInterpAMD))
res += "__explicitInterpAMD";
return res;
}
@ -690,8 +692,8 @@ string CompilerGLSL::layout_for_member(const SPIRType &type, uint32_t index)
if (is_legacy())
return "";
bool is_block = (meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) != 0;
bool is_block = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (!is_block)
return "";
@ -718,18 +720,18 @@ string CompilerGLSL::layout_for_member(const SPIRType &type, uint32_t index)
// buffer UBO { layout(row_major) Foo foo; }; // Apply the layout on top-level.
auto flags = combined_decoration_for_member(type, index);
if (flags & (1ull << DecorationRowMajor))
if (flags.get(DecorationRowMajor))
attr.push_back("row_major");
// We don't emit any global layouts, so column_major is default.
//if (flags & (1ull << DecorationColMajor))
// attr.push_back("column_major");
if ((dec.decoration_flags & (1ull << DecorationLocation)) != 0 && can_use_io_location(type.storage))
if (dec.decoration_flags.get(DecorationLocation) && can_use_io_location(type.storage))
attr.push_back(join("location = ", dec.location));
// DecorationCPacked is set by layout_for_variable earlier to mark that we need to emit offset qualifiers.
// This is only done selectively in GLSL as needed.
if (has_decoration(type.self, DecorationCPacked) && (dec.decoration_flags & (1ull << DecorationOffset)) != 0)
if (has_decoration(type.self, DecorationCPacked) && dec.decoration_flags.get(DecorationOffset))
attr.push_back(join("offset = ", dec.offset));
if (attr.empty())
@ -881,7 +883,7 @@ uint32_t CompilerGLSL::type_to_packed_base_size(const SPIRType &type, BufferPack
}
}
uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, uint64_t flags, BufferPackingStandard packing)
uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing)
{
if (!type.array.empty())
{
@ -940,7 +942,7 @@ uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, uint64_t f
// Rule 5. Column-major matrices are stored as arrays of
// vectors.
if ((flags & (1ull << DecorationColMajor)) && type.columns > 1)
if (flags.get(DecorationColMajor) && type.columns > 1)
{
if (packing_is_vec4_padded(packing))
return 4 * base_alignment;
@ -953,7 +955,7 @@ uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, uint64_t f
// Rule 6 implied.
// Rule 7.
if ((flags & (1ull << DecorationRowMajor)) && type.vecsize > 1)
if (flags.get(DecorationRowMajor) && type.vecsize > 1)
{
if (packing_is_vec4_padded(packing))
return 4 * base_alignment;
@ -969,7 +971,7 @@ uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, uint64_t f
SPIRV_CROSS_THROW("Did not find suitable rule for type. Bogus decorations?");
}
uint32_t CompilerGLSL::type_to_packed_array_stride(const SPIRType &type, uint64_t flags, BufferPackingStandard packing)
uint32_t CompilerGLSL::type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing)
{
// Array stride is equal to aligned size of the underlying type.
uint32_t parent = type.parent_type;
@ -991,7 +993,7 @@ uint32_t CompilerGLSL::type_to_packed_array_stride(const SPIRType &type, uint64_
}
}
uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, uint64_t flags, BufferPackingStandard packing)
uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing)
{
if (!type.array.empty())
{
@ -1031,7 +1033,7 @@ uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, uint64_t flags,
if (type.columns == 1)
size = type.vecsize * base_alignment;
if ((flags & (1ull << DecorationColMajor)) && type.columns > 1)
if (flags.get(DecorationColMajor) && type.columns > 1)
{
if (packing_is_vec4_padded(packing))
size = type.columns * 4 * base_alignment;
@ -1041,7 +1043,7 @@ uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, uint64_t flags,
size = type.columns * type.vecsize * base_alignment;
}
if ((flags & (1ull << DecorationRowMajor)) && type.vecsize > 1)
if (flags.get(DecorationRowMajor) && type.vecsize > 1)
{
if (packing_is_vec4_padded(packing))
size = type.vecsize * 4 * base_alignment;
@ -1182,26 +1184,26 @@ string CompilerGLSL::layout_for_variable(const SPIRVariable &var)
if (options.vulkan_semantics && var.storage == StorageClassPushConstant)
attr.push_back("push_constant");
if (flags & (1ull << DecorationRowMajor))
if (flags.get(DecorationRowMajor))
attr.push_back("row_major");
if (flags & (1ull << DecorationColMajor))
if (flags.get(DecorationColMajor))
attr.push_back("column_major");
if (options.vulkan_semantics)
{
if (flags & (1ull << DecorationInputAttachmentIndex))
if (flags.get(DecorationInputAttachmentIndex))
attr.push_back(join("input_attachment_index = ", dec.input_attachment));
}
if ((flags & (1ull << DecorationLocation)) != 0 && can_use_io_location(var.storage))
if (flags.get(DecorationLocation) && can_use_io_location(var.storage))
{
uint64_t combined_decoration = 0;
Bitset combined_decoration;
for (uint32_t i = 0; i < meta[type.self].members.size(); i++)
combined_decoration |= combined_decoration_for_member(type, i);
combined_decoration.merge_or(combined_decoration_for_member(type, i));
// If our members have location decorations, we don't need to
// emit location decorations at the top as well (looks weird).
if ((combined_decoration & (1ull << DecorationLocation)) == 0)
if (!combined_decoration.get(DecorationLocation))
attr.push_back(join("location = ", dec.location));
}
@ -1209,7 +1211,7 @@ string CompilerGLSL::layout_for_variable(const SPIRVariable &var)
// we should preserve it in Vulkan GLSL mode.
if (var.storage != StorageClassPushConstant)
{
if ((flags & (1ull << DecorationDescriptorSet)) && (dec.set != 0 || options.vulkan_semantics))
if (flags.get(DecorationDescriptorSet) && (dec.set != 0 || options.vulkan_semantics))
attr.push_back(join("set = ", dec.set));
}
@ -1219,19 +1221,19 @@ string CompilerGLSL::layout_for_variable(const SPIRVariable &var)
else
can_use_binding = options.enable_420pack_extension || (options.version >= 420);
if (can_use_binding && (flags & (1ull << DecorationBinding)))
if (can_use_binding && flags.get(DecorationBinding))
attr.push_back(join("binding = ", dec.binding));
if (flags & (1ull << DecorationOffset))
if (flags.get(DecorationOffset))
attr.push_back(join("offset = ", dec.offset));
bool push_constant_block = options.vulkan_semantics && var.storage == StorageClassPushConstant;
bool ssbo_block = var.storage == StorageClassStorageBuffer ||
(var.storage == StorageClassUniform && (typeflags & (1ull << DecorationBufferBlock)));
(var.storage == StorageClassUniform && typeflags.get(DecorationBufferBlock));
// Instead of adding explicit offsets for every element here, just assume we're using std140 or std430.
// If SPIR-V does not comply with either layout, we cannot really work around it.
if (var.storage == StorageClassUniform && (typeflags & (1ull << DecorationBlock)))
if (var.storage == StorageClassUniform && typeflags.get(DecorationBlock))
{
if (buffer_is_packing_standard(type, BufferPackingStd140))
attr.push_back("std140");
@ -1339,7 +1341,8 @@ void CompilerGLSL::emit_push_constant_block_glsl(const SPIRVariable &var)
auto &type = get<SPIRType>(var.basetype);
auto &flags = meta[var.self].decoration.decoration_flags;
flags &= ~((1ull << DecorationBinding) | (1ull << DecorationDescriptorSet));
flags.clear(DecorationBinding);
flags.clear(DecorationDescriptorSet);
#if 0
if (flags & ((1ull << DecorationBinding) | (1ull << DecorationDescriptorSet)))
@ -1350,12 +1353,13 @@ void CompilerGLSL::emit_push_constant_block_glsl(const SPIRVariable &var)
// We're emitting the push constant block as a regular struct, so disable the block qualifier temporarily.
// Otherwise, we will end up emitting layout() qualifiers on naked structs which is not allowed.
auto &block_flags = meta[type.self].decoration.decoration_flags;
uint64_t block_flag = block_flags & (1ull << DecorationBlock);
block_flags &= ~block_flag;
bool block_flag = block_flags.get(DecorationBlock);
block_flags.clear(DecorationBlock);
emit_struct(type);
block_flags |= block_flag;
if (block_flag)
block_flags.set(DecorationBlock);
emit_uniform(var);
statement("");
@ -1375,17 +1379,18 @@ void CompilerGLSL::emit_buffer_block_legacy(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
bool ssbo = var.storage == StorageClassStorageBuffer ||
((meta[type.self].decoration.decoration_flags & (1ull << DecorationBufferBlock)) != 0);
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (ssbo)
SPIRV_CROSS_THROW("SSBOs not supported in legacy targets.");
// We're emitting the buffer block as a regular struct, so disable the block qualifier temporarily.
// Otherwise, we will end up emitting layout() qualifiers on naked structs which is not allowed.
auto &block_flags = meta[type.self].decoration.decoration_flags;
uint64_t block_flag = block_flags & (1ull << DecorationBlock);
block_flags &= ~block_flag;
bool block_flag = block_flags.get(DecorationBlock);
block_flags.clear(DecorationBlock);
emit_struct(type);
block_flags |= block_flag;
if (block_flag)
block_flags.set(DecorationBlock);
emit_uniform(var);
statement("");
}
@ -1394,13 +1399,13 @@ void CompilerGLSL::emit_buffer_block_native(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
uint64_t flags = get_buffer_block_flags(var);
Bitset flags = get_buffer_block_flags(var);
bool ssbo = var.storage == StorageClassStorageBuffer ||
((meta[type.self].decoration.decoration_flags & (1ull << DecorationBufferBlock)) != 0);
bool is_restrict = ssbo && (flags & (1ull << DecorationRestrict)) != 0;
bool is_writeonly = ssbo && (flags & (1ull << DecorationNonReadable)) != 0;
bool is_readonly = ssbo && (flags & (1ull << DecorationNonWritable)) != 0;
bool is_coherent = ssbo && (flags & (1ull << DecorationCoherent)) != 0;
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool is_restrict = ssbo && flags.get(DecorationRestrict);
bool is_writeonly = ssbo && flags.get(DecorationNonReadable);
bool is_readonly = ssbo && flags.get(DecorationNonWritable);
bool is_coherent = ssbo && flags.get(DecorationCoherent);
// Block names should never alias, but from HLSL input they kind of can because block types are reused for UAVs ...
auto buffer_name = to_name(type.self, false);
@ -1497,7 +1502,7 @@ void CompilerGLSL::emit_flattened_io_block(const SPIRVariable &var, const char *
auto old_flags = meta[type.self].decoration.decoration_flags;
// Emit the members as if they are part of a block to get all qualifiers.
meta[type.self].decoration.decoration_flags |= 1ull << DecorationBlock;
meta[type.self].decoration.decoration_flags.set(DecorationBlock);
type.member_name_cache.clear();
@ -1534,7 +1539,7 @@ void CompilerGLSL::emit_interface_block(const SPIRVariable &var)
auto &type = get<SPIRType>(var.basetype);
// Either make it plain in/out or in/out blocks depending on what shader is doing ...
bool block = (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock)) != 0;
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
const char *qual = to_storage_qualifiers_glsl(var);
if (block)
@ -1716,7 +1721,7 @@ void CompilerGLSL::replace_fragment_output(SPIRVariable &var)
{
auto &m = meta[var.self].decoration;
uint32_t location = 0;
if (m.decoration_flags & (1ull << DecorationLocation))
if (m.decoration_flags.get(DecorationLocation))
location = m.location;
// If our variable is arrayed, we must not emit the array part of this as the SPIR-V will
@ -1835,10 +1840,11 @@ void CompilerGLSL::fixup_image_load_store_access()
// If any no-read/no-write flags are actually set, assume that the compiler knows what it's doing.
auto &flags = meta.at(var).decoration.decoration_flags;
static const uint64_t NoWrite = 1ull << DecorationNonWritable;
static const uint64_t NoRead = 1ull << DecorationNonReadable;
if ((flags & (NoWrite | NoRead)) == 0)
flags |= NoRead | NoWrite;
if (!flags.get(DecorationNonWritable) && !flags.get(DecorationNonReadable))
{
flags.set(DecorationNonWritable);
flags.set(DecorationNonReadable);
}
}
}
}
@ -2060,8 +2066,8 @@ void CompilerGLSL::emit_resources()
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) == 0)
(!meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_struct(type);
}
@ -2077,8 +2083,8 @@ void CompilerGLSL::emit_resources()
auto &type = get<SPIRType>(var.basetype);
bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform;
bool has_block_flags = (meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) != 0;
bool has_block_flags = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (var.storage != StorageClassFunction && type.pointer && is_block_storage && !is_hidden_variable(var) &&
has_block_flags)
@ -4963,7 +4969,7 @@ std::string CompilerGLSL::flattened_access_chain_struct(uint32_t base, const uin
uint32_t matrix_stride = 0;
if (member_type.columns > 1)
{
need_transpose = (combined_decoration_for_member(target_type, i) & (1ull << DecorationRowMajor)) != 0;
need_transpose = combined_decoration_for_member(target_type, i).get(DecorationRowMajor);
matrix_stride = type_struct_member_matrix_stride(target_type, i);
}
@ -5162,7 +5168,7 @@ std::pair<std::string, uint32_t> CompilerGLSL::flattened_access_chain_offset(con
{
matrix_stride = type_struct_member_matrix_stride(struct_type, index);
row_major_matrix_needs_conversion =
(combined_decoration_for_member(struct_type, index) & (1ull << DecorationRowMajor)) != 0;
combined_decoration_for_member(struct_type, index).get(DecorationRowMajor);
}
else
row_major_matrix_needs_conversion = false;
@ -6869,9 +6875,9 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
if (var)
{
auto &flags = meta.at(var->self).decoration.decoration_flags;
if (flags & (1ull << DecorationNonReadable))
if (flags.get(DecorationNonReadable))
{
flags &= ~(1ull << DecorationNonReadable);
flags.clear(DecorationNonReadable);
force_recompile = true;
}
}
@ -7017,9 +7023,9 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
if (var)
{
auto &flags = meta.at(var->self).decoration.decoration_flags;
if (flags & (1ull << DecorationNonWritable))
if (flags.get(DecorationNonWritable))
{
flags &= ~(1ull << DecorationNonWritable);
flags.clear(DecorationNonWritable);
force_recompile = true;
}
}
@ -7433,7 +7439,7 @@ bool CompilerGLSL::is_non_native_row_major_matrix(uint32_t id)
return false;
// Non-matrix or column-major matrix types do not need to be converted.
if (!(meta[id].decoration.decoration_flags & (1ull << DecorationRowMajor)))
if (!meta[id].decoration.decoration_flags.get(DecorationRowMajor))
return false;
// Only square row-major matrices can be converted at this time.
@ -7454,7 +7460,7 @@ bool CompilerGLSL::member_is_non_native_row_major_matrix(const SPIRType &type, u
return false;
// Non-matrix or column-major matrix types do not need to be converted.
if (!(combined_decoration_for_member(type, index) & (1ull << DecorationRowMajor)))
if (!combined_decoration_for_member(type, index).get(DecorationRowMajor))
return false;
// Only square row-major matrices can be converted at this time.
@ -7498,14 +7504,15 @@ void CompilerGLSL::emit_struct_member(const SPIRType &type, uint32_t member_type
{
auto &membertype = get<SPIRType>(member_type_id);
uint64_t memberflags = 0;
Bitset memberflags;
auto &memb = meta[type.self].members;
if (index < memb.size())
memberflags = memb[index].decoration_flags;
string qualifiers;
bool is_block = (meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) != 0;
bool is_block = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (is_block)
qualifiers = to_interpolation_qualifiers(memberflags);
@ -7514,7 +7521,7 @@ void CompilerGLSL::emit_struct_member(const SPIRType &type, uint32_t member_type
variable_decl(membertype, to_member_name(type, index)), ";");
}
const char *CompilerGLSL::flags_to_precision_qualifiers_glsl(const SPIRType &type, uint64_t flags)
const char *CompilerGLSL::flags_to_precision_qualifiers_glsl(const SPIRType &type, const Bitset &flags)
{
// Structs do not have precision qualifiers, neither do doubles (desktop only anyways, so no mediump/highp).
if (type.basetype != SPIRType::Float && type.basetype != SPIRType::Int && type.basetype != SPIRType::UInt &&
@ -7526,7 +7533,7 @@ const char *CompilerGLSL::flags_to_precision_qualifiers_glsl(const SPIRType &typ
{
auto &execution = get_entry_point();
if (flags & (1ull << DecorationRelaxedPrecision))
if (flags.get(DecorationRelaxedPrecision))
{
bool implied_fmediump = type.basetype == SPIRType::Float &&
options.fragment.default_float_precision == Options::Mediump &&
@ -7557,7 +7564,7 @@ const char *CompilerGLSL::flags_to_precision_qualifiers_glsl(const SPIRType &typ
{
// Vulkan GLSL supports precision qualifiers, even in desktop profiles, which is convenient.
// The default is highp however, so only emit mediump in the rare case that a shader has these.
if (flags & (1ull << DecorationRelaxedPrecision))
if (flags.get(DecorationRelaxedPrecision))
{
bool can_use_mediump =
type.basetype == SPIRType::Float || type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt;
@ -7592,13 +7599,13 @@ string CompilerGLSL::to_qualifiers_glsl(uint32_t id)
auto &type = expression_type(id);
if (type.image.dim != DimSubpassData && type.image.sampled == 2)
{
if (flags & (1ull << DecorationCoherent))
if (flags.get(DecorationCoherent))
res += "coherent ";
if (flags & (1ull << DecorationRestrict))
if (flags.get(DecorationRestrict))
res += "restrict ";
if (flags & (1ull << DecorationNonWritable))
if (flags.get(DecorationNonWritable))
res += "readonly ";
if (flags & (1ull << DecorationNonReadable))
if (flags.get(DecorationNonReadable))
res += "writeonly ";
}
@ -7649,7 +7656,7 @@ string CompilerGLSL::variable_decl(const SPIRVariable &variable)
const char *CompilerGLSL::to_pls_qualifiers_glsl(const SPIRVariable &variable)
{
auto flags = meta[variable.self].decoration.decoration_flags;
if (flags & (1ull << DecorationRelaxedPrecision))
if (flags.get(DecorationRelaxedPrecision))
return "mediump ";
else
return "highp ";
@ -8039,7 +8046,7 @@ void CompilerGLSL::flatten_buffer_block(uint32_t id)
SPIRV_CROSS_THROW(name + " is an array of UBOs.");
if (type.basetype != SPIRType::Struct)
SPIRV_CROSS_THROW(name + " is not a struct.");
if ((flags & (1ull << DecorationBlock)) == 0)
if (!flags.get(DecorationBlock))
SPIRV_CROSS_THROW(name + " is not a block.");
if (type.member_types.empty())
SPIRV_CROSS_THROW(name + " is an empty struct.");
@ -8059,10 +8066,10 @@ bool CompilerGLSL::check_atomic_image(uint32_t id)
if (var)
{
auto &flags = meta.at(var->self).decoration.decoration_flags;
if (flags & ((1ull << DecorationNonWritable) | (1ull << DecorationNonReadable)))
if (flags.get(DecorationNonWritable) || flags.get(DecorationNonReadable))
{
flags &= ~(1ull << DecorationNonWritable);
flags &= ~(1ull << DecorationNonReadable);
flags.clear(DecorationNonWritable);
flags.clear(DecorationNonReadable);
force_recompile = true;
}
}
@ -8117,7 +8124,7 @@ void CompilerGLSL::add_function_overload(const SPIRFunction &func)
}
}
void CompilerGLSL::emit_function_prototype(SPIRFunction &func, uint64_t return_flags)
void CompilerGLSL::emit_function_prototype(SPIRFunction &func, const Bitset &return_flags)
{
if (func.self != entry_point)
add_function_overload(func);
@ -8185,7 +8192,7 @@ void CompilerGLSL::emit_function_prototype(SPIRFunction &func, uint64_t return_f
statement(decl);
}
void CompilerGLSL::emit_function(SPIRFunction &func, uint64_t return_flags)
void CompilerGLSL::emit_function(SPIRFunction &func, const Bitset &return_flags)
{
// Avoid potential cycles.
if (func.active)
@ -8228,12 +8235,12 @@ void CompilerGLSL::emit_function(SPIRFunction &func, uint64_t return_flags)
if (b.loop_variables.size() < 2)
continue;
uint64_t flags = get_decoration_mask(b.loop_variables.front());
auto &flags = get_decoration_bitset(b.loop_variables.front());
uint32_t type = get<SPIRVariable>(b.loop_variables.front()).basetype;
bool invalid_initializers = false;
for (auto loop_variable : b.loop_variables)
{
if (flags != get_decoration_mask(loop_variable) ||
if (flags != get_decoration_bitset(loop_variable) ||
type != get<SPIRVariable>(b.loop_variables.front()).basetype)
{
invalid_initializers = true;
@ -8646,7 +8653,7 @@ bool CompilerGLSL::for_loop_initializers_are_same_type(const SPIRBlock &block)
return true;
uint32_t expected = 0;
uint64_t expected_flags = 0;
Bitset expected_flags;
for (auto &var : block.loop_variables)
{
// Don't care about uninitialized variables as they will not be part of the initializers.
@ -8657,13 +8664,13 @@ bool CompilerGLSL::for_loop_initializers_are_same_type(const SPIRBlock &block)
if (expected == 0)
{
expected = get<SPIRVariable>(var).basetype;
expected_flags = get_decoration_mask(var);
expected_flags = get_decoration_bitset(var);
}
else if (expected != get<SPIRVariable>(var).basetype)
return false;
// Precision flags and things like that must also match.
if (expected_flags != get_decoration_mask(var))
if (expected_flags != get_decoration_bitset(var))
return false;
}
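
Every spirv_glsl.cpp hunk above is the same mechanical substitution: a raw uint64_t decoration mask becomes a Bitset, and the bit-twiddling idioms turn into named member calls. The following self-contained sketch is not the real spirv_cross::Bitset (the upstream class reportedly also tracks bits above 63 in a secondary container); it is a minimal stand-in exposing only the surface visible in this diff (get, set, clear, reset, merge_or, empty, comparison), so the before/after idioms can be compiled and compared side by side.

#include <cstdint>

// Minimal stand-in for spirv_cross::Bitset, for illustration only.
struct Bitset
{
	uint64_t lower = 0;

	bool get(uint32_t bit) const { return (lower & (1ull << bit)) != 0; }
	void set(uint32_t bit) { lower |= 1ull << bit; }
	void clear(uint32_t bit) { lower &= ~(1ull << bit); }
	void reset() { lower = 0; }
	void merge_or(const Bitset &other) { lower |= other.lower; }
	bool empty() const { return lower == 0; }
	bool operator==(const Bitset &other) const { return lower == other.lower; }
	bool operator!=(const Bitset &other) const { return !(*this == other); }
};

// Values taken from the SPIR-V spec; any distinct values would do for the sketch.
enum Decoration { DecorationBlock = 2, DecorationLocation = 30 };

int main()
{
	Bitset flags;

	// flags |= 1ull << DecorationBlock          ->  flags.set(DecorationBlock)
	flags.set(DecorationBlock);

	// (flags & (1ull << DecorationBlock)) != 0  ->  flags.get(DecorationBlock)
	bool is_block = flags.get(DecorationBlock);

	// flags &= ~(1ull << DecorationBlock)       ->  flags.clear(DecorationBlock)
	flags.clear(DecorationBlock);

	// combined |= other_mask                    ->  combined.merge_or(other)
	Bitset combined, other;
	other.set(DecorationLocation);
	combined.merge_or(other);

	return (is_block && combined.get(DecorationLocation)) ? 0 : 1;
}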

View file

@ -192,12 +192,12 @@ public:
protected:
void reset();
void emit_function(SPIRFunction &func, uint64_t return_flags);
void emit_function(SPIRFunction &func, const Bitset &return_flags);
bool has_extension(const std::string &ext) const;
// Virtualize methods which need to be overridden by subclass targets like C++ and such.
virtual void emit_function_prototype(SPIRFunction &func, uint64_t return_flags);
virtual void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags);
// Kinda ugly way to let opcodes peek at their neighbor instructions for trivial peephole scenarios.
const SPIRBlock *current_emitting_block = nullptr;
@ -440,11 +440,11 @@ protected:
virtual std::string to_qualifiers_glsl(uint32_t id);
const char *to_precision_qualifiers_glsl(uint32_t id);
virtual const char *to_storage_qualifiers_glsl(const SPIRVariable &var);
const char *flags_to_precision_qualifiers_glsl(const SPIRType &type, uint64_t flags);
const char *flags_to_precision_qualifiers_glsl(const SPIRType &type, const Bitset &flags);
const char *format_to_glsl(spv::ImageFormat format);
virtual std::string layout_for_member(const SPIRType &type, uint32_t index);
virtual std::string to_interpolation_qualifiers(uint64_t flags);
uint64_t combined_decoration_for_member(const SPIRType &type, uint32_t index);
virtual std::string to_interpolation_qualifiers(const Bitset &flags);
Bitset combined_decoration_for_member(const SPIRType &type, uint32_t index);
std::string layout_for_variable(const SPIRVariable &variable);
std::string to_combined_image_sampler(uint32_t image_id, uint32_t samp_id);
virtual bool skip_argument(uint32_t id) const;
@ -453,9 +453,9 @@ protected:
bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, uint32_t start_offset = 0,
uint32_t end_offset = std::numeric_limits<uint32_t>::max());
uint32_t type_to_packed_base_size(const SPIRType &type, BufferPackingStandard packing);
uint32_t type_to_packed_alignment(const SPIRType &type, uint64_t flags, BufferPackingStandard packing);
uint32_t type_to_packed_array_stride(const SPIRType &type, uint64_t flags, BufferPackingStandard packing);
uint32_t type_to_packed_size(const SPIRType &type, uint64_t flags, BufferPackingStandard packing);
uint32_t type_to_packed_alignment(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
uint32_t type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
uint32_t type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
std::string bitcast_glsl(const SPIRType &result_type, uint32_t arg);
virtual std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type);
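
In the CompilerGLSL header above, helpers that used to traffic in raw uint64_t now either return a Bitset by value (combined_decoration_for_member) or take const Bitset & (emit_function, flags_to_precision_qualifiers_glsl, the type_to_packed_* family); passing by const reference presumably reflects that the flag container is no longer a single trivially copyable word. A hedged sketch of the calling pattern behind the layout_for_variable hunk earlier in this diff: member decorations are OR-ed together so the struct can be queried as a whole. The Bitset here is a toy stand-in, and member_flags is a hypothetical stand-in for the real meta[] member table.

#include <cstdint>
#include <vector>

struct Bitset // toy stand-in, see the fuller sketch above
{
	uint64_t lower = 0;
	bool get(uint32_t bit) const { return (lower & (1ull << bit)) != 0; }
	void set(uint32_t bit) { lower |= 1ull << bit; }
	void merge_or(const Bitset &o) { lower |= o.lower; }
};

enum Decoration { DecorationLocation = 30 };

// Loosely mirrors combined_decoration_for_member(): OR the decorations of
// every struct member so the caller can ask one question of the whole type.
static Bitset combined_decoration(const std::vector<Bitset> &member_flags)
{
	Bitset combined;
	for (auto &f : member_flags)
		combined.merge_or(f);
	return combined;
}

// Loosely mirrors the layout_for_variable() hunk: only emit a top-level
// location qualifier when no member already carries one.
static bool need_top_level_location(const std::vector<Bitset> &member_flags)
{
	return !combined_decoration(member_flags).get(DecorationLocation);
}

int main()
{
	std::vector<Bitset> members(2);
	members[1].set(DecorationLocation);
	return need_top_level_location(members) ? 1 : 0; // expect 0: a member has a location
}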

View file

@ -474,7 +474,7 @@ void CompilerHLSL::emit_interface_block_globally(const SPIRVariable &var)
// These are emitted inside the interface structs.
auto &flags = meta[var.self].decoration.decoration_flags;
auto old_flags = flags;
flags = 0;
flags.reset();
statement("static ", variable_decl(var), ";");
flags = old_flags;
}
@ -495,11 +495,7 @@ const char *CompilerHLSL::to_storage_qualifiers_glsl(const SPIRVariable &var)
void CompilerHLSL::emit_builtin_outputs_in_struct()
{
bool legacy = hlsl_options.shader_model <= 30;
for (uint32_t i = 0; i < 64; i++)
{
if (!(active_output_builtins & (1ull << i)))
continue;
active_output_builtins.for_each_bit([&](uint32_t i) {
const char *type = nullptr;
const char *semantic = nullptr;
auto builtin = static_cast<BuiltIn>(i);
@ -563,17 +559,13 @@ void CompilerHLSL::emit_builtin_outputs_in_struct()
if (type && semantic)
statement(type, " ", builtin_to_glsl(builtin, StorageClassOutput), " : ", semantic, ";");
}
});
}
void CompilerHLSL::emit_builtin_inputs_in_struct()
{
bool legacy = hlsl_options.shader_model <= 30;
for (uint32_t i = 0; i < 64; i++)
{
if (!(active_input_builtins & (1ull << i)))
continue;
active_input_builtins.for_each_bit([&](uint32_t i) {
const char *type = nullptr;
const char *semantic = nullptr;
auto builtin = static_cast<BuiltIn>(i);
@ -682,7 +674,7 @@ void CompilerHLSL::emit_builtin_inputs_in_struct()
if (type && semantic)
statement(type, " ", builtin_to_glsl(builtin, StorageClassInput), " : ", semantic, ";");
}
});
}
uint32_t CompilerHLSL::type_to_consumed_locations(const SPIRType &type) const
@ -710,22 +702,22 @@ uint32_t CompilerHLSL::type_to_consumed_locations(const SPIRType &type) const
return elements;
}
string CompilerHLSL::to_interpolation_qualifiers(uint64_t flags)
string CompilerHLSL::to_interpolation_qualifiers(const Bitset &flags)
{
string res;
//if (flags & (1ull << DecorationSmooth))
// res += "linear ";
if (flags & (1ull << DecorationFlat))
if (flags.get(DecorationFlat))
res += "nointerpolation ";
if (flags & (1ull << DecorationNoPerspective))
if (flags.get(DecorationNoPerspective))
res += "noperspective ";
if (flags & (1ull << DecorationCentroid))
if (flags.get(DecorationCentroid))
res += "centroid ";
if (flags & (1ull << DecorationPatch))
if (flags.get(DecorationPatch))
res += "patch "; // Seems to be different in actual HLSL.
if (flags & (1ull << DecorationSample))
if (flags.get(DecorationSample))
res += "sample ";
if (flags & (1ull << DecorationInvariant))
if (flags.get(DecorationInvariant))
res += "invariant "; // Not supported?
return res;
@ -771,7 +763,7 @@ void CompilerHLSL::emit_io_block(const SPIRVariable &var)
add_member_name(type, i);
auto &membertype = get<SPIRType>(type.member_types[i]);
statement(to_interpolation_qualifiers(get_member_decoration_mask(type.self, i)),
statement(to_interpolation_qualifiers(get_member_decoration_bitset(type.self, i)),
variable_decl(membertype, to_member_name(type, i)), semantic, ";");
}
@ -813,7 +805,7 @@ void CompilerHLSL::emit_interface_block_in_struct(const SPIRVariable &var, unord
// If an explicit location exists, use it with TEXCOORD[N] semantic.
// Otherwise, pick a vacant location.
if (m.decoration_flags & (1ull << DecorationLocation))
if (m.decoration_flags.get(DecorationLocation))
location_number = m.location;
else
location_number = get_vacant_location();
@ -831,14 +823,14 @@ void CompilerHLSL::emit_interface_block_in_struct(const SPIRVariable &var, unord
{
SPIRType newtype = type;
newtype.columns = 1;
statement(to_interpolation_qualifiers(get_decoration_mask(var.self)),
statement(to_interpolation_qualifiers(get_decoration_bitset(var.self)),
variable_decl(newtype, join(name, "_", i)), " : ", semantic, "_", i, ";");
active_locations.insert(location_number++);
}
}
else
{
statement(to_interpolation_qualifiers(get_decoration_mask(var.self)), variable_decl(type, name), " : ",
statement(to_interpolation_qualifiers(get_decoration_bitset(var.self)), variable_decl(type, name), " : ",
semantic, ";");
// Structs and arrays should consume more locations.
@ -879,12 +871,11 @@ std::string CompilerHLSL::builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClas
void CompilerHLSL::emit_builtin_variables()
{
// Emit global variables for the interface variables which are statically used by the shader.
for (uint32_t i = 0; i < 64; i++)
{
if (!((active_input_builtins | active_output_builtins) & (1ull << i)))
continue;
Bitset builtins = active_input_builtins;
builtins.merge_or(active_output_builtins);
// Emit global variables for the interface variables which are statically used by the shader.
builtins.for_each_bit([&](uint32_t i) {
const char *type = nullptr;
auto builtin = static_cast<BuiltIn>(i);
uint32_t array_size = 0;
@ -952,7 +943,7 @@ void CompilerHLSL::emit_builtin_variables()
break;
}
StorageClass storage = (active_input_builtins & (1ull << i)) != 0 ? StorageClassInput : StorageClassOutput;
StorageClass storage = active_input_builtins.get(i) ? StorageClassInput : StorageClassOutput;
// FIXME: SampleMask can be both in and out with sample builtin,
// need to distinguish that when we add support for that.
@ -963,7 +954,7 @@ void CompilerHLSL::emit_builtin_variables()
else
statement("static ", type, " ", builtin_to_glsl(builtin, storage), ";");
}
}
});
}
void CompilerHLSL::emit_composite_constants()
@ -1045,8 +1036,8 @@ void CompilerHLSL::emit_resources()
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) == 0)
(!meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_struct(type);
}
@ -1066,8 +1057,8 @@ void CompilerHLSL::emit_resources()
auto &type = get<SPIRType>(var.basetype);
bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform;
bool has_block_flags = (meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) != 0;
bool has_block_flags = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (var.storage != StorageClassFunction && type.pointer && is_block_storage && !is_hidden_variable(var) &&
has_block_flags)
@ -1131,7 +1122,7 @@ void CompilerHLSL::emit_resources()
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock)) != 0;
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
// Do not emit I/O blocks here.
// I/O blocks can be arrayed, so we must deal with them separately to support geometry shaders
@ -1163,7 +1154,7 @@ void CompilerHLSL::emit_resources()
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock)) != 0;
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassInput && var.storage != StorageClassOutput)
continue;
@ -1231,8 +1222,10 @@ void CompilerHLSL::emit_resources()
return name1.compare(name2) < 0;
};
static const uint64_t implicit_builtins = (1ull << BuiltInNumWorkgroups) | (1ull << BuiltInPointCoord);
if (!input_variables.empty() || (active_input_builtins & ~implicit_builtins))
auto input_builtins = active_input_builtins;
input_builtins.clear(BuiltInNumWorkgroups);
input_builtins.clear(BuiltInPointCoord);
if (!input_variables.empty() || !input_builtins.empty())
{
require_input = true;
statement("struct SPIRV_Cross_Input");
@ -1246,7 +1239,7 @@ void CompilerHLSL::emit_resources()
statement("");
}
if (!output_variables.empty() || active_output_builtins)
if (!output_variables.empty() || !active_output_builtins.empty())
{
require_output = true;
statement("struct SPIRV_Cross_Output");
@ -1721,17 +1714,17 @@ string CompilerHLSL::layout_for_member(const SPIRType &type, uint32_t index)
{
auto flags = combined_decoration_for_member(type, index);
bool is_block = (meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) != 0;
bool is_block = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (!is_block)
return "";
// Flip the convention. HLSL is a bit odd in that the memory layout is column major ... but the language API is "row-major".
// The way to deal with this is to multiply everything in inverse order, and reverse the memory layout.
if (flags & (1ull << DecorationColMajor))
if (flags.get(DecorationColMajor))
return "row_major ";
else if (flags & (1ull << DecorationRowMajor))
else if (flags.get(DecorationRowMajor))
return "column_major ";
return "";
@ -1742,14 +1735,15 @@ void CompilerHLSL::emit_struct_member(const SPIRType &type, uint32_t member_type
{
auto &membertype = get<SPIRType>(member_type_id);
uint64_t memberflags = 0;
Bitset memberflags;
auto &memb = meta[type.self].members;
if (index < memb.size())
memberflags = memb[index].decoration_flags;
string qualifiers;
bool is_block = (meta[type.self].decoration.decoration_flags &
((1ull << DecorationBlock) | (1ull << DecorationBufferBlock))) != 0;
bool is_block = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (is_block)
qualifiers = to_interpolation_qualifiers(memberflags);
@ -1779,8 +1773,8 @@ void CompilerHLSL::emit_buffer_block(const SPIRVariable &var)
if (is_uav)
{
uint64_t flags = get_buffer_block_flags(var);
bool is_readonly = (flags & (1ull << DecorationNonWritable)) != 0;
Bitset flags = get_buffer_block_flags(var);
bool is_readonly = flags.get(DecorationNonWritable);
add_resource_name(var.self);
statement(is_readonly ? "ByteAddressBuffer " : "RWByteAddressBuffer ", to_name(var.self),
type_to_array_glsl(type), to_resource_binding(var), ";");
@ -1932,7 +1926,7 @@ string CompilerHLSL::to_func_call_arg(uint32_t id)
return arg_str;
}
void CompilerHLSL::emit_function_prototype(SPIRFunction &func, uint64_t return_flags)
void CompilerHLSL::emit_function_prototype(SPIRFunction &func, const Bitset &return_flags)
{
if (func.self != entry_point)
add_function_overload(func);
@ -2033,7 +2027,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock)) != 0;
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassInput && var.storage != StorageClassOutput)
continue;
@ -2076,7 +2070,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
break;
}
case ExecutionModelFragment:
if (execution.flags & (1ull << ExecutionModeEarlyFragmentTests))
if (execution.flags.get(ExecutionModeEarlyFragmentTests))
statement("[earlydepthstencil]");
break;
default:
@ -2088,11 +2082,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
bool legacy = hlsl_options.shader_model <= 30;
// Copy builtins from entry point arguments to globals.
for (uint32_t i = 0; i < 64; i++)
{
if (!(active_input_builtins & (1ull << i)))
continue;
active_input_builtins.for_each_bit([&](uint32_t i) {
auto builtin = builtin_to_glsl(static_cast<BuiltIn>(i), StorageClassInput);
switch (static_cast<BuiltIn>(i))
{
@ -2134,7 +2124,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
statement(builtin, " = stage_input.", builtin, ";");
break;
}
}
});
// Copy from stage input struct to globals.
for (auto &id : ids)
@ -2143,7 +2133,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock)) != 0;
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassInput)
continue;
@ -2193,7 +2183,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock)) != 0;
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassOutput)
continue;
@ -2213,14 +2203,10 @@ void CompilerHLSL::emit_hlsl_entry_point()
statement("SPIRV_Cross_Output stage_output;");
// Copy builtins from globals to return struct.
for (uint32_t i = 0; i < 64; i++)
{
if (!(active_output_builtins & (1ull << i)))
continue;
active_output_builtins.for_each_bit([&](uint32_t i) {
// PointSize doesn't exist in HLSL.
if (i == BuiltInPointSize)
continue;
return;
switch (static_cast<BuiltIn>(i))
{
@ -2243,7 +2229,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
break;
}
}
}
});
for (auto &id : ids)
{
@ -2251,7 +2237,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock)) != 0;
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassOutput)
continue;
@ -2743,8 +2729,8 @@ string CompilerHLSL::to_resource_binding(const SPIRVariable &var)
{
if (has_decoration(type.self, DecorationBufferBlock))
{
uint64_t flags = get_buffer_block_flags(var);
bool is_readonly = (flags & (1ull << DecorationNonWritable)) != 0;
Bitset flags = get_buffer_block_flags(var);
bool is_readonly = flags.get(DecorationNonWritable);
space = is_readonly ? 't' : 'u'; // UAV
}
else if (has_decoration(type.self, DecorationBlock))
@ -4187,7 +4173,7 @@ uint32_t CompilerHLSL::remap_num_workgroups_builtin()
{
update_active_builtins();
if ((active_input_builtins & (1ull << BuiltInNumWorkgroups)) == 0)
if (!active_input_builtins.get(BuiltInNumWorkgroups))
return 0;
// Create a new, fake UBO.
@ -4259,7 +4245,7 @@ string CompilerHLSL::compile()
// Subpass input needs SV_Position.
if (need_subpass_input)
active_input_builtins |= 1ull << BuiltInFragCoord;
active_input_builtins.set(BuiltInFragCoord);
uint32_t pass_count = 0;
do
@ -4275,7 +4261,7 @@ string CompilerHLSL::compile()
emit_header();
emit_resources();
emit_function(get<SPIRFunction>(entry_point), 0);
emit_function(get<SPIRFunction>(entry_point), Bitset());
emit_hlsl_entry_point();
pass_count++;
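
The recurring HLSL-side change above replaces the hand-rolled "for (uint32_t i = 0; i < 64; i++) { if (!(mask & (1ull << i))) continue; ... }" scan with active_*_builtins.for_each_bit(...). One consequence is visible in the stage-output hunk: because the loop body becomes a lambda, every "continue" turns into a "return" (the PointSize skip, for example). A self-contained sketch of that shape, using a toy Bitset whose for_each_bit simply scans 64 bits (the upstream implementation may differ):

#include <cstdint>
#include <cstdio>

struct Bitset // toy stand-in, only what this sketch needs
{
	uint64_t lower = 0;
	void set(uint32_t bit) { lower |= 1ull << bit; }

	template <typename Op>
	void for_each_bit(const Op &op) const
	{
		for (uint32_t i = 0; i < 64; i++)
			if (lower & (1ull << i))
				op(i);
	}
};

enum BuiltIn { BuiltInPosition = 0, BuiltInPointSize = 1 };

int main()
{
	Bitset active_output_builtins;
	active_output_builtins.set(BuiltInPosition);
	active_output_builtins.set(BuiltInPointSize);

	// The loop body is now a lambda, so the old "continue" becomes "return".
	active_output_builtins.for_each_bit([&](uint32_t i) {
		if (i == BuiltInPointSize)
			return; // PointSize doesn't exist in HLSL, same skip as before
		printf("copying builtin %u to stage_output\n", (unsigned)i);
	});
	return 0;
}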

View file

@ -121,7 +121,7 @@ private:
std::string image_type_hlsl(const SPIRType &type);
std::string image_type_hlsl_modern(const SPIRType &type);
std::string image_type_hlsl_legacy(const SPIRType &type);
void emit_function_prototype(SPIRFunction &func, uint64_t return_flags) override;
void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override;
void emit_hlsl_entry_point();
void emit_header() override;
void emit_resources();
@ -143,7 +143,7 @@ private:
void emit_fixup() override;
std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override;
std::string layout_for_member(const SPIRType &type, uint32_t index) override;
std::string to_interpolation_qualifiers(uint64_t flags) override;
std::string to_interpolation_qualifiers(const Bitset &flags) override;
std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override;
std::string to_func_call_arg(uint32_t id) override;
std::string to_sampler_expression(uint32_t id);
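
The header hunks for CompilerHLSL (and CompilerMSL below) are forced by the base-class change: once CompilerGLSL::emit_function_prototype takes const Bitset &, every declaration marked override has to switch in the same commit, otherwise the mismatch is a hard compile error rather than a silently added overload. A compressed illustration of that constraint; BaseCompiler and DerivedCompiler are placeholder names, not the real hierarchy.

#include <cstdint>

struct Bitset { uint64_t lower = 0; }; // placeholder

struct BaseCompiler
{
	// Was: virtual void emit_function_prototype(uint64_t return_flags);
	virtual void emit_function_prototype(const Bitset &return_flags) = 0;
	virtual ~BaseCompiler() = default;
};

struct DerivedCompiler : BaseCompiler
{
	// Must track the new signature; keeping uint64_t here would fail to compile
	// because "override" would no longer match any virtual in the base class.
	void emit_function_prototype(const Bitset &) override {}
};

int main()
{
	DerivedCompiler c;
	c.emit_function_prototype(Bitset{});
	return 0;
}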

View file

@ -181,7 +181,7 @@ string CompilerMSL::compile()
emit_specialization_constants();
emit_resources();
emit_custom_functions();
emit_function(get<SPIRFunction>(entry_point), 0);
emit_function(get<SPIRFunction>(entry_point), Bitset());
pass_count++;
} while (force_recompile);
@ -623,7 +623,7 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage)
meta[p_var->self].decoration.qualified_alias = qual_var_name;
// Copy the variable location from the original variable to the member
if (get_decoration_mask(p_var->self) & (1ull << DecorationLocation))
if (get_decoration_bitset(p_var->self).get(DecorationLocation))
{
uint32_t locn = get_decoration(p_var->self, DecorationLocation);
set_member_decoration(ib_type_id, ib_mbr_idx, DecorationLocation, locn);
@ -2147,7 +2147,7 @@ void CompilerMSL::emit_interface_block(uint32_t ib_var_id)
// Emits the declaration signature of the specified function.
// If this is the entry point function, Metal-specific return value and function arguments are added.
void CompilerMSL::emit_function_prototype(SPIRFunction &func, uint64_t)
void CompilerMSL::emit_function_prototype(SPIRFunction &func, const Bitset &)
{
if (func.self != entry_point)
add_function_overload(func);
@ -2542,7 +2542,7 @@ bool CompilerMSL::is_non_native_row_major_matrix(uint32_t id)
return false;
// Non-matrix or column-major matrix types do not need to be converted.
if (!(meta[id].decoration.decoration_flags & (1ull << DecorationRowMajor)))
if (!meta[id].decoration.decoration_flags.get(DecorationRowMajor))
return false;
// Generate a function that will swap matrix elements from row-major to column-major.
@ -2810,7 +2810,7 @@ uint32_t CompilerMSL::get_ordered_member_location(uint32_t type_id, uint32_t ind
if (index < m.members.size())
{
auto &dec = m.members[index];
if (dec.decoration_flags & (1ull << DecorationLocation))
if (dec.decoration_flags.get(DecorationLocation))
return dec.location;
}
@ -2876,9 +2876,9 @@ string CompilerMSL::func_type_decl(SPIRType &type)
entry_type = "vertex";
break;
case ExecutionModelFragment:
entry_type = (execution.flags & (1ull << ExecutionModeEarlyFragmentTests)) ?
"fragment [[ early_fragment_tests ]]" :
"fragment";
entry_type = execution.flags.get(ExecutionModeEarlyFragmentTests) ?
"fragment [[ early_fragment_tests ]]" :
"fragment";
break;
case ExecutionModelGLCompute:
case ExecutionModelKernel:
@ -2909,10 +2909,10 @@ string CompilerMSL::get_argument_address_space(const SPIRVariable &argument)
case StorageClassUniformConstant:
case StorageClassPushConstant:
if (type.basetype == SPIRType::Struct)
return ((meta[type.self].decoration.decoration_flags & (1ull << DecorationBufferBlock)) != 0 &&
(meta[argument.self].decoration.decoration_flags & (1ull << DecorationNonWritable)) == 0) ?
"device" :
"constant";
return (meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock) &&
!meta[argument.self].decoration.decoration_flags.get(DecorationNonWritable)) ?
"device" :
"constant";
break;
@ -3545,9 +3545,9 @@ string CompilerMSL::builtin_qualifier(BuiltIn builtin)
// Fragment function out
case BuiltInFragDepth:
if (execution.flags & (1ull << ExecutionModeDepthGreater))
if (execution.flags.get(ExecutionModeDepthGreater))
return "depth(greater)";
else if (execution.flags & (1ull << ExecutionModeDepthLess))
else if (execution.flags.get(ExecutionModeDepthLess))
return "depth(less)";
else
return "depth(any)";

View file

@ -195,7 +195,7 @@ protected:
void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args,
uint32_t count) override;
void emit_header() override;
void emit_function_prototype(SPIRFunction &func, uint64_t return_flags) override;
void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override;
void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override;
void emit_fixup() override;
void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,