[spirv] Add support for constant array type (#565)

* Supported constant array types
* Supported operator[] on arrays
* Supported initializer list containing arrays
This commit is contained in:
Lei Zhang 2017-08-15 03:36:44 -04:00 коммит произвёл David Peixotto
Родитель 634b459283
Коммит c99605827c
12 изменённых файлов: 442 добавлений и 51 удалений

Просмотреть файл

@ -250,6 +250,7 @@ public:
uint32_t getStructType(llvm::ArrayRef<uint32_t> fieldTypes,
llvm::StringRef structName = "",
llvm::ArrayRef<llvm::StringRef> fieldNames = {});
uint32_t getArrayType(uint32_t elemType, uint32_t count);
uint32_t getFunctionType(uint32_t returnType,
llvm::ArrayRef<uint32_t> paramTypes);

Просмотреть файл

@ -67,7 +67,6 @@ void InitListHandler::flatten(const InitListExpr *expr) {
void InitListHandler::decompose(const Expr *expr) {
const QualType type = expr->getType();
assert(!type->isBuiltinType()); // Cannot decompose builtin types
if (hlsl::IsHLSLVecType(type)) {
const uint32_t vec = theEmitter.loadIfGLValue(expr);
@ -94,10 +93,8 @@ void InitListHandler::decompose(const Expr *expr) {
scalars.emplace_back(element, elemType);
}
}
} else if (type->isStructureType()) {
llvm_unreachable("struct initializer should already been handled");
} else {
emitError("decomposing type %0 in initializer list unimplemented") << type;
llvm_unreachable("decompose() should only handle vector or matrix types");
}
}
@ -116,14 +113,14 @@ void InitListHandler::decomposeVector(uint32_t vec, QualType elemType,
}
}
void InitListHandler::tryToSplitStruct() {
bool InitListHandler::tryToSplitStruct() {
if (initializers.empty())
return;
return false;
auto *init = const_cast<Expr *>(initializers.back());
const QualType initType = init->getType();
if (!initType->isStructureType())
return;
return false;
// We are certain the current initializer will be replaced by now.
initializers.pop_back();
@ -145,6 +142,47 @@ void InitListHandler::tryToSplitStruct() {
// Push in the reverse order
initializers.insert(initializers.end(), fields.rbegin(), fields.rend());
return true;
}
// If the next initializer in the queue is of constant array type, replaces it
// with faux ArraySubscriptExprs addressing each of its elements and returns
// true. Otherwise, does nothing and returns false.
bool InitListHandler::tryToSplitConstantArray() {
  if (initializers.empty())
    return false;

  auto *init = const_cast<Expr *>(initializers.back());
  const QualType initType = init->getType();

  if (!initType->isConstantArrayType())
    return false;

  // We are certain the current initializer will be replaced by now.
  initializers.pop_back();

  const auto &context = theEmitter.getASTContext();
  // 32-bit unsigned type for the synthesized subscript index literals.
  const auto u32Type = context.getIntTypeForBitwidth(32, /*signed*/ 0);
  const auto *arrayType = context.getAsConstantArrayType(initType);
  const auto elemType = arrayType->getElementType();
  // TODO: handle (unlikely) extra large array size?
  const auto size = static_cast<uint32_t>(arrayType->getSize().getZExtValue());

  // Create ArraySubscriptExpr for each element of the array
  // TODO: It will generate lots of elements if the array size is very large.
  // But do we have a better solution?
  llvm::SmallVector<const Expr *, 4> elements;
  for (uint32_t i = 0; i < size; ++i) {
    const auto iVal =
        llvm::APInt(/*numBits*/ 32, uint64_t(i), /*isSigned*/ false);
    auto *index = IntegerLiteral::Create(context, iVal, u32Type, {});
    // Fabricate init[i] as an lvalue so later handling can load from it.
    const auto *element = new (context)
        ArraySubscriptExpr(init, index, elemType, VK_LValue, OK_Ordinary, {});
    elements.push_back(element);
  }

  // Push in the reverse order, so that initializers.back() yields the
  // elements front to back.
  initializers.insert(initializers.end(), elements.rbegin(), elements.rend());

  return true;
}
uint32_t InitListHandler::createInitForType(QualType type) {
@ -168,7 +206,10 @@ uint32_t InitListHandler::createInitForType(QualType type) {
if (type->isStructureType())
return createInitForStructType(type);
emitError("unimplemented initializer for type '%0'") << type;
if (type->isConstantArrayType())
return createInitForConstantArrayType(type);
emitError("unimplemented initializer for type %0") << type;
return 0;
}
@ -181,7 +222,9 @@ uint32_t InitListHandler::createInitForBuiltinType(QualType type) {
return theEmitter.castToType(init.first, init.second, type);
}
tryToSplitStruct();
// Keep splitting structs or constant arrays
while (tryToSplitStruct() || tryToSplitConstantArray())
;
const Expr *init = initializers.back();
initializers.pop_back();
@ -202,8 +245,9 @@ uint32_t InitListHandler::createInitForVectorType(QualType elemType,
// directly. For all other cases, we need to construct a new vector as the
// initializer.
if (scalars.empty()) {
// A struct may contain a whole vector.
tryToSplitStruct();
// Keep splitting structs or constant arrays
while (tryToSplitStruct() || tryToSplitConstantArray())
;
const Expr *init = initializers.back();
@ -244,8 +288,9 @@ uint32_t InitListHandler::createInitForMatrixType(QualType elemType,
// Same as the vector case, first try to see if we already have a matrix at
// the beginning of the initializer queue.
if (scalars.empty()) {
// A struct may contain a whole matrix.
tryToSplitStruct();
// Keep splitting structs or constant arrays
while (tryToSplitStruct() || tryToSplitConstantArray())
;
const Expr *init = initializers.back();
@ -283,9 +328,15 @@ uint32_t InitListHandler::createInitForMatrixType(QualType elemType,
}
uint32_t InitListHandler::createInitForStructType(QualType type) {
assert(type->isStructureType());
// Same as the vector case, first try to see if we already have a struct at
// the beginning of the initializer queue.
if (scalars.empty()) {
// Keep splitting arrays
while (tryToSplitConstantArray())
;
const Expr *init = initializers.back();
// We can only avoid decomposing and reconstructing when the type is
// exactly the same.
@ -295,7 +346,9 @@ uint32_t InitListHandler::createInitForStructType(QualType type) {
}
// Otherwise, if the next initializer is a struct, it is not of the same
// type as we expected. Split it.
// type as we expected. Split it. Just need to do one iteration since a
// field in the next struct initializer may be of the same struct type as
// a field we are about to construct.
tryToSplitStruct();
}
@ -309,5 +362,44 @@ uint32_t InitListHandler::createInitForStructType(QualType type) {
return theBuilder.createCompositeConstruct(typeId, fields);
}
// Emits SPIR-V instructions building a value of the given constant array
// type, consuming from the scalars and initializers queues. Returns the
// <result-id> of the constructed composite.
uint32_t InitListHandler::createInitForConstantArrayType(QualType type) {
  assert(type->isConstantArrayType());

  // Same as the vector case, first try to see if we already have an array at
  // the beginning of the initializer queue.
  if (scalars.empty()) {
    // Keep splitting structs
    while (tryToSplitStruct())
      ;

    const Expr *init = initializers.back();

    // We can only avoid decomposing and reconstructing when the type is
    // exactly the same.
    if (type.getCanonicalType() == init->getType().getCanonicalType()) {
      initializers.pop_back();
      return theEmitter.loadIfGLValue(init);
    }

    // Otherwise, if the next initializer is an array, it is not of the same
    // type as we expected. Split it. Just need to do one iteration since the
    // next array initializer may have the same element type as the one we
    // are about to construct but with different size.
    tryToSplitConstantArray();
  }

  const auto *arrType = theEmitter.getASTContext().getAsConstantArrayType(type);
  const auto elemType = arrType->getElementType();
  // TODO: handle (unlikely) extra large array size?
  const auto size = static_cast<uint32_t>(arrType->getSize().getZExtValue());

  // Recursively create an initializer for each element, in order.
  llvm::SmallVector<uint32_t, 4> elements;
  for (uint32_t i = 0; i < size; ++i)
    elements.push_back(createInitForType(elemType));

  const uint32_t typeId = typeTranslator.translateType(type);
  // TODO: use OpConstantComposite when all components are constants
  return theBuilder.createCompositeConstruct(typeId, elements);
}
} // end namespace spirv
} // end namespace clang

Просмотреть файл

@ -60,8 +60,8 @@ namespace spirv {
///
/// When we reach a scalar type, we will try to decode a scalar value from the
/// front of the initializers queue. This may trigger composite extraction
/// since the front of the queue may be a vector/etc. The leftover values after
/// the extraction should be retained for the next decoding. Thus, we need
/// since the front of the queue may be a vector/matrix. The leftover values
/// after the extraction should be retained for the next decoding. Thus, we need
/// another queue, scalars, to keep track of leftover unused scalar values.
/// To adjust properly, when decoding values for a given type, we first try
/// the scalar queue.
@ -70,6 +70,12 @@ namespace spirv {
/// the scalar values previously extracted and retained in the scalars queue.
/// To optimize, if we have no leftover scalars and a value of the same type at
/// the front of the initializers queue, we use the value as a whole.
///
/// If the composite type is vector or matrix, we decompose() it into scalars as
/// explained above. If it is a struct or array type, the element type is not
/// guaranteed to be scalars. But still, we need to split them into their
/// elements. For such cases, we create faux MemberExpr or ArraySubscriptExpr
/// AST nodes for all the elements and push them into the initializers queue.
class InitListHandler {
public:
/// Constructs an InitListHandler which uses the given emitter for normal
@ -100,8 +106,12 @@ private:
void decomposeVector(uint32_t vec, QualType elemType, uint32_t size);
/// If the next initializer is a struct, replaces it with MemberExprs to all
/// its members. Otherwise, does nothing.
void tryToSplitStruct();
/// its members and returns true. Otherwise, does nothing and returns false.
bool tryToSplitStruct();
/// If the next initializer is a constant array, replaces it with
/// ArraySubscriptExprs addressing all its elements and returns true.
/// Otherwise, does nothing and returns false.
bool tryToSplitConstantArray();
/// Emits the necessary SPIR-V instructions to create a SPIR-V value of the
/// given type. The scalars and initializers queue will be used to fetch the
@ -112,6 +122,7 @@ private:
uint32_t createInitForMatrixType(QualType elemType, uint32_t rowCount,
uint32_t colCount);
uint32_t createInitForStructType(QualType type);
uint32_t createInitForConstantArrayType(QualType type);
private:
SPIRVEmitter &theEmitter;

Просмотреть файл

@ -438,6 +438,13 @@ ModuleBuilder::getStructType(llvm::ArrayRef<uint32_t> fieldTypes,
return typeId;
}
uint32_t ModuleBuilder::getArrayType(uint32_t elemType, uint32_t count) {
  // Intern the array type in the SPIR-V context, register it with the module,
  // and hand back its <result-id>.
  const Type *arrType = Type::getArray(theContext, elemType, count);
  const uint32_t arrTypeId = theContext.getResultIdForType(arrType);
  theModule.addType(arrType, arrTypeId);
  return arrTypeId;
}
uint32_t ModuleBuilder::getFunctionType(uint32_t returnType,
llvm::ArrayRef<uint32_t> paramTypes) {
const Type *type = Type::getFunction(theContext, returnType, paramTypes);

Просмотреть файл

@ -307,6 +307,10 @@ uint32_t SPIRVEmitter::doExpr(const Expr *expr) {
return doCallExpr(funcCall);
}
if (const auto *subscriptExpr = dyn_cast<ArraySubscriptExpr>(expr)) {
return doArraySubscriptExpr(subscriptExpr);
}
if (const auto *condExpr = dyn_cast<ConditionalOperator>(expr)) {
return doConditionalOperator(condExpr);
}
@ -974,6 +978,23 @@ void SPIRVEmitter::doSwitchStmt(const SwitchStmt *switchStmt,
processSwitchStmtUsingIfStmts(switchStmt);
}
// Emits an OpAccessChain selecting the indexed element of the base array.
// Returns the <result-id> of the resulting pointer; no OpLoad is generated.
uint32_t SPIRVEmitter::doArraySubscriptExpr(const ArraySubscriptExpr *expr) {
  // The base of an ArraySubscriptExpr has a wrapping LValueToRValue implicit
  // cast. We need to ignore it to avoid creating OpLoad.
  const auto *baseExpr = expr->getBase()->IgnoreParenLValueCasts();

  const uint32_t valType = typeTranslator.translateType(
      // TODO: handle non-constant array types
      astContext.getAsConstantArrayType(baseExpr->getType())->getElementType());
  // The result pointer lives in the same storage class as the base.
  const uint32_t ptrType = theBuilder.getPointerType(
      valType, declIdMapper.resolveStorageClass(baseExpr));

  const uint32_t base = doExpr(baseExpr);
  const uint32_t index = doExpr(expr->getIdx());

  return theBuilder.createAccessChain(ptrType, base, {index});
}
uint32_t SPIRVEmitter::doBinaryOperator(const BinaryOperator *expr) {
const auto opcode = expr->getOpcode();
@ -3022,27 +3043,21 @@ uint32_t SPIRVEmitter::translateAPValue(const APValue &value,
uint32_t SPIRVEmitter::translateAPInt(const llvm::APInt &intValue,
QualType targetType) {
const auto bitwidth = astContext.getIntWidth(targetType);
if (targetType->isSignedIntegerType()) {
const int64_t value = intValue.getSExtValue();
switch (bitwidth) {
case 32:
return theBuilder.getConstantInt32(static_cast<int32_t>(value));
default:
break;
}
// Try to see if this integer can be represented in 32-bit
if (intValue.isSignedIntN(32))
return theBuilder.getConstantInt32(
static_cast<int32_t>(intValue.getSExtValue()));
} else {
const uint64_t value = intValue.getZExtValue();
switch (bitwidth) {
case 32:
return theBuilder.getConstantUint32(static_cast<uint32_t>(value));
default:
break;
}
// Try to see if this integer can be represented in 32-bit
if (intValue.isIntN(32))
return theBuilder.getConstantUint32(
static_cast<uint32_t>(intValue.getZExtValue()));
}
emitError("APInt for target bitwidth '%0' is not supported yet.") << bitwidth;
emitError("APInt for target bitwidth '%0' is not supported yet.")
<< astContext.getIntWidth(targetType);
return 0;
}

Просмотреть файл

@ -85,6 +85,7 @@ private:
void doDoStmt(const DoStmt *, llvm::ArrayRef<const Attr *> attrs = {});
void doContinueStmt(const ContinueStmt *);
uint32_t doArraySubscriptExpr(const ArraySubscriptExpr *expr);
uint32_t doBinaryOperator(const BinaryOperator *expr);
uint32_t doCallExpr(const CallExpr *callExpr);
uint32_t doCastExpr(const CastExpr *expr);

Просмотреть файл

@ -122,6 +122,14 @@ uint32_t TypeTranslator::translateType(QualType type) {
return theBuilder.getStructType(fieldTypes, decl->getName(), fieldNames);
}
if (const auto *arrayType = dyn_cast<ConstantArrayType>(typePtr)) {
const uint32_t elemType = translateType(arrayType->getElementType());
const auto size =
static_cast<uint32_t>(arrayType->getSize().getZExtValue());
return theBuilder.getArrayType(elemType,
theBuilder.getConstantUint32(size));
}
emitError("Type '%0' is not supported yet.") << type->getTypeClassName();
return 0;
}

Просмотреть файл

@ -0,0 +1,32 @@
// Run: %dxc -T vs_6_0 -E main
struct S {
float2 f[2];
};
void main() {
// CHECK: [[v0102:%\d+]] = OpCompositeConstruct %v2float %float_1 %float_2
// CHECK-NEXT: [[v0304:%\d+]] = OpCompositeConstruct %v2float %float_3 %float_4
// CHECK-NEXT: [[vf0:%\d+]] = OpCompositeConstruct %_arr_v2float_uint_2 [[v0102]] [[v0304]]
// CHECK-NEXT: [[vs0:%\d+]] = OpCompositeConstruct %S [[vf0]]
// CHECK-NEXT: [[v0506:%\d+]] = OpCompositeConstruct %v2float %float_5 %float_6
// CHECK-NEXT: [[v0708:%\d+]] = OpCompositeConstruct %v2float %float_7 %float_8
// CHECK-NEXT: [[vf1:%\d+]] = OpCompositeConstruct %_arr_v2float_uint_2 [[v0506]] [[v0708]]
// CHECK-NEXT: [[vs1:%\d+]] = OpCompositeConstruct %S [[vf1]]
// CHECK-NEXT: [[v0910:%\d+]] = OpCompositeConstruct %v2float %float_9 %float_10
// CHECK-NEXT: [[v1112:%\d+]] = OpCompositeConstruct %v2float %float_11 %float_12
// CHECK-NEXT: [[vf2:%\d+]] = OpCompositeConstruct %_arr_v2float_uint_2 [[v0910]] [[v1112]]
// CHECK-NEXT: [[vs2:%\d+]] = OpCompositeConstruct %S [[vf2]]
// CHECK-NEXT: [[v1314:%\d+]] = OpCompositeConstruct %v2float %float_13 %float_14
// CHECK-NEXT: [[v1516:%\d+]] = OpCompositeConstruct %v2float %float_15 %float_16
// CHECK-NEXT: [[vf3:%\d+]] = OpCompositeConstruct %_arr_v2float_uint_2 [[v1314]] [[v1516]]
// CHECK-NEXT: [[vs3:%\d+]] = OpCompositeConstruct %S [[vf3]]
// CHECK-NEXT: [[va4:%\d+]] = OpCompositeConstruct %_arr_S_uint_4 [[vs0]] [[vs1]] [[vs2]] [[vs3]]
// CHECK-NEXT: {{%\d+}} = OpCompositeConstruct %_arr__arr_S_uint_4_uint_1 [[va4]]
S var[1][4] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0};
}

Просмотреть файл

@ -0,0 +1,67 @@
// Run: %dxc -T vs_6_0 -E main
// TODO: collect consecutive OpAccessChains into one
struct S {
float f[4]; // nested array
float g[4]; // nested array
};
// CHECK-LABEL: %src_main
float main(float val: A, uint index: B) : C {
float r;
S var[8][16]; // struct element
float4 vecvar[4]; // vector element
float2x3 matvar[4]; // matrix element
// CHECK: [[val:%\d+]] = OpLoad %float %val
// CHECK-NEXT: [[idx:%\d+]] = OpLoad %uint %index
// CHECK-NEXT: [[ptr0:%\d+]] = OpAccessChain %_ptr_Function__arr_S_uint_16 %var [[idx]]
// CHECK-NEXT: [[ptr1:%\d+]] = OpAccessChain %_ptr_Function_S [[ptr0]] %int_1
// CHECK-NEXT: [[ptr2:%\d+]] = OpAccessChain %_ptr_Function__arr_float_uint_4 [[ptr1]] %int_0
// CHECK-NEXT: [[ptr3:%\d+]] = OpAccessChain %_ptr_Function_float [[ptr2]] %int_2
// CHECK-NEXT: OpStore [[ptr3]] [[val]]
var[index][1].f[2] = val;
// CHECK: [[ptr0:%\d+]] = OpAccessChain %_ptr_Function__arr_S_uint_16 %var %int_0
// CHECK-NEXT: [[idx:%\d+]] = OpLoad %uint %index
// CHECK-NEXT: [[ptr1:%\d+]] = OpAccessChain %_ptr_Function_S [[ptr0]] [[idx]]
// CHECK-NEXT: [[ptr2:%\d+]] = OpAccessChain %_ptr_Function__arr_float_uint_4 [[ptr1]] %int_1
// CHECK-NEXT: [[idx:%\d+]] = OpLoad %uint %index
// CHECK-NEXT: [[ptr3:%\d+]] = OpAccessChain %_ptr_Function_float [[ptr2]] [[idx]]
// CHECK-NEXT: [[load:%\d+]] = OpLoad %float [[ptr3]]
// CHECK-NEXT: OpStore %r [[load]]
r = var[0][index].g[index];
// CHECK: [[val:%\d+]] = OpLoad %float %val
// CHECK-NEXT: [[vec2:%\d+]] = OpCompositeConstruct %v2float [[val]] [[val]]
// CHECK-NEXT: [[ptr0:%\d+]] = OpAccessChain %_ptr_Function_v4float %vecvar %int_3
// CHECK-NEXT: [[vec4:%\d+]] = OpLoad %v4float [[ptr0]]
// CHECK-NEXT: [[res:%\d+]] = OpVectorShuffle %v4float [[vec4]] [[vec2]] 0 1 5 4
// CHECK-NEXT: OpStore [[ptr0]] [[res]]
vecvar[3].ab = val;
// CHECK-NEXT: [[ptr1:%\d+]] = OpAccessChain %_ptr_Function_v4float %vecvar %int_2
// CHECK-NEXT: [[ptr2:%\d+]] = OpAccessChain %_ptr_Function_float [[ptr1]] %uint_1
// CHECK-NEXT: [[load:%\d+]] = OpLoad %float [[ptr2]]
// CHECK-NEXT: OpStore %r [[load]]
r = vecvar[2][1];
// CHECK: [[val:%\d+]] = OpLoad %float %val
// CHECK-NEXT: [[vec2:%\d+]] = OpCompositeConstruct %v2float [[val]] [[val]]
// CHECK-NEXT: [[ptr0:%\d+]] = OpAccessChain %_ptr_Function_mat2v3float %matvar %int_2
// CHECK-NEXT: [[val0:%\d+]] = OpCompositeExtract %float [[vec2]] 0
// CHECK-NEXT: [[ptr1:%\d+]] = OpAccessChain %_ptr_Function_float [[ptr0]] %int_0 %int_1
// CHECK-NEXT: OpStore [[ptr1]] [[val0]]
// CHECK-NEXT: [[val1:%\d+]] = OpCompositeExtract %float [[vec2]] 1
// CHECK-NEXT: [[ptr2:%\d+]] = OpAccessChain %_ptr_Function_float [[ptr0]] %int_1 %int_2
// CHECK-NEXT: OpStore [[ptr2]] [[val1]]
matvar[2]._12_23 = val;
// CHECK-NEXT: [[ptr3:%\d+]] = OpAccessChain %_ptr_Function_mat2v3float %matvar %int_0
// CHECK-NEXT: [[ptr4:%\d+]] = OpAccessChain %_ptr_Function_float [[ptr3]] %uint_1 %uint_2
// CHECK-NEXT: [[load:%\d+]] = OpLoad %float [[ptr4]]
// CHECK-NEXT: OpStore %r [[load]]
r = matvar[0][1][2];
return r;
}

Просмотреть файл

@ -0,0 +1,19 @@
// Run: %dxc -T vs_6_0 -E main
// CHECK: %_arr_uint_uint_4 = OpTypeArray %uint %uint_4
// CHECK: %_arr_int_uint_8 = OpTypeArray %int %uint_8
// CHECK: %_arr_float_uint_4 = OpTypeArray %float %uint_4
// CHECK: %_arr__arr_float_uint_4_uint_8 = OpTypeArray %_arr_float_uint_4 %uint_8
void main() {
const uint size = 4 * 3 - 4;
// CHECK: %x = OpVariable %_ptr_Function__arr_uint_uint_4 Function
uint x[4];
// CHECK: %y = OpVariable %_ptr_Function__arr_int_uint_8 Function
int y[size];
// CHECK: %z = OpVariable %_ptr_Function__arr__arr_float_uint_4_uint_8 Function
float z[size][4];
}

Просмотреть файл

@ -0,0 +1,136 @@
// Run: %dxc -T vs_6_0 -E main
struct S1 {
float2 a;
};
struct S2 {
float2 b[2];
};
struct T1 {
S2 c; // Need to split to match T2.f1 & T2.f2
S2 d; // Match T2.f3 exactly
};
struct T2 {
S1 e;
S1 f;
S2 g;
};
// Flattened T2: need to split all fields in T2
struct T3 {
float2 h;
float2 i;
float2 j;
float2 k;
};
void main() {
T1 val1[2];
// val2[0]: Construct T2.e from T1.c.b[0]
// CHECK: [[val1_0:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %uint_0
// CHECK-NEXT: [[T1_c_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 [[val1_0]] %int_0 %int_0
// CHECK-NEXT: [[b_0:%\d+]] = OpAccessChain %_ptr_Function_v2float [[T1_c_b]] %uint_0
// CHECK-NEXT: [[b_0_val:%\d+]] = OpLoad %v2float [[b_0]]
// CHECK-NEXT: [[e_val:%\d+]] = OpCompositeConstruct %S1 [[b_0_val]]
// val2[0]: Construct T2.f from T1.c.b[1]
// CHECK-NEXT: [[val1_0:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %uint_0
// CHECK-NEXT: [[T1_c_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 [[val1_0]] %int_0 %int_0
// CHECK-NEXT: [[b_1:%\d+]] = OpAccessChain %_ptr_Function_v2float [[T1_c_b]] %uint_1
// CHECK-NEXT: [[b_1_val:%\d+]] = OpLoad %v2float [[b_1]]
// CHECK-NEXT: [[f_val:%\d+]] = OpCompositeConstruct %S1 [[b_1_val]]
// val2[0]: Read T1.d as T2.g
// CHECK-NEXT: [[val1_0:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %uint_0
// CHECK-NEXT: [[T1_d:%\d+]] = OpAccessChain %_ptr_Function_S2 [[val1_0]] %int_1
// CHECK-NEXT: [[d_val:%\d+]] = OpLoad %S2 [[T1_d]]
// CHECK-NEXT: [[val2_0:%\d+]] = OpCompositeConstruct %T2 [[e_val]] [[f_val]] [[d_val]]
// val2[1]: Construct T2.e from T1.c.b[0]
// CHECK-NEXT: [[val1_1:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %uint_1
// CHECK-NEXT: [[T1_c_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 [[val1_1]] %int_0 %int_0
// CHECK-NEXT: [[b_0:%\d+]] = OpAccessChain %_ptr_Function_v2float [[T1_c_b]] %uint_0
// CHECK-NEXT: [[b_0_val:%\d+]] = OpLoad %v2float [[b_0]]
// CHECK-NEXT: [[e_val:%\d+]] = OpCompositeConstruct %S1 [[b_0_val]]
// val2[1]: Construct T2.f from T1.c.b[1]
// CHECK-NEXT: [[val1_1:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %uint_1
// CHECK-NEXT: [[T1_c_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 [[val1_1]] %int_0 %int_0
// CHECK-NEXT: [[b_1:%\d+]] = OpAccessChain %_ptr_Function_v2float [[T1_c_b]] %uint_1
// CHECK-NEXT: [[b_1_val:%\d+]] = OpLoad %v2float [[b_1]]
// CHECK-NEXT: [[f_val:%\d+]] = OpCompositeConstruct %S1 [[b_1_val]]
// val2[1]: Read T1.d as T2.g
// CHECK-NEXT: [[val1_1:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %uint_1
// CHECK-NEXT: [[T1_d:%\d+]] = OpAccessChain %_ptr_Function_S2 [[val1_1]] %int_1
// CHECK-NEXT: [[d_val:%\d+]] = OpLoad %S2 [[T1_d]]
// CHECK-NEXT: [[val2_1:%\d+]] = OpCompositeConstruct %T2 [[e_val]] [[f_val]] [[d_val]]
// CHECK-NEXT: [[val2:%\d+]] = OpCompositeConstruct %_arr_T2_uint_2 [[val2_0]] [[val2_1]]
// CHECK-NEXT: OpStore %val2 [[val2]]
T2 val2[2] = {val1};
// val3[0]: Construct T3.h from T1.c.b[0]
// CHECK: [[val1_0:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %int_0
// CHECK-NEXT: [[T1_c_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 [[val1_0]] %int_0 %int_0
// CHECK-NEXT: [[b_0:%\d+]] = OpAccessChain %_ptr_Function_v2float [[T1_c_b]] %uint_0
// CHECK-NEXT: [[h_val:%\d+]] = OpLoad %v2float [[b_0]]
// val3[0]: Construct T3.i from T1.c.b[1]
// CHECK-NEXT: [[val1_0:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %int_0
// CHECK-NEXT: [[T1_c_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 [[val1_0]] %int_0 %int_0
// CHECK-NEXT: [[b_1:%\d+]] = OpAccessChain %_ptr_Function_v2float [[T1_c_b]] %uint_1
// CHECK-NEXT: [[i_val:%\d+]] = OpLoad %v2float [[b_1]]
// val3[0]: Construct T3.j from T1.d.b[0]
// CHECK-NEXT: [[val1_0:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %int_0
// CHECK-NEXT: [[T1_d_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 [[val1_0]] %int_1 %int_0
// CHECK-NEXT: [[b_0:%\d+]] = OpAccessChain %_ptr_Function_v2float [[T1_d_b]] %uint_0
// CHECK-NEXT: [[j_val:%\d+]] = OpLoad %v2float [[b_0]]
// val3[0]: Construct T3.k from T1.d.b[1]
// CHECK-NEXT: [[val1_0:%\d+]] = OpAccessChain %_ptr_Function_T1 %val1 %int_0
// CHECK-NEXT: [[T1_d_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 [[val1_0]] %int_1 %int_0
// CHECK-NEXT: [[b_1:%\d+]] = OpAccessChain %_ptr_Function_v2float [[T1_d_b]] %uint_1
// CHECK-NEXT: [[k_val:%\d+]] = OpLoad %v2float [[b_1]]
// CHECK-NEXT: [[val3_0:%\d+]] = OpCompositeConstruct %T3 [[h_val]] [[i_val]] [[j_val]] [[k_val]]
// val3[1]
// CHECK-NEXT: [[t3_val:%\d+]] = OpLoad %T3 %t3
// val3[2]: Construct T3.h from S1.a
// CHECK-NEXT: [[s1_a:%\d+]] = OpAccessChain %_ptr_Function_v2float %s1 %int_0
// CHECK-NEXT: [[h_val:%\d+]] = OpLoad %v2float [[s1_a]]
// val3[2]: Construct T3.i from S2.b[0]
// CHECK-NEXT: [[s2_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 %s2 %int_0
// CHECK-NEXT: [[s2_b_0:%\d+]] = OpAccessChain %_ptr_Function_v2float [[s2_b]] %uint_0
// CHECK-NEXT: [[i_val:%\d+]] = OpLoad %v2float [[s2_b_0]]
// val3[2]: Construct T3.j from S2.b[1]
// CHECK-NEXT: [[s2_b:%\d+]] = OpAccessChain %_ptr_Function__arr_v2float_uint_2 %s2 %int_0
// CHECK-NEXT: [[s2_b_1:%\d+]] = OpAccessChain %_ptr_Function_v2float [[s2_b]] %uint_1
// CHECK-NEXT: [[j_val:%\d+]] = OpLoad %v2float [[s2_b_1]]
// val3[2]: Construct T3.k from S1.a
// CHECK-NEXT: [[s1_a:%\d+]] = OpAccessChain %_ptr_Function_v2float %s1 %int_0
// CHECK-NEXT: [[k_val:%\d+]] = OpLoad %v2float [[s1_a]]
// CHECK-NEXT: [[val3_2:%\d+]] = OpCompositeConstruct %T3 [[h_val]] [[i_val]] [[j_val]] [[k_val]]
// CHECK-NEXT: [[val3:%\d+]] = OpCompositeConstruct %_arr_T3_uint_3 [[val3_0]] [[t3_val]]
// CHECK-NEXT: OpStore %val3 [[val3]]
S1 s1;
S2 s2;
T3 t3;
T3 val3[3] = {val1[0],
t3,
s1, s2, s1};
}

Просмотреть файл

@ -39,6 +39,7 @@ TEST_F(FileTest, ScalarTypes) { runFileTest("type.scalar.hlsl"); }
TEST_F(FileTest, VectorTypes) { runFileTest("type.vector.hlsl"); }
TEST_F(FileTest, MatrixTypes) { runFileTest("type.matrix.hlsl"); }
TEST_F(FileTest, StructTypes) { runFileTest("type.struct.hlsl"); }
TEST_F(FileTest, ArrayTypes) { runFileTest("type.array.hlsl"); }
TEST_F(FileTest, TypedefTypes) { runFileTest("type.typedef.hlsl"); }
// For constants
@ -46,14 +47,16 @@ TEST_F(FileTest, ScalarConstants) { runFileTest("constant.scalar.hlsl"); }
TEST_F(FileTest, VectorConstants) { runFileTest("constant.vector.hlsl"); }
TEST_F(FileTest, MatrixConstants) { runFileTest("constant.matrix.hlsl"); }
TEST_F(FileTest, StructConstants) { runFileTest("constant.struct.hlsl"); }
TEST_F(FileTest, ArrayConstants) { runFileTest("constant.array.hlsl"); }
// For variables
TEST_F(FileTest, VarInit) { runFileTest("var.init.hlsl"); }
TEST_F(FileTest, VarInitScalarVector) { runFileTest("var.init.hlsl"); }
TEST_F(FileTest, VarInitMatrixMxN) { runFileTest("var.init.matrix.mxn.hlsl"); }
TEST_F(FileTest, VarInitMatrixMx1) { runFileTest("var.init.matrix.mx1.hlsl"); }
TEST_F(FileTest, VarInitMatrix1xN) { runFileTest("var.init.matrix.1xn.hlsl"); }
TEST_F(FileTest, VarInitMatrix1x1) { runFileTest("var.init.matrix.1x1.hlsl"); }
TEST_F(FileTest, VarInitStruct) { runFileTest("var.init.struct.hlsl"); }
TEST_F(FileTest, VarInitArray) { runFileTest("var.init.array.hlsl"); }
TEST_F(FileTest, StaticVar) { runFileTest("var.static.hlsl"); }
// For prefix/postfix increment/decrement
@ -178,8 +181,9 @@ TEST_F(FileTest, OpMatrixAccess1x1) {
runFileTest("op.matrix.access.1x1.hlsl");
}
// For struct accessing operator
// For struct & array accessing operator
TEST_F(FileTest, OpStructAccess) { runFileTest("op.struct.access.hlsl"); }
TEST_F(FileTest, OpStructArray) { runFileTest("op.array.access.hlsl"); }
// For casting
TEST_F(FileTest, CastNoOp) { runFileTest("cast.no-op.hlsl"); }
@ -230,17 +234,8 @@ TEST_F(FileTest, DoStmtBreak) { runFileTest("do-stmt.break.hlsl"); }
// For break statements (mix of breaks in loops and switch)
TEST_F(FileTest, BreakStmtMixed) { runFileTest("break-stmt.mixed.hlsl"); }
// For control flows
TEST_F(FileTest, ControlFlowNestedIfForStmt) { runFileTest("cf.if.for.hlsl"); }
TEST_F(FileTest, ControlFlowLogicalAnd) { runFileTest("cf.logical-and.hlsl"); }
TEST_F(FileTest, ControlFlowLogicalOr) { runFileTest("cf.logical-or.hlsl"); }
TEST_F(FileTest, ControlFlowConditionalOp) { runFileTest("cf.cond-op.hlsl"); }
// For function calls
TEST_F(FileTest, FunctionCall) { runFileTest("fn.call.hlsl"); }
// For function parameters
TEST_F(FileTest, FunctionInOutParam) { runFileTest("fn.param.inout.hlsl"); }
// For discard statement
TEST_F(FileTest, Discard) { runFileTest("cf.discard.hlsl"); }
// For early returns
TEST_F(FileTest, EarlyReturn) { runFileTest("cf.return.early.hlsl"); }
@ -248,8 +243,15 @@ TEST_F(FileTest, EarlyReturnFloat4) {
runFileTest("cf.return.early.float4.hlsl");
}
// For discard
TEST_F(FileTest, Discard) { runFileTest("cf.discard.hlsl"); }
// For control flows
TEST_F(FileTest, ControlFlowNestedIfForStmt) { runFileTest("cf.if.for.hlsl"); }
TEST_F(FileTest, ControlFlowLogicalAnd) { runFileTest("cf.logical-and.hlsl"); }
TEST_F(FileTest, ControlFlowLogicalOr) { runFileTest("cf.logical-or.hlsl"); }
TEST_F(FileTest, ControlFlowConditionalOp) { runFileTest("cf.cond-op.hlsl"); }
// For functions
TEST_F(FileTest, FunctionCall) { runFileTest("fn.call.hlsl"); }
TEST_F(FileTest, FunctionInOutParam) { runFileTest("fn.param.inout.hlsl"); }
// For semantics
TEST_F(FileTest, SemanticPositionVS) {
@ -337,7 +339,7 @@ TEST_F(FileTest, IntrinsicsAsin) { runFileTest("intrinsics.asin.hlsl"); }
TEST_F(FileTest, IntrinsicsAcos) { runFileTest("intrinsics.acos.hlsl"); }
TEST_F(FileTest, IntrinsicsAtan) { runFileTest("intrinsics.atan.hlsl"); }
// SPIR-V specific
// Vulkan/SPIR-V specific
TEST_F(FileTest, SpirvStorageClass) { runFileTest("spirv.storage-class.hlsl"); }
TEST_F(FileTest, SpirvEntryFunctionWrapper) {
runFileTest("spirv.entry-function.wrapper.hlsl");