[spirv] support 'T vk::RawBufferLoad<T = uint>(uint64_t address [, uint alignment = 4] )' (#4226)

VK_KHR_buffer_device_address extension support a Vulkan feature to load data
from an arbitrary raw buffer address. We added
'uint vk::RawBufferLoad(uint64_t address)' before, but it is limited to loading data
with the unsigned integer type.

This commit adds
'T vk::RawBufferLoad<T = uint>(uint64_t address [, uint alignment = 4] )' that
allows us to load data with an arbitrary type.
This commit is contained in:
Jaebaek Seo 2022-04-11 16:44:13 -04:00 committed by GitHub
Parent a47dc0e657
Commit 3cb5d80bb8
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
7 changed files: 359 additions and 44 deletions

View file

@ -3782,7 +3782,8 @@ implicit ``vk`` namespace.
const uint QueueFamilyScope = 5;
uint64_t ReadClock(in uint scope);
uint RawBufferLoad(in uint64_t deviceAddress);
T RawBufferLoad<T = uint>(in uint64_t deviceAddress,
in uint alignment = 4);
} // end namespace
@ -3822,32 +3823,48 @@ For example:
RawBufferLoad
~~~~~~~~~~~~~
This intrinsic function has the following signature:
Vulkan extension `VK_KHR_buffer_device_address <https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_KHR_buffer_device_address.html>`_
supports getting the 64-bit address of a buffer and passing it to SPIR-V as a
Uniform buffer. SPIR-V can use the address to load the data without descriptor.
We add the following intrinsic function to expose a subset of the
`VK_KHR_buffer_device_address <https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_KHR_buffer_device_address.html>`_
and `SPV_KHR_physical_storage_buffer <https://github.com/KhronosGroup/SPIRV-Registry/blob/main/extensions/KHR/SPV_KHR_physical_storage_buffer.asciidoc>`_
functionality to HLSL:
.. code:: hlsl
uint RawBufferLoad(in uint64_t deviceAddress);
// It uses 'uint' for the default template argument. The default alignment
// is 4. Note that 'alignment' must be a constant integer.
T RawBufferLoad<T = uint>(in uint64_t deviceAddress, in uint alignment = 4);
This exposes a subset of the `VK_KHR_buffer_device_address <https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_KHR_buffer_device_address.html>`_
and `SPV_KHR_physical_storage_buffer <https://github.com/KhronosGroup/SPIRV-Registry/blob/main/extensions/KHR/SPV_KHR_physical_storage_buffer.asciidoc>`_
functionality to HLSL.
It allows the shader program to load a single 32 bit value from a GPU
It allows the shader program to load a single value with type T from a GPU
accessible memory at given address, similar to ``ByteAddressBuffer.Load()``.
Like ``ByteAddressBuffer``, this intrinsic requires a 4 byte aligned address.
The intrinsic allows us to set the alignment. It uses 'uint' when the template
argument is missing and it uses 4 for the default alignment. The alignment
argument must be a constant integer if it is given.
Using this intrinsic adds ``PhysicalStorageBufferAddresses`` capability and
``SPV_KHR_physical_storage_buffer`` extension requirements as well as changing
Note that we support the aligned data load, but we do not support setting
memory layout for the data. Since it is supposed to load "arbitrary" data
from a random device address, we assume that it loads some "bytes of data"
but its format or layout is unknown. Therefore, keep it in mind that it
loads ``sizeof(T)`` bytes of data, but loading data with a complicated struct
type ``T`` is undefined behavior because of the missing memory layout support.
Loading data with a memory layout is future work.
Using the intrinsic adds ``PhysicalStorageBufferAddresses`` capability and
``SPV_KHR_physical_storage_buffer`` extension requirements as well as changing
the addressing model to ``PhysicalStorageBuffer64``.
Example:
.. code:: hlsl
uint64_t Address;
uint64_t address;
float4 main() : SV_Target0 {
uint Value = vk::RawBufferLoad(Address);
return asfloat(Value);
double foo = vk::RawBufferLoad<double>(address, 8);
uint bar = vk::RawBufferLoad(address + 8);
...
}
Inline SPIR-V (HLSL version of GL_EXT_spirv_intrinsics)

View file

@ -5723,8 +5723,8 @@ IntrinsicLower gLowerTable[] = {
{IntrinsicOp::IOP_unpack_u8u16, TranslateUnpack, DXIL::OpCode::Unpack4x8},
{IntrinsicOp::IOP_unpack_u8u32, TranslateUnpack, DXIL::OpCode::Unpack4x8},
#ifdef ENABLE_SPIRV_CODEGEN
{ IntrinsicOp::IOP_VkReadClock, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes },
{ IntrinsicOp::IOP_VkRawBufferLoad, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes },
{ IntrinsicOp::IOP_VkReadClock, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes },
{ IntrinsicOp::IOP_Vkext_execution_mode, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes },
{ IntrinsicOp::IOP_Vkext_execution_mode_id, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes },
#endif // ENABLE_SPIRV_CODEGEN

View file

@ -500,6 +500,49 @@ std::vector<DescriptorSetAndBinding> collectDSetBindingsToCombineSampledImage(
return dsetBindings;
}
// Returns a scalar unsigned integer type or a vector of them or a matrix of
// them depending on the scalar/vector/matrix type of boolType. The element
// type of boolType must be BuiltinType::Bool type.
// Returns a scalar unsigned integer type or a vector of them or a matrix of
// them depending on the scalar/vector/matrix type of boolType. The element
// type of boolType must be BuiltinType::Bool type.
//
// Fix: removed the unreachable trailing `return QualType();` — both the
// scalar/vector branch and the matrix branch already return.
QualType getUintTypeForBool(ASTContext &astContext,
                            CompilerInstance &theCompilerInstance,
                            QualType boolType) {
  assert(isBoolOrVecMatOfBoolType(boolType));

  uint32_t vecSize = 1, numRows = 0, numCols = 0;
  const QualType uintType = astContext.UnsignedIntTy;

  // Scalar bool, or vector of bool: produce 'uint' or a uint vector of the
  // same size. A one-element vector degenerates to the scalar type.
  if (isScalarType(boolType) || isVectorType(boolType, nullptr, &vecSize)) {
    return vecSize == 1 ? uintType
                        : astContext.getExtVectorType(uintType, vecSize);
  }

  // Otherwise boolType must be an HLSL matrix of bool; rebuild the same
  // matrix shape with a uint element type.
  const bool isMat = isMxNMatrix(boolType, nullptr, &numRows, &numCols);
  assert(isMat && "boolType must be a scalar/vector/matrix of bool");
  (void)isMat;

  const clang::Type *type = boolType.getCanonicalType().getTypePtr();
  const RecordType *RT = cast<RecordType>(type);
  const auto *templateSpecDecl =
      cast<ClassTemplateSpecializationDecl>(RT->getDecl());
  ClassTemplateDecl *templateDecl = templateSpecDecl->getSpecializedTemplate();
  return getHLSLMatrixType(astContext, theCompilerInstance.getSema(),
                           templateDecl, uintType, numRows, numCols);
}
// Returns true if FD is the 'RawBufferLoad' intrinsic declared in the
// implicit 'vk' namespace.
//
// Fix: the original returned true for any function named 'RawBufferLoad'
// whose DeclContext was NOT a NamespaceDecl (e.g. a user-defined global
// function or a class method), because the namespace-name check only ran
// when the context happened to be a namespace. Require the enclosing
// context to actually be the 'vk' namespace.
bool isVkRawBufferLoadIntrinsic(const clang::FunctionDecl *FD) {
  if (!FD->getName().equals("RawBufferLoad"))
    return false;
  const auto *nsDecl = dyn_cast<NamespaceDecl>(FD->getDeclContext());
  return nsDecl != nullptr && nsDecl->getName().equals("vk");
}
} // namespace
SpirvEmitter::SpirvEmitter(CompilerInstance &ci)
@ -2411,6 +2454,11 @@ SpirvInstruction *SpirvEmitter::doCallExpr(const CallExpr *callExpr,
return processIntrinsicCallExpr(callExpr);
}
// Handle 'vk::RawBufferLoad()'
if (isVkRawBufferLoadIntrinsic(funcDecl)) {
return processRawBufferLoad(callExpr);
}
// Normal standalone functions
return processCall(callExpr);
}
@ -12798,29 +12846,81 @@ SpirvEmitter::processSpvIntrinsicCallExpr(const CallExpr *expr) {
/*isInstr*/ true, expr->getExprLoc());
}
// Returns the alignment (in bytes) requested by a vk::RawBufferLoad() call.
// A single-argument call uses the default 4-byte alignment. Returns 0 after
// emitting a diagnostic when the argument count is wrong or when the
// alignment argument is not a constant integer literal.
uint32_t SpirvEmitter::getAlignmentForRawBufferLoad(const CallExpr *callExpr) {
  const unsigned numArgs = callExpr->getNumArgs();

  // Only the address was supplied: fall back to the default alignment.
  if (numArgs == 1)
    return 4;

  if (numArgs > 2) {
    emitError("number of arguments for vk::RawBufferLoad() must be 1 or 2",
              callExpr->getExprLoc());
    return 0;
  }

  // Look through a non-type template parameter substitution to reach the
  // expression that actually replaced it.
  const Expr *alignmentExpr = callExpr->getArg(1);
  if (const auto *substExpr =
          dyn_cast<SubstNonTypeTemplateParmExpr>(alignmentExpr)) {
    alignmentExpr = substExpr->getReplacement();
  }

  // The alignment must be a compile-time integer literal.
  const auto *literal =
      dyn_cast<IntegerLiteral>(alignmentExpr->IgnoreImplicit());
  if (literal == nullptr) {
    emitError("alignment argument of vk::RawBufferLoad() must be a constant "
              "integer",
              callExpr->getArg(1)->getExprLoc());
    return 0;
  }
  return static_cast<uint32_t>(literal->getValue().getZExtValue());
}
// Handles 'T vk::RawBufferLoad<T>(uint64_t address [, uint alignment])'.
// NOTE(review): this span is diff residue — it interleaves pre-change and
// post-change lines with the +/- markers stripped ('loc', 'address', and
// 'bufferType' are each declared twice), so it is not compilable as shown.
SpirvInstruction *SpirvEmitter::processRawBufferLoad(const CallExpr *callExpr) {
// Old-version lines (removed by the commit):
clang::SourceLocation loc = callExpr->getExprLoc();
const clang::Expr *addressExpr = callExpr->getArg(0);
SpirvInstruction *address = doExpr(addressExpr);
// New version: resolve the requested alignment; 0 means a diagnostic was
// already emitted by getAlignmentForRawBufferLoad().
uint32_t alignment = getAlignmentForRawBufferLoad(callExpr);
if (alignment == 0)
return nullptr;
// Old-version lines (removed): the load used to be hard-coded to 'uint'.
const SpirvPointerType *bufferType =
spvBuilder.getPhysicalStorageBufferType(spvContext.getUIntType(32));
// New version: evaluate the device address and take the loaded type from
// the call's return type (i.e. the template argument T).
SpirvInstruction *address = doExpr(callExpr->getArg(0));
QualType bufferType = callExpr->getCallReturnType(astContext);
SourceLocation loc = callExpr->getExprLoc();
// Non-bool types load directly from the raw device address.
if (!isBoolOrVecMatOfBoolType(bufferType)) {
return loadDataFromRawAddress(address, bufferType, alignment, loc);
}
// Old-version lines (removed):
SpirvUnaryOp *bufferReference =
spvBuilder.createUnaryOp(spv::Op::OpBitcast, bufferType, address, loc);
// If callExpr is `vk::RawBufferLoad<bool>(..)`, we have to load 'uint' and
// convert it to boolean data, because a physical pointer cannot have boolean
// type in Vulkan.
if (alignment % 4 != 0) {
emitWarning("Since boolean is a logical type, we use a unsigned integer "
"type to read/write boolean from a buffer. Therefore "
"alignment for the data with a boolean type must be aligned "
"with 4 bytes",
loc);
}
// Load as uint (or a uint vector/matrix of matching shape) and convert the
// loaded value back to bool.
QualType boolType = bufferType;
bufferType = getUintTypeForBool(astContext, theCompilerInstance, boolType);
SpirvInstruction *load =
loadDataFromRawAddress(address, bufferType, alignment, loc);
auto *loadAsBool = castToBool(load, bufferType, boolType, loc);
loadAsBool->setRValue();
return loadAsBool;
}
bufferReference->setStorageClass(spv::StorageClass::PhysicalStorageBuffer);
// Loads a value of 'bufferType' from a raw 64-bit device address.
// NOTE(review): this span is diff residue — it interleaves pre-change and
// post-change lines with the +/- markers stripped ('loadInst' is declared
// twice, and 'bufferReference'/'ac' belong to the removed version), so it
// is not compilable as shown.
SpirvInstruction *
SpirvEmitter::loadDataFromRawAddress(SpirvInstruction *addressInUInt64,
QualType bufferType, uint32_t alignment,
SourceLocation loc) {
// Summary:
// %address = OpBitcast %ptrTobufferType %addressInUInt64
// %loadInst = OpLoad %bufferType %address
// Old-version lines (removed by the commit): an OpAccessChain through a
// 'bufferReference' that no longer exists in this function.
SpirvAccessChain *ac = spvBuilder.createAccessChain(astContext.UnsignedIntTy,
bufferReference, {}, loc);
// New version: build a PhysicalStorageBuffer pointer type for bufferType.
const HybridPointerType *bufferPtrType =
spvBuilder.getPhysicalStorageBufferType(bufferType);
// Old-version lines (removed): load was hard-coded to uint ...
SpirvLoad *loadInst =
spvBuilder.createLoad(astContext.UnsignedIntTy, ac, loc);
// Raw buffer loads have the same alignment requirement as
// ByteAddressBuffer in HLSL
// ... with a hard-coded alignment of 4 (also removed):
loadInst->setAlignment(4);
// New version: bitcast the 64-bit address to the typed pointer, mark it as
// PhysicalStorageBuffer storage, and load with the caller-given alignment.
SpirvUnaryOp *address = spvBuilder.createUnaryOp(
spv::Op::OpBitcast, bufferPtrType, addressInUInt64, loc);
address->setStorageClass(spv::StorageClass::PhysicalStorageBuffer);
SpirvLoad *loadInst = spvBuilder.createLoad(bufferType, address, loc);
loadInst->setAlignment(alignment);
loadInst->setRValue();
return loadInst;
}

View file

@ -626,8 +626,17 @@ private:
/// Process spirv intrinsic type definition
SpirvInstruction *processSpvIntrinsicTypeDef(const CallExpr *expr);
/// Custom intrinsic to support basic buffer_reference use case
/// Process `T vk::RawBufferLoad<T>(in uint64_t address
/// [, in uint alignment])` that loads data from a given device address.
SpirvInstruction *processRawBufferLoad(const CallExpr *callExpr);
SpirvInstruction *loadDataFromRawAddress(SpirvInstruction *addressInUInt64,
QualType bufferType,
uint32_t alignment,
SourceLocation loc);
/// Returns the alignment of `vk::RawBufferLoad()`.
uint32_t getAlignmentForRawBufferLoad(const CallExpr *callExpr);
/// Process vk::ext_execution_mode intrinsic
SpirvInstruction *processIntrinsicExecutionMode(const CallExpr *expr,
bool useIdParams);

View file

@ -3485,6 +3485,133 @@ private:
#ifdef ENABLE_SPIRV_CODEGEN
// Creates a TemplateTypeParmDecl named 'T' for every intrinsic argument that
// is declared with a function-level template type (INTRIN_TEMPLATE_FROM_FUNCTION
// or LITEMPLATE_ANY). Each parameter gets 'uint' as its default template
// argument, matching 'T vk::RawBufferLoad<T = uint>(..)'.
//
// Fix: use the already-captured 'context' instead of re-fetching the
// ASTContext through m_sema->getASTContext() (they alias the same object by
// construction of 'context'); drop the redundant trailing 'continue'.
SmallVector<NamedDecl *, 1> CreateTemplateTypeParmDeclsForVkIntrinsicFunction(
    const HLSL_INTRINSIC *intrinsic) {
  SmallVector<NamedDecl *, 1> templateTypeParmDecls;
  auto &context = m_sema->getASTContext();
  const HLSL_INTRINSIC_ARGUMENT *pArgs = intrinsic->pArgs;
  UINT uNumArgs = intrinsic->uNumArgs;
  // The 'uint' default-argument TypeSourceInfo is shared by all template
  // parameters; create it lazily on first use.
  TypeSourceInfo *TInfo = nullptr;
  for (UINT i = 0; i < uNumArgs; ++i) {
    if (pArgs[i].uTemplateId != INTRIN_TEMPLATE_FROM_FUNCTION &&
        pArgs[i].uLegalTemplates != LITEMPLATE_ANY)
      continue;
    IdentifierInfo *id = &context.Idents.get("T");
    // NOTE(review): depth and index are both hard-coded to 0, which assumes
    // at most one template type parameter per intrinsic — confirm if more
    // are ever added.
    TemplateTypeParmDecl *templateTypeParmDecl = TemplateTypeParmDecl::Create(
        context, m_vkNSDecl, NoLoc, NoLoc, 0, 0, id, TypenameTrue,
        ParameterPackFalse);
    if (TInfo == nullptr) {
      TInfo = context.CreateTypeSourceInfo(m_context->UnsignedIntTy, 0);
    }
    templateTypeParmDecl->setDefaultArgument(TInfo);
    templateTypeParmDecls.push_back(templateTypeParmDecl);
  }
  return templateTypeParmDecls;
}
// Creates a ParmVarDecl for each non-variadic argument of a vk:: intrinsic.
// paramTypes carries the return type in slot 0 (see VkIntrinsicFunctionType),
// so argument i reads its type from paramTypes[i - numVariadicArgs], while
// paramMods has no return-type slot and is indexed with
// paramMods[i - 1 - numVariadicArgs].
//
// Fix: use the already-captured 'context' instead of re-fetching the
// ASTContext through m_sema->getASTContext().
SmallVector<ParmVarDecl *, g_MaxIntrinsicParamCount>
CreateParmDeclsForVkIntrinsicFunction(
    const HLSL_INTRINSIC *intrinsic,
    const SmallVectorImpl<QualType> &paramTypes,
    const SmallVectorImpl<ParameterModifier> &paramMods) {
  auto &context = m_sema->getASTContext();
  SmallVector<ParmVarDecl *, g_MaxIntrinsicParamCount> paramDecls;
  const HLSL_INTRINSIC_ARGUMENT *pArgs = intrinsic->pArgs;
  UINT uNumArgs = intrinsic->uNumArgs;
  // Slot 0 is the return value; start at 1.
  for (UINT i = 1, numVariadicArgs = 0; i < uNumArgs; ++i) {
    // Variadic arguments get no ParmVarDecl.
    if (IsVariadicArgument(pArgs[i])) {
      ++numVariadicArgs;
      continue;
    }
    IdentifierInfo *id = &context.Idents.get(StringRef(pArgs[i].pName));
    TypeSourceInfo *TInfo =
        context.CreateTypeSourceInfo(paramTypes[i - numVariadicArgs], 0);
    ParmVarDecl *paramDecl = ParmVarDecl::Create(
        context, nullptr, NoLoc, NoLoc, id, paramTypes[i - numVariadicArgs],
        TInfo, StorageClass::SC_None, nullptr,
        paramMods[i - 1 - numVariadicArgs]);
    paramDecls.push_back(paramDecl);
  }
  return paramDecls;
}
// Collects the QualTypes making up a vk:: intrinsic signature. Slot 0 holds
// the return type; slots that use a function-level template type reference
// the matching TemplateTypeParmDecl; variadic slots are omitted entirely.
SmallVector<QualType, 2> VkIntrinsicFunctionParamTypes(
    const HLSL_INTRINSIC *intrinsic,
    const SmallVectorImpl<NamedDecl *> &templateTypeParmDecls) {
  auto &context = m_sema->getASTContext();
  const HLSL_INTRINSIC_ARGUMENT *args = intrinsic->pArgs;
  const UINT numArgs = intrinsic->uNumArgs;
  SmallVector<QualType, 2> types;
  auto parmIt = templateTypeParmDecls.begin();
  for (UINT idx = 0; idx < numArgs; ++idx) {
    const bool usesFunctionTemplate =
        args[idx].uTemplateId == INTRIN_TEMPLATE_FROM_FUNCTION ||
        args[idx].uLegalTemplates == LITEMPLATE_ANY;
    if (usesFunctionTemplate) {
      // Consume the next TemplateTypeParmDecl created for this intrinsic.
      DXASSERT(parmIt != templateTypeParmDecls.end(),
               "Missing TemplateTypeParmDecl for a template type parameter");
      TemplateTypeParmDecl *parmDecl = dyn_cast<TemplateTypeParmDecl>(*parmIt);
      DXASSERT(parmDecl != nullptr, "TemplateTypeParmDecl is nullptr");
      types.push_back(context.getTemplateTypeParmType(
          0, idx, ParameterPackFalse, parmDecl));
      ++parmIt;
      continue;
    }
    // Variadic arguments contribute no type slot.
    if (IsVariadicArgument(args[idx]))
      continue;
    switch (args[idx].uLegalComponentTypes) {
    case LICOMPTYPE_UINT64:
      types.push_back(context.UnsignedLongLongTy);
      break;
    case LICOMPTYPE_UINT:
      types.push_back(context.UnsignedIntTy);
      break;
    case LICOMPTYPE_VOID:
      types.push_back(context.VoidTy);
      break;
    default:
      DXASSERT(false, "Argument type of vk:: intrinsic function is not "
                      "supported");
      break;
    }
  }
  return types;
}
// Builds the clang FunctionType for a vk:: intrinsic. paramTypes[0] is the
// return type; any remaining entries become the parameter types.
QualType
VkIntrinsicFunctionType(const SmallVectorImpl<QualType> &paramTypes,
                        const SmallVectorImpl<ParameterModifier> &paramMods) {
  DXASSERT(!paramTypes.empty(), "Given param type vector is empty");
  ArrayRef<QualType> argTypes;
  if (paramTypes.size() > 1) {
    argTypes = ArrayRef<QualType>(&paramTypes[1], paramTypes.size() - 1);
  }
  FunctionProtoType::ExtProtoInfo epi;
  return m_sema->getASTContext().getFunctionType(paramTypes[0], argTypes, epi,
                                                 paramMods);
}
// Attaches the given ParmVarDecls to functionDecl and records each one in
// the FunctionProtoTypeLoc carried by TInfo.
void SetParmDeclsForVkIntrinsicFunction(
    TypeSourceInfo *TInfo, FunctionDecl *functionDecl,
    const SmallVectorImpl<ParmVarDecl *> &paramDecls) {
  FunctionProtoTypeLoc protoLoc =
      TInfo->getTypeLoc().getAs<FunctionProtoTypeLoc>();
  // Attach the parameters
  unsigned index = 0;
  for (ParmVarDecl *parmDecl : paramDecls) {
    parmDecl->setOwningFunction(functionDecl);
    parmDecl->setScopeInfo(0, index);
    protoLoc.setParam(index, parmDecl);
    ++index;
  }
  functionDecl->setParams(paramDecls);
}
// Adds intrinsic function declarations to the "vk" namespace.
// It does so only if SPIR-V code generation is being done.
// Assumes the implicit "vk" namespace has already been created.
@ -3501,13 +3628,50 @@ private:
const IdentifierInfo &fnII = context.Idents.get(
intrinsic->pArgs->pName, tok::TokenKind::identifier);
DeclarationName functionName(&fnII);
// Create TemplateTypeParmDecl.
SmallVector<NamedDecl *, 1> templateTypeParmDecls =
CreateTemplateTypeParmDeclsForVkIntrinsicFunction(intrinsic);
// Get types for parameters.
SmallVector<QualType, 2> paramTypes =
VkIntrinsicFunctionParamTypes(intrinsic, templateTypeParmDecls);
SmallVector<ParameterModifier, g_MaxIntrinsicParamCount> paramMods;
InitParamMods(intrinsic, paramMods);
// Create FunctionDecl.
QualType fnType = VkIntrinsicFunctionType(paramTypes, paramMods);
TypeSourceInfo *TInfo =
m_sema->getASTContext().CreateTypeSourceInfo(fnType, 0);
FunctionDecl *functionDecl = FunctionDecl::Create(
context, m_vkNSDecl, NoLoc, DeclarationNameInfo(functionName, NoLoc),
/*functionType*/ {}, nullptr, StorageClass::SC_Extern,
InlineSpecifiedFalse, HasWrittenPrototypeTrue);
m_vkNSDecl->addDecl(functionDecl);
functionDecl->setLexicalDeclContext(m_vkNSDecl);
functionDecl->setDeclContext(m_vkNSDecl);
fnType, TInfo, StorageClass::SC_Extern, InlineSpecifiedFalse,
HasWrittenPrototypeTrue);
// Create and set ParmVarDecl.
SmallVector<ParmVarDecl *, g_MaxIntrinsicParamCount> paramDecls =
CreateParmDeclsForVkIntrinsicFunction(intrinsic, paramTypes,
paramMods);
SetParmDeclsForVkIntrinsicFunction(TInfo, functionDecl, paramDecls);
if (!templateTypeParmDecls.empty()) {
TemplateParameterList *templateParmList = TemplateParameterList::Create(
context, NoLoc, NoLoc, templateTypeParmDecls.data(),
templateTypeParmDecls.size(), NoLoc);
functionDecl->setTemplateParameterListsInfo(context, 1,
&templateParmList);
FunctionTemplateDecl *functionTemplate = FunctionTemplateDecl::Create(
context, m_vkNSDecl, NoLoc, functionName, templateParmList,
functionDecl);
functionDecl->setDescribedFunctionTemplate(functionTemplate);
m_vkNSDecl->addDecl(functionTemplate);
functionTemplate->setDeclContext(m_vkNSDecl);
} else {
m_vkNSDecl->addDecl(functionDecl);
functionDecl->setLexicalDeclContext(m_vkNSDecl);
functionDecl->setDeclContext(m_vkNSDecl);
}
functionDecl->setImplicit(true);
}
}
@ -6189,6 +6353,8 @@ bool HLSLExternalSource::MatchArguments(
if (i == 0) {
// [RW]ByteAddressBuffer.Load, default to uint
pNewType = m_context->UnsignedIntTy;
if (pIntrinsic->Op != (UINT)hlsl::IntrinsicOp::MOP_Load)
badArgIdx = std::min(badArgIdx, i);
}
else {
// [RW]ByteAddressBuffer.Store, default to argument type

View file

@ -6,11 +6,33 @@
uint64_t Address;
float4 main() : SV_Target0 {
// CHECK: OpTypePointer PhysicalStorageBuffer %uint
// CHECK: [[addr:%\d+]] = OpLoad %ulong {{%\d+}}
// CHECK-NEXT: [[buf:%\d+]] = OpBitcast %_ptr_PhysicalStorageBuffer_uint [[addr]]
// CHECK-NEXT: [[ac:%\d+]] = OpAccessChain %_ptr_PhysicalStorageBuffer_uint [[buf]]
// CHECK-NEXT: OpLoad %uint [[ac]] Aligned 4
uint Value = vk::RawBufferLoad(Address);
return asfloat(Value);
// CHECK: [[addr:%\d+]] = OpLoad %ulong
// CHECK-NEXT: [[buf:%\d+]] = OpBitcast %_ptr_PhysicalStorageBuffer_float [[addr]]
// CHECK-NEXT: [[load:%\d+]] = OpLoad %float [[buf]] Aligned 4
// CHECK-NEXT: OpStore %x [[load]]
float x = vk::RawBufferLoad<float>(Address);
// CHECK: [[addr:%\d+]] = OpLoad %ulong
// CHECK-NEXT: [[buf:%\d+]] = OpBitcast %_ptr_PhysicalStorageBuffer_double [[addr]]
// CHECK-NEXT: [[load:%\d+]] = OpLoad %double [[buf]] Aligned 8
// CHECK-NEXT: OpStore %y [[load]]
double y = vk::RawBufferLoad<double>(Address, 8);
// CHECK: [[buf:%\d+]] = OpBitcast %_ptr_PhysicalStorageBuffer_uint
// CHECK-NEXT: [[load:%\d+]] = OpLoad %uint [[buf]] Aligned 4
// CHECK-NEXT: [[z:%\d+]] = OpINotEqual %bool [[load]] %uint_0
// CHECK-NEXT: OpStore %z [[z]]
bool z = vk::RawBufferLoad<bool>(Address, 4);
// CHECK: [[buf:%\d+]] = OpBitcast %_ptr_PhysicalStorageBuffer_v2float
// CHECK-NEXT: [[load:%\d+]] = OpLoad %v2float [[buf]] Aligned 8
// CHECK-NEXT: OpStore %w [[load]]
float2 w = vk::RawBufferLoad<float2>(Address, 8);
// CHECK: [[buf:%\d+]] = OpBitcast %_ptr_PhysicalStorageBuffer_uint
// CHECK-NEXT: [[load:%\d+]] = OpLoad %uint [[buf]] Aligned 4
// CHECK-NEXT: OpStore %v [[load]]
uint v = vk::RawBufferLoad(Address);
return float4(w.x, x, y, z);
}

View file

@ -380,7 +380,8 @@ $type2 [[rn]] select(in bool<> cond, in $match<1, 2> any<> t, in $type2 f);
namespace VkIntrinsics {
u64 [[]] ReadClock(in uint scope);
uint [[ro]] RawBufferLoad(in u64 addr);
$funcT [[ro]] RawBufferLoad(in u64 addr);
$funcT [[ro]] RawBufferLoad(in u64 addr, in uint alignment);
void [[]] ext_execution_mode(in uint mode, ...);
void [[]] ext_execution_mode_id(in uint mode, ...);