Fix clang-format comment indents on Windows for winml/ (#17144)

On Windows, clang-format has a bug when AlignTrailingComments.Kind is
set to `Leave`
(https://clang.llvm.org/docs/ClangFormatStyleOptions.html#aligntrailingcomments),
where it keeps adding indentation to trailing comments after each formatting
run.

This PR changes the setting to always align trailing comments so we do not hit the bug.

As a consequence of the option change, some of the files need to be
reformatted. Note that this option now matches the rest of the repository.
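To illustrate the drift (a schematic sketch of the reported behavior, not a verified reproduction; the declaration and comment below are made up):

    // After the 1st formatting run:
    int x = 0;  // trailing comment
    // After the 2nd run, the buggy `Leave` handling adds indentation before the comment:
    int x = 0;      // trailing comment
    // After the 3rd run it drifts further, and so on:
    int x = 0;          // trailing comment

With comments always aligned, repeated runs converge to the same output instead of drifting.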
Justin Chu 2023-08-14 23:50:14 -04:00 committed by GitHub
Parent 24e0bd37b4
Commit 416dc2e84d
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
58 changed files: 304 additions and 304 deletions


@ -31,7 +31,7 @@ AlignConsecutiveMacros:
AlignEscapedNewlines: Left
AlignOperands: DontAlign
AlignTrailingComments:
Kind: Leave
Kind: Always
OverEmptyLines: 0
AllowAllArgumentsOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
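With `Kind: Always`, clang-format aligns the trailing comments of consecutive lines to a common column, so the reformatted files below pick up their new comment indentation from the tool itself. A minimal sketch of the effect, using made-up declarations:

    // As written (comments at arbitrary columns):
    int a = 1; // first
    int value = 2;      // second

    // After clang-format with AlignTrailingComments Kind: Always:
    int a = 1;     // first
    int value = 2; // second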


@ -14,13 +14,13 @@ const OrtApi* GetVersion1Api();
namespace winmla = Windows::AI::MachineLearning::Adapter;
static constexpr WinmlAdapterApi winml_adapter_api_1 = {
// Schema override
// Schema override
&winmla::OverrideSchema,
// OrtEnv methods
// OrtEnv methods
&winmla::EnvConfigureCustomLoggerAndProfiler,
// OrtModel methods
// OrtModel methods
&winmla::CreateModelFromPath,
&winmla::CreateModelFromData,
&winmla::CloneModel,
@ -43,11 +43,11 @@ static constexpr WinmlAdapterApi winml_adapter_api_1 = {
&winmla::ModelEnsureNoFloat16,
&winmla::SaveModel,
// OrtSessionOptions methods
// OrtSessionOptions methods
&OrtSessionOptionsAppendExecutionProvider_CPU,
&winmla::OrtSessionOptionsAppendExecutionProviderEx_DML,
// OrtSession methods
// OrtSession methods
&winmla::CreateSessionWithoutModel,
&winmla::SessionGetExecutionProvider,
&winmla::SessionInitialize,
@ -61,7 +61,7 @@ static constexpr WinmlAdapterApi winml_adapter_api_1 = {
&winmla::SessionGetIntraOpThreadSpinning,
&winmla::SessionGetNamedDimensionsOverrides,
// Dml methods (TODO need to figure out how these need to move to session somehow...)
// Dml methods (TODO need to figure out how these need to move to session somehow...)
&winmla::DmlExecutionProviderFlushContext,
&winmla::DmlExecutionProviderReleaseCompletedReferences,
&winmla::DmlCopyTensor,
@ -93,7 +93,7 @@ static constexpr WinmlAdapterApi winml_adapter_api_1 = {
&winmla::JoinModels,
&winmla::CreateThreadPool,
// Release
// Release
&winmla::ReleaseModel,
&winmla::ReleaseThreadPool,
};


@ -383,7 +383,7 @@ struct WinmlAdapterApi {
// Dml methods (TODO need to figure out how these need to move to session somehow...)
/**
/**
* SessionGetNumberOfIntraOpThreads
* This api returns the number of intra operator threads set on the OrtSession.
*
@ -392,7 +392,7 @@ struct WinmlAdapterApi {
OrtStatus*(ORT_API_CALL* SessionGetNumberOfIntraOpThreads)(_In_ OrtSession* session, _Out_ uint32_t* num_threads)
NO_EXCEPTION;
/**
/**
* SessionGetIntrapOpThreadSpinning
* This api returns false if the ort session options config entry "session.intra_op.allow_spinning" is set to "0", and true otherwise
*
@ -401,7 +401,7 @@ struct WinmlAdapterApi {
OrtStatus*(ORT_API_CALL* SessionGetIntraOpThreadSpinning)(_In_ OrtSession* session, _Out_ bool* allow_spinning)
NO_EXCEPTION;
/**
/**
* SessionGetNamedDimensionsOverrides
* This api returns the named dimension overrides that are specified for this session
*


@ -1,5 +1,5 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Licensed under the MIT License.
#pragma once
#include "adapter/pch.h"
@ -69,7 +69,7 @@ Microsoft::WRL::ComPtr<IDMLDevice> CreateDmlDevice(ID3D12Device* d3d12Device) {
namespace onnxruntime {
void DmlConfigureProviderFactoryMetacommandsEnabled(IExecutionProviderFactory* factory, bool metacommandsEnabled);
}// namespace onnxruntime
} // namespace onnxruntime
#endif // USE_DML


@ -3,7 +3,7 @@
* File built with Microsoft(R) MIDLRT Compiler Engine Version 10.00.0228
*/
#pragma warning(disable : 4049) /* more than 64k source lines */
#pragma warning(disable : 4049) /* more than 64k source lines */
/* verify that the <rpcndr.h> version is high enough to compile this file*/
#ifndef __REQUIRED_RPCNDR_H_VERSION__
@ -41,33 +41,33 @@
#if !defined(SPECIFIC_API_CONTRACT_DEFINITIONS)
#if !defined(WINDOWS_APPLICATIONMODEL_CALLS_CALLSPHONECONTRACT_VERSION)
#define WINDOWS_APPLICATIONMODEL_CALLS_CALLSPHONECONTRACT_VERSION 0x50000
#endif // defined(WINDOWS_APPLICATIONMODEL_CALLS_CALLSPHONECONTRACT_VERSION)
#endif // defined(WINDOWS_APPLICATIONMODEL_CALLS_CALLSPHONECONTRACT_VERSION)
#if !defined(WINDOWS_FOUNDATION_FOUNDATIONCONTRACT_VERSION)
#define WINDOWS_FOUNDATION_FOUNDATIONCONTRACT_VERSION 0x40000
#endif // defined(WINDOWS_FOUNDATION_FOUNDATIONCONTRACT_VERSION)
#endif // defined(WINDOWS_FOUNDATION_FOUNDATIONCONTRACT_VERSION)
#if !defined(WINDOWS_FOUNDATION_UNIVERSALAPICONTRACT_VERSION)
#define WINDOWS_FOUNDATION_UNIVERSALAPICONTRACT_VERSION 0xa0000
#endif // defined(WINDOWS_FOUNDATION_UNIVERSALAPICONTRACT_VERSION)
#endif // defined(WINDOWS_FOUNDATION_UNIVERSALAPICONTRACT_VERSION)
#if !defined(WINDOWS_NETWORKING_SOCKETS_CONTROLCHANNELTRIGGERCONTRACT_VERSION)
#define WINDOWS_NETWORKING_SOCKETS_CONTROLCHANNELTRIGGERCONTRACT_VERSION 0x30000
#endif // defined(WINDOWS_NETWORKING_SOCKETS_CONTROLCHANNELTRIGGERCONTRACT_VERSION)
#endif // defined(WINDOWS_NETWORKING_SOCKETS_CONTROLCHANNELTRIGGERCONTRACT_VERSION)
#if !defined(WINDOWS_PHONE_PHONECONTRACT_VERSION)
#define WINDOWS_PHONE_PHONECONTRACT_VERSION 0x10000
#endif // defined(WINDOWS_PHONE_PHONECONTRACT_VERSION)
#endif // defined(WINDOWS_PHONE_PHONECONTRACT_VERSION)
#if !defined(WINDOWS_PHONE_PHONEINTERNALCONTRACT_VERSION)
#define WINDOWS_PHONE_PHONEINTERNALCONTRACT_VERSION 0x10000
#endif // defined(WINDOWS_PHONE_PHONEINTERNALCONTRACT_VERSION)
#endif // defined(WINDOWS_PHONE_PHONEINTERNALCONTRACT_VERSION)
#if !defined(WINDOWS_UI_WEBUI_CORE_WEBUICOMMANDBARCONTRACT_VERSION)
#define WINDOWS_UI_WEBUI_CORE_WEBUICOMMANDBARCONTRACT_VERSION 0x10000
#endif // defined(WINDOWS_UI_WEBUI_CORE_WEBUICOMMANDBARCONTRACT_VERSION)
#endif // defined(WINDOWS_UI_WEBUI_CORE_WEBUICOMMANDBARCONTRACT_VERSION)
#endif // defined(SPECIFIC_API_CONTRACT_DEFINITIONS)
#endif // defined(SPECIFIC_API_CONTRACT_DEFINITIONS)
// Header files for imported files
#include "Windows.Foundation.h"
@ -81,7 +81,7 @@
#pragma once
#pragma warning(pop)
#else // !defined(__cplusplus)
#else // !defined(__cplusplus)
/* Forward Declarations */
#pragma warning(push)
@ -90,8 +90,8 @@
#pragma once
#pragma warning(pop)
#endif // defined(__cplusplus)
#endif // defined(__cplusplus)
#pragma pop_macro("MIDL_CONST_ID")
#endif // __dualapipartitionattribute_p_h__
#endif // __dualapipartitionattribute_p_h__
#endif // __dualapipartitionattribute_h__
#endif // __dualapipartitionattribute_h__


@ -39,4 +39,4 @@ void LearningModelExperimental::SetName(hstring const& model_name) {
modelp->SetName(model_name);
}
}// namespace WINML_EXPERIMENTALP
} // namespace WINML_EXPERIMENTALP


@ -20,11 +20,11 @@ struct LearningModelExperimental : LearningModelExperimentalT<LearningModelExper
Microsoft::AI::MachineLearning::LearningModel model_;
};
}// namespace WINML_EXPERIMENTALP
} // namespace WINML_EXPERIMENTALP
namespace WINML_EXPERIMENTAL::factory_implementation {
struct LearningModelExperimental
: LearningModelExperimentalT<LearningModelExperimental, implementation::LearningModelExperimental> {};
}// namespace WINML_EXPERIMENTAL::factory_implementation
} // namespace WINML_EXPERIMENTAL::factory_implementation


@ -40,4 +40,4 @@ const std::unordered_map<std::string, std::string>& LearningModelJoinOptions::Ge
return linkages_;
}
}// namespace WINML_EXPERIMENTALP
} // namespace WINML_EXPERIMENTALP


@ -33,4 +33,4 @@ namespace WINML_EXPERIMENTAL::factory_implementation {
struct LearningModelJoinOptions
: LearningModelJoinOptionsT<LearningModelJoinOptions, implementation::LearningModelJoinOptions> {};
}// namespace WINML_EXPERIMENTAL::factory_implementation
} // namespace WINML_EXPERIMENTAL::factory_implementation


@ -40,10 +40,10 @@ struct LearningModelOperator : LearningModelOperatorT<LearningModelOperator> {
wfc::IMap<winrt::hstring, winrt::hstring> output_mapping_;
};
} // namespace WINML_EXPERIMENTALP
} // namespace WINML_EXPERIMENTALP
namespace WINML_EXPERIMENTAL::factory_implementation {
struct LearningModelOperator : LearningModelOperatorT<LearningModelOperator, implementation::LearningModelOperator> {};
}// namespace WINML_EXPERIMENTAL::factory_implementation
} // namespace WINML_EXPERIMENTAL::factory_implementation


@ -98,4 +98,4 @@ winml_experimental::LearningModelBuilder LearningModelOperatorSet::Add(
return builder_;
}
}// namespace WINML_EXPERIMENTALP
} // namespace WINML_EXPERIMENTALP


@ -13,4 +13,4 @@ struct LearningModelOperatorSet : LearningModelOperatorSetT<LearningModelOperato
winml_experimental::LearningModelBuilder builder_;
wfc::IVector<winml_experimental::LearningModelOperator> operators_;
};
}// namespace WINML_EXPERIMENTALP
} // namespace WINML_EXPERIMENTALP


@ -21,4 +21,4 @@ winml_experimental::LearningModelBuilder LearningModelOutputs::Add(winml::ILearn
return builder_;
}
} // namespace WINML_EXPERIMENTALP
} // namespace WINML_EXPERIMENTALP


@ -25,4 +25,4 @@ struct LearningModelSessionOptionsExperimental : LearningModelSessionOptionsExpe
LearningModelSessionOptionsExperimental,
implementation::LearningModelSessionOptionsExperimental> {};
}// namespace WINML_EXPERIMENTAL::factory_implementation
} // namespace WINML_EXPERIMENTAL::factory_implementation


@ -127,15 +127,15 @@ HRESULT _winml::GetDXCoreHardwareAdapterWithPreference(
HRESULT _winml::CreateD3D11On12Device(ID3D12Device* device12, ID3D11Device** device11) {
return CommonDeviceHelpers::RunDelayLoadedApi(
D3D11On12CreateDevice,
device12, // pointer to d3d12 device
device12, // pointer to d3d12 device
D3D11_CREATE_DEVICE_BGRA_SUPPORT, // required in order to interop with Direct2D
nullptr, // feature level (defaults to d3d12)
0, // size of feature levels in bytes
nullptr, // an array of unique command queues for D3D11On12 to use
0, // size of the command queue array
0, // D3D12 device node to use
device11, // d3d11 device out param
nullptr, // pointer to d3d11 device context (unused)
nullptr, // feature level (defaults to d3d12)
0, // size of feature levels in bytes
nullptr, // an array of unique command queues for D3D11On12 to use
0, // size of the command queue array
0, // D3D12 device node to use
device11, // d3d11 device out param
nullptr, // pointer to d3d11 device context (unused)
nullptr
); // pointer to the returned feature level (unused)
}


@ -44,4 +44,4 @@ void StoreSpanIntoDisjointBuffers(
LoadOrStoreDisjointBuffers(false /*store into buffers*/, num_buffers, get_buffer, buffer_span);
}
} // namespace _winml
} // namespace _winml


@ -3,8 +3,8 @@
namespace _winml {
NominalRangeConverter::NominalRangeConverter(winml::LearningModelPixelRange pixelRange) {
// For Normalization: the formula is input_range[min, max] / scale - shift
// For DeNormalization: the formula is (input_range[min, max] + shift) * scale
// For Normalization: the formula is input_range[min, max] / scale - shift
// For DeNormalization: the formula is (input_range[min, max] + shift) * scale
if (pixelRange == winml::LearningModelPixelRange::ZeroTo255) {
scale = 1.f;
shift = 0;
@ -17,9 +17,9 @@ NominalRangeConverter::NominalRangeConverter(winml::LearningModelPixelRange pixe
}
};
// [0, 255] --> [0, 255]
// [0, 255] / 255 --> [0, 1]
// [0, 255] * 2 / 255 - 1 --> [-1, 1]
// [0, 255] --> [0, 255]
// [0, 255] / 255 --> [0, 1]
// [0, 255] * 2 / 255 - 1 --> [-1, 1]
float NominalRangeConverter::Normalize(float val) const {
return val / scale - shift;
}
@ -38,9 +38,9 @@ __m128 NominalRangeConverter::Normalize(__m128 sse_data) const {
}
#endif
// [0, 255] --> [0, 255]
// ([0, 1] + 0 ) * 255 -> [0, 1]
// ([-1, 1] + 1) * 255 / 2 --> [-1, 1]
// [0, 255] --> [0, 255]
// ([0, 1] + 0 ) * 255 -> [0, 1]
// ([-1, 1] + 1) * 255 / 2 --> [-1, 1]
float NominalRangeConverter::Denormalize(float val) const {
return scale * (val + shift);
}
@ -58,4 +58,4 @@ __m128 NominalRangeConverter::Denormalize(__m128 sse_data) const {
return _mm_mul_ps(sse_added, sse_scale);
}
#endif
} // namespace _winml
} // namespace _winml


@ -15,4 +15,4 @@ void StoreSpanIntoDisjointBuffers(
size_t num_buffers, std::function<gsl::span<byte>(size_t)> get_buffer, gsl::span<byte>& buffer_span
);
} // namespace _winml
} // namespace _winml


@ -32,4 +32,4 @@ class NominalRangeConverter {
float scale;
int32_t shift;
};
} // namespace _winml
} // namespace _winml


@ -80,7 +80,7 @@ CREATE_TENSOR(TensorInt64Bit, int64_t, int64_t)
CREATE_TENSOR(TensorFloat16Bit, _winml::Half, float)
#pragma warning(push)
#pragma warning(disable : 4702) // Unreachable code (one of TensorBase's constructor unconditionally throws for \
#pragma warning(disable : 4702) // Unreachable code (one of TensorBase's constructor unconditionally throws for \
// std::string because it's not supported with D3D12 resources)
CREATE_TENSOR(TensorString, std::string, winrt::hstring)
#pragma warning(pop)
@ -420,8 +420,8 @@ inline winml::ILearningModelFeatureValue CreateFeatureValueFromInspectable(
BindingType, const wf::IInspectable& inspectable, const winml::ITensorFeatureDescriptor& descriptor
);
constexpr std::array<TensorCreator, 13> creators = {
// Vector and VectorViews of float16 and int8 collide with float and uint8 respectively.
// They are omitted because of this ambiguity and are not constructible via raw winrt collections.
// Vector and VectorViews of float16 and int8 collide with float and uint8 respectively.
// They are omitted because of this ambiguity and are not constructible via raw winrt collections.
CreateTensorValueFromInspectable<winmlp::TensorBoolean, bool>,
CreateTensorValueFromInspectable<winmlp::TensorFloat, float>,
CreateTensorValueFromInspectable<winmlp::TensorDouble, double>,


@ -90,7 +90,7 @@ ImageColorSpaceGamma ImageFeatureDescriptor::GetColorSpaceGamma() {
HRESULT
ImageFeatureDescriptor::GetDescriptorInfo(_winml::IEngineFactory* engine_factory, _winml::IDescriptorInfo** info) {
// TODO: Need to add denotations here
// TODO: Need to add denotations here
engine_factory->CreateTensorDescriptorInfo(tensor_kind_, shape_.data(), shape_.size(), info);
return S_OK;
}


@ -424,7 +424,7 @@ std::optional<ImageFeatureValue::ImageResourceMetadata> ImageFeatureValue::GetIn
THROW_HR(WINML_ERR_INVALID_BINDING);
}
//NCHW layout
//NCHW layout
auto imageTensorDescriptor = CreateImageTensorDescriptor(
tensorKind, pixelFormat.value(), pixelRange.value(), m_batchSize, descriptorWidth, descriptorHeight
);


@ -84,17 +84,17 @@ LearningModel::LearningModel(const hstring& path, const winml::ILearningModelOpe
0, // size of mapping object, high
0, // size of mapping object, low
NULL
)); // name of mapping object
)); // name of mapping object
WINML_THROW_HR_IF_TRUE_MSG(__HRESULT_FROM_WIN32(GetLastError()), file_mapping == nullptr, "Model load failed!");
auto buffer = MapViewOfFile(
file_mapping.get(), // handle to mapping object
FILE_MAP_READ, // read/write
0, // high-order 32 bits of file offset
0, // low-order 32 bits of file offset
file_mapping.get(), // handle to mapping object
FILE_MAP_READ, // read/write
0, // high-order 32 bits of file offset
0, // low-order 32 bits of file offset
0
); // number of bytes to map. 0 means read whole file.
); // number of bytes to map. 0 means read whole file.
WINML_THROW_HR_IF_TRUE_MSG(__HRESULT_FROM_WIN32(GetLastError()), buffer == nullptr, "Model load failed!");
LARGE_INTEGER file_size;


@ -21,7 +21,7 @@ struct MapFeatureDescriptor : MapFeatureDescriptorT<
winml::ILearningModelFeatureDescriptor valueKind
);
// IMapDescriptor
// IMapDescriptor
winml::TensorKind KeyKind();
winml::ILearningModelFeatureDescriptor ValueDescriptor();


@ -32,7 +32,7 @@ numeric_data::numeric_data(
buffers_ = {combined_buffer_};
auto buffer = buffer_at(0);
// The initial release of WinML (RS5) shipped with behavior that would
// The initial release of WinML (RS5) shipped with behavior that would
// zero-initialize uninitialized tensors. After measuring, the performance impact
// of memsetting the memory buffer is quite small (<1ms for 3channel 720x720 TensorFloats).
// To maintain parity with RS5 behavior, we always zero out the memory buffer.


@ -134,7 +134,7 @@ HRESULT IsFloat16Blocked(ID3D12Device& device, bool* isBlocked) {
*isBlocked = CheckAdapterFP16Blocked(isMcdmAdapter, vendorId, majorVersion, minorVersion);
return S_OK;
}
}// namespace
} // namespace
namespace CommonDeviceHelpers {
constexpr uint32_t c_intelVendorId = 0x8086;


@ -350,7 +350,7 @@ void SessionGetInputRequiredDeviceId() {
);
WINML_EXPECT_EQUAL(0, device_id);
}
}// namespace
} // namespace
const AdapterDmlEpTestApi& getapi() {
static constexpr AdapterDmlEpTestApi api = {


@ -348,7 +348,7 @@ void GetNumberOfIntraOpThreads() {
winml_adapter_api->SessionGetNumberOfIntraOpThreads(session.get(), &num_threads);
WINML_EXPECT_EQUAL(num_threads, desired_num_threads);
}
}// namespace
} // namespace
const AdapterSessionTestAPI& getapi() {
static AdapterSessionTestAPI api = {


@ -14,7 +14,7 @@ static void AdapterTestSetup() {
ort_api = OrtGetApiBase()->GetApi(ORT_API_VERSION);
winml_adapter_api = OrtGetWinMLAdapter(ORT_API_VERSION);
// for model tests
// for model tests
std::wstring module_path = FileHelpers::GetModulePath();
std::string squeezenet_path = std::wstring_convert<std::codecvt_utf8<wchar_t>>().to_bytes(
module_path + L"squeezenet_modifiedforruntimestests.onnx"


@ -242,11 +242,11 @@ static void CloseModelCheckMetadata() {
}
static void CheckLearningModelPixelRange() {
std::vector<std::wstring> modelPaths = { // NominalRange_0_255 and image output
std::vector<std::wstring> modelPaths = {// NominalRange_0_255 and image output
L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_0_255.onnx",
// Normalized_0_1 and image output
// Normalized_0_1 and image output
L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_0_1.onnx",
// Normalized_1_1 and image output
// Normalized_1_1 and image output
L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_1_1.onnx"};
std::vector<LearningModelPixelRange> pixelRanges = {
LearningModelPixelRange::ZeroTo255, LearningModelPixelRange::ZeroToOne, LearningModelPixelRange::MinusOneToOne};


@ -69,7 +69,7 @@ static void CpuSqueezeNetBindInputTensorAsInspectable() {
static void CastMapInt64() {
WINML_EXPECT_NO_THROW(LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"castmap-int64.onnx"));
// TODO: Check Descriptor
// TODO: Check Descriptor
}
static void DictionaryVectorizerMapInt64() {
@ -82,7 +82,7 @@ static void DictionaryVectorizerMapInt64() {
WINML_EXPECT_TRUE(mapDescriptor.KeyKind() == TensorKind::Int64);
WINML_EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor);
auto tensorDescriptor = mapDescriptor.ValueDescriptor().as<TensorFeatureDescriptor>();
// empty size means tensor of scalar value
// empty size means tensor of scalar value
WINML_EXPECT_TRUE(tensorDescriptor.Shape().Size() == 0);
WINML_EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float);
@ -95,7 +95,7 @@ static void DictionaryVectorizerMapInt64() {
auto mapInputName = inputDescriptor.Name();
// Bind as IMap
// Bind as IMap
auto abiMap = winrt::single_threaded_map(std::move(map));
binding.Bind(mapInputName, abiMap);
auto mapInputInspectable = abiMap.as<wf::IInspectable>();
@ -104,7 +104,7 @@ static void DictionaryVectorizerMapInt64() {
WINML_EXPECT_TRUE(first.Current().Value() == mapInputInspectable);
WINML_EXPECT_TRUE(binding.Lookup(mapInputName) == mapInputInspectable);
// Bind as IMapView
// Bind as IMapView
auto mapView = abiMap.GetView();
binding.Bind(mapInputName, mapView);
mapInputInspectable = mapView.as<wf::IInspectable>();
@ -126,7 +126,7 @@ static void DictionaryVectorizerMapString() {
WINML_EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor);
auto tensorDescriptor = mapDescriptor.ValueDescriptor().as<TensorFeatureDescriptor>();
// empty size means tensor of scalar value
// empty size means tensor of scalar value
WINML_EXPECT_TRUE(tensorDescriptor.Shape().Size() == 0);
WINML_EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float);
@ -169,7 +169,7 @@ static void RunZipMapInt64(winml::LearningModel model, OutputBindingStrategy bin
std::vector<float> inputs = {0.5f, 0.25f, 0.125f};
std::vector<int64_t> shape = {1, 3};
// Bind inputs
// Bind inputs
auto inputTensor = TensorFloat::CreateFromArray(shape, winrt::array_view<const float>(std::move(inputs)));
binding.Bind(winrt::hstring(L"X"), inputTensor);
@ -177,21 +177,21 @@ static void RunZipMapInt64(winml::LearningModel model, OutputBindingStrategy bin
typedef IVector<ABIMap> ABISequeneceOfMap;
ABISequeneceOfMap abiOutput = nullptr;
// Bind outputs
// Bind outputs
if (bindingStrategy == OutputBindingStrategy::Bound) {
abiOutput = winrt::single_threaded_vector<ABIMap>();
binding.Bind(winrt::hstring(L"Y"), abiOutput);
}
// Evaluate
// Evaluate
auto result = session.Evaluate(binding, L"0").Outputs();
if (bindingStrategy == OutputBindingStrategy::Bound) {
// from output binding
// from output binding
const auto& out1 = abiOutput.GetAt(0);
const auto& out2 = result.Lookup(L"Y").as<IVectorView<ABIMap>>().GetAt(0);
WINML_LOG_COMMENT((std::ostringstream() << "size: " << out1.Size()).str());
// check outputs
// check outputs
auto iter1 = out1.First();
auto iter2 = out2.First();
for (uint32_t i = 0, size = (uint32_t)inputs.size(); i < size; ++i) {
@ -231,7 +231,7 @@ static void ZipMapInt64Unbound() {
}
static void ZipMapString() {
// output constraint: "seq(map(string, float))" or "seq(map(int64, float))"
// output constraint: "seq(map(string, float))" or "seq(map(int64, float))"
LearningModel learningModel = nullptr;
WINML_EXPECT_NO_THROW(APITest::LoadModel(L"zipmap-string.onnx", learningModel));
auto outputs = learningModel.OutputFeatures();
@ -263,11 +263,11 @@ static void ZipMapString() {
binding.Bind(winrt::hstring(L"X"), inputTensor);
binding.Bind(winrt::hstring(L"Y"), ABIOutput);
auto result = session.Evaluate(binding, L"0").Outputs();
// from output binding
// from output binding
const auto& out1 = ABIOutput.GetAt(0);
const auto& out2 = result.Lookup(L"Y").as<IVectorView<ABIMap>>().GetAt(0);
WINML_LOG_COMMENT((std::ostringstream() << "size: " << out1.Size()).str());
// single key,value pair for each map
// single key,value pair for each map
auto iter1 = out1.First();
auto iter2 = out2.First();
for (uint32_t i = 0, size = (uint32_t)inputs.size(); i < size; ++i) {
@ -323,30 +323,30 @@ static void ImageBindingDimensions() {
LearningModelSession learningModelSession = nullptr;
LearningModelDevice leraningModelDevice = nullptr;
std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx";
// load a model with expected input size: 224 x 224
// load a model with expected input size: 224 x 224
WINML_EXPECT_NO_THROW(leraningModelDevice = LearningModelDevice(LearningModelDeviceKind::Default));
WINML_EXPECT_NO_THROW(learningModel = LearningModel::LoadFromFilePath(filePath));
WINML_EXPECT_TRUE(learningModel != nullptr);
WINML_EXPECT_NO_THROW(learningModelSession = LearningModelSession(learningModel, leraningModelDevice));
WINML_EXPECT_NO_THROW(learningModelBinding = LearningModelBinding(learningModelSession));
// Create input images and execute bind
// Test Case 1: both width and height are larger than model expects
// Create input images and execute bind
// Test Case 1: both width and height are larger than model expects
VideoFrame inputImage1(BitmapPixelFormat::Rgba8, 1000, 1000);
ImageFeatureValue inputTensor = ImageFeatureValue::CreateFromVideoFrame(inputImage1);
WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"data_0", inputTensor));
// Test Case 2: only height is larger, while width is smaller
// Test Case 2: only height is larger, while width is smaller
VideoFrame inputImage2(BitmapPixelFormat::Rgba8, 20, 1000);
inputTensor = ImageFeatureValue::CreateFromVideoFrame(inputImage2);
WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"data_0", inputTensor));
// Test Case 3: only width is larger, while height is smaller
// Test Case 3: only width is larger, while height is smaller
VideoFrame inputImage3(BitmapPixelFormat::Rgba8, 1000, 20);
inputTensor = ImageFeatureValue::CreateFromVideoFrame(inputImage3);
WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"data_0", inputTensor));
// Test Case 4: both width and height are smaller than model expects
// Test Case 4: both width and height are smaller than model expects
VideoFrame inputImage4(BitmapPixelFormat::Rgba8, 20, 20);
inputTensor = ImageFeatureValue::CreateFromVideoFrame(inputImage4);
WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"data_0", inputTensor));
@ -367,60 +367,60 @@ static void VerifyInvalidBindExceptions() {
auto ensureWinmlSizeMismatch = std::bind(matchException, std::placeholders::_1, WINML_ERR_SIZE_MISMATCH);
auto ensureWinmlInvalidBinding = std::bind(matchException, std::placeholders::_1, WINML_ERR_INVALID_BINDING);
/*
/*
Verify tensor bindings throw correct bind exceptions
*/
// Bind invalid image as tensorfloat input
// Bind invalid image as tensorfloat input
auto image = FileHelpers::LoadImageFeatureValue(L"227x227.png");
WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", image), winrt::hresult_error, ensureWinmlSizeMismatch);
// Bind invalid map as tensorfloat input
// Bind invalid map as tensorfloat input
std::unordered_map<float, float> map;
auto abiMap = winrt::single_threaded_map(std::move(map));
WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", abiMap), winrt::hresult_error, ensureWinmlInvalidBinding);
// Bind invalid sequence as tensorfloat input
// Bind invalid sequence as tensorfloat input
std::vector<uint32_t> sequence;
auto abiSequence = winrt::single_threaded_vector(std::move(sequence));
WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding);
// Bind invalid tensor size as tensorfloat input
// Bind invalid tensor size as tensorfloat input
auto tensorBoolean = TensorBoolean::Create();
WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding);
// Bind invalid tensor shape as tensorfloat input
// Bind invalid tensor shape as tensorfloat input
auto tensorInvalidShape = TensorFloat::Create(std::vector<int64_t>{2, 3, 4});
WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", tensorInvalidShape), winrt::hresult_error, ensureWinmlInvalidBinding);
/*
/*
Verify sequence bindings throw correct bind exceptions
*/
// Bind invalid image as sequence<map<int, float> output
// Bind invalid image as sequence<map<int, float> output
WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", image), winrt::hresult_error, ensureWinmlInvalidBinding);
// Bind invalid map as sequence<map<int, float> output
// Bind invalid map as sequence<map<int, float> output
WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", abiMap), winrt::hresult_error, ensureWinmlInvalidBinding);
// Bind invalid sequence<int> as sequence<map<int, float> output
// Bind invalid sequence<int> as sequence<map<int, float> output
WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding);
// Bind invalid tensor as sequence<map<int, float> output
// Bind invalid tensor as sequence<map<int, float> output
WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding);
/*
/*
Verify image bindings throw correct bind exceptions
*/
// WINML_EXPECT_NO_THROW(LoadModel(L"fns-candy.onnx"));
// WINML_EXPECT_NO_THROW(LoadModel(L"fns-candy.onnx"));
// LearningModelSession imageSession(m_model);
// LearningModelBinding imageBinding(imageSession);
// LearningModelSession imageSession(m_model);
// LearningModelBinding imageBinding(imageSession);
// auto inputName = m_model.InputFeatures().First().Current().Name();
// auto inputName = m_model.InputFeatures().First().Current().Name();
// // Bind invalid map as image input
// // Bind invalid map as image input
// WINML_EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, abiMap), winrt::hresult_error, ensureWinmlInvalidBinding);
// // Bind invalid sequence as image input


@ -259,7 +259,7 @@ static void CreateSessionWithCastToFloat16InModel() {
}
static void CreateSessionWithFloat16InitializersInModel() {
// load a model
// load a model
LearningModel learningModel = nullptr;
WINML_EXPECT_NO_THROW(APITest::LoadModel(L"fp16-initializer.onnx", learningModel));


@ -37,7 +37,7 @@ struct BufferBackedRandomAccessStreamReadAsync
virtual /* [propget] */ HRESULT STDMETHODCALLTYPE get_Id(
/* [retval][out] */ __RPC__out unsigned __int32* id
) override {
*id = 0; // Do we need to implement this?
*id = 0; // Do we need to implement this?
return S_OK;
}
@ -134,7 +134,7 @@ struct RandomAccessStream
return S_OK;
}
// Content Provider
// Content Provider
/* [propget] */ virtual HRESULT STDMETHODCALLTYPE get_ContentType(
/* [retval, out] */ __RPC__deref_out_opt HSTRING* value
@ -142,7 +142,7 @@ struct RandomAccessStream
return WindowsCreateString(nullptr, 0, value);
}
// IRandomAccessStream
// IRandomAccessStream
/* [propget] */ virtual HRESULT STDMETHODCALLTYPE get_Size(
/* [retval, out] */ __RPC__out UINT64* value
@ -210,18 +210,18 @@ struct RandomAccessStream
return S_OK;
}
// IInputStream
// IInputStream
virtual HRESULT STDMETHODCALLTYPE ReadAsync(
/* [in] */ __RPC__in_opt ABI::Windows::Storage::Streams::IBuffer* buffer,
/* [in] */ UINT32 count,
/* [in] */ ABI::Windows::Storage::Streams::InputStreamOptions /*options*/,
/* [retval, out] */
/* [retval, out] */
__RPC__deref_out_opt __FIAsyncOperationWithProgress_2_Windows__CStorage__CStreams__CIBuffer_UINT32** operation
) override {
auto read_async = Microsoft::WRL::Make<BufferBackedRandomAccessStreamReadAsync>();
read_async.CopyTo(operation);
// perform the "async work" which is actually synchronous atm
// perform the "async work" which is actually synchronous atm
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer> spBuffer = buffer;
Microsoft::WRL::ComPtr<Windows::Storage::Streams::IBufferByteAccess> out_buffer_byte_access;
spBuffer.As<Windows::Storage::Streams::IBufferByteAccess>(&out_buffer_byte_access);
@ -240,7 +240,7 @@ struct RandomAccessStream
return S_OK;
}
// IOutputStream
// IOutputStream
virtual HRESULT STDMETHODCALLTYPE WriteAsync(
/* [in] */ __RPC__in_opt ABI::Windows::Storage::Streams::IBuffer* /*buffer*/,
/* [retval, out] */ __RPC__deref_out_opt __FIAsyncOperationWithProgress_2_UINT32_UINT32** /*operation*/
@ -254,7 +254,7 @@ struct RandomAccessStream
return E_NOTIMPL;
}
// IClosable
// IClosable
virtual HRESULT STDMETHODCALLTYPE Close(void) override {
buffer_ = nullptr;
return S_OK;
@ -287,7 +287,7 @@ struct BufferBackedRandomAccessStreamReferenceOpenReadAsync
virtual /* [propget] */ HRESULT STDMETHODCALLTYPE get_Id(
/* [retval][out] */ __RPC__out unsigned __int32* id
) override {
*id = 0; // Do we need to implement this?
*id = 0; // Do we need to implement this?
return S_OK;
}
@ -364,6 +364,6 @@ struct BufferBackedRandomAccessStreamReference
}
};
} // namespace WinMLTest
} // namespace WinMLTest
#endif // RANDOM_ACCESS_STREAM_H
#endif // RANDOM_ACCESS_STREAM_H


@ -49,9 +49,9 @@ struct directx_device : public learning_model_device {
}
};
}// namespace gpu
}// namespace MachineLearning
}// namespace AI
} // namespace Microsoft
} // namespace gpu
} // namespace MachineLearning
} // namespace AI
} // namespace Microsoft
#endif // MICROSOFT_AI_MACHINELEARNING_GPU_H
#endif // MICROSOFT_AI_MACHINELEARNING_GPU_H


@ -16,8 +16,8 @@ namespace AI {
namespace MachineLearning {
using tensor_shape_type = int64_t;
}
}// namespace AI
} // namespace Microsoft
} // namespace AI
} // namespace Microsoft
#include "winml_microsoft.h"
@ -30,10 +30,10 @@ using learning_model_device = WinMLLearningModelDevice;
using learning_model_session = WinMLLearningModelSession;
using learning_model_binding = WinMLLearningModelBinding;
using learning_model_results = WinMLLearningModelResults;
}// namespace Details
}// namespace MachineLearning
}// namespace AI
} // namespace Microsoft
} // namespace Details
} // namespace MachineLearning
} // namespace AI
} // namespace Microsoft
namespace Microsoft {
namespace AI {
@ -146,8 +146,8 @@ learning_model_results learning_model_session::evaluate(learning_model_binding&
return Details::learning_model_results(m_session.evaluate(binding.m_binding));
}
}// namespace MachineLearning
}// namespace AI
} // namespace Microsoft
} // namespace MachineLearning
} // namespace AI
} // namespace Microsoft
#endif // MICROSOFT_AI_MACHINELEARNING_H_
#endif // MICROSOFT_AI_MACHINELEARNING_H_


@ -60,6 +60,6 @@ struct WeakBuffer
}
};
} // namespace WinMLTest
} // namespace WinMLTest
#endif // WEAK_BUFFER_H
#endif // WEAK_BUFFER_H


@ -109,9 +109,9 @@ struct weak_single_threaded_iterable
};
};
}// namespace Details
}// namespace MachineLearning
}// namespace AI
} // namespace Microsoft
} // namespace Details
} // namespace MachineLearning
} // namespace AI
} // namespace Microsoft
#endif // WEAK_SINGLE_THREADED_ITERABLE_H_
#endif // WEAK_SINGLE_THREADED_ITERABLE_H_


@ -421,7 +421,7 @@ __declspec(selectany
) const GUID TensorFactory2IID<double>::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorDoubleStatics2;
inline HRESULT GetActivationFactory(const wchar_t* p_class_id, const IID& iid, void** factory) noexcept {
// Fallback to OS binary if the redistributable is not present!
// Fallback to OS binary if the redistributable is not present!
auto library = LoadLibraryExW(MachineLearningDll, nullptr, 0);
if (library == nullptr) {
return HRESULT_FROM_WIN32(GetLastError());
@ -500,15 +500,15 @@ class WinMLLearningModel {
int32_t Initialize(const char* bytes, size_t size, bool with_copy = false) {
auto hr = RoInitialize(RO_INIT_TYPE::RO_INIT_SINGLETHREADED);
// https://docs.microsoft.com/en-us/windows/win32/api/roapi/nf-roapi-roinitialize#return-value
// RPC_E_CHANGED_MODE indicates already initialized as multithreaded
// https://docs.microsoft.com/en-us/windows/win32/api/roapi/nf-roapi-roinitialize#return-value
// RPC_E_CHANGED_MODE indicates already initialized as multithreaded
if (hr < 0 && hr != RPC_E_CHANGED_MODE) {
return static_cast<int32_t>(hr);
}
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStreamReference> random_access_stream_ref;
if (with_copy) {
// Create in memory stream
// Create in memory stream
Microsoft::WRL::ComPtr<IInspectable> in_memory_random_access_stream_insp;
RETURN_HR_IF_FAILED(RoActivateInstance(
Microsoft::WRL::Wrappers::HStringReference(RuntimeClass_Windows_Storage_Streams_InMemoryRandomAccessStream)
@ -516,31 +516,31 @@ class WinMLLearningModel {
in_memory_random_access_stream_insp.GetAddressOf()
));
// QI memory stream to output stream
// QI memory stream to output stream
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IOutputStream> output_stream;
RETURN_HR_IF_FAILED(in_memory_random_access_stream_insp.As(&output_stream));
// Create data writer factory
// Create data writer factory
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IDataWriterFactory> activation_factory;
RETURN_HR_IF_FAILED(RoGetActivationFactory(
Microsoft::WRL::Wrappers::HStringReference(RuntimeClass_Windows_Storage_Streams_DataWriter).Get(),
IID_PPV_ARGS(activation_factory.GetAddressOf())
));
// Create data writer object based on the in memory stream
// Create data writer object based on the in memory stream
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IDataWriter> data_writer;
RETURN_HR_IF_FAILED(activation_factory->CreateDataWriter(output_stream.Get(), data_writer.GetAddressOf()));
// Write the model to the data writer and thus to the stream
// Write the model to the data writer and thus to the stream
RETURN_HR_IF_FAILED(
data_writer->WriteBytes(static_cast<uint32_t>(size), reinterpret_cast<BYTE*>(const_cast<char*>(bytes)))
);
// QI the in memory stream to a random access stream
// QI the in memory stream to a random access stream
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStream> random_access_stream;
RETURN_HR_IF_FAILED(in_memory_random_access_stream_insp.As(&random_access_stream));
// Create a random access stream reference factory
// Create a random access stream reference factory
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStreamReferenceStatics>
random_access_stream_ref_statics;
RETURN_HR_IF_FAILED(RoGetActivationFactory(
@ -549,8 +549,8 @@ class WinMLLearningModel {
IID_PPV_ARGS(random_access_stream_ref_statics.GetAddressOf())
));
// Create a random access stream reference from the random access stream view on top of
// the in memory stream
// Create a random access stream reference from the random access stream view on top of
// the in memory stream
RETURN_HR_IF_FAILED(random_access_stream_ref_statics->CreateFromStream(
random_access_stream.Get(), random_access_stream_ref.GetAddressOf()
));
@ -570,7 +570,7 @@ class WinMLLearningModel {
));
}
// Create a learning model factory
// Create a learning model factory
Microsoft::WRL::ComPtr<ABI::Microsoft::AI::MachineLearning::ILearningModelStatics> learning_model;
RETURN_HR_IF_FAILED(GetActivationFactory(
RuntimeClass_Microsoft_AI_MachineLearning_LearningModel,
@ -578,8 +578,8 @@ class WinMLLearningModel {
&learning_model
));
// Create a learning model from the factory with the random access stream reference that points
// to the random access stream view on top of the in memory stream copy of the model
// Create a learning model from the factory with the random access stream reference that points
// to the random access stream view on top of the in memory stream copy of the model
RETURN_HR_IF_FAILED(learning_model->LoadFromStream(random_access_stream_ref.Get(), m_learning_model.GetAddressOf())
);


@ -421,7 +421,7 @@ __declspec(selectany
) const GUID TensorFactory2IID<double>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorDoubleStatics2;
inline HRESULT GetActivationFactory(const wchar_t* p_class_id, const IID& iid, void** factory) noexcept {
// Fallback to OS binary if the redistributable is not present!
// Fallback to OS binary if the redistributable is not present!
auto library = LoadLibraryExW(MachineLearningDll, nullptr, 0);
using DllGetActivationFactory = HRESULT __stdcall(HSTRING, void** factory);
@ -499,7 +499,7 @@ class WinMLLearningModel {
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStreamReference> random_access_stream_ref;
if (with_copy) {
// Create in memory stream
// Create in memory stream
Microsoft::WRL::ComPtr<IInspectable> in_memory_random_access_stream_insp;
RETURN_HR_IF_FAILED(RoActivateInstance(
Microsoft::WRL::Wrappers::HStringReference(RuntimeClass_Windows_Storage_Streams_InMemoryRandomAccessStream)
@ -507,31 +507,31 @@ class WinMLLearningModel {
in_memory_random_access_stream_insp.GetAddressOf()
));
// QI memory stream to output stream
// QI memory stream to output stream
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IOutputStream> output_stream;
RETURN_HR_IF_FAILED(in_memory_random_access_stream_insp.As(&output_stream));
// Create data writer factory
// Create data writer factory
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IDataWriterFactory> activation_factory;
RETURN_HR_IF_FAILED(RoGetActivationFactory(
Microsoft::WRL::Wrappers::HStringReference(RuntimeClass_Windows_Storage_Streams_DataWriter).Get(),
IID_PPV_ARGS(activation_factory.GetAddressOf())
));
// Create data writer object based on the in memory stream
// Create data writer object based on the in memory stream
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IDataWriter> data_writer;
RETURN_HR_IF_FAILED(activation_factory->CreateDataWriter(output_stream.Get(), data_writer.GetAddressOf()));
// Write the model to the data writer and thus to the stream
// Write the model to the data writer and thus to the stream
RETURN_HR_IF_FAILED(
data_writer->WriteBytes(static_cast<uint32_t>(size), reinterpret_cast<BYTE*>(const_cast<char*>(bytes)))
);
// QI the in memory stream to a random access stream
// QI the in memory stream to a random access stream
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStream> random_access_stream;
RETURN_HR_IF_FAILED(in_memory_random_access_stream_insp.As(&random_access_stream));
// Create a random access stream reference factory
// Create a random access stream reference factory
Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStreamReferenceStatics>
random_access_stream_ref_statics;
RETURN_HR_IF_FAILED(RoGetActivationFactory(
@ -540,8 +540,8 @@ class WinMLLearningModel {
IID_PPV_ARGS(random_access_stream_ref_statics.GetAddressOf())
));
// Create a random access stream reference from the random access stream view on top of
// the in memory stream
// Create a random access stream reference from the random access stream view on top of
// the in memory stream
RETURN_HR_IF_FAILED(random_access_stream_ref_statics->CreateFromStream(
random_access_stream.Get(), random_access_stream_ref.GetAddressOf()
));
@ -554,7 +554,7 @@ class WinMLLearningModel {
));
}
// Create a learning model factory
// Create a learning model factory
Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelStatics> learning_model;
RETURN_HR_IF_FAILED(GetActivationFactory(
RuntimeClass_Windows_AI_MachineLearning_LearningModel,
@ -568,8 +568,8 @@ class WinMLLearningModel {
RETURN_HR_IF_FAILED(async_operation->put_Completed(store_completed_handler.Get()));
RETURN_HR_IF_FAILED(store_completed_handler->Wait());
// Create a learning model from the factory with the random access stream reference that points
// to the random access stream view on top of the in memory stream copy of the model
// Create a learning model from the factory with the random access stream reference that points
// to the random access stream view on top of the in memory stream copy of the model
RETURN_HR_IF_FAILED(learning_model->LoadFromStream(random_access_stream_ref.Get(), m_learning_model.GetAddressOf())
);


@ -104,7 +104,7 @@ void ModelValidator::FnsCandy16(
float dataTolerance
) {
ORT_UNUSED_PARAMETER(dataTolerance);
// file name strings
// file name strings
static wchar_t* modelFileName = L"winmlperf_coreml_FNS-Candy_prerelease_fp16.onnx";
static wchar_t* inputDataImageFileName = L"fish_720.png";
static wchar_t* outputDataFileName = L"output.png";
@ -115,7 +115,7 @@ void ModelValidator::FnsCandy16(
auto fullModelPath = modulePath + modelFileName;
auto outputFileName = modulePath + outputDataFileName;
// WinML model creation
// WinML model creation
LearningModel model = nullptr;
model = LearningModel::LoadFromFilePath(fullModelPath);
@ -126,7 +126,7 @@ void ModelValidator::FnsCandy16(
auto fullImagePath = modulePath + inputDataImageFileName;
BindImage(modelBinding, inputBindingName, fullImagePath.c_str(), bindInputsAsIInspectable);
// create the tensor for the actual output
// create the tensor for the actual output
auto output = model.OutputFeatures().First().Current();
if (output.Kind() != LearningModelFeatureKind::Tensor) {
throw winrt::hresult_invalid_argument(L"Model output kind is not type Tensor");
@ -135,16 +135,16 @@ void ModelValidator::FnsCandy16(
auto shape = winrt::single_threaded_vector(std::vector<int64_t>{1, 1});
auto outputTensor = BindImageOutput(outputBindingStrategy, modelBinding, outputDataBindingName);
// Evaluate the model
// Evaluate the model
std::cout << "Calling EvaluateSync on instance" << instance << "\n";
LearningModelEvaluationResult result = nullptr;
result = modelSession.Evaluate(modelBinding, {});
// Get results
// Get results
if (outputBindingStrategy == OutputBindingStrategy::Unbound) {
// When output binding strategy is unbound, the output tensor was not set on bind.
// Therefore, we need to retrieve it from the LearnignModelEvaluationResult
// TODO: is this right? outputTensorT is unused...
// When output binding strategy is unbound, the output tensor was not set on bind.
// Therefore, we need to retrieve it from the LearnignModelEvaluationResult
// TODO: is this right? outputTensorT is unused...
/*auto outputTensorT = */ result.Outputs().Lookup(outputDataBindingName).as<TensorFloat16Bit>();
} else {
if (result.Outputs().Lookup(outputDataBindingName) != outputTensor) {
@ -171,7 +171,7 @@ void ModelValidator::SqueezeNet(
OutputBindingStrategy outputBindingStrategy,
bool bindInputsAsIInspectable
) {
// file name strings
// file name strings
static wchar_t* modelFileName = L"model.onnx";
static wchar_t* inputDataFileName = L"test_data_0_input.pb";
static wchar_t* outputDataFileName = L"test_data_0_output.pb";
@ -183,7 +183,7 @@ void ModelValidator::SqueezeNet(
auto fullModelPath = modulePath + modelFileName;
auto outputFileName = modulePath + outputDataFileName;
// WinML model creation
// WinML model creation
LearningModel model = nullptr;
model = LearningModel::LoadFromFilePath(fullModelPath);
@ -201,13 +201,13 @@ void ModelValidator::SqueezeNet(
BindTensor(modelBinding, inputBindingName, inputTensor, bindInputsAsIInspectable);
}
// load up the expected output
// load up the expected output
auto expectedResultsTensor = ProtobufHelpers::LoadTensorFromProtobufFile(outputFileName, false);
if (expectedResultsTensor == nullptr) {
throw winrt::hresult_invalid_argument(L"Expected Results from protobuf file are null.");
}
// create the tensor for the actual output
// create the tensor for the actual output
auto output = model.OutputFeatures().First().Current();
if (output.Kind() != LearningModelFeatureKind::Tensor) {
throw winrt::hresult_invalid_argument(L"Expected output feature kind of model to be Tensor");
@ -216,15 +216,15 @@ void ModelValidator::SqueezeNet(
auto outputTensor =
BindOutput<TensorFloat>(outputBindingStrategy, modelBinding, outputDataBindingName, expectedResultsTensor.Shape());
// Evaluate the model
// Evaluate the model
std::cout << "Calling EvaluateSync on instance " << instance << "\n";
LearningModelEvaluationResult result = nullptr;
result = modelSession.Evaluate(modelBinding, {});
// Get results
// Get results
if (outputBindingStrategy == OutputBindingStrategy::Unbound) {
// When output binding strategy is unbound, the output tensor was not set on bind.
// Therefore, we need to retrieve it from the LearnignModelEvaluationResult
// When output binding strategy is unbound, the output tensor was not set on bind.
// Therefore, we need to retrieve it from the LearnignModelEvaluationResult
outputTensor = result.Outputs().Lookup(outputDataBindingName).as<ITensor>();
} else {
if (result.Outputs().Lookup(outputDataBindingName) != outputTensor) {
@ -250,4 +250,4 @@ void ModelValidator::SqueezeNet(
}
}
}
}// namespace WinML::Engine::Test
} // namespace WinML::Engine::Test


@ -29,4 +29,4 @@ void SqueezeNet(
OutputBindingStrategy outputBindingStrategy = OutputBindingStrategy::Bound,
bool bindInputsAsIInspectable = false
);
}// namespace WinML::Engine::Test::ModelValidator
} // namespace WinML::Engine::Test::ModelValidator


@ -31,12 +31,12 @@ std::wstring GetModulePath() {
}
std::wstring GetWinMLPath() {
// bool inboxDll = false;
// TODO Add command line parsing
// if (SUCCEEDED(WEX::TestExecution::RuntimeParameters::TryGetValue(L"inbox", inboxDll)) && inboxDll)
// {
// return L"";
// }
// bool inboxDll = false;
// TODO Add command line parsing
// if (SUCCEEDED(WEX::TestExecution::RuntimeParameters::TryGetValue(L"inbox", inboxDll)) && inboxDll)
// {
// return L"";
// }
return GetModulePath();
}
@ -63,4 +63,4 @@ winml::ImageFeatureValue LoadImageFeatureValue(const std::wstring& imagePath) {
auto videoFrame = wm::VideoFrame::CreateWithSoftwareBitmap(softwareBitmap);
return ImageFeatureValue::CreateFromVideoFrame(videoFrame);
}
}// namespace FileHelpers
} // namespace FileHelpers


@ -14,4 +14,4 @@ std::wstring GetWinMLPath();
wgi::SoftwareBitmap GetSoftwareBitmapFromFile(const std::wstring& filePath);
winml::ImageFeatureValue LoadImageFeatureValue(const std::wstring& imagePath);
}// namespace FileHelpers
} // namespace FileHelpers


@ -15,14 +15,14 @@
#include "onnx/onnx-ml.pb.h"
#pragma warning(pop)
namespace ProtobufHelpers {
// LoadTensorFromProtobufFile take a path to a FP32 data file and loads it into a 32bit array or
// 16bit array based on isFp16
// LoadTensorFromProtobufFile take a path to a FP32 data file and loads it into a 32bit array or
// 16bit array based on isFp16
winml::ITensor LoadTensorFromProtobufFile(const std::wstring& filePath, bool isFp16);
// LoadTensorFloat16FromProtobufFile takes a path to a FP16 data file and loads it into a 16bit array
// LoadTensorFloat16FromProtobufFile takes a path to a FP16 data file and loads it into a 16bit array
winml::TensorFloat16Bit LoadTensorFloat16FromProtobufFile(const std::wstring& filePath);
winml::LearningModel CreateModel(winml::TensorKind kind, const std::vector<int64_t>& shape, uint32_t num_elements = 1);
// Populates TensorProto with tensor from protobuf file
// Populates TensorProto with tensor from protobuf file
bool LoadOnnxTensorFromProtobufFile(onnx::TensorProto& tensor, std::wstring filePath);
}// namespace ProtobufHelpers
} // namespace ProtobufHelpers


@ -3,6 +3,6 @@
#pragma once
namespace RuntimeParameters {
// Runtime parameters passed through CLI arguments
// Runtime parameters passed through CLI arguments
extern std::unordered_map<std::string, std::string> Parameters;
}// namespace RuntimeParameters
} // namespace RuntimeParameters


@ -60,7 +60,7 @@ void RunAsync(std::vector<EvaluationUnit>& evaluation_units) {
std::for_each(evaluation_units.begin(), evaluation_units.end(), [](EvaluationUnit& unit) {
unit.operation = unit.session.EvaluateAsync(unit.binding, L"");
});
// get results
// get results
std::for_each(evaluation_units.begin(), evaluation_units.end(), [](EvaluationUnit& unit) {
unit.result = unit.operation.get();
});
@ -121,7 +121,7 @@ void EvalAsyncDifferentSessions() {
std::vector<EvaluationUnit> evaluation_units(num_units, EvaluationUnit());
auto ifv = FileHelpers::LoadImageFeatureValue(L"kitten_224.png");
// same model, different session
// same model, different session
auto model = LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"model.onnx");
for (unsigned int i = 0; i < num_units; ++i) {
evaluation_units[i].model = model;
@ -143,7 +143,7 @@ void EvalAsyncDifferentBindings() {
std::vector<ImageFeatureValue> ifvs = {
FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")};
// same session, different binding
// same session, different binding
auto model = LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"model.onnx");
auto session = LearningModelSession(model);
for (unsigned int i = 0; i < num_units; ++i) {
@ -171,7 +171,7 @@ unsigned int GetRandomNumber(unsigned int max_number) {
}
void MultiThreadLoadModel() {
// load same model
// load same model
auto path = FileHelpers::GetModulePath() + L"model.onnx";
ThreadPool pool(NUM_THREADS);
try {
@ -193,8 +193,8 @@ void MultiThreadMultiSessionOnDevice(const LearningModelDevice& device) {
std::vector<ImageFeatureValue> ivfs = {
FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")};
std::vector<int> max_indices = {
281, // tabby, tabby cat
0 // tench, Tinca tinca
281, // tabby, tabby cat
0 // tench, Tinca tinca
};
std::vector<float> max_values = {0.9314f, 0.7385f};
float tolerance = 0.001f;
@ -202,11 +202,11 @@ void MultiThreadMultiSessionOnDevice(const LearningModelDevice& device) {
ThreadPool pool(NUM_THREADS);
try {
device.as<IMetacommandsController>()->SetMetacommandsEnabled(false);
// create all the sessions
// create all the sessions
for (unsigned i = 0; i < NUM_THREADS; ++i) {
modelSessions[i] = LearningModelSession(model, device);
}
// start all the threads
// start all the threads
for (unsigned i_thread = 0; i_thread < NUM_THREADS; ++i_thread) {
LearningModelSession& model_session = modelSessions[i_thread];
pool.SubmitWork([&model_session, &ivfs, &max_indices, &max_values, tolerance, i_thread]() {
@ -259,8 +259,8 @@ void MultiThreadSingleSessionOnDevice(const LearningModelDevice& device) {
std::vector<ImageFeatureValue> ivfs = {
FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")};
std::vector<int> max_indices = {
281, // tabby, tabby cat
0 // tench, Tinca tinca
281, // tabby, tabby cat
0 // tench, Tinca tinca
};
std::vector<float> max_values = {0.9314f, 0.7385f};
float tolerance = 0.001f;
@ -309,7 +309,7 @@ void MultiThreadSingleSession() {
void MultiThreadSingleSessionGpu() {
MultiThreadSingleSessionOnDevice(LearningModelDevice(LearningModelDeviceKind::DirectX));
}
}// namespace
} // namespace
const ConcurrencyTestsApi& getapi() {
static ConcurrencyTestsApi api = {


@ -7,7 +7,7 @@ ThreadPool::ThreadPool(unsigned int initial_pool_size) : m_destruct_pool(false),
m_threads.emplace_back([this]() {
while (true) {
std::unique_lock<std::mutex> lock(m_mutex);
// thread listening for event and acquire lock if event triggered
// thread listening for event and acquire lock if event triggered
m_cond_var.wait(lock, [this] { return m_destruct_pool || !m_work_queue.empty(); });
if (!m_work_queue.empty()) {
auto work = m_work_queue.front();
@ -15,8 +15,8 @@ ThreadPool::ThreadPool(unsigned int initial_pool_size) : m_destruct_pool(false),
lock.unlock();
work();
} else {
// Work queue is empty but lock acquired
// This means we are destructing the pool
// Work queue is empty but lock acquired
// This means we are destructing the pool
break;
}
}
@ -26,7 +26,7 @@ ThreadPool::ThreadPool(unsigned int initial_pool_size) : m_destruct_pool(false),
ThreadPool::~ThreadPool() {
m_destruct_pool = true;
m_cond_var.notify_all(); // notify destruction to threads
m_cond_var.notify_all(); // notify destruction to threads
for (auto& thread : m_threads) {
thread.join();
}


@ -23,11 +23,11 @@ class ThreadPool {
auto task = std::make_shared<std::packaged_task<decltype(f(args...))()>>(std::forward<decltype(func)>(func));
{
std::lock_guard<std::mutex> lock(m_mutex);
// wrap packed task into a void return function type so that it can be stored in queue
// wrap packed task into a void return function type so that it can be stored in queue
m_work_queue.push([task]() { (*task)(); });
}
m_cond_var.notify_one(); // unblocks one of the waiting threads
m_cond_var.notify_one(); // unblocks one of the waiting threads
return task->get_future();
}
};


@ -19,7 +19,7 @@ using namespace wgi;
namespace ImageTestHelper {
BitmapPixelFormat GetPixelFormat(const std::wstring& inputPixelFormat) {
// Return corresponding BitmapPixelFormat according to input string
// Return corresponding BitmapPixelFormat according to input string
if (L"Bgra8" == inputPixelFormat || L"Bgr8" == inputPixelFormat) {
return BitmapPixelFormat::Bgra8;
} else if (L"Rgba8" == inputPixelFormat || L"Rgb8" == inputPixelFormat) {
@ -42,7 +42,7 @@ TensorFloat LoadInputImageFromCPU(SoftwareBitmap softwareBitmap, const std::wstr
uint32_t height = softwareBitmap.PixelHeight();
uint32_t width = softwareBitmap.PixelWidth();
// TODO: Need modification for Gray8
// TODO: Need modification for Gray8
std::vector<int64_t> shape = {1, 3, height, width};
float* pCPUTensor;
uint32_t uCapacity;
@ -50,7 +50,7 @@ TensorFloat LoadInputImageFromCPU(SoftwareBitmap softwareBitmap, const std::wstr
com_ptr<ITensorNative> itn = tf.as<ITensorNative>();
itn->GetBuffer(reinterpret_cast<BYTE**>(&pCPUTensor), &uCapacity);
if (BitmapPixelFormat::Bgra8 == GetPixelFormat(modelPixelFormat)) {
// loop condition is i < size - 2 to avoid potential for extending past the memory buffer
// loop condition is i < size - 2 to avoid potential for extending past the memory buffer
for (UINT32 i = 0; i < size - 2; i += 4) {
UINT32 pixelInd = i / 4;
pCPUTensor[pixelInd] = (float)pData[i];
@ -65,8 +65,8 @@ TensorFloat LoadInputImageFromCPU(SoftwareBitmap softwareBitmap, const std::wstr
pCPUTensor[(height * width * 2) + pixelInd] = (float)pData[i];
}
}
// else if()
// TODO: for Gray8
// else if()
// TODO: for Gray8
else {
std::cerr << "Unsupported pixelFormat";
}
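The loops above turn interleaved BGRA8 bytes into a planar 1x3xHxW float tensor; a standalone sketch of that layout transform under the same assumptions (buffer names are placeholders, and the alpha byte is simply dropped):

#include <cstdint>
#include <vector>

// pData holds B,G,R,A bytes per pixel; the result is planar: all B, then all G, then all R.
std::vector<float> BgraToPlanarChw(const uint8_t* pData, uint32_t height, uint32_t width) {
  const uint32_t plane = height * width;
  const uint32_t size = plane * 4;  // total interleaved bytes
  std::vector<float> tensor(3 * plane);
  // The guard i < size - 2 mirrors the helper above: the final pixel's B/G/R reads stay in bounds.
  for (uint32_t i = 0; i < size - 2; i += 4) {
    const uint32_t pixel = i / 4;
    tensor[pixel] = static_cast<float>(pData[i]);                  // blue plane
    tensor[plane + pixel] = static_cast<float>(pData[i + 1]);      // green plane
    tensor[2 * plane + pixel] = static_cast<float>(pData[i + 2]);  // red plane
  }
  return tensor;
}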
@ -87,7 +87,7 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
float* pCPUTensor;
uint32_t uCapacity;
// CPU tensor initialization
// CPU tensor initialization
TensorFloat tf = TensorFloat::Create(shape);
com_ptr<ITensorNative> itn = tf.as<ITensorNative>();
itn->GetBuffer(reinterpret_cast<BYTE**>(&pCPUTensor), &uCapacity);
@ -95,7 +95,7 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
uint32_t height = softwareBitmap.PixelHeight();
uint32_t width = softwareBitmap.PixelWidth();
if (BitmapPixelFormat::Bgra8 == GetPixelFormat(modelPixelFormat)) {
// loop condition is i < size - 2 to avoid potential for extending past the memory buffer
// loop condition is i < size - 2 to avoid potential for extending past the memory buffer
for (UINT32 i = 0; i < size - 2; i += 4) {
UINT32 pixelInd = i / 4;
pCPUTensor[pixelInd] = (float)pData[i];
@ -110,19 +110,19 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
pCPUTensor[(height * width * 2) + pixelInd] = (float)pData[i];
}
}
// else if()
// TODO: for Gray8
// else if()
// TODO: for Gray8
else {
std::cerr << "unsupported pixelFormat";
}
// create the d3d device.
// create the d3d device.
com_ptr<ID3D12Device> pD3D12Device = nullptr;
WINML_EXPECT_NO_THROW(D3D12CreateDevice(
nullptr, D3D_FEATURE_LEVEL::D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), reinterpret_cast<void**>(&pD3D12Device)
));
// create the command queue.
// create the command queue.
com_ptr<ID3D12CommandQueue> dxQueue = nullptr;
D3D12_COMMAND_QUEUE_DESC commandQueueDesc = {};
commandQueueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
@ -133,7 +133,7 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
com_ptr<::IUnknown> spUnk;
devicefactory->CreateFromD3D12CommandQueue(dxQueue.get(), spUnk.put());
// Create ID3D12GraphicsCommandList and Allocator
// Create ID3D12GraphicsCommandList and Allocator
D3D12_COMMAND_LIST_TYPE queuetype = dxQueue->GetDesc().Type;
com_ptr<ID3D12CommandAllocator> alloctor;
com_ptr<ID3D12GraphicsCommandList> cmdList;
@ -144,8 +144,8 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
0, queuetype, alloctor.get(), nullptr, winrt::guid_of<ID3D12CommandList>(), cmdList.put_void()
);
// Create Committed Resource
// 3 is number of channels we use. R G B without alpha.
// Create Committed Resource
// 3 is number of channels we use. R G B without alpha.
UINT64 bufferbytesize = 3 * sizeof(float) * softwareBitmap.PixelWidth() * softwareBitmap.PixelHeight();
D3D12_HEAP_PROPERTIES heapProperties = {
D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
@ -174,7 +174,7 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
pGPUResource.put_void()
);
// Create the GPU upload buffer.
// Create the GPU upload buffer.
auto heap_properties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD);
auto buffer_desc = CD3DX12_RESOURCE_DESC::Buffer(bufferbytesize);
WINML_EXPECT_NO_THROW(pD3D12Device->CreateCommittedResource(
@ -187,38 +187,38 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
imageUploadHeap.put_void()
));
// Copy from Cpu to GPU
// Copy from Cpu to GPU
D3D12_SUBRESOURCE_DATA CPUData = {};
CPUData.pData = reinterpret_cast<BYTE*>(pCPUTensor);
CPUData.RowPitch = static_cast<LONG_PTR>(bufferbytesize);
CPUData.SlicePitch = static_cast<LONG_PTR>(bufferbytesize);
UpdateSubresources(cmdList.get(), pGPUResource.get(), imageUploadHeap.get(), 0, 0, 1, &CPUData);
// Close the command list and execute it to begin the initial GPU setup.
// Close the command list and execute it to begin the initial GPU setup.
WINML_EXPECT_NO_THROW(cmdList->Close());
ID3D12CommandList* ppCommandLists[] = {cmdList.get()};
dxQueue->ExecuteCommandLists(_countof(ppCommandLists), ppCommandLists);
//Create Event
//Create Event
HANDLE directEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
wil::unique_event hDirectEvent(directEvent);
//Create Fence
//Create Fence
::Microsoft::WRL::ComPtr<ID3D12Fence> spDirectFence = nullptr;
WINML_EXPECT_HRESULT_SUCCEEDED(
pD3D12Device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(spDirectFence.ReleaseAndGetAddressOf()))
);
//Adds fence to queue
//Adds fence to queue
WINML_EXPECT_HRESULT_SUCCEEDED(dxQueue->Signal(spDirectFence.Get(), FENCE_SIGNAL_VALUE));
WINML_EXPECT_HRESULT_SUCCEEDED(spDirectFence->SetEventOnCompletion(FENCE_SIGNAL_VALUE, hDirectEvent.get()));
//Wait for signal
//Wait for signal
DWORD retVal = WaitForSingleObject(hDirectEvent.get(), INFINITE);
if (retVal != WAIT_OBJECT_0) {
WINML_EXPECT_HRESULT_SUCCEEDED(E_UNEXPECTED);
}
// GPU tensorize
// GPU tensorize
com_ptr<::IUnknown> spUnkTensor;
TensorFloat input1imagetensor(nullptr);
int64_t shapes[4] = {1, 3, softwareBitmap.PixelWidth(), softwareBitmap.PixelHeight()};
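The copy-and-wait sequence above follows the usual D3D12 idiom: execute the upload, signal a fence on the queue, attach an event to the fence value, and block until it fires. A minimal sketch of that synchronization step, assuming the caller already owns the device and queue (function and parameter names are illustrative):

#include <d3d12.h>
#include <windows.h>
#include <wrl/client.h>

// Blocks the CPU until `queue` has finished all work submitted before the Signal call.
HRESULT WaitForQueue(ID3D12Device* device, ID3D12CommandQueue* queue, UINT64 fenceValue) {
  Microsoft::WRL::ComPtr<ID3D12Fence> fence;
  HRESULT hr = device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&fence));
  if (FAILED(hr)) return hr;

  HANDLE completed = CreateEvent(nullptr, FALSE, FALSE, nullptr);
  if (!completed) return HRESULT_FROM_WIN32(GetLastError());

  hr = queue->Signal(fence.Get(), fenceValue);  // fence reaches fenceValue once prior work completes
  if (SUCCEEDED(hr)) hr = fence->SetEventOnCompletion(fenceValue, completed);
  if (SUCCEEDED(hr) && WaitForSingleObject(completed, INFINITE) != WAIT_OBJECT_0) {
    hr = E_UNEXPECTED;
  }
  CloseHandle(completed);
  return hr;
}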
@ -229,7 +229,7 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
}
bool VerifyHelper(VideoFrame actual, VideoFrame expected) {
// Verify two input ImageFeatureValues are identified.
// Verify two input ImageFeatureValues are identified.
auto softwareBitmapActual = actual.SoftwareBitmap();
auto softwareBitmapExpected = expected.SoftwareBitmap();
WINML_EXPECT_TRUE(softwareBitmapActual.PixelHeight() == softwareBitmapExpected.PixelHeight());
@ -252,15 +252,15 @@ bool VerifyHelper(VideoFrame actual, VideoFrame expected) {
byte* pActualByte = actualBytes;
byte* pExpectedByte = expectedBytes;
// hard code, might need to be modified later.
// hard code, might need to be modified later.
const float cMaxErrorRate = 0.4f;
int8_t epsilon = 20;
// Even given two same ImageFeatureValues, the comparison cannot exactly match.
// So we use error rate.
// Even given two same ImageFeatureValues, the comparison cannot exactly match.
// So we use error rate.
UINT errors = 0;
for (uint32_t i = 0; i < size; i++, pActualByte++, pExpectedByte++) {
// Only the check the first three channels, which are (B, G, R)
// Only the check the first three channels, which are (B, G, R)
if ((i + 1) % 4 == 0)
continue;
auto diff = (*pActualByte - *pExpectedByte);
@ -271,4 +271,4 @@ bool VerifyHelper(VideoFrame actual, VideoFrame expected) {
std::cerr << "total errors is " << errors << "/" << size << ", errors rate is " << (float)errors / size << std::endl;
return (float)errors / size < cMaxErrorRate;
}
}// namespace ImageTestHelper
} // namespace ImageTestHelper
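VerifyHelper's tolerance logic can be summarized in a small standalone routine: ignore the alpha byte of every BGRA pixel, count bytes whose difference exceeds an epsilon, and pass if the mismatch rate stays below a threshold. A hedged sketch with illustrative parameter names (the helper's exact treatment of signed differences is not fully visible in this hunk):

#include <cstdint>
#include <cstdlib>

bool PixelsRoughlyEqual(const uint8_t* actual, const uint8_t* expected, uint32_t size,
                        int epsilon = 20, float maxErrorRate = 0.4f) {
  uint32_t errors = 0;
  for (uint32_t i = 0; i < size; ++i) {
    if ((i + 1) % 4 == 0) {
      continue;  // skip the alpha byte; only B, G, R are checked
    }
    const int diff = static_cast<int>(actual[i]) - static_cast<int>(expected[i]);
    if (std::abs(diff) > epsilon) {
      ++errors;
    }
  }
  return static_cast<float>(errors) / size < maxErrorRate;
}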


@ -43,4 +43,4 @@ winml::TensorFloat LoadInputImageFromGPU(wgi::SoftwareBitmap softwareBitmap, con
bool VerifyHelper(wm::VideoFrame actual, wm::VideoFrame expected);
}// namespace ImageTestHelper
} // namespace ImageTestHelper


@ -77,7 +77,7 @@ class ImageTests : public ::testing::Test {
BitmapDecoder bitmap_decoder = BitmapDecoder::CreateAsync(stream).get();
SoftwareBitmap software_bitmap = bitmap_decoder.GetSoftwareBitmapAsync().get();
// Convert the input image to PixelFormat specified
// Convert the input image to PixelFormat specified
software_bitmap = SoftwareBitmap::Convert(software_bitmap, ImageTestHelper::GetPixelFormat(input_pixel_format));
auto input_feature = m_model.InputFeatures().First();
@ -139,7 +139,7 @@ class ImageTests : public ::testing::Test {
WINML_EXPECT_NO_THROW(m_model_binding.Bind(output_data_binding_name, output_tensor));
}
// Else for Unbound
// Else for Unbound
return frame;
}
@ -186,7 +186,7 @@ class ImageTests : public ::testing::Test {
WINML_EXPECT_NO_THROW(m_model_binding.Bind(output_data_binding_name, output_video_frames));
}
// Else for Unbound
// Else for Unbound
return output_video_frames;
}
@ -211,13 +211,13 @@ class ImageTests : public ::testing::Test {
bool ShouldSkip(
const std::wstring& model_file_name, const std::wstring& image_file_name, const InputImageSource input_image_source
) {
// Case that the tensor's shape doesn't match model's shape should be skiped
// Case that the tensor's shape doesn't match model's shape should be skiped
if ((L"1080.jpg" == image_file_name || L"kitten_224.png" == image_file_name) && (InputImageSource::FromGPUResource == input_image_source || InputImageSource::FromCPUResource == input_image_source)) {
return true;
}
// Case that the images's shape doesn't match model's shape which expects free dimension should be skiped.
// Because the fns-candy is not real model that can handle free dimensional input
// Case that the images's shape doesn't match model's shape which expects free dimension should be skiped.
// Because the fns-candy is not real model that can handle free dimensional input
if ((L"1080.jpg" == image_file_name || L"kitten_224.png" == image_file_name) && L"fns-candy_Bgr8_freeDimInput.onnx" == model_file_name) {
return true;
}
@ -229,7 +229,7 @@ class ImageTests : public ::testing::Test {
const std::wstring& path, BitmapAlphaMode expected_mode, BitmapPixelFormat expected_format, bool supported
) {
WINML_EXPECT_NO_THROW(LoadModel(path));
//input does not have image metadata and output does
//input does not have image metadata and output does
WINML_EXPECT_TRUE(m_model.OutputFeatures().First().HasCurrent());
@ -255,7 +255,7 @@ class ImageTests : public ::testing::Test {
WINML_EXPECT_EQUAL(image_descriptor.BitmapAlphaMode(), expected_mode);
WINML_EXPECT_EQUAL(image_descriptor.BitmapPixelFormat(), expected_format);
} else {
//not an image descriptor. a regular tensor
//not an image descriptor. a regular tensor
WINML_EXPECT_THROW_SPECIFIC(
m_model.OutputFeatures().First().Current().as(image_descriptor),
winrt::hresult_no_interface,
@ -264,7 +264,7 @@ class ImageTests : public ::testing::Test {
TensorFeatureDescriptor tensor_descriptor = nullptr;
WINML_EXPECT_NO_THROW(m_model.OutputFeatures().First().Current().as(tensor_descriptor));
// Make sure we fail binding ImageFeatureValue
// Make sure we fail binding ImageFeatureValue
LearningModelSession session(m_model);
LearningModelBinding binding(session);
auto ifv = FileHelpers::LoadImageFeatureValue(L"1080.jpg");
@ -280,7 +280,7 @@ class ImageTests : public ::testing::Test {
static const wchar_t* model_file_name = L"Add_ImageNet1920.onnx";
std::wstring module_path = FileHelpers::GetModulePath();
// WinML model creation
// WinML model creation
LearningModel model(nullptr);
std::wstring full_model_path = module_path + model_file_name;
WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(full_model_path));
@ -288,7 +288,7 @@ class ImageTests : public ::testing::Test {
LearningModelSession model_session(model, LearningModelDevice(device_kind));
LearningModelBinding model_binding(model_session);
//Input Binding
//Input Binding
auto feature = model.InputFeatures().First();
WINML_EXPECT_NO_THROW(model_binding.Bind(feature.Current().Name(), image1));
feature.MoveNext();
@ -408,11 +408,11 @@ TEST_P(ImageTest, ImageTest) {
EvaluateTest(param.evaluation_strategy);
// benchmark used to compare with the output from model
// benchmark used to compare with the output from model
std::wstring benchmark_file_name =
std::wstring(param.model_pixel_format + L'_' + param.input_pixel_format + L'_' + param.image_file_name);
// Verify the output by comparing with the benchmark image
// Verify the output by comparing with the benchmark image
std::wstring bm_image_path = FileHelpers::GetModulePath() + L"groundTruth\\" + benchmark_file_name;
if (OutputBindingStrategy::Unbound == param.output_binding_strategy) {
std::wstring output_data_binding_name = std::wstring(m_model.OutputFeatures().First().Current().Name());
@ -492,10 +492,10 @@ TEST_P(BatchTest, BatchSupport) {
GPUTEST;
}
// create model, device and session
// create model, device and session
PrepareModelSessionBinding(param.model_file_name, param.device_kind, optimized_batch_size);
// create the input video_frames
// create the input video_frames
std::vector<VideoFrame> input_frames = {};
if (param.input_images.empty()) {
for (int i = 0; i < param.batch_size; ++i) {
@ -544,7 +544,7 @@ TEST_P(BatchTest, BatchSupport) {
EvaluateTest(param.evaluation_strategy);
// benchmark used to compare with the output from model
// benchmark used to compare with the output from model
if (OutputBindingStrategy::Unbound == param.output_binding_strategy) {
std::wstring output_data_binding_name = std::wstring(m_model.OutputFeatures().First().Current().Name());
output_video_frames = m_result.Outputs().Lookup(output_data_binding_name).try_as<IVector<VideoFrame>>();


@ -74,4 +74,4 @@ bool CompareSequenceOfMapsStringToFloat(
return true;
}
}// namespace CompareFeatureValuesHelper
} // namespace CompareFeatureValuesHelper


@ -377,7 +377,7 @@ std::string GetFullNameOfTest(ITestCase* testCase, winml::LearningModelDeviceKin
// The model path is structured like this "<opset>/<model_name>/model.onnx
// The desired naming of the test is like this <model_name>_<opset>_<CPU/GPU>
name += tokenizedModelPath[tokenizedModelPath.size() - 2] += "_"; // model name
name += tokenizedModelPath[tokenizedModelPath.size() - 3]; // opset version
name += tokenizedModelPath[tokenizedModelPath.size() - 3]; // opset version
// To introduce models from model zoo, the model path is structured like this "<source>/<opset>/<model_name>/?.onnx"
std::string source = tokenizedModelPath[tokenizedModelPath.size() - 4];
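The comments above describe the naming scheme: a path shaped like "<opset>/<model_name>/model.onnx" becomes "<model_name>_<opset>_<device>". A rough sketch of that composition, where Tokenize and the device suffix are assumed helpers rather than the harness's actual code:

#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> Tokenize(const std::string& path, char sep = '/') {
  std::vector<std::string> parts;
  std::stringstream ss(path);
  std::string part;
  while (std::getline(ss, part, sep)) {
    parts.push_back(part);
  }
  return parts;
}

std::string MakeTestName(const std::string& modelPath, const std::string& device) {
  auto parts = Tokenize(modelPath);
  // parts[size - 2] is the model directory name, parts[size - 3] the opset directory.
  return parts[parts.size() - 2] + "_" + parts[parts.size() - 3] + "_" + device;
}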


@ -44,7 +44,7 @@ winml::ITensor CreateStringTensor(Ort::Value& val) {
WINML_EXPECT_NO_THROW(val.GetStringTensorContent(buffer.get(), bufferLength, offsets.data(), offsets.size()));
// now go build all the strings
// now go build all the strings
for (size_t i = 0; i < length; ++i) {
size_t strLength = 0;
// are we on the last one?
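The loop that begins above rebuilds individual strings from ONNX Runtime's packed string-tensor form: one contiguous character buffer plus a start offset per element, with the last element's length implied by the total buffer size. A hedged sketch of that reconstruction (not the adapter's actual code):

#include <cstddef>
#include <string>
#include <vector>

std::vector<std::string> UnpackStrings(const char* buffer, size_t bufferLength,
                                       const std::vector<size_t>& offsets) {
  std::vector<std::string> result;
  result.reserve(offsets.size());
  for (size_t i = 0; i < offsets.size(); ++i) {
    // Every string runs up to the next offset; the last one runs to the end of the buffer.
    const size_t end = (i + 1 < offsets.size()) ? offsets[i + 1] : bufferLength;
    result.emplace_back(buffer + offsets[i], end - offsets[i]);
  }
  return result;
}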


@ -5,7 +5,7 @@ namespace OrtValueHelpers {
winml::ITensor LoadTensorFromOrtValue(Ort::Value& val);
Ort::Value CreateOrtValueFromITensor(winml::ITensor winmlTensor);
}// namespace OrtValueHelpers
} // namespace OrtValueHelpers
template <ONNXTensorElementDataType T>
struct ONNXTensorElementDataTypeToWinMLTensorKind {


@ -41,10 +41,10 @@ struct CustomOperatorProvider
void RegisterSchemas() { NoisyReluOperatorFactory::RegisterNoisyReluSchema(m_registry); }
void RegisterKernels() {
// Replace the Relu operator kernel
// Replace the Relu operator kernel
ReluOperatorFactory::RegisterReluKernel(m_registry);
// Add a new operator kernel for Relu
// Add a new operator kernel for Relu
NoisyReluOperatorFactory::RegisterNoisyReluKernel(m_registry);
}


@ -29,49 +29,49 @@ struct NoisyReluOperator : winrt::implements<NoisyReluOperator, IMLOperatorKerne
NoisyReluOperator(float mean, float variance) : m_mean(mean), m_variance(variance) {}
// Computes the outputs of the kernel. This may be called multiple times
// simultaneously within the same instance of the class. Implementations
// of this method must be thread-safe.
// Computes the outputs of the kernel. This may be called multiple times
// simultaneously within the same instance of the class. Implementations
// of this method must be thread-safe.
STDMETHOD(Compute)(IMLOperatorKernelContext* context) {
try {
// Get the input tensor
// Get the input tensor
winrt::com_ptr<IMLOperatorTensor> inputTensor;
context->GetInputTensor(0, inputTensor.put());
// Get the output tensor
// Get the output tensor
winrt::com_ptr<IMLOperatorTensor> outputTensor;
context->GetOutputTensor(0, outputTensor.put());
// Get the input and output shape sizes
// Get the input and output shape sizes
uint32_t inputDimsSize = inputTensor->GetDimensionCount();
uint32_t outputDimsSize = outputTensor->GetDimensionCount();
if (inputDimsSize != outputDimsSize) {
return E_UNEXPECTED;
}
// Get the input shape
// Get the input shape
std::vector<uint32_t> inputDims(inputDimsSize);
outputTensor->GetShape(inputDimsSize, inputDims.data());
// Get the output shape
// Get the output shape
std::vector<uint32_t> outputDims(outputDimsSize);
outputTensor->GetShape(outputDimsSize, outputDims.data());
// For the number of total elements in the input and output shapes
// For the number of total elements in the input and output shapes
auto outputDataSize = std::accumulate(outputDims.begin(), outputDims.end(), 1, std::multiplies<uint32_t>());
auto inputDataSize = std::accumulate(inputDims.begin(), inputDims.end(), 1, std::multiplies<uint32_t>());
if (outputDataSize != inputDataSize) {
return E_UNEXPECTED;
}
// If the tensor types are both float type
// If the tensor types are both float type
if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float && inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float) {
// For cpu data
// For cpu data
if (outputTensor->IsCpuData() && inputTensor->IsCpuData()) {
ComputeInternal<float>(inputTensor.get(), outputTensor.get(), inputDataSize);
}
} else if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double && inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double) {
// For cpu data
// For cpu data
if (outputTensor->IsCpuData() && inputTensor->IsCpuData()) {
ComputeInternal<double>(inputTensor.get(), outputTensor.get(), inputDataSize);
}
@ -85,7 +85,7 @@ struct NoisyReluOperator : winrt::implements<NoisyReluOperator, IMLOperatorKerne
template <typename T, typename U = T>
void ComputeInternal(IMLOperatorTensor* pInputTensor, IMLOperatorTensor* pOutputTensor, uint32_t size) {
// Create a normal distribution
// Create a normal distribution
std::normal_distribution<> dist{m_mean, m_variance};
std::random_device rd{};
std::mt19937 gen{rd()};


@ -22,51 +22,51 @@ struct ReluShapeInferrer : winrt::implements<ReluShapeInferrer, IMLOperatorShape
struct ReluOperator : winrt::implements<ReluOperator, IMLOperatorKernel> {
ReluOperator() {}
// Computes the outputs of the kernel. In this case, the output will represent
// the Rectified Linear Unit (Relu) output.
//
// Based on the operators location in the model graph this operator may be called multiple times
// or simultaneously within the same instance of the class during evaluation. Implementations
// of this method must be thread-safe.
// Computes the outputs of the kernel. In this case, the output will represent
// the Rectified Linear Unit (Relu) output.
//
// Based on the operators location in the model graph this operator may be called multiple times
// or simultaneously within the same instance of the class during evaluation. Implementations
// of this method must be thread-safe.
STDMETHOD(Compute)(IMLOperatorKernelContext* context) {
// Get the input tensor
// Get the input tensor
winrt::com_ptr<IMLOperatorTensor> inputTensor;
context->GetInputTensor(0, inputTensor.put());
// Get the output tensor
// Get the output tensor
winrt::com_ptr<IMLOperatorTensor> outputTensor;
context->GetOutputTensor(0, outputTensor.put());
// Get the input and output shape sizes
// Get the input and output shape sizes
uint32_t inputDimsSize = inputTensor->GetDimensionCount();
uint32_t outputDimsSize = outputTensor->GetDimensionCount();
if (inputDimsSize != outputDimsSize) {
return E_UNEXPECTED;
}
// Get the input shape
// Get the input shape
std::vector<uint32_t> inputDims(inputDimsSize);
outputTensor->GetShape(inputDimsSize, inputDims.data());
// Get the output shape
// Get the output shape
std::vector<uint32_t> outputDims(outputDimsSize);
outputTensor->GetShape(outputDimsSize, outputDims.data());
// For the number of total elements in the input and output shapes
// For the number of total elements in the input and output shapes
auto outputDataSize = std::accumulate(outputDims.begin(), outputDims.end(), 1, std::multiplies<uint32_t>());
auto inputDataSize = std::accumulate(inputDims.begin(), inputDims.end(), 1, std::multiplies<uint32_t>());
if (outputDataSize != inputDataSize) {
return E_UNEXPECTED;
}
// If the tensor types are both float type
// If the tensor types are both float type
if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float && inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float) {
// For cpu data
// For cpu data
if (outputTensor->IsCpuData() && inputTensor->IsCpuData()) {
ComputeInternal<float>(inputTensor.get(), outputTensor.get(), inputDataSize);
}
} else if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double && inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double) {
// For cpu data
// For cpu data
if (outputTensor->IsCpuData() && inputTensor->IsCpuData()) {
ComputeInternal<double>(inputTensor.get(), outputTensor.get(), inputDataSize);
}