Support Models with Image Denotation Input (#209)

* Added support for models with image denotation

* Added image binding support for models with image denotation

* Added mnist model and mnist input files

* Model tensor inputs must have 3 or 1 channels when binding an image

* Added mnist tests

* Removed ModelBinding.h

* Addressed PR comments

* Fixed the is-image / is-CSV input check
Ryan Lai 2019-04-09 17:33:57 -07:00 committed by GitHub
Parent 4554ed6859
Commit 5cf6f977ef
No key matching this signature was found
GPG key ID: 4AEE18F83AFDEB23
10 changed files with 355 additions and 382 deletions
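At a high level, the change teaches WinMLRunner to handle model inputs declared with image denotation, which WinML surfaces as ImageFeatureDescriptor, alongside the existing plain tensor inputs. A minimal sketch of that dispatch against the public Windows.AI.MachineLearning C++/WinRT API (the InspectInputs helper is hypothetical; the WinML types are real):

#include <winrt/Windows.AI.MachineLearning.h>
using namespace winrt::Windows::AI::MachineLearning;

// Sketch: choose a binding strategy for each model input based on its feature kind.
void InspectInputs(const LearningModel& model)
{
    for (const auto& feature : model.InputFeatures())
    {
        if (feature.Kind() == LearningModelFeatureKind::Image)
        {
            // Image denotation: bind an ImageFeatureValue built from a VideoFrame.
            auto imageDescriptor = feature.as<ImageFeatureDescriptor>();
            auto height = imageDescriptor.Height();
            auto width = imageDescriptor.Width();
            // ... load or generate a height x width image and bind it ...
        }
        else if (feature.Kind() == LearningModelFeatureKind::Tensor)
        {
            // Plain tensor input: bind a tensor of the declared shape and TensorKind.
            auto tensorDescriptor = feature.as<TensorFeatureDescriptor>();
            auto shape = tensorDescriptor.Shape();
            // ... create and bind a tensor of that shape ...
        }
    }
}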

View file

@@ -212,9 +212,10 @@ namespace WinMLRunnerTest
return true;
}
TEST_CLASS(GarbageInputTest){ public : TEST_CLASS_INITIALIZE(SetupClass){
// Make test_folder_input folder before starting the tests
std::string mkFolderCommand = "mkdir " + std::string(INPUT_FOLDER_PATH.begin(), INPUT_FOLDER_PATH.end());
TEST_CLASS(GarbageInputTest) {
public: TEST_CLASS_INITIALIZE(SetupClass) {
// Make test_folder_input folder before starting the tests
std::string mkFolderCommand = "mkdir " + std::string(INPUT_FOLDER_PATH.begin(), INPUT_FOLDER_PATH.end());
system(mkFolderCommand.c_str());
std::vector<std::string> models = { "SqueezeNet.onnx", "keras_Add_ImageNet_small.onnx" };
@@ -226,7 +227,7 @@ namespace WinMLRunnerTest
copyCommand += model;
copyCommand += ' ' + std::string(INPUT_FOLDER_PATH.begin(), INPUT_FOLDER_PATH.end());
system(copyCommand.c_str());
}
}
} // namespace WinMLRunnerTest
TEST_CLASS_CLEANUP(CleanupClass)
@@ -284,18 +285,6 @@ namespace WinMLRunnerTest
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuClientDeviceCpuBoundRGBImage)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
const std::wstring command =
BuildCommand({ EXE_PATH, L"-model", modelPath, L"-PerfOutput", OUTPUT_PATH, L"-perf", L"-CPU",
L"-CPUBoundInput", L"-RGB", L"-CreateDeviceOnClient" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
// We need to expect one more line because of the header
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuWinMLDeviceCpuBoundRGBImage)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
@@ -308,18 +297,6 @@ namespace WinMLRunnerTest
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuClientDeviceCpuBoundBGRImage)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
const std::wstring command =
BuildCommand({ EXE_PATH, L"-model", modelPath, L"-PerfOutput", OUTPUT_PATH, L"-perf", L"-CPU",
L"-CPUBoundInput", L"-BGR", L"-CreateDeviceOnClient" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
// We need to expect one more line because of the header
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuWinMLDeviceCpuBoundBGRImage)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
@@ -332,18 +309,6 @@ namespace WinMLRunnerTest
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuClientDeviceCpuBoundTensor)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
const std::wstring command =
BuildCommand({ EXE_PATH, L"-model", modelPath, L"-PerfOutput", OUTPUT_PATH, L"-perf", L"-CPU",
L"-CPUBoundInput", L"-tensor", L"-CreateDeviceOnClient" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
// We need to expect one more line because of the header
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuWinMLDeviceCpuBoundTensor)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
@@ -356,18 +321,6 @@ namespace WinMLRunnerTest
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuClientDeviceGpuBoundRGBImage)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
const std::wstring command =
BuildCommand({ EXE_PATH, L"-model", modelPath, L"-PerfOutput", OUTPUT_PATH, L"-perf", L"-CPU",
L"-GPUBoundInput", L"-RGB", L"-CreateDeviceOnClient" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
// We need to expect one more line because of the header
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuWinMLDeviceGpuBoundRGBImage)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
@@ -380,18 +333,6 @@ namespace WinMLRunnerTest
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuClientDeviceGpuBoundBGRImage)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
const std::wstring command =
BuildCommand({ EXE_PATH, L"-model", modelPath, L"-PerfOutput", OUTPUT_PATH, L"-perf", L"-CPU",
L"-GPUBoundInput", L"-BGR", L"-CreateDeviceOnClient" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
// We need to expect one more line because of the header
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuWinMLDeviceGpuBoundBGRImage)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
@@ -404,18 +345,6 @@ namespace WinMLRunnerTest
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuClientDeviceGpuBoundTensor)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
const std::wstring command =
BuildCommand({ EXE_PATH, L"-model", modelPath, L"-PerfOutput", OUTPUT_PATH, L"-perf", L"-CPU",
L"-GPUBoundInput", L"-tensor", L"-CreateDeviceOnClient" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
// We need to expect one more line because of the header
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputCpuWinMLDeviceGpuBoundTensor)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
@@ -572,32 +501,6 @@ namespace WinMLRunnerTest
Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount());
}
TEST_METHOD(GarbageInputAllPermutations)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
const std::wstring command = BuildCommand({
EXE_PATH,
L"-model",
modelPath,
L"-PerfOutput",
OUTPUT_PATH,
L"-perf",
L"-CPU",
L"-GPU",
L"-CreateDeviceOnClient",
L"-CreateDeviceInWinML",
L"-CPUBoundInput",
L"-GPUBoundInput",
L"-RGB",
L"-BGR",
L"-tensor"
});
Assert::AreEqual(S_OK, RunProc((wchar_t *)command.c_str()));
// We need to expect one more line because of the header
Assert::AreEqual(static_cast<size_t>(25), GetOutputCSVLineCount());
}
TEST_METHOD(RunAllModelsInFolderGarbageInput)
{
const std::wstring command = BuildCommand({ EXE_PATH, L"-folder", INPUT_FOLDER_PATH, L"-PerfOutput", OUTPUT_PATH, L"-perf" });
@@ -606,31 +509,6 @@ namespace WinMLRunnerTest
// We need to expect one more line because of the header
Assert::AreEqual(static_cast<size_t>(5), GetOutputCSVLineCount());
}
TEST_METHOD(RunAllModelsInFolderGarbageInputWithAllPermutations)
{
const std::wstring command = BuildCommand({
EXE_PATH,
L"-folder",
INPUT_FOLDER_PATH,
L"-PerfOutput",
OUTPUT_PATH,
L"-perf",
L"-CPU",
L"-GPU",
L"-CreateDeviceOnClient",
L"-CreateDeviceInWinML",
L"-CPUBoundInput",
L"-GPUBoundInput",
L"-RGB",
L"-BGR",
L"-tensor"
});
Assert::AreEqual(S_OK, RunProc((wchar_t *)command.c_str()));
// We need to expect one more line because of the header
Assert::AreEqual(static_cast<size_t>(49), GetOutputCSVLineCount());
}
};
TEST_CLASS(ImageInputTest)
@@ -697,6 +575,27 @@ namespace WinMLRunnerTest
Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Squeezenet_fish_input_CPU.csv",
TENSOR_DATA_PATH + L"\\softmaxout_1CpuIteration1.csv"));
}
TEST_METHOD(ProvidedImageInputOnlyGpuSaveTensorImageDenotation)
{
const std::wstring modelPath = CURRENT_PATH + L"mnist.onnx";
const std::wstring inputPath = CURRENT_PATH + L"mnist_28.png";
const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
L"-SaveTensorData", L"First", TENSOR_DATA_PATH, L"-GPU" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Mnist_8_input_GPU.csv",
TENSOR_DATA_PATH + L"\\Plus214_Output_0GpuIteration1.csv"));
}
TEST_METHOD(ProvidedImageInputOnlyCpuSaveTensorImageDenotation)
{
const std::wstring modelPath = CURRENT_PATH + L"mnist.onnx";
const std::wstring inputPath = CURRENT_PATH + L"mnist_28.png";
const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
L"-SaveTensorData", L"First", TENSOR_DATA_PATH, L"-CPU" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Mnist_8_input_CPU.csv",
TENSOR_DATA_PATH + L"\\Plus214_Output_0CpuIteration1.csv"));
}
TEST_METHOD(ProvidedImageInputOnlyGpuSaveTensorFp16)
{
const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet_fp16.onnx";
@@ -787,6 +686,27 @@ namespace WinMLRunnerTest
Assert::AreEqual(true, CompareTensorsFP16(L"OutputTensorData\\Squeezenet_fp16_fish_input_CPU.csv",
TENSOR_DATA_PATH + L"\\softmaxout_1CpuIteration1.csv"));
}
TEST_METHOD(ProvidedCSVInputOnlyGpuSaveTensorImageDenotation)
{
const std::wstring modelPath = CURRENT_PATH + L"mnist.onnx";
const std::wstring inputPath = CURRENT_PATH + L"mnist_28.csv";
const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
L"-SaveTensorData", L"First", TENSOR_DATA_PATH, L"-GPU" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Mnist_8_input_GPU.csv",
TENSOR_DATA_PATH + L"\\Plus214_Output_0GpuIteration1.csv"));
}
TEST_METHOD(ProvidedCSVInputOnlyCpuSaveTensorImageDenotation)
{
const std::wstring modelPath = CURRENT_PATH + L"mnist.onnx";
const std::wstring inputPath = CURRENT_PATH + L"mnist_28.csv";
const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
L"-SaveTensorData", L"First", TENSOR_DATA_PATH, L"-CPU" });
Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Mnist_8_input_CPU.csv",
TENSOR_DATA_PATH + L"\\Plus214_Output_0CpuIteration1.csv"));
}
};
TEST_CLASS(ConcurrencyTest)

View file

@@ -368,6 +368,39 @@
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</DeploymentContent>
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="OutputTensorData\Mnist_8_input_CPU.csv">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">false</ExcludedFromBuild>
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</DeploymentContent>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</ExcludedFromBuild>
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</DeploymentContent>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</ExcludedFromBuild>
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</DeploymentContent>
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="OutputTensorData\Mnist_8_input_GPU.csv">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">false</ExcludedFromBuild>
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</DeploymentContent>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</ExcludedFromBuild>
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</DeploymentContent>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</ExcludedFromBuild>
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</DeploymentContent>
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
</ItemGroup>
<ItemGroup>
<CopyFileToFolders Include="..\..\SharedContent\media\mnist_28.csv">
<FileType>Document</FileType>
</CopyFileToFolders>
<CopyFileToFolders Include="..\..\SharedContent\models\mnist.onnx">
<FileType>Document</FileType>
</CopyFileToFolders>
</ItemGroup>
<ItemGroup>
<CopyFileToFolders Include="..\..\SharedContent\media\mnist_28.png" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">

View file

@@ -33,4 +33,15 @@
<Filter>SharedContent</Filter>
</Content>
</ItemGroup>
<ItemGroup>
<CopyFileToFolders Include="..\..\SharedContent\media\mnist_28.png">
<Filter>SharedContent</Filter>
</CopyFileToFolders>
<CopyFileToFolders Include="..\..\SharedContent\media\mnist_28.csv">
<Filter>SharedContent</Filter>
</CopyFileToFolders>
<CopyFileToFolders Include="..\..\SharedContent\models\mnist.onnx">
<Filter>SharedContent</Filter>
</CopyFileToFolders>
</ItemGroup>
</Project>

View file

@@ -35,7 +35,7 @@ Required command-Line arguments:
-GPU : run model on default GPU
-GPUHighPerformance : run model on GPU with highest performance
-GPUMinPower : run model on GPU with the least power
-CreateDeviceOnClient : create the device on the client and pass it to WinML
-CreateDeviceOnClient : create the D3D device on the client and pass it to WinML to create session
-CreateDeviceInWinML : create the device inside WinML
-CPUBoundInput : bind the input to the CPU
-GPUBoundInput : bind the input to the GPU
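For example, a run of the new mnist image-denotation scenario might look like the following (executable path, model path, and image path are placeholders):

WinMLRunner.exe -model .\mnist.onnx -input .\mnist_28.png -CPU -GPU -CreateDeviceInWinML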

View file

@@ -2,7 +2,6 @@
#include <random>
#include <time.h>
#include "Common.h"
#include "ModelBinding.h"
#include "Windows.AI.Machinelearning.Native.h"
using namespace winrt::Windows::Media;
@@ -63,7 +62,7 @@ template <> struct TensorKindToType<TensorKind::Float>
};
template <> struct TensorKindToType<TensorKind::Float16>
{
typedef float Type;
typedef HALF Type;
};
template <> struct TensorKindToType<TensorKind::String>
{
@@ -127,26 +126,46 @@ template <> struct TensorKindToValue<TensorKind::String>
typedef TensorString Type;
};
void GetHeightAndWidthFromLearningModelFeatureDescriptor(const ILearningModelFeatureDescriptor& modelFeatureDescriptor,
uint64_t& width, uint64_t& height)
{
if (modelFeatureDescriptor.Kind() == LearningModelFeatureKind::Tensor)
{
// We assume NCHW
auto tensorDescriptor = modelFeatureDescriptor.try_as<TensorFeatureDescriptor>();
if (tensorDescriptor.Shape().Size() != 4)
{
throw hresult_invalid_argument(L"Cannot generate arbitrary image for tensor input of dimensions: " +
tensorDescriptor.Shape().Size());
}
height = tensorDescriptor.Shape().GetAt(2);
width = tensorDescriptor.Shape().GetAt(3);
}
else if (modelFeatureDescriptor.Kind() == LearningModelFeatureKind::Image)
{
auto imageDescriptor = modelFeatureDescriptor.try_as<IImageFeatureDescriptor>();
height = imageDescriptor.Height();
width = imageDescriptor.Width();
}
else
{
throw hresult_not_implemented(
L"Generating arbitrary image not supported for input types that aren't tensor or image.");
}
}
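A minimal sketch of how this helper might be called for a model's first input (the model variable is assumed to be an already-loaded LearningModel):

uint64_t width = 0;
uint64_t height = 0;
// Works for both NCHW tensor inputs and image-denotation inputs.
GetHeightAndWidthFromLearningModelFeatureDescriptor(model.InputFeatures().GetAt(0), width, height);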
namespace BindingUtilities
{
static unsigned int seed = 0;
static std::independent_bits_engine<std::default_random_engine, CHAR_BIT, unsigned int> randomBitsEngine;
SoftwareBitmap GenerateGarbageImage(const TensorFeatureDescriptor& imageDescriptor, InputDataType inputDataType)
SoftwareBitmap GenerateGarbageImage(const ILearningModelFeatureDescriptor& modelFeatureDescriptor,
InputDataType inputDataType)
{
assert(inputDataType != InputDataType::Tensor);
// We assume NCHW and NCDHW
uint64_t width = imageDescriptor.Shape().GetAt(imageDescriptor.Shape().Size() - 1);
uint64_t height = imageDescriptor.Shape().GetAt(imageDescriptor.Shape().Size() - 2);
uint64_t channelCount = imageDescriptor.Shape().GetAt(1);
uint64_t batchCount = imageDescriptor.Shape().GetAt(0);
// If the batchCount is infinite, we can put as many images as we want
if (batchCount >= ULLONG_MAX)
{
batchCount = 3;
}
uint64_t width = 0;
uint64_t height = 0;
GetHeightAndWidthFromLearningModelFeatureDescriptor(modelFeatureDescriptor, width, height);
// We have to create RGBA8 or BGRA8 images, so we need 4 channels
uint32_t totalByteSize = static_cast<uint32_t>(width) * static_cast<uint32_t>(height) * 4;
@@ -168,17 +187,16 @@ namespace BindingUtilities
static_cast<int32_t>(width), static_cast<int32_t>(height));
}
SoftwareBitmap LoadImageFile(const TensorFeatureDescriptor& imageDescriptor, InputDataType inputDataType,
const hstring& filePath, const CommandLineArgs& args, uint32_t iterationNum)
SoftwareBitmap LoadImageFile(const ILearningModelFeatureDescriptor& modelFeatureDescriptor,
const InputDataType inputDataType, const hstring& filePath,
const CommandLineArgs& args, uint32_t iterationNum)
{
assert(inputDataType != InputDataType::Tensor);
// We assume NCHW and NCDHW
uint64_t width = imageDescriptor.Shape().GetAt(imageDescriptor.Shape().Size() - 1);
uint64_t height = imageDescriptor.Shape().GetAt(imageDescriptor.Shape().Size() - 2);
uint64_t channelCount = imageDescriptor.Shape().GetAt(1);
uint64_t batchCount = imageDescriptor.Shape().GetAt(0);
uint64_t width = 0;
uint64_t height = 0;
GetHeightAndWidthFromLearningModelFeatureDescriptor(modelFeatureDescriptor, width, height);
try
{
// open the file
@@ -269,18 +287,26 @@ namespace BindingUtilities
}
template <typename T>
void WriteDataToBinding(const std::vector<std::string>& elementStrings, ModelBinding<T>& binding)
void WriteDataToBinding(const std::vector<std::string>& elementStrings, T* bindingMemory,
uint32_t bindingMemorySize)
{
if (binding.GetDataBufferSize() != elementStrings.size())
if (bindingMemorySize / sizeof(T) != elementStrings.size())
{
throw hresult_invalid_argument(L"CSV Input is size/shape is different from what model expects");
}
T* data = binding.GetData();
T* data = bindingMemory;
for (const auto& elementString : elementStrings)
{
T value;
float value;
std::stringstream(elementString) >> value;
*data = value;
if (!std::is_same<T, HALF>::value)
{
*data = static_cast<T>(value);
}
else
{
*reinterpret_cast<HALF*>(data) = XMConvertFloatToHalf(value);
}
data++;
}
}
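The Float16 branch above leans on DirectXMath's packed-vector helpers: HALF is the 16-bit half-precision storage type and XMConvertFloatToHalf performs the float-to-half conversion. A standalone sketch of that conversion (values are arbitrary):

#include <cstdlib>
#include <DirectXPackedVector.h>

int main()
{
    using namespace DirectX::PackedVector;
    // Convert a parsed float into the 16-bit layout a Float16 tensor buffer expects.
    float parsed = 0.5f;
    HALF asHalf = XMConvertFloatToHalf(parsed);
    float roundTripped = XMConvertHalfToFloat(asHalf); // ~0.5f
    return roundTripped > 0.0f ? EXIT_SUCCESS : EXIT_FAILURE;
}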
@@ -300,142 +326,157 @@ namespace BindingUtilities
}
template <TensorKind T>
static ITensor CreateTensor(const CommandLineArgs& args, std::vector<std::string>& tensorStringInput,
TensorFeatureDescriptor& tensorDescriptor)
static ITensor CreateTensor(const CommandLineArgs& args, const std::vector<std::string>& tensorStringInput,
const IVectorView<int64_t>& tensorShape)
{
using TensorValue = typename TensorKindToValue<T>::Type;
using DataType = typename TensorKindToType<T>::Type;
if (!args.CsvPath().empty())
std::vector<int64_t> vecShape = {};
for (UINT dim = 0; dim < tensorShape.Size(); dim++)
{
ModelBinding<DataType> binding(tensorDescriptor);
WriteDataToBinding<DataType>(tensorStringInput, binding);
return TensorValue::CreateFromArray(binding.GetShapeBuffer(), binding.GetDataBuffer());
}
else if (args.IsGarbageInput())
{
std::vector<int64_t> vecShape = {};
auto tensorDescriptorShape = tensorDescriptor.Shape();
for (UINT dim = 0; dim < tensorDescriptorShape.Size(); dim++)
INT64 dimSize = tensorShape.GetAt(dim);
if (dimSize > 0) // If the dimension is greater than 0, then it is known.
{
INT64 dimSize = tensorDescriptorShape.GetAt(dim);
if (dimSize > 0) // If the dimension is greater than 0, then it is known.
vecShape.push_back(dimSize);
}
else // otherwise, make sure that the dimension is -1, representing free dimension. If not, then it's an
// invalid model.
{
if (dimSize == -1)
{
vecShape.push_back(dimSize);
vecShape.push_back(1);
}
else // otherwise, make sure that the dimension is -1, representing free dimension. If not, then it's an
// invalid model.
else
{
if (dimSize == -1)
{
vecShape.push_back(1);
}
else
{
throw hresult_invalid_argument(L"Failed to create a tensor with an unknown dimension of: " +
dimSize);
}
throw hresult_invalid_argument(L"Failed to create a tensor with an unknown dimension of: " +
dimSize);
}
}
auto tensorValue = TensorValue::Create(vecShape);
com_ptr<ITensorNative> spTensorValueNative;
tensorValue.as(spTensorValueNative);
BYTE* actualData;
uint32_t actualSizeInBytes;
spTensorValueNative->GetBuffer(
&actualData, &actualSizeInBytes); // Need to GetBuffer to have CPU memory backing tensorValue
return tensorValue;
}
else
auto tensorValue = TensorValue::Create(vecShape);
com_ptr<ITensorNative> spTensorValueNative;
tensorValue.as(spTensorValueNative);
BYTE* actualData;
uint32_t actualSizeInBytes;
spTensorValueNative->GetBuffer(&actualData,
&actualSizeInBytes); // Need to GetBuffer to have CPU memory backing tensorValue
if (args.IsCSVInput())
{
WriteDataToBinding<DataType>(tensorStringInput, reinterpret_cast<DataType*>(actualData), actualSizeInBytes);
}
else if (args.IsImageInput())
{
// Creating Tensors for Input Images haven't been added yet.
throw hresult_not_implemented(L"Creating Tensors for Input Images haven't been implemented yet!");
}
return tensorValue;
}
// Binds tensor floats, ints, doubles from CSV data.
ITensor CreateBindableTensor(const ILearningModelFeatureDescriptor& description, const CommandLineArgs& args)
{
auto name = description.Name();
auto tensorDescriptor = description.try_as<TensorFeatureDescriptor>();
if (!tensorDescriptor)
{
std::cout << "BindingUtilities: Input Descriptor type isn't tensor." << std::endl;
throw;
}
std::vector<std::string> elementStrings;
if (!args.CsvPath().empty())
{
elementStrings = ParseCSVElementStrings(args.CsvPath());
}
switch (tensorDescriptor.TensorKind())
// Try Image Feature Descriptor
auto imageFeatureDescriptor = description.try_as<ImageFeatureDescriptor>();
if (imageFeatureDescriptor)
{
case TensorKind::Undefined:
int64_t channels;
if (imageFeatureDescriptor.BitmapPixelFormat() == BitmapPixelFormat::Gray16 ||
imageFeatureDescriptor.BitmapPixelFormat() == BitmapPixelFormat::Gray8)
{
std::cout << "BindingUtilities: TensorKind is undefined." << std::endl;
throw hresult_invalid_argument();
channels = 1;
}
case TensorKind::Float:
else if (imageFeatureDescriptor.BitmapPixelFormat() == BitmapPixelFormat::Bgra8 ||
imageFeatureDescriptor.BitmapPixelFormat() == BitmapPixelFormat::Rgba16 ||
imageFeatureDescriptor.BitmapPixelFormat() == BitmapPixelFormat::Rgba8)
{
return CreateTensor<TensorKind::Float>(args, elementStrings, tensorDescriptor);
channels = 3;
}
break;
case TensorKind::Float16:
else
{
return CreateTensor<TensorKind::Float16>(args, elementStrings, tensorDescriptor);
throw hresult_not_implemented(L"BitmapPixel format not yet handled by WinMLRunner.");
}
break;
case TensorKind::Double:
{
return CreateTensor<TensorKind::Double>(args, elementStrings, tensorDescriptor);
}
break;
case TensorKind::Int8:
{
return CreateTensor<TensorKind::Int8>(args, elementStrings, tensorDescriptor);
}
break;
case TensorKind::UInt8:
{
return CreateTensor<TensorKind::UInt8>(args, elementStrings, tensorDescriptor);
}
break;
case TensorKind::Int16:
{
return CreateTensor<TensorKind::Int16>(args, elementStrings, tensorDescriptor);
}
break;
case TensorKind::UInt16:
{
return CreateTensor<TensorKind::UInt16>(args, elementStrings, tensorDescriptor);
}
break;
case TensorKind::Int32:
{
return CreateTensor<TensorKind::Int32>(args, elementStrings, tensorDescriptor);
}
break;
case TensorKind::UInt32:
{
return CreateTensor<TensorKind::UInt32>(args, elementStrings, tensorDescriptor);
}
break;
case TensorKind::Int64:
{
return CreateTensor<TensorKind::Int64>(args, elementStrings, tensorDescriptor);
}
break;
case TensorKind::UInt64:
{
return CreateTensor<TensorKind::UInt64>(args, elementStrings, tensorDescriptor);
}
break;
std::vector<int64_t> shape = { 1, channels, imageFeatureDescriptor.Height(),
imageFeatureDescriptor.Width() };
IVectorView<int64_t> shapeVectorView = single_threaded_vector(std::move(shape)).GetView();
return CreateTensor<TensorKind::Float>(args, elementStrings, shapeVectorView);
}
auto tensorDescriptor = description.try_as<TensorFeatureDescriptor>();
if (tensorDescriptor)
{
switch (tensorDescriptor.TensorKind())
{
case TensorKind::Undefined:
{
std::cout << "BindingUtilities: TensorKind is undefined." << std::endl;
throw hresult_invalid_argument();
}
case TensorKind::Float:
{
return CreateTensor<TensorKind::Float>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::Float16:
{
return CreateTensor<TensorKind::Float16>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::Double:
{
return CreateTensor<TensorKind::Double>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::Int8:
{
return CreateTensor<TensorKind::Int8>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::UInt8:
{
return CreateTensor<TensorKind::UInt8>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::Int16:
{
return CreateTensor<TensorKind::Int16>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::UInt16:
{
return CreateTensor<TensorKind::UInt16>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::Int32:
{
return CreateTensor<TensorKind::Int32>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::UInt32:
{
return CreateTensor<TensorKind::UInt32>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::Int64:
{
return CreateTensor<TensorKind::Int64>(args, elementStrings, tensorDescriptor.Shape());
}
break;
case TensorKind::UInt64:
{
return CreateTensor<TensorKind::UInt64>(args, elementStrings, tensorDescriptor.Shape());
}
break;
}
}
std::cout << "BindingUtilities: TensorKind has not been implemented." << std::endl;
throw hresult_not_implemented();
}
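The tensor path above creates the value first and then pulls its CPU buffer through ITensorNative before filling it. A minimal standalone sketch of that pattern for a 1x1x28x28 (NCHW) float input (the shape is illustrative):

#include <vector>
#include <winrt/Windows.AI.MachineLearning.h>
#include "Windows.AI.Machinelearning.Native.h" // ITensorNative
using namespace winrt::Windows::AI::MachineLearning;

TensorFloat MakeCpuBackedTensor()
{
    std::vector<int64_t> shape = { 1, 1, 28, 28 };
    TensorFloat tensor = TensorFloat::Create(shape);

    winrt::com_ptr<ITensorNative> native;
    tensor.as(native);

    BYTE* data = nullptr;
    uint32_t sizeInBytes = 0;
    native->GetBuffer(&data, &sizeInBytes); // forces CPU memory to back the tensor
    // ... write sizeInBytes / sizeof(float) floats into data ...
    return tensor;
}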
@@ -445,20 +486,10 @@ namespace BindingUtilities
InputDataType inputDataType, const IDirect3DDevice winrtDevice,
const CommandLineArgs& args, uint32_t iterationNum)
{
auto imageDescriptor = featureDescriptor.try_as<TensorFeatureDescriptor>();
if (!imageDescriptor)
{
std::cout << "BindingUtilities: Input Descriptor type isn't tensor." << std::endl;
throw;
}
auto softwareBitmap =
imagePath.empty() ? GenerateGarbageImage(imageDescriptor, inputDataType)
: LoadImageFile(imageDescriptor, inputDataType, imagePath.c_str(), args, iterationNum);
imagePath.empty() ? GenerateGarbageImage(featureDescriptor, inputDataType)
: LoadImageFile(featureDescriptor, inputDataType, imagePath.c_str(), args, iterationNum);
auto videoFrame = CreateVideoFrame(softwareBitmap, inputBindingType, inputDataType, winrtDevice);
return ImageFeatureValue::CreateFromVideoFrame(videoFrame);
}
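CreateImageFeatureValue ultimately wraps the bitmap the same way regardless of whether the input is declared as a tensor or with image denotation. A minimal sketch of that final wrapping step, assuming a SoftwareBitmap in a bindable format such as Bgra8:

#include <winrt/Windows.AI.MachineLearning.h>
#include <winrt/Windows.Graphics.Imaging.h>
#include <winrt/Windows.Media.h>
using namespace winrt::Windows::AI::MachineLearning;
using namespace winrt::Windows::Graphics::Imaging;
using namespace winrt::Windows::Media;

ImageFeatureValue MakeImageFeature(const SoftwareBitmap& bitmap)
{
    // Wrap the CPU bitmap in a VideoFrame and expose it as a bindable feature value.
    VideoFrame frame = VideoFrame::CreateWithSoftwareBitmap(bitmap);
    return ImageFeatureValue::CreateFromVideoFrame(frame);
}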
@@ -577,8 +608,7 @@ namespace BindingUtilities
{
auto maxValue = pair.first;
auto maxIndex = pair.second;
std::wcout << " index: " << maxIndex << ", value: " << maxValue
<< std::endl;
std::wcout << " index: " << maxIndex << ", value: " << maxValue << std::endl;
}
}
}

Просмотреть файл

@@ -19,7 +19,7 @@ void CommandLineArgs::PrintUsage()
std::cout << " -GPU : run model on default GPU" << std::endl;
std::cout << " -GPUHighPerformance : run model on GPU with highest performance" << std::endl;
std::cout << " -GPUMinPower : run model on GPU with the least power" << std::endl;
std::cout << " -CreateDeviceOnClient : create the device on the client and pass it to WinML" << std::endl;
std::cout << " -CreateDeviceOnClient : create the D3D device on the client and pass it to WinML to create session" << std::endl;
std::cout << " -CreateDeviceInWinML : create the device inside WinML" << std::endl;
std::cout << " -CPUBoundInput : bind the input to the CPU" << std::endl;
std::cout << " -GPUBoundInput : bind the input to the GPU" << std::endl;

View file

@@ -68,6 +68,8 @@ public:
// When there is no image or csv input provided, then garbage input binding is used.
return m_imagePath.empty() && m_csvData.empty();
}
bool IsCSVInput() const { return m_imagePath.empty() && !m_csvData.empty(); }
bool IsImageInput() const { return !m_imagePath.empty() && m_csvData.empty(); }
uint32_t NumIterations() const { return m_numIterations; }
uint32_t NumThreads() const { return m_numThreads; }
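The two new predicates make the input source resolve to exactly one of three modes. A sketch of how a caller might dispatch on them (args is assumed to be a CommandLineArgs; the Bind* helpers are hypothetical, while CsvPath and IsGarbageInput are existing accessors):

// Exactly one of these holds for a valid argument set.
if (args.IsImageInput())
    BindImageInput(args);             // hypothetical helper: bind the provided image file
else if (args.IsCSVInput())
    BindCsvInput(args.CsvPath());     // hypothetical helper: parse the CSV into a tensor
else if (args.IsGarbageInput())
    BindGeneratedInput(args);         // hypothetical helper: generate garbage data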

View file

@@ -1,76 +0,0 @@
#pragma once
#include "Common.h"
// Stores data and size of input and output bindings.
template <typename T> class ModelBinding
{
public:
ModelBinding(winrt::Windows::AI::MachineLearning::ILearningModelFeatureDescriptor variableDesc)
: m_bindingDesc(variableDesc)
{
UINT numElements = 0;
if (variableDesc.Kind() == LearningModelFeatureKind::Tensor)
{
InitTensorBinding(variableDesc, numElements);
}
else
{
ThrowFailure(L"ModelBinding: Binding feature type not implemented");
}
}
winrt::Windows::AI::MachineLearning::ILearningModelFeatureDescriptor GetDesc() { return m_bindingDesc; }
UINT GetNumElements() const { return m_numElements; }
UINT GetElementSize() const { return m_elementSize; }
std::vector<int64_t> GetShapeBuffer() { return m_shapeBuffer; }
T* GetData() { return m_dataBuffer.data(); }
std::vector<T> GetDataBuffer() { return m_dataBuffer; }
size_t GetDataBufferSize() { return m_dataBuffer.size(); }
private:
void InitNumElementsAndShape(winrt::Windows::Foundation::Collections::IVectorView<int64_t>* shape,
UINT numDimensions, UINT numElements)
{
int unknownDim = -1;
UINT numKnownElements = 1;
for (UINT dim = 0; dim < numDimensions; dim++)
{
INT64 dimSize = shape->GetAt(dim);
if (dimSize <= 0)
{
if (unknownDim == -1)
{
dimSize = 1;
}
}
else
{
numKnownElements *= static_cast<UINT>(dimSize);
}
m_shapeBuffer.push_back(dimSize);
}
m_numElements = numKnownElements;
}
void InitTensorBinding(winrt::Windows::AI::MachineLearning::ILearningModelFeatureDescriptor descriptor,
UINT numElements)
{
auto tensorDescriptor = descriptor.as<winrt::Windows::AI::MachineLearning::TensorFeatureDescriptor>();
InitNumElementsAndShape(&tensorDescriptor.Shape(), tensorDescriptor.Shape().Size(), 1);
m_dataBuffer.resize(m_numElements);
}
winrt::Windows::AI::MachineLearning::ILearningModelFeatureDescriptor m_bindingDesc;
std::vector<INT64> m_shapeBuffer;
UINT m_numElements = 0;
UINT m_elementSize = 0;
std::vector<T> m_dataBuffer;
};

View file

@@ -1,6 +1,5 @@
#include "Common.h"
#include "OutputHelper.h"
#include "ModelBinding.h"
#include "BindingUtilities.h"
#include <filesystem>
#include <d3d11.h>
@@ -79,7 +78,7 @@ std::vector<ILearningModelFeatureValue> GenerateInputFeatures(const LearningMode
if (inputDataType == InputDataType::Tensor || i > 0)
{
// For now, only the first input can be bound with real data
// If CSV data is provided, then every input will contain the same CSV data
auto tensorFeature = BindingUtilities::CreateBindableTensor(description, args);
inputFeatures.push_back(tensorFeature);
}
@@ -202,7 +201,7 @@ HRESULT EvaluateModel(const LearningModel& model, const CommandLineArgs& args, O
#endif
try
{
if (deviceCreationLocation == DeviceCreationLocation::ClientCode && deviceType != DeviceType::CPU)
if (deviceCreationLocation == DeviceCreationLocation::UserD3DDevice && deviceType != DeviceType::CPU)
{
// Enumerate Adapters to pick the requested one.
com_ptr<IDXGIFactory6> factory;
@@ -383,9 +382,9 @@ HRESULT EvaluateModelWithDeviceType(const LearningModel& model, const DeviceType
const std::vector<InputDataType>& inputDataTypes,
const std::vector<DeviceCreationLocation> deviceCreationLocations,
const CommandLineArgs& args, const std::wstring& modelPath, OutputHelper& output,
Profiler<WINML_MODEL_TEST_PERF>& profiler,
TensorFeatureDescriptor& tensorDescriptor)
Profiler<WINML_MODEL_TEST_PERF>& profiler)
{
HRESULT lastEvaluateModelResult = S_OK;
for (const auto& inputBindingType : inputBindingTypes)
{
for (const auto& inputDataType : inputDataTypes)
@@ -398,15 +397,6 @@ HRESULT EvaluateModelWithDeviceType(const LearningModel& model, const DeviceType
profiler.Reset(WINML_MODEL_TEST_PERF::BIND_VALUE, WINML_MODEL_TEST_PERF::COUNT);
}
if (inputDataType != InputDataType::Tensor)
{
// Currently GPU binding only work with 4D tensors and RGBA/BGRA images
if (tensorDescriptor.Shape().Size() != 4 || tensorDescriptor.Shape().GetAt(1) != 3)
{
continue;
}
}
HRESULT evalHResult = EvaluateModel(model, args, output, deviceType, inputBindingType, inputDataType,
deviceCreationLocation, profiler);
@@ -436,6 +426,60 @@ HRESULT EvaluateModelWithDeviceType(const LearningModel& model, const DeviceType
return S_OK;
}
HRESULT CheckIfModelAndConfigurationsAreSupported(LearningModel& model, const std::wstring& modelPath,
const DeviceType deviceType,
const std::vector<InputDataType>& inputDataTypes,
const std::vector<DeviceCreationLocation>& deviceCreationLocations)
{
// Does user want image as input binding
bool hasInputBindingImage = std::any_of(inputDataTypes.begin(), inputDataTypes.end(), [](const InputDataType inputDataType){
return inputDataType == InputDataType::ImageBGR || inputDataType == InputDataType::ImageRGB;
});
for (auto inputFeature : model.InputFeatures())
{
if (inputFeature.Kind() != LearningModelFeatureKind::Tensor &&
inputFeature.Kind() != LearningModelFeatureKind::Image)
{
std::wcout << L"Model: " + modelPath + L" has an input type that isn't supported by WinMLRunner yet."
<< std::endl;
return E_NOTIMPL;
}
else if (inputFeature.Kind() == LearningModelFeatureKind::Tensor)
{
auto tensorFeatureDescriptor = inputFeature.try_as<TensorFeatureDescriptor>();
if (tensorFeatureDescriptor.Shape().Size() > 4 && deviceType != DeviceType::CPU)
{
std::cout << "Input feature " << to_string(inputFeature.Name())
<< " shape is too large. GPU path only accepts tensor dimensions <= 4 : "
<< tensorFeatureDescriptor.Shape().Size() << std::endl;
return E_INVALIDARG;
}
// If image as input binding, then the model's tensor inputs should have channel 3 or 1
if (hasInputBindingImage &&
(tensorFeatureDescriptor.Shape().Size() != 4 ||
(tensorFeatureDescriptor.Shape().GetAt(1) != 1 && tensorFeatureDescriptor.Shape().GetAt(1) != 3)))
{
std::cout << "Attempting to bind image but input feature " << to_string(inputFeature.Name())
<< " shape is invalid. Shape should be 4 dimensions (NCHW) with C = 3." << std::endl;
return E_INVALIDARG;
}
}
}
// Creating D3D12 device on client doesn't make sense for CPU deviceType
if (deviceType == DeviceType::CPU && std::any_of(deviceCreationLocations.begin(), deviceCreationLocations.end(),
[](const DeviceCreationLocation deviceCreationLocation) {
return deviceCreationLocation == DeviceCreationLocation::UserD3DDevice; }))
{
std::cout << "Cannot create D3D12 device on client if CPU device type is selected." << std::endl;
return E_INVALIDARG;
}
return S_OK;
}
HRESULT EvaluateModels(const std::vector<std::wstring>& modelPaths, const std::vector<DeviceType>& deviceTypes,
const std::vector<InputBindingType>& inputBindingTypes,
const std::vector<InputDataType>& inputDataTypes,
@@ -457,24 +501,19 @@ HRESULT EvaluateModels(const std::vector<std::wstring>& modelPaths, const std::v
std::cout << hr.message().c_str() << std::endl;
return hr.code();
}
auto firstFeature = model.InputFeatures().First().Current();
auto tensorDescriptor = firstFeature.try_as<TensorFeatureDescriptor>();
// Map and Sequence bindings are not supported yet
if (!tensorDescriptor)
{
std::wcout << L"Model: " + path + L" has an input type that isn't supported by WinMLRunner yet."
<< std::endl;
continue;
}
for (const auto& deviceType : deviceTypes)
{
HRESULT evaluateModelWithDeviceTypeResult =
EvaluateModelWithDeviceType(model, deviceType, inputBindingTypes, inputDataTypes,
deviceCreationLocations, args, path, output, profiler, tensorDescriptor);
if (FAILED(evaluateModelWithDeviceTypeResult))
lastEvaluateModelResult = CheckIfModelAndConfigurationsAreSupported(model, path, deviceType, inputDataTypes,
deviceCreationLocations);
if (FAILED(lastEvaluateModelResult))
{
continue;
}
lastEvaluateModelResult =
EvaluateModelWithDeviceType(model, deviceType, inputBindingTypes, inputDataTypes,
deviceCreationLocations, args, path, output, profiler);
if (FAILED(lastEvaluateModelResult))
{
lastEvaluateModelResult = evaluateModelWithDeviceTypeResult;
std::cout << "Run failed for DeviceType: " << TypeHelper::Stringify(deviceType) << std::endl;
}
}
@@ -560,13 +599,13 @@ std::vector<DeviceCreationLocation> FetchDeviceCreationLocations(const CommandLi
if (args.IsCreateDeviceOnClient())
{
deviceCreationLocations.push_back(DeviceCreationLocation::ClientCode);
deviceCreationLocations.push_back(DeviceCreationLocation::UserD3DDevice);
}
return deviceCreationLocations;
}
int run(CommandLineArgs& args, Profiler<WINML_MODEL_TEST_PERF>& profiler)
int run(CommandLineArgs& args, Profiler<WINML_MODEL_TEST_PERF>& profiler) try
{
// Initialize COM in a multi-threaded environment.
winrt::init_apartment();
@@ -594,7 +633,6 @@ int run(CommandLineArgs& args, Profiler<WINML_MODEL_TEST_PERF>& profiler)
{
output.SetDefaultCSVFileName();
}
if (args.IsSaveTensor() || args.IsPerIterationCapture())
{
output.SetDefaultPerIterationFolder(args.TensorOutputPath());
@@ -621,3 +659,18 @@ int run(CommandLineArgs& args, Profiler<WINML_MODEL_TEST_PERF>& profiler)
}
return 0;
}
catch (const hresult_error& error)
{
wprintf(error.message().c_str());
return error.code();
}
catch (const std::exception& error)
{
printf(error.what());
return EXIT_FAILURE;
}
catch (...)
{
printf("Unknown exception occurred.");
return EXIT_FAILURE;
}
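The run entry point now uses a C++ function-try-block, so any exception thrown in the body (including winrt::hresult_error from WinML calls) is translated into a return code instead of terminating the process. A tiny standalone sketch of the syntax:

#include <cstdio>
#include <cstdlib>
#include <stdexcept>

int Work() try
{
    throw std::runtime_error("boom");
}
catch (const std::exception& error)
{
    // The catch clauses attach to the entire function body.
    std::printf("%s\n", error.what());
    return EXIT_FAILURE;
}

int main() { return Work(); }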

View file

@@ -32,7 +32,7 @@ enum class DeviceType
enum class DeviceCreationLocation
{
WinML,
ClientCode
UserD3DDevice
};
class TypeHelper
@@ -102,7 +102,7 @@ public:
{
switch (deviceCreationLocation)
{
case DeviceCreationLocation::ClientCode:
case DeviceCreationLocation::UserD3DDevice:
return "Client";
case DeviceCreationLocation::WinML:
return "WinML";