Add per-TEST_METHOD names to the output directory structure, so parallel unit test runs won't overwrite one another. (#219)
This commit is contained in:
Parent: a8727a3e5d
Commit: 25e1ded8c2
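The mechanism behind the change is small: a wrapper macro stringizes the test method's own name into a local METHOD_NAME wstring, and each test appends that name to the shared TestResults root before passing the result to -PerIterationPath or -BaseOutputPath, so concurrent test runs write to disjoint directories. The snippet below is a minimal, framework-free sketch of the same idea; the PER_TEST macro, the hard-coded root path, and main() are stand-ins for the real TEST_METHOD_WITH_NAME, TENSOR_DATA_PATH, and the VS test runner, and it uses the portable L"" #methodName spelling rather than the L#methodName form in the diff.

#include <iostream>
#include <string>

// Stand-in for TENSOR_DATA_PATH; the real tests derive it from the module path.
static const std::wstring TEST_RESULTS_ROOT = L"C:\\temp\\TestResults";

// Stand-in for TEST_METHOD_WITH_NAME: open the function body and capture the
// method's own name as a wide string, so the body can build a per-method path.
#define PER_TEST(methodName) void methodName() { const std::wstring METHOD_NAME(L"" #methodName);

PER_TEST(ProvidedImageInputOnlyGpuSaveTensor)
    // Each test writes under <root>\<method name>, so parallel runs no longer collide.
    const std::wstring tensorDataPath = TEST_RESULTS_ROOT + L"\\" + METHOD_NAME;
    std::wcout << tensorDataPath << L"\n";  // C:\temp\TestResults\ProvidedImageInputOnlyGpuSaveTensor
}

int main()
{
    ProvidedImageInputOnlyGpuSaveTensor();
    return 0;
}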
@@ -32,13 +32,16 @@ static HRESULT RunProc(LPWSTR commandLine)
     return HRESULT_FROM_WIN32(exitCode);
 }
 
+// Use this test method definition, if the test needs access to the METHOD_NAME
+#define TEST_METHOD_WITH_NAME(methodName) TEST_METHOD(methodName) { const std::wstring METHOD_NAME(L#methodName);
+
 namespace WinMLRunnerTest
 {
     static const std::wstring CURRENT_PATH = FileHelper::GetModulePath();
     static const std::wstring EXE_PATH = CURRENT_PATH + L"WinMLRunner.exe";
     static const std::wstring INPUT_FOLDER_PATH = CURRENT_PATH + L"test_folder_input";
     static const std::wstring OUTPUT_PATH = CURRENT_PATH + L"test_output.csv";
-    static const std::wstring TENSOR_DATA_PATH = CURRENT_PATH + L"TestResult";
+    static const std::wstring TENSOR_DATA_PATH = CURRENT_PATH + L"TestResults";
 
     static std::wstring BuildCommand(std::initializer_list<std::wstring>&& arguments)
     {
@@ -217,8 +220,7 @@ namespace WinMLRunnerTest
         return true;
     }
 
-TEST_CLASS(GarbageInputTest) {
-public: TEST_CLASS_INITIALIZE(SetupClass) {
+TEST_CLASS(GarbageInputTest) { public: TEST_CLASS_INITIALIZE(SetupClass) {
         // Make test_folder_input folder before starting the tests
         std::string mkFolderCommand = "mkdir " + std::string(INPUT_FOLDER_PATH.begin(), INPUT_FOLDER_PATH.end());
         system(mkFolderCommand.c_str());
@@ -241,6 +243,13 @@ public: TEST_CLASS_INITIALIZE(SetupClass) {
         std::string copyCommand = "rd /s /q ";
         copyCommand += std::string(INPUT_FOLDER_PATH.begin(), INPUT_FOLDER_PATH.end());
         system(copyCommand.c_str());
+        try
+        {
+            std::filesystem::remove_all(std::string(TENSOR_DATA_PATH.begin(), TENSOR_DATA_PATH.end()).c_str());
+        }
+        catch (const std::filesystem::filesystem_error &)
+        {
+        }
     }
 
     TEST_METHOD_CLEANUP(CleanupMethod)
@@ -521,6 +530,7 @@ public: TEST_CLASS_INITIALIZE(SetupClass) {
         }
         catch (const std::filesystem::filesystem_error &) {}
     }
+
     TEST_METHOD(ProvidedImageInputCpuAndGpu)
     {
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
@@ -552,78 +562,84 @@ public: TEST_CLASS_INITIALIZE(SetupClass) {
             BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath, L"-autoScale", L"Cubic" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
     }
-    TEST_METHOD(ProvidedImageInputOnlyGpuSaveTensor)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedImageInputOnlyGpuSaveTensor)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"fish.png";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-GPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-GPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Squeezenet_fish_input_GPU.csv",
-            TENSOR_DATA_PATH + L"\\softmaxout_1GpuIteration1.csv"));
+            tensorDataPath + L"\\softmaxout_1GpuIteration1.csv"));
     }
-    TEST_METHOD(ProvidedImageInputOnlyCpuSaveTensor)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedImageInputOnlyCpuSaveTensor)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"fish.png";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-CPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-CPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Squeezenet_fish_input_CPU.csv",
-            TENSOR_DATA_PATH + L"\\softmaxout_1CpuIteration1.csv"));
+            tensorDataPath + L"\\softmaxout_1CpuIteration1.csv"));
     }
 
-    TEST_METHOD(ProvidedImageInputOnlyGpuSaveTensorImageDenotation)
-    {
+    TEST_METHOD_WITH_NAME(ProvidedImageInputOnlyGpuSaveTensorImageDenotation)
         const std::wstring modelPath = CURRENT_PATH + L"mnist.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"mnist_28.png";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-GPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-GPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Mnist_8_input_GPU.csv",
-            TENSOR_DATA_PATH + L"\\Plus214_Output_0GpuIteration1.csv"));
+            tensorDataPath + L"\\Plus214_Output_0GpuIteration1.csv"));
     }
-    TEST_METHOD(ProvidedImageInputOnlyCpuSaveTensorImageDenotation)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedImageInputOnlyCpuSaveTensorImageDenotation)
         const std::wstring modelPath = CURRENT_PATH + L"mnist.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"mnist_28.png";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
        const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-CPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-CPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Mnist_8_input_CPU.csv",
-            TENSOR_DATA_PATH + L"\\Plus214_Output_0CpuIteration1.csv"));
+            tensorDataPath + L"\\Plus214_Output_0CpuIteration1.csv"));
     }
-    TEST_METHOD(ProvidedImageInputOnlyGpuSaveTensorFp16)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedImageInputOnlyGpuSaveTensorFp16)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet_fp16.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"fish.png";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-GPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-GPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensorsFP16(L"OutputTensorData\\Squeezenet_fp16_fish_input_GPU.csv",
-            TENSOR_DATA_PATH + L"\\softmaxout_1GpuIteration1.csv"));
+            tensorDataPath + L"\\softmaxout_1GpuIteration1.csv"));
     }
-    TEST_METHOD(ProvidedImageInputOnlyCpuSaveTensorFp16)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedImageInputOnlyCpuSaveTensorFp16)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet_fp16.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"fish.png";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-CPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-CPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensorsFP16(L"OutputTensorData\\Squeezenet_fp16_fish_input_CPU.csv",
-            TENSOR_DATA_PATH + L"\\softmaxout_1CpuIteration1.csv"));
+            tensorDataPath + L"\\softmaxout_1CpuIteration1.csv"));
     }
-    TEST_METHOD(ProvidedImageInputOnlyCpuPerIterationPerformance)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedImageInputOnlyCpuPerIterationPerformance)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command =
             BuildCommand({ EXE_PATH, L"-model", modelPath, L"-PerfOutput", OUTPUT_PATH, L"-perf",
-                L"-SavePerIterationPerf", L"-BaseOutputPath", TENSOR_DATA_PATH,
+                L"-SavePerIterationPerf", L"-BaseOutputPath", tensorDataPath,
                 L"-PerIterationPath PerIterationData", L"-CPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
 
         // We need to expect one more line because of the header
-        Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount(TENSOR_DATA_PATH + L"\\PerIterationData\\Summary.csv"));
+        Assert::AreEqual(static_cast<size_t>(2), GetOutputCSVLineCount(tensorDataPath + L"\\PerIterationData\\Summary.csv"));
     }
 };
 
@@ -641,6 +657,7 @@ public: TEST_CLASS_INITIALIZE(SetupClass) {
         }
         catch (const std::filesystem::filesystem_error &) {}
     }
+
     TEST_METHOD(ProvidedCSVInput)
     {
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
@@ -648,6 +665,7 @@ public: TEST_CLASS_INITIALIZE(SetupClass) {
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model", modelPath, L"-input", inputPath });
         Assert::AreEqual(S_OK, RunProc((wchar_t *)command.c_str()));
     }
+
     TEST_METHOD(ProvidedCSVBadBinding)
     {
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
@@ -655,76 +673,82 @@ public: TEST_CLASS_INITIALIZE(SetupClass) {
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model", modelPath, L"-input", inputPath });
         Assert::AreEqual(HRESULT_FROM_WIN32(ERROR_INVALID_PARAMETER), RunProc((wchar_t *)command.c_str()));
     }
-    TEST_METHOD(ProvidedCSVInputGPUSaveCpuBoundTensor)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedCSVInputGPUSaveCpuBoundTensor)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"fish.csv";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-GPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-GPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Squeezenet_fish_input_GPU.csv",
-            TENSOR_DATA_PATH + L"\\softmaxout_1GpuIteration1.csv"));
+            tensorDataPath + L"\\softmaxout_1GpuIteration1.csv"));
     }
-    TEST_METHOD(ProvidedCSVInputGPUSaveGpuBoundTensor)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedCSVInputGPUSaveGpuBoundTensor)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"fish.csv";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-GPU", L"-GPUBoundInput" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-GPU", L"-GPUBoundInput" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Squeezenet_fish_input_GPU.csv",
-            TENSOR_DATA_PATH + L"\\softmaxout_1GpuIteration1.csv"));
+            tensorDataPath + L"\\softmaxout_1GpuIteration1.csv"));
     }
-    TEST_METHOD(ProvidedCSVInputCPUSaveCpuBoundTensor)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedCSVInputCPUSaveCpuBoundTensor)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"fish.csv";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-CPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-CPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Squeezenet_fish_input_CPU.csv",
-            TENSOR_DATA_PATH + L"\\softmaxout_1CpuIteration1.csv"));
+            tensorDataPath + L"\\softmaxout_1CpuIteration1.csv"));
     }
-    TEST_METHOD(ProvidedCSVInputGPUSaveCpuBoundTensorFp16)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedCSVInputGPUSaveCpuBoundTensorFp16)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet_fp16.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"fish.csv";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-GPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-GPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensorsFP16(L"OutputTensorData\\Squeezenet_fp16_fish_input_GPU.csv",
-            TENSOR_DATA_PATH + L"\\softmaxout_1GpuIteration1.csv"));
+            tensorDataPath + L"\\softmaxout_1GpuIteration1.csv"));
    }
-    TEST_METHOD(ProvidedCSVInputCPUSaveCpuBoundTensorFp16)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedCSVInputCPUSaveCpuBoundTensorFp16)
         const std::wstring modelPath = CURRENT_PATH + L"SqueezeNet_fp16.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"fish.csv";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-CPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-CPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensorsFP16(L"OutputTensorData\\Squeezenet_fp16_fish_input_CPU.csv",
-            TENSOR_DATA_PATH + L"\\softmaxout_1CpuIteration1.csv"));
+            tensorDataPath + L"\\softmaxout_1CpuIteration1.csv"));
     }
 
-    TEST_METHOD(ProvidedCSVInputOnlyGpuSaveCpuBoundTensorImageDenotation)
-    {
+    TEST_METHOD_WITH_NAME(ProvidedCSVInputOnlyGpuSaveCpuBoundTensorImageDenotation)
         const std::wstring modelPath = CURRENT_PATH + L"mnist.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"mnist_28.csv";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-GPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-GPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Mnist_8_input_GPU.csv",
-            TENSOR_DATA_PATH + L"\\Plus214_Output_0GpuIteration1.csv"));
+            tensorDataPath + L"\\Plus214_Output_0GpuIteration1.csv"));
     }
-    TEST_METHOD(ProvidedCSVInputOnlyCpuSaveCpuBoundTensorImageDenotation)
-    {
+
+    TEST_METHOD_WITH_NAME(ProvidedCSVInputOnlyCpuSaveCpuBoundTensorImageDenotation)
         const std::wstring modelPath = CURRENT_PATH + L"mnist.onnx";
         const std::wstring inputPath = CURRENT_PATH + L"mnist_28.csv";
+        const std::wstring tensorDataPath = TENSOR_DATA_PATH + L"\\" + METHOD_NAME;
         const std::wstring command = BuildCommand({ EXE_PATH, L"-model ", modelPath, L"-input", inputPath,
-            L"-SaveTensorData", L"First", L"-PerIterationPath", TENSOR_DATA_PATH, L"-CPU" });
+            L"-SaveTensorData", L"First", L"-PerIterationPath", tensorDataPath, L"-CPU" });
         Assert::AreEqual(S_OK, RunProc((wchar_t*)command.c_str()));
         Assert::AreEqual(true, CompareTensors(L"OutputTensorData\\Mnist_8_input_CPU.csv",
-            TENSOR_DATA_PATH + L"\\Plus214_Output_0CpuIteration1.csv"));
+            tensorDataPath + L"\\Plus214_Output_0CpuIteration1.csv"));
     }
 };
 