Xiang Zhang 2020-02-19 14:33:24 -08:00
Parent a9b1628546
Commit 4da54c2d7c
12 changed files: 42 additions and 1059 deletions

View File

@@ -112,30 +112,28 @@
</ClCompile>
</ItemGroup>
<ItemGroup>
<CopyFileToFolders Include="..\..\..\SharedContent\models\SqueezeNet_batch3.onnx">
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
<FileType>Document</FileType>
</CopyFileToFolders>
<CopyFileToFolders Include="..\..\..\SharedContent\models\SqueezeNet_free.onnx">
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
<FileType>Document</FileType>
</CopyFileToFolders>
<None Include="packages.config">
<SubType>Designer</SubType>
</None>
<CopyFileToFolders Include="SqueezeNet.onnx">
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
<FileType>Document</FileType>
</CopyFileToFolders>
<CopyFileToFolders Include="SqueezeNet_free.onnx">
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
<FileType>Document</FileType>
</CopyFileToFolders>
</ItemGroup>
<ItemGroup>
<CopyFileToFolders Include="kitten_224.png">
<CopyFileToFolders Include="..\..\..\SharedContent\media\fish.png">
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
</CopyFileToFolders>
<CopyFileToFolders Include="..\..\..\SharedContent\media\kitten_224.png">
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
</CopyFileToFolders>
</ItemGroup>
<ItemGroup>
<CopyFileToFolders Include="Labels.txt">
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
</CopyFileToFolders>
</ItemGroup>
<ItemGroup>
<CopyFileToFolders Include="fish.png">
<CopyFileToFolders Include="..\..\SqueezeNetObjectDetection\Desktop\cpp\Labels.txt">
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
</CopyFileToFolders>
</ItemGroup>

View File

@@ -40,19 +40,19 @@
<None Include="packages.config" />
</ItemGroup>
<ItemGroup>
<CopyFileToFolders Include="kitten_224.png">
<CopyFileToFolders Include="..\..\..\SharedContent\media\fish.png">
<Filter>Resource Files</Filter>
</CopyFileToFolders>
<CopyFileToFolders Include="SqueezeNet.onnx">
<CopyFileToFolders Include="..\..\..\SharedContent\media\kitten_224.png">
<Filter>Resource Files</Filter>
</CopyFileToFolders>
<CopyFileToFolders Include="SqueezeNet_free.onnx">
<CopyFileToFolders Include="..\..\..\SharedContent\models\SqueezeNet_batch3.onnx">
<Filter>Resource Files</Filter>
</CopyFileToFolders>
<CopyFileToFolders Include="Labels.txt">
<CopyFileToFolders Include="..\..\..\SharedContent\models\SqueezeNet_free.onnx">
<Filter>Resource Files</Filter>
</CopyFileToFolders>
<CopyFileToFolders Include="fish.png">
<CopyFileToFolders Include="..\..\SqueezeNetObjectDetection\Desktop\cpp\Labels.txt">
<Filter>Resource Files</Filter>
</CopyFileToFolders>
</ItemGroup>

The diff of this file is not shown because it is too large.

View File

@@ -38,7 +38,7 @@ std::wstring GetModulePath() {
}
std::vector<float>
SoftwareBitmapToSoftwareTensor(SoftwareBitmap softwareBitmap) {
SoftwareBitmapToFloatVector(SoftwareBitmap softwareBitmap) {
/* Manually tensorize from CPU resource, steps:
1. Get the access to buffer of softwarebitmap
2. Transform the data in buffer to a vector of float
@@ -69,38 +69,18 @@ SoftwareBitmapToSoftwareTensor(SoftwareBitmap softwareBitmap) {
// 2. Transform the data in buffer to a vector of float
if (BitmapPixelFormat::Bgra8 == pixelFormat) {
for (UINT32 i = 0; i < size; i += 4) {
for (uint32_t i = 0; i < size; i += 4) {
// suppose the model expects BGR image.
// index 0 is B, 1 is G, 2 is R, 3 is alpha(dropped).
UINT32 pixelInd = i / 4;
uint32_t pixelInd = i / 4;
outputVector[pixelInd] = (float)pData[i];
outputVector[(height * width) + pixelInd] = (float)pData[i + 1];
outputVector[(height * width * 2) + pixelInd] = (float)pData[i + 2];
}
} else if (BitmapPixelFormat::Rgba8 == pixelFormat) {
for (UINT32 i = 0; i < size; i += 4) {
// suppose the model expects BGR image.
// index 0 is B, 1 is G, 2 is R, 3 is alpha(dropped).
UINT32 pixelInd = i / 4;
outputVector[pixelInd] = (float)pData[i + 2];
outputVector[(height * width) + pixelInd] = (float)pData[i + 1];
outputVector[(height * width * 2) + pixelInd] = (float)pData[i];
}
} else if (BitmapPixelFormat::Gray8 == pixelFormat) {
for (UINT32 i = 0; i < size; i += 4) {
// suppose the model expects BGR image.
// index 0 is B, 1 is G, 2 is R, 3 is alpha(dropped).
UINT32 pixelInd = i / 4;
float red = (float)pData[i + 2];
float green = (float)pData[i + 1];
float blue = (float)pData[i];
float gray = 0.2126f * red + 0.7152f * green + 0.0722f * blue;
outputVector[pixelInd] = gray;
}
}
// Pixel Value Normalization can be done here. We are using the range from
// 0-255, but the range can be normalized to 0-1 before we return the
// TensorFloat.
// 0-255, but the range can be normalized to 0-1 before we return
return outputVector;
}
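
The comment above leaves the pixel values in the 0-255 range. A minimal sketch of the normalization it mentions, assuming `outputVector` is the `std::vector<float>` built in `SoftwareBitmapToFloatVector` and that the model expects inputs in [0, 1] (neither is part of this commit):

```cpp
// Hypothetical post-processing step, not part of the sample:
// rescale each channel value from [0, 255] to [0, 1].
for (float& value : outputVector) {
  value /= 255.0f;
}
```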
@@ -130,10 +110,10 @@ hstring GetModelPath(std::string modelType) {
hstring modelPath;
if (modelType == "fixedBatchSize") {
modelPath =
static_cast<hstring>(GetModulePath().c_str()) + L"SqueezeNet_free.onnx";
static_cast<hstring>(GetModulePath().c_str()) + L"SqueezeNet.onnx";
} else {
modelPath =
static_cast<hstring>(GetModulePath().c_str()) + L"SqueezeNet.onnx";
static_cast<hstring>(GetModulePath().c_str()) + L"SqueezeNet_free.onnx";
}
return modelPath;
}
@@ -145,9 +125,11 @@ TensorFloat CreateInputTensorFloat() {
auto imagePath = static_cast<hstring>(GetModulePath().c_str()) + imageName;
auto imageFrame = LoadImageFile(imagePath);
std::vector<float> imageVector =
SoftwareBitmapToSoftwareTensor(imageFrame.SoftwareBitmap());
SoftwareBitmapToFloatVector(imageFrame.SoftwareBitmap());
inputVector.insert(inputVector.end(), imageVector.begin(), imageVector.end());
}
// 224, 224 below are height and width specified in model input.
auto inputShape = std::vector<int64_t>{ BATCH_SIZE, 3, 224, 224 };
auto inputValue = TensorFloat::CreateFromIterable(
inputShape,

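The hunk above is cut off mid-call by the diff viewer. A hedged sketch of how the remaining arguments to `TensorFloat::CreateFromIterable` are commonly supplied in C++/WinRT, reusing the `inputShape` and `inputVector` locals from the hunk (the sample's actual continuation may differ):

```cpp
// Sketch only: wrap the flat float data in a WinRT vector and build the tensor.
auto inputValue = TensorFloat::CreateFromIterable(
    inputShape,
    winrt::single_threaded_vector<float>(std::move(inputVector)).GetView());
return inputValue;
```
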
View File

@@ -11,7 +11,7 @@ namespace SampleHelper
std::wstring GetModulePath();
// Convert SoftwareBitmap to std::vector<float>
std::vector<float> SoftwareBitmapToSoftwareTensor(
std::vector<float> SoftwareBitmapToFloatVector(
winrt::Windows::Graphics::Imaging::SoftwareBitmap softwareBitmap);
// Load Image File as VideoFrame

Binary data
Samples/BatchSupport/BatchSupport/fish.png

Binary file not shown.

Before: 131 KiB

Binary data
Samples/BatchSupport/BatchSupport/kitten_224.png

Binary file not shown.

Before: 55 KiB

View File

@@ -25,25 +25,28 @@ int main(int argc, char *argv[]) {
// did they pass in the args
if (ParseArgs(argc, argv) == false) {
printf("Usage: %s [fixedBatchSize|freeBatchSize] [TensorFloat|VideoFrame] \n", argv[0]);
printf("Usage: %s [freeBatchSize|fixedBatchSize] [TensorFloat|VideoFrame] \n", argv[0]);
}
// load the model
hstring modelPath = SampleHelper::GetModelPath(modelType);
printf("Loading modelfile '%ws' on the CPU\n", modelPath.c_str());
DWORD ticks = GetTickCount();
auto model = LearningModel::LoadFromFilePath(modelPath);
ticks = GetTickCount() - ticks;
printf("model file loaded in %d ticks\n", ticks);
// now create a session and binding
LearningModelDeviceKind deviceKind = LearningModelDeviceKind::Cpu;
LearningModelSessionOptions options;
LearningModelSession session = nullptr;
if ("freeBatchSize" == modelType) {
// If the model has free dimentional batch, override the free dimension with batch_size
// If the model has free dimensional batch, override the free dimension with batch_size
// for performance improvement.
LearningModelSessionOptions options;
printf("Override Batch Size by %d\n", BATCH_SIZE);
options.BatchSizeOverride(static_cast<uint32_t>(BATCH_SIZE));
session = LearningModelSession(model, LearningModelDevice(deviceKind), options);
}
else {
session = LearningModelSession(model, LearningModelDevice(deviceKind));
}
LearningModelSession session(model, LearningModelDevice(deviceKind), options);
LearningModelBinding binding(session);
// bind the input image
@@ -69,7 +72,7 @@ int main(int argc, char *argv[]) {
// now run the model
printf("Running the model...\n");
ticks = GetTickCount();
DWORD ticks = GetTickCount();
auto results = session.EvaluateAsync(binding, L"RunId").get();
ticks = GetTickCount() - ticks;
printf("model run took %d ticks\n", ticks);
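
Taken together, the two hunks above reduce session creation to a single path. A condensed sketch of the resulting flow, using the names from the hunks and assuming `BATCH_SIZE` and `modelType` are defined elsewhere in the sample as shown:

```cpp
// One options object is always created; the batch override only applies to the
// free-batch-dimension model.
LearningModelSessionOptions options;
if ("freeBatchSize" == modelType) {
  // Overriding the free batch dimension with a fixed size lets the runtime
  // optimize for evaluating BATCH_SIZE inputs per call.
  options.BatchSizeOverride(static_cast<uint32_t>(BATCH_SIZE));
}
LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Cpu), options);
LearningModelBinding binding(session);
```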

View File

@@ -1,4 +1,4 @@
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="Microsoft.Windows.CppWinRT" version="2.0.200203.5" targetFramework="native" />
<package id="Microsoft.Windows.CppWinRT" version="1.0.181129.3" targetFramework="native" />
</packages>

View File

@@ -53,7 +53,7 @@ This sample shows how to bind and evaluate batches of inputs in WinML
## Prepare Model
1. Download [WinMLDashboard](https://github.com/microsoft/Windows-Machine-Learning/releases/tag/v0.6.1)
2. Change the batch dimension of the model input and output to a fixed number or -1 (free dimension)
<img src='.\forReadMe\fixBatchSize.png' width=400 /> <img src='.\forReadMe\freeBatchSize.png' width=400 />
<img src='./forReadMe/fixBatchSize.png' width=400 /> <img src='./forReadMe/freeBatchSize.png' width=400 />
## Create Session and Bind Inputs
Take binding batches of VideoFrames as an example:
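
The README's own snippet is truncated at this point in the diff. A hedged illustration of what binding a batch of `VideoFrame`s generally looks like in C++/WinRT; the input name `data_0` and the use of `SampleHelper::LoadImageFile` with a literal path are assumptions for illustration, not necessarily the sample's exact code:

```cpp
// Sketch: bind an IVector<VideoFrame> to one model input to evaluate a whole batch.
std::vector<winrt::Windows::Media::VideoFrame> inputFrames;
for (int i = 0; i < BATCH_SIZE; ++i) {
  inputFrames.push_back(SampleHelper::LoadImageFile(L"kitten_224.png"));
}
binding.Bind(L"data_0", winrt::single_threaded_vector(std::move(inputFrames)));
```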