Add include for generated file
Parent: 6cc69e5f4b
Commit: d033dc470c
@@ -37,6 +37,9 @@ namespace WinMLSamplesGallery
                case "EncryptedModel":
                    SampleFrame.Navigate(typeof(Samples.EncryptedModel));
                    break;
                case "AdapterSelection":
                    SampleFrame.Navigate(typeof(Samples.AdapterSelection));
                    break;
            }
            if (sampleMetadata.Docs.Count > 0)
                DocsHeader.Visibility = Visibility.Visible;
@@ -71,6 +71,17 @@
    "CSharpGithubLink": "https://github.com/microsoft/Windows-Machine-Learning/blob/master/Samples/WinMLSamplesGallery/WinMLSamplesGallery/Samples/EncryptedModel/EncryptedModel.xaml.cs",
    "Docs": [],
    "IsRecentlyAdded": true
  },
  {
    "Title": "Adapter Selection",
    "DescriptionShort": "Select different adapters based on your power and performance needs.",
    "Description": "The sample showcases how to use Windows ML with different adapters that trade off power and performance.",
    "Icon": "\uE155",
    "Tag": "AdapterSelection",
    "XAMLGithubLink": "https://github.com/microsoft/Windows-Machine-Learning/blob/master/Samples/WinMLSamplesGallery/WinMLSamplesGallery/Samples/AdapterSelection/AdapterSelection.xaml",
    "CSharpGithubLink": "https://github.com/microsoft/Windows-Machine-Learning/blob/master/Samples/WinMLSamplesGallery/WinMLSamplesGallery/Samples/AdapterSelection/AdapterSelection.xaml.cs",
    "Docs": [],
    "IsRecentlyAdded": true
  }
]
}
@@ -0,0 +1,22 @@
<Page
    x:Class="WinMLSamplesGallery.Samples.AdapterSelection"
    xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
    xmlns:d="http://schemas.microsoft.com/expression/blend/2008"
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
    xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
    xmlns:local_controls="using:WinMLSamplesGallery.Controls"
    xmlns:local_samples="using:WinMLSamplesGallery.Samples"
    mc:Ignorable="d"
    FontFamily="Arial">

    <StackPanel>
        <ComboBox x:Name="DeviceBox" SelectedIndex="0">
            <x:String>CPU</x:String>
            <x:String>DirectX</x:String>
            <x:String>DirectXHighPerformance</x:String>
            <x:String>DirectXMinPower</x:String>
            <x:String>Intel Iris Plus Graphics</x:String>
            <x:String>NVIDIA GeForce GTX 1650</x:String>
        </ComboBox>
    </StackPanel>
</Page>
@@ -0,0 +1,23 @@
using Microsoft.AI.MachineLearning;
using Microsoft.UI.Xaml;
using Microsoft.UI.Xaml.Controls;
using System;
using System.Collections.Generic;
using System.IO;
using System.Threading.Tasks;
using Windows.Graphics.Imaging;
using Windows.Media;
using Windows.Storage;

namespace WinMLSamplesGallery.Samples
{
    public sealed partial class AdapterSelection : Page
    {
        public AdapterSelection()
        {
            this.InitializeComponent();
            System.Diagnostics.Debug.WriteLine("Initialized Adapter Selection");
        }
    }
}
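The code-behind above is only a stub at this stage. A minimal sketch of how the DeviceBox selection could eventually map to a Windows ML device is shown below; the handler name and the switch mapping are assumptions for illustration, not part of this commit:

    // Hypothetical handler (not in this commit): map the DeviceBox string to a
    // LearningModelDeviceKind and create the corresponding device.
    private void DeviceBox_SelectionChanged(object sender, SelectionChangedEventArgs e)
    {
        var selection = (string)DeviceBox.SelectedItem;
        var kind = selection switch
        {
            "CPU" => LearningModelDeviceKind.Cpu,
            "DirectX" => LearningModelDeviceKind.DirectX,
            "DirectXHighPerformance" => LearningModelDeviceKind.DirectXHighPerformance,
            "DirectXMinPower" => LearningModelDeviceKind.DirectXMinPower,
            // Named adapters (e.g. a specific GPU) would require adapter enumeration
            // on the native side, which this commit only stubs out; fall back to Default.
            _ => LearningModelDeviceKind.Default,
        };
        // A LearningModelSession for the sample's model would be created from this device.
        var device = new LearningModelDevice(kind);
        System.Diagnostics.Debug.WriteLine("Selected device kind: " + kind);
    }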
@@ -1,263 +1,263 @@
using Microsoft.AI.MachineLearning;
using Microsoft.UI.Xaml;
using Microsoft.UI.Xaml.Controls;
using System;
using System.Collections.Generic;
using System.IO;
using System.Threading.Tasks;
using Windows.Graphics.Imaging;
using Windows.Media;
using Windows.Storage;

namespace WinMLSamplesGallery.Samples
{
    public sealed class EvalResult
    {
        public string nonBatchedAvgTime { get; set; }
        public string batchedAvgTime { get; set; }
        public string timeRatio { get; set; }
    }

    public sealed partial class Batching : Page
    {
        const int NumInputImages = 50;
        const int NumEvalIterations = 100;

        private LearningModel _model = null;
        private LearningModelSession _nonBatchingSession = null;
        private LearningModelSession _batchingSession = null;

        float _avgNonBatchedDuration = 0;
        float _avgBatchDuration = 0;

        // Marked volatile since it's updated across threads
        static volatile bool navigatingAwayFromPage = false;

        public Batching()
        {
            this.InitializeComponent();
            // Ensure static variable is always false on page initialization
            navigatingAwayFromPage = false;

            // Load the model
            var modelName = "squeezenet1.1-7-batched.onnx";
            var modelPath = Path.Join(Windows.ApplicationModel.Package.Current.InstalledLocation.Path, "Models", modelName);
            _model = LearningModel.LoadFromFilePath(modelPath);
        }

        async private void StartInference(object sender, RoutedEventArgs e)
        {
            ShowStatus();
            ResetMetrics();

            var inputImages = await GetInputImages();
            int batchSize = GetBatchSizeFromBatchSizeSlider();

            _nonBatchingSession = await CreateLearningModelSession(_model);
            _batchingSession = await CreateLearningModelSession(_model, batchSize);

            UpdateStatus(false);
            await Classify(inputImages);

            UpdateStatus(true);
            await ClassifyBatched(inputImages, batchSize);

            ShowUI();
        }

        private void ShowStatus()
        {
            StartInferenceBtn.IsEnabled = false;
            BatchSizeSlider.IsEnabled = false;
            DeviceComboBox.IsEnabled = false;
            EvalResults.Visibility = Visibility.Collapsed;
            LoadingContainer.Visibility = Visibility.Visible;
        }

        private void ResetMetrics()
        {
            _avgNonBatchedDuration = 0;
            _avgBatchDuration = 0;
        }

        // Test input consists of 50 images (25 bird and 25 cat)
        private async Task<List<VideoFrame>> GetInputImages()
        {
            var birdFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///InputData/hummingbird.jpg"));
            var catFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///InputData/kitten.png"));
            var birdImage = await CreateSoftwareBitmapFromStorageFile(birdFile);
            var catImage = await CreateSoftwareBitmapFromStorageFile(catFile);
            var inputImages = new List<VideoFrame>();
            for (int i = 0; i < NumInputImages / 2; i++)
            {
                inputImages.Add(VideoFrame.CreateWithSoftwareBitmap(birdImage));
                inputImages.Add(VideoFrame.CreateWithSoftwareBitmap(catImage));
            }
            return inputImages;
        }

        private async Task<SoftwareBitmap> CreateSoftwareBitmapFromStorageFile(StorageFile file)
        {
            var stream = await file.OpenAsync(FileAccessMode.Read);
            var decoder = await BitmapDecoder.CreateAsync(stream);
            var bitmap = await decoder.GetSoftwareBitmapAsync();
            return bitmap;
        }

        private void UpdateStatus(bool isBatchingEval)
        {
            if (isBatchingEval)
            {
                EvalText.Text = "Inferencing Batched Inputs:";
            }
            else
            {
                EvalText.Text = "Inferencing Non-Batched Inputs:";
            }
        }

        private async Task<LearningModelSession> CreateLearningModelSession(LearningModel model, int batchSizeOverride = -1)
        {
            var deviceKind = DeviceComboBox.GetDeviceKind();
            var device = new LearningModelDevice(deviceKind);
            var options = new LearningModelSessionOptions();
            if (batchSizeOverride > 0)
            {
                options.BatchSizeOverride = (uint)batchSizeOverride;
            }
            var session = new LearningModelSession(model, device, options);
            return session;
        }

        async private Task Classify(List<VideoFrame> inputImages)
        {
            float totalEvalDurations = 0;
            for (int i = 0; i < NumEvalIterations; i++)
            {
                if (navigatingAwayFromPage)
                {
                    break;
                }

                UpdateProgress(i);
                float evalDuration = await Task.Run(() => Evaluate(_nonBatchingSession, inputImages));
                totalEvalDurations += evalDuration;
            }
            _avgNonBatchedDuration = totalEvalDurations / NumEvalIterations;
        }

        private static float Evaluate(LearningModelSession session, List<VideoFrame> input)
        {
            string inputName = session.Model.InputFeatures[0].Name;
            float totalDuration = 0;
            var binding = new LearningModelBinding(session);
            for (int j = 0; j < input.Count; j++)
            {
                if (navigatingAwayFromPage)
                {
                    break;
                }

                var start = HighResolutionClock.UtcNow();
                binding.Bind(inputName, input[j]);
                session.Evaluate(binding, "");
                var stop = HighResolutionClock.UtcNow();
                var duration = HighResolutionClock.DurationInMs(start, stop);
                totalDuration += duration;
            }
            return totalDuration;
        }

        async private Task ClassifyBatched(List<VideoFrame> inputImages, int batchSize)
        {
            float totalEvalDurations = 0;
            for (int i = 0; i < NumEvalIterations; i++)
            {
                if (navigatingAwayFromPage)
                    break;
                UpdateProgress(i);
                float evalDuration = await Task.Run(() => EvaluateBatched(_batchingSession, inputImages, batchSize));
                totalEvalDurations += evalDuration;
            }
            _avgBatchDuration = totalEvalDurations / NumEvalIterations;
        }

        private static float EvaluateBatched(LearningModelSession session, List<VideoFrame> input, int batchSize)
        {
            int numBatches = (int)Math.Ceiling((Decimal)input.Count / batchSize);
            string inputName = session.Model.InputFeatures[0].Name;
            float totalDuration = 0;
            var binding = new LearningModelBinding(session);
            for (int i = 0; i < numBatches; i++)
            {
                if (navigatingAwayFromPage)
                {
                    break;
                }

                int rangeStart = batchSize * i;
                List<VideoFrame> batch;
                // Add padding to the last batch if necessary
                if (rangeStart + batchSize > input.Count)
                {
                    int numInputsRemaining = input.Count - rangeStart;
                    int paddingAmount = batchSize - numInputsRemaining;
                    batch = input.GetRange(rangeStart, numInputsRemaining);
                    batch.AddRange(input.GetRange(0, paddingAmount));
                }
                else
                {
                    batch = input.GetRange(rangeStart, batchSize);
                }
                var start = HighResolutionClock.UtcNow();
                binding.Bind(inputName, batch);
                session.Evaluate(binding, "");
                var stop = HighResolutionClock.UtcNow();
                var duration = HighResolutionClock.DurationInMs(start, stop);
                totalDuration += duration;
            }
            return totalDuration;
        }

        private int GetBatchSizeFromBatchSizeSlider()
        {
            return int.Parse(BatchSizeSlider.Value.ToString());
        }

        private void UpdateProgress(int attemptNumber)
        {
            EvalProgressText.Text = "Attempt " + attemptNumber.ToString() + "/" + NumEvalIterations.ToString();
            EvalProgressBar.Value = attemptNumber + 1;
        }

        private void ShowUI()
        {
            float ratio = (1 - (_avgBatchDuration / _avgNonBatchedDuration)) * 100;
            var evalResult = new EvalResult
            {
                nonBatchedAvgTime = _avgNonBatchedDuration.ToString("0.00"),
                batchedAvgTime = _avgBatchDuration.ToString("0.00"),
                timeRatio = ratio.ToString("0.0")
            };
            List<EvalResult> results = new List<EvalResult>();
            results.Insert(0, evalResult);
            LoadingContainer.Visibility = Visibility.Collapsed;
            EvalResults.Visibility = Visibility.Visible;
            StartInferenceBtn.IsEnabled = true;
            BatchSizeSlider.IsEnabled = true;
            DeviceComboBox.IsEnabled = true;
            EvalResults.ItemsSource = results;
        }

        private void UpdateBatchSizeText(object sender, RoutedEventArgs e)
        {
            BatchSizeText.Text = "Batch Size: " + BatchSizeSlider.Value.ToString();
        }

        public void StopAllEvents()
        {
            navigatingAwayFromPage = true;
        }
    }
}
@@ -0,0 +1,5 @@
# Rules in this file were initially inferred by Visual Studio IntelliCode from the C:\Users\numform\Windows-Machine-Learning\Samples\WinMLSamplesGallery\WinMLSamplesGalleryNative\ codebase based on best match to current usage at 12/9/2021
# You can modify the rules from these initially generated values to suit your own policies
# You can learn more about editorconfig here: https://docs.microsoft.com/en-us/visualstudio/ide/editorconfig-code-style-settings-reference
[*.cs]
@@ -0,0 +1,9 @@
#include "pch.h"
#include "AdapterList.h"

namespace winrt::WinMLSamplesGalleryNative::implementation
{
    void AdapterList::GetAdapters(hstring const& path) {
        return;
    }
}
@@ -0,0 +1,18 @@
#pragma once
#include "AdapterList.g.h"

namespace winrt::WinMLSamplesGalleryNative::implementation
{
    struct AdapterList : AdapterListT<AdapterList>
    {
        AdapterList() = default;
        static void GetAdapters(hstring const& path);
    };
}
namespace winrt::WinMLSamplesGalleryNative::factory_implementation
{
    struct AdapterList : AdapterListT<AdapterList, implementation::AdapterList>
    {
    };
}
@@ -0,0 +1,3 @@
export module Module;

export void MyFunc();
@@ -16,4 +16,10 @@ namespace WinMLSamplesGalleryNative
    {
        static Microsoft.AI.MachineLearning.LearningModel LoadEncryptedResource(String key);
    }

    [default_interface]
    runtimeclass AdapterList
    {
        static void GetAdapters(String key);
    }
}
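With the IDL addition above, the projected WinMLSamplesGalleryNative.AdapterList runtimeclass becomes callable from the managed gallery project. A minimal, hypothetical call site is sketched below; GetAdapters is still a stub in this commit, so the call does no work yet, and the empty-string argument is only a placeholder:

    // Hypothetical C# call into the native projection (not part of this commit).
    // GetAdapters currently returns void and performs no adapter enumeration.
    WinMLSamplesGalleryNative.AdapterList.GetAdapters("");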
@@ -142,6 +142,7 @@
    </ResourceCompile>
  </ItemDefinitionGroup>
  <ItemGroup>
    <ClInclude Include="AdapterList.h" />
    <ClInclude Include="EncryptedModels.h" />
    <ClInclude Include="OpenCVImage.h" />
    <ClInclude Include="pch.h" />
@@ -149,6 +150,7 @@
    <ClInclude Include="WeakBuffer.h" />
  </ItemGroup>
  <ItemGroup>
    <ClCompile Include="AdapterList.cpp" />
    <ClCompile Include="EncryptedModels.cpp" />
    <ClCompile Include="OpenCVImage.cpp" />
    <ClCompile Include="pch.cpp">
@@ -160,6 +162,7 @@
    <Midl Include="WinMLSamplesGalleryNative.idl" />
  </ItemGroup>
  <ItemGroup>
    <None Include=".editorconfig" />
    <None Include="datafile.bin" />
    <None Include="packages.config" />
    <None Include="squeezenet1.1-7-batched.onnx" />
@@ -9,6 +9,9 @@
    <ClCompile Include="OpenCVImage.cpp">
      <Filter>OpenCVInterop</Filter>
    </ClCompile>
    <ClCompile Include="AdapterList.cpp">
      <Filter>AdapterList</Filter>
    </ClCompile>
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="pch.h" />
@@ -24,6 +27,9 @@
    <ClInclude Include="WeakBuffer.h">
      <Filter>AbiHelpers</Filter>
    </ClInclude>
    <ClInclude Include="AdapterList.h">
      <Filter>AdapterList</Filter>
    </ClInclude>
  </ItemGroup>
  <ItemGroup>
    <ResourceCompile Include="Resource.rc" />
@@ -37,6 +43,7 @@
    <None Include="squeezenet1.1-7-batched.onnx" />
    <None Include="WinMLSamplesGalleryNative.def" />
    <None Include="PropertySheet.props" />
    <None Include=".editorconfig" />
  </ItemGroup>
  <ItemGroup>
    <Text Include="readme.txt" />
@@ -51,5 +58,8 @@
    <Filter Include="AbiHelpers">
      <UniqueIdentifier>{ed4f3871-4514-4b93-9ace-1315cad5491e}</UniqueIdentifier>
    </Filter>
    <Filter Include="AdapterList">
      <UniqueIdentifier>{9df6a233-c2c6-4062-b738-31f7ba631acd}</UniqueIdentifier>
    </Filter>
  </ItemGroup>
</Project>