Delete IntelligentAPI_EmotionRecognizer directory
This commit is contained in:
Parent
06162d451b
Commit
1f4cfdaa0e
@@ -1,164 +0,0 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Windows.AI.MachineLearning;
using Windows.Foundation;
using Windows.Graphics.Imaging;
using Windows.Media;
using Windows.Media.FaceAnalysis;
using Windows.Storage;
using Windows.Storage.Streams;

namespace CommunityToolkit.Labs.Intelligent.EmotionRecognition
{
    public class DetectedEmotion
    {
        public int emotionIndex;
        public string emotion;
    }

    public class EmotionRecognizer
    {
        private LearningModel _model = null;
        private LearningModelSession _session = null;
        private LearningModelBinding _binding = null;
        private static EmotionRecognizer instance = null;

        private static List<string> labels;

        private async Task InitModelAsync()
        {
            // Load the model file that ships with the library.
            var file = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///IntelligentAPI_EmotionRecognizer/Assets/model_emotion.onnx"));

            // Load the model from the file.
            _model = await LearningModel.LoadFromStorageFileAsync(file);

            // Create a session that binds the model to the device running the model.
            _session = new LearningModelSession(_model, new LearningModelDevice(GetDeviceKind()));
        }

        private void LoadLabels()
        {
            labels = new List<string>()
            {
                "Neutral",
                "Happiness",
                "Surprise",
                "Sadness",
                "Anger",
                "Disgust",
                "Fear",
                "Contempt"
            };
        }

        LearningModelDeviceKind GetDeviceKind()
        {
            return LearningModelDeviceKind.Cpu;
        }

        private async static Task<IList<DetectedFace>> DetectFacesInImageAsync(SoftwareBitmap bitmap)
        {
            // FaceDetector requires a grayscale bitmap.
            FaceDetector faceDetector = await FaceDetector.CreateAsync();
            var convertedBitmap = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Gray8);
            return await faceDetector.DetectFacesAsync(convertedBitmap);
        }

        public async static Task<DetectedEmotion> DetectEmotion(SoftwareBitmap bitmap)
        {
            if (instance == null)
            {
                instance = new EmotionRecognizer();
            }

            return await instance.EvaluateFrame(bitmap);
        }

        public async Task<DetectedEmotion> EvaluateFrame(SoftwareBitmap softwareBitmap)
        {
            await InitModelAsync();
            LoadLabels();
            DetectedFace detectedFace = await DetectFace(softwareBitmap);
            if (detectedFace != null)
            {
                return await EvaluateEmotionInFace(detectedFace, softwareBitmap);
            }
            return null;
        }

        public async Task<DetectedEmotion> EvaluateEmotionInFace(DetectedFace detectedFace, SoftwareBitmap softwareBitmap)
        {
            var boundingBox = new Rect(detectedFace.FaceBox.X,
                                       detectedFace.FaceBox.Y,
                                       detectedFace.FaceBox.Width,
                                       detectedFace.FaceBox.Height);

            softwareBitmap = SoftwareBitmap.Convert(softwareBitmap, BitmapPixelFormat.Bgra8);

            var croppedFace = await Crop(softwareBitmap, boundingBox);
            LearningModelEvaluationResult emotionResults = await BindAndEvaluateModelAsync(croppedFace);

            // To get percentages, the output would need to be run through a softmax function;
            // percentages are not needed here, only the index of the maximum value.
            TensorFloat emotionIndexTensor = emotionResults.Outputs["Plus692_Output_0"] as TensorFloat;

            var emotionList = emotionIndexTensor.GetAsVectorView().ToList();
            var emotionIndex = emotionList.IndexOf(emotionList.Max());

            return new DetectedEmotion() { emotionIndex = emotionIndex, emotion = labels[emotionIndex] };
        }

        private static async Task<DetectedFace> DetectFace(SoftwareBitmap softwareBitmap)
        {
            var faces = await DetectFacesInImageAsync(softwareBitmap);

            // If there is a face in the frame, evaluate the emotion on the first one found.
            var detectedFace = faces.FirstOrDefault();
            return detectedFace;
        }

        public static async Task<SoftwareBitmap> Crop(SoftwareBitmap softwareBitmap, Rect bounds)
        {
            VideoFrame vid = VideoFrame.CreateWithSoftwareBitmap(softwareBitmap);
            vid = await Crop(vid, bounds);
            return vid.SoftwareBitmap;
        }

        public static async Task<VideoFrame> Crop(VideoFrame videoFrame, Rect bounds)
        {
            BitmapBounds cropBounds = new BitmapBounds()
            {
                Width = (uint)bounds.Width,
                Height = (uint)bounds.Height,
                X = (uint)bounds.X,
                Y = (uint)bounds.Y
            };
            VideoFrame result = new VideoFrame(BitmapPixelFormat.Bgra8,
                                               (int)cropBounds.Width,
                                               (int)cropBounds.Height,
                                               BitmapAlphaMode.Premultiplied);

            await videoFrame.CopyToAsync(result, cropBounds, null);

            return result;
        }

        private async Task<LearningModelEvaluationResult> BindAndEvaluateModelAsync(SoftwareBitmap croppedFace)
        {
            // Create a learning model binding that binds the cropped face to the model input, then evaluate.
            _binding = new LearningModelBinding(_session);
            _binding.Bind("Input3", VideoFrame.CreateWithSoftwareBitmap(croppedFace));
            var emotionResults = await _session.EvaluateAsync(_binding, "id");
            return emotionResults;
        }
    }
}
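For context, the static EmotionRecognizer.DetectEmotion(SoftwareBitmap) method above was the public entry point of the deleted API. Below is a minimal consumption sketch, assuming a UWP app that loads a packaged image through BitmapDecoder; the sample file path, the EmotionRecognizerSample class, and the RunAsync helper are illustrative placeholders, while EmotionRecognizer and DetectedEmotion come from the deleted file above.

using System;
using System.Threading.Tasks;
using Windows.Graphics.Imaging;
using Windows.Storage;
using CommunityToolkit.Labs.Intelligent.EmotionRecognition;

public static class EmotionRecognizerSample
{
    // Hypothetical helper: loads an image packaged with the app and runs emotion detection on it.
    public static async Task RunAsync()
    {
        // "ms-appx:///Assets/sample-face.jpg" is a placeholder path, not part of the deleted code.
        StorageFile imageFile = await StorageFile.GetFileFromApplicationUriAsync(
            new Uri("ms-appx:///Assets/sample-face.jpg"));

        using (var stream = await imageFile.OpenReadAsync())
        {
            BitmapDecoder decoder = await BitmapDecoder.CreateAsync(stream);
            SoftwareBitmap bitmap = await decoder.GetSoftwareBitmapAsync();

            // DetectEmotion returns null when no face is found in the frame.
            DetectedEmotion result = await EmotionRecognizer.DetectEmotion(bitmap);
            if (result != null)
            {
                System.Diagnostics.Debug.WriteLine($"Detected emotion: {result.emotion} (index {result.emotionIndex})");
            }
        }
    }
}

DetectEmotion lazily creates the recognizer instance and returns null when no face is detected, so callers are expected to handle the null case.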
@@ -1,24 +0,0 @@
<Project Sdk="MSBuild.Sdk.Extras">
  <PropertyGroup>
    <RootNamespace>IntelligentAPI.EmotionRecognition</RootNamespace>
  </PropertyGroup>
  <PropertyGroup>
    <PackageId>CommunityToolkit.Labs.Intelligent.EmotionRecognition2</PackageId>
    <Description>
      This package performs Emotion Recognition on an input image by using the Emotion FERPlus model.
    </Description>
    <Version>0.0.2</Version>
  </PropertyGroup>
  <ItemGroup>
    <EmbeddedResource Include="Properties\IntelligentAPI_EmotionRecognizer.rd.xml" />
  </ItemGroup>
  <ItemGroup>
    <Content Include="Assets\model_emotion.onnx" Pack="True" PackagePath="lib/uap10.0.17763/Assets" />
  </ItemGroup>
  <ItemGroup>
    <None Include="ImageClassifier.licenseheader" />
  </ItemGroup>
  <Target Name="CustomBeforeBuild" BeforeTargets="BeforeBuild">
    <Exec Command="powershell.exe -NonInteractive -ExecutionPolicy Unrestricted -Command &quot;&amp; { .\Scripts\script.ps1 }&quot;" />
  </Target>
</Project>
@@ -1,21 +0,0 @@
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]

// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: ComVisible(false)]
@@ -1,33 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
    This file contains Runtime Directives, specifications about types your application accesses
    through reflection and other dynamic code patterns. Runtime Directives are used to control the
    .NET Native optimizer and ensure that it does not remove code accessed by your library. If your
    library does not do any reflection, then you generally do not need to edit this file. However,
    if your library reflects over types, especially types passed to it or derived from its types,
    then you should write Runtime Directives.

    The most common use of reflection in libraries is to discover information about types passed
    to the library. Runtime Directives have three ways to express requirements on types passed to
    your library.

    1.  Parameter, GenericParameter, TypeParameter, TypeEnumerableParameter
        Use these directives to reflect over types passed as a parameter.

    2.  SubTypes
        Use a SubTypes directive to reflect over types derived from another type.

    3.  AttributeImplies
        Use an AttributeImplies directive to indicate that your library needs to reflect over
        types or methods decorated with an attribute.

    For more information on writing Runtime Directives for libraries, please visit
    https://go.microsoft.com/fwlink/?LinkID=391919
-->
<Directives xmlns="http://schemas.microsoft.com/netfx/2013/01/metadata">
  <Library Name="IntelligentAPI_EmotionRecognizer">

    <!-- add directives for your library here -->

  </Library>
</Directives>
@@ -1,15 +0,0 @@
$ProgressPreference = 'SilentlyContinue'

$emotionferplusfile = "./Assets/model_emotion.onnx"
if (-not(Test-Path -Path $emotionferplusfile -PathType Leaf)) {
    try {
        Invoke-WebRequest -Uri "https://github.com/onnx/models/raw/master/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-8.onnx" -OutFile $emotionferplusfile
        Write-Host "The file [$emotionferplusfile] has been created."
    }
    catch {
        throw $_.Exception.Message
    }
}
else {
    Write-Host "The file [$emotionferplusfile] exists."
}