use the same device as session to create input VideoFrame and experiment with frame size
Parent: 37dcbd952b
Commit: cb3e3cdc5b
@@ -102,6 +102,8 @@ SegmentModel::SegmentModel(UINT32 w, UINT32 h) :
}

void SegmentModel::SetModels(UINT32 w, UINT32 h) {
+ w /= 4; h /= 4;
SetImageSize(w, h);
+ m_sess = CreateLearningModelSession(Invert(1, 3, h, w));
m_sessStyleTransfer = CreateLearningModelSession(StyleTransfer());
@@ -132,17 +134,19 @@ void SegmentModel::SetImageSize(UINT32 w, UINT32 h)
m_imageHeightInPixels = h;
}

- void SegmentModel::Run(IDirect3DSurface src, IDirect3DSurface dest, IDirect3DDevice device)
+ void SegmentModel::Run(IDirect3DSurface src, IDirect3DSurface dest)
{
OutputDebugString(L"\n [ Starting run | ");
// 1. Get input buffer as a VideoFrame
VideoFrame input = VideoFrame::CreateWithDirect3D11Surface(src);
VideoFrame output = VideoFrame::CreateWithDirect3D11Surface(dest);

+ auto device = m_sessFCN.Device().Direct3D11Device();
auto desc = input.Direct3DSurface().Description();
auto descOut = output.Direct3DSurface().Description();
- VideoFrame input2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(desc.Format, desc.Width, desc.Height, device);
- VideoFrame output2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(descOut.Format, descOut.Width, descOut.Height, device);
+ VideoFrame input2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(desc.Format, desc.Width/4, desc.Height/4, device);
+ VideoFrame output2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(descOut.Format, descOut.Width/4, descOut.Height/4, device);

input.CopyToAsync(input2).get(); // TODO: I'm guessing it's this copy that's causing issues...
output.CopyToAsync(output2).get();
@@ -191,14 +195,6 @@ void SegmentModel::Run(IDirect3DSurface src, IDirect3DSurface dest, IDirect3DDev
// Copy back to the correct surface for MFT
output2.CopyToAsync(output).get();

- // Clean up bindings before returning
- m_bindPreprocess.Clear();
- m_bindFCN.Clear();
- m_bindPostprocess.Clear();
- input.Close();
- input2.Close();
- output2.Close();
- output.Close();
OutputDebugString(L" | Ending run ]");

}
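The reworked Run() above boils down to staging the MFT's surfaces on the D3D device the WinML session already evaluates on, and (as the experiment in this commit) at a quarter of the source size, before binding. A minimal sketch of that pattern, assuming C++/WinRT with the Windows.Media, Windows.AI.MachineLearning and Direct3D11 interop projections; the helper name StageOnSessionDevice and the scale parameter are illustrative, not code from this repo:

    #include <winrt/Windows.Foundation.h>
    #include <winrt/Windows.AI.MachineLearning.h>
    #include <winrt/Windows.Media.h>
    #include <winrt/Windows.Graphics.DirectX.Direct3D11.h>

    using namespace winrt::Windows::AI::MachineLearning;
    using namespace winrt::Windows::Media;
    using namespace winrt::Windows::Graphics::DirectX::Direct3D11;

    // Stage `source` on the device the session evaluates on, optionally downscaled.
    VideoFrame StageOnSessionDevice(LearningModelSession const& session,
                                    VideoFrame const& source, int32_t scale = 4)
    {
        // Reuse the session's own D3D11 device instead of letting
        // CreateAsDirect3D11SurfaceBacked allocate on a different one.
        IDirect3DDevice device = session.Device().Direct3D11Device();
        auto desc = source.Direct3DSurface().Description();

        VideoFrame staged = VideoFrame::CreateAsDirect3D11SurfaceBacked(
            desc.Format, desc.Width / scale, desc.Height / scale, device);

        // CopyToAsync converts and rescales onto the new surface; blocking with
        // .get() mirrors the synchronous style of Run() above.
        source.CopyToAsync(staged).get();
        return staged;
    }

Per the commit message, the intent is that the staging frame lives on the same device the session evaluates on, so the later copy and bind do not have to hop between devices.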
@@ -267,7 +263,7 @@ void SegmentModel::SubmitEval(VideoFrame input, VideoFrame output) {
// return without waiting for the submit to finish, setup the completion handler
}

- void SegmentModel::RunStyleTransfer(IDirect3DSurface src, IDirect3DSurface dest, IDirect3DDevice device)
+ void SegmentModel::RunStyleTransfer(IDirect3DSurface src, IDirect3DSurface dest)
{
OutputDebugString(L"\n[Starting RunStyleTransfer | ");
@@ -277,8 +273,9 @@ void SegmentModel::RunStyleTransfer(IDirect3DSurface src, IDirect3DSurface dest,
auto desc = input.Direct3DSurface().Description();
auto descOut = output.Direct3DSurface().Description();

- VideoFrame output2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(descOut.Format, 720, 720, device);
- VideoFrame input2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(desc.Format, 720, 720, device);
+ auto sessdevice = m_sessStyleTransfer.Device().Direct3D11Device();
+ VideoFrame output2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(descOut.Format, 720, 720, sessdevice);
+ VideoFrame input2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(desc.Format, 720, 720, sessdevice);
input.CopyToAsync(input2).get(); // TODO: Can input stay the same if NV12?
output.CopyToAsync(output2).get();
desc = input2.Direct3DSurface().Description();
@@ -296,17 +293,13 @@ void SegmentModel::RunStyleTransfer(IDirect3DSurface src, IDirect3DSurface dest,
auto results = m_sessStyleTransfer.Evaluate(m_bindStyleTransfer, L"");

output2.CopyToAsync(output).get(); // Should put onto the correct surface now? Make sure, can return the surface instead later
m_bindStyleTransfer.Clear();
- input.Close();
- input2.Close();
- output2.Close();
- output.Close();

OutputDebugString(L" Ending RunStyleTransfer]");

}

- void SegmentModel::RunTestDXGI(IDirect3DSurface src, IDirect3DSurface dest, IDirect3DDevice device)
+ void SegmentModel::RunTestDXGI(IDirect3DSurface src, IDirect3DSurface dest)
{
OutputDebugString(L"\n [ Starting runTest | "); i++;
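The hunk above only shows the tail of the evaluate step; the full sequence is the usual WinML bind-and-evaluate pattern. A sketch that reuses the includes and namespaces from the earlier helper (the feature names "inputImage" and "outputImage" are placeholders; the real names come from the model's metadata):

    // Bind the staged frames, evaluate synchronously, then copy the result back
    // onto the surface owned by the MFT and reset the binding.
    void EvaluateStaged(LearningModelSession const& session,
                        VideoFrame const& input2, VideoFrame const& output2,
                        VideoFrame const& output /* surface owned by the MFT */)
    {
        LearningModelBinding binding(session);
        binding.Bind(L"inputImage", ImageFeatureValue::CreateFromVideoFrame(input2));
        binding.Bind(L"outputImage", ImageFeatureValue::CreateFromVideoFrame(output2));

        session.Evaluate(binding, L"");        // blocking evaluate, as in RunStyleTransfer
        output2.CopyToAsync(output).get();     // hand the result back to the MFT's surface
        binding.Clear();                       // drop the binding's references to the frames
    }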
@@ -317,7 +310,8 @@ void SegmentModel::RunTestDXGI(IDirect3DSurface src, IDirect3DSurface dest, IDir
auto desc = input.Direct3DSurface().Description();
auto descOut = output.Direct3DSurface().Description();

- // TODO: Use a specific device to create so not piling up on resources?
+ // TODO: Use a specific device to create so not piling up on resources?
+ auto device = m_sess.Device().Direct3D11Device();
VideoFrame output2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(descOut.Format, descOut.Width, descOut.Height, device);
VideoFrame input2 = VideoFrame::CreateAsDirect3D11SurfaceBacked(desc.Format, desc.Width, desc.Height, device);
input.CopyToAsync(input2).get(); // TODO: Can input stay the same if NV12?
@@ -493,7 +487,7 @@ LearningModel SegmentModel::ReshapeFlatBufferToNCHW(long n, long c, long h, long
}

LearningModelSession SegmentModel::CreateLearningModelSession(const LearningModel& model, bool closeModel) {
- auto device = m_useGPU ? LearningModelDevice(LearningModelDeviceKind::DirectX) : LearningModelDevice(LearningModelDeviceKind::Default); // Todo: Have a toggle between GPU/ CPU?
+ auto device = m_useGPU ? LearningModelDevice(LearningModelDeviceKind::DirectXHighPerformance) : LearningModelDevice(LearningModelDeviceKind::Default); // Todo: Have a toggle between GPU/ CPU?
auto options = LearningModelSessionOptions();
options.BatchSizeOverride(0);
options.CloseModelOnSessionCreation(closeModel);
@@ -34,9 +34,9 @@ public:
SegmentModel(UINT32 w, UINT32 h);
void SetModels(UINT32 w, UINT32 h);

- void Run(IDirect3DSurface src, IDirect3DSurface dest, IDirect3DDevice device);
- void RunTestDXGI(IDirect3DSurface src, IDirect3DSurface dest, IDirect3DDevice device);
- void RunStyleTransfer(IDirect3DSurface src, IDirect3DSurface dest, IDirect3DDevice device);
+ void Run(IDirect3DSurface src, IDirect3DSurface dest);
+ void RunTestDXGI(IDirect3DSurface src, IDirect3DSurface dest);
+ void RunStyleTransfer(IDirect3DSurface src, IDirect3DSurface dest);

LearningModelSession CreateLearningModelSession(const LearningModel& model, bool closedModel=true);
void SetImageSize(UINT32 w, UINT32 h);
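From the caller's side, the public surface is now device-free. A hypothetical usage sketch (variable names are illustrative, not from the repo):

    SegmentModel model(width, height);                 // sessions are created internally
    model.SetModels(width, height);                    // each session picks its own D3D device
    model.RunStyleTransfer(srcSurface, destSurface);   // no IDirect3DDevice parameter anymore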
@@ -1418,28 +1418,20 @@ IDirect3DSurface TransformBlur::SampleToD3Dsurface(IMFSample* sample)
HRESULT TransformBlur::OnProcessOutput(IMFSample** ppOut)
{
HRESULT hr = S_OK;

// TODO: Cache so don't have to create each time
winrt::com_ptr<IVideoFrameNativeFactory> factory;
hr = CoCreateInstance(CLSID_VideoFrameNativeFactory, NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&factory));

IDirect3DSurface src = SampleToD3Dsurface(m_spSample.get());
IDirect3DSurface dest = SampleToD3Dsurface(*ppOut);

winrt::com_ptr<ID3D11Texture2D> pTextSrc;
winrt::com_ptr<ID3D11Texture2D> pTextCreate;
winrt::com_ptr<ID3D11Texture2D> pTextDest;

// Extract the device
winrt::com_ptr<IDXGIDevice> pDXGIDevice{ m_spDevice.as<IDXGIDevice>() };
IDirect3DDevice direct3DDevice{ nullptr };
hr = CreateDirect3D11DeviceFromDXGIDevice(pDXGIDevice.get(), reinterpret_cast<IInspectable**>(winrt::put_abi(direct3DDevice)));

// Invoke the image transform function.
if (SUCCEEDED(hr))
{
// Do the copies inside runtest
auto now = std::chrono::high_resolution_clock::now();
- m_segmentModel.Run(src, dest, direct3DDevice);
+ m_segmentModel.Run(src, dest);
auto timePassed = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - now);
OutputDebugString(std::to_wstring(timePassed.count()).c_str());
}