Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1650696 - P4. Remove the expectation for a MediaDataDecoder to work on a specified TaskQueue. r=jolin
It will now be up to the caller to determine where the decoder is going to run. This makes it possible to simplify the audio decoders so that they can run synchronously and be wrapped in a Wasm sandbox (which doesn't support multi-threading). The structure guarantees that all MediaDataDecoder methods are called on the same thread on which the decoder was initialised. To achieve this, wherever a MediaDataDecoder was created, we wrap it in a MediaDataDecoderProxy that ensures all methods run on the given thread. All MediaDataDecoder implementations keep asserting that their methods run on the expected thread, for diagnostic purposes; those assertions could be removed in the future. Video decoders that could block the calling thread excessively are made to run on their own task queue. The Apple decoder is almost entirely asynchronous, with the exception of the drain method, which could block. The Android and OMX decoders are excluded, as the frameworks they use are fully asynchronous and already operate on another thread.
Differential Revision: https://phabricator.services.mozilla.com/D86929
This commit is contained in:
Parent
0dbb1ed812
Commit
5be22726b0
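For illustration, here is a minimal sketch of the wrapping pattern this patch applies at every decoder-creation site. The helper name CreateWrappedDecoder is hypothetical; PDMFactory, CreateDecoderParams, MediaDataDecoderProxy and do_AddRef are the names used in the hunks below, and error handling is elided.

  // Hypothetical helper illustrating the pattern in this patch.
  already_AddRefed<MediaDataDecoder> CreateWrappedDecoder(
      const TrackInfo& aInfo, TaskQueue* aTaskQueue) {
    RefPtr<PDMFactory> platform = new PDMFactory();
    // The decoder itself no longer receives a task queue; it may be fully
    // synchronous (e.g. the audio decoders after this patch).
    RefPtr<MediaDataDecoder> decoder =
        platform->CreateDecoder(CreateDecoderParams{aInfo});
    if (!decoder) {
      return nullptr;
    }
    // The proxy ensures Init/Decode/Drain/Flush/Shutdown all run on
    // aTaskQueue, i.e. on the thread where Init() first ran.
    RefPtr<MediaDataDecoder> wrapped =
        new MediaDataDecoderProxy(decoder.forget(), do_AddRef(aTaskQueue));
    return wrapped.forget();
  }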
@@ -8,6 +8,7 @@
#include "BufferMediaResource.h"
#include "MediaData.h"
#include "MediaDataDecoderProxy.h"
#include "PDMFactory.h"
#include "VideoUtils.h"
#include "WebMDemuxer.h"

@@ -17,11 +18,11 @@
#include "mozilla/StaticMutex.h"
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/Telemetry.h"
#include "mozilla/dom/ContentChild.h"
#include "mozilla/gfx/gfxVars.h"
#include "nsIGfxInfo.h"
#include "mozilla/Telemetry.h"
#include "nsGkAtoms.h"
#include "nsIGfxInfo.h"

#ifndef MOZ_WIDGET_ANDROID
# include "WebMSample.h"

@@ -246,11 +247,15 @@ void BenchmarkPlayback::InitDecoder(UniquePtr<TrackInfo>&& aInfo) {
  RefPtr<PDMFactory> platform = new PDMFactory();
  mInfo = std::move(aInfo);
  mDecoder = platform->CreateDecoder({*mInfo, mDecoderTaskQueue});
  if (!mDecoder) {
  RefPtr<MediaDataDecoder> decoder =
      platform->CreateDecoder(CreateDecoderParams{*mInfo});

  if (!decoder) {
    Error(MediaResult(NS_ERROR_FAILURE, "Failed to create decoder"));
    return;
  }
  mDecoder = new MediaDataDecoderProxy(decoder.forget(),
                                       do_AddRef(mDecoderTaskQueue.get()));
  RefPtr<Benchmark> ref(mGlobalState);
  mDecoder->Init()->Then(
      Thread(), __func__,
@@ -6,10 +6,15 @@
#include "MediaFormatReader.h"

#include <algorithm>
#include <map>
#include <queue>

#include "AllocationPolicy.h"
#include "DecoderBenchmark.h"
#include "GeckoProfiler.h"
#include "MediaData.h"
#include "MediaDataDecoderProxy.h"
#include "MediaInfo.h"
#include "VideoFrameContainer.h"
#include "VideoUtils.h"

@@ -25,10 +30,6 @@
#include "nsContentUtils.h"
#include "nsPrintfCString.h"

#include <algorithm>
#include <map>
#include <queue>

using namespace mozilla::media;

static mozilla::LazyLogModule sFormatDecoderLog("MediaFormatReader");

@@ -354,12 +355,17 @@ MediaResult MediaFormatReader::DecoderFactory::DoCreateDecoder(Data& aData) {
  switch (aData.mTrack) {
    case TrackInfo::kAudioTrack: {
      aData.mDecoder = platform->CreateDecoder(
          {*ownerData.GetCurrentInfo()->GetAsAudioInfo(), ownerData.mTaskQueue,
           mOwner->mCrashHelper,
      RefPtr<MediaDataDecoder> decoder = platform->CreateDecoder(
          {*ownerData.GetCurrentInfo()->GetAsAudioInfo(), mOwner->mCrashHelper,
           CreateDecoderParams::UseNullDecoder(ownerData.mIsNullDecode),
           &result, TrackInfo::kAudioTrack,
           &mOwner->OnTrackWaitingForKeyProducer()});
      if (!decoder) {
        aData.mDecoder = nullptr;
        break;
      }
      aData.mDecoder = new MediaDataDecoderProxy(
          decoder.forget(), do_AddRef(ownerData.mTaskQueue.get()));
      break;
    }

@@ -369,8 +375,8 @@ MediaResult MediaFormatReader::DecoderFactory::DoCreateDecoder(Data& aData) {
      using Option = CreateDecoderParams::Option;
      using OptionSet = CreateDecoderParams::OptionSet;

      aData.mDecoder = platform->CreateDecoder(
          {*ownerData.GetCurrentInfo()->GetAsVideoInfo(), ownerData.mTaskQueue,
      RefPtr<MediaDataDecoder> decoder = platform->CreateDecoder(
          {*ownerData.GetCurrentInfo()->GetAsVideoInfo(),
           mOwner->mKnowsCompositor, mOwner->GetImageContainer(),
           mOwner->mCrashHelper,
           CreateDecoderParams::UseNullDecoder(ownerData.mIsNullDecode),

@@ -380,6 +386,12 @@ MediaResult MediaFormatReader::DecoderFactory::DoCreateDecoder(Data& aData) {
           OptionSet(ownerData.mHardwareDecodingDisabled
                         ? Option::HardwareDecoderNotAllowed
                         : Option::Default)});
      if (!decoder) {
        aData.mDecoder = nullptr;
        break;
      }
      aData.mDecoder = new MediaDataDecoderProxy(
          decoder.forget(), do_AddRef(ownerData.mTaskQueue.get()));
      break;
    }
@@ -5,11 +5,11 @@
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "RemoteAudioDecoder.h"

#include "RemoteDecoderManagerChild.h"
#include "MediaDataDecoderProxy.h"
#include "OpusDecoder.h"
#include "RemoteDecoderManagerChild.h"
#include "VorbisDecoder.h"
#include "WAVDecoder.h"

#include "mozilla/PodOperations.h"

namespace mozilla {

@@ -82,17 +82,17 @@ RemoteAudioDecoderParent::RemoteAudioDecoderParent(
    : RemoteDecoderParent(aParent, aManagerThread, aDecodeTaskQueue),
      mAudioInfo(aAudioInfo) {
  CreateDecoderParams params(mAudioInfo);
  params.mTaskQueue = mDecodeTaskQueue;
  params.mOptions = aOptions;
  MediaResult error(NS_OK);
  params.mError = &error;

  RefPtr<MediaDataDecoder> decoder;
  if (VorbisDataDecoder::IsVorbis(params.mConfig.mMimeType)) {
    mDecoder = new VorbisDataDecoder(params);
    decoder = new VorbisDataDecoder(params);
  } else if (OpusDataDecoder::IsOpus(params.mConfig.mMimeType)) {
    mDecoder = new OpusDataDecoder(params);
    decoder = new OpusDataDecoder(params);
  } else if (WaveDataDecoder::IsWave(params.mConfig.mMimeType)) {
    mDecoder = new WaveDataDecoder(params);
    decoder = new WaveDataDecoder(params);
  }

  if (NS_FAILED(error)) {

@@ -100,6 +100,11 @@ RemoteAudioDecoderParent::RemoteAudioDecoderParent(
    *aErrorDescription = error.Description();
  }

  if (decoder) {
    mDecoder = new MediaDataDecoderProxy(decoder.forget(),
                                         do_AddRef(mDecodeTaskQueue.get()));
  }

  *aSuccess = !!mDecoder;
}
@ -16,6 +16,7 @@
|
|||
#endif
|
||||
#include "GPUVideoImage.h"
|
||||
#include "ImageContainer.h" // for PlanarYCbCrData and BufferRecycleBin
|
||||
#include "MediaDataDecoderProxy.h"
|
||||
#include "MediaInfo.h"
|
||||
#include "PDMFactory.h"
|
||||
#include "RemoteDecoderManagerChild.h"
|
||||
|
@ -271,7 +272,6 @@ RemoteVideoDecoderParent::RemoteVideoDecoderParent(
|
|||
}
|
||||
|
||||
CreateDecoderParams params(mVideoInfo);
|
||||
params.mTaskQueue = mDecodeTaskQueue;
|
||||
params.mKnowsCompositor = mKnowsCompositor;
|
||||
params.mImageContainer = container;
|
||||
params.mRate = CreateDecoderParams::VideoFrameRate(aFramerate);
|
||||
|
@ -279,6 +279,7 @@ RemoteVideoDecoderParent::RemoteVideoDecoderParent(
|
|||
MediaResult error(NS_OK);
|
||||
params.mError = &error;
|
||||
|
||||
RefPtr<MediaDataDecoder> decoder;
|
||||
if (XRE_IsGPUProcess()) {
|
||||
#ifdef XP_WIN
|
||||
// Ensure everything is properly initialized on the right thread.
|
||||
|
@ -288,7 +289,7 @@ RemoteVideoDecoderParent::RemoteVideoDecoderParent(
|
|||
// PDM factory logic for picking a decoder.
|
||||
RefPtr<WMFDecoderModule> pdm(new WMFDecoderModule());
|
||||
pdm->Startup();
|
||||
mDecoder = pdm->CreateVideoDecoder(params);
|
||||
decoder = pdm->CreateVideoDecoder(params);
|
||||
#else
|
||||
MOZ_ASSERT(false,
|
||||
"Can't use RemoteVideoDecoder in the GPU process on non-Windows "
|
||||
|
@ -299,9 +300,9 @@ RemoteVideoDecoderParent::RemoteVideoDecoderParent(
|
|||
#ifdef MOZ_AV1
|
||||
if (AOMDecoder::IsAV1(params.mConfig.mMimeType)) {
|
||||
if (StaticPrefs::media_av1_use_dav1d()) {
|
||||
mDecoder = new DAV1DDecoder(params);
|
||||
decoder = new DAV1DDecoder(params);
|
||||
} else {
|
||||
mDecoder = new AOMDecoder(params);
|
||||
decoder = new AOMDecoder(params);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -311,6 +312,10 @@ RemoteVideoDecoderParent::RemoteVideoDecoderParent(
|
|||
*aErrorDescription = error.Description();
|
||||
}
|
||||
|
||||
if (decoder) {
|
||||
mDecoder = new MediaDataDecoderProxy(decoder.forget(),
|
||||
do_AddRef(mDecodeTaskQueue.get()));
|
||||
}
|
||||
*aSuccess = !!mDecoder;
|
||||
}
|
||||
|
||||
|
|
|
@ -247,7 +247,7 @@ already_AddRefed<Promise> MediaCapabilities::DecodingInfo(
|
|||
// MediaDataDecoder keeps a reference to the config object, so we must
|
||||
// keep it alive until the decoder has been shutdown.
|
||||
CreateDecoderParams params{
|
||||
*config, taskQueue, compositor,
|
||||
*config, compositor,
|
||||
CreateDecoderParams::VideoFrameRate(frameRate),
|
||||
TrackInfo::kVideoTrack};
|
||||
// We want to ensure that all decoder's queries are occurring only
|
||||
|
|
|
@ -198,7 +198,6 @@ AllocationWrapper::CreateDecoder(const CreateDecoderParams& aParams,
|
|||
// aParams.mConfig is guaranteed to stay alive during the lifetime of the
|
||||
// MediaDataDecoder, so keeping a pointer to the object is safe.
|
||||
const TrackInfo* config = &aParams.mConfig;
|
||||
RefPtr<TaskQueue> taskQueue = aParams.mTaskQueue;
|
||||
DecoderDoctorDiagnostics* diagnostics = aParams.mDiagnostics;
|
||||
RefPtr<layers::ImageContainer> imageContainer = aParams.mImageContainer;
|
||||
RefPtr<layers::KnowsCompositor> knowsCompositor = aParams.mKnowsCompositor;
|
||||
|
@ -227,7 +226,6 @@ AllocationWrapper::CreateDecoder(const CreateDecoderParams& aParams,
|
|||
TrackTypeToStr(type)));
|
||||
RefPtr<PDMFactory> pdm = new PDMFactory();
|
||||
CreateDecoderParams params{*config,
|
||||
taskQueue,
|
||||
diagnostics,
|
||||
imageContainer,
|
||||
&result,
|
||||
|
|
|
@@ -37,7 +37,6 @@ class ImageContainer;
class GpuDecoderModule;
class MediaDataDecoder;
class RemoteDecoderModule;
class TaskQueue;
class CDMProxy;

static LazyLogModule sPDMLog("PlatformDecoderModule");

@@ -105,7 +104,6 @@ struct MOZ_STACK_CLASS CreateDecoderParams final {
  }

  const TrackInfo& mConfig;
  TaskQueue* mTaskQueue = nullptr;
  DecoderDoctorDiagnostics* mDiagnostics = nullptr;
  layers::ImageContainer* mImageContainer = nullptr;
  MediaResult* mError = nullptr;

@@ -119,7 +117,6 @@ struct MOZ_STACK_CLASS CreateDecoderParams final {
  VideoFrameRate mRate;

 private:
  void Set(TaskQueue* aTaskQueue) { mTaskQueue = aTaskQueue; }
  void Set(DecoderDoctorDiagnostics* aDiagnostics) {
    mDiagnostics = aDiagnostics;
  }

@@ -281,6 +278,8 @@ class MediaDataDecoder : public DecoderDoctorLifeLogger<MediaDataDecoder> {
  // it can call Shutdown() to cancel this operation. Any initialization
  // that requires blocking the calling thread in this function *must*
  // be done here so that it can be canceled by calling Shutdown()!
  // Methods Decode, DecodeBatch, Drain, Flush, Shutdown are guaranteed to be
  // called on the thread where Init() first ran.
  virtual RefPtr<InitPromise> Init() = 0;

  // Inserts a sample into the decoder's decode pipeline. The DecodePromise will
@ -5,19 +5,21 @@
|
|||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "AOMDecoder.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "ImageContainer.h"
|
||||
#include "MediaResult.h"
|
||||
#include "TimeUnits.h"
|
||||
#include "aom/aomdx.h"
|
||||
#include "aom/aom_image.h"
|
||||
#include "aom/aomdx.h"
|
||||
#include "gfx2DGlue.h"
|
||||
#include "mozilla/PodOperations.h"
|
||||
#include "mozilla/SyncRunnable.h"
|
||||
#include "mozilla/TaskQueue.h"
|
||||
#include "nsError.h"
|
||||
#include "prsystem.h"
|
||||
#include "ImageContainer.h"
|
||||
#include "nsThreadUtils.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include "prsystem.h"
|
||||
|
||||
#undef LOG
|
||||
#define LOG(arg, ...) \
|
||||
|
@ -77,7 +79,8 @@ static MediaResult InitContext(AOMDecoder& aAOMDecoder, aom_codec_ctx_t* aCtx,
|
|||
|
||||
AOMDecoder::AOMDecoder(const CreateDecoderParams& aParams)
|
||||
: mImageContainer(aParams.mImageContainer),
|
||||
mTaskQueue(aParams.mTaskQueue),
|
||||
mTaskQueue(new TaskQueue(
|
||||
GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER), "AOMDecoder")),
|
||||
mInfo(aParams.VideoConfig()) {
|
||||
PodZero(&mCodec);
|
||||
}
|
||||
|
@ -91,7 +94,7 @@ RefPtr<ShutdownPromise> AOMDecoder::Shutdown() {
|
|||
if (res != AOM_CODEC_OK) {
|
||||
LOGEX_RESULT(self.get(), res, "aom_codec_destroy");
|
||||
}
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
return self->mTaskQueue->BeginShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -6,11 +6,11 @@
|
|||
#if !defined(AOMDecoder_h_)
|
||||
# define AOMDecoder_h_
|
||||
|
||||
# include "PlatformDecoderModule.h"
|
||||
# include "mozilla/Span.h"
|
||||
|
||||
# include <stdint.h>
|
||||
|
||||
# include "PlatformDecoderModule.h"
|
||||
# include "aom/aom_decoder.h"
|
||||
# include "mozilla/Span.h"
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
|
||||
#include "DAV1DDecoder.h"
|
||||
|
||||
#include "mozilla/TaskQueue.h"
|
||||
#include "mozilla/gfx/gfxVars.h"
|
||||
#include "nsThreadUtils.h"
|
||||
|
||||
|
@ -18,7 +19,9 @@ namespace mozilla {
|
|||
|
||||
DAV1DDecoder::DAV1DDecoder(const CreateDecoderParams& aParams)
|
||||
: mInfo(aParams.VideoConfig()),
|
||||
mTaskQueue(aParams.mTaskQueue),
|
||||
mTaskQueue(
|
||||
new TaskQueue(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
|
||||
"Dav1dDecoder")),
|
||||
mImageContainer(aParams.mImageContainer) {}
|
||||
|
||||
RefPtr<MediaDataDecoder::InitPromise> DAV1DDecoder::Init() {
|
||||
|
@ -281,7 +284,7 @@ RefPtr<ShutdownPromise> DAV1DDecoder::Shutdown() {
|
|||
RefPtr<DAV1DDecoder> self = this;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self]() {
|
||||
dav1d_close(&self->mContext);
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
return self->mTaskQueue->BeginShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -5,17 +5,17 @@
|
|||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "OpusDecoder.h"
|
||||
#include "OpusParser.h"
|
||||
#include "TimeUnits.h"
|
||||
#include "VorbisUtils.h"
|
||||
#include "VorbisDecoder.h" // For VorbisLayout
|
||||
#include "mozilla/EndianUtils.h"
|
||||
#include "mozilla/PodOperations.h"
|
||||
#include "mozilla/SyncRunnable.h"
|
||||
#include "VideoUtils.h"
|
||||
|
||||
#include <inttypes.h> // For PRId64
|
||||
|
||||
#include "OpusParser.h"
|
||||
#include "TimeUnits.h"
|
||||
#include "VideoUtils.h"
|
||||
#include "VorbisDecoder.h" // For VorbisLayout
|
||||
#include "VorbisUtils.h"
|
||||
#include "mozilla/EndianUtils.h"
|
||||
#include "mozilla/PodOperations.h"
|
||||
#include "mozilla/SyncRunnable.h"
|
||||
#include "opus/opus.h"
|
||||
extern "C" {
|
||||
#include "opus/opus_multistream.h"
|
||||
|
@ -29,7 +29,6 @@ namespace mozilla {
|
|||
|
||||
OpusDataDecoder::OpusDataDecoder(const CreateDecoderParams& aParams)
|
||||
: mInfo(aParams.AudioConfig()),
|
||||
mTaskQueue(aParams.mTaskQueue),
|
||||
mOpusDecoder(nullptr),
|
||||
mSkip(0),
|
||||
mDecodedHeader(false),
|
||||
|
@ -40,6 +39,7 @@ OpusDataDecoder::OpusDataDecoder(const CreateDecoderParams& aParams)
|
|||
CreateDecoderParams::Option::DefaultPlaybackDeviceMono)) {}
|
||||
|
||||
OpusDataDecoder::~OpusDataDecoder() {
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
if (mOpusDecoder) {
|
||||
opus_multistream_decoder_destroy(mOpusDecoder);
|
||||
mOpusDecoder = nullptr;
|
||||
|
@ -47,10 +47,8 @@ OpusDataDecoder::~OpusDataDecoder() {
|
|||
}
|
||||
|
||||
RefPtr<ShutdownPromise> OpusDataDecoder::Shutdown() {
|
||||
RefPtr<OpusDataDecoder> self = this;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self]() {
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
}
|
||||
|
||||
void OpusDataDecoder::AppendCodecDelay(MediaByteBuffer* config,
|
||||
|
@ -61,6 +59,7 @@ void OpusDataDecoder::AppendCodecDelay(MediaByteBuffer* config,
|
|||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::InitPromise> OpusDataDecoder::Init() {
|
||||
mThread = GetCurrentSerialEventTarget();
|
||||
size_t length = mInfo.mCodecSpecificConfig->Length();
|
||||
uint8_t* p = mInfo.mCodecSpecificConfig->Elements();
|
||||
if (length < sizeof(uint64_t)) {
|
||||
|
@ -179,12 +178,7 @@ nsresult OpusDataDecoder::DecodeHeader(const unsigned char* aData,
|
|||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::Decode(
|
||||
MediaRawData* aSample) {
|
||||
return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
|
||||
&OpusDataDecoder::ProcessDecode, aSample);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::ProcessDecode(
|
||||
MediaRawData* aSample) {
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
uint32_t channels = mOpusParser->mChannels;
|
||||
|
||||
if (mPaddingDiscarded) {
|
||||
|
@ -349,29 +343,23 @@ RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::ProcessDecode(
|
|||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::Drain() {
|
||||
RefPtr<OpusDataDecoder> self = this;
|
||||
// InvokeAsync dispatches a task that will be run after any pending decode
|
||||
// completes. As such, once the drain task run, there's nothing more to do.
|
||||
return InvokeAsync(mTaskQueue, __func__, [] {
|
||||
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::FlushPromise> OpusDataDecoder::Flush() {
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
if (!mOpusDecoder) {
|
||||
return FlushPromise::CreateAndResolve(true, __func__);
|
||||
}
|
||||
|
||||
RefPtr<OpusDataDecoder> self = this;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
|
||||
MOZ_ASSERT(mOpusDecoder);
|
||||
// Reset the decoder.
|
||||
opus_multistream_decoder_ctl(mOpusDecoder, OPUS_RESET_STATE);
|
||||
mSkip = mOpusParser->mPreSkip;
|
||||
mPaddingDiscarded = false;
|
||||
mLastFrameTime.reset();
|
||||
return FlushPromise::CreateAndResolve(true, __func__);
|
||||
});
|
||||
MOZ_ASSERT(mOpusDecoder);
|
||||
// Reset the decoder.
|
||||
opus_multistream_decoder_ctl(mOpusDecoder, OPUS_RESET_STATE);
|
||||
mSkip = mOpusParser->mPreSkip;
|
||||
mPaddingDiscarded = false;
|
||||
mLastFrameTime.reset();
|
||||
return FlushPromise::CreateAndResolve(true, __func__);
|
||||
}
|
||||
|
||||
/* static */
|
||||
|
|
|
@ -47,10 +47,8 @@ class OpusDataDecoder : public MediaDataDecoder,
|
|||
private:
|
||||
nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
|
||||
|
||||
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
|
||||
|
||||
const AudioInfo& mInfo;
|
||||
const RefPtr<TaskQueue> mTaskQueue;
|
||||
nsCOMPtr<nsISerialEventTarget> mThread;
|
||||
|
||||
// Opus decoder state
|
||||
UniquePtr<OpusParser> mOpusParser;
|
||||
|
|
|
@ -5,14 +5,16 @@
|
|||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "TheoraDecoder.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "ImageContainer.h"
|
||||
#include "TimeUnits.h"
|
||||
#include "XiphExtradata.h"
|
||||
#include "gfx2DGlue.h"
|
||||
#include "mozilla/PodOperations.h"
|
||||
#include "mozilla/TaskQueue.h"
|
||||
#include "nsError.h"
|
||||
#include "ImageContainer.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#undef LOG
|
||||
#define LOG(arg, ...) \
|
||||
|
@ -42,7 +44,9 @@ ogg_packet InitTheoraPacket(const unsigned char* aData, size_t aLength,
|
|||
TheoraDecoder::TheoraDecoder(const CreateDecoderParams& aParams)
|
||||
: mImageAllocator(aParams.mKnowsCompositor),
|
||||
mImageContainer(aParams.mImageContainer),
|
||||
mTaskQueue(aParams.mTaskQueue),
|
||||
mTaskQueue(
|
||||
new TaskQueue(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
|
||||
"TheoraDecoder")),
|
||||
mTheoraInfo{},
|
||||
mTheoraComment{},
|
||||
mTheoraSetupInfo(nullptr),
|
||||
|
@ -66,7 +70,7 @@ RefPtr<ShutdownPromise> TheoraDecoder::Shutdown() {
|
|||
th_decode_free(mTheoraDecoderContext);
|
||||
mTheoraDecoderContext = nullptr;
|
||||
}
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
return mTaskQueue->BeginShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -6,10 +6,11 @@
|
|||
#if !defined(TheoraDecoder_h_)
|
||||
# define TheoraDecoder_h_
|
||||
|
||||
# include <stdint.h>
|
||||
|
||||
# include "PlatformDecoderModule.h"
|
||||
# include "ogg/ogg.h"
|
||||
# include "theora/theoradec.h"
|
||||
# include <stdint.h>
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
|
@ -39,9 +40,9 @@ class TheoraDecoder : public MediaDataDecoder,
|
|||
|
||||
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
|
||||
|
||||
RefPtr<layers::KnowsCompositor> mImageAllocator;
|
||||
RefPtr<layers::ImageContainer> mImageContainer;
|
||||
RefPtr<TaskQueue> mTaskQueue;
|
||||
const RefPtr<layers::KnowsCompositor> mImageAllocator;
|
||||
const RefPtr<layers::ImageContainer> mImageContainer;
|
||||
const RefPtr<TaskQueue> mTaskQueue;
|
||||
|
||||
// Theora header & decoder state
|
||||
th_info mTheoraInfo;
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include "gfx2DGlue.h"
|
||||
#include "mozilla/PodOperations.h"
|
||||
#include "mozilla/SyncRunnable.h"
|
||||
#include "mozilla/TaskQueue.h"
|
||||
#include "mozilla/Unused.h"
|
||||
#include "nsError.h"
|
||||
#include "prsystem.h"
|
||||
|
@ -68,7 +69,8 @@ static nsresult InitContext(vpx_codec_ctx_t* aCtx, const VideoInfo& aInfo,
|
|||
VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
|
||||
: mImageContainer(aParams.mImageContainer),
|
||||
mImageAllocator(aParams.mKnowsCompositor),
|
||||
mTaskQueue(aParams.mTaskQueue),
|
||||
mTaskQueue(new TaskQueue(
|
||||
GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER), "VPXDecoder")),
|
||||
mInfo(aParams.VideoConfig()),
|
||||
mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType)),
|
||||
mLowLatency(
|
||||
|
@ -85,7 +87,7 @@ RefPtr<ShutdownPromise> VPXDecoder::Shutdown() {
|
|||
return InvokeAsync(mTaskQueue, __func__, [self]() {
|
||||
vpx_codec_destroy(&self->mVPX);
|
||||
vpx_codec_destroy(&self->mVPXAlpha);
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
return self->mTaskQueue->BeginShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -112,7 +114,7 @@ RefPtr<MediaDataDecoder::FlushPromise> VPXDecoder::Flush() {
|
|||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> VPXDecoder::ProcessDecode(
|
||||
MediaRawData* aSample) {
|
||||
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
|
||||
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
|
||||
|
||||
if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(),
|
||||
aSample->Size(), nullptr, 0)) {
|
||||
|
|
|
@ -5,13 +5,13 @@
|
|||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "VorbisDecoder.h"
|
||||
|
||||
#include "VideoUtils.h"
|
||||
#include "VorbisUtils.h"
|
||||
#include "XiphExtradata.h"
|
||||
|
||||
#include "mozilla/Logging.h"
|
||||
#include "mozilla/PodOperations.h"
|
||||
#include "mozilla/SyncRunnable.h"
|
||||
#include "VideoUtils.h"
|
||||
|
||||
#undef LOG
|
||||
#define LOG(type, msg) MOZ_LOG(sPDMLog, type, msg)
|
||||
|
@ -32,10 +32,7 @@ ogg_packet InitVorbisPacket(const unsigned char* aData, size_t aLength,
|
|||
}
|
||||
|
||||
VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
|
||||
: mInfo(aParams.AudioConfig()),
|
||||
mTaskQueue(aParams.mTaskQueue),
|
||||
mPacketCount(0),
|
||||
mFrames(0) {
|
||||
: mInfo(aParams.AudioConfig()), mPacketCount(0), mFrames(0) {
|
||||
// Zero these member vars to avoid crashes in Vorbis clear functions when
|
||||
// destructor is called before |Init|.
|
||||
PodZero(&mVorbisBlock);
|
||||
|
@ -45,6 +42,7 @@ VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
|
|||
}
|
||||
|
||||
VorbisDataDecoder::~VorbisDataDecoder() {
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
vorbis_block_clear(&mVorbisBlock);
|
||||
vorbis_dsp_clear(&mVorbisDsp);
|
||||
vorbis_info_clear(&mVorbisInfo);
|
||||
|
@ -52,13 +50,12 @@ VorbisDataDecoder::~VorbisDataDecoder() {
|
|||
}
|
||||
|
||||
RefPtr<ShutdownPromise> VorbisDataDecoder::Shutdown() {
|
||||
RefPtr<VorbisDataDecoder> self = this;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self]() {
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::InitPromise> VorbisDataDecoder::Init() {
|
||||
mThread = GetCurrentSerialEventTarget();
|
||||
vorbis_info_init(&mVorbisInfo);
|
||||
vorbis_comment_init(&mVorbisComment);
|
||||
PodZero(&mVorbisDsp);
|
||||
|
@ -141,13 +138,7 @@ nsresult VorbisDataDecoder::DecodeHeader(const unsigned char* aData,
|
|||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::Decode(
|
||||
MediaRawData* aSample) {
|
||||
return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
|
||||
&VorbisDataDecoder::ProcessDecode, aSample);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::ProcessDecode(
|
||||
MediaRawData* aSample) {
|
||||
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
|
||||
const unsigned char* aData = aSample->Data();
|
||||
size_t aLength = aSample->Size();
|
||||
|
@ -268,21 +259,18 @@ RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::ProcessDecode(
|
|||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::Drain() {
|
||||
return InvokeAsync(mTaskQueue, __func__, [] {
|
||||
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::FlushPromise> VorbisDataDecoder::Flush() {
|
||||
RefPtr<VorbisDataDecoder> self = this;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self]() {
|
||||
// Ignore failed results from vorbis_synthesis_restart. They
|
||||
// aren't fatal and it fails when ResetDecode is called at a
|
||||
// time when no vorbis data has been read.
|
||||
vorbis_synthesis_restart(&self->mVorbisDsp);
|
||||
self->mLastFrameTime.reset();
|
||||
return FlushPromise::CreateAndResolve(true, __func__);
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
// Ignore failed results from vorbis_synthesis_restart. They
|
||||
// aren't fatal and it fails when ResetDecode is called at a
|
||||
// time when no vorbis data has been read.
|
||||
vorbis_synthesis_restart(&mVorbisDsp);
|
||||
mLastFrameTime.reset();
|
||||
return FlushPromise::CreateAndResolve(true, __func__);
|
||||
}
|
||||
|
||||
/* static */
|
||||
|
|
|
@ -41,10 +41,9 @@ class VorbisDataDecoder : public MediaDataDecoder,
|
|||
|
||||
private:
|
||||
nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
|
||||
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
|
||||
|
||||
const AudioInfo& mInfo;
|
||||
const RefPtr<TaskQueue> mTaskQueue;
|
||||
nsCOMPtr<nsISerialEventTarget> mThread;
|
||||
|
||||
// Vorbis decoder state
|
||||
vorbis_info mVorbisInfo;
|
||||
|
|
|
@ -4,12 +4,13 @@
|
|||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "WAVDecoder.h"
|
||||
|
||||
#include "AudioSampleFormat.h"
|
||||
#include "BufferReader.h"
|
||||
#include "VideoUtils.h"
|
||||
#include "mozilla/Casting.h"
|
||||
#include "mozilla/SyncRunnable.h"
|
||||
#include "VideoUtils.h"
|
||||
#include "WAVDecoder.h"
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
|
@ -43,27 +44,21 @@ int16_t DecodeULawSample(uint8_t aValue) {
|
|||
}
|
||||
|
||||
WaveDataDecoder::WaveDataDecoder(const CreateDecoderParams& aParams)
|
||||
: mInfo(aParams.AudioConfig()), mTaskQueue(aParams.mTaskQueue) {}
|
||||
: mInfo(aParams.AudioConfig()) {}
|
||||
|
||||
RefPtr<ShutdownPromise> WaveDataDecoder::Shutdown() {
|
||||
RefPtr<WaveDataDecoder> self = this;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self]() {
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
return ShutdownPromise::CreateAndResolve(true, __func__);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::InitPromise> WaveDataDecoder::Init() {
|
||||
mThread = GetCurrentSerialEventTarget();
|
||||
return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> WaveDataDecoder::Decode(
|
||||
MediaRawData* aSample) {
|
||||
return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
|
||||
&WaveDataDecoder::ProcessDecode, aSample);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> WaveDataDecoder::ProcessDecode(
|
||||
MediaRawData* aSample) {
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
size_t aLength = aSample->Size();
|
||||
BufferReader aReader(aSample->Data(), aLength);
|
||||
int64_t aOffset = aSample->mOffset;
|
||||
|
@ -141,15 +136,13 @@ RefPtr<MediaDataDecoder::DecodePromise> WaveDataDecoder::ProcessDecode(
|
|||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> WaveDataDecoder::Drain() {
|
||||
return InvokeAsync(mTaskQueue, __func__, [] {
|
||||
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::FlushPromise> WaveDataDecoder::Flush() {
|
||||
return InvokeAsync(mTaskQueue, __func__, []() {
|
||||
return FlushPromise::CreateAndResolve(true, __func__);
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
return FlushPromise::CreateAndResolve(true, __func__);
|
||||
}
|
||||
|
||||
/* static */
|
||||
|
|
|
@ -31,9 +31,8 @@ class WaveDataDecoder : public MediaDataDecoder,
|
|||
}
|
||||
|
||||
private:
|
||||
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
|
||||
const AudioInfo& mInfo;
|
||||
const RefPtr<TaskQueue> mTaskQueue;
|
||||
nsCOMPtr<nsISerialEventTarget> mThread;
|
||||
};
|
||||
|
||||
} // namespace mozilla
|
||||
|
|
|
@ -71,15 +71,13 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
public DecoderDoctorLifeLogger<EMEDecryptor> {
|
||||
public:
|
||||
EMEDecryptor(MediaDataDecoder* aDecoder, CDMProxy* aProxy,
|
||||
TaskQueue* aDecodeTaskQueue, TrackInfo::TrackType aType,
|
||||
TrackInfo::TrackType aType,
|
||||
MediaEventProducer<TrackInfo::TrackType>* aOnWaitingForKey,
|
||||
UniquePtr<ADTSSampleConverter> aConverter = nullptr)
|
||||
: mDecoder(aDecoder),
|
||||
mTaskQueue(aDecodeTaskQueue),
|
||||
mProxy(aProxy),
|
||||
mSamplesWaitingForKey(
|
||||
new SamplesWaitingForKey(mProxy, aType, aOnWaitingForKey)),
|
||||
mThroughputLimiter(aDecodeTaskQueue),
|
||||
mADTSSampleConverter(std::move(aConverter)),
|
||||
mIsShutdown(false) {
|
||||
DDLINKCHILD("decoder", mDecoder.get());
|
||||
|
@ -87,38 +85,38 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
|
||||
RefPtr<InitPromise> Init() override {
|
||||
MOZ_ASSERT(!mIsShutdown);
|
||||
mThread = GetCurrentSerialEventTarget();
|
||||
mThroughputLimiter.emplace(mThread);
|
||||
|
||||
return mDecoder->Init();
|
||||
}
|
||||
|
||||
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override {
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
MOZ_RELEASE_ASSERT(mDecrypts.Count() == 0,
|
||||
"Can only process one sample at a time");
|
||||
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
|
||||
|
||||
RefPtr<EMEDecryptor> self = this;
|
||||
RefPtr<MediaRawData> sample = aSample;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self, this, sample]() {
|
||||
MOZ_RELEASE_ASSERT(mDecrypts.Count() == 0,
|
||||
"Can only process one sample at a time");
|
||||
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
|
||||
|
||||
mSamplesWaitingForKey->WaitIfKeyNotUsable(sample)
|
||||
->Then(
|
||||
mTaskQueue, __func__,
|
||||
[self](const RefPtr<MediaRawData>& aSample) {
|
||||
self->mKeyRequest.Complete();
|
||||
self->ThrottleDecode(aSample);
|
||||
},
|
||||
[self]() { self->mKeyRequest.Complete(); })
|
||||
->Track(mKeyRequest);
|
||||
|
||||
return p;
|
||||
});
|
||||
mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)
|
||||
->Then(
|
||||
mThread, __func__,
|
||||
[self](const RefPtr<MediaRawData>& aSample) {
|
||||
self->mKeyRequest.Complete();
|
||||
self->ThrottleDecode(aSample);
|
||||
},
|
||||
[self]() { self->mKeyRequest.Complete(); })
|
||||
->Track(mKeyRequest);
|
||||
return p;
|
||||
}
|
||||
|
||||
void ThrottleDecode(MediaRawData* aSample) {
|
||||
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
|
||||
RefPtr<EMEDecryptor> self = this;
|
||||
mThroughputLimiter.Throttle(aSample)
|
||||
mThroughputLimiter->Throttle(aSample)
|
||||
->Then(
|
||||
mTaskQueue, __func__,
|
||||
mThread, __func__,
|
||||
[self](RefPtr<MediaRawData> aSample) {
|
||||
self->mThrottleRequest.Complete();
|
||||
self->AttemptDecode(aSample);
|
||||
|
@ -128,7 +126,7 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
}
|
||||
|
||||
void AttemptDecode(MediaRawData* aSample) {
|
||||
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
if (mIsShutdown) {
|
||||
NS_WARNING("EME encrypted sample arrived after shutdown");
|
||||
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
|
||||
|
@ -146,13 +144,13 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
|
||||
mDecrypts.Put(aSample, new DecryptPromiseRequestHolder());
|
||||
mProxy->Decrypt(aSample)
|
||||
->Then(mTaskQueue, __func__, this, &EMEDecryptor::Decrypted,
|
||||
->Then(mThread, __func__, this, &EMEDecryptor::Decrypted,
|
||||
&EMEDecryptor::Decrypted)
|
||||
->Track(*mDecrypts.Get(aSample));
|
||||
}
|
||||
|
||||
void Decrypted(const DecryptResult& aDecrypted) {
|
||||
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
MOZ_ASSERT(aDecrypted.mSample);
|
||||
|
||||
UniquePtr<DecryptPromiseRequestHolder> holder;
|
||||
|
@ -198,7 +196,7 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
writer->mCrypto = CryptoSample();
|
||||
RefPtr<EMEDecryptor> self = this;
|
||||
mDecoder->Decode(aDecrypted.mSample)
|
||||
->Then(mTaskQueue, __func__,
|
||||
->Then(mThread, __func__,
|
||||
[self](DecodePromise::ResolveOrRejectValue&& aValue) {
|
||||
self->mDecodeRequest.Complete();
|
||||
self->mDecodePromise.ResolveOrReject(std::move(aValue),
|
||||
|
@ -209,54 +207,47 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
}
|
||||
|
||||
RefPtr<FlushPromise> Flush() override {
|
||||
RefPtr<EMEDecryptor> self = this;
|
||||
return InvokeAsync(
|
||||
mTaskQueue, __func__, [self, this]() -> RefPtr<FlushPromise> {
|
||||
MOZ_ASSERT(!mIsShutdown);
|
||||
mKeyRequest.DisconnectIfExists();
|
||||
mThrottleRequest.DisconnectIfExists();
|
||||
mDecodeRequest.DisconnectIfExists();
|
||||
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
|
||||
mThroughputLimiter.Flush();
|
||||
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
|
||||
auto holder = iter.UserData();
|
||||
holder->DisconnectIfExists();
|
||||
iter.Remove();
|
||||
}
|
||||
RefPtr<SamplesWaitingForKey> k = mSamplesWaitingForKey;
|
||||
return mDecoder->Flush()->Then(mTaskQueue, __func__, [k]() {
|
||||
k->Flush();
|
||||
return FlushPromise::CreateAndResolve(true, __func__);
|
||||
});
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
MOZ_ASSERT(!mIsShutdown);
|
||||
mKeyRequest.DisconnectIfExists();
|
||||
mThrottleRequest.DisconnectIfExists();
|
||||
mDecodeRequest.DisconnectIfExists();
|
||||
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
|
||||
mThroughputLimiter->Flush();
|
||||
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
|
||||
auto holder = iter.UserData();
|
||||
holder->DisconnectIfExists();
|
||||
iter.Remove();
|
||||
}
|
||||
RefPtr<SamplesWaitingForKey> k = mSamplesWaitingForKey;
|
||||
return mDecoder->Flush()->Then(mThread, __func__, [k]() {
|
||||
k->Flush();
|
||||
return FlushPromise::CreateAndResolve(true, __func__);
|
||||
});
|
||||
}
|
||||
|
||||
RefPtr<DecodePromise> Drain() override {
|
||||
RefPtr<EMEDecryptor> self = this;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
|
||||
MOZ_ASSERT(!mIsShutdown);
|
||||
MOZ_ASSERT(mDecodePromise.IsEmpty() && !mDecodeRequest.Exists(),
|
||||
"Must wait for decoding to complete");
|
||||
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
|
||||
auto holder = iter.UserData();
|
||||
holder->DisconnectIfExists();
|
||||
iter.Remove();
|
||||
}
|
||||
return mDecoder->Drain();
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
MOZ_ASSERT(!mIsShutdown);
|
||||
MOZ_ASSERT(mDecodePromise.IsEmpty() && !mDecodeRequest.Exists(),
|
||||
"Must wait for decoding to complete");
|
||||
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
|
||||
auto holder = iter.UserData();
|
||||
holder->DisconnectIfExists();
|
||||
iter.Remove();
|
||||
}
|
||||
return mDecoder->Drain();
|
||||
}
|
||||
|
||||
RefPtr<ShutdownPromise> Shutdown() override {
|
||||
RefPtr<EMEDecryptor> self = this;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
|
||||
MOZ_ASSERT(!mIsShutdown);
|
||||
mIsShutdown = true;
|
||||
mSamplesWaitingForKey->BreakCycles();
|
||||
mSamplesWaitingForKey = nullptr;
|
||||
RefPtr<MediaDataDecoder> decoder = std::move(mDecoder);
|
||||
mProxy = nullptr;
|
||||
return decoder->Shutdown();
|
||||
});
|
||||
MOZ_ASSERT(mThread->IsOnCurrentThread());
|
||||
MOZ_ASSERT(!mIsShutdown);
|
||||
mIsShutdown = true;
|
||||
mSamplesWaitingForKey->BreakCycles();
|
||||
mSamplesWaitingForKey = nullptr;
|
||||
RefPtr<MediaDataDecoder> decoder = std::move(mDecoder);
|
||||
mProxy = nullptr;
|
||||
return decoder->Shutdown();
|
||||
}
|
||||
|
||||
nsCString GetDescriptionName() const override {
|
||||
|
@ -269,13 +260,13 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
|
||||
private:
|
||||
RefPtr<MediaDataDecoder> mDecoder;
|
||||
RefPtr<TaskQueue> mTaskQueue;
|
||||
nsCOMPtr<nsISerialEventTarget> mThread;
|
||||
RefPtr<CDMProxy> mProxy;
|
||||
nsClassHashtable<nsRefPtrHashKey<MediaRawData>, DecryptPromiseRequestHolder>
|
||||
mDecrypts;
|
||||
RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
|
||||
MozPromiseRequestHolder<SamplesWaitingForKey::WaitForKeyPromise> mKeyRequest;
|
||||
DecryptThroughputLimit mThroughputLimiter;
|
||||
Maybe<DecryptThroughputLimit> mThroughputLimiter;
|
||||
MozPromiseRequestHolder<DecryptThroughputLimit::ThrottlePromise>
|
||||
mThrottleRequest;
|
||||
MozPromiseHolder<DecodePromise> mDecodePromise;
|
||||
|
@ -287,9 +278,10 @@ class EMEDecryptor : public MediaDataDecoder,
|
|||
};
|
||||
|
||||
EMEMediaDataDecoderProxy::EMEMediaDataDecoderProxy(
|
||||
already_AddRefed<nsISerialEventTarget> aProxyThread, CDMProxy* aProxy,
|
||||
const CreateDecoderParams& aParams)
|
||||
: MediaDataDecoderProxy(std::move(aProxyThread)),
|
||||
const CreateDecoderParams& aParams,
|
||||
already_AddRefed<MediaDataDecoder> aProxyDecoder,
|
||||
already_AddRefed<nsISerialEventTarget> aProxyThread, CDMProxy* aProxy)
|
||||
: MediaDataDecoderProxy(std::move(aProxyDecoder), std::move(aProxyThread)),
|
||||
mThread(GetCurrentSerialEventTarget()),
|
||||
mSamplesWaitingForKey(new SamplesWaitingForKey(
|
||||
aProxy, aParams.mType, aParams.mOnWaitingForKeyEvent)),
|
||||
|
@ -373,7 +365,10 @@ static already_AddRefed<MediaDataDecoderProxy> CreateDecoderWrapper(
|
|||
return nullptr;
|
||||
}
|
||||
RefPtr<MediaDataDecoderProxy> decoder(
|
||||
new EMEMediaDataDecoderProxy(thread.forget(), aProxy, aParams));
|
||||
new EMEMediaDataDecoderProxy(aParams,
|
||||
do_AddRef(new ChromiumCDMVideoDecoder(
|
||||
GMPVideoDecoderParams(aParams), aProxy)),
|
||||
thread.forget(), aProxy));
|
||||
return decoder.forget();
|
||||
}
|
||||
|
||||
|
@ -391,8 +386,6 @@ already_AddRefed<MediaDataDecoder> EMEDecoderModule::CreateVideoDecoder(
|
|||
// GMP decodes. Assume that means it can decrypt too.
|
||||
RefPtr<MediaDataDecoderProxy> wrapper =
|
||||
CreateDecoderWrapper(mProxy, aParams);
|
||||
auto params = GMPVideoDecoderParams(aParams);
|
||||
wrapper->SetProxyTarget(new ChromiumCDMVideoDecoder(params, mProxy));
|
||||
return wrapper.forget();
|
||||
}
|
||||
|
||||
|
@ -402,9 +395,8 @@ already_AddRefed<MediaDataDecoder> EMEDecoderModule::CreateVideoDecoder(
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder> emeDecoder(
|
||||
new EMEDecryptor(decoder, mProxy, aParams.mTaskQueue, aParams.mType,
|
||||
aParams.mOnWaitingForKeyEvent));
|
||||
RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(
|
||||
decoder, mProxy, aParams.mType, aParams.mOnWaitingForKeyEvent));
|
||||
return emeDecoder.forget();
|
||||
}
|
||||
|
||||
|
@ -435,7 +427,7 @@ already_AddRefed<MediaDataDecoder> EMEDecoderModule::CreateAudioDecoder(
|
|||
}
|
||||
|
||||
RefPtr<MediaDataDecoder> emeDecoder(
|
||||
new EMEDecryptor(decoder, mProxy, aParams.mTaskQueue, aParams.mType,
|
||||
new EMEDecryptor(decoder, mProxy, aParams.mType,
|
||||
aParams.mOnWaitingForKeyEvent, std::move(converter)));
|
||||
return emeDecoder.forget();
|
||||
}
|
||||
|
|
|
@ -45,9 +45,10 @@ class EMEMediaDataDecoderProxy
|
|||
: public MediaDataDecoderProxy,
|
||||
public DecoderDoctorLifeLogger<EMEMediaDataDecoderProxy> {
|
||||
public:
|
||||
EMEMediaDataDecoderProxy(already_AddRefed<nsISerialEventTarget> aProxyThread,
|
||||
CDMProxy* aProxy,
|
||||
const CreateDecoderParams& aParams);
|
||||
EMEMediaDataDecoderProxy(const CreateDecoderParams& aParams,
|
||||
already_AddRefed<MediaDataDecoder> aProxyDecoder,
|
||||
already_AddRefed<nsISerialEventTarget> aProxyThread,
|
||||
CDMProxy* aProxy);
|
||||
EMEMediaDataDecoderProxy(const CreateDecoderParams& aParams,
|
||||
already_AddRefed<MediaDataDecoder> aProxyDecoder,
|
||||
CDMProxy* aProxy);
|
||||
|
|
|
@ -27,7 +27,8 @@ GMPDecoderModule::GMPDecoderModule() = default;
|
|||
|
||||
GMPDecoderModule::~GMPDecoderModule() = default;
|
||||
|
||||
static already_AddRefed<MediaDataDecoderProxy> CreateDecoderWrapper() {
|
||||
static already_AddRefed<MediaDataDecoderProxy> CreateDecoderWrapper(
|
||||
GMPVideoDecoderParams&& aParams) {
|
||||
RefPtr<gmp::GeckoMediaPluginService> s(
|
||||
gmp::GeckoMediaPluginService::GetGeckoMediaPluginService());
|
||||
if (!s) {
|
||||
|
@ -37,8 +38,9 @@ static already_AddRefed<MediaDataDecoderProxy> CreateDecoderWrapper() {
|
|||
if (!thread) {
|
||||
return nullptr;
|
||||
}
|
||||
RefPtr<MediaDataDecoderProxy> decoder(
|
||||
new MediaDataDecoderProxy(thread.forget()));
|
||||
|
||||
RefPtr<MediaDataDecoderProxy> decoder(new MediaDataDecoderProxy(
|
||||
do_AddRef(new GMPVideoDecoder(std::move(aParams))), thread.forget()));
|
||||
return decoder.forget();
|
||||
}
|
||||
|
||||
|
@ -50,10 +52,7 @@ already_AddRefed<MediaDataDecoder> GMPDecoderModule::CreateVideoDecoder(
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper();
|
||||
auto params = GMPVideoDecoderParams(aParams);
|
||||
wrapper->SetProxyTarget(new GMPVideoDecoder(params));
|
||||
return wrapper.forget();
|
||||
return CreateDecoderWrapper(GMPVideoDecoderParams(aParams));
|
||||
}
|
||||
|
||||
already_AddRefed<MediaDataDecoder> GMPDecoderModule::CreateAudioDecoder(
|
||||
|
|
|
@ -31,7 +31,6 @@ static bool IsOnGMPThread() {
|
|||
|
||||
GMPVideoDecoderParams::GMPVideoDecoderParams(const CreateDecoderParams& aParams)
|
||||
: mConfig(aParams.VideoConfig()),
|
||||
mTaskQueue(aParams.mTaskQueue),
|
||||
mImageContainer(aParams.mImageContainer),
|
||||
mLayersBackend(aParams.GetLayersBackend()),
|
||||
mCrashHelper(aParams.mCrashHelper) {}
|
||||
|
|
|
@ -20,7 +20,6 @@ struct GMPVideoDecoderParams {
|
|||
explicit GMPVideoDecoderParams(const CreateDecoderParams& aParams);
|
||||
|
||||
const VideoInfo& mConfig;
|
||||
TaskQueue* mTaskQueue;
|
||||
layers::ImageContainer* mImageContainer;
|
||||
layers::LayersBackend mLayersBackend;
|
||||
RefPtr<GMPCrashHelper> mCrashHelper;
|
||||
|
|
|
@ -4,6 +4,8 @@
|
|||
|
||||
#include "RemoteDataDecoder.h"
|
||||
|
||||
#include <jni.h>
|
||||
|
||||
#include "AndroidBridge.h"
|
||||
#include "AndroidDecoderModule.h"
|
||||
#include "EMEDecoderModule.h"
|
||||
|
@ -11,21 +13,18 @@
|
|||
#include "JavaCallbacksSupport.h"
|
||||
#include "MediaData.h"
|
||||
#include "MediaInfo.h"
|
||||
#include "SimpleMap.h"
|
||||
#include "VPXDecoder.h"
|
||||
#include "VideoUtils.h"
|
||||
#include "mozilla/java/CodecProxyWrappers.h"
|
||||
#include "mozilla/java/GeckoSurfaceWrappers.h"
|
||||
#include "mozilla/java/SampleWrappers.h"
|
||||
#include "mozilla/java/SampleBufferWrappers.h"
|
||||
#include "mozilla/java/SampleWrappers.h"
|
||||
#include "mozilla/java/SurfaceAllocatorWrappers.h"
|
||||
#include "SimpleMap.h"
|
||||
#include "VideoUtils.h"
|
||||
#include "VPXDecoder.h"
|
||||
|
||||
#include "nsPromiseFlatString.h"
|
||||
#include "nsThreadUtils.h"
|
||||
#include "prlog.h"
|
||||
|
||||
#include <jni.h>
|
||||
|
||||
#undef LOG
|
||||
#define LOG(arg, ...) \
|
||||
MOZ_LOG(sAndroidDecoderModuleLog, mozilla::LogLevel::Debug, \
|
||||
|
@ -119,9 +118,9 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
|
|||
|
||||
RemoteVideoDecoder(const VideoInfo& aConfig,
|
||||
java::sdk::MediaFormat::Param aFormat,
|
||||
const nsString& aDrmStubId, TaskQueue* aTaskQueue)
|
||||
const nsString& aDrmStubId)
|
||||
: RemoteDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType,
|
||||
aFormat, aDrmStubId, aTaskQueue),
|
||||
aFormat, aDrmStubId),
|
||||
mConfig(aConfig) {}
|
||||
|
||||
~RemoteVideoDecoder() {
|
||||
|
@ -131,6 +130,7 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
|
|||
}
|
||||
|
||||
RefPtr<InitPromise> Init() override {
|
||||
mThread = GetCurrentSerialEventTarget();
|
||||
java::sdk::BufferInfo::LocalRef bufferInfo;
|
||||
if (NS_FAILED(java::sdk::BufferInfo::New(&bufferInfo)) || !bufferInfo) {
|
||||
return InitPromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__);
|
||||
|
@ -172,30 +172,25 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
|
|||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::FlushPromise> Flush() override {
|
||||
RefPtr<RemoteVideoDecoder> self = this;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
|
||||
mInputInfos.Clear();
|
||||
mSeekTarget.reset();
|
||||
mLatestOutputTime.reset();
|
||||
return RemoteDataDecoder::ProcessFlush();
|
||||
});
|
||||
AssertOnThread();
|
||||
mInputInfos.Clear();
|
||||
mSeekTarget.reset();
|
||||
mLatestOutputTime.reset();
|
||||
return RemoteDataDecoder::Flush();
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::DecodePromise> Decode(
|
||||
MediaRawData* aSample) override {
|
||||
RefPtr<RemoteVideoDecoder> self = this;
|
||||
RefPtr<MediaRawData> sample = aSample;
|
||||
return InvokeAsync(mTaskQueue, __func__, [self, sample]() {
|
||||
const VideoInfo* config = sample->mTrackInfo
|
||||
? sample->mTrackInfo->GetAsVideoInfo()
|
||||
: &self->mConfig;
|
||||
MOZ_ASSERT(config);
|
||||
AssertOnThread();
|
||||
|
||||
InputInfo info(sample->mDuration.ToMicroseconds(), config->mImage,
|
||||
config->mDisplay);
|
||||
self->mInputInfos.Insert(sample->mTime.ToMicroseconds(), info);
|
||||
return self->RemoteDataDecoder::ProcessDecode(sample);
|
||||
});
|
||||
const VideoInfo* config =
|
||||
aSample->mTrackInfo ? aSample->mTrackInfo->GetAsVideoInfo() : &mConfig;
|
||||
MOZ_ASSERT(config);
|
||||
|
||||
InputInfo info(aSample->mDuration.ToMicroseconds(), config->mImage,
|
||||
config->mDisplay);
|
||||
mInputInfos.Insert(aSample->mTime.ToMicroseconds(), info);
|
||||
return RemoteDataDecoder::Decode(aSample);
|
||||
}
|
||||
|
||||
bool SupportDecoderRecycling() const override {
|
||||
|
@ -203,22 +198,26 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
|
|||
}
|
||||
|
||||
void SetSeekThreshold(const TimeUnit& aTime) override {
|
||||
RefPtr<RemoteVideoDecoder> self = this;
|
||||
nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
|
||||
"RemoteVideoDecoder::SetSeekThreshold", [self, aTime]() {
|
||||
if (aTime.IsValid()) {
|
||||
self->mSeekTarget = Some(aTime);
|
||||
} else {
|
||||
self->mSeekTarget.reset();
|
||||
}
|
||||
});
|
||||
nsresult rv = mTaskQueue->Dispatch(runnable.forget());
|
||||
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
|
||||
Unused << rv;
|
||||
auto setter = [self = RefPtr{this}, aTime] {
|
||||
if (aTime.IsValid()) {
|
||||
self->mSeekTarget = Some(aTime);
|
||||
} else {
|
||||
self->mSeekTarget.reset();
|
||||
}
|
||||
};
|
||||
if (mThread->IsOnCurrentThread()) {
|
||||
setter();
|
||||
} else {
|
||||
nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
|
||||
"RemoteVideoDecoder::SetSeekThreshold", std::move(setter));
|
||||
nsresult rv = mThread->Dispatch(runnable.forget());
|
||||
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
|
||||
Unused << rv;
|
||||
}
|
||||
}
|
||||
|
||||
bool IsUsefulData(const RefPtr<MediaData>& aSample) override {
|
||||
AssertOnTaskQueue();
|
||||
AssertOnThread();
|
||||
|
||||
if (mLatestOutputTime && aSample->mTime < mLatestOutputTime.value()) {
|
||||
return false;
|
||||
|
@ -247,9 +246,9 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
|
|||
// Use GlobalRef as the parameter type to keep the Java object referenced
|
||||
// until running.
|
||||
void ProcessOutput(java::Sample::GlobalRef&& aSample) {
|
||||
if (!mTaskQueue->IsCurrentThreadIn()) {
|
||||
if (!mThread->IsOnCurrentThread()) {
|
||||
nsresult rv =
|
||||
mTaskQueue->Dispatch(NewRunnableMethod<java::Sample::GlobalRef&&>(
|
||||
mThread->Dispatch(NewRunnableMethod<java::Sample::GlobalRef&&>(
|
||||
"RemoteVideoDecoder::ProcessOutput", this,
|
||||
&RemoteVideoDecoder::ProcessOutput, std::move(aSample)));
|
||||
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
|
||||
|
@ -257,7 +256,7 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
|
|||
return;
|
||||
}
|
||||
|
||||
AssertOnTaskQueue();
|
||||
AssertOnThread();
|
||||
if (GetState() == State::SHUTDOWN) {
|
||||
aSample->Dispose();
|
||||
return;
|
||||
|
@ -324,10 +323,10 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
|
|||
bool mIsCodecSupportAdaptivePlayback = false;
|
||||
// Can be accessed on any thread, but only written on during init.
|
||||
bool mIsHardwareAccelerated = false;
|
||||
// Accessed on mTaskQueue and reader's TaskQueue. SimpleMap however is
|
||||
// Accessed on mThread and reader's thread. SimpleMap however is
|
||||
// thread-safe, so it's okay to do so.
|
||||
SimpleMap<InputInfo> mInputInfos;
|
||||
// Only accessed on the TaskQueue.
|
||||
// Only accessed on mThread.
|
||||
Maybe<TimeUnit> mSeekTarget;
|
||||
Maybe<TimeUnit> mLatestOutputTime;
|
||||
};
|
||||
|
@ -336,9 +335,9 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
|
|||
public:
|
||||
RemoteAudioDecoder(const AudioInfo& aConfig,
|
||||
java::sdk::MediaFormat::Param aFormat,
|
||||
const nsString& aDrmStubId, TaskQueue* aTaskQueue)
|
||||
const nsString& aDrmStubId)
|
||||
: RemoteDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
|
||||
aFormat, aDrmStubId, aTaskQueue) {
|
||||
aFormat, aDrmStubId) {
|
||||
JNIEnv* const env = jni::GetEnvForThread();
|
||||
|
||||
bool formatHasCSD = false;
|
||||
|
@ -353,6 +352,7 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
|
|||
}
|
||||
|
||||
RefPtr<InitPromise> Init() override {
|
||||
mThread = GetCurrentSerialEventTarget();
|
||||
java::sdk::BufferInfo::LocalRef bufferInfo;
|
||||
if (NS_FAILED(java::sdk::BufferInfo::New(&bufferInfo)) || !bufferInfo) {
|
||||
return InitPromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__);

@@ -381,23 +381,18 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
}
RefPtr<FlushPromise> Flush() override {
RefPtr<RemoteAudioDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
self->mFirstDemuxedSampleTime.reset();
return self->RemoteDataDecoder::ProcessFlush();
});
AssertOnThread();
mFirstDemuxedSampleTime.reset();
return RemoteDataDecoder::Flush();
}
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override {
RefPtr<RemoteAudioDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
return InvokeAsync(mTaskQueue, __func__, [self, sample]() {
if (!self->mFirstDemuxedSampleTime) {
MOZ_ASSERT(sample->mTime.IsValid());
self->mFirstDemuxedSampleTime.emplace(sample->mTime);
}
return self->RemoteDataDecoder::ProcessDecode(sample);
});
AssertOnThread();
if (!mFirstDemuxedSampleTime) {
MOZ_ASSERT(aSample->mTime.IsValid());
mFirstDemuxedSampleTime.emplace(aSample->mTime);
}
return RemoteDataDecoder::Decode(aSample);
}
private:

@@ -450,10 +445,10 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
}
bool ShouldDiscardSample(int64_t aSession) const {
AssertOnTaskQueue();
AssertOnThread();
// HandleOutput() runs on Android binder thread pool and could be preempted
// by RemoteDateDecoder task queue. That means ProcessOutput() could be
// scheduled after ProcessShutdown() or ProcessFlush(). We won't need the
// scheduled after Shutdown() or Flush(). We won't need the
// sample which is returned after calling Shutdown() and Flush(). We can
// check mFirstDemuxedSampleTime to know whether the Flush() has been
// called, becasue it would be reset in Flush().

@@ -466,10 +461,10 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
// until running.
void ProcessOutput(java::Sample::GlobalRef&& aSample,
java::SampleBuffer::GlobalRef&& aBuffer) {
if (!mTaskQueue->IsCurrentThreadIn()) {
nsresult rv = mTaskQueue->Dispatch(
NewRunnableMethod<java::Sample::GlobalRef&&,
java::SampleBuffer::GlobalRef&&>(
if (!mThread->IsOnCurrentThread()) {
nsresult rv =
mThread->Dispatch(NewRunnableMethod<java::Sample::GlobalRef&&,
java::SampleBuffer::GlobalRef&&>(
"RemoteAudioDecoder::ProcessOutput", this,
&RemoteAudioDecoder::ProcessOutput, std::move(aSample),
std::move(aBuffer)));

@@ -478,7 +473,7 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
return;
}
AssertOnTaskQueue();
AssertOnThread();
if (ShouldDiscardSample(aSample->Session()) || !aBuffer->IsValid()) {
aSample->Dispose();

@@ -539,8 +534,8 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
}
void ProcessOutputFormatChange(int32_t aChannels, int32_t aSampleRate) {
if (!mTaskQueue->IsCurrentThreadIn()) {
nsresult rv = mTaskQueue->Dispatch(NewRunnableMethod<int32_t, int32_t>(
if (!mThread->IsOnCurrentThread()) {
nsresult rv = mThread->Dispatch(NewRunnableMethod<int32_t, int32_t>(
"RemoteAudioDecoder::ProcessOutputFormatChange", this,
&RemoteAudioDecoder::ProcessOutputFormatChange, aChannels,
aSampleRate));

@@ -549,7 +544,7 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
return;
}
AssertOnTaskQueue();
AssertOnThread();
mOutputChannels = aChannels;
mOutputSampleRate = aSampleRate;

@@ -571,7 +566,7 @@ already_AddRefed<MediaDataDecoder> RemoteDataDecoder::CreateAudioDecoder(
nullptr);
RefPtr<MediaDataDecoder> decoder =
new RemoteAudioDecoder(config, format, aDrmStubId, aParams.mTaskQueue);
new RemoteAudioDecoder(config, format, aDrmStubId);
if (aProxy) {
decoder = new EMEMediaDataDecoderProxy(aParams, decoder.forget(), aProxy);
}

@@ -589,7 +584,7 @@ already_AddRefed<MediaDataDecoder> RemoteDataDecoder::CreateVideoDecoder(
nullptr);
RefPtr<MediaDataDecoder> decoder =
new RemoteVideoDecoder(config, format, aDrmStubId, aParams.mTaskQueue);
new RemoteVideoDecoder(config, format, aDrmStubId);
if (aProxy) {
decoder = new EMEMediaDataDecoderProxy(aParams, decoder.forget(), aProxy);
}

@@ -599,24 +594,16 @@ already_AddRefed<MediaDataDecoder> RemoteDataDecoder::CreateVideoDecoder(
RemoteDataDecoder::RemoteDataDecoder(MediaData::Type aType,
const nsACString& aMimeType,
java::sdk::MediaFormat::Param aFormat,
const nsString& aDrmStubId,
TaskQueue* aTaskQueue)
const nsString& aDrmStubId)
: mType(aType),
mMimeType(aMimeType),
mFormat(aFormat),
mDrmStubId(aDrmStubId),
mTaskQueue(aTaskQueue),
mSession(0),
mNumPendingInputs(0) {}
RefPtr<MediaDataDecoder::FlushPromise> RemoteDataDecoder::Flush() {
RefPtr<RemoteDataDecoder> self = this;
return InvokeAsync(mTaskQueue, this, __func__,
&RemoteDataDecoder::ProcessFlush);
}
RefPtr<MediaDataDecoder::FlushPromise> RemoteDataDecoder::ProcessFlush() {
AssertOnTaskQueue();
AssertOnThread();
MOZ_ASSERT(GetState() != State::SHUTDOWN);
mDecodedData = DecodedData();

@@ -629,42 +616,34 @@ RefPtr<MediaDataDecoder::FlushPromise> RemoteDataDecoder::ProcessFlush() {
}
RefPtr<MediaDataDecoder::DecodePromise> RemoteDataDecoder::Drain() {
RefPtr<RemoteDataDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
if (GetState() == State::SHUTDOWN) {
return DecodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
__func__);
}
RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
if (GetState() == State::DRAINED) {
// There's no operation to perform other than returning any already
// decoded data.
ReturnDecodedData();
return p;
}
if (GetState() == State::DRAINING) {
// Draining operation already pending, let it complete its course.
return p;
}
SetState(State::DRAINING);
self->mInputBufferInfo->Set(
0, 0, -1, java::sdk::MediaCodec::BUFFER_FLAG_END_OF_STREAM);
mSession = mJavaDecoder->Input(nullptr, self->mInputBufferInfo, nullptr);
AssertOnThread();
if (GetState() == State::SHUTDOWN) {
return DecodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
__func__);
}
RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
if (GetState() == State::DRAINED) {
// There's no operation to perform other than returning any already
// decoded data.
ReturnDecodedData();
return p;
});
}
if (GetState() == State::DRAINING) {
// Draining operation already pending, let it complete its course.
return p;
}
SetState(State::DRAINING);
mInputBufferInfo->Set(0, 0, -1,
java::sdk::MediaCodec::BUFFER_FLAG_END_OF_STREAM);
mSession = mJavaDecoder->Input(nullptr, mInputBufferInfo, nullptr);
return p;
}
RefPtr<ShutdownPromise> RemoteDataDecoder::Shutdown() {
LOG("");
RefPtr<RemoteDataDecoder> self = this;
return InvokeAsync(mTaskQueue, this, __func__,
&RemoteDataDecoder::ProcessShutdown);
}
RefPtr<ShutdownPromise> RemoteDataDecoder::ProcessShutdown() {
AssertOnTaskQueue();
AssertOnThread();
SetState(State::SHUTDOWN);
if (mJavaDecoder) {
mJavaDecoder->Release();
@@ -742,15 +721,7 @@ static java::sdk::CryptoInfo::LocalRef GetCryptoInfoFromSample(
RefPtr<MediaDataDecoder::DecodePromise> RemoteDataDecoder::Decode(
MediaRawData* aSample) {
RefPtr<RemoteDataDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
return InvokeAsync(mTaskQueue, __func__,
[self, sample]() { return self->ProcessDecode(sample); });
}
RefPtr<MediaDataDecoder::DecodePromise> RemoteDataDecoder::ProcessDecode(
MediaRawData* aSample) {
AssertOnTaskQueue();
AssertOnThread();
MOZ_ASSERT(GetState() != State::SHUTDOWN);
MOZ_ASSERT(aSample != nullptr);
jni::ByteBuffer::LocalRef bytes = jni::ByteBuffer::New(

@@ -769,7 +740,7 @@ RefPtr<MediaDataDecoder::DecodePromise> RemoteDataDecoder::ProcessDecode(
}
void RemoteDataDecoder::UpdatePendingInputStatus(PendingOp aOp) {
AssertOnTaskQueue();
AssertOnThread();
switch (aOp) {
case PendingOp::INCREASE:
mNumPendingInputs++;

@@ -784,15 +755,15 @@ void RemoteDataDecoder::UpdatePendingInputStatus(PendingOp aOp) {
}
void RemoteDataDecoder::UpdateInputStatus(int64_t aTimestamp, bool aProcessed) {
if (!mTaskQueue->IsCurrentThreadIn()) {
nsresult rv = mTaskQueue->Dispatch(NewRunnableMethod<int64_t, bool>(
if (!mThread->IsOnCurrentThread()) {
nsresult rv = mThread->Dispatch(NewRunnableMethod<int64_t, bool>(
"RemoteDataDecoder::UpdateInputStatus", this,
&RemoteDataDecoder::UpdateInputStatus, aTimestamp, aProcessed));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
return;
}
AssertOnTaskQueue();
AssertOnThread();
if (GetState() == State::SHUTDOWN) {
return;
}

@@ -810,7 +781,7 @@ void RemoteDataDecoder::UpdateInputStatus(int64_t aTimestamp, bool aProcessed) {
}
void RemoteDataDecoder::UpdateOutputStatus(RefPtr<MediaData>&& aSample) {
AssertOnTaskQueue();
AssertOnThread();
if (GetState() == State::SHUTDOWN) {
return;
}

@@ -821,7 +792,7 @@ void RemoteDataDecoder::UpdateOutputStatus(RefPtr<MediaData>&& aSample) {
}
void RemoteDataDecoder::ReturnDecodedData() {
AssertOnTaskQueue();
AssertOnThread();
MOZ_ASSERT(GetState() != State::SHUTDOWN);
// We only want to clear mDecodedData when we have resolved the promises.

@@ -836,15 +807,15 @@ void RemoteDataDecoder::ReturnDecodedData() {
}
void RemoteDataDecoder::DrainComplete() {
if (!mTaskQueue->IsCurrentThreadIn()) {
nsresult rv = mTaskQueue->Dispatch(
if (!mThread->IsOnCurrentThread()) {
nsresult rv = mThread->Dispatch(
NewRunnableMethod("RemoteDataDecoder::DrainComplete", this,
&RemoteDataDecoder::DrainComplete));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
return;
}
AssertOnTaskQueue();
AssertOnThread();
if (GetState() == State::SHUTDOWN) {
return;
}

@@ -853,14 +824,14 @@ void RemoteDataDecoder::DrainComplete() {
}
void RemoteDataDecoder::Error(const MediaResult& aError) {
if (!mTaskQueue->IsCurrentThreadIn()) {
nsresult rv = mTaskQueue->Dispatch(NewRunnableMethod<MediaResult>(
if (!mThread->IsOnCurrentThread()) {
nsresult rv = mThread->Dispatch(NewRunnableMethod<MediaResult>(
"RemoteDataDecoder::Error", this, &RemoteDataDecoder::Error, aError));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
return;
}
AssertOnTaskQueue();
AssertOnThread();
if (GetState() == State::SHUTDOWN) {
return;
}
@@ -8,9 +8,9 @@
#include "AndroidDecoderModule.h"
#include "SurfaceTexture.h"
#include "TimeUnits.h"
#include "mozilla/java/CodecProxyWrappers.h"
#include "mozilla/Maybe.h"
#include "mozilla/Monitor.h"
#include "mozilla/java/CodecProxyWrappers.h"
namespace mozilla {

@@ -39,28 +39,23 @@ class RemoteDataDecoder : public MediaDataDecoder,
virtual ~RemoteDataDecoder() {}
RemoteDataDecoder(MediaData::Type aType, const nsACString& aMimeType,
java::sdk::MediaFormat::Param aFormat,
const nsString& aDrmStubId, TaskQueue* aTaskQueue);
const nsString& aDrmStubId);
// Methods only called on mTaskQueue.
RefPtr<FlushPromise> ProcessFlush();
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
RefPtr<ShutdownPromise> ProcessShutdown();
// Methods only called on mThread.
void UpdateInputStatus(int64_t aTimestamp, bool aProcessed);
void UpdateOutputStatus(RefPtr<MediaData>&& aSample);
void ReturnDecodedData();
void DrainComplete();
void Error(const MediaResult& aError);
void AssertOnTaskQueue() const {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
}
void AssertOnThread() const { MOZ_ASSERT(mThread->IsOnCurrentThread()); }
enum class State { DRAINED, DRAINABLE, DRAINING, SHUTDOWN };
void SetState(State aState) {
AssertOnTaskQueue();
AssertOnThread();
mState = aState;
}
State GetState() const {
AssertOnTaskQueue();
AssertOnThread();
return mState;
}

@@ -76,25 +71,25 @@ class RemoteDataDecoder : public MediaDataDecoder,
java::CodecProxy::NativeCallbacks::GlobalRef mJavaCallbacks;
nsString mDrmStubId;
RefPtr<TaskQueue> mTaskQueue;
nsCOMPtr<nsISerialEventTarget> mThread;
// Preallocated Java object used as a reusable storage for input buffer
// information. Contents must be changed only on mTaskQueue.
// information. Contents must be changed only on mThread.
java::sdk::BufferInfo::GlobalRef mInputBufferInfo;
// Session ID attached to samples. It is returned by CodecProxy::Input().
// Accessed on mTaskqueue only.
// Accessed on mThread only.
int64_t mSession;
private:
enum class PendingOp { INCREASE, DECREASE, CLEAR };
void UpdatePendingInputStatus(PendingOp aOp);
size_t HasPendingInputs() {
AssertOnTaskQueue();
AssertOnThread();
return mNumPendingInputs > 0;
}
// The following members must only be accessed on mTaskqueue.
// The following members must only be accessed on mThread.
MozPromiseHolder<DecodePromise> mDecodePromise;
MozPromiseHolder<DecodePromise> mDrainPromise;
DecodedData mDecodedData;
@@ -23,10 +23,9 @@
namespace mozilla {
AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig, TaskQueue* aTaskQueue)
AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig)
: mConfig(aConfig),
mFileStreamError(false),
mTaskQueue(aTaskQueue),
mConverter(nullptr),
mOutputFormat(),
mStream(nullptr),

@@ -59,24 +58,14 @@ RefPtr<MediaDataDecoder::InitPromise> AppleATDecoder::Init() {
RESULT_DETAIL("Non recognised format")),
__func__);
}
mThread = GetCurrentSerialEventTarget();
return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
}
RefPtr<MediaDataDecoder::DecodePromise> AppleATDecoder::Decode(
MediaRawData* aSample) {
LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio", aSample,
aSample->mDuration.ToMicroseconds(), aSample->mTime.ToMicroseconds(),
aSample->mKeyframe ? " keyframe" : "",
(unsigned long long)aSample->Size());
RefPtr<AppleATDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
return InvokeAsync(mTaskQueue, __func__,
[self, sample] { return self->ProcessDecode(sample); });
}
RefPtr<MediaDataDecoder::FlushPromise> AppleATDecoder::ProcessFlush() {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
RefPtr<MediaDataDecoder::FlushPromise> AppleATDecoder::Flush() {
MOZ_ASSERT(mThread->IsOnCurrentThread());
LOG("Flushing AudioToolbox AAC decoder");
mQueuedSamples.Clear();
mDecodedSamples.Clear();

@@ -95,29 +84,20 @@ RefPtr<MediaDataDecoder::FlushPromise> AppleATDecoder::ProcessFlush() {
return FlushPromise::CreateAndResolve(true, __func__);
}
RefPtr<MediaDataDecoder::FlushPromise> AppleATDecoder::Flush() {
LOG("Flushing AudioToolbox AAC decoder");
return InvokeAsync(mTaskQueue, this, __func__, &AppleATDecoder::ProcessFlush);
}
RefPtr<MediaDataDecoder::DecodePromise> AppleATDecoder::Drain() {
MOZ_ASSERT(mThread->IsOnCurrentThread());
LOG("Draining AudioToolbox AAC decoder");
RefPtr<AppleATDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [] {
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
});
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
RefPtr<ShutdownPromise> AppleATDecoder::Shutdown() {
RefPtr<AppleATDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
self->ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
});
MOZ_ASSERT(mThread->IsOnCurrentThread());
ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
}
void AppleATDecoder::ProcessShutdown() {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mThread->IsOnCurrentThread());
if (mStream) {
OSStatus rv = AudioFileStreamClose(mStream);

@@ -176,9 +156,13 @@ static OSStatus _PassthroughInputDataCallback(
return noErr;
}
RefPtr<MediaDataDecoder::DecodePromise> AppleATDecoder::ProcessDecode(
RefPtr<MediaDataDecoder::DecodePromise> AppleATDecoder::Decode(
MediaRawData* aSample) {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mThread->IsOnCurrentThread());
LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio", aSample,
aSample->mDuration.ToMicroseconds(), aSample->mTime.ToMicroseconds(),
aSample->mKeyframe ? " keyframe" : "",
(unsigned long long)aSample->Size());
MediaResult rv = NS_OK;
if (!mConverter) {

@@ -207,7 +191,7 @@ RefPtr<MediaDataDecoder::DecodePromise> AppleATDecoder::ProcessDecode(
}
MediaResult AppleATDecoder::DecodeSample(MediaRawData* aSample) {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mThread->IsOnCurrentThread());
// Array containing the queued decoded audio frames, about to be output.
nsTArray<AudioDataValue> outputData;

@@ -312,7 +296,7 @@ MediaResult AppleATDecoder::DecodeSample(MediaRawData* aSample) {
MediaResult AppleATDecoder::GetInputAudioDescription(
AudioStreamBasicDescription& aDesc, const nsTArray<uint8_t>& aExtraData) {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mThread->IsOnCurrentThread());
// Request the properties from CoreAudio using the codec magic cookie
AudioFormatInfo formatInfo;

@@ -406,7 +390,7 @@ AudioConfig::Channel ConvertChannelLabel(AudioChannelLabel id) {
// Will set mChannelLayout if a channel layout could properly be identified
// and is supported.
nsresult AppleATDecoder::SetupChannelLayout() {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mThread->IsOnCurrentThread());
// Determine the channel layout.
UInt32 propertySize;

@@ -494,7 +478,7 @@ nsresult AppleATDecoder::SetupChannelLayout() {
}
MediaResult AppleATDecoder::SetupDecoder(MediaRawData* aSample) {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mThread->IsOnCurrentThread());
static const uint32_t MAX_FRAMES = 2;
if (mFormatID == kAudioFormatMPEG4AAC && mConfig.mExtendedProfile == 2 &&

@@ -576,7 +560,7 @@ static void _MetadataCallback(void* aAppleATDecoder, AudioFileStreamID aStream,
AudioFileStreamPropertyID aProperty,
UInt32* aFlags) {
AppleATDecoder* decoder = static_cast<AppleATDecoder*>(aAppleATDecoder);
MOZ_RELEASE_ASSERT(decoder->mTaskQueue->IsCurrentThreadIn());
MOZ_RELEASE_ASSERT(decoder->mThread->IsOnCurrentThread());
LOGEX(decoder, "MetadataCallback receiving: '%s'", FourCC2Str(aProperty));
if (aProperty == kAudioFileStreamProperty_MagicCookieData) {

@@ -608,7 +592,7 @@ static void _SampleCallback(void* aSBR, UInt32 aNumBytes, UInt32 aNumPackets,
nsresult AppleATDecoder::GetImplicitAACMagicCookie(
const MediaRawData* aSample) {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mThread->IsOnCurrentThread());
// Prepend ADTS header to AAC audio.
RefPtr<MediaRawData> adtssample(aSample->Clone());
@@ -21,7 +21,7 @@ DDLoggedTypeDeclNameAndBase(AppleATDecoder, MediaDataDecoder);
class AppleATDecoder : public MediaDataDecoder,
public DecoderDoctorLifeLogger<AppleATDecoder> {
public:
AppleATDecoder(const AudioInfo& aConfig, TaskQueue* aTaskQueue);
explicit AppleATDecoder(const AudioInfo& aConfig);
~AppleATDecoder();
RefPtr<InitPromise> Init() override;

@@ -43,7 +43,7 @@ class AppleATDecoder : public MediaDataDecoder,
// the magic cookie property.
bool mFileStreamError;
const RefPtr<TaskQueue> mTaskQueue;
nsCOMPtr<nsISerialEventTarget> mThread;
private:
AudioConverterRef mConverter;

@@ -55,8 +55,6 @@ class AppleATDecoder : public MediaDataDecoder,
UniquePtr<AudioConverter> mAudioConverter;
DecodedData mDecodedSamples;
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
RefPtr<FlushPromise> ProcessFlush();
void ProcessShutdown();
MediaResult DecodeSample(MediaRawData* aSample);
MediaResult GetInputAudioDescription(AudioStreamBasicDescription& aDesc,

@@ -59,17 +59,15 @@ already_AddRefed<MediaDataDecoder> AppleDecoderModule::CreateVideoDecoder(
const CreateDecoderParams& aParams) {
RefPtr<MediaDataDecoder> decoder;
if (IsVideoSupported(aParams.VideoConfig(), aParams.mOptions)) {
decoder = new AppleVTDecoder(aParams.VideoConfig(), aParams.mTaskQueue,
aParams.mImageContainer, aParams.mOptions,
aParams.mKnowsCompositor);
decoder = new AppleVTDecoder(aParams.VideoConfig(), aParams.mImageContainer,
aParams.mOptions, aParams.mKnowsCompositor);
}
return decoder.forget();
}
already_AddRefed<MediaDataDecoder> AppleDecoderModule::CreateAudioDecoder(
const CreateDecoderParams& aParams) {
RefPtr<MediaDataDecoder> decoder =
new AppleATDecoder(aParams.AudioConfig(), aParams.mTaskQueue);
RefPtr<MediaDataDecoder> decoder = new AppleATDecoder(aParams.AudioConfig());
return decoder.forget();
}

@@ -139,7 +137,7 @@ bool AppleDecoderModule::CanCreateVP9Decoder() {
VPXDecoder::GetVPCCBox(info.mExtraData, VPXDecoder::VPXStreamInfo());
RefPtr<AppleVTDecoder> decoder =
new AppleVTDecoder(info, nullptr, nullptr, {}, nullptr);
new AppleVTDecoder(info, nullptr, {}, nullptr);
nsAutoCString reason;
MediaResult rv = decoder->InitializeSession();
bool isHardwareAccelerated = decoder->IsHardwareAccelerated(reason);
@@ -20,6 +20,7 @@
#include "gfxPlatform.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/Logging.h"
#include "mozilla/TaskQueue.h"
#include "nsThreadUtils.h"
#define LOG(...) DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, __VA_ARGS__)

@@ -30,7 +31,7 @@ namespace mozilla {
using namespace layers;
AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig, TaskQueue* aTaskQueue,
AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
layers::ImageContainer* aImageContainer,
CreateDecoderParams::OptionSet aOptions,
layers::KnowsCompositor* aKnowsCompositor)

@@ -48,7 +49,9 @@ AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig, TaskQueue* aTaskQueue,
: VPXDecoder::IsVP9(aConfig.mMimeType)
? StreamType::VP9
: StreamType::Unknown),
mTaskQueue(aTaskQueue),
mTaskQueue(
new TaskQueue(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
"AppleVTDecoder")),
mMaxRefFrames(
mStreamType != StreamType::H264 ||
aOptions.contains(CreateDecoderParams::Option::LowLatency)

@@ -119,15 +122,11 @@ RefPtr<MediaDataDecoder::DecodePromise> AppleVTDecoder::Drain() {
}
RefPtr<ShutdownPromise> AppleVTDecoder::Shutdown() {
if (mTaskQueue) {
RefPtr<AppleVTDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
self->ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
RefPtr<AppleVTDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
self->ProcessShutdown();
return self->mTaskQueue->BeginShutdown();
});
}
// Helper to fill in a timestamp structure.

@@ -145,7 +144,7 @@ static CMSampleTimingInfo TimingInfoFromSample(MediaRawData* aSample) {
}
void AppleVTDecoder::ProcessDecode(MediaRawData* aSample) {
AssertOnTaskQueueThread();
AssertOnTaskQueue();
if (mIsFlushing) {
MonitorAutoLock mon(mMonitor);

@@ -232,7 +231,7 @@ void AppleVTDecoder::ProcessShutdown() {
}
RefPtr<MediaDataDecoder::FlushPromise> AppleVTDecoder::ProcessFlush() {
AssertOnTaskQueueThread();
AssertOnTaskQueue();
nsresult rv = WaitForAsynchronousFrames();
if (NS_FAILED(rv)) {
LOG("AppleVTDecoder::Flush failed waiting for platform decoder");

@@ -249,7 +248,7 @@ RefPtr<MediaDataDecoder::FlushPromise> AppleVTDecoder::ProcessFlush() {
}
RefPtr<MediaDataDecoder::DecodePromise> AppleVTDecoder::ProcessDrain() {
AssertOnTaskQueueThread();
AssertOnTaskQueue();
nsresult rv = WaitForAsynchronousFrames();
if (NS_FAILED(rv)) {
LOG("AppleVTDecoder::Drain failed waiting for platform decoder");
@@ -10,6 +10,7 @@
#include <CoreFoundation/CFDictionary.h> // For CFDictionaryRef
#include <CoreMedia/CoreMedia.h> // For CMVideoFormatDescriptionRef
#include <VideoToolbox/VideoToolbox.h> // For VTDecompressionSessionRef
#include "AppleDecoderModule.h"
#include "PlatformDecoderModule.h"
#include "ReorderQueue.h"

@@ -24,7 +25,7 @@ DDLoggedTypeDeclNameAndBase(AppleVTDecoder, MediaDataDecoder);
class AppleVTDecoder : public MediaDataDecoder,
public DecoderDoctorLifeLogger<AppleVTDecoder> {
public:
AppleVTDecoder(const VideoInfo& aConfig, TaskQueue* aTaskQueue,
AppleVTDecoder(const VideoInfo& aConfig,
layers::ImageContainer* aImageContainer,
CreateDecoderParams::OptionSet aOptions,
layers::KnowsCompositor* aKnowsCompositor);

@@ -79,9 +80,7 @@ class AppleVTDecoder : public MediaDataDecoder,
void ProcessDecode(MediaRawData* aSample);
void MaybeResolveBufferedFrames();
void AssertOnTaskQueueThread() {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
}
void AssertOnTaskQueue() { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); }
AppleFrameRef* CreateAppleFrameRef(const MediaRawData* aSample);
CFDictionaryRef CreateOutputConfiguration();

@@ -110,7 +109,6 @@ class AppleVTDecoder : public MediaDataDecoder,
// Set on reader/decode thread calling Flush() to indicate that output is
// not required and so input samples on mTaskQueue need not be processed.
// Cleared on mTaskQueue in ProcessDrain().
Atomic<bool> mIsFlushing;
// Protects mReorderQueue and mPromise.
Monitor mMonitor;
@@ -4,8 +4,6 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/TaskQueue.h"
#include "FFmpegAudioDecoder.h"
#include "TimeUnits.h"
#include "VideoUtils.h"

@@ -14,9 +12,8 @@
namespace mozilla {
FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
TaskQueue* aTaskQueue,
const AudioInfo& aConfig)
: FFmpegDataDecoder(aLib, aTaskQueue, GetCodecId(aConfig.mMimeType)) {
: FFmpegDataDecoder(aLib, GetCodecId(aConfig.mMimeType)) {
MOZ_COUNT_CTOR(FFmpegAudioDecoder);
// Use a new MediaByteBuffer as the object will be modified during
// initialization.

@@ -178,6 +175,7 @@ MediaResult FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
uint8_t* aData, int aSize,
bool* aGotFrame,
DecodedData& aResults) {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
AVPacket packet;
mLib->av_init_packet(&packet);

@@ -25,8 +25,7 @@ class FFmpegAudioDecoder<LIBAV_VER>
: public FFmpegDataDecoder<LIBAV_VER>,
public DecoderDoctorLifeLogger<FFmpegAudioDecoder<LIBAV_VER>> {
public:
FFmpegAudioDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
const AudioInfo& aConfig);
FFmpegAudioDecoder(FFmpegLibWrapper* aLib, const AudioInfo& aConfig);
virtual ~FFmpegAudioDecoder();
RefPtr<InitPromise> Init() override;
@@ -9,8 +9,8 @@
# include <unistd.h>
#endif
#include "FFmpegLog.h"
#include "FFmpegDataDecoder.h"
#include "FFmpegLog.h"
#include "mozilla/TaskQueue.h"
#include "prsystem.h"

@@ -19,7 +19,6 @@ namespace mozilla {
StaticMutex FFmpegDataDecoder<LIBAV_VER>::sMonitor;
FFmpegDataDecoder<LIBAV_VER>::FFmpegDataDecoder(FFmpegLibWrapper* aLib,
TaskQueue* aTaskQueue,
AVCodecID aCodecID)
: mLib(aLib),
mCodecContext(nullptr),

@@ -27,7 +26,9 @@ FFmpegDataDecoder<LIBAV_VER>::FFmpegDataDecoder(FFmpegLibWrapper* aLib,
mFrame(NULL),
mExtraData(nullptr),
mCodecID(aCodecID),
mTaskQueue(aTaskQueue),
mTaskQueue(
new TaskQueue(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
"FFmpegDataDecoder")),
mLastInputDts(media::TimeUnit::FromNegativeInfinity()) {
MOZ_ASSERT(aLib);
MOZ_COUNT_CTOR(FFmpegDataDecoder);

@@ -116,15 +117,11 @@ MediaResult FFmpegDataDecoder<LIBAV_VER>::InitDecoder() {
}
RefPtr<ShutdownPromise> FFmpegDataDecoder<LIBAV_VER>::Shutdown() {
if (mTaskQueue) {
RefPtr<FFmpegDataDecoder<LIBAV_VER>> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
self->ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
RefPtr<FFmpegDataDecoder<LIBAV_VER>> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
self->ProcessShutdown();
return self->mTaskQueue->BeginShutdown();
});
}
RefPtr<MediaDataDecoder::DecodePromise> FFmpegDataDecoder<LIBAV_VER>::Decode(

@@ -210,7 +207,7 @@ FFmpegDataDecoder<LIBAV_VER>::ProcessDrain() {
RefPtr<MediaDataDecoder::FlushPromise>
FFmpegDataDecoder<LIBAV_VER>::ProcessFlush() {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
if (mCodecContext) {
mLib->avcodec_flush_buffers(mCodecContext);
}

@@ -241,7 +238,7 @@ void FFmpegDataDecoder<LIBAV_VER>::ProcessShutdown() {
}
AVFrame* FFmpegDataDecoder<LIBAV_VER>::PrepareFrame() {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
#if LIBAVCODEC_VERSION_MAJOR >= 55
if (mFrame) {
mLib->av_frame_unref(mFrame);

@@ -7,9 +7,11 @@
#ifndef __FFmpegDataDecoder_h__
#define __FFmpegDataDecoder_h__
#include "PlatformDecoderModule.h"
#include "FFmpegLibWrapper.h"
#include "PlatformDecoderModule.h"
#include "mozilla/StaticMutex.h"
// This must be the last header included
#include "FFmpegLibs.h"
namespace mozilla {

@@ -26,8 +28,7 @@ class FFmpegDataDecoder<LIBAV_VER>
: public MediaDataDecoder,
public DecoderDoctorLifeLogger<FFmpegDataDecoder<LIBAV_VER>> {
public:
FFmpegDataDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
AVCodecID aCodecID);
FFmpegDataDecoder(FFmpegLibWrapper* aLib, AVCodecID aCodecID);
virtual ~FFmpegDataDecoder();
static bool Link();

@@ -61,6 +62,7 @@ class FFmpegDataDecoder<LIBAV_VER>
protected:
static StaticMutex sMonitor;
const RefPtr<TaskQueue> mTaskQueue;
private:
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);

@@ -71,7 +73,6 @@ class FFmpegDataDecoder<LIBAV_VER>
virtual bool NeedParser() const { return false; }
virtual int ParserFlags() const { return PARSER_FLAG_COMPLETE_FRAMES; }
const RefPtr<TaskQueue> mTaskQueue;
MozPromiseHolder<DecodePromise> mPromise;
media::TimeUnit mLastInputDts;
};
@@ -47,8 +47,8 @@ class FFmpegDecoderModule : public PlatformDecoderModule {
return nullptr;
}
RefPtr<MediaDataDecoder> decoder = new FFmpegVideoDecoder<V>(
mLib, aParams.mTaskQueue, aParams.VideoConfig(),
aParams.mKnowsCompositor, aParams.mImageContainer,
mLib, aParams.VideoConfig(), aParams.mKnowsCompositor,
aParams.mImageContainer,
aParams.mOptions.contains(CreateDecoderParams::Option::LowLatency),
aParams.mOptions.contains(
CreateDecoderParams::Option::HardwareDecoderNotAllowed));

@@ -57,8 +57,8 @@ class FFmpegDecoderModule : public PlatformDecoderModule {
already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
const CreateDecoderParams& aParams) override {
RefPtr<MediaDataDecoder> decoder = new FFmpegAudioDecoder<V>(
mLib, aParams.mTaskQueue, aParams.AudioConfig());
RefPtr<MediaDataDecoder> decoder =
new FFmpegAudioDecoder<V>(mLib, aParams.AudioConfig());
return decoder.forget();
}
@@ -5,6 +5,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "FFmpegVideoDecoder.h"
#include "FFmpegLog.h"
#include "ImageContainer.h"
#include "MP4Decoder.h"

@@ -12,10 +13,10 @@
#include "VPXDecoder.h"
#include "mozilla/layers/KnowsCompositor.h"
#ifdef MOZ_WAYLAND_USE_VAAPI
# include "H264.h"
# include "gfxPlatformGtk.h"
# include "mozilla/layers/DMABUFSurfaceImage.h"
# include "mozilla/widget/DMABufLibWrapper.h"
# include "H264.h"
#endif
#include "libavutil/pixfmt.h"

@@ -337,10 +338,10 @@ void FFmpegVideoDecoder<LIBAV_VER>::PtsCorrectionContext::Reset() {
}
FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(
FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue, const VideoInfo& aConfig,
FFmpegLibWrapper* aLib, const VideoInfo& aConfig,
KnowsCompositor* aAllocator, ImageContainer* aImageContainer,
bool aLowLatency, bool aDisableHardwareDecoding)
: FFmpegDataDecoder(aLib, aTaskQueue, GetCodecId(aConfig.mMimeType)),
: FFmpegDataDecoder(aLib, GetCodecId(aConfig.mMimeType)),
#ifdef MOZ_WAYLAND_USE_VAAPI
mVAAPIDeviceContext(nullptr),
mDisableHardwareDecoding(aDisableHardwareDecoding),

@@ -444,6 +445,7 @@ void FFmpegVideoDecoder<LIBAV_VER>::InitVAAPICodecContext() {
MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame,
MediaDataDecoder::DecodedData& aResults) {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
AVPacket packet;
mLib->av_init_packet(&packet);

@@ -589,7 +591,7 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
#endif
}
gfx::YUVColorSpace FFmpegVideoDecoder<LIBAV_VER>::GetFrameColorSpace() {
gfx::YUVColorSpace FFmpegVideoDecoder<LIBAV_VER>::GetFrameColorSpace() const {
if (mLib->av_frame_get_colorspace) {
switch (mLib->av_frame_get_colorspace(mFrame)) {
#if LIBAVCODEC_VERSION_MAJOR >= 55

@@ -611,7 +613,7 @@ gfx::YUVColorSpace FFmpegVideoDecoder<LIBAV_VER>::GetFrameColorSpace() {
MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImage(
int64_t aOffset, int64_t aPts, int64_t aDuration,
MediaDataDecoder::DecodedData& aResults) {
MediaDataDecoder::DecodedData& aResults) const {
FFMPEG_LOG("Got one frame output with pts=%" PRId64 " dts=%" PRId64
" duration=%" PRId64 " opaque=%" PRId64,
aPts, mFrame->pkt_dts, aDuration, mCodecContext->reordered_opaque);

@@ -831,6 +833,7 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImageDMABuf(
RefPtr<MediaDataDecoder::FlushPromise>
FFmpegVideoDecoder<LIBAV_VER>::ProcessFlush() {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
mPtsContext.Reset();
mDurationMap.Clear();
return FFmpegDataDecoder::ProcessFlush();

@@ -862,6 +865,7 @@ AVCodecID FFmpegVideoDecoder<LIBAV_VER>::GetCodecId(
}
void FFmpegVideoDecoder<LIBAV_VER>::ProcessShutdown() {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
#ifdef MOZ_WAYLAND_USE_VAAPI
ReleaseDMABufSurfaces();
if (mVAAPIDeviceContext) {

@@ -7,12 +7,12 @@
#ifndef __FFmpegVideoDecoder_h__
#define __FFmpegVideoDecoder_h__
#include "FFmpegLibWrapper.h"
#include "FFmpegDataDecoder.h"
#include "FFmpegLibWrapper.h"
#include "SimpleMap.h"
#ifdef MOZ_WAYLAND_USE_VAAPI
# include "mozilla/widget/DMABufSurface.h"
# include "mozilla/LinkedList.h"
# include "mozilla/widget/DMABufSurface.h"
#endif
namespace mozilla {

@@ -107,8 +107,8 @@ class FFmpegVideoDecoder<LIBAV_VER>
typedef SimpleMap<int64_t> DurationMap;
public:
FFmpegVideoDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
const VideoInfo& aConfig, KnowsCompositor* aAllocator,
FFmpegVideoDecoder(FFmpegLibWrapper* aLib, const VideoInfo& aConfig,
KnowsCompositor* aAllocator,
ImageContainer* aImageContainer, bool aLowLatency,
bool aDisableHardwareDecoding);

@@ -144,10 +144,10 @@ class FFmpegVideoDecoder<LIBAV_VER>
mCodecID == AV_CODEC_ID_VP8;
#endif
}
gfx::YUVColorSpace GetFrameColorSpace();
gfx::YUVColorSpace GetFrameColorSpace() const;
MediaResult CreateImage(int64_t aOffset, int64_t aPts, int64_t aDuration,
MediaDataDecoder::DecodedData& aResults);
MediaDataDecoder::DecodedData& aResults) const;
#ifdef MOZ_WAYLAND_USE_VAAPI
MediaResult InitVAAPIDecoder();
@@ -9,9 +9,7 @@
#include "OMX_Audio.h"
#include "OMX_Component.h"
#include "OMX_Types.h"
#include "OmxPlatformLayer.h"
#include "mozilla/IntegerPrintfMacros.h"
#ifdef LOG

@@ -94,11 +92,9 @@ class MediaDataHelper {
};
OmxDataDecoder::OmxDataDecoder(const TrackInfo& aTrackInfo,
TaskQueue* aTaskQueue,
layers::ImageContainer* aImageContainer)
: mOmxTaskQueue(
CreateMediaDecodeTaskQueue("OmxDataDecoder::mOmxTaskQueue")),
mTaskQueue(aTaskQueue),
mImageContainer(aImageContainer),
mWatchManager(this, mOmxTaskQueue),
mOmxState(OMX_STATETYPE::OMX_StateInvalid, "OmxDataDecoder::mOmxState"),

@@ -135,6 +131,7 @@ void OmxDataDecoder::EndOfStream() {
RefPtr<MediaDataDecoder::InitPromise> OmxDataDecoder::Init() {
LOG("");
mThread = GetCurrentSerialEventTarget();
RefPtr<OmxDataDecoder> self = this;
return InvokeAsync(mOmxTaskQueue, __func__, [self, this]() {
InitializationTask();

@@ -158,6 +155,7 @@ RefPtr<MediaDataDecoder::InitPromise> OmxDataDecoder::Init() {
RefPtr<MediaDataDecoder::DecodePromise> OmxDataDecoder::Decode(
MediaRawData* aSample) {
LOG("sample %p", aSample);
MOZ_ASSERT(mThread->IsOnCurrentThread());
MOZ_ASSERT(mInitPromise.IsEmpty());
RefPtr<OmxDataDecoder> self = this;

@@ -176,6 +174,7 @@ RefPtr<MediaDataDecoder::DecodePromise> OmxDataDecoder::Decode(
RefPtr<MediaDataDecoder::FlushPromise> OmxDataDecoder::Flush() {
LOG("");
MOZ_ASSERT(mThread->IsOnCurrentThread());
mFlushing = true;

@@ -184,6 +183,7 @@ RefPtr<MediaDataDecoder::FlushPromise> OmxDataDecoder::Flush() {
RefPtr<MediaDataDecoder::DecodePromise> OmxDataDecoder::Drain() {
LOG("");
MOZ_ASSERT(mThread->IsOnCurrentThread());
RefPtr<OmxDataDecoder> self = this;
return InvokeAsync(mOmxTaskQueue, __func__, [self]() {

@@ -195,6 +195,7 @@ RefPtr<MediaDataDecoder::DecodePromise> OmxDataDecoder::Drain() {
RefPtr<ShutdownPromise> OmxDataDecoder::Shutdown() {
LOG("");
MOZ_ASSERT(mThread->IsOnCurrentThread());
mShuttingDown = true;

@@ -269,7 +270,7 @@ RefPtr<ShutdownPromise> OmxDataDecoder::DoAsyncShutdown() {
return ShutdownPromise::CreateAndReject(false, __func__);
})
->Then(
mTaskQueue, __func__,
mThread, __func__,
[self]() {
self->mOmxTaskQueue->BeginShutdown();
self->mOmxTaskQueue->AwaitShutdownAndIdle();
@@ -7,17 +7,14 @@
#if !defined(OmxDataDecoder_h_)
# define OmxDataDecoder_h_
# include "mozilla/Monitor.h"
# include "mozilla/StateWatching.h"
# include "AudioCompactor.h"
# include "ImageContainer.h"
# include "MediaInfo.h"
# include "PlatformDecoderModule.h"
# include "OMX_Component.h"
# include "OmxPromiseLayer.h"
# include "PlatformDecoderModule.h"
# include "mozilla/Monitor.h"
# include "mozilla/StateWatching.h"
namespace mozilla {

@@ -64,7 +61,7 @@ class OmxDataDecoder : public MediaDataDecoder,
virtual ~OmxDataDecoder();
public:
OmxDataDecoder(const TrackInfo& aTrackInfo, TaskQueue* aTaskQueue,
OmxDataDecoder(const TrackInfo& aTrackInfo,
layers::ImageContainer* aImageContainer);
RefPtr<InitPromise> Init() override;

@@ -154,8 +151,7 @@ class OmxDataDecoder : public MediaDataDecoder,
// The Omx TaskQueue.
RefPtr<TaskQueue> mOmxTaskQueue;
RefPtr<TaskQueue> mTaskQueue;
nsCOMPtr<nsISerialEventTarget> mThread;
RefPtr<layers::ImageContainer> mImageContainer;
WatchManager<OmxDataDecoder> mWatchManager;
@@ -34,15 +34,14 @@ OmxDecoderModule* OmxDecoderModule::Create() {
already_AddRefed<MediaDataDecoder> OmxDecoderModule::CreateVideoDecoder(
const CreateDecoderParams& aParams) {
RefPtr<OmxDataDecoder> decoder = new OmxDataDecoder(
aParams.mConfig, aParams.mTaskQueue, aParams.mImageContainer);
RefPtr<OmxDataDecoder> decoder =
new OmxDataDecoder(aParams.mConfig, aParams.mImageContainer);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder> OmxDecoderModule::CreateAudioDecoder(
const CreateDecoderParams& aParams) {
RefPtr<OmxDataDecoder> decoder =
new OmxDataDecoder(aParams.mConfig, aParams.mTaskQueue, nullptr);
RefPtr<OmxDataDecoder> decoder = new OmxDataDecoder(aParams.mConfig, nullptr);
return decoder.forget();
}
@@ -5,8 +5,10 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "WMFDecoderModule.h"
#include <algorithm>
#include <vector>
#include "DriverCrashGuard.h"
#include "GfxDriverInfo.h"
#include "MFTDecoder.h"

@@ -134,8 +136,7 @@ already_AddRefed<MediaDataDecoder> WMFDecoderModule::CreateVideoDecoder(
return nullptr;
}
RefPtr<MediaDataDecoder> decoder =
new WMFMediaDataDecoder(manager.release(), aParams.mTaskQueue);
RefPtr<MediaDataDecoder> decoder = new WMFMediaDataDecoder(manager.release());
return decoder.forget();
}

@@ -149,8 +150,7 @@ already_AddRefed<MediaDataDecoder> WMFDecoderModule::CreateAudioDecoder(
return nullptr;
}
RefPtr<MediaDataDecoder> decoder =
new WMFMediaDataDecoder(manager.release(), aParams.mTaskQueue);
RefPtr<MediaDataDecoder> decoder = new WMFMediaDataDecoder(manager.release());
return decoder.forget();
}
@@ -5,21 +5,24 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "WMFMediaDataDecoder.h"
#include "VideoUtils.h"
#include "WMFUtils.h"
#include "mozilla/Telemetry.h"
#include "nsTArray.h"
#include "mozilla/Logging.h"
#include "mozilla/SyncRunnable.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/Telemetry.h"
#include "nsTArray.h"
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
namespace mozilla {
WMFMediaDataDecoder::WMFMediaDataDecoder(MFTManager* aMFTManager,
TaskQueue* aTaskQueue)
: mTaskQueue(aTaskQueue), mMFTManager(aMFTManager) {}
WMFMediaDataDecoder::WMFMediaDataDecoder(MFTManager* aMFTManager)
: mTaskQueue(
new TaskQueue(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
"WMFMediaDataDecoder")),
mMFTManager(aMFTManager) {}
WMFMediaDataDecoder::~WMFMediaDataDecoder() {}

@@ -55,25 +58,18 @@ static void SendTelemetry(unsigned long hr) {
RefPtr<ShutdownPromise> WMFMediaDataDecoder::Shutdown() {
MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
mIsShutDown = true;
if (mTaskQueue) {
return InvokeAsync(mTaskQueue, this, __func__,
&WMFMediaDataDecoder::ProcessShutdown);
}
return ProcessShutdown();
}
RefPtr<ShutdownPromise> WMFMediaDataDecoder::ProcessShutdown() {
if (mMFTManager) {
mMFTManager->Shutdown();
mMFTManager = nullptr;
if (!mRecordedError && mHasSuccessfulOutput) {
SendTelemetry(S_OK);
return InvokeAsync(mTaskQueue, __func__, [self = RefPtr{this}, this] {
if (mMFTManager) {
mMFTManager->Shutdown();
mMFTManager = nullptr;
if (!mRecordedError && mHasSuccessfulOutput) {
SendTelemetry(S_OK);
}
}
}
return ShutdownPromise::CreateAndResolve(true, __func__);
return mTaskQueue->BeginShutdown();
});
}
// Inserts data into the decoder's pipeline.
@@ -86,7 +86,7 @@ class WMFMediaDataDecoder
: public MediaDataDecoder,
public DecoderDoctorLifeLogger<WMFMediaDataDecoder> {
public:
WMFMediaDataDecoder(MFTManager* aOutputSource, TaskQueue* aTaskQueue);
explicit WMFMediaDataDecoder(MFTManager* aOutputSource);
~WMFMediaDataDecoder();
RefPtr<MediaDataDecoder::InitPromise> Init() override;

@@ -131,8 +131,6 @@ class WMFMediaDataDecoder
// all available output.
RefPtr<DecodePromise> ProcessDrain();
RefPtr<ShutdownPromise> ProcessShutdown();
const RefPtr<TaskQueue> mTaskQueue;
UniquePtr<MFTManager> mMFTManager;
@@ -15,78 +15,70 @@ namespace mozilla {
using media::TimeInterval;
using media::TimeUnit;
// All the MediaDataDecoder overridden methods dispatch to mTaskQueue in order
// to insure that access to mTrimmers is only ever accessed on the same thread.
RefPtr<MediaDataDecoder::InitPromise> AudioTrimmer::Init() {
RefPtr<AudioTrimmer> self = this;
return InvokeAsync(mTaskQueue, __func__,
[self]() { return self->mDecoder->Init(); });
mThread = GetCurrentSerialEventTarget();
return mDecoder->Init();
}
RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::Decode(
MediaRawData* aSample) {
MOZ_ASSERT(mThread->IsOnCurrentThread(),
"We're not on the thread we were first initialized on");
// A compress sample indicates that it needs to be trimmed after decoding by
// having its mOriginalPresentationWindow member set; in which case
// mOriginalPresentationWindow contains the original time and duration of the
// frame set by the demuxer and mTime and mDuration set to what it should be
// after trimming.
RefPtr<MediaRawData> sample = aSample;
if (sample->mOriginalPresentationWindow) {
LOG("sample[%" PRId64 ",%" PRId64 "] has trimming info ([%" PRId64
",%" PRId64 "]",
sample->mOriginalPresentationWindow->mStart.ToMicroseconds(),
sample->mOriginalPresentationWindow->mEnd.ToMicroseconds(),
sample->mTime.ToMicroseconds(), sample->GetEndTime().ToMicroseconds());
mTrimmers.AppendElement(
Some(TimeInterval(sample->mTime, sample->GetEndTime())));
sample->mTime = sample->mOriginalPresentationWindow->mStart;
sample->mDuration = sample->mOriginalPresentationWindow->Length();
} else {
LOG("sample[%" PRId64 ",%" PRId64 "] no trimming information",
sample->mTime.ToMicroseconds(), sample->GetEndTime().ToMicroseconds());
mTrimmers.AppendElement(Nothing());
}
RefPtr<AudioTrimmer> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, sample, this]() {
if (sample->mOriginalPresentationWindow) {
LOG("sample[%" PRId64 ",%" PRId64 "] has trimming info ([%" PRId64
",%" PRId64 "]",
sample->mOriginalPresentationWindow->mStart.ToMicroseconds(),
sample->mOriginalPresentationWindow->mEnd.ToMicroseconds(),
sample->mTime.ToMicroseconds(),
sample->GetEndTime().ToMicroseconds());
mTrimmers.AppendElement(
Some(TimeInterval(sample->mTime, sample->GetEndTime())));
sample->mTime = sample->mOriginalPresentationWindow->mStart;
sample->mDuration = sample->mOriginalPresentationWindow->Length();
} else {
LOG("sample[%" PRId64 ",%" PRId64 "] no trimming information",
sample->mTime.ToMicroseconds(),
sample->GetEndTime().ToMicroseconds());
mTrimmers.AppendElement(Nothing());
}
RefPtr<DecodePromise> p = self->mDecoder->Decode(sample)->Then(
self->mTaskQueue, __func__,
[self, sample](DecodePromise::ResolveOrRejectValue&& aValue) {
return self->HandleDecodedResult(std::move(aValue), sample);
});
return p;
});
RefPtr<DecodePromise> p = mDecoder->Decode(sample)->Then(
GetCurrentSerialEventTarget(), __func__,
[self, sample](DecodePromise::ResolveOrRejectValue&& aValue) {
return self->HandleDecodedResult(std::move(aValue), sample);
});
return p;
}
RefPtr<MediaDataDecoder::FlushPromise> AudioTrimmer::Flush() {
RefPtr<AudioTrimmer> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
RefPtr<FlushPromise> p = self->mDecoder->Flush();
self->mTrimmers.Clear();
return p;
});
MOZ_ASSERT(mThread->IsOnCurrentThread(),
"We're not on the thread we were first initialized on");
RefPtr<FlushPromise> p = mDecoder->Flush();
mTrimmers.Clear();
return p;
}
RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::Drain() {
RefPtr<AudioTrimmer> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
LOG("Draining");
RefPtr<DecodePromise> p = self->mDecoder->Drain()->Then(
self->mTaskQueue, __func__,
[self](DecodePromise::ResolveOrRejectValue&& aValue) {
auto p = self->HandleDecodedResult(std::move(aValue), nullptr);
return p;
});
return p;
});
MOZ_ASSERT(mThread->IsOnCurrentThread(),
"We're not on the thread we were first initialized on");
LOG("Draining");
RefPtr<DecodePromise> p = mDecoder->Drain()->Then(
GetCurrentSerialEventTarget(), __func__,
[self = RefPtr{this}](DecodePromise::ResolveOrRejectValue&& aValue) {
auto p = self->HandleDecodedResult(std::move(aValue), nullptr);
return p;
});
return p;
}
RefPtr<ShutdownPromise> AudioTrimmer::Shutdown() {
RefPtr<AudioTrimmer> self = this;
return InvokeAsync(self->mTaskQueue, __func__,
[self]() { return self->mDecoder->Shutdown(); });
MOZ_ASSERT(mThread->IsOnCurrentThread(),
"We're not on the thread we were first initialized on");
return mDecoder->Shutdown();
}
nsCString AudioTrimmer::GetDescriptionName() const {

@@ -111,6 +103,8 @@ MediaDataDecoder::ConversionRequired AudioTrimmer::NeedsConversion() const {
RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::HandleDecodedResult(
DecodePromise::ResolveOrRejectValue&& aValue, MediaRawData* aRaw) {
MOZ_ASSERT(mThread->IsOnCurrentThread(),
"We're not on the thread we were first initialized on");
if (aValue.IsReject()) {
return DecodePromise::CreateAndReject(std::move(aValue.RejectValue()),
__func__);
@@ -18,7 +18,7 @@ class AudioTrimmer : public MediaDataDecoder {
public:
AudioTrimmer(already_AddRefed<MediaDataDecoder> aDecoder,
const CreateDecoderParams& aParams)
: mDecoder(aDecoder), mTaskQueue(aParams.mTaskQueue) {}
: mDecoder(aDecoder) {}
RefPtr<InitPromise> Init() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;

@@ -39,8 +39,8 @@ class AudioTrimmer : public MediaDataDecoder {
private:
RefPtr<DecodePromise> HandleDecodedResult(
DecodePromise::ResolveOrRejectValue&& aValue, MediaRawData* aRaw);
RefPtr<MediaDataDecoder> mDecoder;
RefPtr<TaskQueue> mTaskQueue;
const RefPtr<MediaDataDecoder> mDecoder;
nsCOMPtr<nsISerialEventTarget> mThread;
AutoTArray<Maybe<media::TimeInterval>, 2> mTrimmers;
};
@ -231,7 +231,6 @@ MediaChangeMonitor::MediaChangeMonitor(PlatformDecoderModule* aPDM,
|
|||
mCurrentConfig(aParams.VideoConfig()),
|
||||
mKnowsCompositor(aParams.mKnowsCompositor),
|
||||
mImageContainer(aParams.mImageContainer),
|
||||
mTaskQueue(aParams.mTaskQueue),
|
||||
mDecoder(nullptr),
|
||||
mGMPCrashHelper(aParams.mCrashHelper),
|
||||
mLastError(NS_OK),
|
||||
|
@ -257,176 +256,160 @@ MediaChangeMonitor::MediaChangeMonitor(PlatformDecoderModule* aPDM,
|
|||
MediaChangeMonitor::~MediaChangeMonitor() = default;
|
||||
|
||||
RefPtr<MediaDataDecoder::InitPromise> MediaChangeMonitor::Init() {
|
||||
RefPtr<MediaChangeMonitor> self = this;
|
||||
return InvokeAsync(
|
||||
mTaskQueue, __func__, [self, this]() -> RefPtr<InitPromise> {
|
||||
if (mDecoder) {
|
||||
RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
|
||||
mDecoder->Init()
|
||||
->Then(mTaskQueue, __func__,
|
||||
[self, this](InitPromise::ResolveOrRejectValue&& aValue) {
|
||||
mInitPromiseRequest.Complete();
|
||||
if (aValue.IsResolve()) {
|
||||
mDecoderInitialized = true;
|
||||
mConversionRequired =
|
||||
Some(mDecoder->NeedsConversion());
|
||||
mCanRecycleDecoder = Some(CanRecycleDecoder());
|
||||
}
|
||||
return mInitPromise.ResolveOrRejectIfExists(
|
||||
std::move(aValue), __func__);
|
||||
})
|
||||
->Track(mInitPromiseRequest);
|
||||
return p;
|
||||
}
|
||||
mThread = GetCurrentSerialEventTarget();
|
||||
if (mDecoder) {
|
||||
RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
|
||||
RefPtr<MediaChangeMonitor> self = this;
|
||||
mDecoder->Init()
|
||||
->Then(GetCurrentSerialEventTarget(), __func__,
|
||||
[self, this](InitPromise::ResolveOrRejectValue&& aValue) {
|
||||
mInitPromiseRequest.Complete();
|
||||
if (aValue.IsResolve()) {
|
||||
mDecoderInitialized = true;
|
||||
mConversionRequired = Some(mDecoder->NeedsConversion());
|
||||
mCanRecycleDecoder = Some(CanRecycleDecoder());
|
||||
}
|
||||
return mInitPromise.ResolveOrRejectIfExists(std::move(aValue),
|
||||
__func__);
|
||||
})
|
||||
->Track(mInitPromiseRequest);
|
||||
return p;
|
||||
}
|
||||
|
||||
// We haven't been able to initialize a decoder due to missing
|
||||
// extradata.
|
||||
return MediaDataDecoder::InitPromise::CreateAndResolve(
|
||||
TrackType::kVideoTrack, __func__);
|
||||
});
|
||||
// We haven't been able to initialize a decoder due to missing
|
||||
// extradata.
|
||||
return MediaDataDecoder::InitPromise::CreateAndResolve(TrackType::kVideoTrack,
|
||||
__func__);
|
||||
}

 RefPtr<MediaDataDecoder::DecodePromise> MediaChangeMonitor::Decode(
     MediaRawData* aSample) {
-  RefPtr<MediaChangeMonitor> self = this;
-  RefPtr<MediaRawData> sample = aSample;
-  return InvokeAsync(mTaskQueue, __func__, [self, this, sample]() {
-    MOZ_RELEASE_ASSERT(mFlushPromise.IsEmpty(),
-                       "Flush operation didn't complete");
-
-    MOZ_RELEASE_ASSERT(
-        !mDecodePromiseRequest.Exists() && !mInitPromiseRequest.Exists(),
-        "Can't request a new decode until previous one completed");
-
-    MediaResult rv = CheckForChange(sample);
-
-    if (rv == NS_ERROR_NOT_INITIALIZED) {
-      // We are missing the required init data to create the decoder.
-      if (mErrorIfNoInitializationData) {
-        // This frame can't be decoded and should be treated as an error.
-        return DecodePromise::CreateAndReject(rv, __func__);
-      }
-      // Swallow the frame, and await delivery of init data.
-      return DecodePromise::CreateAndResolve(DecodedData(), __func__);
-    }
-    if (rv == NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER) {
-      // The decoder is pending initialization.
-      RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
-      return p;
-    }
-
-    if (NS_FAILED(rv)) {
-      return DecodePromise::CreateAndReject(rv, __func__);
-    }
-
-    if (mNeedKeyframe && !sample->mKeyframe) {
-      return DecodePromise::CreateAndResolve(DecodedData(), __func__);
-    }
-
-    rv = mChangeMonitor->PrepareSample(*mConversionRequired, sample,
-                                       mNeedKeyframe);
-    if (NS_FAILED(rv)) {
-      return DecodePromise::CreateAndReject(rv, __func__);
-    }
-
-    mNeedKeyframe = false;
-
-    return mDecoder->Decode(sample);
-  });
+  AssertOnThread();
+  MOZ_RELEASE_ASSERT(mFlushPromise.IsEmpty(), "Flush operation didn't complete");
+
+  MOZ_RELEASE_ASSERT(
+      !mDecodePromiseRequest.Exists() && !mInitPromiseRequest.Exists(),
+      "Can't request a new decode until previous one completed");
+
+  MediaResult rv = CheckForChange(aSample);
+
+  if (rv == NS_ERROR_NOT_INITIALIZED) {
+    // We are missing the required init data to create the decoder.
+    if (mErrorIfNoInitializationData) {
+      // This frame can't be decoded and should be treated as an error.
+      return DecodePromise::CreateAndReject(rv, __func__);
+    }
+    // Swallow the frame, and await delivery of init data.
+    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+  }
+  if (rv == NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER) {
+    // The decoder is pending initialization.
+    RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+    return p;
+  }
+
+  if (NS_FAILED(rv)) {
+    return DecodePromise::CreateAndReject(rv, __func__);
+  }
+
+  if (mNeedKeyframe && !aSample->mKeyframe) {
+    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+  }
+
+  rv = mChangeMonitor->PrepareSample(*mConversionRequired, aSample,
+                                     mNeedKeyframe);
+  if (NS_FAILED(rv)) {
+    return DecodePromise::CreateAndReject(rv, __func__);
+  }
+
+  mNeedKeyframe = false;
+
+  return mDecoder->Decode(aSample);
 }

 RefPtr<MediaDataDecoder::FlushPromise> MediaChangeMonitor::Flush() {
-  RefPtr<MediaChangeMonitor> self = this;
-  return InvokeAsync(mTaskQueue, __func__, [self, this]() {
-    mDecodePromiseRequest.DisconnectIfExists();
-    mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
-    mNeedKeyframe = true;
-    mPendingFrames.Clear();
-
-    MOZ_RELEASE_ASSERT(mFlushPromise.IsEmpty(),
-                       "Previous flush didn't complete");
-
-    /*
-      When we detect a change of content in the byte stream, we first drain the
-      current decoder (1), flush (2), shut it down (3) create a new decoder and
-      initialize it (4). It is possible for MediaChangeMonitor::Flush to be
-      called during any of those times. If during (1):
-      - mDrainRequest will not be empty.
-      - The old decoder can still be used, with the current extradata as
-        stored in mCurrentConfig.mExtraData.
-
-      If during (2):
-      - mFlushRequest will not be empty.
-      - The old decoder can still be used, with the current extradata as
-        stored in mCurrentConfig.mExtraData.
-
-      If during (3):
-      - mShutdownRequest won't be empty.
-      - mDecoder is empty.
-      - The old decoder is no longer referenced by the MediaChangeMonitor.
-
-      If during (4):
-      - mInitPromiseRequest won't be empty.
-      - mDecoder is set but not usable yet.
-    */
-
-    if (mDrainRequest.Exists() || mFlushRequest.Exists() ||
-        mShutdownRequest.Exists() || mInitPromiseRequest.Exists()) {
-      // We let the current decoder complete and will resume after.
-      RefPtr<FlushPromise> p = mFlushPromise.Ensure(__func__);
-      return p;
-    }
-    if (mDecoder && mDecoderInitialized) {
-      return mDecoder->Flush();
-    }
-    return FlushPromise::CreateAndResolve(true, __func__);
-  });
+  AssertOnThread();
+  mDecodePromiseRequest.DisconnectIfExists();
+  mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mNeedKeyframe = true;
+  mPendingFrames.Clear();
+
+  MOZ_RELEASE_ASSERT(mFlushPromise.IsEmpty(), "Previous flush didn't complete");
+
+  /*
+    When we detect a change of content in the byte stream, we first drain the
+    current decoder (1), flush (2), shut it down (3) create a new decoder and
+    initialize it (4). It is possible for MediaChangeMonitor::Flush to be
+    called during any of those times. If during (1):
+    - mDrainRequest will not be empty.
+    - The old decoder can still be used, with the current extradata as
+      stored in mCurrentConfig.mExtraData.
+
+    If during (2):
+    - mFlushRequest will not be empty.
+    - The old decoder can still be used, with the current extradata as
+      stored in mCurrentConfig.mExtraData.
+
+    If during (3):
+    - mShutdownRequest won't be empty.
+    - mDecoder is empty.
+    - The old decoder is no longer referenced by the MediaChangeMonitor.
+
+    If during (4):
+    - mInitPromiseRequest won't be empty.
+    - mDecoder is set but not usable yet.
+  */
+
+  if (mDrainRequest.Exists() || mFlushRequest.Exists() ||
+      mShutdownRequest.Exists() || mInitPromiseRequest.Exists()) {
+    // We let the current decoder complete and will resume after.
+    RefPtr<FlushPromise> p = mFlushPromise.Ensure(__func__);
+    return p;
+  }
+  if (mDecoder && mDecoderInitialized) {
+    return mDecoder->Flush();
+  }
+  return FlushPromise::CreateAndResolve(true, __func__);
 }

 RefPtr<MediaDataDecoder::DecodePromise> MediaChangeMonitor::Drain() {
-  RefPtr<MediaChangeMonitor> self = this;
-  return InvokeAsync(mTaskQueue, __func__, [self, this]() {
-    MOZ_RELEASE_ASSERT(!mDrainRequest.Exists());
-    mNeedKeyframe = true;
-    if (mDecoder && mDecoderInitialized) {
-      return mDecoder->Drain();
-    }
-    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
-  });
+  AssertOnThread();
+  MOZ_RELEASE_ASSERT(!mDrainRequest.Exists());
+  mNeedKeyframe = true;
+  if (mDecoder) {
+    return mDecoder->Drain();
+  }
+  return DecodePromise::CreateAndResolve(DecodedData(), __func__);
 }

 RefPtr<ShutdownPromise> MediaChangeMonitor::Shutdown() {
-  RefPtr<MediaChangeMonitor> self = this;
-  return InvokeAsync(mTaskQueue, __func__, [self, this]() {
-    mInitPromiseRequest.DisconnectIfExists();
-    mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
-    mDecodePromiseRequest.DisconnectIfExists();
-    mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
-    mDrainRequest.DisconnectIfExists();
-    mFlushRequest.DisconnectIfExists();
-    mFlushPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
-    mShutdownRequest.DisconnectIfExists();
-
-    if (mShutdownPromise) {
-      // We have a shutdown in progress, return that promise instead as we can't
-      // shutdown a decoder twice.
-      RefPtr<ShutdownPromise> p = std::move(mShutdownPromise);
-      return p;
-    }
-    return ShutdownDecoder();
-  });
+  AssertOnThread();
+  mInitPromiseRequest.DisconnectIfExists();
+  mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mDecodePromiseRequest.DisconnectIfExists();
+  mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mDrainRequest.DisconnectIfExists();
+  mFlushRequest.DisconnectIfExists();
+  mFlushPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mShutdownRequest.DisconnectIfExists();
+
+  if (mShutdownPromise) {
+    // We have a shutdown in progress, return that promise instead as we can't
+    // shutdown a decoder twice.
+    RefPtr<ShutdownPromise> p = std::move(mShutdownPromise);
+    return p;
+  }
+  return ShutdownDecoder();
 }

 RefPtr<ShutdownPromise> MediaChangeMonitor::ShutdownDecoder() {
-  RefPtr<MediaChangeMonitor> self = this;
-  return InvokeAsync(mTaskQueue, __func__, [self, this]() {
-    mConversionRequired.reset();
-    if (mDecoder) {
-      RefPtr<MediaDataDecoder> decoder = std::move(mDecoder);
-      return decoder->Shutdown();
-    }
-    return ShutdownPromise::CreateAndResolve(true, __func__);
-  });
+  AssertOnThread();
+  mConversionRequired.reset();
+  if (mDecoder) {
+    RefPtr<MediaDataDecoder> decoder = std::move(mDecoder);
+    return decoder->Shutdown();
+  }
+  return ShutdownPromise::CreateAndResolve(true, __func__);
 }

 bool MediaChangeMonitor::IsHardwareAccelerated(
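
One detail in ShutdownDecoder() above is worth spelling out: mDecoder is moved into a local RefPtr before Shutdown() is called, so the member is cleared immediately on the calling thread while the local keeps the object alive for the asynchronous shutdown. A stand-alone sketch of that hand-off, with std::shared_ptr standing in for RefPtr and Resource for MediaDataDecoder (all names here are illustrative, not part of the patch):

// Minimal illustration of the move-then-shutdown idiom.
#include <memory>
#include <utility>

struct Resource {
  void Shutdown() { /* begins (possibly asynchronous) teardown */ }
};

struct Owner {
  std::shared_ptr<Resource> mResource;

  void ShutdownResource() {
    if (mResource) {
      // Clear the member first: anything that re-enters sees "no resource",
      // while the local reference keeps the object alive until Shutdown()
      // (and whatever it chains) is done with it.
      std::shared_ptr<Resource> resource = std::move(mResource);
      resource->Shutdown();
    }
  }
};
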

@@ -454,9 +437,9 @@ void MediaChangeMonitor::SetSeekThreshold(const media::TimeUnit& aTime) {

 MediaResult MediaChangeMonitor::CreateDecoder(
     DecoderDoctorDiagnostics* aDiagnostics) {
-  // This is the only one of two methods to run outside the TaskQueue when
+  // This is the only one of two methods to run outside the init thread when
   // called from the constructor.
-  MOZ_ASSERT(mInConstructor || mTaskQueue->IsCurrentThreadIn());
+  MOZ_ASSERT(mInConstructor || (mThread && mThread->IsOnCurrentThread()));

   if (!mChangeMonitor->CanBeInstantiated()) {
     // nothing found yet, will try again later

@@ -466,18 +449,18 @@ MediaResult MediaChangeMonitor::CreateDecoder(

   MediaResult error = NS_OK;
   mDecoder = mPDM->CreateVideoDecoder(
-      {mCurrentConfig, mTaskQueue, aDiagnostics, mImageContainer,
-       mKnowsCompositor, mGMPCrashHelper, mType, mOnWaitingForKeyEvent,
-       mDecoderOptions, mRate, &error});
+      {mCurrentConfig, aDiagnostics, mImageContainer, mKnowsCompositor,
+       mGMPCrashHelper, mType, mOnWaitingForKeyEvent, mDecoderOptions, mRate,
+       &error});

   if (!mDecoder) {
     // We failed to create a decoder with the existing PDM; attempt once again
     // with a PDMFactory.
     RefPtr<PDMFactory> factory = new PDMFactory();
     mDecoder = factory->CreateDecoder(
-        {mCurrentConfig, mTaskQueue, aDiagnostics, mImageContainer,
-         mKnowsCompositor, mGMPCrashHelper, mType, mOnWaitingForKeyEvent,
-         mDecoderOptions, mRate, &error, CreateDecoderParams::NoWrapper(true)});
+        {mCurrentConfig, aDiagnostics, mImageContainer, mKnowsCompositor,
+         mGMPCrashHelper, mType, mOnWaitingForKeyEvent, mDecoderOptions, mRate,
+         &error, CreateDecoderParams::NoWrapper(true)});

     if (!mDecoder) {
       if (NS_FAILED(error)) {

@@ -498,8 +481,6 @@ MediaResult MediaChangeMonitor::CreateDecoder(
 }

 MediaResult MediaChangeMonitor::CreateDecoderAndInit(MediaRawData* aSample) {
-  AssertOnTaskQueue();
-
   MediaResult rv = mChangeMonitor->CheckForChange(aSample);
   if (!NS_SUCCEEDED(rv) && rv != NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER) {
     return rv;

@@ -512,7 +493,7 @@ MediaResult MediaChangeMonitor::CreateDecoderAndInit(MediaRawData* aSample) {
   RefPtr<MediaRawData> sample = aSample;
   mDecoder->Init()
       ->Then(
-          mTaskQueue, __func__,
+          GetCurrentSerialEventTarget(), __func__,
           [self, sample, this](const TrackType aTrackType) {
             mInitPromiseRequest.Complete();
             mDecoderInitialized = true;

@@ -549,16 +530,12 @@ MediaResult MediaChangeMonitor::CreateDecoderAndInit(MediaRawData* aSample) {
 }

 bool MediaChangeMonitor::CanRecycleDecoder() const {
-  AssertOnTaskQueue();
-
   MOZ_ASSERT(mDecoder);
   return StaticPrefs::media_decoder_recycle_enabled() &&
          mDecoder->SupportDecoderRecycling();
 }

 void MediaChangeMonitor::DecodeFirstSample(MediaRawData* aSample) {
-  AssertOnTaskQueue();
-
   // We feed all the data to AnnexB decoder as a non-keyframe could contain
   // the SPS/PPS when used with WebRTC and this data is needed by the decoder.
   if (mNeedKeyframe && !aSample->mKeyframe &&

@@ -583,7 +560,7 @@ void MediaChangeMonitor::DecodeFirstSample(MediaRawData* aSample) {
   RefPtr<MediaChangeMonitor> self = this;
   mDecoder->Decode(aSample)
       ->Then(
-          mTaskQueue, __func__,
+          GetCurrentSerialEventTarget(), __func__,
          [self, this](MediaDataDecoder::DecodedData&& aResults) {
            mDecodePromiseRequest.Complete();
            mPendingFrames.AppendElements(std::move(aResults));

@@ -598,8 +575,6 @@ void MediaChangeMonitor::DecodeFirstSample(MediaRawData* aSample) {
 }

 MediaResult MediaChangeMonitor::CheckForChange(MediaRawData* aSample) {
-  AssertOnTaskQueue();
-
   if (!mDecoder) {
     return CreateDecoderAndInit(aSample);
   }

@@ -623,14 +598,13 @@ MediaResult MediaChangeMonitor::CheckForChange(MediaRawData* aSample) {
 }

 void MediaChangeMonitor::DrainThenFlushDecoder(MediaRawData* aPendingSample) {
-  AssertOnTaskQueue();
+  AssertOnThread();
   MOZ_ASSERT(mDecoderInitialized);

   RefPtr<MediaRawData> sample = aPendingSample;
   RefPtr<MediaChangeMonitor> self = this;
   mDecoder->Drain()
       ->Then(
-          mTaskQueue, __func__,
+          GetCurrentSerialEventTarget(), __func__,
           [self, sample, this](MediaDataDecoder::DecodedData&& aResults) {
             mDrainRequest.Complete();
             if (!mFlushPromise.IsEmpty()) {

@@ -661,14 +635,13 @@ void MediaChangeMonitor::DrainThenFlushDecoder(MediaRawData* aPendingSample) {

 void MediaChangeMonitor::FlushThenShutdownDecoder(
     MediaRawData* aPendingSample) {
-  AssertOnTaskQueue();
+  AssertOnThread();
   MOZ_ASSERT(mDecoderInitialized);

   RefPtr<MediaRawData> sample = aPendingSample;
   RefPtr<MediaChangeMonitor> self = this;
   mDecoder->Flush()
       ->Then(
-          mTaskQueue, __func__,
+          GetCurrentSerialEventTarget(), __func__,
           [self, sample, this]() {
             mFlushRequest.Complete();


@@ -681,7 +654,7 @@ void MediaChangeMonitor::FlushThenShutdownDecoder(
             mShutdownPromise = ShutdownDecoder();
             mShutdownPromise
                 ->Then(
-                    mTaskQueue, __func__,
+                    GetCurrentSerialEventTarget(), __func__,
                     [self, sample, this]() {
                       mShutdownRequest.Complete();
                       mShutdownPromise = nullptr;

@@ -76,9 +76,7 @@ class MediaChangeMonitor : public MediaDataDecoder,
  private:
   UniquePtr<CodecChangeMonitor> mChangeMonitor;

-  void AssertOnTaskQueue() const {
-    MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
-  }
+  void AssertOnThread() const { MOZ_ASSERT(mThread->IsOnCurrentThread()); }

   bool CanRecycleDecoder() const;

@@ -98,7 +96,7 @@ class MediaChangeMonitor : public MediaDataDecoder,
   VideoInfo mCurrentConfig;
   RefPtr<layers::KnowsCompositor> mKnowsCompositor;
   RefPtr<layers::ImageContainer> mImageContainer;
-  const RefPtr<TaskQueue> mTaskQueue;
+  nsCOMPtr<nsISerialEventTarget> mThread;
   RefPtr<MediaDataDecoder> mDecoder;
   MozPromiseRequestHolder<InitPromise> mInitPromiseRequest;
   MozPromiseHolder<InitPromise> mInitPromise;
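
The member change above — the dedicated TaskQueue replaced by an nsISerialEventTarget that Init() captures with GetCurrentSerialEventTarget() — is the threading contract of this patch in miniature: whichever serial target first calls Init() owns the decoder, and every later entry point only asserts that it is still on that target. A rough self-contained sketch of that contract, with std::thread::id standing in for Gecko's serial event targets (all names are illustrative):

// Sketch only: capture the caller's thread at Init() time and assert it on
// every later call, mirroring mThread/AssertOnThread() in this patch.
#include <cassert>
#include <thread>

class ThreadBoundDecoder {
 public:
  void Init() {
    // First call wins: record the thread the caller used.
    mOwnerThread = std::this_thread::get_id();
  }

  void Decode() {
    AssertOnOwnerThread();
    // ... actual decoding would happen here ...
  }

 private:
  void AssertOnOwnerThread() const {
    assert(std::this_thread::get_id() == mOwnerThread &&
           "must be called on the thread that ran Init()");
  }

  std::thread::id mOwnerThread{};
};
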

@@ -38,6 +38,7 @@ bool MediaDataDecoderProxy::CanDecodeBatch() const {

 RefPtr<MediaDataDecoder::DecodePromise> MediaDataDecoderProxy::DecodeBatch(
     nsTArray<RefPtr<MediaRawData>>&& aSamples) {
+  MOZ_ASSERT(!mIsShutdown);
   if (!mProxyThread) {
     return mProxyDecoder->DecodeBatch(std::move(aSamples));
   }

@@ -80,8 +81,16 @@ RefPtr<ShutdownPromise> MediaDataDecoderProxy::Shutdown() {
   if (!mProxyThread) {
     return mProxyDecoder->Shutdown();
   }
+  // We chain another promise to ensure that the proxied decoder gets destructed
+  // on the proxy thread.
   return InvokeAsync(mProxyThread, __func__, [self = RefPtr{this}] {
-    return self->mProxyDecoder->Shutdown();
+    RefPtr<ShutdownPromise> p = self->mProxyDecoder->Shutdown()->Then(
+        self->mProxyThread, __func__,
+        [self](const ShutdownPromise::ResolveOrRejectValue& aResult) {
+          self->mProxyDecoder = nullptr;
+          return ShutdownPromise::CreateAndResolveOrReject(aResult, __func__);
+        });
+    return p;
   });
 }
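
The extra ->Then() added above exists so that the last reference to mProxyDecoder is dropped from a task that runs on mProxyThread, which in turn guarantees the wrapped decoder is destroyed on that thread rather than on whichever thread happens to settle the shutdown promise. A stand-alone sketch of the same idea, with a plain worker thread and std::shared_ptr standing in for the proxy thread and the MozPromise machinery (names are illustrative):

// Sketch only: shut the object down on its own thread and drop the final
// reference there, so its destructor also runs on that thread.
#include <memory>
#include <thread>

struct ProxiedDecoder {
  void Shutdown() { /* teardown work */ }
  ~ProxiedDecoder() { /* expected to run on the proxy thread */ }
};

int main() {
  auto decoder = std::make_shared<ProxiedDecoder>();
  // Hand the only reference to the "proxy thread" task; after Shutdown()
  // completes, the reference is released inside that same task, so the
  // destructor runs on the proxy thread.
  std::thread proxyThread([d = std::move(decoder)]() mutable {
    d->Shutdown();
    d.reset();
  });
  proxyThread.join();
  return 0;
}
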

@@ -22,32 +22,12 @@ class MediaDataDecoderProxy
       public DecoderDoctorLifeLogger<MediaDataDecoderProxy> {
  public:
   explicit MediaDataDecoderProxy(
-      already_AddRefed<nsISerialEventTarget> aProxyThread)
-      : mProxyThread(aProxyThread)
-# if defined(DEBUG)
-        ,
-        mIsShutdown(false)
-# endif
-  {
-  }
-
-  explicit MediaDataDecoderProxy(
-      already_AddRefed<MediaDataDecoder> aProxyDecoder)
-      : mProxyDecoder(aProxyDecoder)
-# if defined(DEBUG)
-        ,
-        mIsShutdown(false)
-# endif
-  {
+      already_AddRefed<MediaDataDecoder> aProxyDecoder,
+      already_AddRefed<nsISerialEventTarget> aProxyThread = nullptr)
+      : mProxyDecoder(aProxyDecoder), mProxyThread(aProxyThread) {
     DDLINKCHILD("proxy decoder", mProxyDecoder.get());
   }

-  void SetProxyTarget(MediaDataDecoder* aProxyDecoder) {
-    MOZ_ASSERT(aProxyDecoder);
-    mProxyDecoder = aProxyDecoder;
-    DDLINKCHILD("proxy decoder", aProxyDecoder);
-  }
-
   RefPtr<InitPromise> Init() override;
   RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
   bool CanDecodeBatch() const override;

@@ -63,11 +43,12 @@ class MediaDataDecoderProxy
   ConversionRequired NeedsConversion() const override;

  private:
+  // Set on construction and clear on the proxy thread if set.
   RefPtr<MediaDataDecoder> mProxyDecoder;
-  nsCOMPtr<nsISerialEventTarget> mProxyThread;
+  const nsCOMPtr<nsISerialEventTarget> mProxyThread;

 # if defined(DEBUG)
-  Atomic<bool> mIsShutdown;
+  Atomic<bool> mIsShutdown = Atomic<bool>(false);
 # endif
 };
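
With the single constructor above, every call site touched by this patch wraps a freshly created decoder the same way. A condensed sketch of that usage, taken from the MediaDecodeTask hunk below (Gecko-internal code, not a standalone program; pdm, info, result and mPDecoderTaskQueue come from the surrounding function):

// Create the raw decoder without handing it a task queue...
RefPtr<MediaDataDecoder> decoder =
    pdm->CreateDecoder({info, &result, TrackInfo::kAudioTrack});
if (decoder) {
  // ...then bind every call (Init/Decode/DecodeBatch/Drain/Flush/Shutdown)
  // to one serial target by wrapping it in MediaDataDecoderProxy.
  mDecoder = new MediaDataDecoderProxy(decoder.forget(),
                                       do_AddRef(mPDecoderTaskQueue.get()));
}
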

@@ -14,6 +14,7 @@
 #include "BufferMediaResource.h"
 #include "DecoderTraits.h"
 #include "MediaContainerType.h"
+#include "MediaDataDecoderProxy.h"
 #include "MediaDataDemuxer.h"
 #include "MediaQueue.h"
 #include "PDMFactory.h"

@@ -293,10 +294,12 @@ MediaResult MediaDecodeTask::CreateDecoder(const AudioInfo& info) {
       MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                   nsPrintfCString("error creating %s decoder",
                                   TrackTypeToStr(TrackInfo::kAudioTrack)));
-  mDecoder = pdm->CreateDecoder(
-      {info, mPDecoderTaskQueue, &result, TrackInfo::kAudioTrack});
+  RefPtr<MediaDataDecoder> decoder =
+      pdm->CreateDecoder({info, &result, TrackInfo::kAudioTrack});

-  if (mDecoder) {
+  if (decoder) {
+    mDecoder = new MediaDataDecoderProxy(decoder.forget(),
+                                         do_AddRef(mPDecoderTaskQueue.get()));
     return NS_OK;
   }

@@ -3,8 +3,10 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */

 #include "WebrtcMediaDataDecoderCodec.h"
+
 #include "ImageContainer.h"
 #include "Layers.h"
+#include "MediaDataDecoderProxy.h"
 #include "PDMFactory.h"
 #include "VideoUtils.h"
 #include "mozilla/layers/ImageBridgeChild.h"

@@ -143,7 +145,7 @@ int32_t WebrtcMediaDataDecoder::Release() {
 }

 bool WebrtcMediaDataDecoder::OnTaskQueue() const {
-  return OwnerThread()->IsCurrentThreadIn();
+  return mTaskQueue->IsOnCurrentThread();
 }

 int32_t WebrtcMediaDataDecoder::CreateDecoder() {

@@ -154,18 +156,23 @@ int32_t WebrtcMediaDataDecoder::CreateDecoder() {
     Release();
   }

-  mDecoder = mFactory->CreateDecoder(
-      {mInfo, mTaskQueue,
+  RefPtr<MediaDataDecoder> decoder = mFactory->CreateDecoder(
+      {mInfo,
        CreateDecoderParams::OptionSet(
            CreateDecoderParams::Option::LowLatency,
            CreateDecoderParams::Option::FullH264Parsing,
            CreateDecoderParams::Option::ErrorIfNoInitializationData),
        mTrackType, mImageContainer, knowsCompositor});

-  if (!mDecoder) {
+  if (!decoder) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }

+  // We need to wrap our decoder in a MediaDataDecoderProxy so that it always
+  // runs on an nsISerialEventTarget (which the webrtc code doesn't do)
+  mDecoder =
+      new MediaDataDecoderProxy(decoder.forget(), do_AddRef(mTaskQueue.get()));
+
   media::Await(
       do_AddRef(mThreadPool), mDecoder->Init(),
       [&](TrackInfo::TrackType) { mError = NS_OK; },

@@ -50,7 +50,6 @@ class WebrtcMediaDataDecoder : public WebrtcVideoDecoder {
  private:
   ~WebrtcMediaDataDecoder();
   void QueueFrame(MediaRawData* aFrame);
-  AbstractThread* OwnerThread() const { return mTaskQueue; }
   bool OnTaskQueue() const;
   int32_t CreateDecoder();