diff --git a/dom/media/fmp4/BlankDecoderModule.cpp b/dom/media/fmp4/BlankDecoderModule.cpp
index ce57670b0065..241f2d3757f1 100644
--- a/dom/media/fmp4/BlankDecoderModule.cpp
+++ b/dom/media/fmp4/BlankDecoderModule.cpp
@@ -213,11 +213,11 @@ public:
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
-                    layers::LayersBackend aLayersBackend,
-                    layers::ImageContainer* aImageContainer,
-                    MediaTaskQueue* aVideoTaskQueue,
-                    MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE {
+  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+                     layers::LayersBackend aLayersBackend,
+                     layers::ImageContainer* aImageContainer,
+                     MediaTaskQueue* aVideoTaskQueue,
+                     MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE {
     BlankVideoDataCreator* creator = new BlankVideoDataCreator(
       aConfig.display_width, aConfig.display_height, aImageContainer);
     nsRefPtr<MediaDataDecoder> decoder =
diff --git a/dom/media/fmp4/MP4Reader.cpp b/dom/media/fmp4/MP4Reader.cpp
index c8344679a484..45ad0f24346e 100644
--- a/dom/media/fmp4/MP4Reader.cpp
+++ b/dom/media/fmp4/MP4Reader.cpp
@@ -311,6 +311,14 @@ MP4Reader::IsSupportedAudioMimeType(const char* aMimeType)
          mPlatform->SupportsAudioMimeType(aMimeType);
 }
 
+bool
+MP4Reader::IsSupportedVideoMimeType(const char* aMimeType)
+{
+  return (!strcmp(aMimeType, "video/mp4") ||
+          !strcmp(aMimeType, "video/avc")) &&
+         mPlatform->SupportsVideoMimeType(aMimeType);
+}
+
 nsresult
 MP4Reader::ReadMetadata(MediaInfo* aInfo,
                         MetadataTags** aTags)
@@ -413,22 +421,25 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo,
 
   if (HasVideo()) {
     const VideoDecoderConfig& video = mDemuxer->VideoConfig();
+    if (mInfo.mVideo.mHasVideo && !IsSupportedVideoMimeType(video.mime_type)) {
+      return NS_ERROR_FAILURE;
+    }
     mInfo.mVideo.mDisplay =
       nsIntSize(video.display_width, video.display_height);
     mVideo.mCallback = new DecoderCallback(this, kVideo);
     if (mSharedDecoderManager) {
       mVideo.mDecoder =
-        mSharedDecoderManager->CreateH264Decoder(video,
-                                                 mLayersBackendType,
-                                                 mDecoder->GetImageContainer(),
-                                                 mVideo.mTaskQueue,
-                                                 mVideo.mCallback);
+        mSharedDecoderManager->CreateVideoDecoder(video,
+                                                  mLayersBackendType,
+                                                  mDecoder->GetImageContainer(),
+                                                  mVideo.mTaskQueue,
+                                                  mVideo.mCallback);
     } else {
-      mVideo.mDecoder = mPlatform->CreateH264Decoder(video,
-                                                     mLayersBackendType,
-                                                     mDecoder->GetImageContainer(),
-                                                     mVideo.mTaskQueue,
-                                                     mVideo.mCallback);
+      mVideo.mDecoder = mPlatform->CreateVideoDecoder(video,
+                                                      mLayersBackendType,
+                                                      mDecoder->GetImageContainer(),
+                                                      mVideo.mTaskQueue,
+                                                      mVideo.mCallback);
     }
     NS_ENSURE_TRUE(mVideo.mDecoder != nullptr, NS_ERROR_FAILURE);
     nsresult rv = mVideo.mDecoder->Init();
diff --git a/dom/media/fmp4/MP4Reader.h b/dom/media/fmp4/MP4Reader.h
index af7cf2112b0f..ba4a1625bcaa 100644
--- a/dom/media/fmp4/MP4Reader.h
+++ b/dom/media/fmp4/MP4Reader.h
@@ -109,6 +109,7 @@ private:
   void DrainComplete(mp4_demuxer::TrackType aTrack);
   void UpdateIndex();
   bool IsSupportedAudioMimeType(const char* aMimeType);
+  bool IsSupportedVideoMimeType(const char* aMimeType);
   void NotifyResourcesStatusChanged();
   bool IsWaitingOnCodecResource();
   virtual bool IsWaitingOnCDMResource() MOZ_OVERRIDE;
diff --git a/dom/media/fmp4/PlatformDecoderModule.cpp b/dom/media/fmp4/PlatformDecoderModule.cpp
index 72bf757ca7ba..b4c5ab0c0783 100644
--- a/dom/media/fmp4/PlatformDecoderModule.cpp
+++ b/dom/media/fmp4/PlatformDecoderModule.cpp
@@ -182,4 +182,10 @@ PlatformDecoderModule::SupportsAudioMimeType(const char* aMimeType)
   return !strcmp(aMimeType, "audio/mp4a-latm");
 }
 
+bool
+PlatformDecoderModule::SupportsVideoMimeType(const char* aMimeType) +{ + return !strcmp(aMimeType, "video/mp4") || !strcmp(aMimeType, "video/avc"); +} + } // namespace mozilla diff --git a/dom/media/fmp4/PlatformDecoderModule.h b/dom/media/fmp4/PlatformDecoderModule.h index 1ee322855174..ff6094f336b2 100644 --- a/dom/media/fmp4/PlatformDecoderModule.h +++ b/dom/media/fmp4/PlatformDecoderModule.h @@ -97,7 +97,7 @@ public: // It is safe to store a reference to aConfig. // This is called on the decode task queue. virtual already_AddRefed - CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, layers::LayersBackend aLayersBackend, layers::ImageContainer* aImageContainer, MediaTaskQueue* aVideoTaskQueue, @@ -122,6 +122,7 @@ public: // If more audio codec is to be supported, SupportsAudioMimeType will have // to be extended virtual bool SupportsAudioMimeType(const char* aMimeType); + virtual bool SupportsVideoMimeType(const char* aMimeType); virtual ~PlatformDecoderModule() {} diff --git a/dom/media/fmp4/SharedDecoderManager.cpp b/dom/media/fmp4/SharedDecoderManager.cpp index b8a98ba7da54..344f3dfefa03 100644 --- a/dom/media/fmp4/SharedDecoderManager.cpp +++ b/dom/media/fmp4/SharedDecoderManager.cpp @@ -66,14 +66,14 @@ SharedDecoderManager::SharedDecoderManager() SharedDecoderManager::~SharedDecoderManager() {} already_AddRefed -SharedDecoderManager::CreateH264Decoder( +SharedDecoderManager::CreateVideoDecoder( const mp4_demuxer::VideoDecoderConfig& aConfig, layers::LayersBackend aLayersBackend, layers::ImageContainer* aImageContainer, MediaTaskQueue* aVideoTaskQueue, MediaDataDecoderCallback* aCallback) { if (!mDecoder) { nsAutoPtr platform(PlatformDecoderModule::Create()); - mDecoder = platform->CreateH264Decoder( + mDecoder = platform->CreateVideoDecoder( aConfig, aLayersBackend, aImageContainer, aVideoTaskQueue, mCallback); if (!mDecoder) { return nullptr; diff --git a/dom/media/fmp4/SharedDecoderManager.h b/dom/media/fmp4/SharedDecoderManager.h index 13e8ba230a72..0835cc41fbeb 100644 --- a/dom/media/fmp4/SharedDecoderManager.h +++ b/dom/media/fmp4/SharedDecoderManager.h @@ -24,7 +24,7 @@ public: SharedDecoderManager(); - already_AddRefed CreateH264Decoder( + already_AddRefed CreateVideoDecoder( const mp4_demuxer::VideoDecoderConfig& aConfig, layers::LayersBackend aLayersBackend, layers::ImageContainer* aImageContainer, MediaTaskQueue* aVideoTaskQueue, diff --git a/dom/media/fmp4/android/AndroidDecoderModule.cpp b/dom/media/fmp4/android/AndroidDecoderModule.cpp index 7fa011d90db4..af3c01457e8f 100644 --- a/dom/media/fmp4/android/AndroidDecoderModule.cpp +++ b/dom/media/fmp4/android/AndroidDecoderModule.cpp @@ -135,7 +135,7 @@ bool AndroidDecoderModule::SupportsAudioMimeType(const char* aMimeType) { } already_AddRefed -AndroidDecoderModule::CreateH264Decoder( +AndroidDecoderModule::CreateVideoDecoder( const mp4_demuxer::VideoDecoderConfig& aConfig, layers::LayersBackend aLayersBackend, layers::ImageContainer* aImageContainer, diff --git a/dom/media/fmp4/android/AndroidDecoderModule.h b/dom/media/fmp4/android/AndroidDecoderModule.h index a3a1288cf867..343630fe143c 100644 --- a/dom/media/fmp4/android/AndroidDecoderModule.h +++ b/dom/media/fmp4/android/AndroidDecoderModule.h @@ -34,11 +34,11 @@ public: virtual nsresult Shutdown() MOZ_OVERRIDE; virtual already_AddRefed - CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, - layers::LayersBackend aLayersBackend, - layers::ImageContainer* 
aImageContainer, - MediaTaskQueue* aVideoTaskQueue, - MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE; + CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + layers::LayersBackend aLayersBackend, + layers::ImageContainer* aImageContainer, + MediaTaskQueue* aVideoTaskQueue, + MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE; virtual already_AddRefed CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig, diff --git a/dom/media/fmp4/apple/AppleDecoderModule.cpp b/dom/media/fmp4/apple/AppleDecoderModule.cpp index 94733400aff3..54f3b1225f90 100644 --- a/dom/media/fmp4/apple/AppleDecoderModule.cpp +++ b/dom/media/fmp4/apple/AppleDecoderModule.cpp @@ -144,11 +144,11 @@ AppleDecoderModule::Shutdown() } already_AddRefed -AppleDecoderModule::CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, - layers::LayersBackend aLayersBackend, - layers::ImageContainer* aImageContainer, - MediaTaskQueue* aVideoTaskQueue, - MediaDataDecoderCallback* aCallback) +AppleDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + layers::LayersBackend aLayersBackend, + layers::ImageContainer* aImageContainer, + MediaTaskQueue* aVideoTaskQueue, + MediaDataDecoderCallback* aCallback) { nsRefPtr decoder; diff --git a/dom/media/fmp4/apple/AppleDecoderModule.h b/dom/media/fmp4/apple/AppleDecoderModule.h index a42536316861..bb90d2454784 100644 --- a/dom/media/fmp4/apple/AppleDecoderModule.h +++ b/dom/media/fmp4/apple/AppleDecoderModule.h @@ -26,11 +26,11 @@ public: // Decode thread. virtual already_AddRefed - CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, - layers::LayersBackend aLayersBackend, - layers::ImageContainer* aImageContainer, - MediaTaskQueue* aVideoTaskQueue, - MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE; + CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + layers::LayersBackend aLayersBackend, + layers::ImageContainer* aImageContainer, + MediaTaskQueue* aVideoTaskQueue, + MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE; // Decode thread. 
virtual already_AddRefed diff --git a/dom/media/fmp4/eme/EMEDecoderModule.cpp b/dom/media/fmp4/eme/EMEDecoderModule.cpp index f60f70fb05bb..64d30bfbbe94 100644 --- a/dom/media/fmp4/eme/EMEDecoderModule.cpp +++ b/dom/media/fmp4/eme/EMEDecoderModule.cpp @@ -194,11 +194,11 @@ EMEDecoderModule::Shutdown() } already_AddRefed -EMEDecoderModule::CreateH264Decoder(const VideoDecoderConfig& aConfig, - layers::LayersBackend aLayersBackend, - layers::ImageContainer* aImageContainer, - MediaTaskQueue* aVideoTaskQueue, - MediaDataDecoderCallback* aCallback) +EMEDecoderModule::CreateVideoDecoder(const VideoDecoderConfig& aConfig, + layers::LayersBackend aLayersBackend, + layers::ImageContainer* aImageContainer, + MediaTaskQueue* aVideoTaskQueue, + MediaDataDecoderCallback* aCallback) { if (mCDMDecodesVideo && aConfig.crypto.valid) { nsRefPtr decoder(new EMEH264Decoder(mProxy, @@ -210,11 +210,11 @@ EMEDecoderModule::CreateH264Decoder(const VideoDecoderConfig& aConfig, return decoder.forget(); } - nsRefPtr decoder(mPDM->CreateH264Decoder(aConfig, - aLayersBackend, - aImageContainer, - aVideoTaskQueue, - aCallback)); + nsRefPtr decoder(mPDM->CreateVideoDecoder(aConfig, + aLayersBackend, + aImageContainer, + aVideoTaskQueue, + aCallback)); if (!decoder) { return nullptr; } diff --git a/dom/media/fmp4/eme/EMEDecoderModule.h b/dom/media/fmp4/eme/EMEDecoderModule.h index 89ec4597ec9e..8bd9953800a5 100644 --- a/dom/media/fmp4/eme/EMEDecoderModule.h +++ b/dom/media/fmp4/eme/EMEDecoderModule.h @@ -34,7 +34,7 @@ public: // Decode thread. virtual already_AddRefed - CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, layers::LayersBackend aLayersBackend, layers::ImageContainer* aImageContainer, MediaTaskQueue* aVideoTaskQueue, diff --git a/dom/media/fmp4/ffmpeg/FFmpegDecoderModule.h b/dom/media/fmp4/ffmpeg/FFmpegDecoderModule.h index 6f54f18fb27b..fe38198ab35e 100644 --- a/dom/media/fmp4/ffmpeg/FFmpegDecoderModule.h +++ b/dom/media/fmp4/ffmpeg/FFmpegDecoderModule.h @@ -26,11 +26,11 @@ public: virtual nsresult Shutdown() MOZ_OVERRIDE { return NS_OK; } virtual already_AddRefed - CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, - layers::LayersBackend aLayersBackend, - layers::ImageContainer* aImageContainer, - MediaTaskQueue* aVideoTaskQueue, - MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE + CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + layers::LayersBackend aLayersBackend, + layers::ImageContainer* aImageContainer, + MediaTaskQueue* aVideoTaskQueue, + MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE { nsRefPtr decoder = new FFmpegH264Decoder(aVideoTaskQueue, aCallback, aConfig, diff --git a/dom/media/fmp4/gonk/GonkDecoderModule.cpp b/dom/media/fmp4/gonk/GonkDecoderModule.cpp index d1f739da6e1d..4251bd808f45 100644 --- a/dom/media/fmp4/gonk/GonkDecoderModule.cpp +++ b/dom/media/fmp4/gonk/GonkDecoderModule.cpp @@ -35,11 +35,11 @@ GonkDecoderModule::Shutdown() } already_AddRefed -GonkDecoderModule::CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, - mozilla::layers::LayersBackend aLayersBackend, - mozilla::layers::ImageContainer* aImageContainer, - MediaTaskQueue* aVideoTaskQueue, - MediaDataDecoderCallback* aCallback) +GonkDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + mozilla::layers::LayersBackend aLayersBackend, + mozilla::layers::ImageContainer* aImageContainer, + MediaTaskQueue* aVideoTaskQueue, + MediaDataDecoderCallback* aCallback) { 
nsRefPtr decoder = new GonkMediaDataDecoder(new GonkVideoDecoderManager(aImageContainer,aConfig), diff --git a/dom/media/fmp4/gonk/GonkDecoderModule.h b/dom/media/fmp4/gonk/GonkDecoderModule.h index edda66f4d086..fd228bb97632 100644 --- a/dom/media/fmp4/gonk/GonkDecoderModule.h +++ b/dom/media/fmp4/gonk/GonkDecoderModule.h @@ -21,11 +21,11 @@ public: // Decode thread. virtual already_AddRefed - CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, - mozilla::layers::LayersBackend aLayersBackend, - mozilla::layers::ImageContainer* aImageContainer, - MediaTaskQueue* aVideoTaskQueue, - MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE; + CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + mozilla::layers::LayersBackend aLayersBackend, + mozilla::layers::ImageContainer* aImageContainer, + MediaTaskQueue* aVideoTaskQueue, + MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE; // Decode thread. virtual already_AddRefed diff --git a/dom/media/fmp4/wmf/WMFDecoderModule.cpp b/dom/media/fmp4/wmf/WMFDecoderModule.cpp index 73c8d2118387..2bdf8156d96e 100644 --- a/dom/media/fmp4/wmf/WMFDecoderModule.cpp +++ b/dom/media/fmp4/wmf/WMFDecoderModule.cpp @@ -59,16 +59,15 @@ WMFDecoderModule::Shutdown() { DebugOnly hr = wmf::MFShutdown(); NS_ASSERTION(SUCCEEDED(hr), "MFShutdown failed"); - return NS_OK; } already_AddRefed -WMFDecoderModule::CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, - layers::LayersBackend aLayersBackend, - layers::ImageContainer* aImageContainer, - MediaTaskQueue* aVideoTaskQueue, - MediaDataDecoderCallback* aCallback) +WMFDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + layers::LayersBackend aLayersBackend, + layers::ImageContainer* aImageContainer, + MediaTaskQueue* aVideoTaskQueue, + MediaDataDecoderCallback* aCallback) { nsRefPtr decoder = new WMFMediaDataDecoder(new WMFVideoMFTManager(aConfig, @@ -92,6 +91,15 @@ WMFDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aCon return decoder.forget(); } +bool +WMFDecoderModule::SupportsVideoMimeType(const char* aMimeType) +{ + return !strcmp(aMimeType, "video/mp4") || + !strcmp(aMimeType, "video/avc") || + !strcmp(aMimeType, "video/webm; codecs=vp8") || + !strcmp(aMimeType, "video/webm; codecs=vp9"); +} + bool WMFDecoderModule::SupportsAudioMimeType(const char* aMimeType) { diff --git a/dom/media/fmp4/wmf/WMFDecoderModule.h b/dom/media/fmp4/wmf/WMFDecoderModule.h index 4688fea4f290..71005da852bc 100644 --- a/dom/media/fmp4/wmf/WMFDecoderModule.h +++ b/dom/media/fmp4/wmf/WMFDecoderModule.h @@ -23,17 +23,18 @@ public: virtual nsresult Shutdown() MOZ_OVERRIDE; virtual already_AddRefed - CreateH264Decoder(const mp4_demuxer::VideoDecoderConfig& aConfig, - layers::LayersBackend aLayersBackend, - layers::ImageContainer* aImageContainer, - MediaTaskQueue* aVideoTaskQueue, - MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE; + CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig, + layers::LayersBackend aLayersBackend, + layers::ImageContainer* aImageContainer, + MediaTaskQueue* aVideoTaskQueue, + MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE; virtual already_AddRefed CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig, MediaTaskQueue* aAudioTaskQueue, MediaDataDecoderCallback* aCallback) MOZ_OVERRIDE; + bool SupportsVideoMimeType(const char* aMimeType) MOZ_OVERRIDE; bool SupportsAudioMimeType(const char* aMimeType) MOZ_OVERRIDE; // Called on main thread. 
diff --git a/dom/media/fmp4/wmf/WMFVideoMFTManager.cpp b/dom/media/fmp4/wmf/WMFVideoMFTManager.cpp index 134a851f4f18..5b83fb22c9d1 100644 --- a/dom/media/fmp4/wmf/WMFVideoMFTManager.cpp +++ b/dom/media/fmp4/wmf/WMFVideoMFTManager.cpp @@ -30,6 +30,38 @@ using mozilla::layers::Image; using mozilla::layers::LayerManager; using mozilla::layers::LayersBackend; +const GUID MFVideoFormat_VP80 = +{ + 0x30385056, + 0x0000, + 0x0010, + {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +}; + +const GUID MFVideoFormat_VP90 = +{ + 0x30395056, + 0x0000, + 0x0010, + {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +}; + +const CLSID CLSID_WebmMfVp8Dec = +{ + 0x451e3cb7, + 0x2622, + 0x4ba5, + {0x8e, 0x1d, 0x44, 0xb3, 0xc4, 0x1d, 0x09, 0x24} +}; + +const CLSID CLSID_WebmMfVp9Dec = +{ + 0x7ab4bd2, + 0x1979, + 0x4fcd, + {0xa6, 0x97, 0xdf, 0x9a, 0xd1, 0x5b, 0x34, 0xfe} +}; + namespace mozilla { WMFVideoMFTManager::WMFVideoMFTManager( @@ -48,6 +80,18 @@ WMFVideoMFTManager::WMFVideoMFTManager( NS_ASSERTION(!NS_IsMainThread(), "Should not be on main thread."); MOZ_ASSERT(mImageContainer); MOZ_COUNT_CTOR(WMFVideoMFTManager); + + // Need additional checks/params to check vp8/vp9 + if (!strcmp(aConfig.mime_type, "video/mp4") || + !strcmp(aConfig.mime_type, "video/avc")) { + mStreamType = H264; + } else if (!strcmp(aConfig.mime_type, "video/webm; codecs=vp8")) { + mStreamType = VP8; + } else if (!strcmp(aConfig.mime_type, "video/webm; codecs=vp9")) { + mStreamType = VP9; + } else { + mStreamType = Unknown; + } } WMFVideoMFTManager::~WMFVideoMFTManager() @@ -57,6 +101,30 @@ WMFVideoMFTManager::~WMFVideoMFTManager() DeleteOnMainThread(mDXVA2Manager); } +const GUID& +WMFVideoMFTManager::GetMFTGUID() +{ + MOZ_ASSERT(mStreamType != Unknown); + switch (mStreamType) { + case H264: return CLSID_CMSH264DecoderMFT; + case VP8: return CLSID_WebmMfVp8Dec; + case VP9: return CLSID_WebmMfVp9Dec; + default: return GUID_NULL; + }; +} + +const GUID& +WMFVideoMFTManager::GetMediaSubtypeGUID() +{ + MOZ_ASSERT(mStreamType != Unknown); + switch (mStreamType) { + case H264: return MFVideoFormat_H264; + case VP8: return MFVideoFormat_VP80; + case VP9: return MFVideoFormat_VP90; + default: return GUID_NULL; + }; +} + class CreateDXVAManagerEvent : public nsRunnable { public: NS_IMETHOD Run() { @@ -95,7 +163,7 @@ WMFVideoMFTManager::Init() RefPtr decoder(new MFTDecoder()); - HRESULT hr = decoder->Create(CLSID_CMSH264DecoderMFT); + HRESULT hr = decoder->Create(GetMFTGUID()); NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr); if (useDxva) { @@ -126,7 +194,7 @@ WMFVideoMFTManager::Init() hr = type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr); - hr = type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264); + hr = type->SetGUID(MF_MT_SUBTYPE, GetMediaSubtypeGUID()); NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr); hr = type->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_MixedInterlaceOrProgressive); @@ -145,8 +213,10 @@ WMFVideoMFTManager::Init() HRESULT WMFVideoMFTManager::Input(mp4_demuxer::MP4Sample* aSample) { - // We must prepare samples in AVC Annex B. - mp4_demuxer::AnnexB::ConvertSample(aSample); + if (mStreamType != VP8 && mStreamType != VP9) { + // We must prepare samples in AVC Annex B. + mp4_demuxer::AnnexB::ConvertSample(aSample); + } // Forward sample data to the decoder. 
const uint8_t* data = reinterpret_cast(aSample->data); uint32_t length = aSample->size; diff --git a/dom/media/fmp4/wmf/WMFVideoMFTManager.h b/dom/media/fmp4/wmf/WMFVideoMFTManager.h index 1e1f2c89bc7b..3594666eae3f 100644 --- a/dom/media/fmp4/wmf/WMFVideoMFTManager.h +++ b/dom/media/fmp4/wmf/WMFVideoMFTManager.h @@ -65,6 +65,18 @@ private: const bool mDXVAEnabled; const layers::LayersBackend mLayersBackend; bool mUseHwAccel; + + enum StreamType { + Unknown, + H264, + VP8, + VP9 + }; + + StreamType mStreamType; + + const GUID& GetMFTGUID(); + const GUID& GetMediaSubtypeGUID(); }; } // namespace mozilla diff --git a/dom/media/webm/IntelWebMVideoDecoder.cpp b/dom/media/webm/IntelWebMVideoDecoder.cpp new file mode 100644 index 000000000000..d68c4645c7a7 --- /dev/null +++ b/dom/media/webm/IntelWebMVideoDecoder.cpp @@ -0,0 +1,464 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#include "IntelWebMVideoDecoder.h" + +#include "gfx2DGlue.h" +#include "Layers.h" +#include "MediaResource.h" +#include "MediaTaskQueue.h" +#include "mozilla/dom/HTMLMediaElement.h" +#include "nsError.h" +#include "SharedThreadPool.h" +#include "WebMReader.h" +#include "VorbisUtils.h" +#include "nestegg/nestegg.h" + +#define VPX_DONT_DEFINE_STDINT_TYPES +#include "vpx/vp8dx.h" +#include "vpx/vpx_decoder.h" + +#undef LOG +#ifdef PR_LOGGING +PRLogModuleInfo* GetDemuxerLog(); +#define LOG(...) PR_LOG(GetDemuxerLog(), PR_LOG_DEBUG, (__VA_ARGS__)) +#else +#define LOG(...) +#endif + +using namespace mp4_demuxer; + +namespace mozilla { + +using layers::Image; +using layers::LayerManager; +using layers::LayersBackend; + +class VP8Sample : public MP4Sample +{ +public: + VP8Sample(int64_t aTimestamp, + int64_t aDuration, + int64_t aByteOffset, + uint8_t* aData, + size_t aSize, + bool aSyncPoint) + { + decode_timestamp = -1; + composition_timestamp = aTimestamp; + duration = aDuration; + byte_offset = aByteOffset; + is_sync_point = aSyncPoint; + + data = new uint8_t[aSize]; + size = aSize; + memmove(data, aData, size); + } + + ~VP8Sample() + { + delete data; + } +}; + +IntelWebMVideoDecoder::IntelWebMVideoDecoder(WebMReader* aReader) + : WebMVideoDecoder() + , mReader(aReader) + , mMonitor("IntelWebMVideoDecoder") + , mNumSamplesInput(0) + , mNumSamplesOutput(0) + , mDecodeAhead(2) + , mInputExhausted(false) + , mDrainComplete(false) + , mError(false) + , mEOS(false) + , mIsFlushing(false) +{ + MOZ_COUNT_CTOR(IntelWebMVideoDecoder); +} + +IntelWebMVideoDecoder::~IntelWebMVideoDecoder() +{ + MOZ_COUNT_DTOR(IntelWebMVideoDecoder); + Shutdown(); +} + +void +IntelWebMVideoDecoder::Shutdown() +{ + if (mMediaDataDecoder) { + Flush(); + mMediaDataDecoder->Shutdown(); + mMediaDataDecoder = nullptr; + } + + mTaskQueue = nullptr; + + mQueuedVideoSample = nullptr; + mReader = nullptr; +} + +/* static */ +WebMVideoDecoder* +IntelWebMVideoDecoder::Create(WebMReader* aReader) +{ + nsAutoPtr decoder(new IntelWebMVideoDecoder(aReader)); + + decoder->mTaskQueue = aReader->GetTaskQueue(); + NS_ENSURE_TRUE(decoder->mTaskQueue, nullptr); + + return decoder.forget(); +} + +bool +IntelWebMVideoDecoder::IsSupportedVideoMimeType(const char* aMimeType) +{ + return (!strcmp(aMimeType, "video/webm; codecs=vp8") || + !strcmp(aMimeType, "video/webm; codecs=vp9")) && + 
mPlatform->SupportsVideoMimeType(aMimeType); +} + +nsresult +IntelWebMVideoDecoder::Init(unsigned int aWidth, unsigned int aHeight) +{ + mPlatform = PlatformDecoderModule::Create(); + if (!mPlatform) { + return NS_ERROR_FAILURE; + } + + mDecoderConfig = new VideoDecoderConfig(); + mDecoderConfig->duration = 0; + mDecoderConfig->display_width = aWidth; + mDecoderConfig->display_height = aHeight; + + switch (mReader->GetVideoCodec()) { + case NESTEGG_CODEC_VP8: + mDecoderConfig->mime_type = "video/webm; codecs=vp8"; + break; + case NESTEGG_CODEC_VP9: + mDecoderConfig->mime_type = "video/webm; codecs=vp9"; + break; + default: + return NS_ERROR_FAILURE; + } + + const VideoDecoderConfig& video = *mDecoderConfig; + if (!IsSupportedVideoMimeType(video.mime_type)) { + return NS_ERROR_FAILURE; + } + mMediaDataDecoder = mPlatform->CreateVideoDecoder(video, + mReader->GetLayersBackendType(), + mReader->GetDecoder()->GetImageContainer(), + mTaskQueue, + this); + if (!mMediaDataDecoder) { + return NS_ERROR_FAILURE; + } + nsresult rv = mMediaDataDecoder->Init(); + NS_ENSURE_SUCCESS(rv, rv); + return NS_OK; +} + +bool +IntelWebMVideoDecoder::Demux(nsAutoPtr& aSample, bool* aEOS) +{ + nsAutoRef holder(mReader->NextPacket(WebMReader::VIDEO)); + if (!holder) { + return false; + } + + nestegg_packet* packet = holder->mPacket; + unsigned int track = 0; + int r = nestegg_packet_track(packet, &track); + if (r == -1) { + return false; + } + + unsigned int count = 0; + r = nestegg_packet_count(packet, &count); + if (r == -1) { + return false; + } + + uint64_t tstamp = 0; + r = nestegg_packet_tstamp(packet, &tstamp); + if (r == -1) { + return false; + } + + // The end time of this frame is the start time of the next frame. Fetch + // the timestamp of the next packet for this track. If we've reached the + // end of the resource, use the file's duration as the end time of this + // video frame. + uint64_t next_tstamp = 0; + nsAutoRef next_holder(mReader->NextPacket(WebMReader::VIDEO)); + if (next_holder) { + r = nestegg_packet_tstamp(next_holder->mPacket, &next_tstamp); + if (r == -1) { + return false; + } + mReader->PushVideoPacket(next_holder.disown()); + } else { + next_tstamp = tstamp; + next_tstamp += tstamp - mReader->GetLastVideoFrameTime(); + } + mReader->SetLastVideoFrameTime(tstamp); + + int64_t tstamp_usecs = tstamp / NS_PER_USEC; + for (uint32_t i = 0; i < count; ++i) { + unsigned char* data; + size_t length; + r = nestegg_packet_data(packet, i, &data, &length); + if (r == -1) { + return false; + } + + vpx_codec_stream_info_t si; + memset(&si, 0, sizeof(si)); + si.sz = sizeof(si); + if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP8) { + vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si); + } else if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP9) { + vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si); + } + + MOZ_ASSERT(mPlatform && mMediaDataDecoder); + + aSample = new VP8Sample(tstamp_usecs, + (next_tstamp/NS_PER_USEC) - tstamp_usecs, + 0, + data, + length, + si.is_kf); + } + + return true; +} + +bool +IntelWebMVideoDecoder::Decode() +{ + MOZ_ASSERT(mMediaDataDecoder); + + mMonitor.Lock(); + uint64_t prevNumFramesOutput = mNumSamplesOutput; + while (prevNumFramesOutput == mNumSamplesOutput) { + mMonitor.AssertCurrentThreadOwns(); + if (mError) { + // Decode error! 
+ mMonitor.Unlock(); + return false; + } + while (prevNumFramesOutput == mNumSamplesOutput && + (mInputExhausted || + (mNumSamplesInput - mNumSamplesOutput) < mDecodeAhead) && + !mEOS) { + mMonitor.AssertCurrentThreadOwns(); + mMonitor.Unlock(); + nsAutoPtr compressed(PopSample()); + if (!compressed) { + // EOS, or error. Let the state machine know there are no more + // frames coming. + LOG("Draining Video"); + mMonitor.Lock(); + MOZ_ASSERT(!mEOS); + mEOS = true; + MOZ_ASSERT(!mDrainComplete); + mDrainComplete = false; + mMonitor.Unlock(); + mMediaDataDecoder->Drain(); + } else { +#ifdef LOG_SAMPLE_DECODE + LOG("PopSample %s time=%lld dur=%lld", TrackTypeToStr(aTrack), + compressed->composition_timestamp, compressed->duration); +#endif + mMonitor.Lock(); + mDrainComplete = false; + mInputExhausted = false; + mNumSamplesInput++; + mMonitor.Unlock(); + if (NS_FAILED(mMediaDataDecoder->Input(compressed))) { + return false; + } + // If Input() failed, we let the auto pointer delete |compressed|. + // Otherwise, we assume the decoder will delete it when it's finished + // with it. + compressed.forget(); + } + mMonitor.Lock(); + } + mMonitor.AssertCurrentThreadOwns(); + while (!mError && + prevNumFramesOutput == mNumSamplesOutput && + (!mInputExhausted || mEOS) && + !mDrainComplete) { + mMonitor.Wait(); + } + if (mError || + (mEOS && mDrainComplete)) { + break; + } + + } + mMonitor.AssertCurrentThreadOwns(); + bool rv = !(mEOS || mError); + mMonitor.Unlock(); + return rv; +} + +bool +IntelWebMVideoDecoder::SkipVideoDemuxToNextKeyFrame(int64_t aTimeThreshold, uint32_t& aParsed) +{ + MOZ_ASSERT(mReader->GetDecoder()); + + Flush(); + + // Loop until we reach the next keyframe after the threshold. + while (true) { + nsAutoPtr compressed(PopSample()); + if (!compressed) { + // EOS, or error. Let the state machine know. + return false; + } + aParsed++; + if (!compressed->is_sync_point || + compressed->composition_timestamp < aTimeThreshold) { + continue; + } + mQueuedVideoSample = compressed; + break; + } + + return true; +} + +bool +IntelWebMVideoDecoder::DecodeVideoFrame(bool& aKeyframeSkip, + int64_t aTimeThreshold) +{ + uint32_t parsed = 0, decoded = 0; + AbstractMediaDecoder::AutoNotifyDecoded autoNotify(mReader->GetDecoder(), parsed, decoded); + + MOZ_ASSERT(mPlatform && mReader->GetDecoder()); + + if (aKeyframeSkip) { + bool ok = SkipVideoDemuxToNextKeyFrame(aTimeThreshold, parsed); + if (!ok) { + NS_WARNING("Failed to skip demux up to next keyframe"); + return false; + } + aKeyframeSkip = false; + nsresult rv = mMediaDataDecoder->Flush(); + NS_ENSURE_SUCCESS(rv, false); + } + + NS_ASSERTION(mReader->GetDecoder()->OnDecodeThread(), "Should be on decode thread."); + bool rv = Decode(); + { + // Report the number of "decoded" frames as the difference in the + // mNumSamplesOutput field since the last time we were called. 
+ MonitorAutoLock mon(mMonitor); + uint64_t delta = mNumSamplesOutput - mLastReportedNumDecodedFrames; + decoded = static_cast(delta); + mLastReportedNumDecodedFrames = mNumSamplesOutput; + } + return rv; +} + +VP8Sample* +IntelWebMVideoDecoder::PopSample() +{ + VP8Sample* sample = nullptr; + if (mQueuedVideoSample) { + return mQueuedVideoSample.forget(); + } + while (mSampleQueue.empty()) { + nsAutoPtr sample; + bool eos = false; + bool ok = Demux(sample, &eos); + if (!ok || eos) { + MOZ_ASSERT(!sample); + return nullptr; + } + MOZ_ASSERT(sample); + mSampleQueue.push_back(sample.forget()); + } + + MOZ_ASSERT(!mSampleQueue.empty()); + sample = mSampleQueue.front(); + mSampleQueue.pop_front(); + return sample; +} + +void +IntelWebMVideoDecoder::Output(MediaData* aSample) +{ +#ifdef LOG_SAMPLE_DECODE + LOG("Decoded video sample time=%lld dur=%lld", + aSample->mTime, aSample->mDuration); +#endif + + // Don't accept output while we're flushing. + MonitorAutoLock mon(mMonitor); + if (mIsFlushing) { + mon.NotifyAll(); + return; + } + + MOZ_ASSERT(aSample->mType == MediaData::VIDEO_DATA); + mReader->VideoQueue().Push(static_cast(aSample)); + + mNumSamplesOutput++; + mon.NotifyAll(); +} + +void +IntelWebMVideoDecoder::DrainComplete() +{ + MonitorAutoLock mon(mMonitor); + mDrainComplete = true; + mon.NotifyAll(); +} + +void +IntelWebMVideoDecoder::InputExhausted() +{ + MonitorAutoLock mon(mMonitor); + mInputExhausted = true; + mon.NotifyAll(); +} + +void +IntelWebMVideoDecoder::Error() +{ + MonitorAutoLock mon(mMonitor); + mError = true; + mon.NotifyAll(); +} + +nsresult +IntelWebMVideoDecoder::Flush() +{ + if (!mReader->GetDecoder()) { + return NS_ERROR_FAILURE; + } + // Purge the current decoder's state. + // Set a flag so that we ignore all output while we call + // MediaDataDecoder::Flush(). + { + MonitorAutoLock mon(mMonitor); + mIsFlushing = true; + mDrainComplete = false; + mEOS = false; + } + mMediaDataDecoder->Flush(); + { + MonitorAutoLock mon(mMonitor); + mIsFlushing = false; + } + return NS_OK; +} + +} // namespace mozilla diff --git a/dom/media/webm/IntelWebMVideoDecoder.h b/dom/media/webm/IntelWebMVideoDecoder.h new file mode 100644 index 000000000000..fcf6d8eb5c0c --- /dev/null +++ b/dom/media/webm/IntelWebMVideoDecoder.h @@ -0,0 +1,91 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ +#if !defined(IntelWebMVideoDecoder_h_) +#define IntelWebMVideoDecoder_h_ + +#include + +#include "WebMReader.h" +#include "nsAutoPtr.h" +#include "PlatformDecoderModule.h" +#include "mozilla/Monitor.h" + +#include "mp4_demuxer/mp4_demuxer.h" +#include "mp4_demuxer/DecoderData.h" + +class MediaTaskQueue; + +namespace mozilla { + +class VP8Sample; + +typedef std::deque VP8SampleQueue; + +class IntelWebMVideoDecoder : public WebMVideoDecoder, public MediaDataDecoderCallback +{ +public: + static WebMVideoDecoder* Create(WebMReader* aReader); + virtual nsresult Init(unsigned int aWidth, unsigned int aHeight) MOZ_OVERRIDE; + virtual nsresult Flush() MOZ_OVERRIDE; + virtual void Shutdown() MOZ_OVERRIDE; + + virtual bool DecodeVideoFrame(bool &aKeyframeSkip, + int64_t aTimeThreshold) MOZ_OVERRIDE; + + virtual void Output(MediaData* aSample) MOZ_OVERRIDE; + + virtual void DrainComplete() MOZ_OVERRIDE; + + virtual void InputExhausted() MOZ_OVERRIDE; + virtual void Error() MOZ_OVERRIDE; + + IntelWebMVideoDecoder(WebMReader* aReader); + ~IntelWebMVideoDecoder(); + +private: + void InitLayersBackendType(); + + bool Decode(); + + bool Demux(nsAutoPtr& aSample, bool* aEOS); + + bool SkipVideoDemuxToNextKeyFrame(int64_t aTimeThreshold, uint32_t& parsed); + + bool IsSupportedVideoMimeType(const char* aMimeType); + + VP8Sample* PopSample(); + + nsRefPtr mReader; + nsAutoPtr mPlatform; + nsRefPtr mMediaDataDecoder; + + // TaskQueue on which decoder can choose to decode. + // Only non-null up until the decoder is created. + nsRefPtr mTaskQueue; + + // Monitor that protects all non-threadsafe state; the primitives + // that follow. + Monitor mMonitor; + nsAutoPtr mDecoderConfig; + + VP8SampleQueue mSampleQueue; + nsAutoPtr mQueuedVideoSample; + uint64_t mNumSamplesInput; + uint64_t mNumSamplesOutput; + uint64_t mLastReportedNumDecodedFrames; + uint32_t mDecodeAhead; + + // Whether this stream exists in the media. + bool mInputExhausted; + bool mDrainComplete; + bool mError; + bool mEOS; + bool mIsFlushing; +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/webm/SoftwareWebMVideoDecoder.cpp b/dom/media/webm/SoftwareWebMVideoDecoder.cpp new file mode 100644 index 000000000000..0d9e0e4054e1 --- /dev/null +++ b/dom/media/webm/SoftwareWebMVideoDecoder.cpp @@ -0,0 +1,236 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ +#include "SoftwareWebMVideoDecoder.h" +#include "AbstractMediaDecoder.h" +#include "gfx2DGlue.h" +#include "MediaDecoderStateMachine.h" +#include "MediaResource.h" +#include "mozilla/dom/TimeRanges.h" +#include "nsError.h" +#include "OggReader.h" +#include "VorbisUtils.h" +#include "WebMBufferedParser.h" +#include "WebMReader.h" + +#include + +#define VPX_DONT_DEFINE_STDINT_TYPES +#include "vpx/vp8dx.h" +#include "vpx/vpx_decoder.h" + +static const unsigned NS_PER_USEC = 1000; +static const unsigned NS_PER_S = 1e9; + +namespace mozilla { + +using namespace gfx; +using namespace layers; + +SoftwareWebMVideoDecoder::SoftwareWebMVideoDecoder(WebMReader* aReader) + : WebMVideoDecoder(), + mReader(aReader) +{ + MOZ_COUNT_CTOR(SoftwareWebMVideoDecoder); + memset(&mVPX, 0, sizeof(vpx_codec_ctx_t)); +} + +SoftwareWebMVideoDecoder::~SoftwareWebMVideoDecoder() +{ + MOZ_COUNT_DTOR(SoftwareWebMVideoDecoder); +} + +void +SoftwareWebMVideoDecoder::Shutdown() +{ + vpx_codec_destroy(&mVPX); + mReader = nullptr; +} + +/* static */ +WebMVideoDecoder* +SoftwareWebMVideoDecoder::Create(WebMReader* aReader) +{ + return new SoftwareWebMVideoDecoder(aReader); +} + +nsresult +SoftwareWebMVideoDecoder::Init(unsigned int aWidth, unsigned int aHeight) +{ + vpx_codec_iface_t* dx = nullptr; + switch(mReader->GetVideoCodec()) { + case NESTEGG_CODEC_VP8: + dx = vpx_codec_vp8_dx(); + break; + case NESTEGG_CODEC_VP9: + dx = vpx_codec_vp9_dx(); + break; + } + if (!dx || vpx_codec_dec_init(&mVPX, dx, nullptr, 0)) { + return NS_ERROR_FAILURE; + } + return NS_OK; +} + +bool +SoftwareWebMVideoDecoder::DecodeVideoFrame(bool &aKeyframeSkip, + int64_t aTimeThreshold) +{ + NS_ASSERTION(mReader->GetDecoder()->OnDecodeThread(), + "Should be on decode thread."); + + // Record number of frames decoded and parsed. Automatically update the + // stats counters using the AutoNotifyDecoded stack-based class. + uint32_t parsed = 0, decoded = 0; + AbstractMediaDecoder::AutoNotifyDecoded autoNotify(mReader->GetDecoder(), + parsed, decoded); + + nsAutoRef holder(mReader->NextPacket(WebMReader::VIDEO)); + if (!holder) { + return false; + } + + nestegg_packet* packet = holder->mPacket; + unsigned int track = 0; + int r = nestegg_packet_track(packet, &track); + if (r == -1) { + return false; + } + + unsigned int count = 0; + r = nestegg_packet_count(packet, &count); + if (r == -1) { + return false; + } + + uint64_t tstamp = 0; + r = nestegg_packet_tstamp(packet, &tstamp); + if (r == -1) { + return false; + } + + // The end time of this frame is the start time of the next frame. Fetch + // the timestamp of the next packet for this track. If we've reached the + // end of the resource, use the file's duration as the end time of this + // video frame. 
+ uint64_t next_tstamp = 0; + nsAutoRef next_holder(mReader->NextPacket(WebMReader::VIDEO)); + if (next_holder) { + r = nestegg_packet_tstamp(next_holder->mPacket, &next_tstamp); + if (r == -1) { + return false; + } + mReader->PushVideoPacket(next_holder.disown()); + } else { + next_tstamp = tstamp; + next_tstamp += tstamp - mReader->GetLastVideoFrameTime(); + } + mReader->SetLastVideoFrameTime(tstamp); + + int64_t tstamp_usecs = tstamp / NS_PER_USEC; + for (uint32_t i = 0; i < count; ++i) { + unsigned char* data; + size_t length; + r = nestegg_packet_data(packet, i, &data, &length); + if (r == -1) { + return false; + } + + vpx_codec_stream_info_t si; + memset(&si, 0, sizeof(si)); + si.sz = sizeof(si); + if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP8) { + vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si); + } else if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP9) { + vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si); + } + if (aKeyframeSkip && (!si.is_kf || tstamp_usecs < aTimeThreshold)) { + // Skipping to next keyframe... + parsed++; // Assume 1 frame per chunk. + continue; + } + + if (aKeyframeSkip && si.is_kf) { + aKeyframeSkip = false; + } + + if (vpx_codec_decode(&mVPX, data, length, nullptr, 0)) { + return false; + } + + // If the timestamp of the video frame is less than + // the time threshold required then it is not added + // to the video queue and won't be displayed. + if (tstamp_usecs < aTimeThreshold) { + parsed++; // Assume 1 frame per chunk. + continue; + } + + vpx_codec_iter_t iter = nullptr; + vpx_image_t *img; + + while ((img = vpx_codec_get_frame(&mVPX, &iter))) { + NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420, "WebM image format not I420"); + + // Chroma shifts are rounded down as per the decoding examples in the SDK + VideoData::YCbCrBuffer b; + b.mPlanes[0].mData = img->planes[0]; + b.mPlanes[0].mStride = img->stride[0]; + b.mPlanes[0].mHeight = img->d_h; + b.mPlanes[0].mWidth = img->d_w; + b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0; + + b.mPlanes[1].mData = img->planes[1]; + b.mPlanes[1].mStride = img->stride[1]; + b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift; + b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift; + b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0; + + b.mPlanes[2].mData = img->planes[2]; + b.mPlanes[2].mStride = img->stride[2]; + b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift; + b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift; + b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0; + + nsIntRect pictureRect = mReader->GetPicture(); + IntRect picture = ToIntRect(pictureRect); + nsIntSize initFrame = mReader->GetInitialFrame(); + if (img->d_w != static_cast(initFrame.width) || + img->d_h != static_cast(initFrame.height)) { + // Frame size is different from what the container reports. This is + // legal in WebM, and we will preserve the ratio of the crop rectangle + // as it was reported relative to the picture size reported by the + // container. 
+ picture.x = (pictureRect.x * img->d_w) / initFrame.width; + picture.y = (pictureRect.y * img->d_h) / initFrame.height; + picture.width = (img->d_w * pictureRect.width) / initFrame.width; + picture.height = (img->d_h * pictureRect.height) / initFrame.height; + } + + VideoInfo videoInfo = mReader->GetMediaInfo().mVideo; + nsRefPtr v = VideoData::Create(videoInfo, + mReader->GetDecoder()->GetImageContainer(), + holder->mOffset, + tstamp_usecs, + (next_tstamp / NS_PER_USEC) - tstamp_usecs, + b, + si.is_kf, + -1, + picture); + if (!v) { + return false; + } + parsed++; + decoded++; + NS_ASSERTION(decoded <= parsed, + "Expect only 1 frame per chunk per packet in WebM..."); + mReader->VideoQueue().Push(v); + } + } + + return true; +} + +} // namespace mozilla diff --git a/dom/media/webm/SoftwareWebMVideoDecoder.h b/dom/media/webm/SoftwareWebMVideoDecoder.h new file mode 100644 index 000000000000..36b345bd15ca --- /dev/null +++ b/dom/media/webm/SoftwareWebMVideoDecoder.h @@ -0,0 +1,39 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#if !defined(SoftwareWebMVideoDecoder_h_) +#define SoftwareWebMVideoDecoder_h_ + +#include + +#include "WebMReader.h" + +namespace mozilla { + +class SoftwareWebMVideoDecoder : public WebMVideoDecoder +{ +public: + static WebMVideoDecoder* Create(WebMReader* aReader); + + virtual nsresult Init(unsigned int aWidth, unsigned int aHeight) MOZ_OVERRIDE; + + virtual bool DecodeVideoFrame(bool &aKeyframeSkip, + int64_t aTimeThreshold) MOZ_OVERRIDE; + + virtual void Shutdown() MOZ_OVERRIDE; + + SoftwareWebMVideoDecoder(WebMReader* aReader); + ~SoftwareWebMVideoDecoder(); + +private: + nsRefPtr mReader; + + // VPx decoder state + vpx_codec_ctx_t mVPX; +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/webm/WebMReader.cpp b/dom/media/webm/WebMReader.cpp index 9357f79bb125..9c68eb4632e8 100644 --- a/dom/media/webm/WebMReader.cpp +++ b/dom/media/webm/WebMReader.cpp @@ -7,11 +7,15 @@ #include "MediaDecoderStateMachine.h" #include "AbstractMediaDecoder.h" #include "MediaResource.h" +#include "SoftwareWebMVideoDecoder.h" #include "WebMReader.h" #include "WebMBufferedParser.h" #include "mozilla/dom/TimeRanges.h" #include "VorbisUtils.h" #include "gfx2DGlue.h" +#include "Layers.h" +#include "mozilla/Preferences.h" +#include "SharedThreadPool.h" #include @@ -21,19 +25,17 @@ #include "OggReader.h" -using mozilla::NesteggPacketHolder; - -template <> -class nsAutoRefTraits : - public nsPointerRefTraits -{ -public: - static void Release(NesteggPacketHolder* aHolder) { delete aHolder; } -}; +// IntelWebMVideoDecoder uses the WMF backend, which is Windows Vista+ only. +#if defined(MOZ_FMP4) && defined(MOZ_WMF) +#include "IntelWebMVideoDecoder.h" +#define MOZ_PDM_VPX 1 +#endif // Un-comment to enable logging of seek bisections. //#define SEEK_LOGGING +#undef LOG + #ifdef PR_LOGGING #include "prprf.h" #define LOG(type, msg) PR_LOG(gMediaDecoderLog, type, msg) @@ -55,9 +57,6 @@ using namespace layers; extern PRLogModuleInfo* gMediaDecoderLog; PRLogModuleInfo* gNesteggLog; -static const unsigned NS_PER_USEC = 1000; -static const double NS_PER_S = 1e9; - // Functions for reading and seeking using MediaResource required for // nestegg_io. 
The 'user data' passed to these functions is the // decoder from which the media resource is obtained. @@ -148,9 +147,9 @@ static void webm_log(nestegg * context, #endif } -ogg_packet -InitOggPacket(const unsigned char* aData, size_t aLength, bool aBOS, bool aEOS, - int64_t aGranulepos, int64_t aPacketNo) +ogg_packet InitOggPacket(const unsigned char* aData, size_t aLength, + bool aBOS, bool aEOS, + int64_t aGranulepos, int64_t aPacketNo) { ogg_packet packet; packet.packet = const_cast(aData); @@ -162,6 +161,10 @@ InitOggPacket(const unsigned char* aData, size_t aLength, bool aBOS, bool aEOS, return packet; } +#if defined(MOZ_PDM_VPX) +static bool sIsIntelDecoderEnabled = false; +#endif + WebMReader::WebMReader(AbstractMediaDecoder* aDecoder) : MediaDecoderReader(aDecoder) , mContext(nullptr) @@ -178,6 +181,7 @@ WebMReader::WebMReader(AbstractMediaDecoder* aDecoder) , mLastVideoFrameTime(0) , mAudioCodec(-1) , mVideoCodec(-1) + , mLayersBackendType(layers::LayersBackend::LAYERS_NONE) , mHasVideo(false) , mHasAudio(false) #ifdef MOZ_OPUS @@ -192,43 +196,66 @@ WebMReader::WebMReader(AbstractMediaDecoder* aDecoder) #endif // Zero these member vars to avoid crashes in VP8 destroy and Vorbis clear // functions when destructor is called before |Init|. - memset(&mVPX, 0, sizeof(vpx_codec_ctx_t)); memset(&mVorbisBlock, 0, sizeof(vorbis_block)); memset(&mVorbisDsp, 0, sizeof(vorbis_dsp_state)); memset(&mVorbisInfo, 0, sizeof(vorbis_info)); memset(&mVorbisComment, 0, sizeof(vorbis_comment)); + +#if defined(MOZ_PDM_VPX) + sIsIntelDecoderEnabled = Preferences::GetBool("media.webm.intel_decoder.enabled", false); +#endif } WebMReader::~WebMReader() { Cleanup(); - mVideoPackets.Reset(); mAudioPackets.Reset(); - - vpx_codec_destroy(&mVPX); - vorbis_block_clear(&mVorbisBlock); vorbis_dsp_clear(&mVorbisDsp); vorbis_info_clear(&mVorbisInfo); vorbis_comment_clear(&mVorbisComment); - if (mOpusDecoder) { opus_multistream_decoder_destroy(mOpusDecoder); mOpusDecoder = nullptr; } - + MOZ_ASSERT(!mVideoDecoder); MOZ_COUNT_DTOR(WebMReader); } +void WebMReader::Shutdown() +{ +#if defined(MOZ_PDM_VPX) + if (mTaskQueue) { + mTaskQueue->Shutdown(); + } +#endif + + if (mVideoDecoder) { + mVideoDecoder->Shutdown(); + mVideoDecoder = nullptr; + } +} + nsresult WebMReader::Init(MediaDecoderReader* aCloneDonor) { - vorbis_info_init(&mVorbisInfo); vorbis_comment_init(&mVorbisComment); memset(&mVorbisDsp, 0, sizeof(vorbis_dsp_state)); memset(&mVorbisBlock, 0, sizeof(vorbis_block)); +#if defined(MOZ_PDM_VPX) + if (sIsIntelDecoderEnabled) { + PlatformDecoderModule::Init(); + + InitLayersBackendType(); + + mTaskQueue = new MediaTaskQueue( + SharedThreadPool::Get(NS_LITERAL_CSTRING("IntelVP8 Video Decode"))); + NS_ENSURE_TRUE(mTaskQueue, NS_ERROR_FAILURE); + } +#endif + if (aCloneDonor) { mBufferedState = static_cast(aCloneDonor)->mBufferedState; } else { @@ -238,6 +265,31 @@ nsresult WebMReader::Init(MediaDecoderReader* aCloneDonor) return NS_OK; } +void WebMReader::InitLayersBackendType() +{ + if (!IsVideoContentType(GetDecoder()->GetResource()->GetContentType())) { + // Not playing video, we don't care about the layers backend type. + return; + } + // Extract the layer manager backend type so that platform decoders + // can determine whether it's worthwhile using hardware accelerated + // video decoding. 
+ MediaDecoderOwner* owner = mDecoder->GetOwner(); + if (!owner) { + NS_WARNING("WebMReader without a decoder owner, can't get HWAccel"); + return; + } + + dom::HTMLMediaElement* element = owner->GetMediaElement(); + NS_ENSURE_TRUE_VOID(element); + + nsRefPtr layerManager = + nsContentUtils::LayerManagerForDocument(element->OwnerDoc()); + NS_ENSURE_TRUE_VOID(layerManager); + + mLayersBackendType = layerManager->GetCompositorBackendType(); +} + nsresult WebMReader::ResetDecode() { mAudioFrames = 0; @@ -315,7 +367,8 @@ nsresult WebMReader::ReadMetadata(MediaInfo* aInfo, return NS_ERROR_FAILURE; } int type = nestegg_track_type(mContext, track); - if (!mHasVideo && type == NESTEGG_TRACK_VIDEO) { + if (!mHasVideo && type == NESTEGG_TRACK_VIDEO && + mDecoder->GetImageContainer()) { nestegg_video_params params; r = nestegg_track_video_params(mContext, track, ¶ms); if (r == -1) { @@ -323,14 +376,28 @@ nsresult WebMReader::ReadMetadata(MediaInfo* aInfo, return NS_ERROR_FAILURE; } - vpx_codec_iface_t* dx = nullptr; mVideoCodec = nestegg_track_codec_id(mContext, track); - if (mVideoCodec == NESTEGG_CODEC_VP8) { - dx = vpx_codec_vp8_dx(); - } else if (mVideoCodec == NESTEGG_CODEC_VP9) { - dx = vpx_codec_vp9_dx(); + +#if defined(MOZ_PDM_VPX) + if (sIsIntelDecoderEnabled) { + mVideoDecoder = IntelWebMVideoDecoder::Create(this); + if (mVideoDecoder && + NS_FAILED(mVideoDecoder->Init(params.display_width, params.display_height))) { + mVideoDecoder = nullptr; + } } - if (!dx || vpx_codec_dec_init(&mVPX, dx, nullptr, 0)) { +#endif + + // If there's no decoder yet (e.g. HW decoder not available), use the software decoder. + if (!mVideoDecoder) { + mVideoDecoder = SoftwareWebMVideoDecoder::Create(this); + if (mVideoDecoder && + NS_FAILED(mVideoDecoder->Init(params.display_width, params.display_height))) { + mVideoDecoder = nullptr; + } + } + + if (!mVideoDecoder) { Cleanup(); return NS_ERROR_FAILURE; } @@ -348,8 +415,7 @@ nsresult WebMReader::ReadMetadata(MediaInfo* aInfo, if (pictureRect.width <= 0 || pictureRect.height <= 0 || pictureRect.x < 0 || - pictureRect.y < 0) - { + pictureRect.y < 0) { pictureRect.x = 0; pictureRect.y = 0; pictureRect.width = params.width; @@ -390,8 +456,7 @@ nsresult WebMReader::ReadMetadata(MediaInfo* aInfo, mInfo.mVideo.mStereoMode = StereoMode::RIGHT_LEFT; break; } - } - else if (!mHasAudio && type == NESTEGG_TRACK_AUDIO) { + } else if (!mHasAudio && type == NESTEGG_TRACK_AUDIO) { nestegg_audio_params params; r = nestegg_track_audio_params(mContext, track, ¶ms); if (r == -1) { @@ -888,161 +953,12 @@ bool WebMReader::DecodeAudioData() return DecodeAudioPacket(holder->mPacket, holder->mOffset); } -bool WebMReader::DecodeVideoFrame(bool &aKeyframeSkip, - int64_t aTimeThreshold) +bool WebMReader::DecodeVideoFrame(bool &aKeyframeSkip, int64_t aTimeThreshold) { - NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread."); - - // Record number of frames decoded and parsed. Automatically update the - // stats counters using the AutoNotifyDecoded stack-based class. 
- uint32_t parsed = 0, decoded = 0; - AbstractMediaDecoder::AutoNotifyDecoded autoNotify(mDecoder, parsed, decoded); - - nsAutoRef holder(NextPacket(VIDEO)); - if (!holder) { - return false; - } - - nestegg_packet* packet = holder->mPacket; - unsigned int track = 0; - int r = nestegg_packet_track(packet, &track); - if (r == -1) { - return false; - } - - unsigned int count = 0; - r = nestegg_packet_count(packet, &count); - if (r == -1) { - return false; - } - - uint64_t tstamp = 0; - r = nestegg_packet_tstamp(packet, &tstamp); - if (r == -1) { - return false; - } - - // The end time of this frame is the start time of the next frame. Fetch - // the timestamp of the next packet for this track. If we've reached the - // end of the resource, use the file's duration as the end time of this - // video frame. - uint64_t next_tstamp = 0; - nsAutoRef next_holder(NextPacket(VIDEO)); - if (next_holder) { - r = nestegg_packet_tstamp(next_holder->mPacket, &next_tstamp); - if (r == -1) { - return false; - } - PushVideoPacket(next_holder.disown()); - } else { - next_tstamp = tstamp; - next_tstamp += tstamp - mLastVideoFrameTime; - } - mLastVideoFrameTime = tstamp; - - int64_t tstamp_usecs = tstamp / NS_PER_USEC; - for (uint32_t i = 0; i < count; ++i) { - unsigned char* data; - size_t length; - r = nestegg_packet_data(packet, i, &data, &length); - if (r == -1) { - return false; - } - - vpx_codec_stream_info_t si; - memset(&si, 0, sizeof(si)); - si.sz = sizeof(si); - if (mVideoCodec == NESTEGG_CODEC_VP8) { - vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si); - } else if (mVideoCodec == NESTEGG_CODEC_VP9) { - vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si); - } - if (aKeyframeSkip && (!si.is_kf || tstamp_usecs < aTimeThreshold)) { - // Skipping to next keyframe... - parsed++; // Assume 1 frame per chunk. - continue; - } - - if (aKeyframeSkip && si.is_kf) { - aKeyframeSkip = false; - } - - if (vpx_codec_decode(&mVPX, data, length, nullptr, 0)) { - return false; - } - - // If the timestamp of the video frame is less than - // the time threshold required then it is not added - // to the video queue and won't be displayed. - if (tstamp_usecs < aTimeThreshold) { - parsed++; // Assume 1 frame per chunk. - continue; - } - - vpx_codec_iter_t iter = nullptr; - vpx_image_t *img; - - while ((img = vpx_codec_get_frame(&mVPX, &iter))) { - NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420, "WebM image format not I420"); - - // Chroma shifts are rounded down as per the decoding examples in the SDK - VideoData::YCbCrBuffer b; - b.mPlanes[0].mData = img->planes[0]; - b.mPlanes[0].mStride = img->stride[0]; - b.mPlanes[0].mHeight = img->d_h; - b.mPlanes[0].mWidth = img->d_w; - b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0; - - b.mPlanes[1].mData = img->planes[1]; - b.mPlanes[1].mStride = img->stride[1]; - b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift; - b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift; - b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0; - - b.mPlanes[2].mData = img->planes[2]; - b.mPlanes[2].mStride = img->stride[2]; - b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift; - b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift; - b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0; - - IntRect picture = ToIntRect(mPicture); - if (img->d_w != static_cast(mInitialFrame.width) || - img->d_h != static_cast(mInitialFrame.height)) { - // Frame size is different from what the container reports. 
This is - // legal in WebM, and we will preserve the ratio of the crop rectangle - // as it was reported relative to the picture size reported by the - // container. - picture.x = (mPicture.x * img->d_w) / mInitialFrame.width; - picture.y = (mPicture.y * img->d_h) / mInitialFrame.height; - picture.width = (img->d_w * mPicture.width) / mInitialFrame.width; - picture.height = (img->d_h * mPicture.height) / mInitialFrame.height; - } - - nsRefPtr v = VideoData::Create(mInfo.mVideo, - mDecoder->GetImageContainer(), - holder->mOffset, - tstamp_usecs, - (next_tstamp / NS_PER_USEC)-tstamp_usecs, - b, - si.is_kf, - -1, - picture); - if (!v) { - return false; - } - parsed++; - decoded++; - NS_ASSERTION(decoded <= parsed, - "Expect only 1 frame per chunk per packet in WebM..."); - VideoQueue().Push(v); - } - } - - return true; + return mVideoDecoder->DecodeVideoFrame(aKeyframeSkip, aTimeThreshold); } -void -WebMReader::PushVideoPacket(NesteggPacketHolder* aItem) +void WebMReader::PushVideoPacket(NesteggPacketHolder* aItem) { mVideoPackets.PushFront(aItem); } @@ -1057,6 +973,8 @@ void WebMReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, nsresult WebMReader::SeekInternal(int64_t aTarget, int64_t aStartTime) { NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread."); + nsresult rv = mVideoDecoder->Flush(); + NS_ENSURE_SUCCESS(rv, rv); LOG(PR_LOG_DEBUG, ("Reader [%p] for Decoder [%p]: About to seek to %fs", this, mDecoder, double(aTarget) / USECS_PER_S)); @@ -1163,4 +1081,29 @@ int64_t WebMReader::GetEvictionOffset(double aTime) return offset; } +int WebMReader::GetVideoCodec() +{ + return mVideoCodec; +} + +nsIntRect WebMReader::GetPicture() +{ + return mPicture; +} + +nsIntSize WebMReader::GetInitialFrame() +{ + return mInitialFrame; +} + +uint64_t WebMReader::GetLastVideoFrameTime() +{ + return mLastVideoFrameTime; +} + +void WebMReader::SetLastVideoFrameTime(uint64_t aFrameTime) +{ + mLastVideoFrameTime = aFrameTime; +} + } // namespace mozilla diff --git a/dom/media/webm/WebMReader.h b/dom/media/webm/WebMReader.h index 46102e15e161..4fdeaca36650 100644 --- a/dom/media/webm/WebMReader.h +++ b/dom/media/webm/WebMReader.h @@ -16,6 +16,8 @@ #define VPX_DONT_DEFINE_STDINT_TYPES #include "vpx/vpx_codec.h" +#include "mozilla/layers/LayersTypes.h" + #ifdef MOZ_TREMOR #include "tremor/ivorbiscodec.h" #else @@ -26,10 +28,6 @@ #include "OpusParser.h" #endif -namespace mozilla { - -class WebMBufferedState; - // Holds a nestegg_packet, and its file offset. This is needed so we // know the offset in the file we've played up to, in order to calculate // whether it's likely we can play through to the end without needing @@ -55,6 +53,18 @@ private: NesteggPacketHolder& operator= (NesteggPacketHolder const& aOther); }; +template <> +class nsAutoRefTraits : public nsPointerRefTraits +{ +public: + static void Release(NesteggPacketHolder* aHolder) { delete aHolder; } +}; + +namespace mozilla { +class WebMBufferedState; +static const unsigned NS_PER_USEC = 1000; +static const double NS_PER_S = 1e9; + // Thread and type safe wrapper around nsDeque. 
class PacketQueueDeallocator : public nsDequeFunctor { virtual void* operator() (void* aObject) { @@ -101,6 +111,21 @@ class WebMPacketQueue : private nsDeque { } }; +class WebMReader; + +// Class to handle various video decode paths +class WebMVideoDecoder +{ +public: + virtual nsresult Init(unsigned int aWidth = 0, unsigned int aHeight = 0) = 0; + virtual nsresult Flush() { return NS_OK; } + virtual void Shutdown() = 0; + virtual bool DecodeVideoFrame(bool &aKeyframeSkip, + int64_t aTimeThreshold) = 0; + WebMVideoDecoder() {} + virtual ~WebMVideoDecoder() {} +}; + class WebMReader : public MediaDecoderReader { public: @@ -110,15 +135,13 @@ protected: ~WebMReader(); public: + virtual void Shutdown() MOZ_OVERRIDE; virtual nsresult Init(MediaDecoderReader* aCloneDonor); virtual nsresult ResetDecode(); virtual bool DecodeAudioData(); - // If the Theora granulepos has not been captured, it may read several packets - // until one with a granulepos has been captured, to ensure that all packets - // read have valid time info. virtual bool DecodeVideoFrame(bool &aKeyframeSkip, - int64_t aTimeThreshold); + int64_t aTimeThreshold); virtual bool HasAudio() { @@ -143,7 +166,6 @@ public: virtual bool IsMediaSeekable() MOZ_OVERRIDE; -protected: // Value passed to NextPacket to determine if we are reading a video or an // audio packet. enum TrackType { @@ -159,6 +181,15 @@ protected: // Pushes a packet to the front of the video packet queue. virtual void PushVideoPacket(NesteggPacketHolder* aItem); + int GetVideoCodec(); + nsIntRect GetPicture(); + nsIntSize GetInitialFrame(); + uint64_t GetLastVideoFrameTime(); + void SetLastVideoFrameTime(uint64_t aFrameTime); + layers::LayersBackend GetLayersBackendType() { return mLayersBackendType; } + MediaTaskQueue* GetTaskQueue() { return mTaskQueue; } + +protected: #ifdef MOZ_OPUS // Setup opus decoder bool InitOpusDecoder(); @@ -186,13 +217,16 @@ protected: virtual nsresult SeekInternal(int64_t aTime, int64_t aStartTime); + // Initializes mLayersBackendType if possible. + void InitLayersBackendType(); + private: // libnestegg context for webm container. Access on state machine thread // or decoder thread only. nestegg* mContext; - // VP8 decoder state - vpx_codec_ctx_t mVPX; + // The video decoder + nsAutoPtr mVideoDecoder; // Vorbis decoder state vorbis_info mVorbisInfo; @@ -247,6 +281,9 @@ private: // Codec ID of video track int mVideoCodec; + layers::LayersBackend mLayersBackendType; + nsRefPtr mTaskQueue; + // Booleans to indicate if we have audio and/or video data bool mHasVideo; bool mHasAudio; diff --git a/dom/media/webm/moz.build b/dom/media/webm/moz.build index e9665cbef9d4..7d0c586ccd8c 100644 --- a/dom/media/webm/moz.build +++ b/dom/media/webm/moz.build @@ -5,17 +5,23 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
 EXPORTS += [
+    'IntelWebMVideoDecoder.h',
+    'SoftwareWebMVideoDecoder.h',
     'WebMBufferedParser.h',
     'WebMDecoder.h',
     'WebMReader.h',
 ]
 
 UNIFIED_SOURCES += [
+    'SoftwareWebMVideoDecoder.cpp',
     'WebMBufferedParser.cpp',
     'WebMDecoder.cpp',
     'WebMReader.cpp',
 ]
 
+if CONFIG['MOZ_FMP4']:
+    UNIFIED_SOURCES += ['IntelWebMVideoDecoder.cpp']
+
 if CONFIG['MOZ_WEBM_ENCODER']:
     EXPORTS += ['WebMWriter.h']
     UNIFIED_SOURCES += ['EbmlComposer.cpp',
diff --git a/modules/libpref/init/all.js b/modules/libpref/init/all.js
index 59d2445c40bb..4afbeace7c7a 100644
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -284,6 +284,9 @@ pref("media.wave.enabled", true);
 #endif
 #ifdef MOZ_WEBM
 pref("media.webm.enabled", true);
+#if defined(MOZ_FMP4) && defined(MOZ_WMF)
+pref("media.webm.intel_decoder.enabled", false);
+#endif
 #endif
 #ifdef MOZ_GSTREAMER
 pref("media.gstreamer.enabled", true);
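
Usage sketch for reviewers (not part of the patch; pref name and build gating are taken from the hunks above, the user.js mechanism is assumed to be a standard local profile): the Intel/WMF-backed WebM decoder is compiled only when both MOZ_FMP4 and MOZ_WMF are defined, and WebMReader selects IntelWebMVideoDecoder only when the new pref is true, otherwise it falls back to SoftwareWebMVideoDecoder (libvpx). To exercise the new path in a local Windows build, the pref can be flipped in about:config or in a profile's user.js:

  user_pref("media.webm.intel_decoder.enabled", true);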