diff --git a/b2g/app/b2g.js b/b2g/app/b2g.js index 3a5a8a35b366..a4199208798f 100644 --- a/b2g/app/b2g.js +++ b/b2g/app/b2g.js @@ -1124,6 +1124,9 @@ pref("dom.audiochannel.mutedByDefault", true); // requests. pref("dom.bluetooth.app-origin", "app://bluetooth.gaiamobile.org"); +// Enable W3C WebBluetooth API and disable B2G only GATT client API. +pref("dom.bluetooth.webbluetooth.enabled", false); + // Default device name for Presentation API pref("dom.presentation.device.name", "Firefox OS"); diff --git a/b2g/config/aries-l/sources.xml b/b2g/config/aries-l/sources.xml index e4e95dd761ab..2b293e68882e 100644 --- a/b2g/config/aries-l/sources.xml +++ b/b2g/config/aries-l/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/aries/sources.xml b/b2g/config/aries/sources.xml index 98f9f438f730..ef381391f7a2 100644 --- a/b2g/config/aries/sources.xml +++ b/b2g/config/aries/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/dolphin/sources.xml b/b2g/config/dolphin/sources.xml index 2421cb4f3154..4dcf2b130a98 100644 --- a/b2g/config/dolphin/sources.xml +++ b/b2g/config/dolphin/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/emulator-ics/sources.xml b/b2g/config/emulator-ics/sources.xml index 2c3e03df1a73..61704c6934cb 100644 --- a/b2g/config/emulator-ics/sources.xml +++ b/b2g/config/emulator-ics/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/emulator-jb/sources.xml b/b2g/config/emulator-jb/sources.xml index 947499805cf2..757ea11b5a33 100644 --- a/b2g/config/emulator-jb/sources.xml +++ b/b2g/config/emulator-jb/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/emulator-kk/sources.xml b/b2g/config/emulator-kk/sources.xml index 829f2709b787..22f7c6c1ea58 100644 --- a/b2g/config/emulator-kk/sources.xml +++ b/b2g/config/emulator-kk/sources.xml @@ -21,7 +21,7 @@ - + @@ -139,7 +139,7 @@ - + diff --git a/b2g/config/emulator-l/sources.xml b/b2g/config/emulator-l/sources.xml index 2b580abe5fde..3d5dcb67ac85 100644 --- a/b2g/config/emulator-l/sources.xml +++ b/b2g/config/emulator-l/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/emulator/sources.xml b/b2g/config/emulator/sources.xml index 2c3e03df1a73..61704c6934cb 100644 --- a/b2g/config/emulator/sources.xml +++ b/b2g/config/emulator/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/flame-kk/sources.xml b/b2g/config/flame-kk/sources.xml index b749ca9f49a1..3b24aeee07b5 100644 --- a/b2g/config/flame-kk/sources.xml +++ b/b2g/config/flame-kk/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/gaia.json b/b2g/config/gaia.json index f997cd82f7a3..0bd03de9e9ae 100644 --- a/b2g/config/gaia.json +++ b/b2g/config/gaia.json @@ -1,9 +1,9 @@ { "git": { - "git_revision": "385ec34c8fe447342e81a40b4e1cc9a80f37fc33", + "git_revision": "4023297b16fdc46de3ddb04be4f3c575313d1cde", "remote": "https://git.mozilla.org/releases/gaia.git", "branch": "" }, - "revision": "c53c24531e4d32550f37c5ff5359eb70af822a73", + "revision": "1520b4ebcfc727b7153be5242339b8f577ab65b4", "repo_path": "integration/gaia-central" } diff --git a/b2g/config/nexus-4-kk/sources.xml b/b2g/config/nexus-4-kk/sources.xml index e622981f7a97..29258a45f62f 100644 --- a/b2g/config/nexus-4-kk/sources.xml +++ b/b2g/config/nexus-4-kk/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/nexus-4/sources.xml b/b2g/config/nexus-4/sources.xml index 5521a1df9543..b6c92a6c58d9 100644 --- a/b2g/config/nexus-4/sources.xml +++ b/b2g/config/nexus-4/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/b2g/config/nexus-5-l/sources.xml b/b2g/config/nexus-5-l/sources.xml 
index 507821ae98c6..843d4db2bb99 100644 --- a/b2g/config/nexus-5-l/sources.xml +++ b/b2g/config/nexus-5-l/sources.xml @@ -21,7 +21,7 @@ - + diff --git a/configure.in b/configure.in index 576e76529bf6..fd4d530f5b7b 100644 --- a/configure.in +++ b/configure.in @@ -1692,15 +1692,6 @@ if test -n "$MOZ_USE_SYSTRACE"; then AC_DEFINE(MOZ_USE_SYSTRACE) fi -# For profiling builds keep the symbol information -if test "$MOZ_PROFILING" -a -z "$STRIP_FLAGS"; then - case "$OS_TARGET" in - Linux|DragonFly|FreeBSD|NetBSD|OpenBSD) - STRIP_FLAGS="--strip-debug" - ;; - esac -fi - dnl ======================================================== dnl = Use Valgrind dnl ======================================================== @@ -1799,6 +1790,38 @@ if test -n "$MOZ_VTUNE"; then AC_DEFINE(MOZ_VTUNE) fi +# For profiling builds keep the symbol information +if test "$MOZ_PROFILING" -a -z "$STRIP_FLAGS"; then + case "$OS_TARGET" in + Linux|DragonFly|FreeBSD|NetBSD|OpenBSD) + STRIP_FLAGS="--strip-debug" + ;; + esac +fi + +dnl ======================================================== +dnl = Enable DMD +dnl ======================================================== + +MOZ_ARG_ENABLE_BOOL(dmd, +[ --enable-dmd Enable DMD; also enables jemalloc, replace-malloc and profiling], + MOZ_DMD=1, + MOZ_DMD= ) + +if test "$MOZ_DMD"; then + AC_DEFINE(MOZ_DMD) + + if test "${CPU_ARCH}" = "arm"; then + CFLAGS="$CFLAGS -funwind-tables" + CXXFLAGS="$CXXFLAGS -funwind-tables" + fi + + MOZ_MEMORY=1 # DMD enables jemalloc + MOZ_REPLACE_MALLOC=1 # DMD enables replace-malloc + MOZ_PROFILING=1 # DMD enables profiling +fi +AC_SUBST(MOZ_DMD) + dnl ======================================================== dnl Profiling dnl ======================================================== @@ -7062,28 +7085,6 @@ if test -n "$MOZ_DEBUG"; then AC_DEFINE(MOZ_DUMP_PAINTING) fi -dnl ======================================================== -dnl = Enable DMD -dnl ======================================================== - -MOZ_ARG_ENABLE_BOOL(dmd, -[ --enable-dmd Enable DMD; also enables jemalloc and replace-malloc], - MOZ_DMD=1, - MOZ_DMD= ) - -if test "$MOZ_DMD"; then - AC_DEFINE(MOZ_DMD) - - if test "${CPU_ARCH}" = "arm"; then - CFLAGS="$CFLAGS -funwind-tables" - CXXFLAGS="$CXXFLAGS -funwind-tables" - fi - - MOZ_MEMORY=1 # DMD enables jemalloc - MOZ_REPLACE_MALLOC=1 # DMD enables replace-malloc -fi -AC_SUBST(MOZ_DMD) - dnl ======================================================== dnl = Enable jemalloc dnl ======================================================== diff --git a/dom/base/nsGlobalWindow.cpp b/dom/base/nsGlobalWindow.cpp index 02be10503074..b3554fd5e7d7 100644 --- a/dom/base/nsGlobalWindow.cpp +++ b/dom/base/nsGlobalWindow.cpp @@ -5880,7 +5880,6 @@ private: RefPtr mTask; }; - static const uint32_t kNextPaintTimeout = 1000; // ms static const char* const kPaintedTopic; RefPtr mWindow; @@ -5940,8 +5939,14 @@ FullscreenTransitionTask::Run() // Completely fixing those cases seems to be tricky, and since they // should rarely happen, it probably isn't worth to fix. Hence we // simply add a timeout here to ensure we never hang forever. + // In addition, if the page is complicated or the machine is less + // powerful, layout could take a long time, in which case, staying + // in black screen for that long could hurt user experience even + // more than exposing an intermediate state. 
mTimer = do_CreateInstance(NS_TIMER_CONTRACTID); - mTimer->Init(observer, kNextPaintTimeout, nsITimer::TYPE_ONE_SHOT); + uint32_t timeout = + Preferences::GetUint("full-screen-api.transition.timeout", 500); + mTimer->Init(observer, timeout, nsITimer::TYPE_ONE_SHOT); } else if (stage == eAfterToggle) { mWidget->PerformFullscreenTransition(nsIWidget::eAfterFullscreenToggle, mDuration.mFadeOut, mTransitionData, diff --git a/dom/bluetooth/common/webapi/BluetoothManager.cpp b/dom/bluetooth/common/webapi/BluetoothManager.cpp index 176e17b849f9..c9fddf7ed6d0 100644 --- a/dom/bluetooth/common/webapi/BluetoothManager.cpp +++ b/dom/bluetooth/common/webapi/BluetoothManager.cpp @@ -13,6 +13,7 @@ #include "mozilla/dom/bluetooth/BluetoothManager.h" #include "mozilla/dom/bluetooth/BluetoothTypes.h" #include "mozilla/dom/BluetoothManagerBinding.h" +#include "mozilla/Preferences.h" #include "mozilla/Services.h" #include "nsContentUtils.h" #include "nsDOMClassInfo.h" @@ -284,3 +285,10 @@ BluetoothManager::WrapObject(JSContext* aCx, JS::Handle aGivenProto) { return BluetoothManagerBinding::Wrap(aCx, this, aGivenProto); } + +// static +bool +BluetoothManager::B2GGattClientEnabled(JSContext* cx, JSObject* aGlobal) +{ + return !Preferences::GetBool("dom.bluetooth.webbluetooth.enabled"); +} diff --git a/dom/bluetooth/common/webapi/BluetoothManager.h b/dom/bluetooth/common/webapi/BluetoothManager.h index dce208b02dac..6595e51e55bc 100644 --- a/dom/bluetooth/common/webapi/BluetoothManager.h +++ b/dom/bluetooth/common/webapi/BluetoothManager.h @@ -77,6 +77,12 @@ public: */ void AppendAdapter(const BluetoothValue& aValue); + /** + * Check whether B2G only GATT client API is enabled (true) or W3C + * WebBluetooth API is enabled (false). + */ + static bool B2GGattClientEnabled(JSContext* cx, JSObject* aGlobal); + private: BluetoothManager(nsPIDOMWindow* aWindow); ~BluetoothManager(); diff --git a/dom/canvas/WebGLContextGL.cpp b/dom/canvas/WebGLContextGL.cpp index c3947f4e43fd..ef3716142ae8 100644 --- a/dom/canvas/WebGLContextGL.cpp +++ b/dom/canvas/WebGLContextGL.cpp @@ -542,7 +542,42 @@ WebGLContext::FramebufferTexture2D(GLenum target, return; } - if (!IsWebGL2() && level != 0) { + if (textarget != LOCAL_GL_TEXTURE_2D && + (textarget < LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_X || + textarget > LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z)) + { + return ErrorInvalidEnumInfo("framebufferTexture2D: textarget:", + textarget); + } + + if (IsWebGL2()) { + /* GLES 3.0.4 p208: + * If textarget is one of TEXTURE_CUBE_MAP_POSITIVE_X, + * TEXTURE_CUBE_MAP_POSITIVE_Y, TEXTURE_CUBE_MAP_POSITIVE_Z, + * TEXTURE_CUBE_MAP_NEGATIVE_X, TEXTURE_CUBE_MAP_NEGATIVE_Y, + * or TEXTURE_CUBE_MAP_NEGATIVE_Z, then level must be greater + * than or equal to zero and less than or equal to log2 of the + * value of MAX_CUBE_MAP_TEXTURE_SIZE. If textarget is TEXTURE_2D, + * level must be greater than or equal to zero and no larger than + * log2 of the value of MAX_TEXTURE_SIZE. Otherwise, an + * INVALID_VALUE error is generated. 
+ */ + + if (textarget == LOCAL_GL_TEXTURE_2D) { + if (uint32_t(level) > FloorLog2(mImplMaxTextureSize)) { + ErrorInvalidValue("framebufferTexture2D: level is too large."); + return; + } + } else { + MOZ_ASSERT(textarget >= LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_X && + textarget <= LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z); + + if (uint32_t(level) > FloorLog2(mImplMaxCubeMapTextureSize)) { + ErrorInvalidValue("framebufferTexture2D: level is too large."); + return; + } + } + } else if (level != 0) { ErrorInvalidValue("framebufferTexture2D: level must be 0."); return; } @@ -567,14 +602,6 @@ WebGLContext::FramebufferTexture2D(GLenum target, " framebuffer 0."); } - if (textarget != LOCAL_GL_TEXTURE_2D && - (textarget < LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_X || - textarget > LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z)) - { - return ErrorInvalidEnumInfo("framebufferTexture2D: textarget:", - textarget); - } - if (!ValidateFramebufferAttachment(fb, attachment, "framebufferTexture2D")) return; diff --git a/dom/canvas/WebGLFramebuffer.cpp b/dom/canvas/WebGLFramebuffer.cpp index 064044f52a4e..d100d3fdaf99 100644 --- a/dom/canvas/WebGLFramebuffer.cpp +++ b/dom/canvas/WebGLFramebuffer.cpp @@ -1157,6 +1157,8 @@ WebGLFramebuffer::GetAttachmentParameter(const char* funcName, JSContext* cx, attachPoint = GetAttachPoint(LOCAL_GL_DEPTH_ATTACHMENT); } + FinalizeAttachments(); + return attachPoint->GetParameter(funcName, mContext, cx, target, attachment, pname, out_error); } diff --git a/dom/html/HTMLMediaElement.cpp b/dom/html/HTMLMediaElement.cpp index 37c4fb283026..0dae88a84e75 100644 --- a/dom/html/HTMLMediaElement.cpp +++ b/dom/html/HTMLMediaElement.cpp @@ -737,6 +737,7 @@ void HTMLMediaElement::AbortExistingLoads() } mError = nullptr; + mCurrentPlayRangeStart = -1.0; mLoadedDataFired = false; mAutoplaying = true; mIsLoadingFromSourceChildren = false; diff --git a/dom/media/MediaFormatReader.cpp b/dom/media/MediaFormatReader.cpp index 6c65699e638d..f7d06bfcc844 100644 --- a/dom/media/MediaFormatReader.cpp +++ b/dom/media/MediaFormatReader.cpp @@ -96,8 +96,7 @@ MediaFormatReader::Shutdown() mAudio.RejectPromise(CANCELED, __func__); } mAudio.mInitPromise.DisconnectIfExists(); - mAudio.mDecoder->Shutdown(); - mAudio.mDecoder = nullptr; + mAudio.ShutdownDecoder(); } if (mAudio.mTrackDemuxer) { mAudio.ResetDemuxer(); @@ -117,8 +116,7 @@ MediaFormatReader::Shutdown() mVideo.RejectPromise(CANCELED, __func__); } mVideo.mInitPromise.DisconnectIfExists(); - mVideo.mDecoder->Shutdown(); - mVideo.mDecoder = nullptr; + mVideo.ShutdownDecoder(); } if (mVideo.mTrackDemuxer) { mVideo.ResetDemuxer(); @@ -381,6 +379,8 @@ MediaFormatReader::EnsureDecoderCreated(TrackType aTrack) decoder.mDecoderInitialized = false; + MonitorAutoLock mon(decoder.mMonitor); + switch (aTrack) { case TrackType::kAudioTrack: decoder.mDecoder = @@ -406,6 +406,11 @@ MediaFormatReader::EnsureDecoderCreated(TrackType aTrack) default: break; } + if (decoder.mDecoder ) { + decoder.mDescription = decoder.mDecoder->GetDescriptionName(); + } else { + decoder.mDescription = "error creating decoder"; + } return decoder.mDecoder != nullptr; } @@ -429,13 +434,14 @@ MediaFormatReader::EnsureDecoderInitialized(TrackType aTrack) auto& decoder = self->GetDecoderData(aTrack); decoder.mInitPromise.Complete(); decoder.mDecoderInitialized = true; + MonitorAutoLock mon(decoder.mMonitor); + decoder.mDescription = decoder.mDecoder->GetDescriptionName(); self->ScheduleUpdate(aTrack); }, [self, aTrack] (MediaDataDecoder::DecoderFailureReason aResult) { auto& decoder = 
self->GetDecoderData(aTrack); decoder.mInitPromise.Complete(); - decoder.mDecoder->Shutdown(); - decoder.mDecoder = nullptr; + decoder.ShutdownDecoder(); self->NotifyError(aTrack); })); return false; @@ -465,8 +471,7 @@ MediaFormatReader::DisableHardwareAcceleration() if (HasVideo() && !mHardwareAccelerationDisabled) { mHardwareAccelerationDisabled = true; Flush(TrackInfo::kVideoTrack); - mVideo.mDecoder->Shutdown(); - mVideo.mDecoder = nullptr; + mVideo.ShutdownDecoder(); if (!EnsureDecoderCreated(TrackType::kVideoTrack)) { LOG("Unable to re-create decoder, aborting"); NotifyError(TrackInfo::kVideoTrack); @@ -919,8 +924,7 @@ MediaFormatReader::HandleDemuxedSamples(TrackType aTrack, // Flush will clear our array of queued samples. So make a copy now. nsTArray> samples{decoder.mQueuedSamples}; Flush(aTrack); - decoder.mDecoder->Shutdown(); - decoder.mDecoder = nullptr; + decoder.ShutdownDecoder(); if (sample->mKeyframe) { decoder.mQueuedSamples.AppendElements(Move(samples)); NotifyDecodingRequested(aTrack); @@ -1604,11 +1608,8 @@ void MediaFormatReader::ReleaseMediaResources() if (mVideoFrameContainer) { mVideoFrameContainer->ClearCurrentFrame(); } - if (mVideo.mDecoder) { - mVideo.mInitPromise.DisconnectIfExists(); - mVideo.mDecoder->Shutdown(); - mVideo.mDecoder = nullptr; - } + mVideo.mInitPromise.DisconnectIfExists(); + mVideo.ShutdownDecoder(); } bool @@ -1666,12 +1667,25 @@ void MediaFormatReader::GetMozDebugReaderData(nsAString& aString) { nsAutoCString result; + const char* audioName = "unavailable"; + const char* videoName = audioName; + + if (HasAudio()) { + MonitorAutoLock mon(mAudio.mMonitor); + audioName = mAudio.mDescription; + } + if (HasVideo()) { + MonitorAutoLock mon(mVideo.mMonitor); + videoName = mVideo.mDescription; + } + + result += nsPrintfCString("audio decoder: %s\n", audioName); + result += nsPrintfCString("audio frames decoded: %lld\n", + mAudio.mNumSamplesOutputTotal); + result += nsPrintfCString("video decoder: %s\n", videoName); result += nsPrintfCString("hardware video decoding: %s\n", VideoIsHardwareAccelerated() ? "enabled" : "disabled"); - result += nsPrintfCString("audio frames decoded: %lld (skipped:%lld)\n" - "video frames decoded: %lld (skipped:%lld)\n", - mAudio.mNumSamplesOutputTotal, - mAudio.mNumSamplesSkippedTotal, + result += nsPrintfCString("video frames decoded: %lld (skipped:%lld)\n", mVideo.mNumSamplesOutputTotal, mVideo.mNumSamplesSkippedTotal); aString += NS_ConvertUTF8toUTF16(result); diff --git a/dom/media/MediaFormatReader.h b/dom/media/MediaFormatReader.h index 30207b59fc4e..4fe53ed24be3 100644 --- a/dom/media/MediaFormatReader.h +++ b/dom/media/MediaFormatReader.h @@ -10,6 +10,7 @@ #include "mozilla/Atomics.h" #include "mozilla/Maybe.h" #include "mozilla/TaskQueue.h" +#include "mozilla/Monitor.h" #include "MediaDataDemuxer.h" #include "MediaDecoderReader.h" @@ -213,6 +214,8 @@ private: uint32_t aDecodeAhead) : mOwner(aOwner) , mType(aType) + , mMonitor("DecoderData") + , mDescription("shutdown") , mDecodeAhead(aDecodeAhead) , mUpdateScheduled(false) , mDemuxEOS(false) @@ -240,14 +243,27 @@ private: // Disambiguate Audio vs Video. MediaData::Type mType; RefPtr mTrackDemuxer; - // The platform decoder. - RefPtr mDecoder; // TaskQueue on which decoder can choose to decode. // Only non-null up until the decoder is created. RefPtr mTaskQueue; // Callback that receives output and error notifications from the decoder. nsAutoPtr mCallback; + // Monitor protecting mDescription and mDecoder. + Monitor mMonitor; + // The platform decoder. 
+ RefPtr mDecoder; + const char* mDescription; + void ShutdownDecoder() + { + MonitorAutoLock mon(mMonitor); + if (mDecoder) { + mDecoder->Shutdown(); + } + mDescription = "shutdown"; + mDecoder = nullptr; + } + // Only accessed from reader's task queue. uint32_t mDecodeAhead; bool mUpdateScheduled; diff --git a/dom/media/mediasink/DecodedAudioDataSink.cpp b/dom/media/mediasink/DecodedAudioDataSink.cpp index e2e87dafa755..8fa34f20f15c 100644 --- a/dom/media/mediasink/DecodedAudioDataSink.cpp +++ b/dom/media/mediasink/DecodedAudioDataSink.cpp @@ -202,12 +202,18 @@ DecodedAudioDataSink::PopFrames(uint32_t aFrames) UniquePtr mData; }; - if (!mCurrentData) { + while (!mCurrentData) { // No data in the queue. Return an empty chunk. if (AudioQueue().GetSize() == 0) { return MakeUnique(); } + // Ignore the element with 0 frames and try next. + if (AudioQueue().PeekFront()->mFrames == 0) { + RefPtr releaseMe = AudioQueue().PopFront(); + continue; + } + // See if there's a gap in the audio. If there is, push silence into the // audio hardware, so we can play across the gap. // Calculate the timestamp of the next chunk of audio in numbers of @@ -239,6 +245,7 @@ DecodedAudioDataSink::PopFrames(uint32_t aFrames) mCursor = MakeUnique(mCurrentData->mAudioData.get(), mCurrentData->mChannels, mCurrentData->mFrames); + MOZ_ASSERT(mCurrentData->mFrames > 0); } auto framesToPop = std::min(aFrames, mCursor->Available()); diff --git a/dom/media/platforms/PlatformDecoderModule.h b/dom/media/platforms/PlatformDecoderModule.h index 0f68a9125b1e..c6ad46306d93 100644 --- a/dom/media/platforms/PlatformDecoderModule.h +++ b/dom/media/platforms/PlatformDecoderModule.h @@ -220,6 +220,11 @@ public: { return NS_OK; } + + // Return the name of the MediaDataDecoder, only used for decoding. + // Only return a static const string, as the information may be accessed + // in a non thread-safe fashion. 
+ virtual const char* GetDescriptionName() const = 0; }; } // namespace mozilla diff --git a/dom/media/platforms/agnostic/BlankDecoderModule.cpp b/dom/media/platforms/agnostic/BlankDecoderModule.cpp index 39f8d7f8a182..2e6329ecad2c 100644 --- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp +++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp @@ -93,6 +93,11 @@ public: return NS_OK; } + const char* GetDescriptionName() const override + { + return "blank media data decoder"; + } + private: nsAutoPtr mCreator; RefPtr mTaskQueue; diff --git a/dom/media/platforms/agnostic/OpusDecoder.h b/dom/media/platforms/agnostic/OpusDecoder.h index efafd58d5c2a..74f3bb3dba15 100644 --- a/dom/media/platforms/agnostic/OpusDecoder.h +++ b/dom/media/platforms/agnostic/OpusDecoder.h @@ -27,6 +27,10 @@ public: nsresult Flush() override; nsresult Drain() override; nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "opus audio decoder"; + } // Return true if mimetype is Opus static bool IsOpus(const nsACString& aMimeType); diff --git a/dom/media/platforms/agnostic/VPXDecoder.h b/dom/media/platforms/agnostic/VPXDecoder.h index 824a169e7562..b6c323366852 100644 --- a/dom/media/platforms/agnostic/VPXDecoder.h +++ b/dom/media/platforms/agnostic/VPXDecoder.h @@ -33,6 +33,10 @@ public: nsresult Flush() override; nsresult Drain() override; nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "libvpx video decoder"; + } // Return true if mimetype is a VPX codec static bool IsVPX(const nsACString& aMimeType); diff --git a/dom/media/platforms/agnostic/VorbisDecoder.h b/dom/media/platforms/agnostic/VorbisDecoder.h index 41546aa824c1..7f35301d08ac 100644 --- a/dom/media/platforms/agnostic/VorbisDecoder.h +++ b/dom/media/platforms/agnostic/VorbisDecoder.h @@ -30,6 +30,10 @@ public: nsresult Flush() override; nsresult Drain() override; nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "vorbis audio decoder"; + } // Return true if mimetype is Vorbis static bool IsVorbis(const nsACString& aMimeType); diff --git a/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp index 79508e19d082..0cac98517048 100644 --- a/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp +++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp @@ -137,6 +137,10 @@ public: return rv; } + const char* GetDescriptionName() const override { + return mDecoder->GetDescriptionName(); + } + private: RefPtr mDecoder; diff --git a/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.h b/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.h index 79e335e84fa1..717827fd63cd 100644 --- a/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.h +++ b/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.h @@ -74,6 +74,10 @@ public: nsresult Flush() override; nsresult Drain() override; nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "GMP audio decoder"; + } protected: virtual void InitTags(nsTArray& aTags); diff --git a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h index e563fc4f8568..7d2f3c2767ae 100644 --- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h +++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h @@ -89,6 +89,10 @@ public: nsresult Flush() override; nsresult Drain() override; nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "GMP 
video decoder"; + } protected: virtual void InitTags(nsTArray& aTags); diff --git a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h index ced341d17722..e3c6f2ff4dc3 100644 --- a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h +++ b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h @@ -138,6 +138,11 @@ public: nsresult Drain() override; nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "GMP proxy data decoder"; + } + // Called by MediaDataDecoderCallbackProxy. void FlushComplete(); diff --git a/dom/media/platforms/android/AndroidDecoderModule.cpp b/dom/media/platforms/android/AndroidDecoderModule.cpp index dd043b253fca..287069853d08 100644 --- a/dom/media/platforms/android/AndroidDecoderModule.cpp +++ b/dom/media/platforms/android/AndroidDecoderModule.cpp @@ -85,6 +85,11 @@ public: } + const char* GetDescriptionName() const override + { + return "android video decoder"; + } + RefPtr Init() override { mSurfaceTexture = AndroidSurfaceTexture::Create(); @@ -189,6 +194,11 @@ public: } } + const char* GetDescriptionName() const override + { + return "android audio decoder"; + } + nsresult Output(BufferInfo::Param aInfo, void* aBuffer, MediaFormat::Param aFormat, const TimeUnit& aDuration) { diff --git a/dom/media/platforms/android/AndroidDecoderModule.h b/dom/media/platforms/android/AndroidDecoderModule.h index d3f216d1725c..06425d974662 100644 --- a/dom/media/platforms/android/AndroidDecoderModule.h +++ b/dom/media/platforms/android/AndroidDecoderModule.h @@ -57,6 +57,10 @@ public: nsresult Drain() override; nsresult Shutdown() override; nsresult Input(MediaRawData* aSample) override; + const char* GetDescriptionName() const override + { + return "android decoder"; + } protected: enum ModuleState { diff --git a/dom/media/platforms/apple/AppleATDecoder.h b/dom/media/platforms/apple/AppleATDecoder.h index 18759a555eb5..4f5a0f065cb2 100644 --- a/dom/media/platforms/apple/AppleATDecoder.h +++ b/dom/media/platforms/apple/AppleATDecoder.h @@ -31,6 +31,11 @@ public: nsresult Drain() override; nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "apple CoreMedia decoder"; + } + // Callbacks also need access to the config. const AudioInfo& mConfig; diff --git a/dom/media/platforms/apple/AppleVDADecoder.h b/dom/media/platforms/apple/AppleVDADecoder.h index d40850f9a280..6b0b0b37af1c 100644 --- a/dom/media/platforms/apple/AppleVDADecoder.h +++ b/dom/media/platforms/apple/AppleVDADecoder.h @@ -81,6 +81,11 @@ public: return true; } + const char* GetDescriptionName() const override + { + return "apple VDA decoder"; + } + // Access from the taskqueue and the decoder's thread. // OutputFrame is thread-safe. nsresult OutputFrame(CVPixelBufferRef aImage, diff --git a/dom/media/platforms/apple/AppleVTDecoder.h b/dom/media/platforms/apple/AppleVTDecoder.h index 7e6aa58b325e..3a6536b4229b 100644 --- a/dom/media/platforms/apple/AppleVTDecoder.h +++ b/dom/media/platforms/apple/AppleVTDecoder.h @@ -27,6 +27,13 @@ public: return mIsHardwareAccelerated; } + const char* GetDescriptionName() const override + { + return mIsHardwareAccelerated + ? 
"apple hardware VT decoder" + : "apple software VT decoder"; + } + protected: void ProcessFlush() override; void ProcessDrain() override; @@ -43,7 +50,7 @@ private: nsresult WaitForAsynchronousFrames(); CFDictionaryRef CreateDecoderSpecification(); CFDictionaryRef CreateDecoderExtensions(); - bool mIsHardwareAccelerated; + Atomic mIsHardwareAccelerated; }; } // namespace mozilla diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h index 73b0d6b1c421..c6092722e3cd 100644 --- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h +++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h @@ -31,6 +31,10 @@ public: void ProcessDrain() override; void InitCodecContext() override; static AVCodecID GetCodecId(const nsACString& aMimeType); + const char* GetDescriptionName() const override + { + return "ffmpeg audio decoder"; + } private: void DecodePacket(MediaRawData* aSample); diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h index 304bace269ff..7f3896b210d8 100644 --- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h +++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h @@ -44,6 +44,14 @@ public: void ProcessDrain() override; void ProcessFlush() override; void InitCodecContext() override; + const char* GetDescriptionName() const override + { +#ifdef USING_MOZFFVPX + return "ffvpx video decoder"; +#else + return "ffmpeg video decoder"; +#endif + } static AVCodecID GetCodecId(const nsACString& aMimeType); private: diff --git a/dom/media/platforms/gonk/GonkAudioDecoderManager.h b/dom/media/platforms/gonk/GonkAudioDecoderManager.h index 61959f7727b2..ca22ee3dfe25 100644 --- a/dom/media/platforms/gonk/GonkAudioDecoderManager.h +++ b/dom/media/platforms/gonk/GonkAudioDecoderManager.h @@ -33,6 +33,11 @@ public: void ProcessFlush() override; + const char* GetDescriptionName() const override + { + return "gonk audio decoder"; + } + private: bool InitMediaCodecProxy(); diff --git a/dom/media/platforms/gonk/GonkMediaDataDecoder.h b/dom/media/platforms/gonk/GonkMediaDataDecoder.h index 0e4c5887c5b0..f6f642d6ade7 100644 --- a/dom/media/platforms/gonk/GonkMediaDataDecoder.h +++ b/dom/media/platforms/gonk/GonkMediaDataDecoder.h @@ -28,6 +28,7 @@ public: virtual ~GonkDecoderManager() {} virtual RefPtr Init() = 0; + virtual const char* GetDescriptionName() const = 0; // Asynchronously send sample into mDecoder. If out of input buffer, aSample // will be queued for later re-send. 
@@ -199,6 +200,11 @@ public: nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "gonk decoder"; + } + private: android::sp mManager; diff --git a/dom/media/platforms/gonk/GonkVideoDecoderManager.h b/dom/media/platforms/gonk/GonkVideoDecoderManager.h index 1e3b3139b6ba..91eb6e5d416e 100644 --- a/dom/media/platforms/gonk/GonkVideoDecoderManager.h +++ b/dom/media/platforms/gonk/GonkVideoDecoderManager.h @@ -50,6 +50,11 @@ public: nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "gonk video decoder"; + } + static void RecycleCallback(TextureClient* aClient, void* aClosure); protected: diff --git a/dom/media/platforms/omx/OmxDataDecoder.h b/dom/media/platforms/omx/OmxDataDecoder.h index 8e834f5be62d..111395deef0f 100644 --- a/dom/media/platforms/omx/OmxDataDecoder.h +++ b/dom/media/platforms/omx/OmxDataDecoder.h @@ -71,6 +71,11 @@ public: nsresult Shutdown() override; + const char* GetDescriptionName() const override + { + return "omx decoder"; + } + // Return true if event is handled. bool Event(OMX_EVENTTYPE aEvent, OMX_U32 aData1, OMX_U32 aData2); diff --git a/dom/media/platforms/wmf/MFTDecoder.cpp b/dom/media/platforms/wmf/MFTDecoder.cpp index 5558d6d7b2bf..e5fd17da4a5a 100644 --- a/dom/media/platforms/wmf/MFTDecoder.cpp +++ b/dom/media/platforms/wmf/MFTDecoder.cpp @@ -236,7 +236,9 @@ MFTDecoder::Output(RefPtr* aOutput) // Treat other errors as unexpected, and warn. NS_ENSURE_TRUE(SUCCEEDED(hr), hr); - MOZ_ASSERT(output.pSample); + if (!output.pSample) { + return S_OK; + } if (mDiscontinuity) { output.pSample->SetUINT32(MFSampleExtension_Discontinuity, TRUE); diff --git a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp index a2b5b518ed2b..b65d3f0c4f03 100644 --- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp +++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp @@ -10,7 +10,7 @@ #include "WMFUtils.h" #include "nsTArray.h" #include "TimeUnits.h" - +#include "mozilla/Telemetry.h" #include "mozilla/Logging.h" extern mozilla::LogModule* GetPDMLog(); @@ -226,6 +226,16 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset, NS_ENSURE_TRUE(SUCCEEDED(hr), hr); + if (!sample) { + LOG("Audio MFTDecoder returned success but null output."); + nsCOMPtr task = NS_NewRunnableFunction([]() -> void { + LOG("Reporting telemetry AUDIO_MFT_OUTPUT_NULL_SAMPLES"); + Telemetry::Accumulate(Telemetry::ID::AUDIO_MFT_OUTPUT_NULL_SAMPLES, 1); + }); + AbstractThread::MainThread()->Dispatch(task.forget()); + return E_FAIL; + } + RefPtr buffer; hr = sample->ConvertToContiguousBuffer(getter_AddRefs(buffer)); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); diff --git a/dom/media/platforms/wmf/WMFAudioMFTManager.h b/dom/media/platforms/wmf/WMFAudioMFTManager.h index 69b66e2120a9..c332cbebd2e3 100644 --- a/dom/media/platforms/wmf/WMFAudioMFTManager.h +++ b/dom/media/platforms/wmf/WMFAudioMFTManager.h @@ -38,6 +38,11 @@ public: return TrackInfo::kAudioTrack; } + const char* GetDescriptionName() const override + { + return "wmf audio decoder"; + } + private: HRESULT UpdateOutputType(); diff --git a/dom/media/platforms/wmf/WMFMediaDataDecoder.h b/dom/media/platforms/wmf/WMFMediaDataDecoder.h index 12579185b6b1..ce1e23b006d1 100644 --- a/dom/media/platforms/wmf/WMFMediaDataDecoder.h +++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.h @@ -54,6 +54,8 @@ public: virtual void ConfigurationChanged(const TrackInfo& aConfig) {} + virtual const char* GetDescriptionName() const = 0; + protected: // IMFTransform wrapper 
that performs the decoding. RefPtr mDecoder; @@ -85,6 +87,11 @@ public: nsresult ConfigurationChanged(const TrackInfo& aConfig) override; + const char* GetDescriptionName() const override + { + return mMFTManager ? mMFTManager->GetDescriptionName() : ""; + } + private: // Called on the task queue. Inserts the sample into the decoder, and diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp index 9fafe6abac9f..b81a846af200 100644 --- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp +++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp @@ -21,6 +21,7 @@ #include "IMFYCbCrImage.h" #include "mozilla/WindowsVersion.h" #include "mozilla/Preferences.h" +#include "mozilla/Telemetry.h" #include "nsPrintfCString.h" extern mozilla::LogModule* GetPDMLog(); @@ -78,6 +79,9 @@ WMFVideoMFTManager::WMFVideoMFTManager( , mImageContainer(aImageContainer) , mDXVAEnabled(aDXVAEnabled) , mLayersBackend(aLayersBackend) + , mNullOutputCount(0) + , mGotValidOutputAfterNullOutput(false) + , mGotExcessiveNullOutput(false) // mVideoStride, mVideoWidth, mVideoHeight, mUseHwAccel are initialized in // Init(). { @@ -103,6 +107,20 @@ WMFVideoMFTManager::~WMFVideoMFTManager() if (mDXVA2Manager) { DeleteOnMainThread(mDXVA2Manager); } + + // Record whether the video decoder successfully decoded, or output null + // samples but did/didn't recover. + uint32_t telemetry = (mNullOutputCount == 0) ? 0 : + (mGotValidOutputAfterNullOutput && mGotExcessiveNullOutput) ? 1 : + mGotExcessiveNullOutput ? 2 : + mGotValidOutputAfterNullOutput ? 3 : + 4; + + nsCOMPtr task = NS_NewRunnableFunction([=]() -> void { + LOG(nsPrintfCString("Reporting telemetry VIDEO_MFT_OUTPUT_NULL_SAMPLES=%d", telemetry).get()); + Telemetry::Accumulate(Telemetry::ID::VIDEO_MFT_OUTPUT_NULL_SAMPLES, telemetry); + }); + AbstractThread::MainThread()->Dispatch(task.forget()); } const GUID& @@ -575,6 +593,23 @@ WMFVideoMFTManager::Output(int64_t aStreamOffset, continue; } if (SUCCEEDED(hr)) { + if (!sample) { + LOG("Video MFTDecoder returned success but no output!"); + // On some machines/input the MFT returns success but doesn't output + // a video frame. If we detect this, try again, but only up to a + // point; after 250 failures, give up. Note we count all failures + // over the life of the decoder, as we may end up exiting with a + // NEED_MORE_INPUT and coming back to hit the same error. So just + // counting with a local variable (like typeChangeCount does) may + // not work in this situation. + ++mNullOutputCount; + if (mNullOutputCount > 250) { + LOG("Excessive Video MFTDecoder returning success but no output; giving up"); + mGotExcessiveNullOutput = true; + return E_FAIL; + } + continue; + } break; } // Else unexpected error, assert, and bail. @@ -595,6 +630,10 @@ WMFVideoMFTManager::Output(int64_t aStreamOffset, aOutData = frame; + if (mNullOutputCount) { + mGotValidOutputAfterNullOutput = true; + } + return S_OK; } diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.h b/dom/media/platforms/wmf/WMFVideoMFTManager.h index b0e2e2e26b2b..adf29f3a4c99 100644 --- a/dom/media/platforms/wmf/WMFVideoMFTManager.h +++ b/dom/media/platforms/wmf/WMFVideoMFTManager.h @@ -41,6 +41,13 @@ public: void ConfigurationChanged(const TrackInfo& aConfig) override; + const char* GetDescriptionName() const override + { + nsCString failureReason; + return IsHardwareAccelerated(failureReason) + ? 
"wmf hardware video decoder" : "wmf software video decoder"; + } + private: bool InitializeDXVA(bool aForceD3D9); @@ -88,6 +95,10 @@ private: const GUID& GetMFTGUID(); const GUID& GetMediaSubtypeGUID(); + + uint32_t mNullOutputCount; + bool mGotValidOutputAfterNullOutput; + bool mGotExcessiveNullOutput; }; } // namespace mozilla diff --git a/dom/media/platforms/wrappers/FuzzingWrapper.h b/dom/media/platforms/wrappers/FuzzingWrapper.h index 838aae9e7d07..f6b34624bf52 100644 --- a/dom/media/platforms/wrappers/FuzzingWrapper.h +++ b/dom/media/platforms/wrappers/FuzzingWrapper.h @@ -111,6 +111,10 @@ private: nsresult Shutdown() override; bool IsHardwareAccelerated(nsACString& aFailureReason) const override; nsresult ConfigurationChanged(const TrackInfo& aConfig) override; + const char* GetDescriptionName() const override + { + return mDecoder->GetDescriptionName(); + } RefPtr mDecoder; RefPtr mCallbackWrapper; diff --git a/dom/media/platforms/wrappers/H264Converter.h b/dom/media/platforms/wrappers/H264Converter.h index e57f3650a9a3..56d2f6da01b1 100644 --- a/dom/media/platforms/wrappers/H264Converter.h +++ b/dom/media/platforms/wrappers/H264Converter.h @@ -35,6 +35,13 @@ public: nsresult Drain() override; nsresult Shutdown() override; bool IsHardwareAccelerated(nsACString& aFailureReason) const override; + const char* GetDescriptionName() const override + { + if (mDecoder) { + return mDecoder->GetDescriptionName(); + } + return "H264Converter decoder (pending)"; + } // Return true if mimetype is H.264. static bool IsH264(const TrackInfo& aConfig); diff --git a/dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp b/dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp index f32eac9b8ecc..9d49145f82f7 100644 --- a/dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp +++ b/dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp @@ -789,7 +789,13 @@ nsSynthVoiceRegistry::SpeakImpl(VoiceData* aVoice, aTask->InitDirectAudio(); } - aVoice->mService->Speak(aText, aVoice->mUri, aVolume, aRate, aPitch, aTask); + if (NS_FAILED(aVoice->mService->Speak(aText, aVoice->mUri, aVolume, aRate, + aPitch, aTask))) { + if (serviceType == nsISpeechService::SERVICETYPE_INDIRECT_AUDIO) { + aTask->DispatchError(0, 0); + } + // XXX When using direct audio, no way to dispatch error + } } } // namespace dom diff --git a/dom/media/webspeech/synth/test/nsFakeSynthServices.cpp b/dom/media/webspeech/synth/test/nsFakeSynthServices.cpp index b72f2b09dc6d..3a4969838ce6 100644 --- a/dom/media/webspeech/synth/test/nsFakeSynthServices.cpp +++ b/dom/media/webspeech/synth/test/nsFakeSynthServices.cpp @@ -268,8 +268,7 @@ FakeIndirectAudioSynth::Speak(const nsAString& aText, const nsAString& aUri, } if (flags & eFailAtStart) { - aTask->DispatchError(0, 0); - return NS_OK; + return NS_ERROR_FAILURE; } RefPtr cb = new FakeSynthCallback( diff --git a/dom/tests/mochitest/general/mochitest.ini b/dom/tests/mochitest/general/mochitest.ini index 82a7f5770d62..c10ae3f2a7f7 100644 --- a/dom/tests/mochitest/general/mochitest.ini +++ b/dom/tests/mochitest/general/mochitest.ini @@ -72,6 +72,7 @@ skip-if = e10s || buildapp == 'mulet' || buildapp == 'b2g' || toolkit == 'androi [test_for_of.html] [test_frameElementWrapping.html] [test_pointerPreserves3D.html] +[test_pointerPreserves3DClip.html] [test_framedhistoryframes.html] [test_idleapi_permissions.html] skip-if = buildapp == 'b2g' || buildapp == 'mulet' diff --git a/dom/tests/mochitest/general/test_pointerPreserves3DClip.html b/dom/tests/mochitest/general/test_pointerPreserves3DClip.html new file mode 100644 
index 000000000000..29c8f3e7e6c3 --- /dev/null +++ b/dom/tests/mochitest/general/test_pointerPreserves3DClip.html @@ -0,0 +1,55 @@ + [55 added lines: mochitest page titled "Test for pointer events with preserve-3d and clips"; markup not recoverable]
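The WebIDL annotations that follow gate the B2G-only GATT client surface behind a native predicate via `[Func="mozilla::dom::bluetooth::BluetoothManager::B2GGattClientEnabled"]`. A minimal sketch of the predicate's pref-driven logic; the preference store here is a stand-in for `mozilla::Preferences`, and only the pref name and the negation mirror the patch:

```cpp
#include <cstdio>
#include <string>
#include <unordered_map>

// Stand-in preference store, seeded with the default that b2g.js sets above.
static std::unordered_map<std::string, bool> gPrefs = {
  {"dom.bluetooth.webbluetooth.enabled", false},
};

static bool GetBoolPref(const std::string& aName) {
  auto it = gPrefs.find(aName);
  return it != gPrefs.end() && it->second;
}

// Mirrors BluetoothManager::B2GGattClientEnabled(): the B2G-only GATT
// client API is exposed exactly when the W3C WebBluetooth pref is off,
// so the two APIs can never be visible at the same time.
static bool B2GGattClientEnabled() {
  return !GetBoolPref("dom.bluetooth.webbluetooth.enabled");
}

int main() {
  std::printf("B2G GATT client exposed: %s\n",
              B2GGattClientEnabled() ? "yes" : "no");
}
```

Because the b2g.js default is false, `startLeScan()`/`stopLeScan()`, the `gatt` attribute, and the GATT event interfaces below all remain exposed; flipping the pref to true hides them in favor of the W3C WebBluetooth API.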
+ + + diff --git a/dom/webidl/BluetoothAdapter.webidl b/dom/webidl/BluetoothAdapter.webidl index ce32df716d75..6b9555a3e600 100644 --- a/dom/webidl/BluetoothAdapter.webidl +++ b/dom/webidl/BluetoothAdapter.webidl @@ -128,10 +128,17 @@ interface BluetoothAdapter : EventTarget { sequence getPairedDevices(); - [NewObject] + /** + * [B2G only GATT client API] + * |startLeScan| and |stopLeScan| methods are exposed only if + * "dom.bluetooth.webbluetooth.enabled" preference is false. + */ + [NewObject, + Func="mozilla::dom::bluetooth::BluetoothManager::B2GGattClientEnabled"] Promise startLeScan(sequence serviceUuids); - [NewObject] + [NewObject, + Func="mozilla::dom::bluetooth::BluetoothManager::B2GGattClientEnabled"] Promise stopLeScan(BluetoothDiscoveryHandle discoveryHandle); [NewObject, Throws, AvailableIn=CertifiedApps] diff --git a/dom/webidl/BluetoothDevice.webidl b/dom/webidl/BluetoothDevice.webidl index cdc8b4e05f81..06646784fe65 100644 --- a/dom/webidl/BluetoothDevice.webidl +++ b/dom/webidl/BluetoothDevice.webidl @@ -16,7 +16,12 @@ interface BluetoothDevice : EventTarget /** * Retrieve the BluetoothGatt interface to interact with remote BLE devices. * This attribute is null if the device type is not dual or le. + * + * [B2G only GATT client API] + * gatt attribute is exposed only if "dom.bluetooth.webbluetooth.enabled" + * preference is false. */ + [Func="mozilla::dom::bluetooth::BluetoothManager::B2GGattClientEnabled"] readonly attribute BluetoothGatt? gatt; [Cached, Pure] diff --git a/dom/webidl/BluetoothGatt.webidl b/dom/webidl/BluetoothGatt.webidl index e9c48b7b4fa0..ddc9260d9097 100644 --- a/dom/webidl/BluetoothGatt.webidl +++ b/dom/webidl/BluetoothGatt.webidl @@ -4,7 +4,13 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ -[CheckAnyPermissions="bluetooth"] +/** + * [B2G only GATT client API] + * BluetoothGatt interface is exposed only if + * "dom.bluetooth.webbluetooth.enabled" preference is false. + */ +[CheckAnyPermissions="bluetooth", + Func="mozilla::dom::bluetooth::BluetoothManager::B2GGattClientEnabled"] interface BluetoothGatt : EventTarget { [Cached, Pure] diff --git a/dom/webidl/BluetoothGattCharacteristicEvent.webidl b/dom/webidl/BluetoothGattCharacteristicEvent.webidl index ce6b87ce0921..1b117bc447bd 100644 --- a/dom/webidl/BluetoothGattCharacteristicEvent.webidl +++ b/dom/webidl/BluetoothGattCharacteristicEvent.webidl @@ -4,7 +4,13 @@ * You can obtain one at http://mozilla.org/MPL/2.0/. */ +/** + * [B2G only GATT client API] + * BluetoothGattCharacteristicEvent interface is exposed only if + * "dom.bluetooth.webbluetooth.enabled" preference is false. + */ [CheckAnyPermissions="bluetooth", + Func="mozilla::dom::bluetooth::BluetoothManager::B2GGattClientEnabled", Constructor(DOMString type, optional BluetoothGattCharacteristicEventInit eventInitDict)] interface BluetoothGattCharacteristicEvent : Event diff --git a/dom/webidl/BluetoothLeDeviceEvent.webidl b/dom/webidl/BluetoothLeDeviceEvent.webidl index fb70dd259159..a6e272bf6a01 100644 --- a/dom/webidl/BluetoothLeDeviceEvent.webidl +++ b/dom/webidl/BluetoothLeDeviceEvent.webidl @@ -4,7 +4,13 @@ * You can obtain one at http://mozilla.org/MPL/2.0/. */ +/** + * [B2G only GATT client API] + * BluetoothLeDeviceEvent interface is exposed only if + * "dom.bluetooth.webbluetooth.enabled" preference is false. 
+ */ [CheckAnyPermissions="bluetooth", + Func="mozilla::dom::bluetooth::BluetoothManager::B2GGattClientEnabled", Constructor(DOMString type, optional BluetoothLeDeviceEventInit eventInitDict)] interface BluetoothLeDeviceEvent : Event { diff --git a/gfx/layers/apz/src/AsyncPanZoomController.cpp b/gfx/layers/apz/src/AsyncPanZoomController.cpp index 1ce82d9eee7f..c616301e6083 100644 --- a/gfx/layers/apz/src/AsyncPanZoomController.cpp +++ b/gfx/layers/apz/src/AsyncPanZoomController.cpp @@ -1339,7 +1339,13 @@ nsEventStatus AsyncPanZoomController::OnTouchEnd(const MultiTouchInput& aEvent) if (CurrentTouchBlock()->GetActiveTouchCount() == 0) { // It's possible we may be overscrolled if the user tapped during a // previous overscroll pan. Make sure to snap back in this situation. - if (!SnapBackIfOverscrolled()) { + // An ancestor APZC could be overscrolled instead of this APZC, so + // walk the handoff chain as well. + CurrentTouchBlock()->GetOverscrollHandoffChain()->SnapBackOverscrolledApzc(this); + // SnapBackOverscrolledApzc() will put any APZC it causes to snap back + // into the OVERSCROLL_ANIMATION state. If that's not us, since we're + // done TOUCHING enter the NOTHING state. + if (mState != OVERSCROLL_ANIMATION) { SetState(NOTHING); } } @@ -3558,6 +3564,11 @@ AsyncPanZoomController::ResetTouchInputState() listener->HandleInputEvent(cancel); } CancelAnimationAndGestureState(); + // Clear overscroll along the entire handoff chain, in case an APZC + // later in the chain is overscrolled. + if (TouchBlockState* block = CurrentTouchBlock()) { + block->GetOverscrollHandoffChain()->ClearOverscroll(); + } } void diff --git a/gfx/layers/apz/test/gtest/InputUtils.h b/gfx/layers/apz/test/gtest/InputUtils.h index 4770a4acc3df..1c32d134c6c2 100644 --- a/gfx/layers/apz/test/gtest/InputUtils.h +++ b/gfx/layers/apz/test/gtest/InputUtils.h @@ -32,21 +32,28 @@ // (which expects an untransformed point). We handle both cases by setting both // the transformed and untransformed fields to the same value. SingleTouchData -CreateSingleTouchData(int32_t aIdentifier, int aX, int aY) +CreateSingleTouchData(int32_t aIdentifier, const ScreenIntPoint& aPoint) { - SingleTouchData touch(aIdentifier, ScreenIntPoint(aX, aY), ScreenSize(0, 0), 0, 0); - touch.mLocalScreenPoint = ParentLayerPoint(aX, aY); + SingleTouchData touch(aIdentifier, aPoint, ScreenSize(0, 0), 0, 0); + touch.mLocalScreenPoint = ParentLayerPoint(aPoint.x, aPoint.y); return touch; } +// Convenience wrapper for CreateSingleTouchData() that takes loose coordinates. 
+SingleTouchData +CreateSingleTouchData(int32_t aIdentifier, ScreenIntCoord aX, ScreenIntCoord aY) +{ + return CreateSingleTouchData(aIdentifier, ScreenIntPoint(aX, aY)); +} + PinchGestureInput CreatePinchGestureInput(PinchGestureInput::PinchGestureType aType, - int aFocusX, int aFocusY, + const ScreenIntPoint& aFocus, float aCurrentSpan, float aPreviousSpan) { - PinchGestureInput result(aType, 0, TimeStamp(), ScreenPoint(aFocusX, aFocusY), + PinchGestureInput result(aType, 0, TimeStamp(), aFocus, aCurrentSpan, aPreviousSpan, 0); - result.mLocalFocusPoint = ParentLayerPoint(aFocusX, aFocusY); + result.mLocalFocusPoint = ParentLayerPoint(aFocus.x, aFocus.y); return result; } @@ -76,34 +83,38 @@ CreateMultiTouchInput(MultiTouchInput::MultiTouchType aType, TimeStamp aTime) template nsEventStatus -TouchDown(const RefPtr& aTarget, int aX, int aY, TimeStamp aTime, uint64_t* aOutInputBlockId = nullptr) +TouchDown(const RefPtr& aTarget, const ScreenIntPoint& aPoint, + TimeStamp aTime, uint64_t* aOutInputBlockId = nullptr) { MultiTouchInput mti = CreateMultiTouchInput(MultiTouchInput::MULTITOUCH_START, aTime); - mti.mTouches.AppendElement(CreateSingleTouchData(0, aX, aY)); + mti.mTouches.AppendElement(CreateSingleTouchData(0, aPoint)); return aTarget->ReceiveInputEvent(mti, nullptr, aOutInputBlockId); } template nsEventStatus -TouchMove(const RefPtr& aTarget, int aX, int aY, TimeStamp aTime) +TouchMove(const RefPtr& aTarget, const ScreenIntPoint& aPoint, + TimeStamp aTime) { MultiTouchInput mti = CreateMultiTouchInput(MultiTouchInput::MULTITOUCH_MOVE, aTime); - mti.mTouches.AppendElement(CreateSingleTouchData(0, aX, aY)); + mti.mTouches.AppendElement(CreateSingleTouchData(0, aPoint)); return aTarget->ReceiveInputEvent(mti, nullptr, nullptr); } template nsEventStatus -TouchUp(const RefPtr& aTarget, int aX, int aY, TimeStamp aTime) +TouchUp(const RefPtr& aTarget, const ScreenIntPoint& aPoint, + TimeStamp aTime) { MultiTouchInput mti = CreateMultiTouchInput(MultiTouchInput::MULTITOUCH_END, aTime); - mti.mTouches.AppendElement(CreateSingleTouchData(0, aX, aY)); + mti.mTouches.AppendElement(CreateSingleTouchData(0, aPoint)); return aTarget->ReceiveInputEvent(mti, nullptr, nullptr); } template void -Tap(const RefPtr& aTarget, int aX, int aY, MockContentControllerDelayed* aMcc, +Tap(const RefPtr& aTarget, const ScreenIntPoint& aPoint, + MockContentControllerDelayed* aMcc, TimeDuration aTapLength, nsEventStatus (*aOutEventStatuses)[2] = nullptr, uint64_t* aOutInputBlockId = nullptr) @@ -115,7 +126,7 @@ Tap(const RefPtr& aTarget, int aX, int aY, MockContentControllerD aOutInputBlockId = &blockId; } - nsEventStatus status = TouchDown(aTarget, aX, aY, aMcc->Time(), aOutInputBlockId); + nsEventStatus status = TouchDown(aTarget, aPoint, aMcc->Time(), aOutInputBlockId); if (aOutEventStatuses) { (*aOutEventStatuses)[0] = status; } @@ -127,7 +138,7 @@ Tap(const RefPtr& aTarget, int aX, int aY, MockContentControllerD SetDefaultAllowedTouchBehavior(aTarget, *aOutInputBlockId); } - status = TouchUp(aTarget, aX, aY, aMcc->Time()); + status = TouchUp(aTarget, aPoint, aMcc->Time()); if (aOutEventStatuses) { (*aOutEventStatuses)[1] = status; } @@ -135,11 +146,12 @@ Tap(const RefPtr& aTarget, int aX, int aY, MockContentControllerD template void -TapAndCheckStatus(const RefPtr& aTarget, int aX, int aY, - MockContentControllerDelayed* aMcc, TimeDuration aTapLength) +TapAndCheckStatus(const RefPtr& aTarget, + const ScreenIntPoint& aPoint, MockContentControllerDelayed* aMcc, + TimeDuration aTapLength) { nsEventStatus 
statuses[2]; - Tap(aTarget, aX, aY, aMcc, aTapLength, &statuses); + Tap(aTarget, aPoint, aMcc, aTapLength, &statuses); EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[0]); EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[1]); } @@ -148,8 +160,8 @@ template void Pan(const RefPtr& aTarget, MockContentControllerDelayed* aMcc, - const ScreenPoint& aTouchStart, - const ScreenPoint& aTouchEnd, + const ScreenIntPoint& aTouchStart, + const ScreenIntPoint& aTouchEnd, bool aKeepFingerDown = false, nsTArray* aAllowedTouchBehaviors = nullptr, nsEventStatus (*aOutEventStatuses)[4] = nullptr, @@ -173,7 +185,9 @@ Pan(const RefPtr& aTarget, } // Make sure the move is large enough to not be handled as a tap - nsEventStatus status = TouchDown(aTarget, aTouchStart.x, aTouchStart.y + OVERCOME_TOUCH_TOLERANCE, aMcc->Time(), aOutInputBlockId); + nsEventStatus status = TouchDown(aTarget, + ScreenIntPoint(aTouchStart.x, aTouchStart.y + OVERCOME_TOUCH_TOLERANCE), + aMcc->Time(), aOutInputBlockId); if (aOutEventStatuses) { (*aOutEventStatuses)[0] = status; } @@ -190,14 +204,14 @@ Pan(const RefPtr& aTarget, } } - status = TouchMove(aTarget, aTouchStart.x, aTouchStart.y, aMcc->Time()); + status = TouchMove(aTarget, aTouchStart, aMcc->Time()); if (aOutEventStatuses) { (*aOutEventStatuses)[1] = status; } aMcc->AdvanceBy(TIME_BETWEEN_TOUCH_EVENT); - status = TouchMove(aTarget, aTouchEnd.x, aTouchEnd.y, aMcc->Time()); + status = TouchMove(aTarget, aTouchEnd, aMcc->Time()); if (aOutEventStatuses) { (*aOutEventStatuses)[2] = status; } @@ -205,7 +219,7 @@ Pan(const RefPtr& aTarget, aMcc->AdvanceBy(TIME_BETWEEN_TOUCH_EVENT); if (!aKeepFingerDown) { - status = TouchUp(aTarget, aTouchEnd.x, aTouchEnd.y, aMcc->Time()); + status = TouchUp(aTarget, aTouchEnd, aMcc->Time()); } else { status = nsEventStatus_eIgnore; } @@ -232,7 +246,7 @@ Pan(const RefPtr& aTarget, nsEventStatus (*aOutEventStatuses)[4] = nullptr, uint64_t* aOutInputBlockId = nullptr) { - ::Pan(aTarget, aMcc, ScreenPoint(10, aTouchStartY), ScreenPoint(10, aTouchEndY), + ::Pan(aTarget, aMcc, ScreenIntPoint(10, aTouchStartY), ScreenIntPoint(10, aTouchEndY), aKeepFingerDown, aAllowedTouchBehaviors, aOutEventStatuses, aOutInputBlockId); } @@ -279,19 +293,20 @@ ApzcPanNoFling(const RefPtr& aApzc, template void PinchWithPinchInput(const RefPtr& aTarget, - int aFocusX, int aFocusY, int aSecondFocusX, int aSecondFocusY, float aScale, + const ScreenIntPoint& aFocus, + const ScreenIntPoint& aSecondFocus, float aScale, nsEventStatus (*aOutEventStatuses)[3] = nullptr) { nsEventStatus actualStatus = aTarget->ReceiveInputEvent( CreatePinchGestureInput(PinchGestureInput::PINCHGESTURE_START, - aFocusX, aFocusY, 10.0, 10.0), + aFocus, 10.0, 10.0), nullptr); if (aOutEventStatuses) { (*aOutEventStatuses)[0] = actualStatus; } actualStatus = aTarget->ReceiveInputEvent( CreatePinchGestureInput(PinchGestureInput::PINCHGESTURE_SCALE, - aSecondFocusX, aSecondFocusY, 10.0 * aScale, 10.0), + aSecondFocus, 10.0 * aScale, 10.0), nullptr); if (aOutEventStatuses) { (*aOutEventStatuses)[1] = actualStatus; @@ -300,7 +315,7 @@ PinchWithPinchInput(const RefPtr& aTarget, CreatePinchGestureInput(PinchGestureInput::PINCHGESTURE_END, // note: negative values here tell APZC // not to turn the pinch into a pan - aFocusX, aFocusY, -1.0, -1.0), + aFocus, -1.0, -1.0), nullptr); if (aOutEventStatuses) { (*aOutEventStatuses)[2] = actualStatus; @@ -310,11 +325,11 @@ PinchWithPinchInput(const RefPtr& aTarget, template void PinchWithPinchInputAndCheckStatus(const RefPtr& aTarget, - int aFocusX, int aFocusY, 
float aScale, + const ScreenIntPoint& aFocus, float aScale, bool aShouldTriggerPinch) { nsEventStatus statuses[3]; // scalebegin, scale, scaleend - PinchWithPinchInput(aTarget, aFocusX, aFocusY, aFocusX, aFocusY, aScale, &statuses); + PinchWithPinchInput(aTarget, aFocus, aFocus, aScale, &statuses); nsEventStatus expectedStatus = aShouldTriggerPinch ? nsEventStatus_eConsumeNoDefault @@ -326,7 +341,7 @@ PinchWithPinchInputAndCheckStatus(const RefPtr& aTarget, template void PinchWithTouchInput(const RefPtr& aTarget, - int aFocusX, int aFocusY, float aScale, + const ScreenIntPoint& aFocus, float aScale, int& inputId, nsTArray* aAllowedTouchBehaviors = nullptr, nsEventStatus (*aOutEventStatuses)[4] = nullptr, @@ -345,8 +360,8 @@ PinchWithTouchInput(const RefPtr& aTarget, } MultiTouchInput mtiStart = MultiTouchInput(MultiTouchInput::MULTITOUCH_START, 0, TimeStamp(), 0); - mtiStart.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocusX, aFocusY)); - mtiStart.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocusX, aFocusY)); + mtiStart.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocus)); + mtiStart.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocus)); nsEventStatus status = aTarget->ReceiveInputEvent(mtiStart, aOutInputBlockId); if (aOutEventStatuses) { (*aOutEventStatuses)[0] = status; @@ -360,24 +375,24 @@ PinchWithTouchInput(const RefPtr& aTarget, } MultiTouchInput mtiMove1 = MultiTouchInput(MultiTouchInput::MULTITOUCH_MOVE, 0, TimeStamp(), 0); - mtiMove1.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocusX - pinchLength, aFocusY)); - mtiMove1.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocusX + pinchLength, aFocusY)); + mtiMove1.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocus.x - pinchLength, aFocus.y)); + mtiMove1.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocus.x + pinchLength, aFocus.y)); status = aTarget->ReceiveInputEvent(mtiMove1, nullptr); if (aOutEventStatuses) { (*aOutEventStatuses)[1] = status; } MultiTouchInput mtiMove2 = MultiTouchInput(MultiTouchInput::MULTITOUCH_MOVE, 0, TimeStamp(), 0); - mtiMove2.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocusX - pinchLengthScaled, aFocusY)); - mtiMove2.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocusX + pinchLengthScaled, aFocusY)); + mtiMove2.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocus.x - pinchLengthScaled, aFocus.y)); + mtiMove2.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocus.x + pinchLengthScaled, aFocus.y)); status = aTarget->ReceiveInputEvent(mtiMove2, nullptr); if (aOutEventStatuses) { (*aOutEventStatuses)[2] = status; } MultiTouchInput mtiEnd = MultiTouchInput(MultiTouchInput::MULTITOUCH_END, 0, TimeStamp(), 0); - mtiEnd.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocusX - pinchLengthScaled, aFocusY)); - mtiEnd.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocusX + pinchLengthScaled, aFocusY)); + mtiEnd.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocus.x - pinchLengthScaled, aFocus.y)); + mtiEnd.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocus.x + pinchLengthScaled, aFocus.y)); status = aTarget->ReceiveInputEvent(mtiEnd, nullptr); if (aOutEventStatuses) { (*aOutEventStatuses)[3] = status; @@ -389,12 +404,12 @@ PinchWithTouchInput(const RefPtr& aTarget, template void PinchWithTouchInputAndCheckStatus(const RefPtr& aTarget, - int aFocusX, int aFocusY, float aScale, + const ScreenIntPoint& aFocus, float aScale, int& inputId, bool 
aShouldTriggerPinch, nsTArray* aAllowedTouchBehaviors) { nsEventStatus statuses[4]; // down, move, move, up - PinchWithTouchInput(aTarget, aFocusX, aFocusY, aScale, inputId, aAllowedTouchBehaviors, &statuses); + PinchWithTouchInput(aTarget, aFocus, aScale, inputId, aAllowedTouchBehaviors, &statuses); nsEventStatus expectedMoveStatus = aShouldTriggerPinch ? nsEventStatus_eConsumeDoDefault @@ -406,12 +421,13 @@ PinchWithTouchInputAndCheckStatus(const RefPtr& aTarget, template void -DoubleTap(const RefPtr& aTarget, int aX, int aY, MockContentControllerDelayed* aMcc, +DoubleTap(const RefPtr& aTarget, const ScreenIntPoint& aPoint, + MockContentControllerDelayed* aMcc, nsEventStatus (*aOutEventStatuses)[4] = nullptr, uint64_t (*aOutInputBlockIds)[2] = nullptr) { uint64_t blockId; - nsEventStatus status = TouchDown(aTarget, aX, aY, aMcc->Time(), &blockId); + nsEventStatus status = TouchDown(aTarget, aPoint, aMcc->Time(), &blockId); if (aOutEventStatuses) { (*aOutEventStatuses)[0] = status; } @@ -426,12 +442,12 @@ DoubleTap(const RefPtr& aTarget, int aX, int aY, MockContentContr SetDefaultAllowedTouchBehavior(aTarget, blockId); } - status = TouchUp(aTarget, aX, aY, aMcc->Time()); + status = TouchUp(aTarget, aPoint, aMcc->Time()); if (aOutEventStatuses) { (*aOutEventStatuses)[1] = status; } aMcc->AdvanceByMillis(10); - status = TouchDown(aTarget, aX, aY, aMcc->Time(), &blockId); + status = TouchDown(aTarget, aPoint, aMcc->Time(), &blockId); if (aOutEventStatuses) { (*aOutEventStatuses)[2] = status; } @@ -444,7 +460,7 @@ DoubleTap(const RefPtr& aTarget, int aX, int aY, MockContentContr SetDefaultAllowedTouchBehavior(aTarget, blockId); } - status = TouchUp(aTarget, aX, aY, aMcc->Time()); + status = TouchUp(aTarget, aPoint, aMcc->Time()); if (aOutEventStatuses) { (*aOutEventStatuses)[3] = status; } @@ -452,11 +468,12 @@ DoubleTap(const RefPtr& aTarget, int aX, int aY, MockContentContr template void -DoubleTapAndCheckStatus(const RefPtr& aTarget, int aX, int aY, - MockContentControllerDelayed* aMcc, uint64_t (*aOutInputBlockIds)[2] = nullptr) +DoubleTapAndCheckStatus(const RefPtr& aTarget, + const ScreenIntPoint& aPoint, MockContentControllerDelayed* aMcc, + uint64_t (*aOutInputBlockIds)[2] = nullptr) { nsEventStatus statuses[4]; - DoubleTap(aTarget, aX, aY, aMcc, &statuses, aOutInputBlockIds); + DoubleTap(aTarget, aPoint, aMcc, &statuses, aOutInputBlockIds); EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[0]); EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[1]); EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[2]); diff --git a/gfx/layers/apz/test/gtest/TestBasic.cpp b/gfx/layers/apz/test/gtest/TestBasic.cpp index 648c8eeaafcc..1bc9fc9fc1c2 100644 --- a/gfx/layers/apz/test/gtest/TestBasic.cpp +++ b/gfx/layers/apz/test/gtest/TestBasic.cpp @@ -22,7 +22,7 @@ TEST_F(APZCBasicTester, Overzoom) { EXPECT_CALL(*mcc, RequestContentRepaint(_)).Times(1); - PinchWithPinchInputAndCheckStatus(apzc, 50, 50, 0.5, true); + PinchWithPinchInputAndCheckStatus(apzc, ScreenIntPoint(50, 50), 0.5, true); fm = apzc->GetFrameMetrics(); EXPECT_EQ(0.8f, fm.GetZoom().ToScaleFactor().scale); @@ -295,8 +295,8 @@ TEST_F(APZCBasicTester, OverScroll_Bug1152051b) { // to schedule a new one since we're still overscrolled. We don't pan because // panning can trigger functions that clear the overscroll animation state // in other ways. 
- TouchDown(apzc, 10, 10, mcc->Time(), nullptr); - TouchUp(apzc, 10, 10, mcc->Time()); + TouchDown(apzc, ScreenIntPoint(10, 10), mcc->Time(), nullptr); + TouchUp(apzc, ScreenIntPoint(10, 10), mcc->Time()); // Sample the second overscroll animation to its end. // If the ending of the first overscroll animation fails to clear state diff --git a/gfx/layers/apz/test/gtest/TestEventRegions.cpp b/gfx/layers/apz/test/gtest/TestEventRegions.cpp index 3cf8d7741b22..bb20749be499 100644 --- a/gfx/layers/apz/test/gtest/TestEventRegions.cpp +++ b/gfx/layers/apz/test/gtest/TestEventRegions.cpp @@ -186,18 +186,18 @@ TEST_F(APZEventRegionsTester, HitRegionImmediateResponse) { // Tap in the exposed hit regions of each of the layers once and ensure // the clicks are dispatched right away - Tap(manager, 10, 10, mcc, tapDuration); + Tap(manager, ScreenIntPoint(10, 10), mcc, tapDuration); mcc->RunThroughDelayedTasks(); // this runs the tap event check.Call("Tapped on left"); - Tap(manager, 110, 110, mcc, tapDuration); + Tap(manager, ScreenIntPoint(110, 110), mcc, tapDuration); mcc->RunThroughDelayedTasks(); // this runs the tap event check.Call("Tapped on bottom"); - Tap(manager, 110, 10, mcc, tapDuration); + Tap(manager, ScreenIntPoint(110, 10), mcc, tapDuration); mcc->RunThroughDelayedTasks(); // this runs the tap event check.Call("Tapped on root"); // Now tap on the dispatch-to-content region where the layers overlap - Tap(manager, 10, 110, mcc, tapDuration); + Tap(manager, ScreenIntPoint(10, 110), mcc, tapDuration); mcc->RunThroughDelayedTasks(); // this runs the main-thread timeout check.Call("Tap pending on d-t-c region"); mcc->RunThroughDelayedTasks(); // this runs the tap event @@ -205,7 +205,7 @@ TEST_F(APZEventRegionsTester, HitRegionImmediateResponse) { // Now let's do that again, but simulate a main-thread response uint64_t inputBlockId = 0; - Tap(manager, 10, 110, mcc, tapDuration, nullptr, &inputBlockId); + Tap(manager, ScreenIntPoint(10, 110), mcc, tapDuration, nullptr, &inputBlockId); nsTArray targets; targets.AppendElement(left->GetGuid()); manager->SetTargetAPZC(inputBlockId, targets); @@ -221,7 +221,7 @@ TEST_F(APZEventRegionsTester, HitRegionAccumulatesChildren) { // content controller, which indicates the input events got routed correctly // to the APZC. EXPECT_CALL(*mcc, HandleSingleTap(_, _, rootApzc->GetGuid())).Times(1); - Tap(manager, 10, 160, mcc, TimeDuration::FromMilliseconds(100)); + Tap(manager, ScreenIntPoint(10, 160), mcc, TimeDuration::FromMilliseconds(100)); } TEST_F(APZEventRegionsTester, Obscuration) { @@ -260,7 +260,7 @@ TEST_F(APZEventRegionsTester, Bug1117712) { // These touch events should hit the dispatch-to-content region of layers[3] // and so get queued with that APZC as the tentative target. uint64_t inputBlockId = 0; - Tap(manager, 55, 5, mcc, TimeDuration::FromMilliseconds(100), nullptr, &inputBlockId); + Tap(manager, ScreenIntPoint(55, 5), mcc, TimeDuration::FromMilliseconds(100), nullptr, &inputBlockId); // But now we tell the APZ that really it hit layers[2], and expect the tap // to be delivered at the correct coordinates. EXPECT_CALL(*mcc, HandleSingleTap(CSSPoint(55, 5), 0, apzc2->GetGuid())).Times(1); diff --git a/gfx/layers/apz/test/gtest/TestGestureDetector.cpp b/gfx/layers/apz/test/gtest/TestGestureDetector.cpp index 26814efdc21b..d47fd9ea1094 100644 --- a/gfx/layers/apz/test/gtest/TestGestureDetector.cpp +++ b/gfx/layers/apz/test/gtest/TestGestureDetector.cpp @@ -210,12 +210,12 @@ protected: // Deliver a tap to abort the fling. 
Ensure that we get a HandleSingleTap // call out of it if and only if the fling is slow. EXPECT_CALL(*mcc, HandleSingleTap(_, 0, apzc->GetGuid())).Times(tapCallsExpected); - Tap(apzc, 10, 10, mcc, 0); + Tap(apzc, ScreenIntPoint(10, 10), mcc, 0); while (mcc->RunThroughDelayedTasks()); // Deliver another tap, to make sure that taps are flowing properly once // the fling is aborted. - Tap(apzc, 100, 100, mcc, 0); + Tap(apzc, ScreenIntPoint(100, 100), mcc, 0); while (mcc->RunThroughDelayedTasks()); // Verify that we didn't advance any further after the fling was aborted, in either case. @@ -247,7 +247,7 @@ protected: EXPECT_GT(finalPoint.y, point.y); // Now we put our finger down to stop the fling - TouchDown(apzc, 10, 10, mcc->Time(), &blockId); + TouchDown(apzc, ScreenIntPoint(10, 10), mcc->Time(), &blockId); // Re-sample to make sure it hasn't moved apzc->SampleContentTransformForFrame(&viewTransform, point, TimeDuration::FromMilliseconds(10)); @@ -264,7 +264,7 @@ protected: EXPECT_EQ(finalPoint.y, point.y); // clean up - TouchUp(apzc, 10, 10, mcc->Time()); + TouchUp(apzc, ScreenIntPoint(10, 10), mcc->Time()); apzc->AssertStateIsReset(); } @@ -300,7 +300,7 @@ TEST_F(APZCGestureDetectorTester, ShortPress) { } check.Call("pre-tap"); - TapAndCheckStatus(apzc, 10, 10, mcc, TimeDuration::FromMilliseconds(100)); + TapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, TimeDuration::FromMilliseconds(100)); check.Call("post-tap"); apzc->AssertStateIsReset(); @@ -320,7 +320,7 @@ TEST_F(APZCGestureDetectorTester, MediumPress) { } check.Call("pre-tap"); - TapAndCheckStatus(apzc, 10, 10, mcc, TimeDuration::FromMilliseconds(400)); + TapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, TimeDuration::FromMilliseconds(400)); check.Call("post-tap"); apzc->AssertStateIsReset(); @@ -333,7 +333,7 @@ protected: uint64_t blockId = 0; - nsEventStatus status = TouchDown(apzc, 10, 10, mcc->Time(), &blockId); + nsEventStatus status = TouchDown(apzc, ScreenIntPoint(10, 10), mcc->Time(), &blockId); EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status); if (gfxPrefs::TouchActionEnabled() && status != nsEventStatus_eConsumeNoDefault) { @@ -376,7 +376,7 @@ protected: // Finally, simulate lifting the finger. Since the long-press wasn't // prevent-defaulted, we should get a long-tap-up event. 
check.Call("preHandleSingleTap"); - status = TouchUp(apzc, 10, 10, mcc->Time()); + status = TouchUp(apzc, ScreenIntPoint(10, 10), mcc->Time()); mcc->RunThroughDelayedTasks(); EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status); check.Call("postHandleSingleTap"); @@ -394,7 +394,7 @@ protected: touchEndY = 50; uint64_t blockId = 0; - nsEventStatus status = TouchDown(apzc, touchX, touchStartY, mcc->Time(), &blockId); + nsEventStatus status = TouchDown(apzc, ScreenIntPoint(touchX, touchStartY), mcc->Time(), &blockId); EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status); if (gfxPrefs::TouchActionEnabled() && status != nsEventStatus_eConsumeNoDefault) { @@ -436,7 +436,7 @@ protected: EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status); EXPECT_CALL(*mcc, HandleSingleTap(CSSPoint(touchX, touchEndY), 0, apzc->GetGuid())).Times(0); - status = TouchUp(apzc, touchX, touchEndY, mcc->Time()); + status = TouchUp(apzc, ScreenIntPoint(touchX, touchEndY), mcc->Time()); EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status); ParentLayerPoint pointOut; @@ -482,7 +482,7 @@ TEST_F(APZCGestureDetectorTester, DoubleTap) { EXPECT_CALL(*mcc, HandleDoubleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(1); uint64_t blockIds[2]; - DoubleTapAndCheckStatus(apzc, 10, 10, mcc, &blockIds); + DoubleTapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, &blockIds); // responses to the two touchstarts apzc->ContentReceivedInputBlock(blockIds[0], false); @@ -499,7 +499,7 @@ TEST_F(APZCGestureDetectorTester, DoubleTapNotZoomable) { EXPECT_CALL(*mcc, HandleDoubleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(0); uint64_t blockIds[2]; - DoubleTapAndCheckStatus(apzc, 10, 10, mcc, &blockIds); + DoubleTapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, &blockIds); // responses to the two touchstarts apzc->ContentReceivedInputBlock(blockIds[0], false); @@ -516,7 +516,7 @@ TEST_F(APZCGestureDetectorTester, DoubleTapPreventDefaultFirstOnly) { EXPECT_CALL(*mcc, HandleDoubleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(0); uint64_t blockIds[2]; - DoubleTapAndCheckStatus(apzc, 10, 10, mcc, &blockIds); + DoubleTapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, &blockIds); // responses to the two touchstarts apzc->ContentReceivedInputBlock(blockIds[0], true); @@ -533,7 +533,7 @@ TEST_F(APZCGestureDetectorTester, DoubleTapPreventDefaultBoth) { EXPECT_CALL(*mcc, HandleDoubleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(0); uint64_t blockIds[2]; - DoubleTapAndCheckStatus(apzc, 10, 10, mcc, &blockIds); + DoubleTapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, &blockIds); // responses to the two touchstarts apzc->ContentReceivedInputBlock(blockIds[0], true); @@ -549,7 +549,7 @@ TEST_F(APZCGestureDetectorTester, TapFollowedByPinch) { EXPECT_CALL(*mcc, HandleSingleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(1); - Tap(apzc, 10, 10, mcc, TimeDuration::FromMilliseconds(100)); + Tap(apzc, ScreenIntPoint(10, 10), mcc, TimeDuration::FromMilliseconds(100)); int inputId = 0; MultiTouchInput mti; @@ -571,7 +571,7 @@ TEST_F(APZCGestureDetectorTester, TapFollowedByMultipleTouches) { EXPECT_CALL(*mcc, HandleSingleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(1); - Tap(apzc, 10, 10, mcc, TimeDuration::FromMilliseconds(100)); + Tap(apzc, ScreenIntPoint(10, 10), mcc, TimeDuration::FromMilliseconds(100)); int inputId = 0; MultiTouchInput mti; diff --git a/gfx/layers/apz/test/gtest/TestHitTesting.cpp b/gfx/layers/apz/test/gtest/TestHitTesting.cpp index f66b42e89b66..ee9ccce16ebe 100644 --- a/gfx/layers/apz/test/gtest/TestHitTesting.cpp +++ 
b/gfx/layers/apz/test/gtest/TestHitTesting.cpp @@ -465,12 +465,12 @@ TEST_F(APZHitTestingTester, Bug1148350) { EXPECT_CALL(check, Call("Tapped with interleaved transform")); } - Tap(manager, 100, 100, mcc, TimeDuration::FromMilliseconds(100)); + Tap(manager, ScreenIntPoint(100, 100), mcc, TimeDuration::FromMilliseconds(100)); mcc->RunThroughDelayedTasks(); check.Call("Tapped without transform"); uint64_t blockId; - TouchDown(manager, 100, 100, mcc->Time(), &blockId); + TouchDown(manager, ScreenIntPoint(100, 100), mcc->Time(), &blockId); if (gfxPrefs::TouchActionEnabled()) { SetDefaultAllowedTouchBehavior(manager, blockId); } @@ -480,7 +480,7 @@ TEST_F(APZHitTestingTester, Bug1148350) { layers[0]->SetBaseTransform(Matrix4x4::Translation(0, 50, 0)); manager->UpdateHitTestingTree(nullptr, root, false, 0, 0); - TouchUp(manager, 100, 100, mcc->Time()); + TouchUp(manager, ScreenIntPoint(100, 100), mcc->Time()); mcc->RunThroughDelayedTasks(); check.Call("Tapped with interleaved transform"); } diff --git a/gfx/layers/apz/test/gtest/TestPinching.cpp b/gfx/layers/apz/test/gtest/TestPinching.cpp index 47be99ce82b7..818d5942b34d 100644 --- a/gfx/layers/apz/test/gtest/TestPinching.cpp +++ b/gfx/layers/apz/test/gtest/TestPinching.cpp @@ -44,9 +44,11 @@ protected: int touchInputId = 0; if (mGestureBehavior == AsyncPanZoomController::USE_GESTURE_DETECTOR) { - PinchWithTouchInputAndCheckStatus(apzc, 250, 300, 1.25, touchInputId, aShouldTriggerPinch, aAllowedTouchBehaviors); + PinchWithTouchInputAndCheckStatus(apzc, ScreenIntPoint(250, 300), 1.25, + touchInputId, aShouldTriggerPinch, aAllowedTouchBehaviors); } else { - PinchWithPinchInputAndCheckStatus(apzc, 250, 300, 1.25, aShouldTriggerPinch); + PinchWithPinchInputAndCheckStatus(apzc, ScreenIntPoint(250, 300), 1.25, + aShouldTriggerPinch); } FrameMetrics fm = apzc->GetFrameMetrics(); @@ -72,9 +74,11 @@ protected: // the visible area of the document in CSS pixels is x=930 y=5 w=50 h=100 if (mGestureBehavior == AsyncPanZoomController::USE_GESTURE_DETECTOR) { - PinchWithTouchInputAndCheckStatus(apzc, 250, 300, 0.5, touchInputId, aShouldTriggerPinch, aAllowedTouchBehaviors); + PinchWithTouchInputAndCheckStatus(apzc, ScreenIntPoint(250, 300), 0.5, + touchInputId, aShouldTriggerPinch, aAllowedTouchBehaviors); } else { - PinchWithPinchInputAndCheckStatus(apzc, 250, 300, 0.5, aShouldTriggerPinch); + PinchWithPinchInputAndCheckStatus(apzc, ScreenIntPoint(250, 300), 0.5, + aShouldTriggerPinch); } fm = apzc->GetFrameMetrics(); @@ -142,7 +146,8 @@ TEST_F(APZCPinchGestureDetectorTester, Pinch_PreventDefault) { int touchInputId = 0; uint64_t blockId = 0; - PinchWithTouchInput(apzc, 250, 300, 1.25, touchInputId, nullptr, nullptr, &blockId); + PinchWithTouchInput(apzc, ScreenIntPoint(250, 300), 1.25, touchInputId, + nullptr, nullptr, &blockId); // Send the prevent-default notification for the touch block apzc->ContentReceivedInputBlock(blockId, true); @@ -162,7 +167,8 @@ TEST_F(APZCPinchTester, Panning_TwoFinger_ZoomDisabled) { MakeApzcUnzoomable(); nsEventStatus statuses[3]; // scalebegin, scale, scaleend - PinchWithPinchInput(apzc, 250, 350, 200, 300, 10, &statuses); + PinchWithPinchInput(apzc, ScreenIntPoint(250, 350), ScreenIntPoint(200, 300), + 10, &statuses); FrameMetrics fm = apzc->GetFrameMetrics(); diff --git a/gfx/layers/apz/test/gtest/TestOverscrollHandoff.cpp b/gfx/layers/apz/test/gtest/TestScrollHandoff.cpp similarity index 79% rename from gfx/layers/apz/test/gtest/TestOverscrollHandoff.cpp rename to gfx/layers/apz/test/gtest/TestScrollHandoff.cpp index 
edb86f1a68ac..d1b58eb56f4c 100644 --- a/gfx/layers/apz/test/gtest/TestOverscrollHandoff.cpp +++ b/gfx/layers/apz/test/gtest/TestScrollHandoff.cpp @@ -8,12 +8,12 @@ #include "APZTestCommon.h" #include "InputUtils.h" -class APZOverscrollHandoffTester : public APZCTreeManagerTester { +class APZScrollHandoffTester : public APZCTreeManagerTester { protected: UniquePtr registration; TestAsyncPanZoomController* rootApzc; - void CreateOverscrollHandoffLayerTree1() { + void CreateScrollHandoffLayerTree1() { const char* layerTreeSyntax = "c(t)"; nsIntRegion layerVisibleRegion[] = { nsIntRegion(IntRect(0, 0, 100, 100)), @@ -26,9 +26,10 @@ protected: registration = MakeUnique(manager, 0, root, mcc); manager->UpdateHitTestingTree(nullptr, root, false, 0, 0); rootApzc = ApzcOf(root); + rootApzc->GetFrameMetrics().SetIsRootContent(true); // make root APZC zoomable } - void CreateOverscrollHandoffLayerTree2() { + void CreateScrollHandoffLayerTree2() { const char* layerTreeSyntax = "c(c(t))"; nsIntRegion layerVisibleRegion[] = { nsIntRegion(IntRect(0, 0, 100, 100)), @@ -48,7 +49,7 @@ protected: rootApzc = ApzcOf(root); } - void CreateOverscrollHandoffLayerTree3() { + void CreateScrollHandoffLayerTree3() { const char* layerTreeSyntax = "c(c(t)c(t))"; nsIntRegion layerVisibleRegion[] = { nsIntRegion(IntRect(0, 0, 100, 100)), // root @@ -126,9 +127,9 @@ protected: // Here we test that if the processing of a touch block is deferred while we // wait for content to send a prevent-default message, overscroll is still // handed off correctly when the block is processed. -TEST_F(APZOverscrollHandoffTester, DeferredInputEventProcessing) { +TEST_F(APZScrollHandoffTester, DeferredInputEventProcessing) { // Set up the APZC tree. - CreateOverscrollHandoffLayerTree1(); + CreateScrollHandoffLayerTree1(); TestAsyncPanZoomController* childApzc = ApzcOf(layers[1]); @@ -154,9 +155,9 @@ TEST_F(APZOverscrollHandoffTester, DeferredInputEventProcessing) { // one has been queued, overscroll handoff for the first block follows // the original layer structure while overscroll handoff for the second block // follows the new layer structure. -TEST_F(APZOverscrollHandoffTester, LayerStructureChangesWhileEventsArePending) { +TEST_F(APZScrollHandoffTester, LayerStructureChangesWhileEventsArePending) { // Set up an initial APZC tree. - CreateOverscrollHandoffLayerTree1(); + CreateScrollHandoffLayerTree1(); TestAsyncPanZoomController* childApzc = ApzcOf(layers[1]); @@ -170,7 +171,7 @@ TEST_F(APZOverscrollHandoffTester, LayerStructureChangesWhileEventsArePending) { // Modify the APZC tree to insert a new APZC 'middle' into the handoff chain // between the child and the root. - CreateOverscrollHandoffLayerTree2(); + CreateScrollHandoffLayerTree2(); RefPtr middle = layers[1]; childApzc->SetWaitForMainThread(); TestAsyncPanZoomController* middleApzc = ApzcOf(middle); @@ -202,11 +203,11 @@ TEST_F(APZOverscrollHandoffTester, LayerStructureChangesWhileEventsArePending) { // Test that putting a second finger down on an APZC while a down-chain APZC // is overscrolled doesn't result in being stuck in overscroll. -TEST_F(APZOverscrollHandoffTester, StuckInOverscroll_Bug1073250) { +TEST_F(APZScrollHandoffTester, StuckInOverscroll_Bug1073250) { // Enable overscrolling. 
SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true); - CreateOverscrollHandoffLayerTree1(); + CreateScrollHandoffLayerTree1(); TestAsyncPanZoomController* child = ApzcOf(layers[1]); @@ -239,11 +240,11 @@ TEST_F(APZOverscrollHandoffTester, StuckInOverscroll_Bug1073250) { // This is almost exactly like StuckInOverscroll_Bug1073250, except the // APZC receiving the input events for the first touch block is the child // (and thus not the same APZC that overscrolls, which is the parent). -TEST_F(APZOverscrollHandoffTester, StuckInOverscroll_Bug1231228) { +TEST_F(APZScrollHandoffTester, StuckInOverscroll_Bug1231228) { // Enable overscrolling. SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true); - CreateOverscrollHandoffLayerTree1(); + CreateScrollHandoffLayerTree1(); TestAsyncPanZoomController* child = ApzcOf(layers[1]); @@ -273,17 +274,95 @@ TEST_F(APZOverscrollHandoffTester, StuckInOverscroll_Bug1231228) { EXPECT_FALSE(rootApzc->IsOverscrolled()); } +TEST_F(APZScrollHandoffTester, StuckInOverscroll_Bug1240202a) { + // Enable overscrolling. + SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true); + + CreateScrollHandoffLayerTree1(); + + TestAsyncPanZoomController* child = ApzcOf(layers[1]); + + // Pan, causing the parent APZC to overscroll. + Pan(manager, mcc, 60, 90, true /* keep finger down */); + EXPECT_FALSE(child->IsOverscrolled()); + EXPECT_TRUE(rootApzc->IsOverscrolled()); + + // Lift the finger, triggering an overscroll animation + // (but don't allow it to run). + TouchUp(manager, ScreenIntPoint(10, 90), mcc->Time()); + + // Put the finger down again, interrupting the animation + // and entering the TOUCHING state. + TouchDown(manager, ScreenIntPoint(10, 90), mcc->Time()); + + // Lift the finger once again. + TouchUp(manager, ScreenIntPoint(10, 90), mcc->Time()); + + // Allow any animations to run their course. + child->AdvanceAnimationsUntilEnd(); + rootApzc->AdvanceAnimationsUntilEnd(); + + // Make sure nothing is overscrolled. + EXPECT_FALSE(child->IsOverscrolled()); + EXPECT_FALSE(rootApzc->IsOverscrolled()); +} + +TEST_F(APZScrollHandoffTester, StuckInOverscroll_Bug1240202b) { + // Enable overscrolling. + SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true); + + CreateScrollHandoffLayerTree1(); + + TestAsyncPanZoomController* child = ApzcOf(layers[1]); + + // Pan, causing the parent APZC to overscroll. + Pan(manager, mcc, 60, 90, true /* keep finger down */); + EXPECT_FALSE(child->IsOverscrolled()); + EXPECT_TRUE(rootApzc->IsOverscrolled()); + + // Lift the finger, triggering an overscroll animation + // (but don't allow it to run). + TouchUp(manager, ScreenIntPoint(10, 90), mcc->Time()); + + // Put the finger down again, interrupting the animation + // and entering the TOUCHING state. + TouchDown(manager, ScreenIntPoint(10, 90), mcc->Time()); + + // Put a second finger down. Since we're in the TOUCHING state, + // the "are we panned into overscroll" check will fail and we + // will not ignore the second finger, instead entering the + // PINCHING state. + MultiTouchInput secondFingerDown(MultiTouchInput::MULTITOUCH_START, 0, TimeStamp(), 0); + // Use the same touch identifier for the first touch (0) as TouchDown(). (A bit hacky.) + secondFingerDown.mTouches.AppendElement(SingleTouchData(0, ScreenIntPoint(10, 90), ScreenSize(0, 0), 0, 0)); + secondFingerDown.mTouches.AppendElement(SingleTouchData(1, ScreenIntPoint(10, 80), ScreenSize(0, 0), 0, 0)); + manager->ReceiveInputEvent(secondFingerDown, nullptr, nullptr); + + // Release the fingers. 
+ MultiTouchInput fingersUp = secondFingerDown; + fingersUp.mType = MultiTouchInput::MULTITOUCH_END; + manager->ReceiveInputEvent(fingersUp, nullptr, nullptr); + + // Allow any animations to run their course. + child->AdvanceAnimationsUntilEnd(); + rootApzc->AdvanceAnimationsUntilEnd(); + + // Make sure nothing is overscrolled. + EXPECT_FALSE(child->IsOverscrolled()); + EXPECT_FALSE(rootApzc->IsOverscrolled()); +} + // Test that flinging in a direction where one component of the fling goes into // overscroll but the other doesn't, results in just the one component being // handed off to the parent, while the original APZC continues flinging in the // other direction. -TEST_F(APZOverscrollHandoffTester, PartialFlingHandoff) { - CreateOverscrollHandoffLayerTree1(); +TEST_F(APZScrollHandoffTester, PartialFlingHandoff) { + CreateScrollHandoffLayerTree1(); // Fling up and to the left. The child APZC has room to scroll up, but not // to the left, so the horizontal component of the fling should be handed // off to the parent APZC. - Pan(manager, mcc, ScreenPoint(90, 90), ScreenPoint(55, 55)); + Pan(manager, mcc, ScreenIntPoint(90, 90), ScreenIntPoint(55, 55)); RefPtr parent = ApzcOf(root); RefPtr child = ApzcOf(layers[1]); @@ -300,9 +379,9 @@ TEST_F(APZOverscrollHandoffTester, PartialFlingHandoff) { // Here we test that if two flings are happening simultaneously, overscroll // is handed off correctly for each. -TEST_F(APZOverscrollHandoffTester, SimultaneousFlings) { +TEST_F(APZScrollHandoffTester, SimultaneousFlings) { // Set up an initial APZC tree. - CreateOverscrollHandoffLayerTree3(); + CreateScrollHandoffLayerTree3(); RefPtr parent1 = ApzcOf(layers[1]); RefPtr child1 = ApzcOf(layers[2]); @@ -330,7 +409,7 @@ TEST_F(APZOverscrollHandoffTester, SimultaneousFlings) { parent2->AssertStateIsFling(); } -TEST_F(APZOverscrollHandoffTester, Scrollgrab) { +TEST_F(APZScrollHandoffTester, Scrollgrab) { // Set up the layer tree CreateScrollgrabLayerTree(); @@ -345,7 +424,7 @@ TEST_F(APZOverscrollHandoffTester, Scrollgrab) { EXPECT_EQ(15, childApzc->GetFrameMetrics().GetScrollOffset().y); } -TEST_F(APZOverscrollHandoffTester, ScrollgrabFling) { +TEST_F(APZScrollHandoffTester, ScrollgrabFling) { // Set up the layer tree CreateScrollgrabLayerTree(); @@ -359,20 +438,20 @@ TEST_F(APZOverscrollHandoffTester, ScrollgrabFling) { childApzc->AssertStateIsReset(); } -TEST_F(APZOverscrollHandoffTester, ScrollgrabFlingAcceleration1) { +TEST_F(APZScrollHandoffTester, ScrollgrabFlingAcceleration1) { CreateScrollgrabLayerTree(true /* make parent scrollable */); TestFlingAcceleration(); } -TEST_F(APZOverscrollHandoffTester, ScrollgrabFlingAcceleration2) { +TEST_F(APZScrollHandoffTester, ScrollgrabFlingAcceleration2) { CreateScrollgrabLayerTree(false /* do not make parent scrollable */); TestFlingAcceleration(); } -TEST_F(APZOverscrollHandoffTester, ImmediateHandoffDisallowed_Pan) { +TEST_F(APZScrollHandoffTester, ImmediateHandoffDisallowed_Pan) { SCOPED_GFX_PREF(APZAllowImmediateHandoff, bool, false); - CreateOverscrollHandoffLayerTree1(); + CreateScrollHandoffLayerTree1(); RefPtr parentApzc = ApzcOf(root); RefPtr childApzc = ApzcOf(layers[1]); @@ -394,10 +473,10 @@ TEST_F(APZOverscrollHandoffTester, ImmediateHandoffDisallowed_Pan) { EXPECT_EQ(10, parentApzc->GetFrameMetrics().GetScrollOffset().y); } -TEST_F(APZOverscrollHandoffTester, ImmediateHandoffDisallowed_Fling) { +TEST_F(APZScrollHandoffTester, ImmediateHandoffDisallowed_Fling) { SCOPED_GFX_PREF(APZAllowImmediateHandoff, bool, false); - 
CreateOverscrollHandoffLayerTree1(); + CreateScrollHandoffLayerTree1(); RefPtr parentApzc = ApzcOf(root); RefPtr childApzc = ApzcOf(layers[1]); diff --git a/gfx/layers/apz/test/gtest/moz.build b/gfx/layers/apz/test/gtest/moz.build index 56a3422bb8da..c15e172cc38f 100644 --- a/gfx/layers/apz/test/gtest/moz.build +++ b/gfx/layers/apz/test/gtest/moz.build @@ -9,9 +9,9 @@ UNIFIED_SOURCES += [ 'TestEventRegions.cpp', 'TestGestureDetector.cpp', 'TestHitTesting.cpp', - 'TestOverscrollHandoff.cpp', 'TestPanning.cpp', 'TestPinching.cpp', + 'TestScrollHandoff.cpp', 'TestTreeManager.cpp', ] diff --git a/ipc/glue/MessageChannel.cpp b/ipc/glue/MessageChannel.cpp index e8e3dd002b1b..0cd2bb16a0b3 100644 --- a/ipc/glue/MessageChannel.cpp +++ b/ipc/glue/MessageChannel.cpp @@ -324,6 +324,7 @@ MessageChannel::MessageChannel(MessageListener *aListener) mDispatchingAsyncMessagePriority(0), mCurrentTransaction(0), mTimedOutMessageSeqno(0), + mTimedOutMessagePriority(0), mRecvdErrors(0), mRemoteStackDepthGuess(false), mSawInterruptOutMsg(false), @@ -1039,6 +1040,7 @@ MessageChannel::Send(Message* aMsg, Message* aReply) } mTimedOutMessageSeqno = seqno; + mTimedOutMessagePriority = prio; return false; } } @@ -1408,7 +1410,22 @@ MessageChannel::DispatchSyncMessage(const Message& aMsg, Message*& aReply) MessageChannel*& blockingVar = ShouldBlockScripts() ? gParentProcessBlocker : dummy; Result rv; - { + if (mTimedOutMessageSeqno && mTimedOutMessagePriority >= prio) { + // If the other side sends a message in response to one of our messages + // that we've timed out, then we reply with an error. + // + // We do this because we want to avoid a situation where we process an + // incoming message from the child here while it simultaneously starts + // processing our timed-out CPOW. It's very bad for both sides to + // be processing sync messages concurrently. + // + // The only exception is if the incoming message has urgent priority and + // our timed-out message had only high priority. In that case it's safe + // to process the incoming message because we know that the child won't + // process anything (the child will defer incoming messages when waiting + // for a response to its urgent message). + rv = MsgNotAllowed; + } else { AutoSetValue blocked(blockingVar, this); AutoSetValue sync(mDispatchingSyncMessage, true); AutoSetValue prioSet(mDispatchingSyncMessagePriority, prio); @@ -2117,8 +2134,10 @@ MessageChannel::CancelCurrentTransaction() { MonitorAutoLock lock(*mMonitor); if (mCurrentTransaction) { + CancelMessage *cancel = new CancelMessage(); + cancel->set_transaction_id(mCurrentTransaction); + mLink->SendMessage(cancel); CancelCurrentTransactionInternal(); - mLink->SendMessage(new CancelMessage()); } } diff --git a/ipc/glue/MessageChannel.h b/ipc/glue/MessageChannel.h index f05eed9338dd..72c25e12eae0 100644 --- a/ipc/glue/MessageChannel.h +++ b/ipc/glue/MessageChannel.h @@ -630,6 +630,7 @@ class MessageChannel : HasResultCodes // hitting a lot of corner cases with message nesting that we don't really // care about. int32_t mTimedOutMessageSeqno; + int mTimedOutMessagePriority; // If waiting for the reply to a sync out-message, it will be saved here // on the I/O thread and then read and cleared by the worker thread.
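
The DispatchSyncMessage change above gates incoming sync messages on the priority of a timed-out send. A minimal standalone sketch of that predicate (hypothetical names; in the real code this lives inside MessageChannel and the refused message is answered with MsgNotAllowed):

#include <cstdint>

// Returns true if an incoming sync message may be dispatched: either nothing
// has timed out, or the incoming message strictly outranks the timed-out one
// (e.g. an urgent child message arriving after a high-priority send timed out).
bool MayDispatchIncomingSync(int32_t aTimedOutSeqno,
                             int aTimedOutPriority,
                             int aIncomingPriority) {
  if (aTimedOutSeqno != 0 && aTimedOutPriority >= aIncomingPriority) {
    return false;  // reply with an error instead of processing sync messages concurrently
  }
  return true;
}
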
diff --git a/js/src/builtin/Array.js b/js/src/builtin/Array.js index b628c46491f2..049e1601fbeb 100644 --- a/js/src/builtin/Array.js +++ b/js/src/builtin/Array.js @@ -195,6 +195,36 @@ function ArrayStaticSome(list, callbackfn/*, thisArg*/) { return callFunction(ArraySome, list, callbackfn, T); } +/* ES6 draft 2016-1-15 22.1.3.25 Array.prototype.sort (comparefn) */ +function ArraySort(comparefn) { + /* Step 1. */ + var O = ToObject(this); + + /* Step 2. */ + var len = TO_UINT32(O.length); + + /* 22.1.3.25.1 Runtime Semantics: SortCompare( x, y ) */ + var wrappedCompareFn = comparefn; + comparefn = function(x, y) { + /* Steps 1-3. */ + if (x === undefined) { + if (y === undefined) + return 0; + return 1; + } + if (y === undefined) + return -1; + + /* Step 4.a. */ + var v = ToNumber(wrappedCompareFn(x, y)); + + /* Step 4.b-c. */ + return v !== v ? 0 : v; + } + + return MergeSort(O, len, comparefn); +} + /* ES5 15.4.4.18. */ function ArrayForEach(callbackfn/*, thisArg*/) { /* Step 1. */ diff --git a/js/src/builtin/Sorting.js b/js/src/builtin/Sorting.js new file mode 100644 index 000000000000..ade8fe1954d2 --- /dev/null +++ b/js/src/builtin/Sorting.js @@ -0,0 +1,185 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// We use varying sorts across the self-hosted codebase. All sorts are +// consolidated here to avoid confusion and re-implementation of existing +// algorithms. + +// For sorting small arrays. +function InsertionSort(array, from, to, comparefn) { + var item, swap; + for (var i = from + 1; i <= to; i++) { + item = array[i]; + for (var j = i - 1; j >= from; j--) { + swap = array[j]; + if (comparefn(swap, item) <= 0) + break; + array[j + 1] = swap; + } + array[j + 1] = item; + } +} + +function SwapArrayElements(array, i, j) { + var swap = array[i]; + array[i] = array[j]; + array[j] = swap; +} + +// A helper function for MergeSort. +function Merge(array, start, mid, end, lBuffer, rBuffer, comparefn) { + var i, j, k; + + var sizeLeft = mid - start + 1; + var sizeRight = end - mid; + + // Copy our virtual arrays into separate buffers. + for (i = 0; i < sizeLeft; i++) + lBuffer[i] = array[start + i]; + + for (j = 0; j < sizeRight; j++) + rBuffer[j] = array[mid + 1 + j]; + + i = 0; + j = 0; + k = start; + while (i < sizeLeft && j < sizeRight) { + if (comparefn(lBuffer[i], rBuffer[j]) <= 0) { + array[k] = lBuffer[i]; + i++; + } else { + array[k] = rBuffer[j]; + j++; + } + k++; + } + + // Empty out any remaining elements in the buffer. + while (i < sizeLeft) { + array[k] = lBuffer[i]; + i++; + k++; + } + + while (j < sizeRight) { + array[k] = rBuffer[j]; + j++; + k++; + } +} + +// Iterative, bottom up, mergesort. +function MergeSort(array, len, comparefn) { + // Insertion sort for small arrays, where "small" is defined by performance + // testing. + if (len < 24) { + InsertionSort(array, 0, len - 1, comparefn); + return array; + } + + // We do all of our allocating up front + var lBuffer = new List(); + var rBuffer = new List(); + var mid, end, endOne, endTwo; + + for (var windowSize = 1; windowSize < len; windowSize = 2*windowSize) { + for (var start = 0; start < len - 1; start += 2*windowSize) { + assert(windowSize < len, "The window size is larger than the array length!"); + // The midpoint between the two subarrays. + mid = start + windowSize - 1; + // To keep from going over the edge. 
+ end = start + 2 * windowSize - 1; + end = end < len - 1 ? end : len - 1; + // Skip lopsided runs to avoid doing useless work + if (mid > end) + continue; + Merge(array, start, mid, end, lBuffer, rBuffer, comparefn); + } + } + return array; +} + +// Rearranges the elements in array[from:to + 1] and returns an index j such that: +// - from < j < to +// - each element in array[from:j] is less than or equal to array[j] +// - each element in array[j + 1:to + 1] greater than or equal to array[j]. +function Partition(array, from, to, comparefn) { + assert(to - from >= 3, "Partition will not work with less than three elements"); + + var medianIndex = (from + to) >> 1; + + var i = from + 1; + var j = to; + + SwapArrayElements(array, medianIndex, i); + + // Median of three pivot selection. + if (comparefn(array[from], array[to]) > 0) + SwapArrayElements(array, from, to); + + if (comparefn(array[i], array[to]) > 0) + SwapArrayElements(array, i, to); + + if (comparefn(array[from], array[i]) > 0) + SwapArrayElements(array, from, i); + + var pivotIndex = i; + + // Hoare partition method. + for(;;) { + do i++; while (comparefn(array[i], array[pivotIndex]) < 0); + do j--; while (comparefn(array[j], array[pivotIndex]) > 0); + if (i > j) + break; + SwapArrayElements(array, i, j); + } + + SwapArrayElements(array, pivotIndex, j); + return j; +} + +// In-place QuickSort. +function QuickSort(array, len, comparefn) { + // Managing the stack ourselves seems to provide a small performance boost. + var stack = new List(); + var top = 0; + + var start = 0; + var end = len - 1; + + var pivotIndex, i, j, leftLen, rightLen; + + for (;;) { + // Insertion sort for the first N elements where N is some value + // determined by performance testing. + if (end - start <= 23) { + InsertionSort(array, start, end, comparefn); + if (top < 1) + break; + end = stack[--top]; + start = stack[--top]; + } else { + pivotIndex = Partition(array, start, end, comparefn); + + // Calculate the left and right sub-array lengths and save + // stack space by directly modifying start/end so that + // we sort the longest of the two during the next iteration. + // This reduces the maximum stack size to log2(len). + leftLen = (pivotIndex - 1) - start; + rightLen = end - (pivotIndex + 1); + + if (rightLen > leftLen) { + stack[top++] = start; + stack[top++] = pivotIndex - 1; + start = pivotIndex + 1; + } else { + stack[top++] = pivotIndex + 1; + stack[top++] = end; + end = pivotIndex - 1; + } + + } + } + return array; +} diff --git a/js/src/builtin/TypedArray.js b/js/src/builtin/TypedArray.js index d820857db57a..99453f87e865 100644 --- a/js/src/builtin/TypedArray.js +++ b/js/src/builtin/TypedArray.js @@ -939,112 +939,6 @@ function TypedArraySome(callbackfn, thisArg = undefined) { return false; } -// For sorting small arrays -function InsertionSort(array, from, to, comparefn) { - var item, swap; - for (var i = from + 1; i <= to; i++) { - item = array[i]; - for (var j = i - 1; j >= from; j--) { - swap = array[j]; - if (comparefn(swap, item) <= 0) - break - array[j + 1] = swap; - } - array[j + 1] = item; - } -} - -function SwapArrayElements(array, i, j) { - var swap = array[i]; - array[i] = array[j]; - array[j] = swap; -} - -// Rearranges the elements in array[from:to + 1] and returns an index j such that: -// - from < j < to -// - each element in array[from:j] is less than or equal to array[j] -// - each element in array[j + 1:to + 1] greater than or equal to array[j]. 
-function Partition(array, from, to, comparefn) { - assert(to - from >= 3, - "Partition will not work with less than three elements"); - - var median_i = (from + to) >> 1; - - var i = from + 1; - var j = to; - - SwapArrayElements(array, median_i, i); - - // Median of three pivot selection - if (comparefn(array[from], array[to]) > 0) - SwapArrayElements(array, from, to); - - if (comparefn(array[i], array[to]) > 0) - SwapArrayElements(array, i, to); - - if (comparefn(array[from], array[i]) > 0) - SwapArrayElements(array, from, i); - - var pivot_i = i; - - // Hoare partition method - for(;;) { - do i++; while (comparefn(array[i], array[pivot_i]) < 0); - do j--; while (comparefn(array[j], array[pivot_i]) > 0); - if (i > j) - break; - SwapArrayElements(array, i, j); - } - - SwapArrayElements(array, pivot_i, j); - return j; -} - -// In-place QuickSort -function QuickSort(array, len, comparefn) { - // Managing the stack ourselves seems to provide a small performance boost - var stack = new List(); - var top = 0; - - var start = 0; - var end = len - 1; - - var pivot_i, i, j, l_len, r_len; - - for (;;) { - // Insertion sort for the first N elements where N is some value - // determined by performance testing. - if (end - start <= 23) { - InsertionSort(array, start, end, comparefn); - if (top < 1) - break; - end = stack[--top]; - start = stack[--top]; - } else { - pivot_i = Partition(array, start, end, comparefn); - - // Calculate the left and right sub-array lengths and save - // stack space by directly modifying start/end so that - // we sort the longest of the two during the next iteration. - // This reduces the maximum stack size to log2(len) - l_len = (pivot_i - 1) - start; - r_len = end - (pivot_i + 1); - - if (r_len > l_len) { - stack[top++] = start; - stack[top++] = pivot_i - 1; - start = pivot_i + 1; - } else { - stack[top++] = pivot_i + 1; - stack[top++] = end; - end = pivot_i - 1; - } - - } - } - return array; -} - // ES6 draft 20151210 22.2.3.26 // Cases are ordered according to likelihood of occurrence // as opposed to the ordering in the spec. diff --git a/js/src/configure.in b/js/src/configure.in index 5834856d8ca0..17e1809c81a2 100644 --- a/js/src/configure.in +++ b/js/src/configure.in @@ -2901,24 +2901,6 @@ elif test "$GNU_CC"; then fi fi -dnl ======================================================== -dnl = Enable DMD -dnl ======================================================== - -MOZ_ARG_ENABLE_BOOL(dmd, -[ --enable-dmd Enable DMD; also enables jemalloc and replace-malloc], - MOZ_DMD=1, - MOZ_DMD= ) - -if test "$MOZ_DMD"; then - AC_DEFINE(MOZ_DMD) - - if test "${CPU_ARCH}" = "arm"; then - CFLAGS="$CFLAGS -funwind-tables" - CXXFLAGS="$CXXFLAGS -funwind-tables" - fi -fi - dnl ======================================================== dnl = Enable jemalloc dnl ======================================================== diff --git a/js/src/gc/GCRuntime.h b/js/src/gc/GCRuntime.h index 3af1c3d75929..91a4383b7fa4 100644 --- a/js/src/gc/GCRuntime.h +++ b/js/src/gc/GCRuntime.h @@ -18,11 +18,6 @@ #include "gc/StoreBuffer.h" #include "gc/Tracer.h" -/* Perform validation of incremental marking in debug builds but not on B2G. 
*/ -#if defined(DEBUG) && !defined(MOZ_B2G) -#define JS_GC_MARKING_VALIDATION -#endif - namespace js { class AutoLockGC; @@ -1182,7 +1177,7 @@ class GCRuntime js::gc::ZoneList zonesToMaybeCompact; ArenaHeader* relocatedArenasToRelease; -#ifdef JS_GC_MARKING_VALIDATION +#ifdef JS_GC_ZEAL js::gc::MarkingValidator* markingValidator; #endif diff --git a/js/src/jit-test/tests/self-hosting/invoke-self-hosted-with-primitive-this.js b/js/src/jit-test/tests/self-hosting/invoke-self-hosted-with-primitive-this.js index 59af0d26f6cd..24130e0f01f5 100644 --- a/js/src/jit-test/tests/self-hosting/invoke-self-hosted-with-primitive-this.js +++ b/js/src/jit-test/tests/self-hosting/invoke-self-hosted-with-primitive-this.js @@ -2,6 +2,6 @@ try { [0,0].sort(Array.some) "".replace(RegExp(), Array.reduce) } catch (error) { - if (!(error instanceof TypeError && error.message == "0 is not a function")) + if (!(error instanceof TypeError && /^\w is not a function$/.test(error.message))) throw error; -} \ No newline at end of file +} diff --git a/js/src/jsarray.cpp b/js/src/jsarray.cpp index 0ecc9c48c848..03fef59f284c 100644 --- a/js/src/jsarray.cpp +++ b/js/src/jsarray.cpp @@ -1817,6 +1817,43 @@ js::array_sort(JSContext* cx, unsigned argc, Value* vp) if (!obj) return false; + ComparatorMatchResult comp = MatchNumericComparator(cx, fval); + if (comp == Match_Failure) + return false; + + if (!fval.isNull() && comp == Match_None) { + /* + * Non-optimized user supplied comparators perform much better when + * called from within a self-hosted sorting function. + */ + RootedAtom selfHostedSortAtom(cx, Atomize(cx, "ArraySort", 9)); + RootedPropertyName selfHostedSortName(cx, selfHostedSortAtom->asPropertyName()); + RootedValue selfHostedSortValue(cx); + + if (!GlobalObject::getIntrinsicValue(cx, cx->global(), selfHostedSortName, + &selfHostedSortValue)) { + return false; + } + + MOZ_ASSERT(selfHostedSortValue.isObject()); + MOZ_ASSERT(selfHostedSortValue.toObject().is()); + + InvokeArgs iargs(cx); + + if (!iargs.init(1)) + return false; + + iargs.setCallee(selfHostedSortValue); + iargs.setThis(args.thisv()); + iargs[0].set(fval); + + if (!Invoke(cx, iargs)) + return false; + + args.rval().set(iargs.rval()); + return true; + } + uint32_t len; if (!GetLengthProperty(cx, obj, &len)) return false; @@ -1917,27 +1954,13 @@ js::array_sort(JSContext* cx, unsigned argc, Value* vp) return false; } } else { - ComparatorMatchResult comp = MatchNumericComparator(cx, fval); - if (comp == Match_Failure) - return false; - - if (comp != Match_None) { - if (allInts) { - JS_ALWAYS_TRUE(vec.resize(n * 2)); - if (!MergeSort(vec.begin(), n, vec.begin() + n, SortComparatorInt32s[comp])) - return false; - } else { - if (!SortNumerically(cx, &vec, n, comp)) - return false; - } - } else { - FastInvokeGuard fig(cx, fval); + if (allInts) { JS_ALWAYS_TRUE(vec.resize(n * 2)); - if (!MergeSort(vec.begin(), n, vec.begin() + n, - SortComparatorFunction(cx, fval, fig))) - { + if (!MergeSort(vec.begin(), n, vec.begin() + n, SortComparatorInt32s[comp])) + return false; + } else { + if (!SortNumerically(cx, &vec, n, comp)) return false; - } } } diff --git a/js/src/jsgc.cpp b/js/src/jsgc.cpp index f29911ea9a03..935a1dab404b 100644 --- a/js/src/jsgc.cpp +++ b/js/src/jsgc.cpp @@ -1150,7 +1150,7 @@ GCRuntime::GCRuntime(JSRuntime* rt) : arenasAllocatedDuringSweep(nullptr), startedCompacting(false), relocatedArenasToRelease(nullptr), -#ifdef JS_GC_MARKING_VALIDATION +#ifdef JS_GC_ZEAL markingValidator(nullptr), #endif interFrameGC(false), diff --git 
a/js/src/moz.build b/js/src/moz.build index d72999952c12..2e9377492a78 100644 --- a/js/src/moz.build +++ b/js/src/moz.build @@ -711,6 +711,7 @@ selfhosted.inputs = [ 'builtin/RegExp.js', 'builtin/String.js', 'builtin/Set.js', + 'builtin/Sorting.js', 'builtin/TypedArray.js', 'builtin/TypedObject.js', 'builtin/WeakSet.js' diff --git a/js/src/tests/ecma_6/Array/sort_basics.js b/js/src/tests/ecma_6/Array/sort_basics.js new file mode 100644 index 000000000000..8fff254ef642 --- /dev/null +++ b/js/src/tests/ecma_6/Array/sort_basics.js @@ -0,0 +1,34 @@ +// Note: failed runs should include their "SEED" value in error messages, +// setting "const SEED" to that value will recreate the data from any such run. +const SEED = (Math.random() * 10) + 1; + +// Create an array filled with random values, 'size' is the desired length of +// the array and 'seed' is an initial value supplied to a pseudo-random number +// generator. +function genRandomArray(size, seed) { + return Array.from(XorShiftGenerator(seed, size)); +} + +function SortTest(size, seed) { + let arrOne = genRandomArray(size, seed); + let arrTwo = Array.from(arrOne); + let arrThree = Array.from(arrOne); + + // Test numeric comparators against typed array sort. + assertDeepEq(Array.from((Int32Array.from(arrOne)).sort()), + arrTwo.sort((x, y) => (x - y)), + `The arr is not properly sorted! seed: ${SEED}`); + + // Use multiplication to kill comparator optimization and trigger + // self-hosted sorting. + assertDeepEq(Array.from((Int32Array.from(arrOne)).sort()), + arrThree.sort((x, y) => (1*x - 1*y)), + `The arr is not properly sorted! seed: ${SEED}`); +} + +SortTest(2048, SEED); +SortTest(16, SEED); +SortTest(0, SEED); + +if (typeof reportCompare === "function") + reportCompare(true, true); diff --git a/js/src/tests/ecma_6/Array/sort_small.js b/js/src/tests/ecma_6/Array/sort_small.js new file mode 100644 index 000000000000..fa8b789e10da --- /dev/null +++ b/js/src/tests/ecma_6/Array/sort_small.js @@ -0,0 +1,33 @@ +// Sort every possible permutation of some arrays. +function sortAllPermutations(data, comparefn) { + for (let permutation of Permutations(Array.from(data))) { + let sorted = (Array.from(permutation)).sort(comparefn); + for (let i in sorted) { + assertEq(sorted[i], data[i], + [`[${permutation}].sort(${comparefn})`, + `returned ${sorted}, expected ${data}`].join(' ')); + } + } +} + +let lex = [2112, "bob", "is", "my", "name"]; +let nans = [1/undefined, NaN, Number.NaN] + +let num1 = [-11, 0, 0, 100, 101]; +let num2 = [-11, 100, 201234.23, undefined, undefined]; + +sortAllPermutations(lex); +sortAllPermutations(nans); + +sortAllPermutations(nans, (x, y) => x - y); +// Multiplication kills comparator optimization. +sortAllPermutations(nans, (x, y) => (1*x - 1*y)); + +sortAllPermutations(num1, (x, y) => x - y); +sortAllPermutations(num1, (x, y) => (1*x - 1*y)); + +sortAllPermutations(num2, (x, y) => x - y); +sortAllPermutations(num2, (x, y) => (1*x - 1*y)); + +if (typeof reportCompare === "function") + reportCompare(true, true); diff --git a/js/src/tests/ecma_6/TypedArray/sort_basics.js b/js/src/tests/ecma_6/TypedArray/sort_basics.js index 76cf0c965466..ccd38bb0edb2 100644 --- a/js/src/tests/ecma_6/TypedArray/sort_basics.js +++ b/js/src/tests/ecma_6/TypedArray/sort_basics.js @@ -2,20 +2,6 @@ // setting "const SEED" to that value will recreate the data from any such run. 
const SEED = (Math.random() * 10) + 1; -// An xorshift pseudo-random number generator see: -// https://en.wikipedia.org/wiki/Xorshift#xorshift.2A -// This generator will always produce a value, n, where -// 0 <= n <= 255 -function *xorShiftGenerator(seed, size) { - let x = seed; - for (let i = 0; i < size; i++) { - x ^= x >> 12; - x ^= x << 25; - x ^= x >> 27; - yield x % 256; - } -} - // Fill up an array buffer with random values and return it in raw form. // 'size' is the desired length of the view we will place atop the buffer, // 'width' is the bit-width of the view we plan on placing atop the buffer, @@ -26,7 +12,7 @@ function genRandomArrayBuffer(size, width, seed) { let len = 0; // We generate a random number, n, where 0 <= n <= 255 for every space // available in our buffer. - for (let n of xorShiftGenerator(seed, buf.byteLength)) + for (let n of XorShiftGenerator(seed, buf.byteLength)) arr[len++] = n; return buf; } diff --git a/js/src/tests/ecma_6/TypedArray/sort_small.js b/js/src/tests/ecma_6/TypedArray/sort_small.js index 386f92e1db0c..ea462cbd72f9 100644 --- a/js/src/tests/ecma_6/TypedArray/sort_small.js +++ b/js/src/tests/ecma_6/TypedArray/sort_small.js @@ -1,22 +1,3 @@ -function swapElements(arr, i, j) { - var swap = arr[i]; - arr[i] = arr[j]; - arr[j] = swap; -} - -// Yield every permutation of the elements in some iterable. -function *permutations(items) { - if (items.length == 0) { - yield []; - } else { - for (let i = 0; i < items.length; i++) { - swapElements(items, 0, i); - for (let e of permutations(items.slice(1, items.length))) - yield [items[0]].concat(e); - } - } -} - // Pre-sorted test data, it's important that these arrays remain in ascending order. let i32 = [-2147483648, -320000, -244000, 2147483647] let u32 = [0, 987632, 4294967295] @@ -35,7 +16,7 @@ let nans = [1/undefined, NaN, Number.NaN] // Sort every possible permutation of an arrays function sortAllPermutations(dataType, testData) { let reference = new dataType(testData); - for (let permutation of permutations(testData)) + for (let permutation of Permutations(testData)) assertDeepEq((new dataType(permutation)).sort(), reference); } diff --git a/js/src/tests/shell.js b/js/src/tests/shell.js index a5702b96f832..4a9034f6097f 100644 --- a/js/src/tests/shell.js +++ b/js/src/tests/shell.js @@ -372,6 +372,40 @@ function enterFunc (funcName) callStack.push(funcName); } +/* + * An xorshift pseudo-random number generator see: + * https://en.wikipedia.org/wiki/Xorshift#xorshift.2A + * This generator will always produce a value, n, where + * 0 <= n <= 255 + */ +function *XorShiftGenerator(seed, size) { + let x = seed; + for (let i = 0; i < size; i++) { + x ^= x >> 12; + x ^= x << 25; + x ^= x >> 27; + yield x % 256; + } +} + +/* + * Yield every permutation of the elements in some iterable. + */ +function *Permutations(items) { + if (items.length == 0) { + yield []; + } else { + let swap; + for (let i = 0; i < items.length; i++) { + swap = items[0]; + items[0] = items[i]; + items[i] = swap; + for (let e of Permutations(items.slice(1, items.length))) + yield [items[0]].concat(e); + } + } +} + /* * Pops the top funcName off the call stack. funcName is optional, and can be * used to check push-pop balance. 
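
The XorShiftGenerator helper consolidated into shell.js above implements the xorshift recurrence cited in its comment. The same step as a self-contained C++ sketch (for illustration only; the tests use the JS generator, and unsigned 32-bit arithmetic is assumed here to keep results in [0, 255]):

#include <cstdint>
#include <cstdio>

// One xorshift step with the same shift constants as the JS helper;
// the low byte of the state is returned as the pseudo-random value.
uint32_t XorShiftNext(uint32_t& aState) {
  aState ^= aState >> 12;
  aState ^= aState << 25;
  aState ^= aState >> 27;
  return aState % 256;
}

int main() {
  uint32_t state = 7;  // any nonzero seed works
  for (int i = 0; i < 8; i++) {
    printf("%u ", (unsigned)XorShiftNext(state));
  }
  printf("\n");
  return 0;
}
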
diff --git a/js/src/vm/SavedStacks.cpp b/js/src/vm/SavedStacks.cpp index 2cb7b9c07a43..836be5addc06 100644 --- a/js/src/vm/SavedStacks.cpp +++ b/js/src/vm/SavedStacks.cpp @@ -1066,7 +1066,8 @@ SavedStacks::clear() size_t SavedStacks::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) { - return frames.sizeOfExcludingThis(mallocSizeOf); + return frames.sizeOfExcludingThis(mallocSizeOf) + + pcLocationMap.sizeOfExcludingThis(mallocSizeOf); } bool diff --git a/layout/base/nsDisplayList.cpp b/layout/base/nsDisplayList.cpp index e17dc7e0e3fc..c97c372b4777 100644 --- a/layout/base/nsDisplayList.cpp +++ b/layout/base/nsDisplayList.cpp @@ -1856,7 +1856,7 @@ struct FramesWithDepth {} bool operator<(const FramesWithDepth& aOther) const { - if (mDepth != aOther.mDepth) { + if (!FuzzyEqual(mDepth, aOther.mDepth, 0.1f)) { // We want to sort so that the shallowest item (highest depth value) is first return mDepth > aOther.mDepth; } @@ -1919,13 +1919,16 @@ void nsDisplayList::HitTest(nsDisplayListBuilder* aBuilder, const nsRect& aRect, bool snap; nsRect r = item->GetBounds(aBuilder, &snap).Intersect(aRect); auto itemType = item->GetType(); - bool alwaysIntersect = + bool same3DContext = (itemType == nsDisplayItem::TYPE_TRANSFORM && static_cast(item)->IsParticipating3DContext()) || (itemType == nsDisplayItem::TYPE_PERSPECTIVE && static_cast(item)->Frame()->Extend3DContext()); - if (alwaysIntersect && + if (same3DContext && !static_cast(item)->IsLeafOf3DContext()) { + if (!item->GetClip().MayIntersect(aRect)) { + continue; + } nsAutoTArray neverUsed; // Start gethering leaves of the 3D rendering context, and // append leaves at the end of mItemBuffer. Leaves are @@ -1936,7 +1939,7 @@ void nsDisplayList::HitTest(nsDisplayListBuilder* aBuilder, const nsRect& aRect, i = aState->mItemBuffer.Length(); continue; } - if (alwaysIntersect || item->GetClip().MayIntersect(r)) { + if (same3DContext || item->GetClip().MayIntersect(r)) { nsAutoTArray outFrames; item->HitTest(aBuilder, aRect, aState, &outFrames); diff --git a/layout/base/tests/mochitest.ini b/layout/base/tests/mochitest.ini index aee1fab0d417..de65f5b71a54 100644 --- a/layout/base/tests/mochitest.ini +++ b/layout/base/tests/mochitest.ini @@ -5,6 +5,7 @@ support-files = Ahem.ttf border_radius_hit_testing_iframe.html preserve3d_sorting_hit_testing_iframe.html + preserve3d_sorting_hit_testing2_iframe.html image_rgrg-256x256.png image_rrgg-256x256.png bug369950-subframe.xml @@ -41,6 +42,7 @@ support-files = multi-range-script-select-ref.html [test_preserve3d_sorting_hit_testing.html] +[test_preserve3d_sorting_hit_testing2.html] [test_after_paint_pref.html] [test_bug993936.html] skip-if = e10s diff --git a/layout/base/tests/preserve3d_sorting_hit_testing2_iframe.html b/layout/base/tests/preserve3d_sorting_hit_testing2_iframe.html new file mode 100644 index 000000000000..e9fa71977aca --- /dev/null +++ b/layout/base/tests/preserve3d_sorting_hit_testing2_iframe.html @@ -0,0 +1,97 @@ + + + + + + + +
+ [iframe body unrecoverable: the HTML markup was stripped during extraction; what remains shows a preserve-3d test page whose transformed blocks are filled with lorem-ipsum placeholder paragraphs used as hit-testing targets]
diff --git a/layout/base/tests/test_preserve3d_sorting_hit_testing2.html b/layout/base/tests/test_preserve3d_sorting_hit_testing2.html new file mode 100644 index 000000000000..991b94640dc5 --- /dev/null +++ b/layout/base/tests/test_preserve3d_sorting_hit_testing2.html @@ -0,0 +1,40 @@ + Test for Bug 1241394 + Mozilla Bug 1241394 + [test body unrecoverable: markup stripped during extraction; the page presumably drives the preserve3d_sorting_hit_testing2_iframe.html support file added above]
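
The nsDisplayList.cpp change earlier in this patch sorts preserve-3d frames with a fuzzy depth comparison, so depths within 0.1 of one another are treated as equal and fall through to the next ordering criterion instead of being reordered by floating-point noise. A simplified sketch of the shape of that comparator (the mIndex tie-breaker is hypothetical; the real struct carries its own secondary key):

#include <cmath>
#include <cstdint>

static bool FuzzyEqual(float aA, float aB, float aTolerance) {
  return std::fabs(aA - aB) <= aTolerance;
}

struct FramesWithDepth {
  float mDepth;
  uint32_t mIndex;  // hypothetical secondary key, e.g. insertion order

  bool operator<(const FramesWithDepth& aOther) const {
    if (!FuzzyEqual(mDepth, aOther.mDepth, 0.1f)) {
      // Shallowest item (highest depth value) sorts first.
      return mDepth > aOther.mDepth;
    }
    return mIndex < aOther.mIndex;
  }
};
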
+ + diff --git a/media/mtransport/third_party/nICEr/src/ice/ice_socket.c b/media/mtransport/third_party/nICEr/src/ice/ice_socket.c index eb3e5cad615f..177bbf75737d 100644 --- a/media/mtransport/third_party/nICEr/src/ice/ice_socket.c +++ b/media/mtransport/third_party/nICEr/src/ice/ice_socket.c @@ -65,7 +65,7 @@ static void nr_ice_socket_readable_cb(NR_SOCKET s, int how, void *cb_arg) NR_ASYNC_WAIT(s,how,nr_ice_socket_readable_cb,cb_arg); if(r=nr_socket_recvfrom(sock->sock,buf,sizeof(buf),&len_s,0,&addr)){ - if (r != R_WOULDBLOCK && (sock->type == NR_ICE_SOCKET_TYPE_STREAM_TURN)) { + if (r != R_WOULDBLOCK && (sock->type != NR_ICE_SOCKET_TYPE_DGRAM)) { /* Report this error upward. Bug 946423 */ r_log(LOG_ICE,LOG_ERR,"ICE(%s): Error %d on reliable socket. Abandoning.",sock->ctx->label, r); NR_ASYNC_CANCEL(s, NR_ASYNC_WAIT_READ); diff --git a/media/mtransport/third_party/nICEr/src/net/nr_proxy_tunnel.c b/media/mtransport/third_party/nICEr/src/net/nr_proxy_tunnel.c index 0f49390de601..b26f2d278973 100644 --- a/media/mtransport/third_party/nICEr/src/net/nr_proxy_tunnel.c +++ b/media/mtransport/third_party/nICEr/src/net/nr_proxy_tunnel.c @@ -47,13 +47,19 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #define END_HEADERS CRLF CRLF +typedef enum { + PROXY_TUNNEL_NONE=0, + PROXY_TUNNEL_REQUESTED, + PROXY_TUNNEL_CONNECTED, + PROXY_TUNNEL_CLOSED, + PROXY_TUNNEL_FAILED +} nr_socket_proxy_tunnel_state; + typedef struct nr_socket_proxy_tunnel_ { nr_proxy_tunnel_config *config; nr_socket *inner; nr_transport_addr remote_addr; - int connect_requested; - int connect_answered; - int connect_failed; + nr_socket_proxy_tunnel_state state; char buffer[MAX_HTTP_CONNECT_BUFFER_SIZE]; size_t buffered_bytes; void *resolver_handle; @@ -143,7 +149,7 @@ static int send_http_connect(nr_socket_proxy_tunnel *sock) ABORT(R_IO_ERROR); } - sock->connect_requested = 1; + sock->state = PROXY_TUNNEL_REQUESTED; _status = 0; abort: @@ -173,6 +179,9 @@ static int parse_http_response(char *begin, char *end, unsigned int *status) // len should *never* be greater than nr_socket_proxy_tunnel::buffered_bytes. // Which in turn should never be greater nr_socket_proxy_tunnel::buffer size. assert(len <= MAX_HTTP_CONNECT_BUFFER_SIZE); + if (len > MAX_HTTP_CONNECT_BUFFER_SIZE) { + return R_BAD_DATA; + } memcpy(response, begin, len); response[len] = '\0'; @@ -249,6 +258,10 @@ static int nr_socket_proxy_tunnel_resolved_cb(void *obj, nr_transport_addr *prox else { r_log(LOG_GENERIC,LOG_WARNING,"Failed to resolve proxy %s", sock->config->proxy_host); + /* TODO: Mozilla bug 1241758: because of the callback the return value goes + * nowhere, so we can't mark the candidate as failed, so everything depends + * on the overall timeouts in this case. */ + sock->state = PROXY_TUNNEL_FAILED; ABORT(R_NOT_FOUND); } @@ -336,13 +349,20 @@ int nr_socket_proxy_tunnel_write(void *obj, const void *msg, size_t len, r_log(LOG_GENERIC,LOG_DEBUG,"nr_socket_proxy_tunnel_write"); - if (!sock->connect_requested) { + if (sock->state >= PROXY_TUNNEL_CLOSED) { + return R_FAILED; + } + + if (sock->state == PROXY_TUNNEL_NONE) { if ((r=send_http_connect(sock))) { ABORT(r); } } - /* TODO (bug 1117984): we cannot assume it's safe to write until we receive a response. 
*/ + if (sock->state != PROXY_TUNNEL_CONNECTED) { + return R_WOULDBLOCK; + } + if ((r=nr_socket_write(sock->inner, msg, len, written, 0))) { ABORT(r); } @@ -366,11 +386,11 @@ int nr_socket_proxy_tunnel_read(void *obj, void * restrict buf, size_t maxlen, *len = 0; - if (sock->connect_failed) { + if (sock->state >= PROXY_TUNNEL_CLOSED) { return R_FAILED; } - if (sock->connect_answered) { + if (sock->state == PROXY_TUNNEL_CONNECTED) { return nr_socket_read(sock->inner, buf, maxlen, len, 0); } @@ -391,8 +411,6 @@ int nr_socket_proxy_tunnel_read(void *obj, void * restrict buf, size_t maxlen, sock->buffered_bytes += bytes_read; if (http_term = find_http_terminator(sock->buffer, sock->buffered_bytes)) { - sock->connect_answered = 1; - if ((r = parse_http_response(sock->buffer, http_term, &http_status))) { ABORT(r); } @@ -404,6 +422,8 @@ int nr_socket_proxy_tunnel_read(void *obj, void * restrict buf, size_t maxlen, ABORT(R_FAILED); } + sock->state = PROXY_TUNNEL_CONNECTED; + ptr = http_term + strlen(END_HEADERS); pending = sock->buffered_bytes - (ptr - sock->buffer); @@ -420,7 +440,7 @@ int nr_socket_proxy_tunnel_read(void *obj, void * restrict buf, size_t maxlen, _status=0; abort: if (_status && _status != R_WOULDBLOCK) { - sock->connect_failed = 1; + sock->state = PROXY_TUNNEL_FAILED; } return(_status); } @@ -436,6 +456,8 @@ int nr_socket_proxy_tunnel_close(void *obj) sock->resolver_handle = 0; } + sock->state = PROXY_TUNNEL_CLOSED; + return nr_socket_close(sock->inner); } diff --git a/media/mtransport/third_party/nICEr/src/stun/nr_socket_buffered_stun.c b/media/mtransport/third_party/nICEr/src/stun/nr_socket_buffered_stun.c index 89effe9e322e..2301654f5d64 100644 --- a/media/mtransport/third_party/nICEr/src/stun/nr_socket_buffered_stun.c +++ b/media/mtransport/third_party/nICEr/src/stun/nr_socket_buffered_stun.c @@ -524,6 +524,10 @@ static void nr_socket_buffered_stun_writable_cb(NR_SOCKET s, int how, void *arg) int r,_status; nr_p_buf *n1, *n2; + if (sock->read_state == NR_ICE_SOCKET_READ_FAILED) { + ABORT(R_FAILED); + } + /* Try to flush */ STAILQ_FOREACH_SAFE(n1, &sock->pending_writes, entry, n2) { size_t written = 0; diff --git a/media/mtransport/transportlayer.cpp b/media/mtransport/transportlayer.cpp index 2b2772e34052..5f05b1d5de1c 100644 --- a/media/mtransport/transportlayer.cpp +++ b/media/mtransport/transportlayer.cpp @@ -49,19 +49,4 @@ void TransportLayer::SetState(State state, const char *file, unsigned line) { } } -nsresult TransportLayer::RunOnThread(nsIRunnable *event) { - if (target_) { - nsIThread *thr; - - DebugOnly rv = NS_GetCurrentThread(&thr); - MOZ_ASSERT(NS_SUCCEEDED(rv)); - - if (target_ != thr) { - return target_->Dispatch(event, NS_DISPATCH_SYNC); - } - } - - return event->Run(); -} - } // close namespace diff --git a/media/mtransport/transportlayer.h b/media/mtransport/transportlayer.h index 85afad7fd9ab..fb40904c31e8 100644 --- a/media/mtransport/transportlayer.h +++ b/media/mtransport/transportlayer.h @@ -57,10 +57,6 @@ class TransportLayer : public sigslot::has_slots<> { // Downward interface TransportLayer *downward() { return downward_; } - // Dispatch a call onto our thread (or run on the same thread if - // thread is not set). This is always synchronous. 
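The nr_proxy_tunnel changes above replace three independent booleans (connect_requested, connect_answered, connect_failed) with one ordered enum, so callers can gate on simple comparisons: any state at or past PROXY_TUNNEL_CLOSED is terminal, and writes issued before PROXY_TUNNEL_CONNECTED now return R_WOULDBLOCK instead of being forwarded to the inner socket. A minimal sketch of that gating logic; tunnel_write_gate and the R_* values are stand-ins for the real nICEr names, not part of the patch:

    enum tunnel_state {              /* ordering is load-bearing: states only advance */
      TUNNEL_NONE = 0,
      TUNNEL_REQUESTED,
      TUNNEL_CONNECTED,
      TUNNEL_CLOSED,
      TUNNEL_FAILED
    };
    enum { R_OK = 0, R_WOULDBLOCK, R_FAILED };  /* stand-ins for nICEr's R_* codes */

    int tunnel_write_gate(enum tunnel_state state) {
      if (state >= TUNNEL_CLOSED)     /* closed or failed: never writable again */
        return R_FAILED;
      if (state != TUNNEL_CONNECTED)  /* CONNECT still pending: caller retries later */
        return R_WOULDBLOCK;
      return R_OK;                    /* tunnel established: pass through */
    }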
- nsresult RunOnThread(nsIRunnable *event); - // Get the state State state() const { return state_; } // Must be implemented by derived classes diff --git a/media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp b/media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp index 3b07a685a6e8..6a2a6b123c25 100644 --- a/media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp +++ b/media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp @@ -2088,16 +2088,7 @@ JsepSessionImpl::SetupDefaultCodecs() )); // Supported video codecs. - JsepVideoCodecDescription* vp8 = new JsepVideoCodecDescription( - "120", - "VP8", - 90000 - ); - // Defaults for mandatory params - vp8->mConstraints.maxFs = 12288; // Enough for 2048x1536 - vp8->mConstraints.maxFps = 60; - mSupportedCodecs.values.push_back(vp8); - + // Note: order here implies priority for building offers! JsepVideoCodecDescription* vp9 = new JsepVideoCodecDescription( "121", "VP9", @@ -2108,6 +2099,16 @@ JsepSessionImpl::SetupDefaultCodecs() vp9->mConstraints.maxFps = 60; mSupportedCodecs.values.push_back(vp9); + JsepVideoCodecDescription* vp8 = new JsepVideoCodecDescription( + "120", + "VP8", + 90000 + ); + // Defaults for mandatory params + vp8->mConstraints.maxFs = 12288; // Enough for 2048x1536 + vp8->mConstraints.maxFps = 60; + mSupportedCodecs.values.push_back(vp8); + JsepVideoCodecDescription* h264_1 = new JsepVideoCodecDescription( "126", "H264", diff --git a/media/webrtc/signaling/test/jsep_session_unittest.cpp b/media/webrtc/signaling/test/jsep_session_unittest.cpp index 48923152a928..c548f746daad 100644 --- a/media/webrtc/signaling/test/jsep_session_unittest.cpp +++ b/media/webrtc/signaling/test/jsep_session_unittest.cpp @@ -2656,8 +2656,8 @@ TEST_F(JsepSessionTest, ValidateOfferedCodecParams) ASSERT_EQ(SdpDirectionAttribute::kSendrecv, video_attrs.GetDirection()); ASSERT_EQ(4U, video_section.GetFormats().size()); - ASSERT_EQ("120", video_section.GetFormats()[0]); - ASSERT_EQ("121", video_section.GetFormats()[1]); + ASSERT_EQ("121", video_section.GetFormats()[0]); + ASSERT_EQ("120", video_section.GetFormats()[1]); ASSERT_EQ("126", video_section.GetFormats()[2]); ASSERT_EQ("97", video_section.GetFormats()[3]); @@ -2790,22 +2790,24 @@ TEST_F(JsepSessionTest, ValidateAnsweredCodecParams) // TODO(bug 1099351): Once fixed, this stuff will need to be updated. 
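The JsepSessionImpl hunk above swaps registration order so VP9 (payload type 121) is pushed onto mSupportedCodecs before VP8 (120); per the new comment, registration order is offer priority, which is why the asserts below now expect the m-line formats 121, 120, 126, 97. A hedged sketch of that relationship, assuming simplified Codec and BuildFormatList names that are illustrative, not the real types:

    #include <string>
    #include <vector>

    struct Codec { std::string pt; std::string name; };

    // The m=video format list is simply the codec registration order.
    std::string BuildFormatList(const std::vector<Codec>& supported) {
      std::string fmts;
      for (const Codec& c : supported) {
        if (!fmts.empty()) fmts += ' ';
        fmts += c.pt;
      }
      return fmts;
    }
    // {{"121","VP9"}, {"120","VP8"}, {"126","H264"}, {"97","H264"}}
    //   -> "121 120 126 97": VP9 is now the preferred video codec in offers.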
ASSERT_EQ(1U, video_section.GetFormats().size()); // ASSERT_EQ(3U, video_section.GetFormats().size()); - ASSERT_EQ("120", video_section.GetFormats()[0]); + ASSERT_EQ("121", video_section.GetFormats()[0]); // ASSERT_EQ("126", video_section.GetFormats()[1]); // ASSERT_EQ("97", video_section.GetFormats()[2]); // Validate rtpmap ASSERT_TRUE(video_attrs.HasAttribute(SdpAttribute::kRtpmapAttribute)); auto& rtpmaps = video_attrs.GetRtpmap(); - ASSERT_TRUE(rtpmaps.HasEntry("120")); + ASSERT_TRUE(rtpmaps.HasEntry("121")); // ASSERT_TRUE(rtpmaps.HasEntry("126")); // ASSERT_TRUE(rtpmaps.HasEntry("97")); - auto& vp8_entry = rtpmaps.GetEntry("120"); + //auto& vp8_entry = rtpmaps.GetEntry("120"); + auto& vp9_entry = rtpmaps.GetEntry("121"); // auto& h264_1_entry = rtpmaps.GetEntry("126"); // auto& h264_0_entry = rtpmaps.GetEntry("97"); - ASSERT_EQ("VP8", vp8_entry.name); + //ASSERT_EQ("VP8", vp8_entry.name); + ASSERT_EQ("VP9", vp9_entry.name); // ASSERT_EQ("H264", h264_1_entry.name); // ASSERT_EQ("H264", h264_0_entry.name); @@ -2816,17 +2818,17 @@ TEST_F(JsepSessionTest, ValidateAnsweredCodecParams) ASSERT_EQ(1U, fmtps.size()); // ASSERT_EQ(3U, fmtps.size()); - // VP8 - ASSERT_EQ("120", fmtps[0].format); + // VP9 + ASSERT_EQ("121", fmtps[0].format); ASSERT_TRUE(!!fmtps[0].parameters); - ASSERT_EQ(SdpRtpmapAttributeList::kVP8, fmtps[0].parameters->codec_type); + ASSERT_EQ(SdpRtpmapAttributeList::kVP9, fmtps[0].parameters->codec_type); - auto& parsed_vp8_params = + auto& parsed_vp9_params = *static_cast( fmtps[0].parameters.get()); - ASSERT_EQ((uint32_t)12288, parsed_vp8_params.max_fs); - ASSERT_EQ((uint32_t)60, parsed_vp8_params.max_fr); + ASSERT_EQ((uint32_t)12288, parsed_vp9_params.max_fs); + ASSERT_EQ((uint32_t)60, parsed_vp9_params.max_fr); SetLocalAnswer(answer); diff --git a/media/webrtc/trunk/webrtc/base/base.gyp b/media/webrtc/trunk/webrtc/base/base.gyp index 164a8beb67e8..e7c6c9030739 100644 --- a/media/webrtc/trunk/webrtc/base/base.gyp +++ b/media/webrtc/trunk/webrtc/base/base.gyp @@ -29,8 +29,13 @@ 'target_name': 'rtc_base_approved', 'type': 'static_library', 'sources': [ + 'bitbuffer.cc', + 'bitbuffer.h', + 'buffer.cc', + 'buffer.h', 'checks.cc', 'checks.h', + 'constructormagic.h', 'event.cc', 'event.h', 'event_tracer.cc', diff --git a/media/webrtc/trunk/webrtc/base/bitbuffer.cc b/media/webrtc/trunk/webrtc/base/bitbuffer.cc new file mode 100644 index 000000000000..1aa245e78c79 --- /dev/null +++ b/media/webrtc/trunk/webrtc/base/bitbuffer.cc @@ -0,0 +1,296 @@ +/* + * Copyright 2015 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/base/bitbuffer.h" + +#include +#include + +#include "webrtc/base/checks.h" + +namespace { + +// Returns the lowest (right-most) |bit_count| bits in |byte|. +uint8_t LowestBits(uint8_t byte, size_t bit_count) { + RTC_DCHECK_LE(bit_count, 8u); + return byte & ((1 << bit_count) - 1); +} + +// Returns the highest (left-most) |bit_count| bits in |byte|, shifted to the +// lowest bits (to the right). 
+uint8_t HighestBits(uint8_t byte, size_t bit_count) { + RTC_DCHECK_LE(bit_count, 8u); + uint8_t shift = 8 - static_cast(bit_count); + uint8_t mask = 0xFF << shift; + return (byte & mask) >> shift; +} + +// Returns the highest byte of |val| in a uint8_t. +uint8_t HighestByte(uint64_t val) { + return static_cast(val >> 56); +} + +// Returns the result of writing partial data from |source|, of +// |source_bit_count| size in the highest bits, to |target| at +// |target_bit_offset| from the highest bit. +uint8_t WritePartialByte(uint8_t source, + size_t source_bit_count, + uint8_t target, + size_t target_bit_offset) { + RTC_DCHECK(target_bit_offset < 8); + RTC_DCHECK(source_bit_count < 9); + RTC_DCHECK(source_bit_count <= (8 - target_bit_offset)); + // Generate a mask for just the bits we're going to overwrite, so: + uint8_t mask = + // The number of bits we want, in the most significant bits... + static_cast(0xFF << (8 - source_bit_count)) + // ...shifted over to the target offset from the most signficant bit. + >> target_bit_offset; + + // We want the target, with the bits we'll overwrite masked off, or'ed with + // the bits from the source we want. + return (target & ~mask) | (source >> target_bit_offset); +} + +// Counts the number of bits used in the binary representation of val. +size_t CountBits(uint64_t val) { + size_t bit_count = 0; + while (val != 0) { + bit_count++; + val >>= 1; + } + return bit_count; +} + +} // namespace + +namespace rtc { + +BitBuffer::BitBuffer(const uint8_t* bytes, size_t byte_count) + : bytes_(bytes), byte_count_(byte_count), byte_offset_(), bit_offset_() { + RTC_DCHECK(static_cast(byte_count_) <= + std::numeric_limits::max()); +} + +uint64_t BitBuffer::RemainingBitCount() const { + return (static_cast(byte_count_) - byte_offset_) * 8 - bit_offset_; +} + +bool BitBuffer::ReadUInt8(uint8_t* val) { + uint32_t bit_val; + if (!ReadBits(&bit_val, sizeof(uint8_t) * 8)) { + return false; + } + RTC_DCHECK(bit_val <= std::numeric_limits::max()); + *val = static_cast(bit_val); + return true; +} + +bool BitBuffer::ReadUInt16(uint16_t* val) { + uint32_t bit_val; + if (!ReadBits(&bit_val, sizeof(uint16_t) * 8)) { + return false; + } + RTC_DCHECK(bit_val <= std::numeric_limits::max()); + *val = static_cast(bit_val); + return true; +} + +bool BitBuffer::ReadUInt32(uint32_t* val) { + return ReadBits(val, sizeof(uint32_t) * 8); +} + +bool BitBuffer::PeekBits(uint32_t* val, size_t bit_count) { + if (!val || bit_count > RemainingBitCount() || bit_count > 32) { + return false; + } + const uint8_t* bytes = bytes_ + byte_offset_; + size_t remaining_bits_in_current_byte = 8 - bit_offset_; + uint32_t bits = LowestBits(*bytes++, remaining_bits_in_current_byte); + // If we're reading fewer bits than what's left in the current byte, just + // return the portion of this byte that we need. + if (bit_count < remaining_bits_in_current_byte) { + *val = HighestBits(bits, bit_offset_ + bit_count); + return true; + } + // Otherwise, subtract what we've read from the bit count and read as many + // full bytes as we can into bits. + bit_count -= remaining_bits_in_current_byte; + while (bit_count >= 8) { + bits = (bits << 8) | *bytes++; + bit_count -= 8; + } + // Whatever we have left is smaller than a byte, so grab just the bits we need + // and shift them into the lowest bits. 
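    // [Illustrative worked example, not part of the patch] Tracing this tail of
    // PeekBits with bit_count = 4, bit_offset_ = 6 over bytes {0xAB, 0xCD}:
    //   0xAB = 0b1010'1011, 0xCD = 0b1100'1101
    //   remaining_bits_in_current_byte = 2, bits = LowestBits(0xAB, 2) = 0b11
    //   bit_count -= 2 -> 2 (< 8, so the whole-byte loop is skipped)
    //   then below: bits <<= 2 -> 0b1100; bits |= HighestBits(0xCD, 2) -> 0b1111
    //   *val = 0xF, i.e. stream bits 6..9 read MSB-first, matching the ReadBits tests.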
+ if (bit_count > 0) { + bits <<= bit_count; + bits |= HighestBits(*bytes, bit_count); + } + *val = bits; + return true; +} + +bool BitBuffer::ReadBits(uint32_t* val, size_t bit_count) { + return PeekBits(val, bit_count) && ConsumeBits(bit_count); +} + +bool BitBuffer::ConsumeBytes(size_t byte_count) { + return ConsumeBits(byte_count * 8); +} + +bool BitBuffer::ConsumeBits(size_t bit_count) { + if (bit_count > RemainingBitCount()) { + return false; + } + + byte_offset_ += (bit_offset_ + bit_count) / 8; + bit_offset_ = (bit_offset_ + bit_count) % 8; + return true; +} + +bool BitBuffer::ReadExponentialGolomb(uint32_t* val) { + if (!val) { + return false; + } + // Store off the current byte/bit offset, in case we want to restore them due + // to a failed parse. + size_t original_byte_offset = byte_offset_; + size_t original_bit_offset = bit_offset_; + + // Count the number of leading 0 bits by peeking/consuming them one at a time. + size_t zero_bit_count = 0; + uint32_t peeked_bit; + while (PeekBits(&peeked_bit, 1) && peeked_bit == 0) { + zero_bit_count++; + ConsumeBits(1); + } + + // We should either be at the end of the stream, or the next bit should be 1. + RTC_DCHECK(!PeekBits(&peeked_bit, 1) || peeked_bit == 1); + + // The bit count of the value is the number of zeros + 1. Make sure that many + // bits fits in a uint32_t and that we have enough bits left for it, and then + // read the value. + size_t value_bit_count = zero_bit_count + 1; + if (value_bit_count > 32 || !ReadBits(val, value_bit_count)) { + RTC_CHECK(Seek(original_byte_offset, original_bit_offset)); + return false; + } + *val -= 1; + return true; +} + +bool BitBuffer::ReadSignedExponentialGolomb(int32_t* val) { + uint32_t unsigned_val; + if (!ReadExponentialGolomb(&unsigned_val)) { + return false; + } + if ((unsigned_val & 1) == 0) { + *val = -static_cast(unsigned_val / 2); + } else { + *val = (unsigned_val + 1) / 2; + } + return true; +} + +void BitBuffer::GetCurrentOffset( + size_t* out_byte_offset, size_t* out_bit_offset) { + RTC_CHECK(out_byte_offset != NULL); + RTC_CHECK(out_bit_offset != NULL); + *out_byte_offset = byte_offset_; + *out_bit_offset = bit_offset_; +} + +bool BitBuffer::Seek(size_t byte_offset, size_t bit_offset) { + if (byte_offset > byte_count_ || bit_offset > 7 || + (byte_offset == byte_count_ && bit_offset > 0)) { + return false; + } + byte_offset_ = byte_offset; + bit_offset_ = bit_offset; + return true; +} + +BitBufferWriter::BitBufferWriter(uint8_t* bytes, size_t byte_count) + : BitBuffer(bytes, byte_count), writable_bytes_(bytes) { +} + +bool BitBufferWriter::WriteUInt8(uint8_t val) { + return WriteBits(val, sizeof(uint8_t) * 8); +} + +bool BitBufferWriter::WriteUInt16(uint16_t val) { + return WriteBits(val, sizeof(uint16_t) * 8); +} + +bool BitBufferWriter::WriteUInt32(uint32_t val) { + return WriteBits(val, sizeof(uint32_t) * 8); +} + +bool BitBufferWriter::WriteBits(uint64_t val, size_t bit_count) { + if (bit_count > RemainingBitCount()) { + return false; + } + size_t total_bits = bit_count; + + // For simplicity, push the bits we want to read from val to the highest bits. + val <<= (sizeof(uint64_t) * 8 - bit_count); + + uint8_t* bytes = writable_bytes_ + byte_offset_; + + // The first byte is relatively special; the bit offset to write to may put us + // in the middle of the byte, and the total bit count to write may require we + // save the bits at the end of the byte. 
+ size_t remaining_bits_in_current_byte = 8 - bit_offset_; + size_t bits_in_first_byte = + std::min(bit_count, remaining_bits_in_current_byte); + *bytes = WritePartialByte( + HighestByte(val), bits_in_first_byte, *bytes, bit_offset_); + if (bit_count <= remaining_bits_in_current_byte) { + // Nothing left to write, so quit early. + return ConsumeBits(total_bits); + } + + // Subtract what we've written from the bit count, shift it off the value, and + // write the remaining full bytes. + val <<= bits_in_first_byte; + bytes++; + bit_count -= bits_in_first_byte; + while (bit_count >= 8) { + *bytes++ = HighestByte(val); + val <<= 8; + bit_count -= 8; + } + + // Last byte may also be partial, so write the remaining bits from the top of + // val. + if (bit_count > 0) { + *bytes = WritePartialByte(HighestByte(val), bit_count, *bytes, 0); + } + + // All done! Consume the bits we've written. + return ConsumeBits(total_bits); +} + +bool BitBufferWriter::WriteExponentialGolomb(uint32_t val) { + // We don't support reading UINT32_MAX, because it doesn't fit in a uint32_t + // when encoded, so don't support writing it either. + if (val == std::numeric_limits::max()) { + return false; + } + uint64_t val_to_encode = static_cast(val) + 1; + + // We need to write CountBits(val+1) 0s and then val+1. Since val (as a + // uint64_t) has leading zeros, we can just write the total golomb encoded + // size worth of bits, knowing the value will appear last. + return WriteBits(val_to_encode, CountBits(val_to_encode) * 2 - 1); +} + +} // namespace rtc diff --git a/media/webrtc/trunk/webrtc/base/bitbuffer.h b/media/webrtc/trunk/webrtc/base/bitbuffer.h new file mode 100644 index 000000000000..8ea044e04129 --- /dev/null +++ b/media/webrtc/trunk/webrtc/base/bitbuffer.h @@ -0,0 +1,122 @@ +/* + * Copyright 2015 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_BASE_BITBUFFER_H_ +#define WEBRTC_BASE_BITBUFFER_H_ + +#include // For integer types. +#include // For size_t. + +#include "webrtc/base/constructormagic.h" + +namespace rtc { + +// A class, similar to ByteBuffer, that can parse bit-sized data out of a set of +// bytes. Has a similar API to ByteBuffer, plus methods for reading bit-sized +// and exponential golomb encoded data. For a writable version, use +// BitBufferWriter. Unlike ByteBuffer, this class doesn't make a copy of the +// source bytes, so it can be used on read-only data. +// Sizes/counts specify bits/bytes, for clarity. +// Byte order is assumed big-endian/network. +class BitBuffer { + public: + BitBuffer(const uint8_t* bytes, size_t byte_count); + + // Gets the current offset, in bytes/bits, from the start of the buffer. The + // bit offset is the offset into the current byte, in the range [0,7]. + void GetCurrentOffset(size_t* out_byte_offset, size_t* out_bit_offset); + + // The remaining bits in the byte buffer. + uint64_t RemainingBitCount() const; + + // Reads byte-sized values from the buffer. Returns false if there isn't + // enough data left for the specified type. + bool ReadUInt8(uint8_t* val); + bool ReadUInt16(uint16_t* val); + bool ReadUInt32(uint32_t* val); + + // Reads bit-sized values from the buffer. 
Returns false if there isn't enough + // data left for the specified bit count.. + bool ReadBits(uint32_t* val, size_t bit_count); + + // Peeks bit-sized values from the buffer. Returns false if there isn't enough + // data left for the specified number of bits. Doesn't move the current + // offset. + bool PeekBits(uint32_t* val, size_t bit_count); + + // Reads the exponential golomb encoded value at the current offset. + // Exponential golomb values are encoded as: + // 1) x = source val + 1 + // 2) In binary, write [countbits(x) - 1] 0s, then x + // To decode, we count the number of leading 0 bits, read that many + 1 bits, + // and increment the result by 1. + // Returns false if there isn't enough data left for the specified type, or if + // the value wouldn't fit in a uint32_t. + bool ReadExponentialGolomb(uint32_t* val); + // Reads signed exponential golomb values at the current offset. Signed + // exponential golomb values are just the unsigned values mapped to the + // sequence 0, 1, -1, 2, -2, etc. in order. + bool ReadSignedExponentialGolomb(int32_t* val); + + // Moves current position |byte_count| bytes forward. Returns false if + // there aren't enough bytes left in the buffer. + bool ConsumeBytes(size_t byte_count); + // Moves current position |bit_count| bits forward. Returns false if + // there aren't enough bits left in the buffer. + bool ConsumeBits(size_t bit_count); + + // Sets the current offset to the provied byte/bit offsets. The bit + // offset is from the given byte, in the range [0,7]. + bool Seek(size_t byte_offset, size_t bit_offset); + + protected: + const uint8_t* const bytes_; + // The total size of |bytes_|. + size_t byte_count_; + // The current offset, in bytes, from the start of |bytes_|. + size_t byte_offset_; + // The current offset, in bits, into the current byte. + size_t bit_offset_; + + RTC_DISALLOW_COPY_AND_ASSIGN(BitBuffer); +}; + +// A BitBuffer API for write operations. Supports symmetric write APIs to the +// reading APIs of BitBuffer. Note that the read/write offset is shared with the +// BitBuffer API, so both reading and writing will consume bytes/bits. +class BitBufferWriter : public BitBuffer { + public: + // Constructs a bit buffer for the writable buffer of |bytes|. + BitBufferWriter(uint8_t* bytes, size_t byte_count); + + // Writes byte-sized values from the buffer. Returns false if there isn't + // enough data left for the specified type. + bool WriteUInt8(uint8_t val); + bool WriteUInt16(uint16_t val); + bool WriteUInt32(uint32_t val); + + // Writes bit-sized values to the buffer. Returns false if there isn't enough + // room left for the specified number of bits. + bool WriteBits(uint64_t val, size_t bit_count); + + // Writes the exponential golomb encoded version of the supplied value. + // Returns false if there isn't enough room left for the value. + bool WriteExponentialGolomb(uint32_t val); + + private: + // The buffer, as a writable array. + uint8_t* const writable_bytes_; + + RTC_DISALLOW_COPY_AND_ASSIGN(BitBufferWriter); +}; + +} // namespace rtc + +#endif // WEBRTC_BASE_BITBUFFER_H_ diff --git a/media/webrtc/trunk/webrtc/base/bitbuffer_unittest.cc b/media/webrtc/trunk/webrtc/base/bitbuffer_unittest.cc new file mode 100644 index 000000000000..ce42257255a5 --- /dev/null +++ b/media/webrtc/trunk/webrtc/base/bitbuffer_unittest.cc @@ -0,0 +1,330 @@ +/* + * Copyright 2015 The WebRTC Project Authors. All rights reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/base/arraysize.h" +#include "webrtc/base/bitbuffer.h" +#include "webrtc/base/bytebuffer.h" +#include "webrtc/base/common.h" +#include "webrtc/base/gunit.h" + +namespace rtc { + +TEST(BitBufferTest, ConsumeBits) { + const uint8_t bytes[64] = {0}; + BitBuffer buffer(bytes, 32); + uint64_t total_bits = 32 * 8; + EXPECT_EQ(total_bits, buffer.RemainingBitCount()); + EXPECT_TRUE(buffer.ConsumeBits(3)); + total_bits -= 3; + EXPECT_EQ(total_bits, buffer.RemainingBitCount()); + EXPECT_TRUE(buffer.ConsumeBits(3)); + total_bits -= 3; + EXPECT_EQ(total_bits, buffer.RemainingBitCount()); + EXPECT_TRUE(buffer.ConsumeBits(15)); + total_bits -= 15; + EXPECT_EQ(total_bits, buffer.RemainingBitCount()); + EXPECT_TRUE(buffer.ConsumeBits(37)); + total_bits -= 37; + EXPECT_EQ(total_bits, buffer.RemainingBitCount()); + + EXPECT_FALSE(buffer.ConsumeBits(32 * 8)); + EXPECT_EQ(total_bits, buffer.RemainingBitCount()); +} + +TEST(BitBufferTest, ReadBytesAligned) { + const uint8_t bytes[] = {0x0A, 0xBC, 0xDE, 0xF1, 0x23, 0x45, 0x67, 0x89}; + uint8_t val8; + uint16_t val16; + uint32_t val32; + BitBuffer buffer(bytes, 8); + EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_EQ(0x0Au, val8); + EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_EQ(0xBCu, val8); + EXPECT_TRUE(buffer.ReadUInt16(&val16)); + EXPECT_EQ(0xDEF1u, val16); + EXPECT_TRUE(buffer.ReadUInt32(&val32)); + EXPECT_EQ(0x23456789u, val32); +} + +TEST(BitBufferTest, ReadBytesOffset4) { + const uint8_t bytes[] = {0x0A, 0xBC, 0xDE, 0xF1, 0x23, + 0x45, 0x67, 0x89, 0x0A}; + uint8_t val8; + uint16_t val16; + uint32_t val32; + BitBuffer buffer(bytes, 9); + EXPECT_TRUE(buffer.ConsumeBits(4)); + + EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_EQ(0xABu, val8); + EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_EQ(0xCDu, val8); + EXPECT_TRUE(buffer.ReadUInt16(&val16)); + EXPECT_EQ(0xEF12u, val16); + EXPECT_TRUE(buffer.ReadUInt32(&val32)); + EXPECT_EQ(0x34567890u, val32); +} + +TEST(BitBufferTest, ReadBytesOffset3) { + // The pattern we'll check against is counting down from 0b1111. It looks + // weird here because it's all offset by 3. + // Byte pattern is: + // 56701234 + // 0b00011111, + // 0b11011011, + // 0b10010111, + // 0b01010011, + // 0b00001110, + // 0b11001010, + // 0b10000110, + // 0b01000010 + // xxxxx <-- last 5 bits unused. + + // The bytes. It almost looks like counting down by two at a time, except the + // jump at 5->3->0, since that's when the high bit is turned off. + const uint8_t bytes[] = {0x1F, 0xDB, 0x97, 0x53, 0x0E, 0xCA, 0x86, 0x42}; + + uint8_t val8; + uint16_t val16; + uint32_t val32; + BitBuffer buffer(bytes, 8); + EXPECT_TRUE(buffer.ConsumeBits(3)); + EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_EQ(0xFEu, val8); + EXPECT_TRUE(buffer.ReadUInt16(&val16)); + EXPECT_EQ(0xDCBAu, val16); + EXPECT_TRUE(buffer.ReadUInt32(&val32)); + EXPECT_EQ(0x98765432u, val32); + // 5 bits left unread. Not enough to read a uint8_t. 
+ EXPECT_EQ(5u, buffer.RemainingBitCount()); + EXPECT_FALSE(buffer.ReadUInt8(&val8)); +} + +TEST(BitBufferTest, ReadBits) { + // Bit values are: + // 0b01001101, + // 0b00110010 + const uint8_t bytes[] = {0x4D, 0x32}; + uint32_t val; + BitBuffer buffer(bytes, 2); + EXPECT_TRUE(buffer.ReadBits(&val, 3)); + // 0b010 + EXPECT_EQ(0x2u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 2)); + // 0b01 + EXPECT_EQ(0x1u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 7)); + // 0b1010011 + EXPECT_EQ(0x53u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 2)); + // 0b00 + EXPECT_EQ(0x0u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 1)); + // 0b1 + EXPECT_EQ(0x1u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 1)); + // 0b0 + EXPECT_EQ(0x0u, val); + + EXPECT_FALSE(buffer.ReadBits(&val, 1)); +} + +TEST(BitBufferTest, SetOffsetValues) { + uint8_t bytes[4] = {0}; + BitBufferWriter buffer(bytes, 4); + + size_t byte_offset, bit_offset; + // Bit offsets are [0,7]. + EXPECT_TRUE(buffer.Seek(0, 0)); + EXPECT_TRUE(buffer.Seek(0, 7)); + buffer.GetCurrentOffset(&byte_offset, &bit_offset); + EXPECT_EQ(0u, byte_offset); + EXPECT_EQ(7u, bit_offset); + EXPECT_FALSE(buffer.Seek(0, 8)); + buffer.GetCurrentOffset(&byte_offset, &bit_offset); + EXPECT_EQ(0u, byte_offset); + EXPECT_EQ(7u, bit_offset); + // Byte offsets are [0,length]. At byte offset length, the bit offset must be + // 0. + EXPECT_TRUE(buffer.Seek(0, 0)); + EXPECT_TRUE(buffer.Seek(2, 4)); + buffer.GetCurrentOffset(&byte_offset, &bit_offset); + EXPECT_EQ(2u, byte_offset); + EXPECT_EQ(4u, bit_offset); + EXPECT_TRUE(buffer.Seek(4, 0)); + EXPECT_FALSE(buffer.Seek(5, 0)); + buffer.GetCurrentOffset(&byte_offset, &bit_offset); + EXPECT_EQ(4u, byte_offset); + EXPECT_EQ(0u, bit_offset); + EXPECT_FALSE(buffer.Seek(4, 1)); + + // Disable death test on Android because it relies on fork() and doesn't play + // nicely. +#if defined(GTEST_HAS_DEATH_TEST) +#if !defined(WEBRTC_ANDROID) + // Passing a NULL out parameter is death. + EXPECT_DEATH(buffer.GetCurrentOffset(&byte_offset, NULL), ""); +#endif +#endif +} + +uint64_t GolombEncoded(uint32_t val) { + val++; + uint32_t bit_counter = val; + uint64_t bit_count = 0; + while (bit_counter > 0) { + bit_count++; + bit_counter >>= 1; + } + return static_cast(val) << (64 - (bit_count * 2 - 1)); +} + +TEST(BitBufferTest, GolombUint32Values) { + ByteBuffer byteBuffer; + byteBuffer.Resize(16); + BitBuffer buffer(reinterpret_cast(byteBuffer.Data()), + byteBuffer.Capacity()); + // Test over the uint32_t range with a large enough step that the test doesn't + // take forever. Around 20,000 iterations should do. 
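    // [Illustrative worked examples, not part of the patch]
    //   Encoding (GolombEncoded above), i = 2: val = i + 1 = 3 = 0b11,
    //     bit_count = 2, result = 3 << (64 - (2*2 - 1)) -> the bit string "011"
    //     in the top bits, which ReadExponentialGolomb decodes back to 3 - 1 = 2.
    //   Decoding a signed value, byte 0x38 = 0b0011'1000 (see SignedGolombValues
    //     below): two leading zeros -> read 3 bits = 0b111 = 7 -> unsigned 7 - 1 = 6;
    //     6 is even, so ReadSignedExponentialGolomb returns -(6 / 2) = -3.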
+ const int kStep = std::numeric_limits::max() / 20000; + for (uint32_t i = 0; i < std::numeric_limits::max() - kStep; + i += kStep) { + uint64_t encoded_val = GolombEncoded(i); + byteBuffer.Clear(); + byteBuffer.WriteUInt64(encoded_val); + uint32_t decoded_val; + EXPECT_TRUE(buffer.Seek(0, 0)); + EXPECT_TRUE(buffer.ReadExponentialGolomb(&decoded_val)); + EXPECT_EQ(i, decoded_val); + } +} + +TEST(BitBufferTest, SignedGolombValues) { + uint8_t golomb_bits[] = { + 0x80, // 1 + 0x40, // 010 + 0x60, // 011 + 0x20, // 00100 + 0x38, // 00111 + }; + int32_t expected[] = {0, 1, -1, 2, -3}; + for (size_t i = 0; i < sizeof(golomb_bits); ++i) { + BitBuffer buffer(&golomb_bits[i], 1); + int32_t decoded_val; + ASSERT_TRUE(buffer.ReadSignedExponentialGolomb(&decoded_val)); + EXPECT_EQ(expected[i], decoded_val) + << "Mismatch in expected/decoded value for golomb_bits[" << i + << "]: " << static_cast(golomb_bits[i]); + } +} + +TEST(BitBufferTest, NoGolombOverread) { + const uint8_t bytes[] = {0x00, 0xFF, 0xFF}; + // Make sure the bit buffer correctly enforces byte length on golomb reads. + // If it didn't, the above buffer would be valid at 3 bytes. + BitBuffer buffer(bytes, 1); + uint32_t decoded_val; + EXPECT_FALSE(buffer.ReadExponentialGolomb(&decoded_val)); + + BitBuffer longer_buffer(bytes, 2); + EXPECT_FALSE(longer_buffer.ReadExponentialGolomb(&decoded_val)); + + BitBuffer longest_buffer(bytes, 3); + EXPECT_TRUE(longest_buffer.ReadExponentialGolomb(&decoded_val)); + // Golomb should have read 9 bits, so 0x01FF, and since it is golomb, the + // result is 0x01FF - 1 = 0x01FE. + EXPECT_EQ(0x01FEu, decoded_val); +} + +TEST(BitBufferWriterTest, SymmetricReadWrite) { + uint8_t bytes[16] = {0}; + BitBufferWriter buffer(bytes, 4); + + // Write some bit data at various sizes. + EXPECT_TRUE(buffer.WriteBits(0x2u, 3)); + EXPECT_TRUE(buffer.WriteBits(0x1u, 2)); + EXPECT_TRUE(buffer.WriteBits(0x53u, 7)); + EXPECT_TRUE(buffer.WriteBits(0x0u, 2)); + EXPECT_TRUE(buffer.WriteBits(0x1u, 1)); + EXPECT_TRUE(buffer.WriteBits(0x1ABCDu, 17)); + // That should be all that fits in the buffer. + EXPECT_FALSE(buffer.WriteBits(1, 1)); + + EXPECT_TRUE(buffer.Seek(0, 0)); + uint32_t val; + EXPECT_TRUE(buffer.ReadBits(&val, 3)); + EXPECT_EQ(0x2u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 2)); + EXPECT_EQ(0x1u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 7)); + EXPECT_EQ(0x53u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 2)); + EXPECT_EQ(0x0u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 1)); + EXPECT_EQ(0x1u, val); + EXPECT_TRUE(buffer.ReadBits(&val, 17)); + EXPECT_EQ(0x1ABCDu, val); + // And there should be nothing left. + EXPECT_FALSE(buffer.ReadBits(&val, 1)); +} + +TEST(BitBufferWriterTest, SymmetricBytesMisaligned) { + uint8_t bytes[16] = {0}; + BitBufferWriter buffer(bytes, 16); + + // Offset 3, to get things misaligned. 
+ EXPECT_TRUE(buffer.ConsumeBits(3)); + EXPECT_TRUE(buffer.WriteUInt8(0x12u)); + EXPECT_TRUE(buffer.WriteUInt16(0x3456u)); + EXPECT_TRUE(buffer.WriteUInt32(0x789ABCDEu)); + + buffer.Seek(0, 3); + uint8_t val8; + uint16_t val16; + uint32_t val32; + EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_EQ(0x12u, val8); + EXPECT_TRUE(buffer.ReadUInt16(&val16)); + EXPECT_EQ(0x3456u, val16); + EXPECT_TRUE(buffer.ReadUInt32(&val32)); + EXPECT_EQ(0x789ABCDEu, val32); +} + +TEST(BitBufferWriterTest, SymmetricGolomb) { + char test_string[] = "my precious"; + uint8_t bytes[64] = {0}; + BitBufferWriter buffer(bytes, 64); + for (size_t i = 0; i < arraysize(test_string); ++i) { + EXPECT_TRUE(buffer.WriteExponentialGolomb(test_string[i])); + } + buffer.Seek(0, 0); + for (size_t i = 0; i < arraysize(test_string); ++i) { + uint32_t val; + EXPECT_TRUE(buffer.ReadExponentialGolomb(&val)); + EXPECT_LE(val, std::numeric_limits::max()); + EXPECT_EQ(test_string[i], static_cast(val)); + } +} + +TEST(BitBufferWriterTest, WriteClearsBits) { + uint8_t bytes[] = {0xFF, 0xFF}; + BitBufferWriter buffer(bytes, 2); + EXPECT_TRUE(buffer.ConsumeBits(3)); + EXPECT_TRUE(buffer.WriteBits(0, 1)); + EXPECT_EQ(0xEFu, bytes[0]); + EXPECT_TRUE(buffer.WriteBits(0, 3)); + EXPECT_EQ(0xE1u, bytes[0]); + EXPECT_TRUE(buffer.WriteBits(0, 2)); + EXPECT_EQ(0xE0u, bytes[0]); + EXPECT_EQ(0x7F, bytes[1]); +} + +} // namespace rtc diff --git a/media/webrtc/trunk/webrtc/base/buffer.h b/media/webrtc/trunk/webrtc/base/buffer.h index 07345a96aea9..fead5048a11d 100644 --- a/media/webrtc/trunk/webrtc/base/buffer.h +++ b/media/webrtc/trunk/webrtc/base/buffer.h @@ -13,7 +13,8 @@ #include -#include "webrtc/base/common.h" +// common.h isn't in the rtc_approved list +//#include "webrtc/base/common.h" #include "webrtc/base/scoped_ptr.h" namespace rtc { @@ -52,12 +53,12 @@ class Buffer { } void SetData(const void* data, size_t size) { - ASSERT(data != NULL || size == 0); + assert(data != NULL || size == 0); SetSize(size); memcpy(data_.get(), data, size); } void AppendData(const void* data, size_t size) { - ASSERT(data != NULL || size == 0); + assert(data != NULL || size == 0); size_t old_size = size_; SetSize(size_ + size); memcpy(data_.get() + old_size, data, size); @@ -76,7 +77,7 @@ class Buffer { } void TransferTo(Buffer* buf) { - ASSERT(buf != NULL); + assert(buf != NULL); buf->data_.reset(data_.release()); buf->size_ = size_; buf->capacity_ = capacity_; diff --git a/media/webrtc/trunk/webrtc/base/checks.h b/media/webrtc/trunk/webrtc/base/checks.h index 521586844a6a..ea3fb37c0d19 100644 --- a/media/webrtc/trunk/webrtc/base/checks.h +++ b/media/webrtc/trunk/webrtc/base/checks.h @@ -91,6 +91,8 @@ namespace rtc { LAZY_STREAM(rtc::FatalMessage(__FILE__, __LINE__).stream(), !(condition)) \ << "Check failed: " #condition << std::endl << "# " +#define RTC_CHECK(condition) CHECK(condition) + // Helper macro for binary operators. // Don't use this macro directly in your code, use CHECK_EQ et al below. 
// @@ -185,6 +187,36 @@ DEFINE_CHECK_OP_IMPL(GT, > ) #define DCHECK_GT(v1, v2) EAT_STREAM_PARAMETERS((v1) > (v2)) #endif +#define RTC_CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2) +#define RTC_CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2) +#define RTC_CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2) +#define RTC_CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2) +#define RTC_CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2) +#define RTC_CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2) + +// The RTC_DCHECK macro is equivalent to RTC_CHECK except that it only generates +// code in debug builds. It does reference the condition parameter in all cases, +// though, so callers won't risk getting warnings about unused variables. +#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) +#define RTC_DCHECK_IS_ON 1 +#define RTC_DCHECK(condition) CHECK(condition) +#define RTC_DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2) +#define RTC_DCHECK_NE(v1, v2) CHECK_NE(v1, v2) +#define RTC_DCHECK_LE(v1, v2) CHECK_LE(v1, v2) +#define RTC_DCHECK_LT(v1, v2) CHECK_LT(v1, v2) +#define RTC_DCHECK_GE(v1, v2) CHECK_GE(v1, v2) +#define RTC_DCHECK_GT(v1, v2) CHECK_GT(v1, v2) +#else +#define RTC_DCHECK_IS_ON 0 +#define RTC_DCHECK(condition) EAT_STREAM_PARAMETERS(condition) +#define RTC_DCHECK_EQ(v1, v2) EAT_STREAM_PARAMETERS((v1) == (v2)) +#define RTC_DCHECK_NE(v1, v2) EAT_STREAM_PARAMETERS((v1) != (v2)) +#define RTC_DCHECK_LE(v1, v2) EAT_STREAM_PARAMETERS((v1) <= (v2)) +#define RTC_DCHECK_LT(v1, v2) EAT_STREAM_PARAMETERS((v1) < (v2)) +#define RTC_DCHECK_GE(v1, v2) EAT_STREAM_PARAMETERS((v1) >= (v2)) +#define RTC_DCHECK_GT(v1, v2) EAT_STREAM_PARAMETERS((v1) > (v2)) +#endif + // This is identical to LogMessageVoidify but in name. class FatalMessageVoidify { public: diff --git a/media/webrtc/trunk/webrtc/base/constructormagic.h b/media/webrtc/trunk/webrtc/base/constructormagic.h index ceee37de4baf..972508b4ca25 100644 --- a/media/webrtc/trunk/webrtc/base/constructormagic.h +++ b/media/webrtc/trunk/webrtc/base/constructormagic.h @@ -17,6 +17,8 @@ #undef DISALLOW_ASSIGN #define DISALLOW_ASSIGN(TypeName) \ void operator=(const TypeName&) +#define RTC_DISALLOW_ASSIGN(TypeName) \ + void operator=(const TypeName&) = delete // A macro to disallow the evil copy constructor and operator= functions // This should be used in the private: declarations for a class. @@ -24,6 +26,9 @@ #define DISALLOW_COPY_AND_ASSIGN(TypeName) \ TypeName(const TypeName&); \ DISALLOW_ASSIGN(TypeName) +#define RTC_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + RTC_DISALLOW_ASSIGN(TypeName) // Alternative, less-accurate legacy name. #undef DISALLOW_EVIL_CONSTRUCTORS @@ -40,6 +45,9 @@ #define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ TypeName(); \ DISALLOW_EVIL_CONSTRUCTORS(TypeName) +#define RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ + TypeName() = delete; \ + RTC_DISALLOW_COPY_AND_ASSIGN(TypeName) #endif // WEBRTC_BASE_CONSTRUCTORMAGIC_H_ diff --git a/media/webrtc/trunk/webrtc/common_types.h b/media/webrtc/trunk/webrtc/common_types.h index 0c11be01d88f..52f77b8c665c 100644 --- a/media/webrtc/trunk/webrtc/common_types.h +++ b/media/webrtc/trunk/webrtc/common_types.h @@ -639,6 +639,9 @@ struct VideoCodecVP9 { bool frameDroppingOn; int keyFrameInterval; bool adaptiveQpMode; + bool automaticResizeOn; + unsigned char numberOfSpatialLayers; + bool flexibleMode; }; // H264 specific. 
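Two of the checks.h / constructormagic.h additions above deserve a note: in release builds RTC_DCHECK compiles down to EAT_STREAM_PARAMETERS, so the condition is still referenced (no unused-variable warnings) but no check is emitted; and unlike the legacy DISALLOW_* macros they sit next to, the RTC_-prefixed variants use C++11 "= delete", turning misuse into a hard compile error even from inside the class. A minimal usage sketch; Widget is hypothetical:

    class Widget {
     public:
      Widget() {}

     private:
      RTC_DISALLOW_COPY_AND_ASSIGN(Widget);  // copy ctor and operator= are deleted
    };

    // Widget a, b;
    // Widget c(a);  // error: use of deleted function 'Widget::Widget(const Widget&)'
    // b = a;        // error: use of deleted function 'void Widget::operator=(...)'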
diff --git a/media/webrtc/trunk/webrtc/modules/interface/module_common_types.h b/media/webrtc/trunk/webrtc/modules/interface/module_common_types.h index 3e7f954e4bca..7dc430af0123 100644 --- a/media/webrtc/trunk/webrtc/modules/interface/module_common_types.h +++ b/media/webrtc/trunk/webrtc/modules/interface/module_common_types.h @@ -15,6 +15,7 @@ #include // memcpy #include +#include #include "webrtc/base/constructormagic.h" #include "webrtc/common_types.h" @@ -31,8 +32,16 @@ struct RTPAudioHeader { }; const int16_t kNoPictureId = -1; +const int16_t kMaxOneBytePictureId = 0x7F; // 7 bits +const int16_t kMaxTwoBytePictureId = 0x7FFF; // 15 bits const int16_t kNoTl0PicIdx = -1; const uint8_t kNoTemporalIdx = 0xFF; +const uint8_t kNoSpatialIdx = 0xFF; +const uint8_t kNoGofIdx = 0xFF; +const uint8_t kNumVp9Buffers = 8; +const size_t kMaxVp9RefPics = 3; +const size_t kMaxVp9FramesInGof = 0xFF; // 8 bits +const size_t kMaxVp9NumberOfSpatialLayers = 8; const int kNoKeyIdx = -1; struct RTPVideoHeaderVP8 { @@ -61,37 +70,164 @@ struct RTPVideoHeaderVP8 { // in a VP8 partition. Otherwise false }; +enum TemporalStructureMode { + kTemporalStructureMode1, // 1 temporal layer structure - i.e., IPPP... + kTemporalStructureMode2, // 2 temporal layers 0-1-0-1... + kTemporalStructureMode3 // 3 temporal layers 0-2-1-2-0-2-1-2... +}; + +struct GofInfoVP9 { + void SetGofInfoVP9(TemporalStructureMode tm) { + switch (tm) { + case kTemporalStructureMode1: + num_frames_in_gof = 1; + temporal_idx[0] = 0; + temporal_up_switch[0] = false; + num_ref_pics[0] = 1; + pid_diff[0][0] = 1; + break; + case kTemporalStructureMode2: + num_frames_in_gof = 2; + temporal_idx[0] = 0; + temporal_up_switch[0] = false; + num_ref_pics[0] = 1; + pid_diff[0][0] = 2; + + temporal_idx[1] = 1; + temporal_up_switch[1] = true; + num_ref_pics[1] = 1; + pid_diff[1][0] = 1; + break; + case kTemporalStructureMode3: + num_frames_in_gof = 4; + temporal_idx[0] = 0; + temporal_up_switch[0] = false; + num_ref_pics[0] = 1; + pid_diff[0][0] = 4; + + temporal_idx[1] = 2; + temporal_up_switch[1] = true; + num_ref_pics[1] = 1; + pid_diff[1][0] = 1; + + temporal_idx[2] = 1; + temporal_up_switch[2] = true; + num_ref_pics[2] = 1; + pid_diff[2][0] = 2; + + temporal_idx[3] = 2; + temporal_up_switch[3] = false; + num_ref_pics[3] = 2; + pid_diff[3][0] = 1; + pid_diff[3][1] = 2; + break; + default: + assert(false); + } + } + + void CopyGofInfoVP9(const GofInfoVP9& src) { + num_frames_in_gof = src.num_frames_in_gof; + for (size_t i = 0; i < num_frames_in_gof; ++i) { + temporal_idx[i] = src.temporal_idx[i]; + temporal_up_switch[i] = src.temporal_up_switch[i]; + num_ref_pics[i] = src.num_ref_pics[i]; + for (uint8_t r = 0; r < num_ref_pics[i]; ++r) { + pid_diff[i][r] = src.pid_diff[i][r]; + } + } + } + + size_t num_frames_in_gof; + uint8_t temporal_idx[kMaxVp9FramesInGof]; + bool temporal_up_switch[kMaxVp9FramesInGof]; + uint8_t num_ref_pics[kMaxVp9FramesInGof]; + uint8_t pid_diff[kMaxVp9FramesInGof][kMaxVp9RefPics]; +}; + +struct RTPVideoHeaderVP9 { + void InitRTPVideoHeaderVP9() { + inter_pic_predicted = false; + flexible_mode = false; + beginning_of_frame = false; + end_of_frame = false; + ss_data_available = false; + picture_id = kNoPictureId; + max_picture_id = kMaxTwoBytePictureId; + tl0_pic_idx = kNoTl0PicIdx; + temporal_idx = kNoTemporalIdx; + spatial_idx = kNoSpatialIdx; + temporal_up_switch = false; + inter_layer_predicted = false; + gof_idx = kNoGofIdx; + num_ref_pics = 0; + num_spatial_layers = 1; + } + + bool inter_pic_predicted; // This layer frame is 
dependent on previously + // coded frame(s). + bool flexible_mode; // This frame is in flexible mode. + bool beginning_of_frame; // True if this packet is the first in a VP9 layer + // frame. + bool end_of_frame; // True if this packet is the last in a VP9 layer frame. + bool ss_data_available; // True if SS data is available in this payload + // descriptor. + int16_t picture_id; // PictureID index, 15 bits; + // kNoPictureId if PictureID does not exist. + int16_t max_picture_id; // Maximum picture ID index; either 0x7F or 0x7FFF; + int16_t tl0_pic_idx; // TL0PIC_IDX, 8 bits; + // kNoTl0PicIdx means no value provided. + uint8_t temporal_idx; // Temporal layer index, or kNoTemporalIdx. + uint8_t spatial_idx; // Spatial layer index, or kNoSpatialIdx. + bool temporal_up_switch; // True if upswitch to higher frame rate is possible + // starting from this frame. + bool inter_layer_predicted; // Frame is dependent on directly lower spatial + // layer frame. + + uint8_t gof_idx; // Index to predefined temporal frame info in SS data. + + uint8_t num_ref_pics; // Number of reference pictures used by this layer + // frame. + uint8_t pid_diff[kMaxVp9RefPics]; // P_DIFF signaled to derive the PictureID + // of the reference pictures. + int16_t ref_picture_id[kMaxVp9RefPics]; // PictureID of reference pictures. + + // SS data. + size_t num_spatial_layers; // Always populated. + bool spatial_layer_resolution_present; + uint16_t width[kMaxVp9NumberOfSpatialLayers]; + uint16_t height[kMaxVp9NumberOfSpatialLayers]; + GofInfoVP9 gof; +}; + +#if WEBRTC_48_H264_IMPL +// The packetization types that we support: single, aggregated, and fragmented. +enum H264PacketizationTypes { + kH264SingleNalu, // This packet contains a single NAL unit. + kH264StapA, // This packet contains STAP-A (single time + // aggregation) packets. If this packet has an + // associated NAL unit type, it'll be for the + // first such aggregated packet. + kH264FuA, // This packet contains a FU-A (fragmentation + // unit) packet, meaning it is a part of a frame + // that was too large to fit into a single packet. +}; + +struct RTPVideoHeaderH264 { + uint8_t nalu_type; // The NAL unit type. If this is a header for a + // fragmented packet, it's the NAL unit type of + // the original data. If this is the header for an + // aggregated packet, it's the NAL unit type of + // the first NAL unit in the packet. + H264PacketizationTypes packetization_type; +}; +#else +// Mozilla's OpenH264 implementation struct RTPVideoHeaderH264 { bool stap_a; bool single_nalu; }; - -// XXX fix vp9 (bug 1138629) -struct RTPVideoHeaderVP9 { - void InitRTPVideoHeaderVP9() { - nonReference = false; - pictureId = kNoPictureId; - tl0PicIdx = kNoTl0PicIdx; - temporalIdx = kNoTemporalIdx; - layerSync = false; - keyIdx = kNoKeyIdx; - partitionId = 0; - beginningOfPartition = false; - } - - bool nonReference; // Frame is discardable. - int16_t pictureId; // Picture ID index, 15 bits; - // kNoPictureId if PictureID does not exist. - int16_t tl0PicIdx; // TL0PIC_IDX, 8 bits; - // kNoTl0PicIdx means no value provided. - uint8_t temporalIdx; // Temporal layer index, or kNoTemporalIdx. - bool layerSync; // This frame is a layer sync frame. - // Disabled if temporalIdx == kNoTemporalIdx. - int keyIdx; // 5 bits; kNoKeyIdx means not used. - int partitionId; // VP9 partition ID - bool beginningOfPartition; // True if this packet is the first - // in a VP9 partition. 
Otherwise false -}; +#endif union RTPVideoTypeHeader { RTPVideoHeaderVP8 VP8; @@ -611,6 +747,18 @@ inline AudioFrame& AudioFrame::Append(const AudioFrame& rhs) { return *this; } +namespace { +inline int16_t ClampToInt16(int32_t input) { + if (input < -0x00008000) { + return -0x8000; + } else if (input > 0x00007FFF) { + return 0x7FFF; + } else { + return static_cast(input); + } +} +} + inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) { // Sanity check assert((num_channels_ > 0) && (num_channels_ < 3)); @@ -643,15 +791,9 @@ inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) { } else { // IMPROVEMENT this can be done very fast in assembly for (int i = 0; i < samples_per_channel_ * num_channels_; i++) { - int32_t wrapGuard = + int32_t wrap_guard = static_cast(data_[i]) + static_cast(rhs.data_[i]); - if (wrapGuard < -32768) { - data_[i] = -32768; - } else if (wrapGuard > 32767) { - data_[i] = 32767; - } else { - data_[i] = (int16_t)wrapGuard; - } + data_[i] = ClampToInt16(wrap_guard); } } energy_ = 0xffffffff; @@ -674,15 +816,9 @@ inline AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) { speech_type_ = kUndefined; for (int i = 0; i < samples_per_channel_ * num_channels_; i++) { - int32_t wrapGuard = + int32_t wrap_guard = static_cast(data_[i]) - static_cast(rhs.data_[i]); - if (wrapGuard < -32768) { - data_[i] = -32768; - } else if (wrapGuard > 32767) { - data_[i] = 32767; - } else { - data_[i] = (int16_t)wrapGuard; - } + data_[i] = ClampToInt16(wrap_guard); } energy_ = 0xffffffff; return *this; @@ -690,11 +826,24 @@ inline AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) { inline bool IsNewerSequenceNumber(uint16_t sequence_number, uint16_t prev_sequence_number) { + // Distinguish between elements that are exactly 0x8000 apart. + // If s1>s2 and |s1-s2| = 0x8000: IsNewer(s1,s2)=true, IsNewer(s2,s1)=false + // rather than having IsNewer(s1,s2) = IsNewer(s2,s1) = false. + if (static_cast(sequence_number - prev_sequence_number) == 0x8000) { + return sequence_number > prev_sequence_number; + } return sequence_number != prev_sequence_number && static_cast(sequence_number - prev_sequence_number) < 0x8000; } inline bool IsNewerTimestamp(uint32_t timestamp, uint32_t prev_timestamp) { + // Distinguish between elements that are exactly 0x80000000 apart. + // If t1>t2 and |t1-t2| = 0x80000000: IsNewer(t1,t2)=true, + // IsNewer(t2,t1)=false + // rather than having IsNewer(t1,t2) = IsNewer(t2,t1) = false. + if (static_cast(timestamp - prev_timestamp) == 0x80000000) { + return timestamp > prev_timestamp; + } return timestamp != prev_timestamp && static_cast(timestamp - prev_timestamp) < 0x80000000; } @@ -715,6 +864,46 @@ inline uint32_t LatestTimestamp(uint32_t timestamp1, uint32_t timestamp2) { return IsNewerTimestamp(timestamp1, timestamp2) ? timestamp1 : timestamp2; } +// Utility class to unwrap a sequence number to a larger type, for easier +// handling large ranges. Note that sequence numbers will never be unwrapped +// to a negative value. +class SequenceNumberUnwrapper { + public: + SequenceNumberUnwrapper() : last_seq_(-1) {} + + // Get the unwrapped sequence, but don't update the internal state. + int64_t UnwrapWithoutUpdate(uint16_t sequence_number) { + if (last_seq_ == -1) + return sequence_number; + + uint16_t cropped_last = static_cast(last_seq_); + int64_t delta = sequence_number - cropped_last; + if (IsNewerSequenceNumber(sequence_number, cropped_last)) { + if (delta < 0) + delta += (1 << 16); // Wrap forwards. 
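    // [Illustrative worked values, not part of the patch] The tie-break added to
    // IsNewerSequenceNumber above only fires when the operands are exactly
    // 0x8000 apart, where the old half-range test was false in both directions:
    //   IsNewerSequenceNumber(0x8000, 0x0000):
    //     (uint16_t)(0x8000 - 0x0000) == 0x8000 -> compare raw: 0x8000 > 0 -> true
    //   IsNewerSequenceNumber(0x0000, 0x8000):
    //     (uint16_t)(0x0000 - 0x8000) == 0x8000 -> compare raw: 0 > 0x8000 -> false
    // Every other pair still uses the usual "delta < 0x8000" half-range rule, and
    // IsNewerTimestamp applies the same fix at 0x80000000.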
+ } else if (delta > 0 && (last_seq_ + delta - (1 << 16)) >= 0) { + // If sequence_number is older but delta is positive, this is a backwards + // wrap-around. However, don't wrap backwards past 0 (unwrapped). + delta -= (1 << 16); + } + + return last_seq_ + delta; + } + + // Only update the internal state to the specified last (unwrapped) sequence. + void UpdateLast(int64_t last_sequence) { last_seq_ = last_sequence; } + + // Unwrap the sequence number and update the internal state. + int64_t Unwrap(uint16_t sequence_number) { + int64_t unwrapped = UnwrapWithoutUpdate(sequence_number); + UpdateLast(unwrapped); + return unwrapped; + } + + private: + int64_t last_seq_; +}; + } // namespace webrtc #endif // MODULE_COMMON_TYPES_H diff --git a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi index 7a144e4ef53d..c4b9b3b43d19 100644 --- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi +++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi @@ -92,6 +92,8 @@ 'source/rtp_format_h264.h', 'source/rtp_format_vp8.cc', 'source/rtp_format_vp8.h', + 'source/rtp_format_vp9.cc', + 'source/rtp_format_vp9.h', 'source/rtp_format_video_generic.cc', 'source/rtp_format_video_generic.h', 'source/vp8_partition_aggregator.cc', diff --git a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format.cc b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format.cc index 67c1abe0f3bb..cdb9c4920e31 100644 --- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format.cc +++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format.cc @@ -13,6 +13,7 @@ #include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h" #include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h" #include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h" +#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h" namespace webrtc { RtpPacketizer* RtpPacketizer::Create(RtpVideoCodecTypes type, @@ -26,6 +27,8 @@ RtpPacketizer* RtpPacketizer::Create(RtpVideoCodecTypes type, assert(rtp_type_header != NULL); return new RtpPacketizerVp8(rtp_type_header->VP8, max_payload_len); case kRtpVideoVp9: + assert(rtp_type_header != NULL); + return new RtpPacketizerVp9(rtp_type_header->VP9, max_payload_len); case kRtpVideoGeneric: return new RtpPacketizerGeneric(frame_type, max_payload_len); case kRtpVideoNone: @@ -40,7 +43,8 @@ RtpDepacketizer* RtpDepacketizer::Create(RtpVideoCodecTypes type) { return new RtpDepacketizerH264(); case kRtpVideoVp8: return new RtpDepacketizerVp8(); - case kRtpVideoVp9: // XXX fix vp9 packetization (bug 1138629) + case kRtpVideoVp9: + return new RtpDepacketizerVp9(); case kRtpVideoGeneric: return new RtpDepacketizerGeneric(); case kRtpVideoNone: diff --git a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc new file mode 100644 index 000000000000..d2f22d50446a --- /dev/null +++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc @@ -0,0 +1,743 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h" + +#include +#include + +#include + +#include "webrtc/base/bitbuffer.h" +#include "webrtc/base/checks.h" +#include "webrtc/base/logging.h" + +#define RETURN_FALSE_ON_ERROR(x) \ + if (!(x)) { \ + return false; \ + } + +namespace webrtc { +namespace { +// Length of VP9 payload descriptors' fixed part. +const size_t kFixedPayloadDescriptorBytes = 1; + +// Packet fragmentation mode. If true, packets are split into (almost) equal +// sizes. Otherwise, as many bytes as possible are fit into one packet. +const bool kBalancedMode = true; + +const uint32_t kReservedBitValue0 = 0; + +uint8_t TemporalIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) { + return (hdr.temporal_idx == kNoTemporalIdx) ? def : hdr.temporal_idx; +} + +uint8_t SpatialIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) { + return (hdr.spatial_idx == kNoSpatialIdx) ? def : hdr.spatial_idx; +} + +int16_t Tl0PicIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) { + return (hdr.tl0_pic_idx == kNoTl0PicIdx) ? def : hdr.tl0_pic_idx; +} + +// Picture ID: +// +// +-+-+-+-+-+-+-+-+ +// I: |M| PICTURE ID | M:0 => picture id is 7 bits. +// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits. +// M: | EXTENDED PID | +// +-+-+-+-+-+-+-+-+ +// +size_t PictureIdLength(const RTPVideoHeaderVP9& hdr) { + if (hdr.picture_id == kNoPictureId) + return 0; + return (hdr.max_picture_id == kMaxOneBytePictureId) ? 1 : 2; +} + +bool PictureIdPresent(const RTPVideoHeaderVP9& hdr) { + return PictureIdLength(hdr) > 0; +} + +// Layer indices: +// +// Flexible mode (F=1): Non-flexible mode (F=0): +// +// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ +// L: | T |U| S |D| | T |U| S |D| +// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ +// | TL0PICIDX | +// +-+-+-+-+-+-+-+-+ +// +size_t LayerInfoLength(const RTPVideoHeaderVP9& hdr) { + if (hdr.temporal_idx == kNoTemporalIdx && + hdr.spatial_idx == kNoSpatialIdx) { + return 0; + } + return hdr.flexible_mode ? 1 : 2; +} + +bool LayerInfoPresent(const RTPVideoHeaderVP9& hdr) { + return LayerInfoLength(hdr) > 0; +} + +// Reference indices: +// +// +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index +// P,F: | P_DIFF |N| up to 3 times has to be specified. +// +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows +// current P_DIFF. +// +size_t RefIndicesLength(const RTPVideoHeaderVP9& hdr) { + if (!hdr.inter_pic_predicted || !hdr.flexible_mode) + return 0; + + RTC_DCHECK_GT(hdr.num_ref_pics, 0U); + RTC_DCHECK_LE(hdr.num_ref_pics, kMaxVp9RefPics); + return hdr.num_ref_pics; +} + +// Scalability structure (SS). +// +// +-+-+-+-+-+-+-+-+ +// V: | N_S |Y|G|-|-|-| +// +-+-+-+-+-+-+-+-+ -| +// Y: | WIDTH | (OPTIONAL) . +// + + . +// | | (OPTIONAL) . +// +-+-+-+-+-+-+-+-+ . N_S + 1 times +// | HEIGHT | (OPTIONAL) . +// + + . +// | | (OPTIONAL) . +// +-+-+-+-+-+-+-+-+ -| +// G: | N_G | (OPTIONAL) +// +-+-+-+-+-+-+-+-+ -| +// N_G: | T |U| R |-|-| (OPTIONAL) . +// +-+-+-+-+-+-+-+-+ -| . N_G times +// | P_DIFF | (OPTIONAL) . R times . 
+// +-+-+-+-+-+-+-+-+ -| -| +// +size_t SsDataLength(const RTPVideoHeaderVP9& hdr) { + if (!hdr.ss_data_available) + return 0; + + RTC_DCHECK_GT(hdr.num_spatial_layers, 0U); + RTC_DCHECK_LE(hdr.num_spatial_layers, kMaxVp9NumberOfSpatialLayers); + RTC_DCHECK_LE(hdr.gof.num_frames_in_gof, kMaxVp9FramesInGof); + size_t length = 1; // V + if (hdr.spatial_layer_resolution_present) { + length += 4 * hdr.num_spatial_layers; // Y + } + if (hdr.gof.num_frames_in_gof > 0) { + ++length; // G + } + // N_G + length += hdr.gof.num_frames_in_gof; // T, U, R + for (size_t i = 0; i < hdr.gof.num_frames_in_gof; ++i) { + RTC_DCHECK_LE(hdr.gof.num_ref_pics[i], kMaxVp9RefPics); + length += hdr.gof.num_ref_pics[i]; // R times + } + return length; +} + +size_t PayloadDescriptorLengthMinusSsData(const RTPVideoHeaderVP9& hdr) { + return kFixedPayloadDescriptorBytes + PictureIdLength(hdr) + + LayerInfoLength(hdr) + RefIndicesLength(hdr); +} + +size_t PayloadDescriptorLength(const RTPVideoHeaderVP9& hdr) { + return PayloadDescriptorLengthMinusSsData(hdr) + SsDataLength(hdr); +} + +void QueuePacket(size_t start_pos, + size_t size, + bool layer_begin, + bool layer_end, + RtpPacketizerVp9::PacketInfoQueue* packets) { + RtpPacketizerVp9::PacketInfo packet_info; + packet_info.payload_start_pos = start_pos; + packet_info.size = size; + packet_info.layer_begin = layer_begin; + packet_info.layer_end = layer_end; + packets->push(packet_info); +} + +// Picture ID: +// +// +-+-+-+-+-+-+-+-+ +// I: |M| PICTURE ID | M:0 => picture id is 7 bits. +// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits. +// M: | EXTENDED PID | +// +-+-+-+-+-+-+-+-+ +// +bool WritePictureId(const RTPVideoHeaderVP9& vp9, + rtc::BitBufferWriter* writer) { + bool m_bit = (PictureIdLength(vp9) == 2); + RETURN_FALSE_ON_ERROR(writer->WriteBits(m_bit ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.picture_id, m_bit ? 15 : 7)); + return true; +} + +// Layer indices: +// +// Flexible mode (F=1): +// +// +-+-+-+-+-+-+-+-+ +// L: | T |U| S |D| +// +-+-+-+-+-+-+-+-+ +// +bool WriteLayerInfoCommon(const RTPVideoHeaderVP9& vp9, + rtc::BitBufferWriter* writer) { + RETURN_FALSE_ON_ERROR(writer->WriteBits(TemporalIdxField(vp9, 0), 3)); + RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.temporal_up_switch ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer->WriteBits(SpatialIdxField(vp9, 0), 3)); + RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.inter_layer_predicted ? 1: 0, 1)); + return true; +} + +// Non-flexible mode (F=0): +// +// +-+-+-+-+-+-+-+-+ +// L: | T |U| S |D| +// +-+-+-+-+-+-+-+-+ +// | TL0PICIDX | +// +-+-+-+-+-+-+-+-+ +// +bool WriteLayerInfoNonFlexibleMode(const RTPVideoHeaderVP9& vp9, + rtc::BitBufferWriter* writer) { + RETURN_FALSE_ON_ERROR(writer->WriteUInt8(Tl0PicIdxField(vp9, 0))); + return true; +} + +bool WriteLayerInfo(const RTPVideoHeaderVP9& vp9, + rtc::BitBufferWriter* writer) { + if (!WriteLayerInfoCommon(vp9, writer)) + return false; + + if (vp9.flexible_mode) + return true; + + return WriteLayerInfoNonFlexibleMode(vp9, writer); +} + +// Reference indices: +// +// +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index +// P,F: | P_DIFF |N| up to 3 times has to be specified. +// +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows +// current P_DIFF. 
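    // [Illustrative arithmetic, not part of the patch] The *Length helpers above
    // make descriptor sizing easy to check. For a flexible-mode, inter-picture
    // predicted frame with a 15-bit PictureID and two reference pictures
    // (no SS data):
    //   kFixedPayloadDescriptorBytes                       = 1
    //   + PictureIdLength  (M = 1, 15-bit PID)             = 2
    //   + LayerInfoLength  (flexible: no TL0PICIDX byte)   = 1
    //   + RefIndicesLength (one P_DIFF octet per ref pic)  = 2
    //   = 6 bytes from PayloadDescriptorLengthMinusSsData.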
+// +bool WriteRefIndices(const RTPVideoHeaderVP9& vp9, + rtc::BitBufferWriter* writer) { + if (!PictureIdPresent(vp9) || + vp9.num_ref_pics == 0 || vp9.num_ref_pics > kMaxVp9RefPics) { + return false; + } + for (uint8_t i = 0; i < vp9.num_ref_pics; ++i) { + bool n_bit = !(i == vp9.num_ref_pics - 1); + RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.pid_diff[i], 7)); + RETURN_FALSE_ON_ERROR(writer->WriteBits(n_bit ? 1 : 0, 1)); + } + return true; +} + +// Scalability structure (SS). +// +// +-+-+-+-+-+-+-+-+ +// V: | N_S |Y|G|-|-|-| +// +-+-+-+-+-+-+-+-+ -| +// Y: | WIDTH | (OPTIONAL) . +// + + . +// | | (OPTIONAL) . +// +-+-+-+-+-+-+-+-+ . N_S + 1 times +// | HEIGHT | (OPTIONAL) . +// + + . +// | | (OPTIONAL) . +// +-+-+-+-+-+-+-+-+ -| +// G: | N_G | (OPTIONAL) +// +-+-+-+-+-+-+-+-+ -| +// N_G: | T |U| R |-|-| (OPTIONAL) . +// +-+-+-+-+-+-+-+-+ -| . N_G times +// | P_DIFF | (OPTIONAL) . R times . +// +-+-+-+-+-+-+-+-+ -| -| +// +bool WriteSsData(const RTPVideoHeaderVP9& vp9, rtc::BitBufferWriter* writer) { + RTC_DCHECK_GT(vp9.num_spatial_layers, 0U); + RTC_DCHECK_LE(vp9.num_spatial_layers, kMaxVp9NumberOfSpatialLayers); + RTC_DCHECK_LE(vp9.gof.num_frames_in_gof, kMaxVp9FramesInGof); + bool g_bit = vp9.gof.num_frames_in_gof > 0; + + RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.num_spatial_layers - 1, 3)); + RETURN_FALSE_ON_ERROR( + writer->WriteBits(vp9.spatial_layer_resolution_present ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer->WriteBits(g_bit ? 1 : 0, 1)); // G + RETURN_FALSE_ON_ERROR(writer->WriteBits(kReservedBitValue0, 3)); + + if (vp9.spatial_layer_resolution_present) { + for (size_t i = 0; i < vp9.num_spatial_layers; ++i) { + RETURN_FALSE_ON_ERROR(writer->WriteUInt16(vp9.width[i])); + RETURN_FALSE_ON_ERROR(writer->WriteUInt16(vp9.height[i])); + } + } + if (g_bit) { + RETURN_FALSE_ON_ERROR(writer->WriteUInt8(vp9.gof.num_frames_in_gof)); + } + for (size_t i = 0; i < vp9.gof.num_frames_in_gof; ++i) { + RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.temporal_idx[i], 3)); + RETURN_FALSE_ON_ERROR( + writer->WriteBits(vp9.gof.temporal_up_switch[i] ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.num_ref_pics[i], 2)); + RETURN_FALSE_ON_ERROR(writer->WriteBits(kReservedBitValue0, 2)); + for (uint8_t r = 0; r < vp9.gof.num_ref_pics[i]; ++r) { + RETURN_FALSE_ON_ERROR(writer->WriteUInt8(vp9.gof.pid_diff[i][r])); + } + } + return true; +} + +// Picture ID: +// +// +-+-+-+-+-+-+-+-+ +// I: |M| PICTURE ID | M:0 => picture id is 7 bits. +// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits. +// M: | EXTENDED PID | +// +-+-+-+-+-+-+-+-+ +// +bool ParsePictureId(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { + uint32_t picture_id; + uint32_t m_bit; + RETURN_FALSE_ON_ERROR(parser->ReadBits(&m_bit, 1)); + if (m_bit) { + RETURN_FALSE_ON_ERROR(parser->ReadBits(&picture_id, 15)); + vp9->max_picture_id = kMaxTwoBytePictureId; + } else { + RETURN_FALSE_ON_ERROR(parser->ReadBits(&picture_id, 7)); + vp9->max_picture_id = kMaxOneBytePictureId; + } + vp9->picture_id = picture_id; + return true; +} + +// Layer indices (flexible mode): +// +// +-+-+-+-+-+-+-+-+ +// L: | T |U| S |D| +// +-+-+-+-+-+-+-+-+ +// +bool ParseLayerInfoCommon(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { + uint32_t t, u_bit, s, d_bit; + RETURN_FALSE_ON_ERROR(parser->ReadBits(&t, 3)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(&u_bit, 1)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(&s, 3)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(&d_bit, 1)); + vp9->temporal_idx = t; + vp9->temporal_up_switch = u_bit ? 
true : false; + vp9->spatial_idx = s; + vp9->inter_layer_predicted = d_bit ? true : false; + return true; +} + +// Layer indices (non-flexible mode): +// +// +-+-+-+-+-+-+-+-+ +// L: | T |U| S |D| +// +-+-+-+-+-+-+-+-+ +// | TL0PICIDX | +// +-+-+-+-+-+-+-+-+ +// +bool ParseLayerInfoNonFlexibleMode(rtc::BitBuffer* parser, + RTPVideoHeaderVP9* vp9) { + uint8_t tl0picidx; + RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&tl0picidx)); + vp9->tl0_pic_idx = tl0picidx; + return true; +} + +bool ParseLayerInfo(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { + if (!ParseLayerInfoCommon(parser, vp9)) + return false; + + if (vp9->flexible_mode) + return true; + + return ParseLayerInfoNonFlexibleMode(parser, vp9); +} + +// Reference indices: +// +// +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index +// P,F: | P_DIFF |N| up to 3 times has to be specified. +// +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows +// current P_DIFF. +// +bool ParseRefIndices(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { + if (vp9->picture_id == kNoPictureId) + return false; + + vp9->num_ref_pics = 0; + uint32_t n_bit; + do { + if (vp9->num_ref_pics == kMaxVp9RefPics) + return false; + + uint32_t p_diff; + RETURN_FALSE_ON_ERROR(parser->ReadBits(&p_diff, 7)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(&n_bit, 1)); + + vp9->pid_diff[vp9->num_ref_pics] = p_diff; + uint32_t scaled_pid = vp9->picture_id; + if (p_diff > scaled_pid) { + // TODO(asapersson): Max should correspond to the picture id of last wrap. + scaled_pid += vp9->max_picture_id + 1; + } + vp9->ref_picture_id[vp9->num_ref_pics++] = scaled_pid - p_diff; + } while (n_bit); + + return true; +} + +// Scalability structure (SS). +// +// +-+-+-+-+-+-+-+-+ +// V: | N_S |Y|G|-|-|-| +// +-+-+-+-+-+-+-+-+ -| +// Y: | WIDTH | (OPTIONAL) . +// + + . +// | | (OPTIONAL) . +// +-+-+-+-+-+-+-+-+ . N_S + 1 times +// | HEIGHT | (OPTIONAL) . +// + + . +// | | (OPTIONAL) . +// +-+-+-+-+-+-+-+-+ -| +// G: | N_G | (OPTIONAL) +// +-+-+-+-+-+-+-+-+ -| +// N_G: | T |U| R |-|-| (OPTIONAL) . +// +-+-+-+-+-+-+-+-+ -| . N_G times +// | P_DIFF | (OPTIONAL) . R times . +// +-+-+-+-+-+-+-+-+ -| -| +// +bool ParseSsData(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { + uint32_t n_s, y_bit, g_bit; + RETURN_FALSE_ON_ERROR(parser->ReadBits(&n_s, 3)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(&y_bit, 1)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(&g_bit, 1)); + RETURN_FALSE_ON_ERROR(parser->ConsumeBits(3)); + vp9->num_spatial_layers = n_s + 1; + vp9->spatial_layer_resolution_present = y_bit ? true : false; + vp9->gof.num_frames_in_gof = 0; + + if (y_bit) { + for (size_t i = 0; i < vp9->num_spatial_layers; ++i) { + RETURN_FALSE_ON_ERROR(parser->ReadUInt16(&vp9->width[i])); + RETURN_FALSE_ON_ERROR(parser->ReadUInt16(&vp9->height[i])); + } + } + if (g_bit) { + uint8_t n_g; + RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&n_g)); + vp9->gof.num_frames_in_gof = n_g; + } + for (size_t i = 0; i < vp9->gof.num_frames_in_gof; ++i) { + uint32_t t, u_bit, r; + RETURN_FALSE_ON_ERROR(parser->ReadBits(&t, 3)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(&u_bit, 1)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(&r, 2)); + RETURN_FALSE_ON_ERROR(parser->ConsumeBits(2)); + vp9->gof.temporal_idx[i] = t; + vp9->gof.temporal_up_switch[i] = u_bit ? 
true : false;
+    vp9->gof.num_ref_pics[i] = r;
+
+    for (uint8_t p = 0; p < vp9->gof.num_ref_pics[i]; ++p) {
+      uint8_t p_diff;
+      RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&p_diff));
+      vp9->gof.pid_diff[i][p] = p_diff;
+    }
+  }
+  return true;
+}
+
+// Gets the size of next payload chunk to send. Returns 0 on error.
+size_t CalcNextSize(size_t max_length, size_t rem_bytes) {
+  if (max_length == 0 || rem_bytes == 0) {
+    return 0;
+  }
+  if (kBalancedMode) {
+    size_t num_frags = std::ceil(static_cast<double>(rem_bytes) / max_length);
+    return static_cast<size_t>(
+        static_cast<double>(rem_bytes) / num_frags + 0.5);
+  }
+  return max_length >= rem_bytes ? rem_bytes : max_length;
+}
+}  // namespace
+
+
+RtpPacketizerVp9::RtpPacketizerVp9(const RTPVideoHeaderVP9& hdr,
+                                   size_t max_payload_length)
+    : hdr_(hdr),
+      max_payload_length_(max_payload_length),
+      payload_(nullptr),
+      payload_size_(0) {
+}
+
+RtpPacketizerVp9::~RtpPacketizerVp9() {
+}
+
+ProtectionType RtpPacketizerVp9::GetProtectionType() {
+  bool protect =
+      hdr_.temporal_idx == 0 || hdr_.temporal_idx == kNoTemporalIdx;
+  return protect ? kProtectedPacket : kUnprotectedPacket;
+}
+
+StorageType RtpPacketizerVp9::GetStorageType(uint32_t retransmission_settings) {
+  StorageType storage = kAllowRetransmission;
+  if (hdr_.temporal_idx == 0 &&
+      !(retransmission_settings & kRetransmitBaseLayer)) {
+    storage = kDontRetransmit;
+  } else if (hdr_.temporal_idx != kNoTemporalIdx && hdr_.temporal_idx > 0 &&
+             !(retransmission_settings & kRetransmitHigherLayers)) {
+    storage = kDontRetransmit;
+  }
+  return storage;
+}
+
+std::string RtpPacketizerVp9::ToString() {
+  return "RtpPacketizerVp9";
+}
+
+void RtpPacketizerVp9::SetPayloadData(
+    const uint8_t* payload,
+    size_t payload_size,
+    const RTPFragmentationHeader* fragmentation) {
+  payload_ = payload;
+  payload_size_ = payload_size;
+  GeneratePackets();
+}
+
+void RtpPacketizerVp9::GeneratePackets() {
+  if (max_payload_length_ < PayloadDescriptorLength(hdr_) + 1) {
+    LOG(LS_ERROR) << "Payload header and one payload byte won't fit.";
+    return;
+  }
+  size_t bytes_processed = 0;
+  while (bytes_processed < payload_size_) {
+    size_t rem_bytes = payload_size_ - bytes_processed;
+    size_t rem_payload_len = max_payload_length_ -
+        (bytes_processed ?
PayloadDescriptorLengthMinusSsData(hdr_) + : PayloadDescriptorLength(hdr_)); + + size_t packet_bytes = CalcNextSize(rem_payload_len, rem_bytes); + if (packet_bytes == 0) { + LOG(LS_ERROR) << "Failed to generate VP9 packets."; + while (!packets_.empty()) + packets_.pop(); + return; + } + QueuePacket(bytes_processed, packet_bytes, bytes_processed == 0, + rem_bytes == packet_bytes, &packets_); + bytes_processed += packet_bytes; + } + assert(bytes_processed == payload_size_); +} + +bool RtpPacketizerVp9::NextPacket(uint8_t* buffer, + size_t* bytes_to_send, + bool* last_packet) { + if (packets_.empty()) { + return false; + } + PacketInfo packet_info = packets_.front(); + packets_.pop(); + + if (!WriteHeaderAndPayload(packet_info, buffer, bytes_to_send)) { + return false; + } + *last_packet = + packets_.empty() && (hdr_.spatial_idx == kNoSpatialIdx || + hdr_.spatial_idx == hdr_.num_spatial_layers - 1); + return true; +} + +// VP9 format: +// +// Payload descriptor for F = 1 (flexible mode) +// 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+ +// |I|P|L|F|B|E|V|-| (REQUIRED) +// +-+-+-+-+-+-+-+-+ +// I: |M| PICTURE ID | (RECOMMENDED) +// +-+-+-+-+-+-+-+-+ +// M: | EXTENDED PID | (RECOMMENDED) +// +-+-+-+-+-+-+-+-+ +// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED) +// +-+-+-+-+-+-+-+-+ -| +// P,F: | P_DIFF |N| (CONDITIONALLY RECOMMENDED) . up to 3 times +// +-+-+-+-+-+-+-+-+ -| +// V: | SS | +// | .. | +// +-+-+-+-+-+-+-+-+ +// +// Payload descriptor for F = 0 (non-flexible mode) +// 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+ +// |I|P|L|F|B|E|V|-| (REQUIRED) +// +-+-+-+-+-+-+-+-+ +// I: |M| PICTURE ID | (RECOMMENDED) +// +-+-+-+-+-+-+-+-+ +// M: | EXTENDED PID | (RECOMMENDED) +// +-+-+-+-+-+-+-+-+ +// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED) +// +-+-+-+-+-+-+-+-+ +// | TL0PICIDX | (CONDITIONALLY REQUIRED) +// +-+-+-+-+-+-+-+-+ +// V: | SS | +// | .. | +// +-+-+-+-+-+-+-+-+ + +bool RtpPacketizerVp9::WriteHeaderAndPayload(const PacketInfo& packet_info, + uint8_t* buffer, + size_t* bytes_to_send) const { + size_t header_length; + if (!WriteHeader(packet_info, buffer, &header_length)) + return false; + + // Copy payload data. + memcpy(&buffer[header_length], + &payload_[packet_info.payload_start_pos], packet_info.size); + + *bytes_to_send = header_length + packet_info.size; + return true; +} + +bool RtpPacketizerVp9::WriteHeader(const PacketInfo& packet_info, + uint8_t* buffer, + size_t* header_length) const { + // Required payload descriptor byte. + bool i_bit = PictureIdPresent(hdr_); + bool p_bit = hdr_.inter_pic_predicted; + bool l_bit = LayerInfoPresent(hdr_); + bool f_bit = hdr_.flexible_mode; + bool b_bit = packet_info.layer_begin; + bool e_bit = packet_info.layer_end; + bool v_bit = hdr_.ss_data_available && b_bit; + + rtc::BitBufferWriter writer(buffer, max_payload_length_); + RETURN_FALSE_ON_ERROR(writer.WriteBits(i_bit ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer.WriteBits(p_bit ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer.WriteBits(l_bit ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer.WriteBits(f_bit ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer.WriteBits(b_bit ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer.WriteBits(e_bit ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer.WriteBits(v_bit ? 1 : 0, 1)); + RETURN_FALSE_ON_ERROR(writer.WriteBits(kReservedBitValue0, 1)); + + // Add fields that are present. 
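+  // The optional fields below must be written in the order the
+  // depacketizer consumes them: picture ID (I), layer indices (L),
+  // reference indices (P and F both set), then scalability structure (V).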
+ if (i_bit && !WritePictureId(hdr_, &writer)) { + LOG(LS_ERROR) << "Failed writing VP9 picture id."; + return false; + } + if (l_bit && !WriteLayerInfo(hdr_, &writer)) { + LOG(LS_ERROR) << "Failed writing VP9 layer info."; + return false; + } + if (p_bit && f_bit && !WriteRefIndices(hdr_, &writer)) { + LOG(LS_ERROR) << "Failed writing VP9 ref indices."; + return false; + } + if (v_bit && !WriteSsData(hdr_, &writer)) { + LOG(LS_ERROR) << "Failed writing VP9 SS data."; + return false; + } + + size_t offset_bytes = 0; + size_t offset_bits = 0; + writer.GetCurrentOffset(&offset_bytes, &offset_bits); + assert(offset_bits == 0); + + *header_length = offset_bytes; + return true; +} + +bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload, + const uint8_t* payload, + size_t payload_length) { + assert(parsed_payload != nullptr); + if (payload_length == 0) { + LOG(LS_ERROR) << "Payload length is zero."; + return false; + } + + // Parse mandatory first byte of payload descriptor. + rtc::BitBuffer parser(payload, payload_length); + uint32_t i_bit, p_bit, l_bit, f_bit, b_bit, e_bit, v_bit; + RETURN_FALSE_ON_ERROR(parser.ReadBits(&i_bit, 1)); + RETURN_FALSE_ON_ERROR(parser.ReadBits(&p_bit, 1)); + RETURN_FALSE_ON_ERROR(parser.ReadBits(&l_bit, 1)); + RETURN_FALSE_ON_ERROR(parser.ReadBits(&f_bit, 1)); + RETURN_FALSE_ON_ERROR(parser.ReadBits(&b_bit, 1)); + RETURN_FALSE_ON_ERROR(parser.ReadBits(&e_bit, 1)); + RETURN_FALSE_ON_ERROR(parser.ReadBits(&v_bit, 1)); + RETURN_FALSE_ON_ERROR(parser.ConsumeBits(1)); + + // Parsed payload. + parsed_payload->type.Video.width = 0; + parsed_payload->type.Video.height = 0; + parsed_payload->type.Video.simulcastIdx = 0; + parsed_payload->type.Video.codec = kRtpVideoVp9; + + parsed_payload->frame_type = p_bit ? kVideoFrameDelta : kVideoFrameKey; + + RTPVideoHeaderVP9* vp9 = &parsed_payload->type.Video.codecHeader.VP9; + vp9->InitRTPVideoHeaderVP9(); + vp9->inter_pic_predicted = p_bit ? true : false; + vp9->flexible_mode = f_bit ? true : false; + vp9->beginning_of_frame = b_bit ? true : false; + vp9->end_of_frame = e_bit ? true : false; + vp9->ss_data_available = v_bit ? true : false; + vp9->spatial_idx = 0; + + // Parse fields that are present. + if (i_bit && !ParsePictureId(&parser, vp9)) { + LOG(LS_ERROR) << "Failed parsing VP9 picture id."; + return false; + } + if (l_bit && !ParseLayerInfo(&parser, vp9)) { + LOG(LS_ERROR) << "Failed parsing VP9 layer info."; + return false; + } + if (p_bit && f_bit && !ParseRefIndices(&parser, vp9)) { + LOG(LS_ERROR) << "Failed parsing VP9 ref indices."; + return false; + } + if (v_bit) { + if (!ParseSsData(&parser, vp9)) { + LOG(LS_ERROR) << "Failed parsing VP9 SS data."; + return false; + } + if (vp9->spatial_layer_resolution_present) { + // TODO(asapersson): Add support for spatial layers. 
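+      // Until then, only the first (lowest) spatial layer's resolution is
+      // reported, i.e. the first WIDTH/HEIGHT pair signaled in the SS data.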
+      parsed_payload->type.Video.width = vp9->width[0];
+      parsed_payload->type.Video.height = vp9->height[0];
+    }
+  }
+  parsed_payload->type.Video.isFirstPacket =
+      b_bit && (!l_bit || !vp9->inter_layer_predicted);
+
+  uint64_t rem_bits = parser.RemainingBitCount();
+  assert(rem_bits % 8 == 0);
+  parsed_payload->payload_length = rem_bits / 8;
+  if (parsed_payload->payload_length == 0) {
+    LOG(LS_ERROR) << "Failed parsing VP9 payload data.";
+    return false;
+  }
+  parsed_payload->payload =
+      payload + payload_length - parsed_payload->payload_length;
+
+  return true;
+}
+}  // namespace webrtc
diff --git a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
new file mode 100644
index 000000000000..883fbce5c866
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+//
+// This file contains the declaration of the VP9 packetizer class.
+// A packetizer object is created for each encoded video frame. The
+// constructor is called with the payload data and size.
+//
+// After creating the packetizer, the method NextPacket is called
+// repeatedly to get all packets for the frame. The method returns
+// false as long as there are more packets left to fetch.
+//
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
+
+#include <queue>
+#include <string>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class RtpPacketizerVp9 : public RtpPacketizer {
+ public:
+  RtpPacketizerVp9(const RTPVideoHeaderVP9& hdr, size_t max_payload_length);
+
+  virtual ~RtpPacketizerVp9();
+
+  ProtectionType GetProtectionType() override;
+
+  StorageType GetStorageType(uint32_t retransmission_settings) override;
+
+  std::string ToString() override;
+
+  // The payload data must be one encoded VP9 frame.
+  void SetPayloadData(const uint8_t* payload,
+                      size_t payload_size,
+                      const RTPFragmentationHeader* fragmentation) override;
+
+  // Gets the next payload with VP9 payload header.
+  // |buffer| is a pointer to where the output will be written.
+  // |bytes_to_send| is an output variable that will contain number of bytes
+  // written to buffer.
+  // |last_packet| is true for the last packet of the frame, false otherwise
+  // (i.e. call the function again to get the next packet).
+  // Returns true on success, false otherwise.
+  bool NextPacket(uint8_t* buffer,
+                  size_t* bytes_to_send,
+                  bool* last_packet) override;
+
+  typedef struct {
+    size_t payload_start_pos;
+    size_t size;
+    bool layer_begin;
+    bool layer_end;
+  } PacketInfo;
+  typedef std::queue<PacketInfo> PacketInfoQueue;
+
+ private:
+  // Calculates all packet sizes and loads info to packet queue.
+  void GeneratePackets();
+
+  // Writes the payload descriptor header and copies payload to the |buffer|.
+  // |packet_info| determines which part of the payload to write.
+  // |bytes_to_send| contains the number of written bytes to the buffer.
+  // Returns true on success, false otherwise.
+  bool WriteHeaderAndPayload(const PacketInfo& packet_info,
+                             uint8_t* buffer,
+                             size_t* bytes_to_send) const;
+
+  // Writes payload descriptor header to |buffer|.
+  // Returns true on success, false otherwise.
+  bool WriteHeader(const PacketInfo& packet_info,
+                   uint8_t* buffer,
+                   size_t* header_length) const;
+
+  const RTPVideoHeaderVP9 hdr_;
+  const size_t max_payload_length_;  // The max length in bytes of one packet.
+  const uint8_t* payload_;           // The payload data to be packetized.
+  size_t payload_size_;              // The size in bytes of the payload data.
+  PacketInfoQueue packets_;
+
+  DISALLOW_COPY_AND_ASSIGN(RtpPacketizerVp9);
+};
+
+
+class RtpDepacketizerVp9 : public RtpDepacketizer {
+ public:
+  virtual ~RtpDepacketizerVp9() {}
+
+  bool Parse(ParsedPayload* parsed_payload,
+             const uint8_t* payload,
+             size_t payload_length) override;
+};
+
+}  // namespace webrtc
+#endif  // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
diff --git a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
new file mode 100644
index 000000000000..5bbafe459d22
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
@@ -0,0 +1,690 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+namespace {
+void VerifyHeader(const RTPVideoHeaderVP9& expected,
+                  const RTPVideoHeaderVP9& actual) {
+  EXPECT_EQ(expected.inter_layer_predicted, actual.inter_layer_predicted);
+  EXPECT_EQ(expected.inter_pic_predicted, actual.inter_pic_predicted);
+  EXPECT_EQ(expected.flexible_mode, actual.flexible_mode);
+  EXPECT_EQ(expected.beginning_of_frame, actual.beginning_of_frame);
+  EXPECT_EQ(expected.end_of_frame, actual.end_of_frame);
+  EXPECT_EQ(expected.ss_data_available, actual.ss_data_available);
+  EXPECT_EQ(expected.picture_id, actual.picture_id);
+  EXPECT_EQ(expected.max_picture_id, actual.max_picture_id);
+  EXPECT_EQ(expected.temporal_idx, actual.temporal_idx);
+  EXPECT_EQ(expected.spatial_idx == kNoSpatialIdx ?
0 : expected.spatial_idx,
+            actual.spatial_idx);
+  EXPECT_EQ(expected.gof_idx, actual.gof_idx);
+  EXPECT_EQ(expected.tl0_pic_idx, actual.tl0_pic_idx);
+  EXPECT_EQ(expected.temporal_up_switch, actual.temporal_up_switch);
+
+  EXPECT_EQ(expected.num_ref_pics, actual.num_ref_pics);
+  for (uint8_t i = 0; i < expected.num_ref_pics; ++i) {
+    EXPECT_EQ(expected.pid_diff[i], actual.pid_diff[i]);
+    EXPECT_EQ(expected.ref_picture_id[i], actual.ref_picture_id[i]);
+  }
+  if (expected.ss_data_available) {
+    EXPECT_EQ(expected.spatial_layer_resolution_present,
+              actual.spatial_layer_resolution_present);
+    EXPECT_EQ(expected.num_spatial_layers, actual.num_spatial_layers);
+    if (expected.spatial_layer_resolution_present) {
+      for (size_t i = 0; i < expected.num_spatial_layers; i++) {
+        EXPECT_EQ(expected.width[i], actual.width[i]);
+        EXPECT_EQ(expected.height[i], actual.height[i]);
+      }
+    }
+    EXPECT_EQ(expected.gof.num_frames_in_gof, actual.gof.num_frames_in_gof);
+    for (size_t i = 0; i < expected.gof.num_frames_in_gof; i++) {
+      EXPECT_EQ(expected.gof.temporal_up_switch[i],
+                actual.gof.temporal_up_switch[i]);
+      EXPECT_EQ(expected.gof.temporal_idx[i], actual.gof.temporal_idx[i]);
+      EXPECT_EQ(expected.gof.num_ref_pics[i], actual.gof.num_ref_pics[i]);
+      for (uint8_t j = 0; j < expected.gof.num_ref_pics[i]; j++) {
+        EXPECT_EQ(expected.gof.pid_diff[i][j], actual.gof.pid_diff[i][j]);
+      }
+    }
+  }
+}
+
+void VerifyPayload(const RtpDepacketizer::ParsedPayload& parsed,
+                   const uint8_t* payload,
+                   size_t payload_length) {
+  EXPECT_EQ(payload, parsed.payload);
+  EXPECT_EQ(payload_length, parsed.payload_length);
+  EXPECT_THAT(std::vector<uint8_t>(parsed.payload,
+                                   parsed.payload + parsed.payload_length),
+              ::testing::ElementsAreArray(payload, payload_length));
+}
+
+void ParseAndCheckPacket(const uint8_t* packet,
+                         const RTPVideoHeaderVP9& expected,
+                         size_t expected_hdr_length,
+                         size_t expected_length) {
+  rtc::scoped_ptr<RtpDepacketizer> depacketizer(new RtpDepacketizerVp9());
+  RtpDepacketizer::ParsedPayload parsed;
+  ASSERT_TRUE(depacketizer->Parse(&parsed, packet, expected_length));
+  EXPECT_EQ(kRtpVideoVp9, parsed.type.Video.codec);
+  VerifyHeader(expected, parsed.type.Video.codecHeader.VP9);
+  const size_t kExpectedPayloadLength = expected_length - expected_hdr_length;
+  VerifyPayload(parsed, packet + expected_hdr_length, kExpectedPayloadLength);
+}
+}  // namespace
+
+// Payload descriptor for flexible mode
+//        0 1 2 3 4 5 6 7
+//       +-+-+-+-+-+-+-+-+
+//       |I|P|L|F|B|E|V|-| (REQUIRED)
+//       +-+-+-+-+-+-+-+-+
+// I:    |M| PICTURE ID  | (RECOMMENDED)
+//       +-+-+-+-+-+-+-+-+
+// M:    | EXTENDED PID  | (RECOMMENDED)
+//       +-+-+-+-+-+-+-+-+
+// L:    |  T  |U|  S  |D| (CONDITIONALLY RECOMMENDED)
+//       +-+-+-+-+-+-+-+-+                             -|
+// P,F:  | P_DIFF      |N| (CONDITIONALLY RECOMMENDED)  . up to 3 times
+//       +-+-+-+-+-+-+-+-+                             -|
+// V:    | SS            |
+//       | ..            |
+//       +-+-+-+-+-+-+-+-+
+//
+// Payload descriptor for non-flexible mode
+//        0 1 2 3 4 5 6 7
+//       +-+-+-+-+-+-+-+-+
+//       |I|P|L|F|B|E|V|-| (REQUIRED)
+//       +-+-+-+-+-+-+-+-+
+// I:    |M| PICTURE ID  | (RECOMMENDED)
+//       +-+-+-+-+-+-+-+-+
+// M:    | EXTENDED PID  | (RECOMMENDED)
+//       +-+-+-+-+-+-+-+-+
+// L:    |  T  |U|  S  |D| (CONDITIONALLY RECOMMENDED)
+//       +-+-+-+-+-+-+-+-+
+//       |   TL0PICIDX   | (CONDITIONALLY REQUIRED)
+//       +-+-+-+-+-+-+-+-+
+// V:    | SS            |
+//       | ..            |
+//       +-+-+-+-+-+-+-+-+
+
+class RtpPacketizerVp9Test : public ::testing::Test {
+ protected:
+  RtpPacketizerVp9Test() {}
+  virtual void SetUp() {
+    expected_.InitRTPVideoHeaderVP9();
+  }
+
+  rtc::scoped_ptr<uint8_t[]> packet_;
+  rtc::scoped_ptr<uint8_t[]> payload_;
+  size_t payload_size_;
+  size_t payload_pos_;
+  RTPVideoHeaderVP9 expected_;
+  rtc::scoped_ptr<RtpPacketizerVp9> packetizer_;
+
+  void Init(size_t payload_size, size_t packet_size) {
+    payload_.reset(new uint8_t[payload_size]);
+    memset(payload_.get(), 7, payload_size);
+    payload_size_ = payload_size;
+    payload_pos_ = 0;
+    packetizer_.reset(new RtpPacketizerVp9(expected_, packet_size));
+    packetizer_->SetPayloadData(payload_.get(), payload_size_, NULL);
+
+    const int kMaxPayloadDescriptorLength = 100;
+    packet_.reset(new uint8_t[payload_size_ + kMaxPayloadDescriptorLength]);
+  }
+
+  void CheckPayload(const uint8_t* packet,
+                    size_t start_pos,
+                    size_t end_pos,
+                    bool last) {
+    for (size_t i = start_pos; i < end_pos; ++i) {
+      EXPECT_EQ(packet[i], payload_[payload_pos_++]);
+    }
+    EXPECT_EQ(last, payload_pos_ == payload_size_);
+  }
+
+  void CreateParseAndCheckPackets(const size_t* expected_hdr_sizes,
+                                  const size_t* expected_sizes,
+                                  size_t expected_num_packets) {
+    ASSERT_TRUE(packetizer_.get() != NULL);
+    size_t length = 0;
+    bool last = false;
+    if (expected_num_packets == 0) {
+      EXPECT_FALSE(packetizer_->NextPacket(packet_.get(), &length, &last));
+      return;
+    }
+    for (size_t i = 0; i < expected_num_packets; ++i) {
+      EXPECT_TRUE(packetizer_->NextPacket(packet_.get(), &length, &last));
+      EXPECT_EQ(expected_sizes[i], length);
+      RTPVideoHeaderVP9 hdr = expected_;
+      hdr.beginning_of_frame = (i == 0);
+      hdr.end_of_frame = last;
+      ParseAndCheckPacket(packet_.get(), hdr, expected_hdr_sizes[i], length);
+      CheckPayload(packet_.get(), expected_hdr_sizes[i], length, last);
+    }
+    EXPECT_TRUE(last);
+  }
+};
+
+TEST_F(RtpPacketizerVp9Test, TestEqualSizedMode_OnePacket) {
+  const size_t kFrameSize = 25;
+  const size_t kPacketSize = 26;
+  Init(kFrameSize, kPacketSize);
+
+  // One packet:
+  // I:0, P:0, L:0, F:0, B:1, E:1, V:0  (1hdr + 25 payload)
+  const size_t kExpectedHdrSizes[] = {1};
+  const size_t kExpectedSizes[] = {26};
+  const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+  CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestEqualSizedMode_TwoPackets) {
+  const size_t kFrameSize = 27;
+  const size_t kPacketSize = 27;
+  Init(kFrameSize, kPacketSize);
+
+  // Two packets:
+  // I:0, P:0, L:0, F:0, B:1, E:0, V:0  (1hdr + 14 payload)
+  // I:0, P:0, L:0, F:0, B:0, E:1, V:0  (1hdr + 13 payload)
+  const size_t kExpectedHdrSizes[] = {1, 1};
+  const size_t kExpectedSizes[] = {15, 14};
+  const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+  CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestTooShortBufferToFitPayload) {
+  const size_t kFrameSize = 1;
+  const size_t kPacketSize = 1;
+  Init(kFrameSize, kPacketSize);  // 1hdr + 1 payload
+
+  const size_t kExpectedNum = 0;
+  CreateParseAndCheckPackets(NULL, NULL, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestOneBytePictureId) {
+  const size_t kFrameSize = 30;
+  const size_t kPacketSize = 12;
+
+  expected_.picture_id = kMaxOneBytePictureId;  // 2 byte payload descriptor
+  expected_.max_picture_id = kMaxOneBytePictureId;
+  Init(kFrameSize, kPacketSize);
+
+  // Three packets:
+  // I:1, P:0, L:0, F:0, B:1, E:0, V:0  (2hdr + 10 payload)
+  // I:1, P:0, L:0, F:0, B:0, E:0, V:0  (2hdr + 10 payload)
+ // I:1, P:0, L:0, F:0, B:0, E:1, V:0 (2hdr + 10 payload) + const size_t kExpectedHdrSizes[] = {2, 2, 2}; + const size_t kExpectedSizes[] = {12, 12, 12}; + const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes); + CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum); +} + +TEST_F(RtpPacketizerVp9Test, TestTwoBytePictureId) { + const size_t kFrameSize = 31; + const size_t kPacketSize = 13; + + expected_.picture_id = kMaxTwoBytePictureId; // 3 byte payload descriptor + Init(kFrameSize, kPacketSize); + + // Four packets: + // I:1, P:0, L:0, F:0, B:1, E:0, V:0 (3hdr + 8 payload) + // I:1, P:0, L:0, F:0, B:0, E:0, V:0 (3hdr + 8 payload) + // I:1, P:0, L:0, F:0, B:0, E:0, V:0 (3hdr + 8 payload) + // I:1, P:0, L:0, F:0, B:0, E:1, V:0 (3hdr + 7 payload) + const size_t kExpectedHdrSizes[] = {3, 3, 3, 3}; + const size_t kExpectedSizes[] = {11, 11, 11, 10}; + const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes); + CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum); +} + +TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithNonFlexibleMode) { + const size_t kFrameSize = 30; + const size_t kPacketSize = 25; + + expected_.temporal_idx = 3; + expected_.temporal_up_switch = true; // U + expected_.num_spatial_layers = 3; + expected_.spatial_idx = 2; + expected_.inter_layer_predicted = true; // D + expected_.tl0_pic_idx = 117; + Init(kFrameSize, kPacketSize); + + // Two packets: + // | I:0, P:0, L:1, F:0, B:1, E:0, V:0 | (3hdr + 15 payload) + // L: | T:3, U:1, S:2, D:1 | TL0PICIDX:117 | + // | I:0, P:0, L:1, F:0, B:0, E:1, V:0 | (3hdr + 15 payload) + // L: | T:3, U:1, S:2, D:1 | TL0PICIDX:117 | + const size_t kExpectedHdrSizes[] = {3, 3}; + const size_t kExpectedSizes[] = {18, 18}; + const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes); + CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum); +} + +TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithFlexibleMode) { + const size_t kFrameSize = 21; + const size_t kPacketSize = 23; + + expected_.flexible_mode = true; + expected_.temporal_idx = 3; + expected_.temporal_up_switch = true; // U + expected_.num_spatial_layers = 3; + expected_.spatial_idx = 2; + expected_.inter_layer_predicted = false; // D + Init(kFrameSize, kPacketSize); + + // One packet: + // I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 21 payload) + // L: T:3, U:1, S:2, D:0 + const size_t kExpectedHdrSizes[] = {2}; + const size_t kExpectedSizes[] = {23}; + const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes); + CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum); +} + +TEST_F(RtpPacketizerVp9Test, TestRefIdx) { + const size_t kFrameSize = 16; + const size_t kPacketSize = 21; + + expected_.inter_pic_predicted = true; // P + expected_.flexible_mode = true; // F + expected_.picture_id = 2; + expected_.max_picture_id = kMaxOneBytePictureId; + + expected_.num_ref_pics = 3; + expected_.pid_diff[0] = 1; + expected_.pid_diff[1] = 3; + expected_.pid_diff[2] = 127; + expected_.ref_picture_id[0] = 1; // 2 - 1 = 1 + expected_.ref_picture_id[1] = 127; // (kMaxPictureId + 1) + 2 - 3 = 127 + expected_.ref_picture_id[2] = 3; // (kMaxPictureId + 1) + 2 - 127 = 3 + Init(kFrameSize, kPacketSize); + + // Two packets: + // I:1, P:1, L:0, F:1, B:1, E:1, V:0 (5hdr + 16 payload) + // I: 2 + // P,F: P_DIFF:1, N:1 + // P_DIFF:3, N:1 + // P_DIFF:127, N:0 + const size_t kExpectedHdrSizes[] = {5}; + const size_t kExpectedSizes[] = {21}; + const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes); + 
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum); +} + +TEST_F(RtpPacketizerVp9Test, TestRefIdxFailsWithoutPictureId) { + const size_t kFrameSize = 16; + const size_t kPacketSize = 21; + + expected_.inter_pic_predicted = true; + expected_.flexible_mode = true; + expected_.num_ref_pics = 1; + expected_.pid_diff[0] = 3; + Init(kFrameSize, kPacketSize); + + const size_t kExpectedNum = 0; + CreateParseAndCheckPackets(NULL, NULL, kExpectedNum); +} + +TEST_F(RtpPacketizerVp9Test, TestSsDataWithoutSpatialResolutionPresent) { + const size_t kFrameSize = 21; + const size_t kPacketSize = 26; + + expected_.ss_data_available = true; + expected_.num_spatial_layers = 1; + expected_.spatial_layer_resolution_present = false; + expected_.gof.num_frames_in_gof = 1; + expected_.gof.temporal_idx[0] = 0; + expected_.gof.temporal_up_switch[0] = true; + expected_.gof.num_ref_pics[0] = 1; + expected_.gof.pid_diff[0][0] = 4; + Init(kFrameSize, kPacketSize); + + // One packet: + // I:0, P:0, L:0, F:0, B:1, E:1, V:1 (5hdr + 21 payload) + // N_S:0, Y:0, G:1 + // N_G:1 + // T:0, U:1, R:1 | P_DIFF[0][0]:4 + const size_t kExpectedHdrSizes[] = {5}; + const size_t kExpectedSizes[] = {26}; + const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes); + CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum); +} + +TEST_F(RtpPacketizerVp9Test, TestSsDataWithoutGbitPresent) { + const size_t kFrameSize = 21; + const size_t kPacketSize = 23; + + expected_.ss_data_available = true; + expected_.num_spatial_layers = 1; + expected_.spatial_layer_resolution_present = false; + expected_.gof.num_frames_in_gof = 0; + Init(kFrameSize, kPacketSize); + + // One packet: + // I:0, P:0, L:0, F:0, B:1, E:1, V:1 (2hdr + 21 payload) + // N_S:0, Y:0, G:0 + const size_t kExpectedHdrSizes[] = {2}; + const size_t kExpectedSizes[] = {23}; + const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes); + CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum); +} + +TEST_F(RtpPacketizerVp9Test, TestSsData) { + const size_t kFrameSize = 21; + const size_t kPacketSize = 40; + + expected_.ss_data_available = true; + expected_.num_spatial_layers = 2; + expected_.spatial_layer_resolution_present = true; + expected_.width[0] = 640; + expected_.width[1] = 1280; + expected_.height[0] = 360; + expected_.height[1] = 720; + expected_.gof.num_frames_in_gof = 3; + expected_.gof.temporal_idx[0] = 0; + expected_.gof.temporal_idx[1] = 1; + expected_.gof.temporal_idx[2] = 2; + expected_.gof.temporal_up_switch[0] = true; + expected_.gof.temporal_up_switch[1] = true; + expected_.gof.temporal_up_switch[2] = false; + expected_.gof.num_ref_pics[0] = 0; + expected_.gof.num_ref_pics[1] = 3; + expected_.gof.num_ref_pics[2] = 2; + expected_.gof.pid_diff[1][0] = 5; + expected_.gof.pid_diff[1][1] = 6; + expected_.gof.pid_diff[1][2] = 7; + expected_.gof.pid_diff[2][0] = 8; + expected_.gof.pid_diff[2][1] = 9; + Init(kFrameSize, kPacketSize); + + // One packet: + // I:0, P:0, L:0, F:0, B:1, E:1, V:1 (19hdr + 21 payload) + // N_S:1, Y:1, G:1 + // WIDTH:640 // 2 bytes + // HEIGHT:360 // 2 bytes + // WIDTH:1280 // 2 bytes + // HEIGHT:720 // 2 bytes + // N_G:3 + // T:0, U:1, R:0 + // T:1, U:1, R:3 | P_DIFF[1][0]:5 | P_DIFF[1][1]:6 | P_DIFF[1][2]:7 + // T:2, U:0, R:2 | P_DIFF[2][0]:8 | P_DIFF[2][0]:9 + const size_t kExpectedHdrSizes[] = {19}; + const size_t kExpectedSizes[] = {40}; + const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes); + CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, 
kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestBaseLayerProtectionAndStorageType) {
+  const size_t kFrameSize = 10;
+  const size_t kPacketSize = 12;
+
+  // I:0, P:0, L:1, F:1, B:1, E:1, V:0  (2hdr + 10 payload)
+  // L:   T:0, U:0, S:0, D:0
+  expected_.flexible_mode = true;
+  expected_.temporal_idx = 0;
+  Init(kFrameSize, kPacketSize);
+  EXPECT_EQ(kProtectedPacket, packetizer_->GetProtectionType());
+  EXPECT_EQ(kAllowRetransmission,
+            packetizer_->GetStorageType(kRetransmitBaseLayer));
+  EXPECT_EQ(kDontRetransmit, packetizer_->GetStorageType(kRetransmitOff));
+}
+
+TEST_F(RtpPacketizerVp9Test, TestHigherLayerProtectionAndStorageType) {
+  const size_t kFrameSize = 10;
+  const size_t kPacketSize = 12;
+
+  // I:0, P:0, L:1, F:1, B:1, E:1, V:0  (2hdr + 10 payload)
+  // L:   T:1, U:0, S:0, D:0
+  expected_.flexible_mode = true;
+  expected_.temporal_idx = 1;
+  Init(kFrameSize, kPacketSize);
+  EXPECT_EQ(kUnprotectedPacket, packetizer_->GetProtectionType());
+  EXPECT_EQ(kDontRetransmit, packetizer_->GetStorageType(kRetransmitBaseLayer));
+  EXPECT_EQ(kAllowRetransmission,
+            packetizer_->GetStorageType(kRetransmitHigherLayers));
+}
+
+
+class RtpDepacketizerVp9Test : public ::testing::Test {
+ protected:
+  RtpDepacketizerVp9Test()
+      : depacketizer_(new RtpDepacketizerVp9()) {}
+
+  virtual void SetUp() {
+    expected_.InitRTPVideoHeaderVP9();
+  }
+
+  RTPVideoHeaderVP9 expected_;
+  rtc::scoped_ptr<RtpDepacketizerVp9> depacketizer_;
+};
+
+TEST_F(RtpDepacketizerVp9Test, ParseBasicHeader) {
+  const uint8_t kHeaderLength = 1;
+  uint8_t packet[4] = {0};
+  packet[0] = 0x0C;  // I:0 P:0 L:0 F:0 B:1 E:1 V:0 R:0
+  expected_.beginning_of_frame = true;
+  expected_.end_of_frame = true;
+  ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseOneBytePictureId) {
+  const uint8_t kHeaderLength = 2;
+  uint8_t packet[10] = {0};
+  packet[0] = 0x80;  // I:1 P:0 L:0 F:0 B:0 E:0 V:0 R:0
+  packet[1] = kMaxOneBytePictureId;
+
+  expected_.picture_id = kMaxOneBytePictureId;
+  expected_.max_picture_id = kMaxOneBytePictureId;
+  ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseTwoBytePictureId) {
+  const uint8_t kHeaderLength = 3;
+  uint8_t packet[10] = {0};
+  packet[0] = 0x80;  // I:1 P:0 L:0 F:0 B:0 E:0 V:0 R:0
+  packet[1] = 0x80 | ((kMaxTwoBytePictureId >> 8) & 0x7F);
+  packet[2] = kMaxTwoBytePictureId & 0xFF;
+
+  expected_.picture_id = kMaxTwoBytePictureId;
+  expected_.max_picture_id = kMaxTwoBytePictureId;
+  ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseLayerInfoWithNonFlexibleMode) {
+  const uint8_t kHeaderLength = 3;
+  const uint8_t kTemporalIdx = 2;
+  const uint8_t kUbit = 1;
+  const uint8_t kSpatialIdx = 1;
+  const uint8_t kDbit = 1;
+  const uint8_t kTl0PicIdx = 17;
+  uint8_t packet[13] = {0};
+  packet[0] = 0x20;  // I:0 P:0 L:1 F:0 B:0 E:0 V:0 R:0
+  packet[1] = (kTemporalIdx << 5) | (kUbit << 4) | (kSpatialIdx << 1) | kDbit;
+  packet[2] = kTl0PicIdx;
+
+  // T:2 U:1 S:1 D:1
+  // TL0PICIDX:17
+  expected_.temporal_idx = kTemporalIdx;
+  expected_.temporal_up_switch = kUbit ? true : false;
+  expected_.spatial_idx = kSpatialIdx;
+  expected_.inter_layer_predicted = kDbit ?
true : false;
+  expected_.tl0_pic_idx = kTl0PicIdx;
+  ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseLayerInfoWithFlexibleMode) {
+  const uint8_t kHeaderLength = 2;
+  const uint8_t kTemporalIdx = 2;
+  const uint8_t kUbit = 1;
+  const uint8_t kSpatialIdx = 0;
+  const uint8_t kDbit = 0;
+  uint8_t packet[13] = {0};
+  packet[0] = 0x38;  // I:0 P:0 L:1 F:1 B:1 E:0 V:0 R:0
+  packet[1] = (kTemporalIdx << 5) | (kUbit << 4) | (kSpatialIdx << 1) | kDbit;
+
+  // I:0 P:0 L:1 F:1 B:1 E:0 V:0
+  // L: T:2 U:1 S:0 D:0
+  expected_.beginning_of_frame = true;
+  expected_.flexible_mode = true;
+  expected_.temporal_idx = kTemporalIdx;
+  expected_.temporal_up_switch = kUbit ? true : false;
+  expected_.spatial_idx = kSpatialIdx;
+  expected_.inter_layer_predicted = kDbit ? true : false;
+  ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseRefIdx) {
+  const uint8_t kHeaderLength = 6;
+  const int16_t kPictureId = 17;
+  const uint8_t kPdiff1 = 17;
+  const uint8_t kPdiff2 = 18;
+  const uint8_t kPdiff3 = 127;
+  uint8_t packet[13] = {0};
+  packet[0] = 0xD8;  // I:1 P:1 L:0 F:1 B:1 E:0 V:0 R:0
+  packet[1] = 0x80 | ((kPictureId >> 8) & 0x7F);  // Two byte pictureID.
+  packet[2] = kPictureId;
+  packet[3] = (kPdiff1 << 1) | 1;  // P_DIFF N:1
+  packet[4] = (kPdiff2 << 1) | 1;  // P_DIFF N:1
+  packet[5] = (kPdiff3 << 1) | 0;  // P_DIFF N:0
+
+  // I:1 P:1 L:0 F:1 B:1 E:0 V:0
+  // I:   PICTURE ID:17
+  // I:
+  // P,F: P_DIFF:17  N:1 => refPicId = 17 - 17 = 0
+  // P,F: P_DIFF:18  N:1 => refPicId = (kMaxPictureId + 1) + 17 - 18 = 0x7FFF
+  // P,F: P_DIFF:127 N:0 => refPicId = (kMaxPictureId + 1) + 17 - 127 = 32658
+  expected_.beginning_of_frame = true;
+  expected_.inter_pic_predicted = true;
+  expected_.flexible_mode = true;
+  expected_.picture_id = kPictureId;
+  expected_.num_ref_pics = 3;
+  expected_.pid_diff[0] = kPdiff1;
+  expected_.pid_diff[1] = kPdiff2;
+  expected_.pid_diff[2] = kPdiff3;
+  expected_.ref_picture_id[0] = 0;
+  expected_.ref_picture_id[1] = 0x7FFF;
+  expected_.ref_picture_id[2] = 32658;
+  ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseRefIdxFailsWithNoPictureId) {
+  const uint8_t kPdiff = 3;
+  uint8_t packet[13] = {0};
+  packet[0] = 0x58;           // I:0 P:1 L:0 F:1 B:1 E:0 V:0 R:0
+  packet[1] = (kPdiff << 1);  // P,F: P_DIFF:3 N:0
+
+  RtpDepacketizer::ParsedPayload parsed;
+  EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseRefIdxFailsWithTooManyRefPics) {
+  const uint8_t kPdiff = 3;
+  uint8_t packet[13] = {0};
+  packet[0] = 0xD8;                  // I:1 P:1 L:0 F:1 B:1 E:0 V:0 R:0
+  packet[1] = kMaxOneBytePictureId;  // I: PICTURE ID:127
+  packet[2] = (kPdiff << 1) | 1;     // P,F: P_DIFF:3 N:1
+  packet[3] = (kPdiff << 1) | 1;     // P,F: P_DIFF:3 N:1
+  packet[4] = (kPdiff << 1) | 1;     // P,F: P_DIFF:3 N:1
+  packet[5] = (kPdiff << 1) | 0;     // P,F: P_DIFF:3 N:0
+
+  RtpDepacketizer::ParsedPayload parsed;
+  EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseSsData) {
+  const uint8_t kHeaderLength = 6;
+  const uint8_t kYbit = 0;
+  const size_t kNs = 2;
+  const size_t kNg = 2;
+  uint8_t packet[23] = {0};
+  packet[0] = 0x0A;  // I:0 P:0 L:0 F:0 B:1 E:0 V:1 R:0
+  packet[1] = ((kNs - 1) << 5) | (kYbit << 4) | (1 << 3);  // N_S Y G:1 -
+  packet[2] = kNg;                                         // N_G
+  packet[3] = (0 << 5) | (1 << 4) | (0 << 2) | 0;          // T:0 U:1 R:0 -
+  packet[4] = (2 << 5) | (0 << 4)
| (1 << 2) | 0;  // T:2 U:0 R:1 -
+  packet[5] = 33;
+
+  expected_.beginning_of_frame = true;
+  expected_.ss_data_available = true;
+  expected_.num_spatial_layers = kNs;
+  expected_.spatial_layer_resolution_present = kYbit ? true : false;
+  expected_.gof.num_frames_in_gof = kNg;
+  expected_.gof.temporal_idx[0] = 0;
+  expected_.gof.temporal_idx[1] = 2;
+  expected_.gof.temporal_up_switch[0] = true;
+  expected_.gof.temporal_up_switch[1] = false;
+  expected_.gof.num_ref_pics[0] = 0;
+  expected_.gof.num_ref_pics[1] = 1;
+  expected_.gof.pid_diff[1][0] = 33;
+  ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseFirstPacketInKeyFrame) {
+  uint8_t packet[2] = {0};
+  packet[0] = 0x08;  // I:0 P:0 L:0 F:0 B:1 E:0 V:0 R:0
+
+  RtpDepacketizer::ParsedPayload parsed;
+  ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+  EXPECT_EQ(kVideoFrameKey, parsed.frame_type);
+  EXPECT_TRUE(parsed.type.Video.isFirstPacket);
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseLastPacketInDeltaFrame) {
+  uint8_t packet[2] = {0};
+  packet[0] = 0x44;  // I:0 P:1 L:0 F:0 B:0 E:1 V:0 R:0
+
+  RtpDepacketizer::ParsedPayload parsed;
+  ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+  EXPECT_EQ(kVideoFrameDelta, parsed.frame_type);
+  EXPECT_FALSE(parsed.type.Video.isFirstPacket);
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseResolution) {
+  const uint16_t kWidth[2] = {640, 1280};
+  const uint16_t kHeight[2] = {360, 720};
+  uint8_t packet[20] = {0};
+  packet[0] = 0x0A;  // I:0 P:0 L:0 F:0 B:1 E:0 V:1 R:0
+  packet[1] = (1 << 5) | (1 << 4) | 0;  // N_S:1 Y:1 G:0
+  packet[2] = kWidth[0] >> 8;
+  packet[3] = kWidth[0] & 0xFF;
+  packet[4] = kHeight[0] >> 8;
+  packet[5] = kHeight[0] & 0xFF;
+  packet[6] = kWidth[1] >> 8;
+  packet[7] = kWidth[1] & 0xFF;
+  packet[8] = kHeight[1] >> 8;
+  packet[9] = kHeight[1] & 0xFF;
+
+  RtpDepacketizer::ParsedPayload parsed;
+  ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+  EXPECT_EQ(kWidth[0], parsed.type.Video.width);
+  EXPECT_EQ(kHeight[0], parsed.type.Video.height);
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseFailsForNoPayloadLength) {
+  uint8_t packet[1] = {0};
+  RtpDepacketizer::ParsedPayload parsed;
+  EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, 0));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseFailsForTooShortBufferToFitPayload) {
+  const uint8_t kHeaderLength = 1;
+  uint8_t packet[kHeaderLength] = {0};
+  RtpDepacketizer::ParsedPayload parsed;
+  EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+}
+
+}  // namespace webrtc
diff --git a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
index 3dd0da167348..c5af226ca69f
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -14,11 +14,15 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/trace_event.h"
 #include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
 #include "webrtc/modules/rtp_rtcp/source/byte_io.h"
 #include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
 #include
"webrtc/system_wrappers/interface/critical_section_wrapper.h" #include "webrtc/system_wrappers/interface/logging.h" @@ -323,7 +327,7 @@ bool RTPSenderVideo::Send(const RtpVideoCodecTypes videoType, // output multiple partitions for VP8. Should remove below check after the // issue is fixed. const RTPFragmentationHeader* frag = - (videoType == kRtpVideoVp8 || videoType == kRtpVideoVp9) ? NULL : fragmentation; + (videoType == kRtpVideoVp8) ? NULL : fragmentation; packetizer->SetPayloadData(data, payload_bytes_to_send, frag); @@ -360,7 +364,7 @@ bool RTPSenderVideo::Send(const RtpVideoCodecTypes videoType, // a lock. It'll be a no-op if it's not registered. // TODO(guoweis): For now, all packets sent will carry the CVO such that // the RTP header length is consistent, although the receiver side will - // only exam the packets with market bit set. + // only exam the packets with marker bit set. size_t packetSize = payloadSize + rtp_header_length; RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize); RTPHeader rtp_header; diff --git a/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc b/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc index 2c4fb09fa319..6c9295a1c9f2 100644 --- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc +++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc @@ -164,7 +164,7 @@ int32_t DeviceInfoLinux::GetDeviceName( } else { // if there's no bus info to use for uniqueId, invent one - and it has to be repeatable if (snprintf(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length, "fake_%u", device_index) >= - deviceUniqueIdUTF8Length) + (int) deviceUniqueIdUTF8Length) { WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id, "buffer passed is too small"); diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h index c126147f18f6..cecfeb1eec7e 100644 --- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h +++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h @@ -43,16 +43,35 @@ struct CodecSpecificInfoVP8 { }; struct CodecSpecificInfoVP9 { - bool hasReceivedSLI; - uint8_t pictureIdSLI; - bool hasReceivedRPSI; - uint64_t pictureIdRPSI; - int16_t pictureId; // Negative value to skip pictureId. - bool nonReference; - uint8_t temporalIdx; - bool layerSync; - int tl0PicIdx; // Negative value to skip tl0PicIdx. - int8_t keyIdx; // Negative value to skip keyIdx. + bool has_received_sli; + uint8_t picture_id_sli; + bool has_received_rpsi; + uint64_t picture_id_rpsi; + int16_t picture_id; // Negative value to skip pictureId. + + bool inter_pic_predicted; // This layer frame is dependent on previously + // coded frame(s). + bool flexible_mode; + bool ss_data_available; + + int tl0_pic_idx; // Negative value to skip tl0PicIdx. + uint8_t temporal_idx; + uint8_t spatial_idx; + bool temporal_up_switch; + bool inter_layer_predicted; // Frame is dependent on directly lower spatial + // layer frame. + uint8_t gof_idx; + + // SS data. + size_t num_spatial_layers; // Always populated. + bool spatial_layer_resolution_present; + uint16_t width[kMaxVp9NumberOfSpatialLayers]; + uint16_t height[kMaxVp9NumberOfSpatialLayers]; + GofInfoVP9 gof; + + // Frame reference data. 
+  uint8_t num_ref_pics;
+  uint8_t p_diff[kMaxVp9RefPics];
 };
 
 struct CodecSpecificInfoGeneric {
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
new file mode 100644
index 000000000000..53e6647cfdeb
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+#include <algorithm>
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
+#include "webrtc/base/checks.h"
+
+namespace webrtc {
+
+ScreenshareLayersVP9::ScreenshareLayersVP9(uint8_t num_layers)
+    : num_layers_(num_layers),
+      start_layer_(0),
+      last_timestamp_(0),
+      timestamp_initialized_(false) {
+  DCHECK_GT(num_layers, 0);
+  DCHECK_LE(num_layers, kMaxVp9NumberOfSpatialLayers);
+  memset(bits_used_, 0, sizeof(bits_used_));
+  memset(threshold_kbps_, 0, sizeof(threshold_kbps_));
+}
+
+uint8_t ScreenshareLayersVP9::GetStartLayer() const {
+  return start_layer_;
+}
+
+void ScreenshareLayersVP9::ConfigureBitrate(int threshold_kbps,
+                                            uint8_t layer_id) {
+  // The upper layer is always the layer we spill frames
+  // to when the bitrate becomes too high, therefore setting
+  // a max limit is not allowed. The top layer bitrate is
+  // never used either so configuring it makes no difference.
+  DCHECK_LT(layer_id, num_layers_ - 1);
+  threshold_kbps_[layer_id] = threshold_kbps;
+}
+
+void ScreenshareLayersVP9::LayerFrameEncoded(unsigned int size_bytes,
+                                             uint8_t layer_id) {
+  DCHECK_LT(layer_id, num_layers_);
+  bits_used_[layer_id] += size_bytes * 8;
+}
+
+VP9EncoderImpl::SuperFrameRefSettings
+ScreenshareLayersVP9::GetSuperFrameSettings(uint32_t timestamp,
+                                            bool is_keyframe) {
+  VP9EncoderImpl::SuperFrameRefSettings settings;
+  if (!timestamp_initialized_) {
+    last_timestamp_ = timestamp;
+    timestamp_initialized_ = true;
+  }
+  float time_diff = (timestamp - last_timestamp_) / 90.f;
+  float total_bits_used = 0;
+  float total_threshold_kbps = 0;
+  start_layer_ = 0;
+
+  // Up to (num_layers - 1) because we only have
+  // (num_layers - 1) thresholds to check.
+  for (int layer_id = 0; layer_id < num_layers_ - 1; ++layer_id) {
+    bits_used_[layer_id] = std::max(
+        0.f, bits_used_[layer_id] - time_diff * threshold_kbps_[layer_id]);
+    total_bits_used += bits_used_[layer_id];
+    total_threshold_kbps += threshold_kbps_[layer_id];
+
+    // If this is a keyframe then there should be no
+    // references to any previous frames.
+    if (!is_keyframe) {
+      settings.layer[layer_id].ref_buf1 = layer_id;
+      if (total_bits_used > total_threshold_kbps * 1000)
+        start_layer_ = layer_id + 1;
+    }
+
+    settings.layer[layer_id].upd_buf = layer_id;
+  }
+  // Since the above loop does not iterate over the last layer
+  // the reference of the last layer has to be set after the loop,
+  // and if this is a keyframe there should be no references to
+  // any previous frames.
+  if (!is_keyframe)
+    settings.layer[num_layers_ - 1].ref_buf1 = num_layers_ - 1;
+
+  settings.layer[num_layers_ - 1].upd_buf = num_layers_ - 1;
+  settings.is_keyframe = is_keyframe;
+  settings.start_layer = start_layer_;
+  settings.stop_layer = num_layers_ - 1;
+  last_timestamp_ = timestamp;
+  return settings;
+}
+
+}  // namespace webrtc
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h
new file mode 100644
index 000000000000..5a901ae359c5
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
+
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
+
+namespace webrtc {
+
+class ScreenshareLayersVP9 {
+ public:
+  explicit ScreenshareLayersVP9(uint8_t num_layers);
+
+  // The target bitrate for layer with id layer_id.
+  void ConfigureBitrate(int threshold_kbps, uint8_t layer_id);
+
+  // The current start layer.
+  uint8_t GetStartLayer() const;
+
+  // Update the layer with the size of the layer frame.
+  void LayerFrameEncoded(unsigned int size_bytes, uint8_t layer_id);
+
+  // Get the layer settings for the next superframe.
+  //
+  // In short, each time GetSuperFrameSettings is called the bitrate of
+  // every layer is calculated, and if the cumulative bitrate up to and
+  // including a layer exceeds the configured cumulative bitrate
+  // (set with ConfigureBitrate), the resulting encoding settings for
+  // the superframe will only encode layers above that layer.
+  VP9EncoderImpl::SuperFrameRefSettings GetSuperFrameSettings(
+      uint32_t timestamp,
+      bool is_keyframe);
+
+ private:
+  // Number of layers in use.
+  uint8_t num_layers_;
+
+  // The index of the first layer to encode.
+  uint8_t start_layer_;
+
+  // Cumulative target kbps for the different layers.
+  float threshold_kbps_[kMaxVp9NumberOfSpatialLayers - 1];
+
+  // How many bits have been used for a certain layer. Increased in
+  // LayerFrameEncoded() by the size of the encoded frame and decreased in
+  // GetSuperFrameSettings() depending on the time between frames.
+  float bits_used_[kMaxVp9NumberOfSpatialLayers];
+
+  // Timestamp of last frame.
+  uint32_t last_timestamp_;
+
+  // If the last_timestamp_ has been set.
+  bool timestamp_initialized_;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc
new file mode 100644
index 000000000000..5eb7b237ac60
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "vpx/vp8cx.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+typedef VP9EncoderImpl::SuperFrameRefSettings Settings;
+
+const uint32_t kTickFrequency = 90000;
+
+class ScreenshareLayerTestVP9 : public ::testing::Test {
+ protected:
+  ScreenshareLayerTestVP9() : clock_(0) {}
+  virtual ~ScreenshareLayerTestVP9() {}
+
+  void InitScreenshareLayers(int layers) {
+    layers_.reset(new ScreenshareLayersVP9(layers));
+  }
+
+  void ConfigureBitrateForLayer(int kbps, uint8_t layer_id) {
+    layers_->ConfigureBitrate(kbps, layer_id);
+  }
+
+  void AdvanceTime(int64_t milliseconds) {
+    clock_.AdvanceTimeMilliseconds(milliseconds);
+  }
+
+  void AddKilobitsToLayer(int kilobits, uint8_t layer_id) {
+    layers_->LayerFrameEncoded(kilobits * 1000 / 8, layer_id);
+  }
+
+  void EqualRefsForLayer(const Settings& actual, uint8_t layer_id) {
+    EXPECT_EQ(expected_.layer[layer_id].upd_buf,
+              actual.layer[layer_id].upd_buf);
+    EXPECT_EQ(expected_.layer[layer_id].ref_buf1,
+              actual.layer[layer_id].ref_buf1);
+    EXPECT_EQ(expected_.layer[layer_id].ref_buf2,
+              actual.layer[layer_id].ref_buf2);
+    EXPECT_EQ(expected_.layer[layer_id].ref_buf3,
+              actual.layer[layer_id].ref_buf3);
+  }
+
+  void EqualRefs(const Settings& actual) {
+    for (unsigned int layer_id = 0; layer_id < kMaxVp9NumberOfSpatialLayers;
+         ++layer_id) {
+      EqualRefsForLayer(actual, layer_id);
+    }
+  }
+
+  void EqualStartStopKeyframe(const Settings& actual) {
+    EXPECT_EQ(expected_.start_layer, actual.start_layer);
+    EXPECT_EQ(expected_.stop_layer, actual.stop_layer);
+    EXPECT_EQ(expected_.is_keyframe, actual.is_keyframe);
+  }
+
+  // Check that the settings returned by GetSuperFrameSettings() are
+  // equal to the expected_ settings.
+  void EqualToExpected() {
+    uint32_t frame_timestamp_ =
+        clock_.TimeInMilliseconds() * (kTickFrequency / 1000);
+    Settings actual =
+        layers_->GetSuperFrameSettings(frame_timestamp_, expected_.is_keyframe);
+    EqualRefs(actual);
+    EqualStartStopKeyframe(actual);
+  }
+
+  Settings expected_;
+  SimulatedClock clock_;
+  rtc::scoped_ptr<ScreenshareLayersVP9> layers_;
+};
+
+TEST_F(ScreenshareLayerTestVP9, NoRefsOnKeyFrame) {
+  const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
+  InitScreenshareLayers(kNumLayers);
+  expected_.start_layer = 0;
+  expected_.stop_layer = kNumLayers - 1;
+
+  for (int l = 0; l < kNumLayers; ++l) {
+    expected_.layer[l].upd_buf = l;
+  }
+  expected_.is_keyframe = true;
+  EqualToExpected();
+
+  for (int l = 0; l < kNumLayers; ++l) {
+    expected_.layer[l].ref_buf1 = l;
+  }
+  expected_.is_keyframe = false;
+  EqualToExpected();
+}
+
+// Test if it is possible to send at a high bitrate (over the threshold)
+// after a longer period of low bitrate. This should not be possible.
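+// The layer thresholds act as a leaky bucket: in GetSuperFrameSettings()
+// the accounted bits drain at threshold_kbps per unit of elapsed time, so
+// e.g. a 200 ms gap at a 100 kbps threshold only forgives 20 kbit of
+// earlier overshoot rather than banking unused budget.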
+TEST_F(ScreenshareLayerTestVP9, DontAccumelateAvailableBitsOverTime) {
+  InitScreenshareLayers(2);
+  ConfigureBitrateForLayer(100, 0);
+
+  expected_.layer[0].upd_buf = 0;
+  expected_.layer[0].ref_buf1 = 0;
+  expected_.layer[1].upd_buf = 1;
+  expected_.layer[1].ref_buf1 = 1;
+  expected_.start_layer = 0;
+  expected_.stop_layer = 1;
+
+  // Send 10 frames at a low bitrate (50 kbps).
+  for (int i = 0; i < 10; ++i) {
+    AdvanceTime(200);
+    EqualToExpected();
+    AddKilobitsToLayer(10, 0);
+  }
+
+  AdvanceTime(200);
+  EqualToExpected();
+  AddKilobitsToLayer(301, 0);
+
+  // Send 10 frames at a high bitrate (200 kbps).
+  expected_.start_layer = 1;
+  for (int i = 0; i < 10; ++i) {
+    AdvanceTime(200);
+    EqualToExpected();
+    AddKilobitsToLayer(40, 1);
+  }
+}
+
+// Test that used bits are accumulated over layers, as they should be.
+TEST_F(ScreenshareLayerTestVP9, AccumelateUsedBitsOverLayers) {
+  const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
+  InitScreenshareLayers(kNumLayers);
+  for (int l = 0; l < kNumLayers - 1; ++l)
+    ConfigureBitrateForLayer(100, l);
+  for (int l = 0; l < kNumLayers; ++l) {
+    expected_.layer[l].upd_buf = l;
+    expected_.layer[l].ref_buf1 = l;
+  }
+
+  expected_.start_layer = 0;
+  expected_.stop_layer = kNumLayers - 1;
+  EqualToExpected();
+
+  for (int layer = 0; layer < kNumLayers - 1; ++layer) {
+    expected_.start_layer = layer;
+    EqualToExpected();
+    AddKilobitsToLayer(101, layer);
+  }
+}
+
+// General testing of the bitrate controller.
+TEST_F(ScreenshareLayerTestVP9, 2LayerBitrate) {
+  InitScreenshareLayers(2);
+  ConfigureBitrateForLayer(100, 0);
+
+  expected_.layer[0].upd_buf = 0;
+  expected_.layer[1].upd_buf = 1;
+  expected_.layer[0].ref_buf1 = -1;
+  expected_.layer[1].ref_buf1 = -1;
+  expected_.start_layer = 0;
+  expected_.stop_layer = 1;
+
+  expected_.is_keyframe = true;
+  EqualToExpected();
+  AddKilobitsToLayer(100, 0);
+
+  expected_.layer[0].ref_buf1 = 0;
+  expected_.layer[1].ref_buf1 = 1;
+  expected_.is_keyframe = false;
+  AdvanceTime(199);
+  EqualToExpected();
+  AddKilobitsToLayer(100, 0);
+
+  expected_.start_layer = 1;
+  for (int frame = 0; frame < 3; ++frame) {
+    AdvanceTime(200);
+    EqualToExpected();
+    AddKilobitsToLayer(100, 1);
+  }
+
+  // Just before enough bits become available for L0, @0.999 seconds.
+  AdvanceTime(199);
+  EqualToExpected();
+  AddKilobitsToLayer(100, 1);
+
+  // Just after enough bits become available for L0, @1.001 seconds.
+  expected_.start_layer = 0;
+  AdvanceTime(2);
+  EqualToExpected();
+  AddKilobitsToLayer(100, 0);
+
+  // Keyframes always encode all layers, even if it is over budget.
+  expected_.layer[0].ref_buf1 = -1;
+  expected_.layer[1].ref_buf1 = -1;
+  expected_.is_keyframe = true;
+  AdvanceTime(499);
+  EqualToExpected();
+  expected_.layer[0].ref_buf1 = 0;
+  expected_.layer[1].ref_buf1 = 1;
+  expected_.start_layer = 1;
+  expected_.is_keyframe = false;
+  EqualToExpected();
+  AddKilobitsToLayer(100, 0);
+
+  // 400 kb in L0 --> it takes until the @3 second mark to fall below the
+  // threshold. Just before, @2.999 seconds.
+  expected_.is_keyframe = false;
+  AdvanceTime(1499);
+  EqualToExpected();
+  AddKilobitsToLayer(100, 1);
+
+  // Just after, @3.001 seconds.
+  expected_.start_layer = 0;
+  AdvanceTime(2);
+  EqualToExpected();
+  AddKilobitsToLayer(100, 0);
+}
+
+// General testing of the bitrate controller.
+TEST_F(ScreenshareLayerTestVP9, 3LayerBitrate) {
+  InitScreenshareLayers(3);
+  ConfigureBitrateForLayer(100, 0);
+  ConfigureBitrateForLayer(100, 1);
+
+  for (int l = 0; l < 3; ++l) {
+    expected_.layer[l].upd_buf = l;
+    expected_.layer[l].ref_buf1 = l;
+  }
+  expected_.start_layer = 0;
+  expected_.stop_layer = 2;
+
+  EqualToExpected();
+  AddKilobitsToLayer(105, 0);
+  AddKilobitsToLayer(30, 1);
+
+  AdvanceTime(199);
+  EqualToExpected();
+  AddKilobitsToLayer(105, 0);
+  AddKilobitsToLayer(30, 1);
+
+  expected_.start_layer = 1;
+  AdvanceTime(200);
+  EqualToExpected();
+  AddKilobitsToLayer(130, 1);
+
+  expected_.start_layer = 2;
+  AdvanceTime(200);
+  EqualToExpected();
+
+  // 400 kb in L1 --> @1.0 second mark to fall below threshold.
+  // 210 kb in L0 --> @1.1 second mark to fall below threshold.
+  // Just before L1, @0.999 seconds.
+  AdvanceTime(399);
+  EqualToExpected();
+
+  // Just after L1, @1.001 seconds.
+  expected_.start_layer = 1;
+  AdvanceTime(2);
+  EqualToExpected();
+
+  // Just before L0, @1.099 seconds.
+  AdvanceTime(99);
+  EqualToExpected();
+
+  // Just after L0, @1.101 seconds.
+  expected_.start_layer = 0;
+  AdvanceTime(2);
+  EqualToExpected();
+
+  // @1.2 seconds.
+  AdvanceTime(99);
+  EqualToExpected();
+  AddKilobitsToLayer(200, 1);
+
+  expected_.is_keyframe = true;
+  for (int l = 0; l < 3; ++l)
+    expected_.layer[l].ref_buf1 = -1;
+  AdvanceTime(200);
+  EqualToExpected();
+
+  expected_.is_keyframe = false;
+  expected_.start_layer = 2;
+  for (int l = 0; l < 3; ++l)
+    expected_.layer[l].ref_buf1 = l;
+  AdvanceTime(200);
+  EqualToExpected();
+}
+
+// Test that the bitrate calculations are
+// correct when the timestamp wraps.
+TEST_F(ScreenshareLayerTestVP9, TimestampWrap) {
+  InitScreenshareLayers(2);
+  ConfigureBitrateForLayer(100, 0);
+
+  expected_.layer[0].upd_buf = 0;
+  expected_.layer[0].ref_buf1 = 0;
+  expected_.layer[1].upd_buf = 1;
+  expected_.layer[1].ref_buf1 = 1;
+  expected_.start_layer = 0;
+  expected_.stop_layer = 1;
+
+  // Advance time to just before the timestamp wraps.
+  AdvanceTime(std::numeric_limits<uint32_t>::max() / (kTickFrequency / 1000));
+  EqualToExpected();
+  AddKilobitsToLayer(200, 0);
+
+  // Wrap.
+  expected_.start_layer = 1;
+  AdvanceTime(1);
+  EqualToExpected();
+}
+
+}  // namespace webrtc
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9.gyp b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
index 795db62a3024..9e02cd6e8d83
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
@@ -22,12 +22,20 @@
     'conditions': [
       ['build_libvpx==1', {
         'dependencies': [
-          '<(libvpx_dir)/libvpx.gyp:libvpx',
+          '<(libvpx_dir)/libvpx.gyp:libvpx_new',
         ],
-      }],
+      }, {
+        'include_dirs': [
+          '../../../../../../../libvpx',
+        ],
+      }],
       ['build_vp9==1', {
         'sources': [
           'include/vp9.h',
+          'screenshare_layers.cc',
+          'screenshare_layers.h',
+          'vp9_frame_buffer_pool.cc',
+          'vp9_frame_buffer_pool.h',
           'vp9_impl.cc',
           'vp9_impl.h',
         ],
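Reviewer note: the TimestampWrap test above depends on 90 kHz RTP tick arithmetic staying correct across the 32-bit wrap. A hedged illustration of that arithmetic (the constant names are mine, not the codec's):

```cpp
#include <cstdint>

const uint32_t kTicksPerMs = 90000 / 1000;  // 90 kHz RTP clock.

// Unsigned subtraction yields the correct elapsed time even when |now_ticks|
// has wrapped past zero and is numerically smaller than |last_ticks|.
uint32_t ElapsedMs(uint32_t last_ticks, uint32_t now_ticks) {
  return (now_ticks - last_ticks) / kTicksPerMs;
}
// A 32-bit 90 kHz timestamp wraps after 2^32 / 90000 seconds, roughly
// 13.25 hours, which is what the test drives the simulated clock up to.
```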
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
new file mode 100644
index 000000000000..fceb4bf9d329
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
+
+#include "vpx/vpx_codec.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_frame_buffer.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+
+namespace webrtc {
+
+uint8_t* Vp9FrameBufferPool::Vp9FrameBuffer::GetData() {
+  return (uint8_t*)(data_.data());
+}
+
+size_t Vp9FrameBufferPool::Vp9FrameBuffer::GetDataSize() const {
+  return data_.size();
+}
+
+void Vp9FrameBufferPool::Vp9FrameBuffer::SetSize(size_t size) {
+  data_.SetSize(size);
+}
+
+bool Vp9FrameBufferPool::InitializeVpxUsePool(
+    vpx_codec_ctx* vpx_codec_context) {
+  DCHECK(vpx_codec_context);
+  // Tell libvpx to use this pool.
+  if (vpx_codec_set_frame_buffer_functions(
+          // In which context to use these callback functions.
+          vpx_codec_context,
+          // Called by libvpx when it needs another frame buffer.
+          &Vp9FrameBufferPool::VpxGetFrameBuffer,
+          // Called by libvpx when it no longer uses a frame buffer.
+          &Vp9FrameBufferPool::VpxReleaseFrameBuffer,
+          // |this| will be passed as |user_priv| to VpxGetFrameBuffer.
+          this)) {
+    // Failed to configure libvpx to use Vp9FrameBufferPool.
+    return false;
+  }
+  return true;
+}
+
+rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
+Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
+  DCHECK_GT(min_size, 0u);
+  rtc::scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
+  {
+    rtc::CritScope cs(&buffers_lock_);
+    // Do we have a buffer we can recycle?
+    for (const auto& buffer : allocated_buffers_) {
+      if (buffer->HasOneRef()) {
+        available_buffer = buffer;
+        break;
+      }
+    }
+    // Otherwise create one.
+    if (available_buffer == nullptr) {
+      available_buffer = new rtc::RefCountedObject<Vp9FrameBuffer>();
+      allocated_buffers_.push_back(available_buffer);
+      if (allocated_buffers_.size() > max_num_buffers_) {
+        LOG(LS_WARNING)
+            << allocated_buffers_.size() << " Vp9FrameBuffers have been "
+            << "allocated by a Vp9FrameBufferPool (exceeding what is "
+            << "considered reasonable, " << max_num_buffers_ << ").";
+        RTC_NOTREACHED();
+      }
+    }
+  }
+
+  available_buffer->SetSize(min_size);
+  return available_buffer;
+}
+
+int Vp9FrameBufferPool::GetNumBuffersInUse() const {
+  int num_buffers_in_use = 0;
+  rtc::CritScope cs(&buffers_lock_);
+  for (const auto& buffer : allocated_buffers_) {
+    if (!buffer->HasOneRef())
+      ++num_buffers_in_use;
+  }
+  return num_buffers_in_use;
+}
+
+void Vp9FrameBufferPool::ClearPool() {
+  rtc::CritScope cs(&buffers_lock_);
+  allocated_buffers_.clear();
+}
+
+// static
+int32_t Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
+                                              size_t min_size,
+                                              vpx_codec_frame_buffer* fb) {
+  DCHECK(user_priv);
+  DCHECK(fb);
+  Vp9FrameBufferPool* pool = static_cast<Vp9FrameBufferPool*>(user_priv);
+
+  rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
+  fb->data = buffer->GetData();
+  fb->size = buffer->GetDataSize();
+  // Store Vp9FrameBuffer* in |priv| for use in VpxReleaseFrameBuffer.
+  // This also makes vpx_codec_get_frame return images with their |fb_priv|
+  // set to |buffer|, which is important for external reference counting.
+  // Release from refptr so that the buffer's |ref_count_| remains 1 when
+  // |buffer| goes out of scope.
+  fb->priv = static_cast<void*>(buffer.release());
+  return 0;
+}
+
+// static
+int32_t Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
+                                                  vpx_codec_frame_buffer* fb) {
+  DCHECK(user_priv);
+  DCHECK(fb);
+  Vp9FrameBuffer* buffer = static_cast<Vp9FrameBuffer*>(fb->priv);
+  if (buffer != nullptr) {
+    buffer->Release();
+    // When libvpx fails to decode and the caller keeps trying (and failing),
+    // libvpx can for some reason try to release the same buffer multiple
+    // times. Setting |priv| to null protects against repeated Release calls.
+    fb->priv = nullptr;
+  }
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h
new file mode 100644
index 000000000000..97ed41a0154a
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_FRAME_BUFFER_POOL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_FRAME_BUFFER_POOL_H_
+
+#include <vector>
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/base/buffer.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/refcount.h"
+#include "webrtc/base/scoped_ref_ptr.h"
+
+struct vpx_codec_ctx;
+struct vpx_codec_frame_buffer;
+
+namespace webrtc {
+
+// This memory pool is used to serve buffers to libvpx for decoding purposes
+// in VP9, which is set up in InitializeVpxUsePool. After the initialization,
+// any time libvpx wants to decode a frame it will use buffers provided and
+// released through VpxGetFrameBuffer and VpxReleaseFrameBuffer.
+// The benefit of owning the pool that libvpx relies on for decoding is that
+// the decoded frames returned by libvpx (from vpx_codec_get_frame) use parts
+// of our buffers for the decoded image data. By retaining ownership of this
+// buffer using scoped_refptr, the image buffer can be reused by VideoFrames
+// and no frame copy has to occur during decoding and frame delivery.
+//
+// Pseudo example usage case:
+//   Vp9FrameBufferPool pool;
+//   pool.InitializeVpxUsePool(decoder_ctx);
+//   ...
+//
+//   // During decoding, libvpx will get and release buffers from the pool.
+//   vpx_codec_decode(decoder_ctx, ...);
+//
+//   vpx_image_t* img = vpx_codec_get_frame(decoder_ctx, &iter);
+//   // Important to use scoped_refptr to protect it against being recycled by
+//   // the pool.
+//   scoped_refptr<Vp9FrameBuffer> img_buffer = (Vp9FrameBuffer*)img->fb_priv;
+//   ...
+//
+//   // Destroying the codec will make libvpx release any buffers it was using.
+//   vpx_codec_destroy(decoder_ctx);
+class Vp9FrameBufferPool {
+ public:
+  class Vp9FrameBuffer : public rtc::RefCountInterface {
+   public:
+    uint8_t* GetData();
+    size_t GetDataSize() const;
+    void SetSize(size_t size);
+
+    virtual bool HasOneRef() const = 0;
+
+   private:
+    // Data as an easily resizable buffer.
+    rtc::Buffer data_;
+  };
+
+  // Configures libvpx to, in the specified context, use this memory pool for
+  // buffers used to decompress frames. This is only supported for VP9.
+  bool InitializeVpxUsePool(vpx_codec_ctx* vpx_codec_context);
+
+  // Gets a frame buffer of at least |min_size|, recycling an available one or
+  // creating a new one. When no longer referenced from the outside the buffer
+  // becomes recyclable.
+  rtc::scoped_refptr<Vp9FrameBuffer> GetFrameBuffer(size_t min_size);
+  // Gets the number of buffers currently in use (not ready to be recycled).
+  int GetNumBuffersInUse() const;
+  // Releases allocated buffers, deleting available buffers. Buffers in use
+  // are not deleted until they are no longer referenced.
+  void ClearPool();
+
+  // InitializeVpxUsePool configures libvpx to call this function when it
+  // needs a new frame buffer. Parameters:
+  // |user_priv| Private data passed to libvpx, InitializeVpxUsePool sets it
+  //             up to be a pointer to the pool.
+  // |min_size|  Minimum size needed by libvpx (to decompress a frame).
+  // |fb|        Pointer to the libvpx frame buffer object, this is updated to
+  //             use the pool's buffer.
+  // Returns 0 on success. Returns < 0 on failure.
+  static int32_t VpxGetFrameBuffer(void* user_priv,
+                                   size_t min_size,
+                                   vpx_codec_frame_buffer* fb);
+
+  // InitializeVpxUsePool configures libvpx to call this function when it has
+  // finished using one of the pool's frame buffers. Parameters:
+  // |user_priv| Private data passed to libvpx, InitializeVpxUsePool sets it
+  //             up to be a pointer to the pool.
+  // |fb|        Pointer to the libvpx frame buffer object, its |priv| will be
+  //             a pointer to one of the pool's Vp9FrameBuffer.
+  static int32_t VpxReleaseFrameBuffer(void* user_priv,
+                                       vpx_codec_frame_buffer* fb);
+
+ private:
+  // Protects |allocated_buffers_|.
+  mutable rtc::CriticalSection buffers_lock_;
+  // All buffers, in use or ready to be recycled.
+  std::vector<rtc::scoped_refptr<Vp9FrameBuffer>> allocated_buffers_
+      GUARDED_BY(buffers_lock_);
+  // If more buffers than this are allocated we print warnings and crash if
+  // in debug mode.
+  static const size_t max_num_buffers_ = 10;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_FRAME_BUFFER_POOL_H_
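Reviewer note: below is a condensed, self-contained version of the header's "pseudo example usage case" with the decode step filled in. It is a sketch under the assumption that the decoder context is already initialized and attached to the pool via InitializeVpxUsePool(); error handling is minimal, and the function name is mine.

```cpp
#include "vpx/vpx_decoder.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"

namespace webrtc {

// Decodes one frame and returns the pool buffer backing the decoded image,
// or nullptr on failure. Holding the returned refptr keeps the pool from
// recycling the buffer while the decoded image is still in use.
rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer> DecodeOneFrame(
    vpx_codec_ctx_t* decoder_ctx, const uint8_t* payload, size_t size) {
  if (vpx_codec_decode(decoder_ctx, payload,
                       static_cast<unsigned int>(size), nullptr,
                       VPX_DL_REALTIME)) {
    return nullptr;
  }
  vpx_codec_iter_t iter = nullptr;
  vpx_image_t* img = vpx_codec_get_frame(decoder_ctx, &iter);
  if (img == nullptr)
    return nullptr;
  // |fb_priv| was set by VpxGetFrameBuffer when libvpx requested the buffer.
  return rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>(
      static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv));
}

}  // namespace webrtc
```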
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index 310e53af5efd..850403e60e85
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -21,19 +21,49 @@
 #include "vpx/vp8cx.h"
 #include "vpx/vp8dx.h"
 
+#include "webrtc/base/bind.h"
 #include "webrtc/base/checks.h"
+#include "webrtc/base/trace_event.h"
 #include "webrtc/common.h"
 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 #include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
+#include "webrtc/system_wrappers/interface/logging.h"
 #include "webrtc/system_wrappers/interface/tick_util.h"
-#include "webrtc/system_wrappers/interface/trace_event.h"
+
+namespace {
+
+// VP9DecoderImpl::ReturnFrame helper function used with WrappedI420Buffer.
+static void WrappedI420BufferNoLongerUsedCb(
+    webrtc::Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer) {
+  img_buffer->Release();
+}
+
+}  // anonymous namespace
 
 namespace webrtc {
 
+// Only positive speeds; the range for real-time coding currently is 5 - 8.
+// Lower means slower/better quality, higher means faster/lower quality.
+int GetCpuSpeed(int width, int height) {
+  // For smaller resolutions, use a lower speed setting (get some coding gain
+  // at the cost of increased encoding complexity).
+  if (width * height <= 352 * 288)
+    return 5;
+  else
+    return 7;
+}
+
 VP9Encoder* VP9Encoder::Create() {
   return new VP9EncoderImpl();
 }
 
+void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
+                                                      void* user_data) {
+  VP9EncoderImpl* enc = static_cast<VP9EncoderImpl*>(user_data);
+  enc->GetEncodedLayerFrame(pkt);
+}
+
 VP9EncoderImpl::VP9EncoderImpl()
     : encoded_image_(),
       encoded_complete_callback_(NULL),
@@ -44,7 +74,15 @@
       rc_max_intra_target_(0),
       encoder_(NULL),
       config_(NULL),
-      raw_(NULL) {
+      raw_(NULL),
+      input_image_(NULL),
+      tl0_pic_idx_(0),
+      frames_since_kf_(0),
+      num_temporal_layers_(0),
+      num_spatial_layers_(0),
+      frames_encoded_(0),
+      // Use two spatial layers when screensharing with flexible mode.
+      spatial_layer_(new ScreenshareLayersVP9(2)) {
   memset(&codec_, 0, sizeof(codec_));
   uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
   srand(seed);
@@ -78,6 +116,91 @@
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
+bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const {
+  // We check target_bitrate_bps of the 0th layer to see if the spatial layers
+  // (i.e. bitrates) were explicitly configured.
+#ifdef LIBVPX_SVC
+  return num_spatial_layers_ > 1 &&
+         codec_.spatialLayers[0].target_bitrate_bps > 0;
+#else
+  return false;
+#endif
+}
+
+bool VP9EncoderImpl::SetSvcRates() {
+  uint8_t i = 0;
+
+  if (ExplicitlyConfiguredSpatialLayers()) {
+#ifdef LIBVPX_SVC
+    if (num_temporal_layers_ > 1) {
+      LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
+                       "spatial layers not implemented yet!";
+      return false;
+    }
+    int total_bitrate_bps = 0;
+    for (i = 0; i < num_spatial_layers_; ++i)
+      total_bitrate_bps += codec_.spatialLayers[i].target_bitrate_bps;
+    // If the total bitrate now differs from what was specified at the
+    // beginning, update the bitrates in the same ratio as before.
+    for (i = 0; i < num_spatial_layers_; ++i) {
+      config_->ss_target_bitrate[i] = config_->layer_target_bitrate[i] =
+          static_cast<int>(static_cast<int64_t>(config_->rc_target_bitrate) *
+                           codec_.spatialLayers[i].target_bitrate_bps /
+                           total_bitrate_bps);
+    }
+#endif
+  } else {
+    float rate_ratio[VPX_MAX_LAYERS] = {0};
+    float total = 0;
+
+    for (i = 0; i < num_spatial_layers_; ++i) {
+      if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
+          svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
+        LOG(LS_ERROR) << "Scaling factors not specified!";
+        return false;
+      }
+      rate_ratio[i] =
+          static_cast<float>(svc_internal_.svc_params.scaling_factor_num[i]) /
+          svc_internal_.svc_params.scaling_factor_den[i];
+      total += rate_ratio[i];
+    }
+
+    for (i = 0; i < num_spatial_layers_; ++i) {
+      config_->ss_target_bitrate[i] = static_cast<int>(
+          config_->rc_target_bitrate * rate_ratio[i] / total);
+      if (num_temporal_layers_ == 1) {
+        config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
+      } else if (num_temporal_layers_ == 2) {
+        config_->layer_target_bitrate[i * num_temporal_layers_] =
+            config_->ss_target_bitrate[i] * 2 / 3;
+        config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+            config_->ss_target_bitrate[i];
+      } else if (num_temporal_layers_ == 3) {
+        config_->layer_target_bitrate[i * num_temporal_layers_] =
+            config_->ss_target_bitrate[i] / 2;
+        config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+            config_->layer_target_bitrate[i * num_temporal_layers_] +
+            (config_->ss_target_bitrate[i] / 4);
+        config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
+            config_->ss_target_bitrate[i];
+      } else {
+        LOG(LS_ERROR) << "Unsupported number of temporal layers: "
+                      << num_temporal_layers_;
+        return false;
+      }
+    }
+  }
+
+  // For now, temporal layers are only supported when using one spatial layer.
+  if (num_spatial_layers_ == 1) {
+    for (i = 0; i < num_temporal_layers_; ++i) {
+      config_->ts_target_bitrate[i] = config_->layer_target_bitrate[i];
+    }
+  }
+
+  return true;
+}
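Reviewer note: a worked example of the cumulative temporal split implemented in SetSvcRates() above. For a spatial layer targeting 1200 kbps with three temporal layers, layer_target_bitrate holds cumulative targets of 600, 900 and 1200 kbps (half, then plus a quarter, then the full target); the values are illustrative, the split rule mirrors the code:

```cpp
#include <cassert>

// Mirrors the num_temporal_layers_ == 3 branch above; entries are cumulative
// (TL0, TL0+TL1, TL0+TL1+TL2), not per-layer increments.
void SplitThreeTemporalLayers(int ss_target_kbps, int out[3]) {
  out[0] = ss_target_kbps / 2;
  out[1] = out[0] + ss_target_kbps / 4;
  out[2] = ss_target_kbps;
}

int main() {
  int kbps[3];
  SplitThreeTemporalLayers(1200, kbps);
  assert(kbps[0] == 600 && kbps[1] == 900 && kbps[2] == 1200);
  return 0;
}
```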
+
 int VP9EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
                              uint32_t new_framerate) {
   if (!inited_) {
@@ -95,6 +218,12 @@
   }
   config_->rc_target_bitrate = new_bitrate_kbit;
   codec_.maxFramerate = new_framerate;
+  spatial_layer_->ConfigureBitrate(new_bitrate_kbit, 0);
+
+  if (!SetSvcRates()) {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+
   // Update encoder context
   if (vpx_codec_enc_config_set(encoder_, config_)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
@@ -121,6 +250,14 @@
   if (number_of_cores < 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
+  if (inst->codecSpecific.VP9.numberOfTemporalLayers > 3) {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+  // libvpx currently supports only one or two spatial layers.
+  if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+
   int retVal = Release();
   if (retVal < 0) {
     return retVal;
@@ -135,6 +272,12 @@
   if (&codec_ != inst) {
     codec_ = *inst;
   }
+
+  num_spatial_layers_ = inst->codecSpecific.VP9.numberOfSpatialLayers;
+  num_temporal_layers_ = inst->codecSpecific.VP9.numberOfTemporalLayers;
+  if (num_temporal_layers_ == 0)
+    num_temporal_layers_ = 1;
+
+  // Random start, 16 bits is enough.
   picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
   // Allocate memory for encoded image
@@ -182,11 +325,57 @@
   } else {
     config_->kf_mode = VPX_KF_DISABLED;
   }
-
+  config_->rc_resize_allowed = inst->codecSpecific.VP9.automaticResizeOn ?
+      1 : 0;
   // Determine number of threads based on the image size and #cores.
   config_->g_threads = NumberOfThreads(config_->g_w,
                                        config_->g_h,
                                        number_of_cores);
+
+  cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h);
+
+  // TODO(asapersson): Check configuration of temporal switch up and increase
+  // pattern length.
+  is_flexible_mode_ = inst->codecSpecific.VP9.flexibleMode;
+  if (is_flexible_mode_) {
+    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+    config_->ts_number_layers = num_temporal_layers_;
+    if (codec_.mode == kScreensharing)
+      spatial_layer_->ConfigureBitrate(inst->startBitrate, 0);
+  } else if (num_temporal_layers_ == 1) {
+    gof_.SetGofInfoVP9(kTemporalStructureMode1);
+    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
+    config_->ts_number_layers = 1;
+    config_->ts_rate_decimator[0] = 1;
+    config_->ts_periodicity = 1;
+    config_->ts_layer_id[0] = 0;
+  } else if (num_temporal_layers_ == 2) {
+    gof_.SetGofInfoVP9(kTemporalStructureMode2);
+    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0101;
+    config_->ts_number_layers = 2;
+    config_->ts_rate_decimator[0] = 2;
+    config_->ts_rate_decimator[1] = 1;
+    config_->ts_periodicity = 2;
+    config_->ts_layer_id[0] = 0;
+    config_->ts_layer_id[1] = 1;
+  } else if (num_temporal_layers_ == 3) {
+    gof_.SetGofInfoVP9(kTemporalStructureMode3);
+    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0212;
+    config_->ts_number_layers = 3;
+    config_->ts_rate_decimator[0] = 4;
+    config_->ts_rate_decimator[1] = 2;
+    config_->ts_rate_decimator[2] = 1;
+    config_->ts_periodicity = 4;
+    config_->ts_layer_id[0] = 0;
+    config_->ts_layer_id[1] = 2;
+    config_->ts_layer_id[2] = 1;
+    config_->ts_layer_id[3] = 2;
+  } else {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+
+  tl0_pic_idx_ = static_cast<uint8_t>(rand());
+
   return InitAndSetControlSettings(inst);
 }
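Reviewer note: an illustration of the three-layer "0212" cadence configured above, assuming a 30 fps input (the fps value is mine, for illustration). Frame n is assigned temporal layer ts_layer_id[n % ts_periodicity], and decoding up to layer k plays back at input_fps / ts_rate_decimator[k]:

```cpp
#include <cstdio>

int main() {
  const int ts_layer_id[4] = {0, 2, 1, 2};     // ts_periodicity == 4.
  const int ts_rate_decimator[3] = {4, 2, 1};  // Per decoded layer set.
  const int input_fps = 30;
  for (int frame = 0; frame < 8; ++frame)
    printf("frame %d -> TL%d\n", frame, ts_layer_id[frame % 4]);
  for (int layer = 0; layer < 3; ++layer)
    printf("decode TL0..TL%d -> %d fps\n", layer,
           input_fps / ts_rate_decimator[layer]);
  return 0;  // TL0 alone: 7 fps (30/4), TL0-TL1: 15 fps, all layers: 30 fps.
}
```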
@@ -206,30 +395,73 @@
 }
 
 int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
+  config_->ss_number_layers = num_spatial_layers_;
+
+  if (ExplicitlyConfiguredSpatialLayers()) {
+#ifdef LIBVPX_SVC
+    for (int i = 0; i < num_spatial_layers_; ++i) {
+      const auto& layer = codec_.spatialLayers[i];
+      svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
+      svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
+      svc_internal_.svc_params.scaling_factor_num[i] =
+          layer.scaling_factor_num;
+      svc_internal_.svc_params.scaling_factor_den[i] =
+          layer.scaling_factor_den;
+    }
+#endif
+  } else {
+    int scaling_factor_num = 256;
+    for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+      svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
+      svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
+      // 1:2 scaling in each dimension.
+      svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
+      svc_internal_.svc_params.scaling_factor_den[i] = 256;
+      if (codec_.mode != kScreensharing)
+        scaling_factor_num /= 2;
+    }
+  }
+
+  if (!SetSvcRates()) {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+
   if (vpx_codec_enc_init(encoder_, vpx_codec_vp9_cx(), config_, 0)) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  // Only positive speeds, currently: 0 - 8.
-  // O means slowest/best quality, 8 means fastest/lower quality.
-  cpu_speed_ = 7;
-  // Note: some of these codec controls still use "VP8" in the control name.
-  // TODO(marpan): Update this in the next/future libvpx version.
   vpx_codec_control(encoder_, VP8E_SET_CPUUSED, cpu_speed_);
   vpx_codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
                     rc_max_intra_target_);
   vpx_codec_control(encoder_, VP9E_SET_AQ_MODE,
                     inst->codecSpecific.VP9.adaptiveQpMode ? 3 : 0);
+
+  vpx_codec_control(
+      encoder_, VP9E_SET_SVC,
+      (num_temporal_layers_ > 1 || num_spatial_layers_ > 1) ? 1 : 0);
+  if (num_temporal_layers_ > 1 || num_spatial_layers_ > 1) {
+    vpx_codec_control(encoder_, VP9E_SET_SVC_PARAMETERS,
+                      &svc_internal_.svc_params);
+  }
+  // Register callback for getting each spatial layer.
+  vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
+      VP9EncoderImpl::EncoderOutputCodedPacketCallback, (void*)(this)};
+  vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK, (void*)(&cbp));
+
   // Control function to set the number of column tiles in encoding a frame,
   // in log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns,
   // 2 = 4 tile columns. The number of tile columns will be capped by the
   // encoder based on image size (minimum width of a tile column is 256
   // pixels, maximum is 4096).
   vpx_codec_control(encoder_, VP9E_SET_TILE_COLUMNS, (config_->g_threads >> 1));
-#if !defined(WEBRTC_ARCH_ARM)
+#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64)
   // Note denoiser is still off by default until further testing/optimization,
   // i.e., codecSpecific.VP9.denoisingOn == 0.
   vpx_codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
                     inst->codecSpecific.VP9.denoisingOn ? 1 : 0);
 #endif
+  if (codec_.mode == kScreensharing) {
+    // Adjust internal parameters to screen content.
+    vpx_codec_control(encoder_, VP9E_SET_TUNE_CONTENT, 1);
+  }
+  // Enable encoder skip of static/low content blocks.
+  vpx_codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
   inited_ = true;
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -268,6 +500,13 @@
   }
   DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
   DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
+
+  // Set the input image for use in the callback; the callback needs some
+  // information from input_image. Only the required fields (such as the
+  // timestamp) could be stored instead of keeping the whole pointer.
+  input_image_ = &input_image;
+
   // Image in vpx_image_t format.
   // Input image is const. VPX's raw image is not defined as const.
   raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane));
@@ -277,12 +516,37 @@
   raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
   raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
 
-  int flags = 0;
+  vpx_enc_frame_flags_t flags = 0;
   bool send_keyframe = (frame_type == kKeyFrame);
   if (send_keyframe) {
     // Key frame request from caller.
     flags = VPX_EFLAG_FORCE_KF;
   }
+
+#ifdef LIBVPX_SVC
+  if (is_flexible_mode_) {
+    SuperFrameRefSettings settings;
+
+    // These structs are copied when calling vpx_codec_control,
+    // therefore it is ok for them to go out of scope.
+    vpx_svc_ref_frame_config enc_layer_conf;
+    vpx_svc_layer_id layer_id;
+
+    if (codec_.mode == kRealtimeVideo) {
+      // Real-time video is not yet implemented in flexible mode.
+      RTC_NOTREACHED();
+    } else {
+      settings = spatial_layer_->GetSuperFrameSettings(input_image.timestamp(),
+                                                       send_keyframe);
+    }
+    enc_layer_conf = GenerateRefsAndFlags(settings);
+    layer_id.temporal_layer_id = 0;
+    layer_id.spatial_layer_id = settings.start_layer;
+    vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
+    vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG,
+                      &enc_layer_conf);
+  }
+#endif
+
   assert(codec_.maxFramerate > 0);
   uint32_t duration = 90000 / codec_.maxFramerate;
   if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags,
@@ -290,7 +554,8 @@
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
   timestamp_ += duration;
-  return GetEncodedPartitions(input_image);
+
+  return WEBRTC_VIDEO_CODEC_OK;
 }
 
 void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
@@ -299,20 +564,104 @@
   assert(codec_specific != NULL);
   codec_specific->codecType = kVideoCodecVP9;
   CodecSpecificInfoVP9 *vp9_info = &(codec_specific->codecSpecific.VP9);
-  vp9_info->pictureId = picture_id_;
-  vp9_info->keyIdx = kNoKeyIdx;
-  vp9_info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) != 0;
-  // TODO(marpan): Temporal layers are supported in the current VP9 version,
-  // but for now use 1 temporal layer encoding. Will update this when temporal
-  // layer support for VP9 is added in webrtc.
-  vp9_info->temporalIdx = kNoTemporalIdx;
-  vp9_info->layerSync = false;
-  vp9_info->tl0PicIdx = kNoTl0PicIdx;
-  picture_id_ = (picture_id_ + 1) & 0x7FFF;
+  // TODO(asapersson): Set correct values.
+  vp9_info->inter_pic_predicted =
+      (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? false : true;
+  vp9_info->flexible_mode = codec_.codecSpecific.VP9.flexibleMode;
+  vp9_info->ss_data_available = ((pkt.data.frame.flags & VPX_FRAME_IS_KEY) &&
+                                 !codec_.codecSpecific.VP9.flexibleMode)
+                                    ? true
+                                    : false;
+
+  vpx_svc_layer_id_t layer_id = {0};
+  vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+  assert(num_temporal_layers_ > 0);
+  assert(num_spatial_layers_ > 0);
+  if (num_temporal_layers_ == 1) {
+    assert(layer_id.temporal_layer_id == 0);
+    vp9_info->temporal_idx = kNoTemporalIdx;
+  } else {
+    vp9_info->temporal_idx = layer_id.temporal_layer_id;
+  }
+  if (num_spatial_layers_ == 1) {
+    assert(layer_id.spatial_layer_id == 0);
+    vp9_info->spatial_idx = kNoSpatialIdx;
+  } else {
+    vp9_info->spatial_idx = layer_id.spatial_layer_id;
+  }
+  if (layer_id.spatial_layer_id != 0) {
+    vp9_info->ss_data_available = false;
+  }
+
+  // TODO(asapersson): this info has to be obtained from the encoder.
+  vp9_info->temporal_up_switch = true;
+
+  bool is_first_frame = false;
+  if (is_flexible_mode_) {
+    is_first_frame =
+        layer_id.spatial_layer_id == spatial_layer_->GetStartLayer();
+  } else {
+    is_first_frame = layer_id.spatial_layer_id == 0;
+  }
+
+  if (is_first_frame) {
+    picture_id_ = (picture_id_ + 1) & 0x7FFF;
+    // TODO(asapersson): this info has to be obtained from the encoder.
+    vp9_info->inter_layer_predicted = false;
+    ++frames_since_kf_;
+  } else {
+    // TODO(asapersson): this info has to be obtained from the encoder.
+    vp9_info->inter_layer_predicted = true;
+  }
+
+  if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
+    frames_since_kf_ = 0;
+  }
+
+  vp9_info->picture_id = picture_id_;
+
+  if (!vp9_info->flexible_mode) {
+    if (layer_id.temporal_layer_id == 0 && layer_id.spatial_layer_id == 0) {
+      tl0_pic_idx_++;
+    }
+    vp9_info->tl0_pic_idx = tl0_pic_idx_;
+  }
+
+  // Always populate this, so that the packetizer can properly set the marker
+  // bit.
+  vp9_info->num_spatial_layers = num_spatial_layers_;
+
+  vp9_info->num_ref_pics = 0;
+  if (vp9_info->flexible_mode) {
+    vp9_info->gof_idx = kNoGofIdx;
+    vp9_info->num_ref_pics = num_ref_pics_[layer_id.spatial_layer_id];
+    for (int i = 0; i < num_ref_pics_[layer_id.spatial_layer_id]; ++i) {
+      vp9_info->p_diff[i] = p_diff_[layer_id.spatial_layer_id][i];
+    }
+  } else {
+    vp9_info->gof_idx =
+        static_cast<uint8_t>(frames_since_kf_ % gof_.num_frames_in_gof);
+    vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
+  }
+
+  if (vp9_info->ss_data_available) {
+    vp9_info->spatial_layer_resolution_present = true;
+    for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
+      vp9_info->width[i] = codec_.width *
+                           svc_internal_.svc_params.scaling_factor_num[i] /
+                           svc_internal_.svc_params.scaling_factor_den[i];
+      vp9_info->height[i] = codec_.height *
+                            svc_internal_.svc_params.scaling_factor_num[i] /
+                            svc_internal_.svc_params.scaling_factor_den[i];
+    }
+    if (!vp9_info->flexible_mode) {
+      vp9_info->gof.CopyGofInfoVP9(gof_);
+    }
+  }
 }
 
-int VP9EncoderImpl::GetEncodedPartitions(const I420VideoFrame& input_image) {
-  vpx_codec_iter_t iter = NULL;
+int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   encoded_image_._length = 0;
   encoded_image_._frameType = kDeltaFrame;
   RTPFragmentationHeader frag_info;
@@ -321,48 +670,149 @@
   frag_info.VerifyAndAllocateFragmentationHeader(1);
   int part_idx = 0;
   CodecSpecificInfo codec_specific;
-  const vpx_codec_cx_pkt_t *pkt = NULL;
-  while ((pkt = vpx_codec_get_cx_data(encoder_, &iter)) != NULL) {
-    switch (pkt->kind) {
-      case VPX_CODEC_CX_FRAME_PKT: {
-        memcpy(&encoded_image_._buffer[encoded_image_._length],
-               pkt->data.frame.buf,
-               pkt->data.frame.sz);
-        frag_info.fragmentationOffset[part_idx] = encoded_image_._length;
-        frag_info.fragmentationLength[part_idx] =
-            static_cast<uint32_t>(pkt->data.frame.sz);
-        frag_info.fragmentationPlType[part_idx] = 0;
-        frag_info.fragmentationTimeDiff[part_idx] = 0;
-        encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz);
-        assert(encoded_image_._length <= encoded_image_._size);
-        break;
-      }
-      default: {
-        break;
-      }
-    }
-    // End of frame.
-    if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
-      // Check if encoded frame is a key frame.
-      if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
-        encoded_image_._frameType = kKeyFrame;
-      }
-      PopulateCodecSpecific(&codec_specific, *pkt, input_image.timestamp());
-      break;
-    }
+
+  assert(pkt->kind == VPX_CODEC_CX_FRAME_PKT);
+  memcpy(&encoded_image_._buffer[encoded_image_._length], pkt->data.frame.buf,
+         pkt->data.frame.sz);
+  frag_info.fragmentationOffset[part_idx] = encoded_image_._length;
+  frag_info.fragmentationLength[part_idx] =
+      static_cast<uint32_t>(pkt->data.frame.sz);
+  frag_info.fragmentationPlType[part_idx] = 0;
+  frag_info.fragmentationTimeDiff[part_idx] = 0;
+  encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz);
+
+  vpx_svc_layer_id_t layer_id = {0};
+  vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+  if (is_flexible_mode_ && codec_.mode == kScreensharing)
+    spatial_layer_->LayerFrameEncoded(
+        static_cast<unsigned int>(encoded_image_._length),
+        layer_id.spatial_layer_id);
+
+  assert(encoded_image_._length <= encoded_image_._size);
+
+  // End of frame.
+  // Check if the encoded frame is a key frame.
+  if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+    encoded_image_._frameType = kKeyFrame;
   }
+  PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp());
+
   if (encoded_image_._length > 0) {
     TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
-    encoded_image_._timeStamp = input_image.timestamp();
-    encoded_image_.capture_time_ms_ = input_image.render_time_ms();
+    encoded_image_._timeStamp = input_image_->timestamp();
+    encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
     encoded_image_._encodedHeight = raw_->d_h;
     encoded_image_._encodedWidth = raw_->d_w;
     encoded_complete_callback_->Encoded(encoded_image_, &codec_specific,
-                                        &frag_info);
+                                       &frag_info);
   }
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
+#ifdef LIBVPX_SVC
+vpx_svc_ref_frame_config VP9EncoderImpl::GenerateRefsAndFlags(
+    const SuperFrameRefSettings& settings) {
+  static const vpx_enc_frame_flags_t kAllFlags =
+      VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST |
+      VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;
+  vpx_svc_ref_frame_config sf_conf = {};
+  if (settings.is_keyframe) {
+    // Used later on to make sure we don't make any invalid references.
+    memset(buffer_updated_at_frame_, -1, sizeof(buffer_updated_at_frame_));
+    for (int layer = settings.start_layer; layer <= settings.stop_layer;
+         ++layer) {
+      num_ref_pics_[layer] = 0;
+      buffer_updated_at_frame_[settings.layer[layer].upd_buf] =
+          frames_encoded_;
+      // When encoding a keyframe only the alt_fb_idx is used
+      // to specify which layer ends up in which buffer.
+      sf_conf.alt_fb_idx[layer] = settings.layer[layer].upd_buf;
+    }
+  } else {
+    for (int layer_idx = settings.start_layer;
+         layer_idx <= settings.stop_layer; ++layer_idx) {
+      vpx_enc_frame_flags_t layer_flags = kAllFlags;
+      num_ref_pics_[layer_idx] = 0;
+      int8_t refs[3] = {settings.layer[layer_idx].ref_buf1,
+                        settings.layer[layer_idx].ref_buf2,
+                        settings.layer[layer_idx].ref_buf3};
+
+      for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
+        if (refs[ref_idx] == -1)
+          continue;
+
+        DCHECK_GE(refs[ref_idx], 0);
+        DCHECK_LE(refs[ref_idx], 7);
+        // It is easier to remove flags from all-flags-set than to build
+        // the flags up from 0.
+        switch (num_ref_pics_[layer_idx]) {
+          case 0: {
+            sf_conf.lst_fb_idx[layer_idx] = refs[ref_idx];
+            layer_flags &= ~VP8_EFLAG_NO_REF_LAST;
+            break;
+          }
+          case 1: {
+            sf_conf.gld_fb_idx[layer_idx] = refs[ref_idx];
+            layer_flags &= ~VP8_EFLAG_NO_REF_GF;
+            break;
+          }
+          case 2: {
+            sf_conf.alt_fb_idx[layer_idx] = refs[ref_idx];
+            layer_flags &= ~VP8_EFLAG_NO_REF_ARF;
+            break;
+          }
+        }
+        // Make sure we don't reference a buffer that hasn't been
+        // used at all or hasn't been used since a keyframe.
+        DCHECK_NE(buffer_updated_at_frame_[refs[ref_idx]], -1);
+
+        p_diff_[layer_idx][num_ref_pics_[layer_idx]] =
+            frames_encoded_ - buffer_updated_at_frame_[refs[ref_idx]];
+        num_ref_pics_[layer_idx]++;
+      }
+
+      bool upd_buf_same_as_a_ref = false;
+      if (settings.layer[layer_idx].upd_buf != -1) {
+        for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
+          if (settings.layer[layer_idx].upd_buf == refs[ref_idx]) {
+            switch (ref_idx) {
+              case 0: {
+                layer_flags &= ~VP8_EFLAG_NO_UPD_LAST;
+                break;
+              }
+              case 1: {
+                layer_flags &= ~VP8_EFLAG_NO_UPD_GF;
+                break;
+              }
+              case 2: {
+                layer_flags &= ~VP8_EFLAG_NO_UPD_ARF;
+                break;
+              }
+            }
+            upd_buf_same_as_a_ref = true;
+            break;
+          }
+        }
+        if (!upd_buf_same_as_a_ref) {
+          // If we have three references and a buffer is specified to be
+          // updated, then that buffer must be the same as one of the
+          // three references.
+          RTC_CHECK_LT(num_ref_pics_[layer_idx], kMaxVp9RefPics);
+
+          sf_conf.alt_fb_idx[layer_idx] = settings.layer[layer_idx].upd_buf;
+          layer_flags ^= VP8_EFLAG_NO_UPD_ARF;
+        }
+
+        int updated_buffer = settings.layer[layer_idx].upd_buf;
+        buffer_updated_at_frame_[updated_buffer] = frames_encoded_;
+        sf_conf.frame_flags[layer_idx] = layer_flags;
+      }
+    }
+  }
+  ++frames_encoded_;
+  return sf_conf;
+}
+#endif
+
 int VP9EncoderImpl::SetChannelParameters(uint32_t packet_loss, int64_t rtt) {
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -388,6 +838,14 @@
 VP9DecoderImpl::~VP9DecoderImpl() {
   inited_ = true;  // in order to do the actual release
   Release();
+  int num_buffers_in_use = frame_buffer_pool_.GetNumBuffersInUse();
+  if (num_buffers_in_use > 0) {
+    // The frame buffers are reference counted and frames are exposed after
+    // decoding. There may be valid usage cases where previous frames are
+    // still referenced after ~VP9DecoderImpl; that is not a leak.
+    LOG(LS_INFO) << num_buffers_in_use << " Vp9FrameBuffers are still "
+                 << "referenced during ~VP9DecoderImpl.";
+  }
 }
 
 int VP9DecoderImpl::Reset() {
@@ -421,6 +879,11 @@
     // Save VideoCodec instance for later; mainly for duplicating the decoder.
     codec_ = *inst;
   }
+
+  if (!frame_buffer_pool_.InitializeVpxUsePool(decoder_)) {
+    return WEBRTC_VIDEO_CODEC_MEMORY;
+  }
+
   inited_ = true;
   // Always start with a complete key frame.
   key_frame_required_ = true;
@@ -455,6 +918,8 @@
   if (input_image._length == 0) {
     buffer = NULL;  // Triggers full frame concealment.
   }
+  // During decode libvpx may get and release buffers from |frame_buffer_pool_|.
+  // In practice libvpx keeps a few (~3-4) buffers alive at a time.
   if (vpx_codec_decode(decoder_,
                        buffer,
                        static_cast<unsigned int>(input_image._length),
                        0,
                        VPX_DL_REALTIME)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
+  // |img->fb_priv| contains the image data, a reference counted Vp9FrameBuffer.
+  // It may be released by libvpx during future vpx_codec_decode or
+  // vpx_codec_destroy calls.
   img = vpx_codec_get_frame(decoder_, &iter);
   int ret = ReturnFrame(img, input_image._timeStamp);
   if (ret != 0) {
@@ -475,6 +943,32 @@
     // Decoder OK and NULL image => No show frame.
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
   }
+
+#ifdef USE_WRAPPED_I420_BUFFER
+  // This buffer contains all of |img|'s image data, a reference counted
+  // Vp9FrameBuffer. Performing AddRef/Release ensures it is not released and
+  // recycled during use (libvpx is done with the buffers after a few
+  // vpx_codec_decode calls or vpx_codec_destroy).
+  Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer =
+      static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv);
+  img_buffer->AddRef();
+  // The buffer can be used directly by the VideoFrame (without copy) by
+  // using a WrappedI420Buffer.
+  rtc::scoped_refptr<WrappedI420Buffer> img_wrapped_buffer(
+      new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
+          img->d_w, img->d_h,
+          img->d_w, img->d_h,
+          img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+          img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+          img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+          // WrappedI420Buffer's mechanism for allowing the release of its
+          // frame buffer is through a callback function. This is where we
+          // should release |img_buffer|.
+          rtc::Bind(&WrappedI420BufferNoLongerUsedCb, img_buffer)));
+
+  I420VideoFrame decoded_image_;
+  decoded_image_.set_video_frame_buffer(img_wrapped_buffer);
+#else
   decoded_image_.CreateFrame(img->planes[VPX_PLANE_Y],
                              img->planes[VPX_PLANE_U],
                              img->planes[VPX_PLANE_V],
@@ -482,7 +976,9 @@
                              img->stride[VPX_PLANE_Y],
                              img->stride[VPX_PLANE_U],
                              img->stride[VPX_PLANE_V]);
+#endif
   decoded_image_.set_timestamp(timestamp);
+
   int ret = decode_complete_callback_->Decoded(decoded_image_);
   if (ret != 0)
     return ret;
@@ -497,12 +993,18 @@
 
 int VP9DecoderImpl::Release() {
   if (decoder_ != NULL) {
+    // When a codec is destroyed libvpx will release any buffers of
+    // |frame_buffer_pool_| it is currently using.
     if (vpx_codec_destroy(decoder_)) {
       return WEBRTC_VIDEO_CODEC_MEMORY;
     }
     delete decoder_;
    decoder_ = NULL;
   }
+  // Releases buffers from the pool. Any buffers not in use are deleted.
+  // Buffers still referenced externally are deleted once fully released,
+  // not returning to the pool.
+  frame_buffer_pool_.ClearPool();
   inited_ = false;
   return WEBRTC_VIDEO_CODEC_OK;
 }
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
index 019e73ea137a..49e756ad4317
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -13,12 +13,16 @@
 #define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
 
 #include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
 
+#include "vpx/svc_context.h"
 #include "vpx/vpx_decoder.h"
 #include "vpx/vpx_encoder.h"
 
 namespace webrtc {
 
+class ScreenshareLayersVP9;
+
 class VP9EncoderImpl : public VP9Encoder {
  public:
   VP9EncoderImpl();
@@ -41,6 +45,20 @@
 
   int SetRates(uint32_t new_bitrate_kbit, uint32_t frame_rate) override;
 
+  struct LayerFrameRefSettings {
+    int8_t upd_buf = -1;   // -1 - no update, 0..7 - update buffer 0..7.
+    int8_t ref_buf1 = -1;  // -1 - no reference, 0..7 - reference buffer 0..7.
+    int8_t ref_buf2 = -1;  // -1 - no reference, 0..7 - reference buffer 0..7.
+    int8_t ref_buf3 = -1;  // -1 - no reference, 0..7 - reference buffer 0..7.
+  };
+
+  struct SuperFrameRefSettings {
+    LayerFrameRefSettings layer[kMaxVp9NumberOfSpatialLayers];
+    uint8_t start_layer = 0;  // The first spatial layer to be encoded.
+    uint8_t stop_layer = 0;   // The last spatial layer to be encoded.
+    bool is_keyframe = false;
+  };
+
  private:
   // Determine number of encoder threads to use.
   int NumberOfThreads(int width, int height, int number_of_cores);
@@ -52,7 +70,25 @@
                             const vpx_codec_cx_pkt& pkt,
                             uint32_t timestamp);
 
-  int GetEncodedPartitions(const I420VideoFrame& input_image);
+  bool ExplicitlyConfiguredSpatialLayers() const;
+  bool SetSvcRates();
+
+#ifdef LIBVPX_SVC
+  // Used for flexible mode to set the flags and buffer references used
+  // by the encoder. Also calculates the references used by the RTP
+  // packetizer.
+  //
+  // Has to be called for every frame (keyframes included) to update the
+  // state used to calculate references.
+  vpx_svc_ref_frame_config GenerateRefsAndFlags(
+      const SuperFrameRefSettings& settings);
+#endif
+
+  virtual int GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
+
+  // Callback function for outputting packets per spatial layer.
+  static void EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
+                                               void* user_data);
 
   // Determine maximum target for Intra frames
   //
@@ -73,6 +109,22 @@
   vpx_codec_ctx_t* encoder_;
   vpx_codec_enc_cfg_t* config_;
   vpx_image_t* raw_;
+  SvcInternal_t svc_internal_;
+  const I420VideoFrame* input_image_;
+  GofInfoVP9 gof_;  // Contains each frame's temporal information for
+                    // non-flexible mode.
+  uint8_t tl0_pic_idx_;  // Only used in non-flexible mode.
+  size_t frames_since_kf_;
+  uint8_t num_temporal_layers_;
+  uint8_t num_spatial_layers_;
+
+  // Used for flexible mode.
+  bool is_flexible_mode_;
+  int64_t buffer_updated_at_frame_[kNumVp9Buffers];
+  int64_t frames_encoded_;
+  uint8_t num_ref_pics_[kMaxVp9NumberOfSpatialLayers];
+  uint8_t p_diff_[kMaxVp9NumberOfSpatialLayers][kMaxVp9RefPics];
+  rtc::scoped_ptr<ScreenshareLayersVP9> spatial_layer_;
 };
 
@@ -99,7 +151,13 @@
  private:
   int ReturnFrame(const vpx_image_t* img, uint32_t timeStamp);
 
+#ifndef USE_WRAPPED_I420_BUFFER
+  // Temporarily keep the VideoFrame in a separate buffer.
+  // Once we debug WrappedI420VideoFrame usage, we can get rid of this.
   I420VideoFrame decoded_image_;
+#endif
+  // Memory pool used to share buffers between libvpx and webrtc.
+  Vp9FrameBufferPool frame_buffer_pool_;
   DecodedImageCallback* decode_complete_callback_;
   bool inited_;
   vpx_codec_ctx_t* decoder_;
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/codec_database.cc b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/codec_database.cc
index 3bd3e449bdae..9a32fd27ed9f
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/codec_database.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/codec_database.cc
@@ -57,7 +57,9 @@
   vp9_settings.frameDroppingOn = true;
   vp9_settings.keyFrameInterval = 3000;
   vp9_settings.adaptiveQpMode = true;
-
+  vp9_settings.automaticResizeOn = true;
+  vp9_settings.numberOfSpatialLayers = 1;
+  vp9_settings.flexibleMode = false;
   return vp9_settings;
 }
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/encoded_frame.cc b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/encoded_frame.cc
index 8f3cd03294af..8b2a39244b13
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/encoded_frame.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/encoded_frame.cc
@@ -141,27 +141,67 @@
     case kRtpVideoVp9: {
       if (_codecSpecificInfo.codecType != kVideoCodecVP9) {
         // This is the first packet for this frame.
-        _codecSpecificInfo.codecSpecific.VP9.pictureId = -1;
-        _codecSpecificInfo.codecSpecific.VP9.temporalIdx = 0;
-        _codecSpecificInfo.codecSpecific.VP9.layerSync = false;
-        _codecSpecificInfo.codecSpecific.VP9.keyIdx = -1;
+        _codecSpecificInfo.codecSpecific.VP9.picture_id = -1;
+        _codecSpecificInfo.codecSpecific.VP9.temporal_idx = 0;
+        _codecSpecificInfo.codecSpecific.VP9.spatial_idx = 0;
+        _codecSpecificInfo.codecSpecific.VP9.gof_idx = 0;
+        _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted = false;
+        _codecSpecificInfo.codecSpecific.VP9.tl0_pic_idx = -1;
         _codecSpecificInfo.codecType = kVideoCodecVP9;
       }
-      _codecSpecificInfo.codecSpecific.VP9.nonReference =
-          header->codecHeader.VP9.nonReference;
-      if (header->codecHeader.VP9.pictureId != kNoPictureId) {
-        _codecSpecificInfo.codecSpecific.VP9.pictureId =
-            header->codecHeader.VP9.pictureId;
+      _codecSpecificInfo.codecSpecific.VP9.inter_pic_predicted =
+          header->codecHeader.VP9.inter_pic_predicted;
+      _codecSpecificInfo.codecSpecific.VP9.flexible_mode =
+          header->codecHeader.VP9.flexible_mode;
+      _codecSpecificInfo.codecSpecific.VP9.num_ref_pics =
+          header->codecHeader.VP9.num_ref_pics;
+      for (uint8_t r = 0; r < header->codecHeader.VP9.num_ref_pics; ++r) {
+        _codecSpecificInfo.codecSpecific.VP9.p_diff[r] =
+            header->codecHeader.VP9.pid_diff[r];
       }
-      if (header->codecHeader.VP9.temporalIdx != kNoTemporalIdx) {
-        _codecSpecificInfo.codecSpecific.VP9.temporalIdx =
-            header->codecHeader.VP9.temporalIdx;
-        _codecSpecificInfo.codecSpecific.VP9.layerSync =
-            header->codecHeader.VP9.layerSync;
+      _codecSpecificInfo.codecSpecific.VP9.ss_data_available =
+          header->codecHeader.VP9.ss_data_available;
+      if (header->codecHeader.VP9.picture_id != kNoPictureId) {
+        _codecSpecificInfo.codecSpecific.VP9.picture_id =
+            header->codecHeader.VP9.picture_id;
       }
-      if (header->codecHeader.VP9.keyIdx != kNoKeyIdx) {
-        _codecSpecificInfo.codecSpecific.VP9.keyIdx =
-            header->codecHeader.VP9.keyIdx;
+      if (header->codecHeader.VP9.tl0_pic_idx != kNoTl0PicIdx) {
+        _codecSpecificInfo.codecSpecific.VP9.tl0_pic_idx =
+            header->codecHeader.VP9.tl0_pic_idx;
+      }
+      if (header->codecHeader.VP9.temporal_idx != kNoTemporalIdx) {
+        _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+            header->codecHeader.VP9.temporal_idx;
+        _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+            header->codecHeader.VP9.temporal_up_switch;
+      }
+      if (header->codecHeader.VP9.spatial_idx != kNoSpatialIdx) {
+        _codecSpecificInfo.codecSpecific.VP9.spatial_idx =
+            header->codecHeader.VP9.spatial_idx;
+        _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted =
+            header->codecHeader.VP9.inter_layer_predicted;
+      }
+      if (header->codecHeader.VP9.gof_idx != kNoGofIdx) {
+        _codecSpecificInfo.codecSpecific.VP9.gof_idx =
+            header->codecHeader.VP9.gof_idx;
+      }
+      if (header->codecHeader.VP9.ss_data_available) {
+        _codecSpecificInfo.codecSpecific.VP9.num_spatial_layers =
+            header->codecHeader.VP9.num_spatial_layers;
+        _codecSpecificInfo.codecSpecific.VP9
+            .spatial_layer_resolution_present =
+            header->codecHeader.VP9.spatial_layer_resolution_present;
+        if (header->codecHeader.VP9.spatial_layer_resolution_present) {
+          for (size_t i = 0; i < header->codecHeader.VP9.num_spatial_layers;
+               ++i) {
+            _codecSpecificInfo.codecSpecific.VP9.width[i] =
+                header->codecHeader.VP9.width[i];
+            _codecSpecificInfo.codecSpecific.VP9.height[i] =
+                header->codecHeader.VP9.height[i];
+          }
+        }
+        _codecSpecificInfo.codecSpecific.VP9.gof.CopyGofInfoVP9(
+            header->codecHeader.VP9.gof);
       }
       break;
     }
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.cc b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.cc
index 8bd375893d96..edb1995e2306
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.cc
@@ -75,6 +75,15 @@
   return _sessionInfo.NonReference();
 }
 
+void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+  _sessionInfo.SetGofInfo(gof_info, idx);
+  // TODO(asapersson): Consider adding hdr->VP9.ref_picture_id for testing.
+  _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+      gof_info.temporal_idx[idx];
+  _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+      gof_info.temporal_up_switch[idx];
+}
+
 bool VCMFrameBuffer::IsSessionComplete() const {
   return _sessionInfo.complete();
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.h b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.h
index d98b02463f23..3af85f31a178
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.h
@@ -61,6 +61,8 @@
   int Tl0PicId() const;
   bool NonReference() const;
 
+  void SetGofInfo(const GofInfoVP9& gof_info, size_t idx);
+
   // Increments a counter to keep track of the number of packets of this frame
   // which were NACKed before they arrived.
   void IncrementNackCount();
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
index 32772539fc65..1cebd4a842d6
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
@@ -36,21 +36,49 @@
       rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
       return;
     }
+    case kVideoCodecVP9: {
+      rtp->codec = kRtpVideoVp9;
+      rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
+      rtp->codecHeader.VP9.inter_pic_predicted =
+          info->codecSpecific.VP9.inter_pic_predicted;
+      rtp->codecHeader.VP9.flexible_mode =
+          info->codecSpecific.VP9.flexible_mode;
+      rtp->codecHeader.VP9.ss_data_available =
+          info->codecSpecific.VP9.ss_data_available;
+      rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
+      rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
+      rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
+      rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
+      rtp->codecHeader.VP9.temporal_up_switch =
+          info->codecSpecific.VP9.temporal_up_switch;
+      rtp->codecHeader.VP9.inter_layer_predicted =
+          info->codecSpecific.VP9.inter_layer_predicted;
+      rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
+      rtp->codecHeader.VP9.num_spatial_layers =
+          info->codecSpecific.VP9.num_spatial_layers;
+
+      if (info->codecSpecific.VP9.ss_data_available) {
+        rtp->codecHeader.VP9.spatial_layer_resolution_present =
+            info->codecSpecific.VP9.spatial_layer_resolution_present;
+        if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
+          for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
+               ++i) {
+            rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
index 32772539fc65..1cebd4a842d6 100644
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
@@ -36,21 +36,49 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
       rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
       return;
     }
+    case kVideoCodecVP9: {
+      rtp->codec = kRtpVideoVp9;
+      rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
+      rtp->codecHeader.VP9.inter_pic_predicted =
+          info->codecSpecific.VP9.inter_pic_predicted;
+      rtp->codecHeader.VP9.flexible_mode =
+          info->codecSpecific.VP9.flexible_mode;
+      rtp->codecHeader.VP9.ss_data_available =
+          info->codecSpecific.VP9.ss_data_available;
+      rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
+      rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
+      rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
+      rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
+      rtp->codecHeader.VP9.temporal_up_switch =
+          info->codecSpecific.VP9.temporal_up_switch;
+      rtp->codecHeader.VP9.inter_layer_predicted =
+          info->codecSpecific.VP9.inter_layer_predicted;
+      rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
+      rtp->codecHeader.VP9.num_spatial_layers =
+          info->codecSpecific.VP9.num_spatial_layers;
+
+      if (info->codecSpecific.VP9.ss_data_available) {
+        rtp->codecHeader.VP9.spatial_layer_resolution_present =
+            info->codecSpecific.VP9.spatial_layer_resolution_present;
+        if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
+          for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
+               ++i) {
+            rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
+            rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
+          }
+        }
+        rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
+      }
+
+      rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
+      for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
+        rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
+      return;
+    }
     case kVideoCodecH264:
       rtp->codec = kRtpVideoH264;
       rtp->simulcastIdx = info->codecSpecific.H264.simulcastIdx;
       return;
-    case kVideoCodecVP9:
-      rtp->codec = kRtpVideoVp9;
-      rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
-      rtp->codecHeader.VP9.pictureId = info->codecSpecific.VP9.pictureId;
-      rtp->codecHeader.VP9.nonReference =
-          info->codecSpecific.VP9.nonReference;
-      rtp->codecHeader.VP9.temporalIdx = info->codecSpecific.VP9.temporalIdx;
-      rtp->codecHeader.VP9.layerSync = info->codecSpecific.VP9.layerSync;
-      rtp->codecHeader.VP9.tl0PicIdx = info->codecSpecific.VP9.tl0PicIdx;
-      rtp->codecHeader.VP9.keyIdx = info->codecSpecific.VP9.keyIdx;
-      return;
     case kVideoCodecGeneric:
       rtp->codec = kRtpVideoGeneric;
      rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
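Editorial aside: CopyCodecSpecific() consumes a CodecSpecificInfo that the encoder wrapper populates per encoded frame. A sketch, assuming single-spatial-layer encoder-side usage (field values and the key-frame scenario are illustrative, not taken from this patch):

```cpp
// Sketch: fill the fields consumed by CopyCodecSpecific() above for a
// single-spatial-layer VP9 key frame.
CodecSpecificInfo info;
memset(&info, 0, sizeof(info));
info.codecType = kVideoCodecVP9;
info.codecSpecific.VP9.inter_pic_predicted = false;   // key frame
info.codecSpecific.VP9.flexible_mode = false;
info.codecSpecific.VP9.ss_data_available = true;      // SS sent with key frames
info.codecSpecific.VP9.picture_id = kNoPictureId;     // set by the RTP sender
info.codecSpecific.VP9.tl0_pic_idx = kNoTl0PicIdx;    // set by the RTP sender
info.codecSpecific.VP9.temporal_idx = 0;
info.codecSpecific.VP9.spatial_idx = 0;
info.codecSpecific.VP9.gof_idx = 0;
info.codecSpecific.VP9.num_spatial_layers = 1;
info.codecSpecific.VP9.spatial_layer_resolution_present = true;
info.codecSpecific.VP9.width[0] = 1280;
info.codecSpecific.VP9.height[0] = 720;
```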
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
index a643b64eab65..3961334e0a0a 100644
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
@@ -14,6 +14,9 @@
 #include 
 #include 
+#include "webrtc/base/checks.h"
+#include "webrtc/base/trace_event.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
 #include "webrtc/modules/video_coding/main/interface/video_coding.h"
 #include "webrtc/modules/video_coding/main/source/frame_buffer.h"
 #include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
@@ -26,10 +29,12 @@
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
 #include "webrtc/system_wrappers/interface/logging.h"
 #include "webrtc/system_wrappers/interface/metrics.h"
-#include "webrtc/system_wrappers/interface/trace_event.h"
 
 namespace webrtc {
 
+// Interval for updating SS data.
+static const uint32_t kSsCleanupIntervalSec = 60;
+
 // Use this rtt if no value has been reported.
 static const int64_t kDefaultRtt = 200;
@@ -146,6 +151,98 @@ void FrameList::Reset(UnorderedFrameList* free_frames) {
   }
 }
 
+bool Vp9SsMap::Insert(const VCMPacket& packet) {
+  if (!packet.codecSpecificHeader.codecHeader.VP9.ss_data_available)
+    return false;
+
+  ss_map_[packet.timestamp] = packet.codecSpecificHeader.codecHeader.VP9.gof;
+  return true;
+}
+
+void Vp9SsMap::Reset() {
+  ss_map_.clear();
+}
+
+bool Vp9SsMap::Find(uint32_t timestamp, SsMap::iterator* it_out) {
+  bool found = false;
+  for (SsMap::iterator it = ss_map_.begin(); it != ss_map_.end(); ++it) {
+    if (it->first == timestamp || IsNewerTimestamp(timestamp, it->first)) {
+      *it_out = it;
+      found = true;
+    }
+  }
+  return found;
+}
+
+void Vp9SsMap::RemoveOld(uint32_t timestamp) {
+  if (!TimeForCleanup(timestamp))
+    return;
+
+  SsMap::iterator it;
+  if (!Find(timestamp, &it))
+    return;
+
+  ss_map_.erase(ss_map_.begin(), it);
+  AdvanceFront(timestamp);
+}
+
+bool Vp9SsMap::TimeForCleanup(uint32_t timestamp) const {
+  if (ss_map_.empty() || !IsNewerTimestamp(timestamp, ss_map_.begin()->first))
+    return false;
+
+  uint32_t diff = timestamp - ss_map_.begin()->first;
+  return diff / kVideoPayloadTypeFrequency >= kSsCleanupIntervalSec;
+}
+
+void Vp9SsMap::AdvanceFront(uint32_t timestamp) {
+  DCHECK(!ss_map_.empty());
+  GofInfoVP9 gof = ss_map_.begin()->second;
+  ss_map_.erase(ss_map_.begin());
+  ss_map_[timestamp] = gof;
+}
+
+// TODO(asapersson): Update according to updates in RTP payload profile.
+bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
+  uint8_t gof_idx = packet->codecSpecificHeader.codecHeader.VP9.gof_idx;
+  if (gof_idx == kNoGofIdx)
+    return false;  // No update needed.
+
+  SsMap::iterator it;
+  if (!Find(packet->timestamp, &it))
+    return false;  // Corresponding SS not yet received.
+
+  if (gof_idx >= it->second.num_frames_in_gof)
+    return false;  // Assume corresponding SS not yet received.
+
+  RTPVideoHeaderVP9* vp9 = &packet->codecSpecificHeader.codecHeader.VP9;
+  vp9->temporal_idx = it->second.temporal_idx[gof_idx];
+  vp9->temporal_up_switch = it->second.temporal_up_switch[gof_idx];
+
+  // TODO(asapersson): Set vp9.ref_picture_id[i] and add usage.
+  vp9->num_ref_pics = it->second.num_ref_pics[gof_idx];
+  for (uint8_t i = 0; i < it->second.num_ref_pics[gof_idx]; ++i) {
+    vp9->pid_diff[i] = it->second.pid_diff[gof_idx][i];
+  }
+  return true;
+}
+
+void Vp9SsMap::UpdateFrames(FrameList* frames) {
+  for (const auto& frame_it : *frames) {
+    uint8_t gof_idx =
+        frame_it.second->CodecSpecific()->codecSpecific.VP9.gof_idx;
+    if (gof_idx == kNoGofIdx) {
+      continue;
+    }
+    SsMap::iterator ss_it;
+    if (Find(frame_it.second->TimeStamp(), &ss_it)) {
+      if (gof_idx >= ss_it->second.num_frames_in_gof) {
+        continue;  // Assume corresponding SS not yet received.
+      }
+      frame_it.second->SetGofInfo(ss_it->second, gof_idx);
+    }
+  }
+}
+
 VCMJitterBuffer::VCMJitterBuffer(Clock* clock, EventFactory* event_factory)
     : clock_(clock),
       running_(false),
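Editorial aside: TimeForCleanup() divides the timestamp delta by kVideoPayloadTypeFrequency (the 90 kHz RTP video clock), so the 60-second cleanup window corresponds to 60 * 90000 = 5,400,000 ticks. IsNewerTimestamp() must therefore be wrap-safe over the 32-bit timestamp space; a sketch of the standard comparison it is equivalent to (the real helper lives elsewhere in module_common_types.h):

```cpp
// Wrap-safe RTP timestamp comparison: forward and backward jumps across the
// 2^32 boundary are distinguished by which half of the ring the unsigned
// difference falls in.
static bool IsNewerTimestampSketch(uint32_t timestamp, uint32_t prev) {
  return timestamp != prev &&
         static_cast<uint32_t>(timestamp - prev) < 0x80000000u;
}
```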
@@ -204,7 +301,7 @@ VCMJitterBuffer::~VCMJitterBuffer() {
 }
 
 void VCMJitterBuffer::UpdateHistograms() {
-  if (num_packets_ <= 0) {
+  if (num_packets_ <= 0 || !running_) {
     return;
   }
   int64_t elapsed_sec =
@@ -624,6 +721,9 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
     last_decoded_state_.UpdateOldPacket(&packet);
     DropPacketsFromNackList(last_decoded_state_.sequence_num());
 
+    // Also see if this old packet made more incomplete frames continuous.
+    FindAndInsertContinuousFramesWithState(last_decoded_state_);
+
     if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
       LOG(LS_WARNING)
           << num_consecutive_old_packets_
@@ -800,6 +900,16 @@ void VCMJitterBuffer::FindAndInsertContinuousFrames(
   VCMDecodingState decoding_state;
   decoding_state.CopyFrom(last_decoded_state_);
   decoding_state.SetState(&new_frame);
+  FindAndInsertContinuousFramesWithState(decoding_state);
+}
+
+void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
+    const VCMDecodingState& original_decoded_state) {
+  // Copy original_decoded_state so we can move the state forward with each
+  // decodable frame we find.
+  VCMDecodingState decoding_state;
+  decoding_state.CopyFrom(original_decoded_state);
+
   // When temporal layers are available, we search for a complete or decodable
   // frame until we hit one of the following:
   // 1. Continuous base or sync layer.
@@ -807,7 +917,8 @@ void VCMJitterBuffer::FindAndInsertContinuousFrames(
   for (FrameList::iterator it = incomplete_frames_.begin();
        it != incomplete_frames_.end();) {
     VCMFrameBuffer* frame = it->second;
-    if (IsNewerTimestamp(new_frame.TimeStamp(), frame->TimeStamp())) {
+    if (IsNewerTimestamp(original_decoded_state.time_stamp(),
+                         frame->TimeStamp())) {
       ++it;
       continue;
     }
@@ -858,7 +969,7 @@ void VCMJitterBuffer::SetNackMode(VCMNackMode mode,
   low_rtt_nack_threshold_ms_ = low_rtt_nack_threshold_ms;
   high_rtt_nack_threshold_ms_ = high_rtt_nack_threshold_ms;
   // Don't set a high start rtt if high_rtt_nack_threshold_ms_ is used, to not
-  // disable NACK in hybrid mode.
+  // disable NACK in |kNack| mode.
   if (rtt_ms_ == kDefaultRtt && high_rtt_nack_threshold_ms_ != -1) {
     rtt_ms_ = 0;
   }
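Editorial aside: this excerpt does not show where Vp9SsMap is hooked into the packet-insert path. A plausible call sequence, sketched under assumptions (the member name |vp9_ss_map_| and the placement relative to InsertPacket() are not confirmed by this excerpt):

```cpp
// Assumed wiring: run a VP9 packet through the Vp9SsMap defined above.
// |vp9_ss_map_| would be a VCMJitterBuffer member in this sketch.
void OnVp9PacketSketch(VCMPacket* packet, FrameList* incomplete_frames) {
  vp9_ss_map_.Insert(*packet);               // Record SS data, if present.
  vp9_ss_map_.RemoveOld(packet->timestamp);  // Periodic 60 s cleanup.
  vp9_ss_map_.UpdatePacket(packet);          // Expand gof_idx into fields.
  vp9_ss_map_.UpdateFrames(incomplete_frames);  // Handle late SS data.
}
```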
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.h b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.h
index 7d7f024cb1ca..62d5b7690001 100644
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.h
@@ -75,6 +75,37 @@ class FrameList
   void Reset(UnorderedFrameList* free_frames);
 };
 
+class Vp9SsMap {
+ public:
+  typedef std::map<uint32_t, GofInfoVP9> SsMap;
+  bool Insert(const VCMPacket& packet);
+  void Reset();
+
+  // Removes SS data that are older than |timestamp|.
+  // The |timestamp| should be an old timestamp, i.e. packets with older
+  // timestamps should no longer be inserted.
+  void RemoveOld(uint32_t timestamp);
+
+  bool UpdatePacket(VCMPacket* packet);
+  void UpdateFrames(FrameList* frames);
+
+  // Public for testing.
+  // Returns an iterator to the corresponding SS data for the input |timestamp|.
+  bool Find(uint32_t timestamp, SsMap::iterator* it);
+
+ private:
+  // These two functions are called by RemoveOld.
+  // Checks if it is time to do a clean up (done each kSsCleanupIntervalSec).
+  bool TimeForCleanup(uint32_t timestamp) const;
+
+  // Advances the oldest SS data to handle timestamp wrap in cases where SS
+  // data are received very seldom (e.g. only once in beginning, second when
+  // IsNewerTimestamp is not true).
+  void AdvanceFront(uint32_t timestamp);
+
+  SsMap ss_map_;
+};
+
 class VCMJitterBuffer {
  public:
   VCMJitterBuffer(Clock* clock,
@@ -215,6 +246,12 @@ class VCMJitterBuffer {
   // all decodable frames into account.
   bool IsContinuous(const VCMFrameBuffer& frame) const
       EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+  // Looks for frames in |incomplete_frames_| which are continuous in the
+  // provided |decoded_state|. Starts the search from the timestamp of
+  // |decoded_state|.
+  void FindAndInsertContinuousFramesWithState(
+      const VCMDecodingState& decoded_state)
+      EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
   // Looks for frames in |incomplete_frames_| which are continuous in
   // |last_decoded_state_| taking all decodable frames into account. Starts
   // the search from |new_frame|.
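Editorial aside: the "Public for testing" note on Find() suggests test-style usage like the following sketch (gtest macros and the abbreviated packet setup are assumptions; field names are those used elsewhere in this patch):

```cpp
// Sketch: insert SS data from one packet, then resolve a later packet that
// only carries a gof_idx.
Vp9SsMap map;

VCMPacket ss_packet;  // Assume a VP9 packet carrying scalability structure.
ss_packet.timestamp = 90000;
ss_packet.codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
// ... gof fields filled in so that num_frames_in_gof > 1 ...
EXPECT_TRUE(map.Insert(ss_packet));

VCMPacket media_packet;  // A later packet that only carries gof_idx.
media_packet.timestamp = 93000;
media_packet.codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
EXPECT_TRUE(map.UpdatePacket(&media_packet));  // temporal_idx now filled in.
```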
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.cc b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.cc
index b310af98aa15..fef86a45fbcd 100644
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.cc
@@ -59,31 +59,52 @@ int VCMSessionInfo::HighSequenceNumber() const {
 }
 
 int VCMSessionInfo::PictureId() const {
-  if (packets_.empty() ||
-      packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+  if (packets_.empty())
     return kNoPictureId;
-  return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
+  if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+    return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
+  } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+    return packets_.front().codecSpecificHeader.codecHeader.VP9.picture_id;
+  } else {
+    return kNoPictureId;
+  }
 }
 
 int VCMSessionInfo::TemporalId() const {
-  if (packets_.empty() ||
-      packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+  if (packets_.empty())
     return kNoTemporalIdx;
-  return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
+  if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+    return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
+  } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+    return packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx;
+  } else {
+    return kNoTemporalIdx;
+  }
 }
 
 bool VCMSessionInfo::LayerSync() const {
-  if (packets_.empty() ||
-      packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+  if (packets_.empty())
     return false;
-  return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
+  if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+    return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
+  } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+    return
+        packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
+  } else {
+    return false;
+  }
 }
 
 int VCMSessionInfo::Tl0PicId() const {
-  if (packets_.empty() ||
-      packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+  if (packets_.empty())
     return kNoTl0PicIdx;
-  return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
+  if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+    return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
+  } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+    return packets_.front().codecSpecificHeader.codecHeader.VP9.tl0_pic_idx;
+  } else {
+    return kNoTl0PicIdx;
+  }
 }
 
 bool VCMSessionInfo::NonReference() const {
@@ -93,6 +114,24 @@ bool VCMSessionInfo::NonReference() const {
   return packets_.front().codecSpecificHeader.codecHeader.VP8.nonReference;
 }
 
+void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+  if (packets_.empty() ||
+      packets_.front().codecSpecificHeader.codec != kRtpVideoVp9 ||
+      packets_.front().codecSpecificHeader.codecHeader.VP9.flexible_mode) {
+    return;
+  }
+  packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx =
+      gof_info.temporal_idx[idx];
+  packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch =
+      gof_info.temporal_up_switch[idx];
+  packets_.front().codecSpecificHeader.codecHeader.VP9.num_ref_pics =
+      gof_info.num_ref_pics[idx];
+  for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
+    packets_.front().codecSpecificHeader.codecHeader.VP9.pid_diff[i] =
+        gof_info.pid_diff[idx][i];
+  }
+}
+
 void VCMSessionInfo::Reset() {
   session_nack_ = false;
   complete_ = false;
diff --git a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.h b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.h
index 21f6c437e3a7..88071e19d5fa 100644
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.h
@@ -88,6 +88,8 @@ class VCMSessionInfo {
   int Tl0PicId() const;
   bool NonReference() const;
 
+  void SetGofInfo(const GofInfoVP9& gof_info, size_t idx);
+
   // The number of packets discarded because the decoder can't make use of
   // them.
   int packets_not_decodable() const;
diff --git a/media/webrtc/trunk/webrtc/video_engine/vie_codec_impl.cc b/media/webrtc/trunk/webrtc/video_engine/vie_codec_impl.cc
index b4c1b8c9d466..002aa97a0122 100644
--- a/media/webrtc/trunk/webrtc/video_engine/vie_codec_impl.cc
+++ b/media/webrtc/trunk/webrtc/video_engine/vie_codec_impl.cc
@@ -89,6 +89,7 @@ static void LogCodec(const VideoCodec& codec) {
                  << codec.codecSpecific.H264.ppsLen;
   } else if (codec.codecType == kVideoCodecVP9) {
     LOG(LS_INFO) << "VP9 specific settings";
+    // XXX FIX!! log VP9 specific settings
   }
 }
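Editorial aside: the XXX comment leaves VP9 logging unimplemented. A sketch of what it might look like, mirroring the VP8 branch of LogCodec(); the VideoCodecVP9 field names here are assumptions modeled on their VP8 equivalents:

```cpp
// Hypothetical VP9 logging, patterned after the VP8 branch of LogCodec().
LOG(LS_INFO) << "numberOfTemporalLayers: "
             << static_cast<int>(
                    codec.codecSpecific.VP9.numberOfTemporalLayers)
             << ", denoisingOn: " << codec.codecSpecific.VP9.denoisingOn
             << ", frameDroppingOn: " << codec.codecSpecific.VP9.frameDroppingOn
             << ", keyFrameInterval: "
             << codec.codecSpecific.VP9.keyFrameInterval;
```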
diff --git a/memory/replace/dmd/DMD.cpp b/memory/replace/dmd/DMD.cpp
index 7ca969ef3331..05e10494430d 100644
--- a/memory/replace/dmd/DMD.cpp
+++ b/memory/replace/dmd/DMD.cpp
@@ -12,10 +12,11 @@
 #include 
 #include 
 
-#ifdef XP_WIN
-#if defined(MOZ_OPTIMIZE) && !defined(MOZ_PROFILING)
-#error "Optimized, DMD-enabled builds on Windows must be built with --enable-profiling"
+#if !defined(MOZ_PROFILING)
+#error "DMD requires MOZ_PROFILING"
 #endif
+
+#ifdef XP_WIN
 #include 
 #include 
 #else
diff --git a/modules/libpref/init/all.js b/modules/libpref/init/all.js
index 76f0386749c2..c6eff239bda5 100644
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -382,6 +382,7 @@ pref("media.navigator.video.h264.level", 12); // 0x42E00C - level 1.2
 pref("media.navigator.video.h264.max_br", 700); // 8x10
 pref("media.navigator.video.h264.max_mbps", 11880); // CIF@30fps
 pref("media.peerconnection.video.h264_enabled", false);
+pref("media.peerconnection.video.vp9_enabled", false);
 pref("media.getusermedia.aec", 4);
 // Gonk typically captures at QVGA, and so min resolution is QQVGA or
 // 160x120; 100Kbps is plenty for that.
@@ -4499,6 +4500,8 @@ pref("full-screen-api.pointer-lock.enabled", true);
 // transition duration of fade-to-black and fade-from-black, unit: ms
 pref("full-screen-api.transition-duration.enter", "200 200");
 pref("full-screen-api.transition-duration.leave", "200 200");
+// timeout for black screen in fullscreen transition, unit: ms
+pref("full-screen-api.transition.timeout", 500);
 // time for the warning box stays on the screen before sliding out, unit: ms
 pref("full-screen-api.warning.timeout", 3000);
 // delay for the warning box to show when pointer stays on the top, unit: ms
diff --git a/toolkit/components/telemetry/Histograms.json b/toolkit/components/telemetry/Histograms.json
index 171060f994fa..a97de6ee0650 100644
--- a/toolkit/components/telemetry/Histograms.json
+++ b/toolkit/components/telemetry/Histograms.json
@@ -6113,6 +6113,21 @@
     "description": "Whether Ogg audio/video encountered are chained or not.",
     "bug_numbers": [1230295]
   },
+  "VIDEO_MFT_OUTPUT_NULL_SAMPLES": {
+    "alert_emails": ["cpearce@mozilla.com"],
+    "expires_in_version": "53",
+    "kind": "enumerated",
+    "n_values": 10,
+    "description": "Does the WMF video decoder return success but null output? 0 = playback successful, 1 = excessive null output but able to decode some frames, 2 = excessive null output and gave up, 3 = null output but recovered, 4 = non-excessive null output without being able to decode frames.",
+    "bug_numbers": [1176071]
+  },
+  "AUDIO_MFT_OUTPUT_NULL_SAMPLES": {
+    "alert_emails": ["cpearce@mozilla.com"],
+    "expires_in_version": "53",
+    "kind": "count",
+    "description": "How many times the audio MFT decoder returns success but output nothing.",
+    "bug_numbers": [1176071]
+  },
   "VIDEO_CAN_CREATE_AAC_DECODER": {
     "alert_emails": ["cpearce@mozilla.com"],
     "expires_in_version": "50",
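Editorial aside: probes defined in Histograms.json are recorded from C++ through Telemetry::Accumulate; the enum IDs are generated from the JSON. The decoder call sites are not part of this excerpt, so the following is only a sketch of how the two new probes would be fed:

```cpp
#include "mozilla/Telemetry.h"

// Audio MFT returned success but produced no sample: bump the counter probe.
mozilla::Telemetry::Accumulate(
    mozilla::Telemetry::AUDIO_MFT_OUTPUT_NULL_SAMPLES, 1);

// Video: record which enumerated outcome (0..4, per the description above)
// this playback hit, e.g. "null output but recovered".
mozilla::Telemetry::Accumulate(
    mozilla::Telemetry::VIDEO_MFT_OUTPUT_NULL_SAMPLES, 3);
```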
diff --git a/xpcom/threads/TaskDispatcher.h b/xpcom/threads/TaskDispatcher.h
index 6817b6ba724e..d7005c35bcbe 100644
--- a/xpcom/threads/TaskDispatcher.h
+++ b/xpcom/threads/TaskDispatcher.h
@@ -69,7 +69,10 @@ public:
 class AutoTaskDispatcher : public TaskDispatcher
 {
 public:
-  explicit AutoTaskDispatcher(bool aIsTailDispatcher = false) : mIsTailDispatcher(aIsTailDispatcher) {}
+  explicit AutoTaskDispatcher(bool aIsTailDispatcher = false)
+    : mIsTailDispatcher(aIsTailDispatcher)
+  {}
+
   ~AutoTaskDispatcher()
   {
     // Given that direct tasks may trigger other code that uses the tail
@@ -81,25 +84,33 @@ public:
     // potentially not true for other hypothetical AutoTaskDispatchers). Feel
     // free to loosen this restriction to apply only to mIsTailDispatcher if a
     // use-case requires it.
-    MOZ_ASSERT(mDirectTasks.empty());
+    MOZ_ASSERT(!HaveDirectTasks());
 
     for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
       DispatchTaskGroup(Move(mTaskGroups[i]));
     }
   }
 
+  bool HaveDirectTasks() const
+  {
+    return mDirectTasks.isSome() && !mDirectTasks->empty();
+  }
+
   void DrainDirectTasks() override
   {
-    while (!mDirectTasks.empty()) {
-      nsCOMPtr<nsIRunnable> r = mDirectTasks.front();
-      mDirectTasks.pop();
+    while (HaveDirectTasks()) {
+      nsCOMPtr<nsIRunnable> r = mDirectTasks->front();
+      mDirectTasks->pop();
       r->Run();
     }
   }
 
   void AddDirectTask(already_AddRefed<nsIRunnable> aRunnable) override
   {
-    mDirectTasks.push(Move(aRunnable));
+    if (mDirectTasks.isNothing()) {
+      mDirectTasks.emplace();
+    }
+    mDirectTasks->push(Move(aRunnable));
   }
 
   void AddStateChangeTask(AbstractThread* aThread,
@@ -124,7 +135,8 @@ public:
 
   bool HasTasksFor(AbstractThread* aThread) override
   {
-    return !!GetTaskGroup(aThread) || (aThread == AbstractThread::GetCurrent() && !mDirectTasks.empty());
+    return !!GetTaskGroup(aThread) ||
+           (aThread == AbstractThread::GetCurrent() && HaveDirectTasks());
   }
 
   void DispatchTasksFor(AbstractThread* aThread) override
@@ -232,8 +244,11 @@ private:
     thread->Dispatch(r.forget(), failureHandling, reason);
   }
 
-  // Direct tasks.
-  std::queue<nsCOMPtr<nsIRunnable>> mDirectTasks;
+  // Direct tasks. We use a Maybe<> because (a) this class is hot, (b)
+  // mDirectTasks often doesn't get anything put into it, and (c) the
+  // std::queue implementation in GNU libstdc++ does two largish heap
+  // allocations when creating a new std::queue.
+  mozilla::Maybe<std::queue<nsCOMPtr<nsIRunnable>>> mDirectTasks;
 
   // Task groups, organized by thread.
   nsTArray<UniquePtr<PerThreadTaskGroup>> mTaskGroups;
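Editorial aside: the Maybe<> change avoids paying std::deque's initial heap allocations for dispatchers that never receive a direct task. The same lazy-initialization pattern in isolation, as a self-contained sketch:

```cpp
#include <queue>
#include "mozilla/Maybe.h"

// The queue is only constructed on first use, so instances that never take
// an item never pay for std::deque's up-front heap allocations.
class LazyQueue {
public:
  void Push(int aValue) {
    if (mQueue.isNothing()) {
      mQueue.emplace();  // First push: construct the queue in place.
    }
    mQueue->push(aValue);
  }
  bool HasItems() const { return mQueue.isSome() && !mQueue->empty(); }
private:
  mozilla::Maybe<std::queue<int>> mQueue;
};
```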
diff --git a/xpcom/threads/nsEventQueue.cpp b/xpcom/threads/nsEventQueue.cpp
index 08f7bee3f945..c1811335882d 100644
--- a/xpcom/threads/nsEventQueue.cpp
+++ b/xpcom/threads/nsEventQueue.cpp
@@ -32,7 +32,8 @@ nsEventQueue::~nsEventQueue()
 {
   // It'd be nice to be able to assert that no one else is holding the lock,
   // but NSPR doesn't really expose APIs for it.
-  MOZ_ASSERT(IsEmpty());
+  NS_ASSERTION(IsEmpty(),
+               "Non-empty event queue being destroyed; events being leaked.");
 
   if (mHead) {
     FreePage(mHead);
diff --git a/xpcom/threads/nsIEventTarget.idl b/xpcom/threads/nsIEventTarget.idl
index c3e4d3c3299a..db71ac7bf691 100644
--- a/xpcom/threads/nsIEventTarget.idl
+++ b/xpcom/threads/nsIEventTarget.idl
@@ -56,9 +56,7 @@ interface nsIEventTarget : nsISupports
      *
      * @param event
      *   The alreadyAddRefed<> event to dispatch.
-     *   NOTE that the event will be leaked if it fails to dispatch. Also note
-     *   that if "flags" includes DISPATCH_SYNC, it may return error from Run()
-     *   after a successful dispatch. In that case, the event is not leaked.
+     *   NOTE that the event will be leaked if it fails to dispatch.
      * @param flags
      *   The flags modifying event dispatch. The flags are described in detail
      *   below.
diff --git a/xpcom/threads/nsThread.cpp b/xpcom/threads/nsThread.cpp
index f2801d97b2d3..1df052b7f6e1 100644
--- a/xpcom/threads/nsThread.cpp
+++ b/xpcom/threads/nsThread.cpp
@@ -643,9 +643,7 @@ nsThread::DispatchInternal(already_AddRefed<nsIRunnable>&& aEvent, uint32_t aFlags)
     while (wrapper->IsPending()) {
       NS_ProcessNextEvent(thread, true);
     }
-    // NOTE that, unlike the behavior above, the event is not leaked by
-    // this place, while it is possible that the result is an error.
-    return wrapper->Result();
+    return NS_OK;
   }
 
   NS_ASSERTION(aFlags == NS_DISPATCH_NORMAL, "unexpected dispatch flags");
diff --git a/xpcom/threads/nsThreadSyncDispatch.h b/xpcom/threads/nsThreadSyncDispatch.h
index 4462a38101d2..5e010f6d4009 100644
--- a/xpcom/threads/nsThreadSyncDispatch.h
+++ b/xpcom/threads/nsThreadSyncDispatch.h
@@ -9,6 +9,7 @@
 
 #include "nsThreadUtils.h"
 #include "LeakRefPtr.h"
+#include "mozilla/DebugOnly.h"
 
 class nsThreadSyncDispatch : public nsRunnable
 {
@@ -16,7 +17,6 @@ public:
   nsThreadSyncDispatch(nsIThread* aOrigin, already_AddRefed<nsIRunnable>&& aTask)
     : mOrigin(aOrigin)
     , mSyncTask(mozilla::Move(aTask))
-    , mResult(NS_ERROR_NOT_INITIALIZED)
   {
   }
 
@@ -25,16 +25,13 @@ public:
     return !!mSyncTask;
   }
 
-  nsresult Result()
-  {
-    return mResult;
-  }
-
 private:
   NS_IMETHOD Run() override
   {
     if (nsIRunnable* task = mSyncTask.get()) {
-      mResult = task->Run();
+      mozilla::DebugOnly<nsresult> result = task->Run();
+      MOZ_ASSERT(NS_SUCCEEDED(result),
+                 "task in sync dispatch should not fail");
       // We must release the task here to ensure that when the original
       // thread is unblocked, this task has been released.
       mSyncTask.release();
@@ -48,7 +45,6 @@ private:
   // The task is leaked by default when Run() is not called, because
   // otherwise we may release it in an incorrect thread.
   mozilla::LeakRefPtr<nsIRunnable> mSyncTask;
-  nsresult mResult;
 };
 
 #endif // nsThreadSyncDispatch_h_
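Editorial aside: after this change, a synchronous dispatch's return value reflects only whether dispatching succeeded; the runnable's own Run() result is asserted in debug builds and otherwise discarded. Callers that previously relied on Result() must carry status out-of-band, for example as in this sketch (|mStatus| and MyTask are illustrative, not part of the patch):

```cpp
// Sketch: a sync-dispatched task records its real result in a member, since
// Dispatch(..., NS_DISPATCH_SYNC) now returns NS_OK regardless of Run().
class MyTask final : public nsRunnable {
public:
  NS_IMETHOD Run() override {
    mStatus = DoWork();  // Record the real result for the caller to inspect.
    return NS_OK;        // Sync-dispatched tasks should not return failure.
  }
  nsresult mStatus = NS_ERROR_NOT_INITIALIZED;
private:
  nsresult DoWork() { return NS_OK; }
};

// RefPtr<MyTask> task = new MyTask();
// thread->Dispatch(task, NS_DISPATCH_SYNC);  // NS_OK even if DoWork() failed.
// nsresult status = task->mStatus;           // The result that matters.
```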