diff --git a/dom/media/AudibilityMonitor.h b/dom/media/AudibilityMonitor.h index e5be7bda491e..d869d1c7601d 100644 --- a/dom/media/AudibilityMonitor.h +++ b/dom/media/AudibilityMonitor.h @@ -11,7 +11,6 @@ #include "AudioSampleFormat.h" #include "WebAudioUtils.h" -#include "AudioBlock.h" namespace mozilla { @@ -26,38 +25,10 @@ class AudibilityMonitor { mSilentFramesInARow(0), mEverAudible(false) {} - void Process(const AudioData* aData) { + void ProcessAudioData(const AudioData* aData) { ProcessInterleaved(aData->Data(), aData->mChannels); } - void Process(const AudioBlock& aData) { - if (aData.IsNull() || aData.IsMuted()) { - mSilentFramesInARow += aData.GetDuration(); - return; - } - ProcessPlanar(aData.ChannelData(), aData.GetDuration()); - } - - void ProcessPlanar(const nsTArray& aPlanar, - TrackTime aFrames) { - uint32_t lastFrameAudibleAccrossChannels = 0; - for (uint32_t channel = 0; channel < aPlanar.Length(); channel++) { - uint32_t lastSampleAudible = 0; - for (uint32_t frame = 0; frame < aFrames; frame++) { - float dbfs = dom::WebAudioUtils::ConvertLinearToDecibels( - abs(AudioSampleToFloat(aPlanar[channel][frame])), -100.f); - if (dbfs > AUDIBILITY_THREHSOLD) { - mEverAudible = true; - mSilentFramesInARow = 0; - lastSampleAudible = frame; - } - } - lastFrameAudibleAccrossChannels = - std::max(lastFrameAudibleAccrossChannels, lastSampleAudible); - } - mSilentFramesInARow += aFrames - lastFrameAudibleAccrossChannels - 1; - } - void ProcessInterleaved(const Span& aInterleaved, size_t aChannels) { MOZ_ASSERT(aInterleaved.Length() % aChannels == 0); diff --git a/dom/media/AudioSegment.h b/dom/media/AudioSegment.h index 6878509939c1..1d8b4920d574 100644 --- a/dom/media/AudioSegment.h +++ b/dom/media/AudioSegment.h @@ -208,6 +208,23 @@ struct AudioChunk { bool IsMuted() const { return mVolume == 0.0f; } + bool IsAudible() const { + for (auto&& channel : mChannelData) { + // Transform sound into dB RMS and assume that the value smaller than -100 + // is 
inaudible. + float dbrms = 0.0; + for (uint32_t idx = 0; idx < mDuration; idx++) { + dbrms += std::pow(static_cast(channel)[idx], 2); + } + dbrms /= mDuration; + dbrms = std::sqrt(dbrms) != 0.0 ? 20 * log10(dbrms) : -1000.0; + if (dbrms > -100.0) { + return true; + } + } + return false; + } + size_t SizeOfExcludingThisIfUnshared(MallocSizeOf aMallocSizeOf) const { return SizeOfExcludingThis(aMallocSizeOf, true); } diff --git a/dom/media/mediasink/AudioSink.cpp b/dom/media/mediasink/AudioSink.cpp index 0fc520b6d109..7e6d8c51b4b7 100644 --- a/dom/media/mediasink/AudioSink.cpp +++ b/dom/media/mediasink/AudioSink.cpp @@ -320,7 +320,7 @@ void AudioSink::Errored() { void AudioSink::CheckIsAudible(const AudioData* aData) { MOZ_ASSERT(aData); - mAudibilityMonitor.Process(aData); + mAudibilityMonitor.ProcessAudioData(aData); bool isAudible = mAudibilityMonitor.RecentlyAudible(); if (isAudible != mIsAudioDataAudible) { diff --git a/dom/media/mediasink/DecodedStream.cpp b/dom/media/mediasink/DecodedStream.cpp index 4fd91e18374e..fafa5f457179 100644 --- a/dom/media/mediasink/DecodedStream.cpp +++ b/dom/media/mediasink/DecodedStream.cpp @@ -679,7 +679,7 @@ void DecodedStream::SendAudio(double aVolume, void DecodedStream::CheckIsDataAudible(const AudioData* aData) { MOZ_ASSERT(aData); - mAudibilityMonitor->Process(aData); + mAudibilityMonitor->ProcessAudioData(aData); bool isAudible = mAudibilityMonitor->RecentlyAudible(); if (isAudible != mIsAudioDataAudible) { diff --git a/dom/media/webaudio/AudioBlock.h b/dom/media/webaudio/AudioBlock.h index b5178c9d8a37..abf55ae08e16 100644 --- a/dom/media/webaudio/AudioBlock.h +++ b/dom/media/webaudio/AudioBlock.h @@ -38,6 +38,7 @@ class AudioBlock : private AudioChunk { using AudioChunk::ChannelCount; using AudioChunk::ChannelData; using AudioChunk::GetDuration; + using AudioChunk::IsAudible; using AudioChunk::IsNull; using AudioChunk::SizeOfExcludingThis; using AudioChunk::SizeOfExcludingThisIfUnshared; diff --git 
a/dom/media/webaudio/AudioDestinationNode.cpp b/dom/media/webaudio/AudioDestinationNode.cpp index 04da4a09a7c8..acc36f0213b1 100644 --- a/dom/media/webaudio/AudioDestinationNode.cpp +++ b/dom/media/webaudio/AudioDestinationNode.cpp @@ -5,28 +5,25 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "AudioDestinationNode.h" - +#include "AudioContext.h" #include "AlignmentUtils.h" -#include "AudibilityMonitor.h" -#include "AudioChannelService.h" #include "AudioContext.h" -#include "AudioContext.h" -#include "AudioNodeEngine.h" -#include "AudioNodeTrack.h" #include "CubebUtils.h" -#include "MediaTrackGraph.h" -#include "mozilla/StaticPrefs_dom.h" #include "mozilla/dom/AudioDestinationNodeBinding.h" #include "mozilla/dom/BaseAudioContextBinding.h" #include "mozilla/dom/OfflineAudioCompletionEvent.h" -#include "mozilla/dom/Promise.h" +#include "mozilla/dom/power/PowerManagerService.h" #include "mozilla/dom/ScriptSettings.h" #include "mozilla/dom/WakeLock.h" -#include "mozilla/dom/power/PowerManagerService.h" +#include "AudioChannelService.h" +#include "AudioNodeEngine.h" +#include "AudioNodeTrack.h" +#include "MediaTrackGraph.h" #include "nsContentUtils.h" #include "nsIInterfaceRequestorUtils.h" #include "nsIScriptObjectPrincipal.h" #include "nsServiceManagerUtils.h" +#include "mozilla/dom/Promise.h" extern mozilla::LazyLogModule gAudioChannelLog; @@ -190,13 +187,10 @@ class DestinationNodeEngine final : public AudioNodeEngine { public: explicit DestinationNodeEngine(AudioDestinationNode* aNode) : AudioNodeEngine(aNode), - mSampleRate(CubebUtils::PreferredSampleRate()), mVolume(1.0f), - mAudibilityMonitor( - mSampleRate, - StaticPrefs::dom_media_silence_duration_for_audibility()), + mLastInputAudible(false), mSuspended(false), - mLastInputAudible(false) { + mSampleRate(CubebUtils::PreferredSampleRate()) { MOZ_ASSERT(aNode); } @@ -210,10 +204,32 @@ class DestinationNodeEngine final : public AudioNodeEngine { return; } - 
mAudibilityMonitor.Process(aInput); - bool isInputAudible = mAudibilityMonitor.RecentlyAudible(); + bool isInputAudible = + !aInput.IsNull() && !aInput.IsMuted() && aInput.IsAudible(); - if (isInputAudible != mLastInputAudible) { + auto shouldNotifyChanged = [&]() { + // We don't want to notify state changed frequently if the input track is + // composed of interleaving audible and inaudible blocks. This situation is + // really common, especially when user is using OscillatorNode to produce + // sound. Sending unnecessary runnables frequently would cause performance + // degradation. If the track contains 10 interleaving samples and 5 of them + // are audible, others are inaudible, user would tend to feel the track + // is audible. Therefore, we have the loose checking when track is + // changing from inaudible to audible, but have strict checking when + // streaming is changing from audible to inaudible. If the inaudible + // blocks continue over a specific time threshold, then we will treat the + // track as inaudible. + if (isInputAudible && !mLastInputAudible) { + return true; + } + // Use more strict condition, choosing 1 second as a threshold. 
+ if (!isInputAudible && mLastInputAudible && + aFrom - mLastInputAudibleTime >= mSampleRate) { + return true; + } + return false; + }; + if (shouldNotifyChanged()) { mLastInputAudible = isInputAudible; RefPtr track = aTrack; auto r = [track, isInputAudible]() -> void { @@ -229,6 +245,10 @@ class DestinationNodeEngine final : public AudioNodeEngine { aTrack->Graph()->DispatchToMainThreadStableState(NS_NewRunnableFunction( "dom::WebAudioAudibleStateChangedRunnable", r)); } + + if (isInputAudible) { + mLastInputAudibleTime = aFrom; + } } bool IsActive() const override { @@ -242,7 +262,7 @@ class DestinationNodeEngine final : public AudioNodeEngine { void SetDoubleParameter(uint32_t aIndex, double aParam) override { if (aIndex == VOLUME) { - mVolume = static_cast(aParam); + mVolume = aParam; } } @@ -265,11 +285,11 @@ class DestinationNodeEngine final : public AudioNodeEngine { } private: - int mSampleRate; float mVolume; - AudibilityMonitor mAudibilityMonitor; - bool mSuspended; bool mLastInputAudible; + GraphTime mLastInputAudibleTime = 0; + bool mSuspended; + int mSampleRate; }; NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioDestinationNode, AudioNode, @@ -493,7 +513,7 @@ AudioDestinationNode::WindowVolumeChanged(float aVolume, bool aMuted) { "aVolume = %f, aMuted = %s\n", this, aVolume, aMuted ? "true" : "false"); - float volume = aMuted ? 0.0f : aVolume; + float volume = aMuted ? 
0.0 : aVolume; mTrack->SetAudioOutputVolume(nullptr, volume); AudioChannelService::AudibleState audible = @@ -639,7 +659,6 @@ void AudioDestinationNode::NotifyAudibleStateChanged(bool aAudible) { if (IsCapturingAudio()) { StopAudioCapturingTrack(); } - ReleaseAudioWakeLockIfExists(); return; } diff --git a/toolkit/content/tests/browser/browser_media_wakelock.js b/toolkit/content/tests/browser/browser_media_wakelock.js index f81b75270563..bdd32899f78d 100644 --- a/toolkit/content/tests/browser/browser_media_wakelock.js +++ b/toolkit/content/tests/browser/browser_media_wakelock.js @@ -36,12 +36,6 @@ function getWakeLockState(topic, needLock, isTabInForeground) { return { check: async () => { if (needLock) { - const lockState = powerManager.getWakeLockState(topic); - info(`topic=${topic}, state=${lockState}`); - if (lockState == `locked-${tabState}`) { - ok(true, `requested '${topic}' wakelock in ${tabState}`); - return; - } await promise; ok(true, `requested '${topic}' wakelock in ${tabState}`); } else { @@ -74,63 +68,24 @@ async function waitUntilVideoStarted({ muted, volume } = {}) { ); } -async function initializeWebAudio({ suspend } = {}) { - if (suspend) { - await content.ac.suspend(); - } else { - const ac = content.ac; - if (ac.state == "running") { - return; - } - while (ac.state != "running") { - await new Promise(r => (ac.onstatechange = r)); - } - } -} - -function webaudioDocument() { - content.ac = new content.AudioContext(); - const ac = content.ac; - const dest = ac.destination; - const source = new content.OscillatorNode(ac); - source.start(ac.currentTime); - source.connect(dest); -} - async function test_media_wakelock({ description, - urlOrFunction, - additionalParams, + url, + videoAttsParams, lockAudio, lockVideo, }) { info(`- start a new test for '${description}' -`); info(`- open new foreground tab -`); - var url; - if (typeof urlOrFunction == "string") { - url = LOCATION + urlOrFunction; - } else { - url = "about:blank"; - } + url = LOCATION + 
url; const tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser, url); const browser = tab.linkedBrowser; - if (typeof urlOrFunction == "function") { - await SpecialPowers.spawn(browser, [], urlOrFunction); - } - let audioWakeLock = getWakeLockState("audio-playing", lockAudio, true); let videoWakeLock = getWakeLockState("video-playing", lockVideo, true); - var initFunction = null; - if (description.includes("web audio")) { - initFunction = initializeWebAudio; - } else { - initFunction = waitUntilVideoStarted; - } - info(`- wait for media starting playing -`); - await SpecialPowers.spawn(browser, [additionalParams], initFunction); + await SpecialPowers.spawn(browser, [videoAttsParams], waitUntilVideoStarted); await audioWakeLock.check(); await videoWakeLock.check(); @@ -159,14 +114,14 @@ async function test_media_wakelock({ add_task(async function start_tests() { await test_media_wakelock({ description: "playing video", - urlOrFunction: "file_video.html", + url: "file_video.html", lockAudio: true, lockVideo: true, }); await test_media_wakelock({ description: "playing muted video", - urlOrFunction: "file_video.html", - additionalParams: { + url: "file_video.html", + videoAttsParams: { muted: true, }, lockAudio: false, @@ -174,8 +129,8 @@ add_task(async function start_tests() { }); await test_media_wakelock({ description: "playing volume=0 video", - urlOrFunction: "file_video.html", - additionalParams: { + url: "file_video.html", + videoAttsParams: { volume: 0.0, }, lockAudio: false, @@ -183,99 +138,32 @@ add_task(async function start_tests() { }); await test_media_wakelock({ description: "playing video without audio in it", - urlOrFunction: "file_videoWithoutAudioTrack.html", + url: "file_videoWithoutAudioTrack.html", lockAudio: false, lockVideo: false, }); await test_media_wakelock({ description: "playing audio in video element", - urlOrFunction: "file_videoWithAudioOnly.html", + url: "file_videoWithAudioOnly.html", lockAudio: true, lockVideo: false, 
}); await test_media_wakelock({ description: "playing audio in audio element", - urlOrFunction: "file_mediaPlayback2.html", + url: "file_mediaPlayback2.html", lockAudio: true, lockVideo: false, }); await test_media_wakelock({ description: "playing video from media stream with audio and video tracks", - urlOrFunction: "browser_mediaStreamPlayback.html", + url: "browser_mediaStreamPlayback.html", lockAudio: true, lockVideo: true, }); await test_media_wakelock({ description: "playing video from media stream without audio track", - urlOrFunction: "browser_mediaStreamPlaybackWithoutAudio.html", + url: "browser_mediaStreamPlaybackWithoutAudio.html", lockAudio: true, lockVideo: true, }); - await test_media_wakelock({ - description: "playing audible web audio", - urlOrFunction: webaudioDocument, - lockAudio: true, - lockVideo: false, - }); - await test_media_wakelock({ - description: "suspended web audio", - urlOrFunction: webaudioDocument, - additionalParams: { - suspend: true, - }, - lockAudio: false, - lockVideo: false, - }); }); - -async function waitUntilAudioContextStarts() { - const ac = content.ac; - if (ac.state == "running") { - return; - } - - while (ac.state != "running") { - await new Promise(r => (ac.onstatechange = r)); - } -} - -add_task( - async function testBrieflyAudibleAudioContextReleasesAudioWakeLockWhenInaudible() { - const tab = await BrowserTestUtils.openNewForegroundTab( - window.gBrowser, - "about:blank" - ); - - const browser = tab.linkedBrowser; - - let audioWakeLock = getWakeLockState("audio-playing", true, true); - let videoWakeLock = getWakeLockState("video-playing", false, true); - - // Make a short noise - await SpecialPowers.spawn(browser, [], () => { - content.ac = new content.AudioContext(); - const ac = content.ac; - const dest = ac.destination; - const source = new content.OscillatorNode(ac); - source.start(ac.currentTime); - source.stop(ac.currentTime + 0.1); - source.connect(dest); - }); - - await SpecialPowers.spawn(browser, [], 
waitUntilAudioContextStarts); - info("AudioContext is running."); - - await audioWakeLock.check(); - await videoWakeLock.check(); - - await waitForTabPlayingEvent(tab, false); - - audioWakeLock = getWakeLockState("audio-playing", false, true); - videoWakeLock = getWakeLockState("video-playing", false, true); - - await audioWakeLock.check(); - await videoWakeLock.check(); - - await BrowserTestUtils.removeTab(tab); - } -); diff --git a/toolkit/content/tests/browser/browser_webAudio_silentData.js b/toolkit/content/tests/browser/browser_webAudio_silentData.js index 1229aafda251..9831ace920ce 100644 --- a/toolkit/content/tests/browser/browser_webAudio_silentData.js +++ b/toolkit/content/tests/browser/browser_webAudio_silentData.js @@ -35,10 +35,14 @@ add_task(async function testSilentAudioContext() { content.ac = new content.AudioContext(); const ac = content.ac; const dest = ac.destination; - const source = new content.OscillatorNode(content.ac); - const gain = new content.GainNode(content.ac); - gain.gain.value = 0.0; - source.connect(gain).connect(dest); + const source = ac.createBufferSource(); + const buf = ac.createBuffer(1, 3 * ac.sampleRate, ac.sampleRate); + const bufData = Cu.cloneInto(buf.getChannelData(0), {}); + for (let idx = 0; idx < buf.length; idx++) { + bufData[idx] = 0.0; + } + source.buffer = buf; + source.connect(dest); source.start(); }); info(`- check AudioContext's state -`); diff --git a/toolkit/content/tests/browser/file_webAudio.html b/toolkit/content/tests/browser/file_webAudio.html index f6fb5e7c073d..fdb1ecda9292 100644 --- a/toolkit/content/tests/browser/file_webAudio.html +++ b/toolkit/content/tests/browser/file_webAudio.html @@ -3,20 +3,12 @@ - -

-
-
 
+
+
+
 
diff --git a/toolkit/content/tests/browser/head.js b/toolkit/content/tests/browser/head.js
index 75be2554dd62..b14974fa04c1 100644
--- a/toolkit/content/tests/browser/head.js
+++ b/toolkit/content/tests/browser/head.js
@@ -64,7 +64,7 @@ async function waitForTabPlayingEvent(tab, expectPlaying) {
   if (tab.soundPlaying == expectPlaying) {
     ok(true, "The tab should " + (expectPlaying ? "" : "not ") + "be playing");
   } else {
     info("Playing state doesn't match, wait for attributes changes.");
     await BrowserTestUtils.waitForEvent(
       tab,
       "TabAttrModified",