Bug 1553215 - Implement the correct behaviour for MediaStreamAudioSource. r=karlt,pehrsons

It should pick the right track and lock to it, regardless of whether it's
still in the MediaStream.

The test fix is needed because we don't expose tracks until the
HTMLMediaElement has started loading, I think.

This aligns with a couple of spec changes:
- https://github.com/WebAudio/web-audio-api/issues/264
- https://github.com/WebAudio/web-audio-api/pull/1829/files

and also throws when no valid track is found in the MediaStream, either
because it contains only video tracks or because it has no tracks at all.
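
A minimal sketch of the resulting web-facing behaviour (the getUserMedia calls
and error handling here are illustrative assumptions, not part of this patch):

async function demo() {
  const ctx = new AudioContext();

  // A stream with only video tracks: the constructor now throws.
  const videoOnly = await navigator.mediaDevices.getUserMedia({ video: true });
  try {
    ctx.createMediaStreamSource(videoOnly);
  } catch (e) {
    console.log(e.name); // "InvalidStateError"
  }

  // With an audio track present, the node locks onto the track whose id sorts
  // first; removing that track from the stream afterwards does not detach it.
  const mic = await navigator.mediaDevices.getUserMedia({ audio: true });
  const source = ctx.createMediaStreamSource(mic);
  mic.removeTrack(mic.getAudioTracks()[0]);
  source.connect(ctx.destination); // still outputs the originally picked track
}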

Differential Revision: https://phabricator.services.mozilla.com/D32176

--HG--
extra : moz-landing-system : lando
Paul Adenot 2019-06-10 15:33:12 +00:00
Parent c2b50727cc
Commit 2e2b6d25cb
4 changed files with 98 additions and 49 deletions


@@ -14,7 +14,8 @@ namespace mozilla {
namespace dom {
MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* aContext)
: MediaStreamAudioSourceNode(aContext) {}
: MediaStreamAudioSourceNode(aContext, TrackChangeBehavior::FollowChanges) {
}
/* static */
already_AddRefed<MediaElementAudioSourceNode>


@@ -13,6 +13,7 @@
#include "mozilla/CORSMode.h"
#include "nsContentUtils.h"
#include "nsIScriptError.h"
#include "nsID.h"
namespace mozilla {
namespace dom {
@@ -37,9 +38,11 @@ NS_INTERFACE_MAP_END_INHERITING(AudioNode)
NS_IMPL_ADDREF_INHERITED(MediaStreamAudioSourceNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(MediaStreamAudioSourceNode, AudioNode)
MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* aContext)
MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(
AudioContext* aContext, TrackChangeBehavior aBehavior)
: AudioNode(aContext, 2, ChannelCountMode::Max,
ChannelInterpretation::Speakers) {}
ChannelInterpretation::Speakers),
mBehavior(aBehavior) {}
/* static */
already_AddRefed<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::Create(
@@ -63,7 +66,7 @@ already_AddRefed<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::Create(
}
RefPtr<MediaStreamAudioSourceNode> node =
new MediaStreamAudioSourceNode(&aAudioContext);
new MediaStreamAudioSourceNode(&aAudioContext, LockOnTrackPicked);
node->Init(aOptions.mMediaStream, aRv);
if (aRv.Failed()) {
@@ -96,7 +99,7 @@ void MediaStreamAudioSourceNode::Init(DOMMediaStream* aMediaStream,
if (mInputStream->Active()) {
NotifyActive();
}
AttachToFirstTrack(mInputStream);
AttachToRightTrack(mInputStream, aRv);
}
void MediaStreamAudioSourceNode::Destroy() {
@@ -137,14 +140,35 @@ void MediaStreamAudioSourceNode::DetachFromTrack() {
}
}
void MediaStreamAudioSourceNode::AttachToFirstTrack(
const RefPtr<DOMMediaStream>& aMediaStream) {
static int AudioTrackCompare(const RefPtr<AudioStreamTrack>& aLhs,
const RefPtr<AudioStreamTrack>& aRhs) {
nsAutoStringN<NSID_LENGTH> IDLhs;
nsAutoStringN<NSID_LENGTH> IDRhs;
aLhs->GetId(IDLhs);
aRhs->GetId(IDRhs);
return NS_ConvertUTF16toUTF8(IDLhs).Compare(
NS_ConvertUTF16toUTF8(IDRhs).get());
}
void MediaStreamAudioSourceNode::AttachToRightTrack(
const RefPtr<DOMMediaStream>& aMediaStream, ErrorResult& aRv) {
nsTArray<RefPtr<AudioStreamTrack>> tracks;
aMediaStream->GetAudioTracks(tracks);
if (tracks.IsEmpty() && mBehavior == LockOnTrackPicked) {
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return;
}
// Sort the tracks to get a stable order, by lexicographic ordering on the
// sequences of code unit values of their IDs.
tracks.Sort(AudioTrackCompare);
for (const RefPtr<AudioStreamTrack>& track : tracks) {
if (track->Ended()) {
continue;
if (mBehavior == FollowChanges) {
if (track->Ended()) {
continue;
}
}
AttachToTrack(track);
@@ -158,6 +182,9 @@ void MediaStreamAudioSourceNode::AttachToFirstTrack(
void MediaStreamAudioSourceNode::NotifyTrackAdded(
const RefPtr<MediaStreamTrack>& aTrack) {
if (mBehavior != FollowChanges) {
return;
}
if (mInputTrack) {
return;
}
@@ -171,12 +198,14 @@ void MediaStreamAudioSourceNode::NotifyTrackAdded(
void MediaStreamAudioSourceNode::NotifyTrackRemoved(
const RefPtr<MediaStreamTrack>& aTrack) {
if (aTrack != mInputTrack) {
return;
}
if (mBehavior == FollowChanges) {
if (aTrack != mInputTrack) {
return;
}
DetachFromTrack();
AttachToFirstTrack(mInputStream);
DetachFromTrack();
AttachToRightTrack(mInputStream, IgnoreErrors());
}
}
void MediaStreamAudioSourceNode::NotifyActive() {
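
The selection rule in AttachToRightTrack above (sort the stream's audio tracks
by id using code-unit order, then attach to the first usable one) can be
sketched in script; pickAudioTrack is a hypothetical illustration, not part of
the patch:

// Hypothetical helper mirroring AttachToRightTrack's selection: sort the
// audio tracks by id (lexicographic on code units) and return the first one.
function pickAudioTrack(stream) {
  const tracks = stream.getAudioTracks().slice().sort((a, b) =>
    a.id < b.id ? -1 : a.id > b.id ? 1 : 0);
  return tracks.length ? tracks[0] : null; // null corresponds to the throwing case
}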


@@ -80,8 +80,10 @@ class MediaStreamAudioSourceNode
// Detaches from the currently attached track if there is one.
void DetachFromTrack();
// Attaches to the first available audio track in aMediaStream.
void AttachToFirstTrack(const RefPtr<DOMMediaStream>& aMediaStream);
// Attaches to the first audio track in the MediaStream, when the tracks are
// ordered by id.
void AttachToRightTrack(const RefPtr<DOMMediaStream>& aMediaStream,
ErrorResult& aRv);
// From DOMMediaStream::TrackListener.
void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override;
@@ -91,13 +93,27 @@ class MediaStreamAudioSourceNode
// From PrincipalChangeObserver<MediaStreamTrack>.
void PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) override;
// This allows implementing the correct behaviour for both
// MediaElementAudioSourceNode and MediaStreamAudioSourceNode, which share
// most of their behaviour.
enum TrackChangeBehavior {
// MediaStreamAudioSourceNode locks on the track it picked, and never
// changes.
LockOnTrackPicked,
// MediaElementAudioSourceNode can change track, depending on what the
// HTMLMediaElement does.
FollowChanges
};
protected:
explicit MediaStreamAudioSourceNode(AudioContext* aContext);
MediaStreamAudioSourceNode(AudioContext* aContext,
TrackChangeBehavior aBehavior);
void Init(DOMMediaStream* aMediaStream, ErrorResult& aRv);
virtual void Destroy();
virtual ~MediaStreamAudioSourceNode();
private:
const TrackChangeBehavior mBehavior;
RefPtr<MediaInputPort> mInputPort;
RefPtr<DOMMediaStream> mInputStream;


@@ -12,45 +12,48 @@
SimpleTest.waitForExplicitFinish();
var audio = new Audio("http://example.org:80/tests/dom/media/webaudio/test/small-shot.ogg");
audio.load();
var context = new AudioContext();
var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
var sp = context.createScriptProcessor(2048, 1);
node.connect(sp);
var nonzeroSampleCount = 0;
var complete = false;
var iterationCount = 0;
audio.onloadedmetadata = function() {
var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
var sp = context.createScriptProcessor(2048, 1);
node.connect(sp);
var nonzeroSampleCount = 0;
var complete = false;
var iterationCount = 0;
// This test ensures we receive at least expectedSampleCount nonzero samples
function processSamples(e) {
if (complete) {
return;
}
// This test ensures we receive at least expectedSampleCount nonzero samples
function processSamples(e) {
if (complete) {
return;
}
if (iterationCount == 0) {
// Don't start playing the audio until the AudioContext stuff is connected
// and running.
audio.play();
}
++iterationCount;
if (iterationCount == 0) {
// Don't start playing the audio until the AudioContext stuff is connected
// and running.
audio.play();
}
++iterationCount;
var buf = e.inputBuffer.getChannelData(0);
var nonzeroSamplesThisBuffer = 0;
for (var i = 0; i < buf.length; ++i) {
if (buf[i] != 0) {
++nonzeroSamplesThisBuffer;
var buf = e.inputBuffer.getChannelData(0);
var nonzeroSamplesThisBuffer = 0;
for (var i = 0; i < buf.length; ++i) {
if (buf[i] != 0) {
++nonzeroSamplesThisBuffer;
}
}
is(nonzeroSamplesThisBuffer, 0,
"Checking all samples are zero");
if (iterationCount >= 20) {
SimpleTest.finish();
complete = true;
}
}
is(nonzeroSamplesThisBuffer, 0,
"Checking all samples are zero");
if (iterationCount >= 20) {
SimpleTest.finish();
complete = true;
}
}
audio.oncanplaythrough = function() {
sp.onaudioprocess = processSamples;
};
audio.oncanplaythrough = function() {
sp.onaudioprocess = processSamples;
};
}
</script>
</pre>
</body>