зеркало из https://github.com/mozilla/gecko-dev.git
Bug 1324548 - Tests for MediaStreamTrackAudioSourceNode. r=pehrsons
Differential Revision: https://phabricator.services.mozilla.com/D16066 --HG-- extra : moz-landing-system : lando
This commit is contained in:
Родитель
6bc73bc6a2
Коммит
039c642c8e
|
@ -196,6 +196,9 @@ tags=capturestream
|
|||
[test_mediaStreamAudioSourceNodePassThrough.html]
|
||||
[test_mediaStreamAudioSourceNodeResampling.html]
|
||||
tags=capturestream
|
||||
[test_mediaStreamTrackAudioSourceNode.html]
|
||||
[test_mediaStreamTrackAudioSourceNodeVideo.html]
|
||||
[test_mediaStreamTrackAudioSourceNodeCrossOrigin.html]
|
||||
[test_mixingRules.html]
|
||||
skip-if = toolkit == 'android' # bug 1091965
|
||||
[test_nodeToParamConnection.html]
|
||||
|
|
|
@ -14,8 +14,8 @@ SimpleTest.requestFlakyTimeout("gUM and WebAudio data is async to main thread. "
|
|||
"We need a timeout to see that something does " +
|
||||
"NOT happen to data.");
|
||||
|
||||
var context = new AudioContext();
|
||||
var analyser = context.createAnalyser();
|
||||
let context = new AudioContext();
|
||||
let analyser = context.createAnalyser();
|
||||
|
||||
function wait(millis, resolveWithThis) {
|
||||
return new Promise(resolve => setTimeout(() => resolve(resolveWithThis), millis));
|
||||
|
@ -26,15 +26,15 @@ function binIndexForFrequency(frequency) {
|
|||
}
|
||||
|
||||
function waitForAudio(analysisFunction, cancelPromise) {
|
||||
var data = new Uint8Array(analyser.frequencyBinCount);
|
||||
var cancelled = false;
|
||||
var cancelledMsg = "";
|
||||
let data = new Uint8Array(analyser.frequencyBinCount);
|
||||
let cancelled = false;
|
||||
let cancelledMsg = "";
|
||||
cancelPromise.then(msg => {
|
||||
cancelled = true;
|
||||
cancelledMsg = msg;
|
||||
});
|
||||
return new Promise((resolve, reject) => {
|
||||
var loop = () => {
|
||||
let loop = () => {
|
||||
analyser.getByteFrequencyData(data);
|
||||
if (cancelled) {
|
||||
reject(new Error("waitForAudio cancelled: " + cancelledMsg));
|
||||
|
@ -50,20 +50,8 @@ function waitForAudio(analysisFunction, cancelPromise) {
|
|||
});
|
||||
}
|
||||
|
||||
SpecialPowers.pushPrefEnv({
|
||||
set: [
|
||||
// This test expects the fake audio device, specifically for the tones
|
||||
// it outputs. Explicitly disable the audio loopback device and enable
|
||||
// fake streams.
|
||||
['media.audio_loopback_dev', ''],
|
||||
['media.navigator.streams.fake', true]
|
||||
]
|
||||
}).then(async () => {
|
||||
async function test(sourceNode) {
|
||||
try {
|
||||
let stream = await navigator.mediaDevices.getUserMedia({audio: true});
|
||||
stream.onended = () => ended = true;
|
||||
let source = context.createMediaStreamSource(stream);
|
||||
source.connect(analyser);
|
||||
await analyser.connect(context.destination);
|
||||
|
||||
ok(true, "Waiting for audio to pass through the analyser")
|
||||
|
@ -83,12 +71,43 @@ SpecialPowers.pushPrefEnv({
|
|||
() => Promise.resolve());
|
||||
|
||||
ok(true, "Audio is still flowing");
|
||||
SimpleTest.finish();
|
||||
} catch(e) {
|
||||
ok(false, "Error executing test: " + e + (e.stack ? "\n" + e.stack : ""));
|
||||
SimpleTest.finish();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
(async function() {
|
||||
try {
|
||||
await SpecialPowers.pushPrefEnv({
|
||||
set: [
|
||||
// This test expects the fake audio device, specifically for the tones
|
||||
// it outputs. Explicitly disable the audio loopback device and enable
|
||||
// fake streams.
|
||||
['media.audio_loopback_dev', ''],
|
||||
['media.navigator.streams.fake', true]
|
||||
]
|
||||
});
|
||||
|
||||
// Test stream source GC
|
||||
let stream = await navigator.mediaDevices.getUserMedia({audio: true});
|
||||
let source = context.createMediaStreamSource(stream);
|
||||
stream = null;
|
||||
source.connect(analyser);
|
||||
await test(source);
|
||||
|
||||
// Test track source GC
|
||||
stream = await navigator.mediaDevices.getUserMedia({audio: true});
|
||||
source = context.createMediaStreamTrackSource(stream.getAudioTracks()[0]);
|
||||
stream = null;
|
||||
source.connect(analyser);
|
||||
await test(source);
|
||||
} catch(e) {
|
||||
ok(false, `Error executing test: ${e}${e.stack ? "\n" + e.stack : ""}`);
|
||||
} finally {
|
||||
SimpleTest.finish();
|
||||
}
|
||||
})();
|
||||
</script>
|
||||
</pre>
|
||||
</body>
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<title>Test MediaStreamTrackAudioSourceNode processing is correct</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="webaudio.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">

// Build a two-channel test buffer: a 440 Hz sine in channel 0 and its
// inverse in channel 1, so per-channel identity is easy to verify downstream.
function createBuffer(context) {
  let buffer = context.createBuffer(2, 2048, context.sampleRate);
  for (let i = 0; i < 2048; ++i) {
    buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
    buffer.getChannelData(1)[i] = -buffer.getChannelData(0)[i];
  }
  return buffer;
}

let gTest = {
  length: 2048,
  skipOfflineContextTests: true,
  createGraph: function(context) {
    // Play the buffer in a separate AudioContext, routed through a
    // MediaStreamAudioDestinationNode, to obtain a live MediaStreamTrack
    // to feed the node under test.
    let sourceGraph = new AudioContext();
    let source = sourceGraph.createBufferSource();
    source.buffer = createBuffer(context);
    let dest = sourceGraph.createMediaStreamDestination();
    source.connect(dest);

    // Extract first audio track from dest.stream
    let track = dest.stream.getAudioTracks()[0];

    source.start(0);

    let mediaStreamTrackSource = new MediaStreamTrackAudioSourceNode(context, { mediaStreamTrack: track });
    // channelCount and channelCountMode should have no effect
    mediaStreamTrackSource.channelCount = 1;
    mediaStreamTrackSource.channelCountMode = "explicit";
    return mediaStreamTrackSource;
  },
  createExpectedBuffers: function(context) {
    // The track should arrive unmodified, so the expected output is the
    // same buffer we fed in.
    return createBuffer(context);
  },
};

runTest();

</script>
</pre>
</body>
</html>
|
@ -0,0 +1,53 @@
|
|||
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<title>Test MediaStreamTrackAudioSourceNode doesn't get data from cross-origin media resources</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">
SimpleTest.waitForExplicitFinish();

const CROSS_ORIGIN_URL = "http://example.org:80/tests/dom/media/webaudio/test/sine-440-10s.opus";
let iterationCount = 0;
let context = null;
// Hold a reference to the ScriptProcessorNode so it isn't GC'd mid-test;
// released once we have sampled enough buffers.
let sp = null;

function processSamples(e) {
  ++iterationCount;

  let buf = e.inputBuffer.getChannelData(0);
  let nonzeroSamplesThisBuffer = 0;
  for (let i = 0; i < buf.length; ++i) {
    if (buf[i] != 0) {
      ++nonzeroSamplesThisBuffer;
    }
  }
  // Cross-origin media must be rendered as silence to Web Audio.
  is(nonzeroSamplesThisBuffer, 0,
     "a source that is cross origin cannot be inspected by Web Audio");

  // 40 buffers of 2048 frames is enough audio to be confident no data leaked.
  if (iterationCount == 40) {
    sp = null;
    context.close();
    SimpleTest.finish();
  }
}

let audio = new Audio();
audio.src = CROSS_ORIGIN_URL;
audio.onloadedmetadata = function () {
  context = new AudioContext();
  let stream = audio.mozCaptureStream();
  let track = stream.getAudioTracks()[0];
  let node = context.createMediaStreamTrackSource(track);
  node.connect(context.destination);
  sp = context.createScriptProcessor(2048, 1);
  sp.onaudioprocess = processSamples;
  node.connect(sp);
};

</script>
</pre>
</body>
</html>
|
@ -0,0 +1,27 @@
|
|||
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<title>Test MediaStreamTrackAudioSourceNode throw video track</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="/tests/dom/media/webaudio/test/webaudio.js"></script>
<script type="text/javascript" src="/tests/dom/media/tests/mochitest/head.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">
let context = new AudioContext();
// A canvas capture stream yields a video-only track to feed the constructor.
let canvas = document.createElement("canvas");
canvas.getContext("2d");
let track = canvas.captureStream().getTracks()[0];

// Constructing a MediaStreamTrackAudioSourceNode from a non-audio track
// must throw InvalidStateError.
expectException(() => {
  new MediaStreamTrackAudioSourceNode(
    context,
    { mediaStreamTrack: track });
}, DOMException.INVALID_STATE_ERR);
</script>
</pre>
</body>
</html>
|
Загрузка…
Ссылка в новой задаче