b=910174 add DelayNode's tail-time reference as soon as the engine receives sound r=ehsan

This removes the dependence on AllInputsFinished(), which didn't return true
for many input types (for example, when the delay's input arrives through an
intermediate node such as a GainNode rather than directly from a finite source).

The DelayProcessor is no longer continuously reset (bug 921457), and the
tail-time reference is now correctly added again when new inputs are connected
after all previous inputs have finished.
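
The new rule is simple: the engine takes a tail-time self-reference as soon as
it sees a non-silent input block, reloads its countdown every time sound
arrives, and drops the reference once a full maxDelayTime of silence has
drained through the delay line. A minimal, self-contained model of that rule
(illustrative only: DelayTailModel and the boolean flag are invented here, and
the real engine signals the main thread with PlayingRefChanged ADDREF/RELEASE
messages instead):

#include <cstdint>
#include <cstdio>

constexpr int32_t kBlockSize = 128;    // WEBAUDIO_BLOCK_SIZE in Gecko

struct DelayTailModel {
  int32_t maxDelayFrames;              // capacity of the delay line
  int32_t leftOverData = INT32_MIN;    // INT32_MIN means "no reference held"
  bool referenced = false;             // stands in for the ADDREF/RELEASE messages

  void ProcessBlock(bool inputIsSilent) {
    if (!inputIsSilent) {
      if (leftOverData <= 0) {
        referenced = true;             // would dispatch PlayingRefChanged::ADDREF
      }
      leftOverData = maxDelayFrames;   // restart the countdown on every sound block
    } else if (leftOverData != INT32_MIN) {
      leftOverData -= kBlockSize;      // one more block of silence has drained
      if (leftOverData <= 0) {
        referenced = false;            // would dispatch PlayingRefChanged::RELEASE
        leftOverData = INT32_MIN;      // (the real engine's post-release bookkeeping differs)
      }
    }
  }
};

int main() {
  DelayTailModel node{48000};          // ~1 s of delay at 48 kHz
  node.ProcessBlock(false);            // sound arrives: reference taken
  for (int i = 0; i < 374; ++i) node.ProcessBlock(true);
  std::printf("referenced after 374 silent blocks: %d\n", node.referenced);  // 1
  node.ProcessBlock(true);             // 375th silent block drains the last frames
  std::printf("referenced after 375 silent blocks: %d\n", node.referenced);  // 0
}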

--HG--
extra : rebase_source : b85c62305a6fcfce57bd40a11edaeaaf2a63c188
Karl Tomlinson 2013-10-01 09:50:04 +13:00
Parent 960af07a87
Commit fec6395878
6 changed files with 215 additions and 19 deletions


@@ -238,18 +238,6 @@ AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
   mChannelInterpretation = aChannelInterpretation;
 }
 
-bool
-AudioNodeStream::AllInputsFinished() const
-{
-  uint32_t inputCount = mInputs.Length();
-  for (uint32_t i = 0; i < inputCount; ++i) {
-    if (!mInputs[i]->GetSource()->IsFinishedOnGraphThread()) {
-      return false;
-    }
-  }
-  return !!inputCount;
-}
-
 uint32_t
 AudioNodeStream::ComputeFinalOuputChannelCount(uint32_t aInputChannelCount)
 {


@@ -100,7 +100,6 @@ public:
                                          dom::ChannelInterpretation aChannelInterpretation);
   virtual void ProduceOutput(GraphTime aFrom, GraphTime aTo);
   TrackTicks GetCurrentPosition();
-  bool AllInputsFinished() const;
   bool IsAudioParamStream() const
   {
     return mAudioParamStream;


@@ -85,16 +85,13 @@ public:
                                  aInput.mChannelData.Length();
 
-    bool playedBackAllLeftOvers = false;
-    if (mProcessor.BufferChannelCount() &&
-        mLeftOverData == INT32_MIN &&
-        aStream->AllInputsFinished()) {
-      mLeftOverData = mProcessor.MaxDelayFrames() - WEBAUDIO_BLOCK_SIZE;
-      if (mLeftOverData > 0) {
+    if (!aInput.IsNull()) {
+      if (mLeftOverData <= 0) {
         nsRefPtr<PlayingRefChanged> refchanged =
           new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
         NS_DispatchToMainThread(refchanged);
       }
+      mLeftOverData = mProcessor.MaxDelayFrames();
     } else if (mLeftOverData != INT32_MIN) {
       mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
       if (mLeftOverData <= 0) {
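
Once reloaded, mLeftOverData is decremented by WEBAUDIO_BLOCK_SIZE (128 frames)
for each silent block, so the tail-time reference survives roughly maxDelayTime
of silence after the last sound; the hunk is cut off just before the release
branch. A back-of-the-envelope check of that countdown, assuming a 48 kHz
context and a 1-second maximum delay (illustrative values only):

#include <cstdio>

int main() {
  const int kBlockSize = 128;             // WEBAUDIO_BLOCK_SIZE
  const int sampleRate = 48000;           // assumed context rate
  const double maxDelayTime = 1.0;        // seconds passed to createDelay()
  const int maxDelayFrames = static_cast<int>(maxDelayTime * sampleRate);
  // Silent blocks processed before the countdown reaches zero.
  const int silentBlocks = (maxDelayFrames + kBlockSize - 1) / kBlockSize;
  std::printf("%d blocks (~%.2f s of silence)\n",
              silentBlocks, silentBlocks * double(kBlockSize) / sampleRate);
  // Prints: 375 blocks (~1.00 s of silence)
}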


@@ -77,6 +77,8 @@ support-files =
 [test_delayNodeCycles.html]
 [test_delayNodeSmallMaxDelay.html]
 [test_delayNodeTailIncrease.html]
+[test_delayNodeTailWithGain.html]
+[test_delayNodeTailWithReconnect.html]
 [test_delayNodeWithGain.html]
 [test_dynamicsCompressorNode.html]
 [test_gainNode.html]


@@ -0,0 +1,72 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test tail time lifetime of DelayNode indirectly connected to source</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="webaudio.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">
SimpleTest.waitForExplicitFinish();
const signalLength = 130;
const bufferSize = 1024;
// Delay should be long enough to allow CC to run
const delayBufferCount = 50;
const delayLength = delayBufferCount * bufferSize + 700;
var count = 0;
function applySignal(buffer, offset) {
for (var i = 0; i < signalLength; ++i) {
buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength);
}
}
function onAudioProcess(e) {
switch(count) {
case 5:
SpecialPowers.forceGC();
SpecialPowers.forceCC();
break;
case delayBufferCount:
var offset = delayLength - count * bufferSize;
var ctx = e.target.context;
var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate);
applySignal(expected, offset);
compareBuffers(e.inputBuffer.getChannelData(0), expected.getChannelData(0));
SimpleTest.finish();
}
count++;
}
function startTest() {
var ctx = new AudioContext();
var processor = ctx.createScriptProcessor(bufferSize, 1, 0);
processor.onaudioprocess = onAudioProcess;
var delayDuration = delayLength / ctx.sampleRate;
var delay = ctx.createDelay(delayDuration);
delay.delayTime.value = delayDuration;
delay.connect(processor);
var gain = ctx.createGain();
gain.connect(delay);
// Short signal that finishes before garbage collection
var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate);
applySignal(buffer, 0);
var source = ctx.createBufferSource();
source.buffer = buffer;
source.start();
source.connect(gain);
};
startTest();
</script>
</pre>
</body>
</html>


@@ -0,0 +1,138 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test tail time lifetime of DelayNode after input finishes and new input added</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="webaudio.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">
SimpleTest.waitForExplicitFinish();
// The buffer source will start on a block boundary, so keeping the signal
// within one block ensures that it will not cross AudioProcessingEvent buffer
// boundaries.
const signalLength = 128;
const bufferSize = 1024;
// Delay should be long enough to allow CC to run
var delayBufferCount = 50;
var delayBufferOffset;
const delayLength = delayBufferCount * bufferSize;
var phase = "initial";
var sourceCount = 0;
var delayCount = 0;
var oscillator;
var delay;
var source;
function applySignal(buffer, offset) {
for (var i = 0; i < signalLength; ++i) {
buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength);
}
}
function bufferIsSilent(buffer, out) {
for (var i = 0; i < buffer.length; ++i) {
if (buffer.getChannelData(0)[i] != 0) {
if (out) {
out.soundOffset = i;
}
return false;
}
}
return true;
}
function onDelayOutput(e) {
switch(phase) {
case "initial":
// Wait for oscillator sound to exit delay
if (bufferIsSilent(e.inputBuffer))
break;
phase = "played oscillator";
break;
case "played oscillator":
// First tail time has expired. Start second source and remove references
// to the delay and connected second source.
oscillator.disconnect();
source.connect(delay);
source.start();
source = null;
delay = null;
phase = "started second source";
break;
case "second tail time":
if (delayCount == delayBufferCount) {
var ctx = e.target.context;
var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate);
applySignal(expected, delayBufferOffset);
compareBuffers(e.inputBuffer.getChannelData(0), expected.getChannelData(0));
e.target.onaudioprocess = null;
SimpleTest.finish();
}
}
delayCount++;
}
function onSourceOutput(e) {
switch(phase) {
case "started second source":
var out = {};
if (!bufferIsSilent(e.inputBuffer, out)) {
delayBufferCount += sourceCount;
delayBufferOffset = out.soundOffset;
phase = "played second source";
}
break;
case "played second source":
SpecialPowers.forceGC();
SpecialPowers.forceCC();
phase = "second tail time";
e.target.onaudioprocess = null;
}
sourceCount++;
}
function startTest() {
var ctx = new AudioContext();
var delayDuration = delayLength / ctx.sampleRate;
delay = ctx.createDelay(delayDuration);
delay.delayTime.value = delayDuration;
var processor1 = ctx.createScriptProcessor(bufferSize, 1, 0);
delay.connect(processor1);
processor1.onaudioprocess = onDelayOutput;
processor1.connect(ctx.destination); // work around bug 916387
// Signal to trigger initial tail time reference
oscillator = ctx.createOscillator();
oscillator.start(0);
oscillator.stop(100/ctx.sampleRate);
oscillator.connect(delay);
// Short signal, not started yet, with a ScriptProcessor to detect when it
// starts. It should finish before garbage collection.
var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate);
applySignal(buffer, 0);
source = ctx.createBufferSource();
source.buffer = buffer;
var processor2 = ctx.createScriptProcessor(bufferSize, 1, 0);
source.connect(processor2);
processor2.onaudioprocess = onSourceOutput;
processor2.connect(ctx.destination); // guard against bug 916387
};
startTest();
</script>
</pre>
</body>
</html>