Bug 880265 - Remove AudioBufferSourceNode.gain; r=ehsan

This commit is contained in:
Sankha Narayan Guria 2013-06-08 00:55:04 +05:30
Родитель 835746d2b7
Коммит d8379b657e
11 изменённых файлов: 52 добавлений и 290 удалений

Просмотреть файл

@@ -8,8 +8,9 @@
#include "mozilla/dom/AudioBufferSourceNodeBinding.h"
#include "nsMathUtils.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioDestinationNode.h"
#include "PannerNode.h"
#include "GainProcessor.h"
#include "speex/speex_resampler.h"
#include <limits>
@@ -19,7 +20,6 @@ namespace dom {
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioBufferSourceNode)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mBuffer)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mPlaybackRate)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mGain)
if (tmp->Context()) {
// AudioNode's Unlink implementation disconnects us from the graph
// too, but we need to do this right here to make sure that
@@ -33,7 +33,6 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioBufferSourceNode, AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mBuffer)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPlaybackRate)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mGain)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioBufferSourceNode)
@@ -42,20 +41,19 @@ NS_INTERFACE_MAP_END_INHERITING(AudioNode)
NS_IMPL_ADDREF_INHERITED(AudioBufferSourceNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioBufferSourceNode, AudioNode)
class AudioBufferSourceNodeEngine : public AudioNodeEngine,
public GainProcessor
class AudioBufferSourceNodeEngine : public AudioNodeEngine
{
public:
explicit AudioBufferSourceNodeEngine(AudioNode* aNode,
AudioDestinationNode* aDestination) :
AudioNodeEngine(aNode),
GainProcessor(aDestination),
mStart(0), mStop(TRACK_TICKS_MAX),
mResampler(nullptr),
mOffset(0), mDuration(0),
mLoopStart(0), mLoopEnd(0),
mBufferSampleRate(0), mPosition(0), mChannels(0), mPlaybackRate(1.0f),
mDopplerShift(1.0f),
mDestination(static_cast<AudioNodeStream*>(aDestination->Stream())),
mPlaybackRateTimeline(1.0f), mLoop(false)
{}
@@ -84,9 +82,6 @@ public:
}
WebAudioUtils::ConvertAudioParamToTicks(mPlaybackRateTimeline, nullptr, mDestination);
break;
case AudioBufferSourceNode::GAIN:
SetGainParameter(aValue);
break;
default:
NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter");
}
@@ -401,21 +396,6 @@ public:
}
}
// Process the gain on the AudioBufferSourceNode
if (!aOutput->IsNull()) {
if (!mGain.HasSimpleValue() &&
aOutput->mBuffer == mBuffer) {
// If we have borrowed out buffer, make sure to allocate a new one in case
// the gain value is not a simple value.
nsTArray<const void*> oldChannels;
oldChannels.AppendElements(aOutput->mChannelData);
AllocateAudioBlock(channels, aOutput);
ProcessGain(aStream, 1.0f, oldChannels, aOutput);
} else {
ProcessGain(aStream, 1.0f, aOutput->mChannelData, aOutput);
}
}
// We've finished if we've gone past mStop, or if we're past mDuration when
// looping is disabled.
if (currentPosition >= mStop ||
@@ -437,6 +417,7 @@ public:
uint32_t mChannels;
float mPlaybackRate;
float mDopplerShift;
AudioNodeStream* mDestination;
AudioParamTimeline mPlaybackRateTimeline;
bool mLoop;
};
@@ -451,15 +432,13 @@ AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
, mOffset(0.0)
, mDuration(std::numeric_limits<double>::min())
, mPlaybackRate(new AudioParam(this, SendPlaybackRateToStream, 1.0f))
, mGain(new AudioParam(this, SendGainToStream, 1.0f))
, mLoop(false)
, mStartCalled(false)
, mStopped(false)
{
AudioBufferSourceNodeEngine* engine =
new AudioBufferSourceNodeEngine(this, aContext->Destination());
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
mStream = aContext->Graph()->CreateAudioNodeStream(
new AudioBufferSourceNodeEngine(this, aContext->Destination()),
MediaStreamGraph::INTERNAL_STREAM);
mStream->AddMainThreadListener(this);
}
@@ -642,13 +621,6 @@ AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode)
SendTimelineParameterToStream(This, PLAYBACKRATE, *This->mPlaybackRate);
}
void
AudioBufferSourceNode::SendGainToStream(AudioNode* aNode)
{
AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
SendTimelineParameterToStream(This, GAIN, *This->mGain);
}
void
AudioBufferSourceNode::SendDopplerShiftToStream(double aDopplerShift)
{

Просмотреть файл

@@ -76,10 +76,6 @@ public:
{
return mPlaybackRate;
}
AudioParam* Gain() const
{
return mGain;
}
bool Loop() const
{
return mLoop;
@@ -128,7 +124,6 @@ private:
LOOPSTART,
LOOPEND,
PLAYBACKRATE,
GAIN,
DOPPLERSHIFT
};
@@ -138,7 +133,6 @@ private:
double aOffset,
double aDuration);
static void SendPlaybackRateToStream(AudioNode* aNode);
static void SendGainToStream(AudioNode* aNode);
private:
double mLoopStart;
@@ -147,7 +141,6 @@ private:
double mDuration;
nsRefPtr<AudioBuffer> mBuffer;
nsRefPtr<AudioParam> mPlaybackRate;
nsRefPtr<AudioParam> mGain;
SelfReference<AudioBufferSourceNode> mPlayingRef; // a reference to self while playing
bool mLoop;
bool mStartCalled;

Просмотреть файл

@@ -7,7 +7,9 @@
#include "GainNode.h"
#include "mozilla/dom/GainNodeBinding.h"
#include "AudioNodeEngine.h"
#include "GainProcessor.h"
#include "AudioNodeStream.h"
#include "AudioDestinationNode.h"
#include "WebAudioUtils.h"
namespace mozilla {
namespace dom {
@@ -21,16 +23,23 @@ NS_INTERFACE_MAP_END_INHERITING(AudioNode)
NS_IMPL_ADDREF_INHERITED(GainNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(GainNode, AudioNode)
class GainNodeEngine : public AudioNodeEngine,
public GainProcessor
class GainNodeEngine : public AudioNodeEngine
{
public:
GainNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination)
: AudioNodeEngine(aNode)
, GainProcessor(aDestination)
, mSource(nullptr)
, mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
// Keep the default value in sync with the default value in GainNode::GainNode.
, mGain(1.f)
{
}
void SetSourceStream(AudioNodeStream* aSource)
{
mSource = aSource;
}
enum Parameters {
GAIN
};
@@ -40,7 +49,9 @@ public:
{
switch (aIndex) {
case GAIN:
SetGainParameter(aValue);
MOZ_ASSERT(mSource && mDestination);
mGain = aValue;
WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
break;
default:
NS_ERROR("Bad GainNodeEngine TimelineParameter");
@@ -57,18 +68,37 @@ public:
if (aInput.IsNull()) {
// If input is silent, so is the output
aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
} else if (mGain.HasSimpleValue()) {
// Optimize the case where we only have a single value set as the volume
*aOutput = aInput;
aOutput->mVolume *= mGain.GetValue();
} else {
if (mGain.HasSimpleValue()) {
// Copy the input chunk to the output chunk, since we will only be
// changing the mVolume member.
*aOutput = aInput;
} else {
// Create a new output chunk to avoid modifying the input chunk.
AllocateAudioBlock(aInput.mChannelData.Length(), aOutput);
// First, compute a vector of gains for each track tick based on the
// timeline at hand, and then for each channel, multiply the values
// in the buffer with the gain vector.
AllocateAudioBlock(aInput.mChannelData.Length(), aOutput);
// Compute the gain values for the duration of the input AudioChunk
// XXX we need to add a method to AudioEventTimeline to compute this buffer directly.
float computedGain[WEBAUDIO_BLOCK_SIZE];
for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
TrackTicks tick = aStream->GetCurrentPosition();
computedGain[counter] = mGain.GetValueAtTime(tick, counter) * aInput.mVolume;
}
// Apply the gain to the output buffer
for (size_t channel = 0; channel < aOutput->mChannelData.Length(); ++channel) {
const float* inputBuffer = static_cast<const float*> (aInput.mChannelData[channel]);
float* buffer = static_cast<float*> (const_cast<void*>
(aOutput->mChannelData[channel]));
AudioBlockCopyChannelWithScale(inputBuffer, computedGain, buffer);
}
ProcessGain(aStream, aInput.mVolume, aInput.mChannelData, aOutput);
}
}
AudioNodeStream* mSource;
AudioNodeStream* mDestination;
AudioParamTimeline mGain;
};
GainNode::GainNode(AudioContext* aContext)

Просмотреть файл

@@ -1,85 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef GainProcessor_h_
#define GainProcessor_h_
#include "AudioNodeStream.h"
#include "AudioDestinationNode.h"
#include "WebAudioUtils.h"
namespace mozilla {
namespace dom {
// This class implements the gain processing logic used by GainNodeEngine
// and AudioBufferSourceNodeEngine.
// Shared gain-processing helper mixed into GainNodeEngine and
// AudioBufferSourceNodeEngine. Holds an AudioParamTimeline of gain values
// and applies it to an AudioChunk, either as a simple volume scale or as a
// per-tick gain vector. (This whole file is removed by this commit; the
// logic is inlined back into GainNodeEngine.)
class GainProcessor
{
public:
// Caches the context's destination stream, which is needed later to
// convert AudioParam event times into track ticks. The source stream is
// not known yet at construction time; it is supplied via SetSourceStream.
explicit GainProcessor(AudioDestinationNode* aDestination)
: mSource(nullptr)
, mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
, mGain(1.f)
{
}
// Records the stream this processor runs on. Must be called before
// SetGainParameter (enforced by the MOZ_ASSERT there).
void SetSourceStream(AudioNodeStream* aSource)
{
mSource = aSource;
}
// Replaces the gain timeline with aValue and rewrites its event times
// from the main-thread time base into this stream's track ticks.
void SetGainParameter(const AudioParamTimeline& aValue)
{
MOZ_ASSERT(mSource && mDestination);
mGain = aValue;
WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
}
// Applies the gain timeline to one audio block.
// aStream           - the stream being processed; must equal mSource.
// aInputVolume      - input chunk volume, folded into each computed gain.
// aInputChannelData - per-channel float sample buffers to read from.
// aOutput           - chunk whose channels receive the scaled samples.
// NOTE(review): aOutput's channel buffers may alias aInputChannelData;
// callers that need distinct buffers (see AudioBufferSourceNodeEngine)
// must reallocate aOutput before calling — confirm at each call site.
void ProcessGain(AudioNodeStream* aStream,
float aInputVolume,
const nsTArray<const void*>& aInputChannelData,
AudioChunk* aOutput)
{
MOZ_ASSERT(mSource == aStream, "Invalid source stream");
if (mGain.HasSimpleValue()) {
// Optimize the case where we only have a single value set as the volume
aOutput->mVolume *= mGain.GetValue();
} else {
// First, compute a vector of gains for each track tick based on the
// timeline at hand, and then for each channel, multiply the values
// in the buffer with the gain vector.
// Compute the gain values for the duration of the input AudioChunk
// XXX we need to add a method to AudioEventTimeline to compute this buffer directly.
float computedGain[WEBAUDIO_BLOCK_SIZE];
for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
// NOTE(review): tick is re-read each iteration while counter
// advances; presumably GetValueAtTime(tick, counter) resolves the
// position tick+counter — confirm against AudioParamTimeline.
TrackTicks tick = aStream->GetCurrentPosition();
computedGain[counter] = mGain.GetValueAtTime(tick, counter) * aInputVolume;
}
// Apply the gain to the output buffer
MOZ_ASSERT(aInputChannelData.Length() == aOutput->mChannelData.Length());
for (size_t channel = 0; channel < aOutput->mChannelData.Length(); ++channel) {
const float* inputBuffer = static_cast<const float*> (aInputChannelData[channel]);
float* buffer = static_cast<float*> (const_cast<void*>
(aOutput->mChannelData[channel]));
AudioBlockCopyChannelWithScale(inputBuffer, computedGain, buffer);
}
}
}
protected:
// Stream this processor is attached to; set via SetSourceStream.
AudioNodeStream* mSource;
// The context's destination stream, used when converting param times.
AudioNodeStream* mDestination;
// Gain timeline (in track ticks after SetGainParameter); defaults to 1.0.
AudioParamTimeline mGain;
};
}
}
#endif

Просмотреть файл

@@ -40,8 +40,6 @@ MOCHITEST_FILES := \
test_audioParamTimelineDestinationOffset.html \
test_audioBufferSourceNode.html \
test_audioBufferSourceNodeEnded.html \
test_audioBufferSourceNodeGain.html \
test_audioBufferSourceNodeGainInLoop.html \
test_audioBufferSourceNodeLazyLoopParam.html \
test_audioBufferSourceNodeLoop.html \
test_audioBufferSourceNodeLoopStartEnd.html \
@@ -57,7 +55,6 @@ MOCHITEST_FILES := \
test_delayNode.html \
test_delayNodeSmallMaxDelay.html \
test_delayNodeWithGain.html \
test_delayNodeWithGainAlternate.html \
test_dynamicsCompressorNode.html \
test_gainNode.html \
test_gainNodeInLoop.html \

Просмотреть файл

@@ -19,8 +19,6 @@ var gTest = {
}
var source = context.createBufferSource();
ok("gain" in source, "AudioBufferSourceNode.gain must exist");
is(source.gain.value, 1, "AudioBufferSourceNode.gain's default value must be 1");
var sp = context.createScriptProcessor(2048);
source.start(0);

Просмотреть файл

@@ -1,45 +0,0 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test AudioBufferSourceNode.gain</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="webaudio.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">
// Plays a 440Hz sine buffer through an AudioBufferSourceNode with
// gain.value = 0.5 and checks, via the webaudio.js runTest harness, that
// the rendered output is the same sine wave at half amplitude.
// (Deleted by this commit along with the .gain attribute it tests.)
var gTest = {
length: 2048,
numberOfChannels: 1,
createGraph: function(context) {
// One channel, 2048 frames of a 440Hz sine at the context sample rate.
var buffer = context.createBuffer(1, 2048, context.sampleRate);
for (var i = 0; i < 2048; ++i) {
buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
}
var source = context.createBufferSource();
source.buffer = buffer;
source.gain.value = 0.5;
source.start(0);
return source;
},
createExpectedBuffers: function(context) {
// Expected output: the source sine divided by 2 (the gain value).
var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
for (var i = 0; i < 2048; ++i) {
expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate) / 2;
}
return expectedBuffer;
},
};
runTest();
</script>
</pre>
</body>
</html>

Просмотреть файл

@@ -1,46 +0,0 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test AudioBufferSourceNode.gain in presence of loops</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="webaudio.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">
// Loops a constant-1 buffer for two buffer-lengths with gain scheduled via
// setValueAtTime (so the engine must take the timeline path rather than
// just scaling AudioChunk::mVolume) and expects a constant 0.5 output.
// (Deleted by this commit along with the .gain attribute it tests.)
var gTest = {
length: 4096,
numberOfChannels: 1,
createGraph: function(context) {
// One channel, 2048 frames, every sample set to 1.
var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
for (var i = 0; i < 2048; ++i) {
sourceBuffer.getChannelData(0)[i] = 1;
}
var source = context.createBufferSource();
source.buffer = sourceBuffer;
source.loop = true;
source.start(0);
source.stop(sourceBuffer.duration * 2);
// Adjust the gain in a way that we don't just end up modifying AudioChunk::mVolume
source.gain.setValueAtTime(0.5, 0);
return source;
},
createExpectedBuffers: function(context) {
// Expected output: 0.5 for all 4096 frames (two loop iterations).
var expectedBuffer = context.createBuffer(1, 4096, context.sampleRate);
for (var i = 0; i < 4096; ++i) {
expectedBuffer.getChannelData(0)[i] = 0.5;
}
return expectedBuffer;
},
};
runTest();
</script>
</pre>
</body>
</html>

Просмотреть файл

@@ -1,51 +0,0 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test DelayNode with an AudioBufferSourceNode.gain value</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script src="webaudio.js" type="text/javascript"></script>
<script class="testbody" type="text/javascript">
// Routes a half-gain 440Hz sine source through a DelayNode delayed by one
// buffer length (2048 frames) and expects silence for the first 2048
// frames followed by the half-amplitude sine.
// (Deleted by this commit along with the .gain attribute it tests.)
var gTest = {
length: 4096,
numberOfChannels: 1,
createGraph: function(context) {
// One channel, 2048 frames of a 440Hz sine at the context sample rate.
var buffer = context.createBuffer(1, 2048, context.sampleRate);
for (var i = 0; i < 2048; ++i) {
buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
}
var source = context.createBufferSource();
var delay = context.createDelay();
source.buffer = buffer;
source.gain.value = 0.5;
source.connect(delay);
// Delay the source stream by 2048 frames
delay.delayTime.value = 2048 / context.sampleRate;
source.start(0);
return delay;
},
createExpectedBuffers: function(context) {
// Frames [0, 2048) stay at the default 0; frames [2048, 4096) carry
// the sine shifted by 2048 frames and halved by the gain.
var expectedBuffer = context.createBuffer(1, 2048 * 2, context.sampleRate);
for (var i = 2048; i < 2048 * 2; ++i) {
expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i - 2048) / context.sampleRate) / 2;
}
return expectedBuffer;
},
};
runTest();
</script>
</pre>
</body>
</html>

Просмотреть файл

@@ -107,7 +107,7 @@ DOMInterfaces = {
'AudioBufferSourceNode': {
'implicitJSContext': [ 'buffer' ],
'resultNotAddRefed': [ 'gain', 'playbackRate' ],
'resultNotAddRefed': [ 'playbackRate' ],
},
'AudioListener' : {

Просмотреть файл

@@ -16,7 +16,6 @@ interface AudioBufferSourceNode : AudioNode {
attribute AudioBuffer? buffer;
readonly attribute AudioParam playbackRate;
readonly attribute AudioParam gain;
attribute boolean loop;
attribute double loopStart;