Bug 834513 - Part 3: Implement ScriptProcessorNode; r=roc

Ehsan Akhgari, 2013-04-13 21:37:04 -04:00
Parent 82a95502c9
Commit 21b6d807c6
24 changed files with 784 additions and 14 deletions


@@ -639,6 +639,7 @@ GK_ATOM(onanimationend, "onanimationend")
GK_ATOM(onanimationiteration, "onanimationiteration")
GK_ATOM(onanimationstart, "onanimationstart")
GK_ATOM(onAppCommand, "onAppCommand")
GK_ATOM(onaudioprocess, "onaudioprocess")
GK_ATOM(onbeforecopy, "onbeforecopy")
GK_ATOM(onbeforecut, "onbeforecut")
GK_ATOM(onbeforepaste, "onbeforepaste")


@@ -848,6 +848,11 @@ NON_IDL_EVENT(animationiteration,
EventNameType_None,
NS_ANIMATION_EVENT)
NON_IDL_EVENT(audioprocess,
NS_AUDIO_PROCESS,
EventNameType_None,
NS_EVENT)
#ifdef DEFINED_FORWARDED_EVENT
#undef DEFINED_FORWARDED_EVENT
#undef FORWARDED_EVENT


@@ -193,7 +193,7 @@ AudioChunk*
AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
{
uint32_t inputCount = mInputs.Length();
uint32_t outputChannelCount = 0;
uint32_t outputChannelCount = mNumberOfInputChannels;
nsAutoTArray<AudioChunk*,250> inputChunks;
for (uint32_t i = 0; i < inputCount; ++i) {
MediaStream* s = mInputs[i]->GetSource();
@@ -209,8 +209,10 @@ AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
}
inputChunks.AppendElement(chunk);
outputChannelCount =
GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
if (!mNumberOfInputChannels) {
outputChannelCount =
GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
}
}
uint32_t inputChunkCount = inputChunks.Length();
@@ -219,7 +221,8 @@ AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
return aTmpChunk;
}
if (inputChunkCount == 1) {
if (inputChunkCount == 1 &&
inputChunks[0]->mChannelData.Length() == outputChannelCount) {
return inputChunks[0];
}
@@ -233,6 +236,21 @@ AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
NS_ASSERTION(outputChannelCount == channels.Length(),
"We called GetAudioChannelsSuperset to avoid this");
} else if (channels.Length() > outputChannelCount) {
nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
outputChannels.SetLength(outputChannelCount);
for (uint32_t i = 0; i < outputChannelCount; ++i) {
outputChannels[i] =
const_cast<float*>(static_cast<const float*>(aTmpChunk->mChannelData[i]));
}
AudioChannelsDownMix(channels, outputChannels.Elements(),
outputChannelCount, WEBAUDIO_BLOCK_SIZE);
channels.SetLength(outputChannelCount);
for (uint32_t i = 0; i < channels.Length(); ++i) {
channels[i] = outputChannels[i];
}
}
for (uint32_t c = 0; c < channels.Length(); ++c) {
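
When mNumberOfInputChannels is non-zero (as it will be for ScriptProcessorNode streams), it pins the output channel count and the superset computation is skipped; inputs with more channels are then down-mixed and inputs with fewer are up-mixed. A standalone sketch of the channel-count policy, with std containers standing in for the nsTArray machinery (a hypothetical helper, not part of the patch):

#include <algorithm>
#include <cstdint>
#include <vector>

// Sketch of the output-channel-count policy in ObtainInputBlock above.
// A non-zero requirement (e.g. from ScriptProcessorNode) wins outright;
// otherwise the result is the superset (here: maximum) of the inputs'
// channel counts, mirroring GetAudioChannelsSuperset for plain counts.
uint32_t ChooseOutputChannelCount(uint32_t aRequiredChannels,
                                  const std::vector<uint32_t>& aInputCounts)
{
  if (aRequiredChannels != 0) {
    return aRequiredChannels;
  }
  uint32_t superset = 0;
  for (uint32_t count : aInputCounts) {
    superset = std::max(superset, count);
  }
  return superset;
}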


@@ -43,10 +43,12 @@ public:
* Transfers ownership of aEngine to the new AudioNodeStream.
*/
AudioNodeStream(AudioNodeEngine* aEngine,
MediaStreamGraph::AudioNodeStreamKind aKind)
MediaStreamGraph::AudioNodeStreamKind aKind,
uint32_t aNumberOfInputChannels = 0)
: ProcessedMediaStream(nullptr),
mEngine(aEngine),
mKind(aKind)
mKind(aKind),
mNumberOfInputChannels(aNumberOfInputChannels)
{
// AudioNodes are always producing data
mHasCurrentData = true;
@@ -91,6 +93,8 @@ protected:
AudioChunk mLastChunk;
// Whether this is an internal or external stream
MediaStreamGraph::AudioNodeStreamKind mKind;
// The number of input channels that this stream requires. 0 means don't care.
uint32_t mNumberOfInputChannels;
};
}


@@ -2010,9 +2010,10 @@ MediaStreamGraph::CreateTrackUnionStream(DOMMediaStream* aWrapper)
AudioNodeStream*
MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
AudioNodeStreamKind aKind)
AudioNodeStreamKind aKind,
uint32_t aNumberOfInputChannels)
{
AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind);
AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, aNumberOfInputChannels);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);


@@ -909,7 +909,8 @@ public:
* Takes ownership of aEngine.
*/
AudioNodeStream* CreateAudioNodeStream(AudioNodeEngine* aEngine,
AudioNodeStreamKind aKind);
AudioNodeStreamKind aKind,
uint32_t aNumberOfInputChannels = 0);
/**
* Returns the number of graph updates sent. This can be used to track
* whether a given update has been processed by the graph thread and reflected


@@ -107,6 +107,13 @@ AudioBuffer::RestoreJSChannelData(JSContext* aJSContext)
}
}
void
AudioBuffer::SetRawChannelContents(JSContext* aJSContext, uint32_t aChannel,
float* aContents)
{
memcpy(JS_GetFloat32ArrayData(mJSChannels[aChannel]), aContents, sizeof(float)*mLength);
}
JSObject*
AudioBuffer::GetChannelData(JSContext* aJSContext, uint32_t aChannel,
ErrorResult& aRv)


@@ -102,6 +102,14 @@ public:
uint32_t aChannel,
void* aContents);
// This replaces the contents of the JS array for the given channel.
// This function needs to be called on an AudioBuffer which has not been
// handed off to the content yet, and right after the object has been
// initialized.
void SetRawChannelContents(JSContext* aJSContext,
uint32_t aChannel,
float* aContents);
protected:
void RestoreJSChannelData(JSContext* aJSContext);
void ClearJSChannels();


@@ -19,8 +19,13 @@
#include "AudioListener.h"
#include "DynamicsCompressorNode.h"
#include "BiquadFilterNode.h"
#include "ScriptProcessorNode.h"
#include "nsNetUtil.h"
// Note that this number is an arbitrarily large value to protect against OOM
// attacks.
const unsigned MAX_SCRIPT_PROCESSOR_CHANNELS = 10000;
namespace mozilla {
namespace dom {
@@ -99,6 +104,46 @@ AudioContext::CreateBuffer(JSContext* aJSContext, uint32_t aNumberOfChannels,
return buffer.forget();
}
namespace {
bool IsValidBufferSize(uint32_t aBufferSize) {
switch (aBufferSize) {
case 0: // let the implementation choose the buffer size
case 256:
case 512:
case 1024:
case 2048:
case 4096:
case 8192:
case 16384:
return true;
default:
return false;
}
}
}
already_AddRefed<ScriptProcessorNode>
AudioContext::CreateScriptProcessor(uint32_t aBufferSize,
uint32_t aNumberOfInputChannels,
uint32_t aNumberOfOutputChannels,
ErrorResult& aRv)
{
if (aNumberOfInputChannels == 0 || aNumberOfOutputChannels == 0 ||
aNumberOfInputChannels > MAX_SCRIPT_PROCESSOR_CHANNELS ||
aNumberOfOutputChannels > MAX_SCRIPT_PROCESSOR_CHANNELS ||
!IsValidBufferSize(aBufferSize)) {
aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
return nullptr;
}
nsRefPtr<ScriptProcessorNode> scriptProcessor =
new ScriptProcessorNode(this, aBufferSize, aNumberOfInputChannels,
aNumberOfOutputChannels);
return scriptProcessor.forget();
}
already_AddRefed<AnalyserNode>
AudioContext::CreateAnalyser()
{
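
The checks in CreateScriptProcessor above amount to: the buffer size must be 0 (implementation-chosen) or a power of two between 256 and 16384, and both channel counts must lie in [1, MAX_SCRIPT_PROCESSOR_CHANNELS]. A compilable restatement of the predicate, as a sketch (the power-of-two bit trick stands in for the switch):

#include <cstdint>

// Restatement of the argument validation in CreateScriptProcessor above.
// Returns true when the arguments would make it throw NS_ERROR_DOM_INDEX_SIZE_ERR.
bool WouldCreateScriptProcessorThrow(uint32_t aBufferSize,
                                     uint32_t aInputChannels,
                                     uint32_t aOutputChannels)
{
  const uint32_t kMaxChannels = 10000; // MAX_SCRIPT_PROCESSOR_CHANNELS
  const bool validBufferSize =
    aBufferSize == 0 || // let the implementation choose
    (aBufferSize >= 256 && aBufferSize <= 16384 &&
     (aBufferSize & (aBufferSize - 1)) == 0); // powers of two: same set as the switch
  return !validBufferSize ||
         aInputChannels == 0 || aInputChannels > kMaxChannels ||
         aOutputChannels == 0 || aOutputChannels > kMaxChannels;
}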


@@ -48,6 +48,7 @@ class DynamicsCompressorNode;
class GainNode;
class GlobalObject;
class PannerNode;
class ScriptProcessorNode;
class AudioContext MOZ_FINAL : public nsWrapperCache,
public EnableWebAudioCheck
@@ -99,6 +100,22 @@ public:
uint32_t aLength, float aSampleRate,
ErrorResult& aRv);
already_AddRefed<ScriptProcessorNode>
CreateScriptProcessor(uint32_t aBufferSize,
uint32_t aNumberOfInputChannels,
uint32_t aNumberOfOutputChannels,
ErrorResult& aRv);
already_AddRefed<ScriptProcessorNode>
CreateJavaScriptNode(uint32_t aBufferSize,
uint32_t aNumberOfInputChannels,
uint32_t aNumberOfOutputChannels,
ErrorResult& aRv)
{
return CreateScriptProcessor(aBufferSize, aNumberOfInputChannels,
aNumberOfOutputChannels, aRv);
}
already_AddRefed<AnalyserNode>
CreateAnalyser();


@@ -0,0 +1,52 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioProcessingEvent.h"
#include "mozilla/dom/AudioProcessingEventBinding.h"
namespace mozilla {
namespace dom {
NS_IMPL_CYCLE_COLLECTION_INHERITED_3(AudioProcessingEvent, nsDOMEvent,
mInputBuffer, mOutputBuffer, mNode)
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioProcessingEvent)
NS_INTERFACE_MAP_END_INHERITING(nsDOMEvent)
NS_IMPL_ADDREF_INHERITED(AudioProcessingEvent, nsDOMEvent)
NS_IMPL_RELEASE_INHERITED(AudioProcessingEvent, nsDOMEvent)
AudioProcessingEvent::AudioProcessingEvent(ScriptProcessorNode* aOwner,
nsPresContext* aPresContext,
nsEvent* aEvent)
: nsDOMEvent(aOwner, aPresContext, aEvent)
, mPlaybackTime(0.0)
, mNode(aOwner)
{
SetIsDOMBinding();
}
JSObject*
AudioProcessingEvent::WrapObject(JSContext* aCx, JSObject* aScope)
{
return AudioProcessingEventBinding::Wrap(aCx, aScope, this);
}
void
AudioProcessingEvent::LazilyCreateBuffer(nsRefPtr<AudioBuffer>& aBuffer,
uint32_t aNumberOfChannels)
{
AutoPushJSContext cx(mNode->Context()->GetJSContext());
JSAutoRequest ar(cx);
aBuffer = new AudioBuffer(mNode->Context(), mNode->BufferSize(),
mNode->Context()->SampleRate());
aBuffer->InitializeBuffers(aNumberOfChannels, cx);
}
}
}


@@ -0,0 +1,83 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef AudioProcessingEvent_h_
#define AudioProcessingEvent_h_
#include "nsDOMEvent.h"
#include "AudioBuffer.h"
#include "ScriptProcessorNode.h"
namespace mozilla {
namespace dom {
class AudioProcessingEvent : public nsDOMEvent,
public EnableWebAudioCheck
{
public:
AudioProcessingEvent(ScriptProcessorNode* aOwner,
nsPresContext *aPresContext,
nsEvent *aEvent);
NS_DECL_ISUPPORTS_INHERITED
NS_FORWARD_TO_NSDOMEVENT
NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioProcessingEvent, nsDOMEvent)
virtual JSObject* WrapObject(JSContext* aCx, JSObject* aScope);
void InitEvent(AudioBuffer* aInputBuffer,
uint32_t aNumberOfInputChannels,
double aPlaybackTime)
{
InitEvent(NS_LITERAL_STRING("audioprocess"), false, false);
mInputBuffer = aInputBuffer;
mNumberOfInputChannels = aNumberOfInputChannels;
mPlaybackTime = aPlaybackTime;
}
double PlaybackTime() const
{
return mPlaybackTime;
}
AudioBuffer* InputBuffer()
{
if (!mInputBuffer) {
LazilyCreateBuffer(mInputBuffer, mNumberOfInputChannels);
}
return mInputBuffer;
}
AudioBuffer* OutputBuffer()
{
if (!mOutputBuffer) {
LazilyCreateBuffer(mOutputBuffer, mNode->NumberOfOutputChannels());
}
return mOutputBuffer;
}
bool HasOutputBuffer() const
{
return !!mOutputBuffer;
}
private:
void LazilyCreateBuffer(nsRefPtr<AudioBuffer>& aBuffer,
uint32_t aNumberOfChannels);
private:
double mPlaybackTime;
nsRefPtr<AudioBuffer> mInputBuffer;
nsRefPtr<AudioBuffer> mOutputBuffer;
nsRefPtr<ScriptProcessorNode> mNode;
uint32_t mNumberOfInputChannels;
};
}
}
#endif
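
InputBuffer() and OutputBuffer() above allocate their AudioBuffer only on first access, so a handler that never touches a buffer costs nothing, and HasOutputBuffer() lets the node detect an untouched output and queue silence instead. A generic sketch of the same lazy-creation idea, in standalone C++ with hypothetical types:

#include <cstddef>
#include <cstdio>
#include <memory>

// Standalone sketch of the lazy-creation pattern used by AudioProcessingEvent.
struct LazyChannel {
  std::unique_ptr<float[]> mData;

  // Analogous to InputBuffer()/OutputBuffer(): allocate on first access only.
  float* Get(size_t aLength)
  {
    if (!mData) {
      mData = std::make_unique<float[]>(aLength);
    }
    return mData.get();
  }

  // Analogous to HasOutputBuffer(): lets the caller substitute silence.
  bool HasBuffer() const { return !!mData; }
};

int main()
{
  LazyChannel output;
  // A handler that ignores the output leaves it unallocated.
  std::printf("allocated: %d\n", output.HasBuffer() ? 1 : 0); // prints 0
  return 0;
}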


@@ -24,6 +24,7 @@ CPPSRCS := \
AudioListener.cpp \
AudioNode.cpp \
AudioParam.cpp \
AudioProcessingEvent.cpp \
BiquadFilterNode.cpp \
DelayNode.cpp \
DynamicsCompressorNode.cpp \
@@ -31,6 +32,7 @@ CPPSRCS := \
GainNode.cpp \
MediaBufferDecoder.cpp \
PannerNode.cpp \
ScriptProcessorNode.cpp \
ThreeDPoint.cpp \
WebAudioUtils.cpp \
$(NULL)


@@ -0,0 +1,377 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ScriptProcessorNode.h"
#include "mozilla/dom/ScriptProcessorNodeBinding.h"
#include "AudioBuffer.h"
#include "AudioDestinationNode.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioProcessingEvent.h"
#include "WebAudioUtils.h"
#include "mozilla/Mutex.h"
#include "mozilla/unused.h"
#include "mozilla/PodOperations.h"
#include <deque>
namespace mozilla {
namespace dom {
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(ScriptProcessorNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)
NS_IMPL_ADDREF_INHERITED(ScriptProcessorNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(ScriptProcessorNode, AudioNode)
// This class manages a queue of output buffers shared between
// the main thread and the Media Stream Graph thread.
class SharedBuffers
{
private:
class OutputQueue
{
public:
explicit OutputQueue(const char* aName)
: mMutex(aName)
{}
Mutex& Lock() { return mMutex; }
size_t ReadyToConsume() const
{
mMutex.AssertCurrentThreadOwns();
MOZ_ASSERT(!NS_IsMainThread());
return mBufferList.size();
}
// Produce one buffer
AudioChunk& Produce()
{
mMutex.AssertCurrentThreadOwns();
MOZ_ASSERT(NS_IsMainThread());
mBufferList.push_back(AudioChunk());
return mBufferList.back();
}
// Consumes one buffer.
AudioChunk Consume()
{
mMutex.AssertCurrentThreadOwns();
MOZ_ASSERT(!NS_IsMainThread());
MOZ_ASSERT(ReadyToConsume() > 0);
AudioChunk front = mBufferList.front();
mBufferList.pop_front();
return front;
}
private:
typedef std::deque<AudioChunk> BufferList;
// Synchronizes access to mBufferList. Note that it's the responsibility
// of the callers to perform the required locking, and we assert that every
// time we access mBufferList.
Mutex mMutex;
// The list representing the queue.
BufferList mBufferList;
};
public:
SharedBuffers()
: mOutputQueue("SharedBuffers::outputQueue")
, mDelaySoFar(TRACK_TICKS_MAX)
{
}
// main thread
void FinishProducingOutputBuffer(ThreadSharedFloatArrayBufferList* aBuffer,
uint32_t aBufferSize)
{
MOZ_ASSERT(NS_IsMainThread());
MutexAutoLock lock(mOutputQueue.Lock());
for (uint32_t offset = 0; offset < aBufferSize; offset += WEBAUDIO_BLOCK_SIZE) {
AudioChunk& chunk = mOutputQueue.Produce();
if (aBuffer) {
chunk.mDuration = WEBAUDIO_BLOCK_SIZE;
chunk.mBuffer = aBuffer;
chunk.mChannelData.SetLength(aBuffer->GetChannels());
for (uint32_t i = 0; i < aBuffer->GetChannels(); ++i) {
chunk.mChannelData[i] = aBuffer->GetData(i) + offset;
}
chunk.mVolume = 1.0f;
chunk.mBufferFormat = AUDIO_FORMAT_FLOAT32;
} else {
chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
}
}
}
// graph thread
AudioChunk GetOutputBuffer()
{
MOZ_ASSERT(!NS_IsMainThread());
AudioChunk buffer;
{
MutexAutoLock lock(mOutputQueue.Lock());
if (mOutputQueue.ReadyToConsume() > 0) {
if (mDelaySoFar == TRACK_TICKS_MAX) {
mDelaySoFar = 0;
}
buffer = mOutputQueue.Consume();
} else {
// If we're out of buffers to consume, just output silence
buffer.SetNull(WEBAUDIO_BLOCK_SIZE);
if (mDelaySoFar != TRACK_TICKS_MAX) {
// Remember the delay that we just hit
mDelaySoFar += WEBAUDIO_BLOCK_SIZE;
}
}
}
return buffer;
}
TrackTicks DelaySoFar() const
{
MOZ_ASSERT(!NS_IsMainThread());
return mDelaySoFar == TRACK_TICKS_MAX ? 0 : mDelaySoFar;
}
private:
OutputQueue mOutputQueue;
// How much delay we've seen so far. This measures the amount of delay
// caused by the main thread lagging behind in producing output buffers.
// TRACK_TICKS_MAX means that we have not received our first buffer yet.
TrackTicks mDelaySoFar;
};
class ScriptProcessorNodeEngine : public AudioNodeEngine
{
public:
typedef nsAutoTArray<nsAutoArrayPtr<float>, 2> InputChannels;
ScriptProcessorNodeEngine(ScriptProcessorNode* aNode,
AudioDestinationNode* aDestination,
uint32_t aBufferSize,
uint32_t aNumberOfInputChannels)
: AudioNodeEngine(aNode)
, mSharedBuffers(aNode->GetSharedBuffers())
, mSource(nullptr)
, mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
, mBufferSize(aBufferSize)
, mInputWriteIndex(0)
, mSeenNonSilenceInput(false)
{
mInputChannels.SetLength(aNumberOfInputChannels);
AllocateInputBlock();
}
void SetSourceStream(AudioNodeStream* aSource)
{
mSource = aSource;
}
virtual void ProduceAudioBlock(AudioNodeStream* aStream,
const AudioChunk& aInput,
AudioChunk* aOutput,
bool* aFinished) MOZ_OVERRIDE
{
// If our node is dead, just output silence
if (!mNode) {
aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
return;
}
// First, record our input buffer
for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
if (aInput.IsNull()) {
PodZero(mInputChannels[i] + mInputWriteIndex,
aInput.GetDuration());
} else {
mSeenNonSilenceInput = true;
PodCopy(mInputChannels[i] + mInputWriteIndex,
static_cast<const float*>(aInput.mChannelData[i]),
aInput.GetDuration());
}
}
mInputWriteIndex += aInput.GetDuration();
// Now, see if we have data to output
// Note that we need to do this before sending the buffer to the main
// thread so that our delay time is updated.
*aOutput = mSharedBuffers->GetOutputBuffer();
if (mInputWriteIndex >= mBufferSize) {
SendBuffersToMainThread(aStream);
mInputWriteIndex -= mBufferSize;
mSeenNonSilenceInput = false;
AllocateInputBlock();
}
}
private:
void AllocateInputBlock()
{
for (unsigned i = 0; i < mInputChannels.Length(); ++i) {
if (!mInputChannels[i]) {
mInputChannels[i] = new float[mBufferSize];
}
}
}
void SendBuffersToMainThread(AudioNodeStream* aStream)
{
MOZ_ASSERT(!NS_IsMainThread());
// we now have a full input buffer ready to be sent to the main thread.
TrackTicks playbackTick = mSource->GetCurrentPosition();
// Add the duration of the current sample
playbackTick += WEBAUDIO_BLOCK_SIZE;
// Add the delay caused by the main thread
playbackTick += mSharedBuffers->DelaySoFar();
// Compute the playback time in the coordinate system of the destination
double playbackTime =
WebAudioUtils::StreamPositionToDestinationTime(playbackTick,
mSource,
mDestination);
class Command : public nsRunnable
{
public:
Command(AudioNodeStream* aStream,
InputChannels& aInputChannels,
double aPlaybackTime,
bool aNullInput)
: mStream(aStream)
, mPlaybackTime(aPlaybackTime)
, mNullInput(aNullInput)
{
mInputChannels.SetLength(aInputChannels.Length());
if (!aNullInput) {
for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
mInputChannels[i] = aInputChannels[i].forget();
}
}
}
NS_IMETHODIMP Run()
{
// If it's not safe to run scripts right now, schedule this to run later
if (!nsContentUtils::IsSafeToRunScript()) {
nsContentUtils::AddScriptRunner(this);
return NS_OK;
}
nsRefPtr<ScriptProcessorNode> node = static_cast<ScriptProcessorNode*>(mStream->Engine()->Node());
if (!node) {
return NS_OK;
}
AutoPushJSContext cx(node->Context()->GetJSContext());
if (cx) {
JSAutoRequest ar(cx);
// Create the input buffer
nsRefPtr<AudioBuffer> inputBuffer;
if (!mNullInput) {
inputBuffer = new AudioBuffer(node->Context(),
node->BufferSize(),
node->Context()->SampleRate());
if (!inputBuffer->InitializeBuffers(mInputChannels.Length(), cx)) {
return NS_OK;
}
// Put the channel data inside it
for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
inputBuffer->SetRawChannelContents(cx, i, mInputChannels[i]);
}
}
// Ask content to produce data in the output buffer
// Note that we always avoid creating the output buffer here, and we try to
// avoid creating the input buffer as well. The AudioProcessingEvent class
// knows how to lazily create them if needed once the script tries to access
// them. Otherwise, we may be able to get away without creating them!
nsRefPtr<AudioProcessingEvent> event = new AudioProcessingEvent(node, nullptr, nullptr);
event->InitEvent(inputBuffer,
mInputChannels.Length(),
mPlaybackTime);
node->DispatchTrustedEvent(event);
// Steal the output buffers
nsRefPtr<ThreadSharedFloatArrayBufferList> output;
if (event->HasOutputBuffer()) {
uint32_t rate, length;
output = event->OutputBuffer()->GetThreadSharedChannelsForRate(cx, &rate, &length);
unused << rate;
unused << length;
}
// Append it to our output buffer queue
node->GetSharedBuffers()->FinishProducingOutputBuffer(output, node->BufferSize());
}
return NS_OK;
}
private:
nsRefPtr<AudioNodeStream> mStream;
InputChannels mInputChannels;
double mPlaybackTime;
bool mNullInput;
};
NS_DispatchToMainThread(new Command(aStream, mInputChannels,
playbackTime,
!mSeenNonSilenceInput));
}
friend class ScriptProcessorNode;
SharedBuffers* mSharedBuffers;
AudioNodeStream* mSource;
AudioNodeStream* mDestination;
InputChannels mInputChannels;
const uint32_t mBufferSize;
// The write index into the current input buffer
uint32_t mInputWriteIndex;
bool mSeenNonSilenceInput;
};
ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,
uint32_t aBufferSize,
uint32_t aNumberOfInputChannels,
uint32_t aNumberOfOutputChannels)
: AudioNode(aContext)
, mSharedBuffers(new SharedBuffers())
, mBufferSize(aBufferSize ?
aBufferSize : // respect what the web developer requested
4096) // choose our own buffer size -- 4096 frames for now
, mNumberOfOutputChannels(aNumberOfOutputChannels)
{
MOZ_ASSERT(BufferSize() % WEBAUDIO_BLOCK_SIZE == 0, "Invalid buffer size");
ScriptProcessorNodeEngine* engine =
new ScriptProcessorNodeEngine(this,
aContext->Destination(),
BufferSize(),
aNumberOfInputChannels);
mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM,
aNumberOfInputChannels);
engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
}
ScriptProcessorNode::~ScriptProcessorNode()
{
DestroyMediaStream();
}
JSObject*
ScriptProcessorNode::WrapObject(JSContext* aCx, JSObject* aScope)
{
return ScriptProcessorNodeBinding::Wrap(aCx, aScope, this);
}
}
}
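
The SharedBuffers hand-off is the heart of the node: the main thread produces whole processing buffers, while the graph thread must emit one block per iteration and therefore substitutes silence, accumulating mDelaySoFar, whenever the queue underruns. A condensed, standalone sketch of that policy, with a plain deque of float blocks standing in for the AudioChunk machinery:

#include <cstdint>
#include <deque>
#include <limits>
#include <mutex>
#include <vector>

// Condensed sketch of the SharedBuffers underrun-accounting policy above.
class OutputQueueSketch {
  static constexpr uint64_t kNoBufferYet = std::numeric_limits<uint64_t>::max();

  std::mutex mMutex;
  std::deque<std::vector<float>> mBlocks; // produced by the main thread
  uint64_t mDelaySoFar = kNoBufferYet;    // max() means no buffer received yet

public:
  // Main thread: queue one block of processed output.
  void Produce(std::vector<float> aBlock)
  {
    std::lock_guard<std::mutex> lock(mMutex);
    mBlocks.push_back(std::move(aBlock));
  }

  // Graph thread: must return a block every time; silence on underrun.
  std::vector<float> Consume(size_t aBlockSize)
  {
    std::lock_guard<std::mutex> lock(mMutex);
    if (!mBlocks.empty()) {
      if (mDelaySoFar == kNoBufferYet) {
        mDelaySoFar = 0; // first real buffer: start counting delay
      }
      std::vector<float> front = std::move(mBlocks.front());
      mBlocks.pop_front();
      return front;
    }
    if (mDelaySoFar != kNoBufferYet) {
      mDelaySoFar += aBlockSize; // the main thread lagged; remember by how much
    }
    return std::vector<float>(aBlockSize, 0.0f);
  }

  // Graph thread: delay to add when computing playbackTime.
  uint64_t DelaySoFar()
  {
    std::lock_guard<std::mutex> lock(mMutex);
    return mDelaySoFar == kNoBufferYet ? 0 : mDelaySoFar;
  }
};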


@@ -0,0 +1,70 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ScriptProcessorNode_h_
#define ScriptProcessorNode_h_
#include "AudioNode.h"
#include "nsAutoPtr.h"
namespace mozilla {
class AudioNodeStream;
namespace dom {
class AudioContext;
class ScriptProcessorNodeEngine;
class SharedBuffers;
class ScriptProcessorNode : public AudioNode
{
public:
ScriptProcessorNode(AudioContext* aContext,
uint32_t aBufferSize,
uint32_t aNumberOfInputChannels,
uint32_t aNumberOfOutputChannels);
virtual ~ScriptProcessorNode();
NS_DECL_ISUPPORTS_INHERITED
IMPL_EVENT_HANDLER(audioprocess)
virtual JSObject* WrapObject(JSContext* aCx, JSObject* aScope);
virtual bool SupportsMediaStreams() const MOZ_OVERRIDE
{
return true;
}
uint32_t BufferSize() const
{
return mBufferSize;
}
SharedBuffers* GetSharedBuffers() const
{
return mSharedBuffers;
}
uint32_t NumberOfOutputChannels() const
{
return mNumberOfOutputChannels;
}
using nsDOMEventTargetHelper::DispatchTrustedEvent;
private:
nsAutoPtr<SharedBuffers> mSharedBuffers;
const uint32_t mBufferSize;
const uint32_t mNumberOfOutputChannels;
};
}
}
#endif


@@ -34,6 +34,17 @@ struct ConvertTimeToTickHelper
}
};
double
WebAudioUtils::StreamPositionToDestinationTime(TrackTicks aSourcePosition,
AudioNodeStream* aSource,
AudioNodeStream* aDestination)
{
StreamTime sourceTime = TicksToTimeRoundDown(IdealAudioRate(), aSourcePosition);
GraphTime graphTime = aSource->StreamTimeToGraphTime(sourceTime);
StreamTime destinationTime = aDestination->GraphTimeToStreamTimeOptimistic(graphTime);
return MediaTimeToSeconds(destinationTime);
}
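
For the playbackTime reported to script, SendBuffersToMainThread feeds this function the source position plus one WEBAUDIO_BLOCK_SIZE plus SharedBuffers::DelaySoFar(), so main-thread lag shifts the reported time forward. A simplified model of the conversion itself, treating the two stream/graph time mappings as plain additive offsets (an assumption; the real StreamTimeToGraphTime also accounts for intervals where a stream is blocked):

#include <cstdint>

// Simplified model of StreamPositionToDestinationTime above: map source
// ticks into graph time, then into destination-stream time, then to seconds.
// The offsets are hypothetical stand-ins for the MediaStreamGraph conversions.
double SketchPositionToDestinationTime(int64_t aSourceTicks,
                                       int64_t aSourceToGraphOffsetTicks,
                                       int64_t aGraphToDestOffsetTicks,
                                       int64_t aIdealRate /* e.g. 48000 Hz */)
{
  int64_t graphTicks = aSourceTicks + aSourceToGraphOffsetTicks;
  int64_t destTicks = graphTicks + aGraphToDestOffsetTicks;
  return static_cast<double>(destTicks) / static_cast<double>(aIdealRate);
}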
void
WebAudioUtils::ConvertAudioParamToTicks(AudioParamTimeline& aParam,
AudioNodeStream* aSource,


@@ -9,6 +9,7 @@
#include <cmath>
#include "AudioParamTimeline.h"
#include "MediaSegment.h"
namespace mozilla {
@@ -73,6 +74,14 @@ struct WebAudioUtils {
aDouble = 0.0;
}
}
/**
* Convert a stream position into the time coordinate of the destination
* stream.
*/
static double StreamPositionToDestinationTime(TrackTicks aSourcePosition,
AudioNodeStream* aSource,
AudioNodeStream* aDestination);
};
}


@@ -26,11 +26,13 @@ EXPORTS.mozilla.dom += [
'AudioListener.h',
'AudioNode.h',
'AudioParam.h',
'AudioProcessingEvent.h',
'BiquadFilterNode.h',
'DelayNode.h',
'DynamicsCompressorNode.h',
'EnableWebAudioCheck.h',
'GainNode.h',
'PannerNode.h',
'ScriptProcessorNode.h',
]


@@ -122,6 +122,10 @@ DOMInterfaces = {
'nativeOwnership': 'refcounted'
},
'AudioProcessingEvent' : {
'resultNotAddRefed': [ 'inputBuffer', 'outputBuffer' ],
},
'BeforeUnloadEvent': {
'nativeType': 'nsDOMBeforeUnloadEvent',
},


@@ -35,6 +35,11 @@ interface AudioContext {
[Creator]
AudioBufferSourceNode createBufferSource();
[Creator, Throws]
ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0,
optional unsigned long numberOfInputChannels = 2,
optional unsigned long numberOfOutputChannels = 2);
[Creator]
AnalyserNode createAnalyser();
[Creator]
@@ -66,10 +71,10 @@ partial interface AudioContext {
DelayNode createDelayNode(optional double maxDelayTime = 1);
// Same as createScriptProcessor()
// [Creator]
// ScriptProcessorNode createJavaScriptNode(unsigned long bufferSize,
// optional unsigned long numberOfInputChannels = 2,
// optional unsigned long numberOfOutputChannels = 2);
[Creator, Throws]
ScriptProcessorNode createJavaScriptNode(optional unsigned long bufferSize = 0,
optional unsigned long numberOfInputChannels = 2,
optional unsigned long numberOfOutputChannels = 2);
};


@@ -0,0 +1,21 @@
/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/.
*
* The origin of this IDL file is
* https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html
*
* Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
* liability, trademark and document use rules apply.
*/
[PrefControlled]
interface AudioProcessingEvent : Event {
readonly attribute double playbackTime;
readonly attribute AudioBuffer inputBuffer;
readonly attribute AudioBuffer outputBuffer;
};


@@ -0,0 +1,22 @@
/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/.
*
* The origin of this IDL file is
* https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html
*
* Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
* liability, trademark and document use rules apply.
*/
[PrefControlled]
interface ScriptProcessorNode : AudioNode {
[SetterThrows]
attribute EventHandler onaudioprocess;
readonly attribute long bufferSize;
};


@@ -13,6 +13,7 @@ webidl_files = \
AnimationEvent.webidl \
ArchiveReader.webidl \
ArchiveRequest.webidl \
Attr.webidl \
AudioBuffer.webidl \
AudioBufferSourceNode.webidl \
AudioContext.webidl \
@@ -20,7 +21,7 @@ webidl_files = \
AudioListener.webidl \
AudioNode.webidl \
AudioParam.webidl \
Attr.webidl \
AudioProcessingEvent.webidl \
BatteryManager.webidl \
BeforeUnloadEvent.webidl \
BiquadFilterNode.webidl \
@@ -183,6 +184,7 @@ webidl_files = \
RGBColor.webidl \
RTCConfiguration.webidl \
Screen.webidl \
ScriptProcessorNode.webidl \
ScrollAreaEvent.webidl \
SimpleGestureEvent.webidl \
StyleSheet.webidl \


@@ -399,6 +399,9 @@ enum nsEventStructType {
#define NS_SMIL_END (NS_SMIL_TIME_EVENT_START + 1)
#define NS_SMIL_REPEAT (NS_SMIL_TIME_EVENT_START + 2)
#define NS_WEBAUDIO_EVENT_START 4350
#define NS_AUDIO_PROCESS (NS_WEBAUDIO_EVENT_START)
// script notification events
#define NS_NOTIFYSCRIPT_START 4500
#define NS_BEFORE_SCRIPT_EXECUTE (NS_NOTIFYSCRIPT_START)