diff --git a/content/media/webaudio/AudioProcessingEvent.cpp b/content/media/webaudio/AudioProcessingEvent.cpp
index 4cb44d9382e8..01962fd09b48 100644
--- a/content/media/webaudio/AudioProcessingEvent.cpp
+++ b/content/media/webaudio/AudioProcessingEvent.cpp
@@ -37,23 +37,30 @@ AudioProcessingEvent::WrapObject(JSContext* aCx)
   return AudioProcessingEventBinding::Wrap(aCx, this);
 }
 
-void
-AudioProcessingEvent::LazilyCreateBuffer(nsRefPtr<AudioBuffer>& aBuffer,
-                                         uint32_t aNumberOfChannels)
+already_AddRefed<AudioBuffer>
+AudioProcessingEvent::LazilyCreateBuffer(uint32_t aNumberOfChannels,
+                                         ErrorResult& aRv)
 {
   // We need the global for the context so that we can enter its compartment.
   JSObject* global = mNode->Context()->GetGlobalJSObject();
   if (NS_WARN_IF(!global)) {
-    return;
+    aRv.Throw(NS_ERROR_UNEXPECTED);
+    return nullptr;
   }
 
   AutoJSAPI jsapi;
   JSContext* cx = jsapi.cx();
   JSAutoCompartment ac(cx, global);
 
-  aBuffer = new AudioBuffer(mNode->Context(), mNode->BufferSize(),
-                            mNode->Context()->SampleRate());
-  aBuffer->InitializeBuffers(aNumberOfChannels, cx);
+  nsRefPtr<AudioBuffer> buffer =
+    new AudioBuffer(mNode->Context(), mNode->BufferSize(),
+                    mNode->Context()->SampleRate());
+  if (!buffer->InitializeBuffers(aNumberOfChannels, cx)) {
+    aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+    return nullptr;
+  }
+
+  return buffer.forget();
 }
 
 }
diff --git a/content/media/webaudio/AudioProcessingEvent.h b/content/media/webaudio/AudioProcessingEvent.h
index e32e2ebd54c8..49a333cb19ae 100644
--- a/content/media/webaudio/AudioProcessingEvent.h
+++ b/content/media/webaudio/AudioProcessingEvent.h
@@ -42,18 +42,18 @@ public:
     return mPlaybackTime;
   }
 
-  AudioBuffer* InputBuffer()
+  AudioBuffer* GetInputBuffer(ErrorResult& aRv)
   {
     if (!mInputBuffer) {
-      LazilyCreateBuffer(mInputBuffer, mNumberOfInputChannels);
+      mInputBuffer = LazilyCreateBuffer(mNumberOfInputChannels, aRv);
     }
     return mInputBuffer;
   }
 
-  AudioBuffer* OutputBuffer()
+  AudioBuffer* GetOutputBuffer(ErrorResult& aRv)
   {
     if (!mOutputBuffer) {
-      LazilyCreateBuffer(mOutputBuffer, mNode->NumberOfOutputChannels());
+      mOutputBuffer = LazilyCreateBuffer(mNode->NumberOfOutputChannels(), aRv);
     }
     return mOutputBuffer;
   }
@@ -64,8 +64,8 @@ public:
   }
 
 private:
-  void LazilyCreateBuffer(nsRefPtr<AudioBuffer>& aBuffer,
-                          uint32_t aNumberOfChannels);
+  already_AddRefed<AudioBuffer>
+  LazilyCreateBuffer(uint32_t aNumberOfChannels, ErrorResult& rv);
 
 private:
   double mPlaybackTime;
diff --git a/content/media/webaudio/ScriptProcessorNode.cpp b/content/media/webaudio/ScriptProcessorNode.cpp
index 57b6d447ee58..ea441c7e4344 100644
--- a/content/media/webaudio/ScriptProcessorNode.cpp
+++ b/content/media/webaudio/ScriptProcessorNode.cpp
@@ -438,10 +438,18 @@ private:
                                          mPlaybackTime);
       node->DispatchTrustedEvent(event);
 
-      // Steal the output buffers
+      // Steal the output buffers if they have been set.
+      // Don't create a buffer if it hasn't been used to return output;
+      // FinishProducingOutputBuffer() will optimize output = null.
+      // GetThreadSharedChannelsForRate() may also return null after OOM.
       nsRefPtr<ThreadSharedFloatArrayBufferList> output;
       if (event->HasOutputBuffer()) {
-        output = event->OutputBuffer()->GetThreadSharedChannelsForRate(cx);
+        ErrorResult rv;
+        AudioBuffer* buffer = event->GetOutputBuffer(rv);
+        // HasOutputBuffer() returning true means that GetOutputBuffer()
+        // will not fail.
+        MOZ_ASSERT(!rv.Failed());
+        output = buffer->GetThreadSharedChannelsForRate(cx);
       }
 
       // Append it to our output buffer queue
diff --git a/dom/webidl/AudioProcessingEvent.webidl b/dom/webidl/AudioProcessingEvent.webidl
index 9d061499e091..17ee321fb193 100644
--- a/dom/webidl/AudioProcessingEvent.webidl
+++ b/dom/webidl/AudioProcessingEvent.webidl
@@ -12,9 +12,12 @@
  */
 
 interface AudioProcessingEvent : Event {
 
-  readonly attribute double playbackTime;
-  readonly attribute AudioBuffer inputBuffer;
-  readonly attribute AudioBuffer outputBuffer;
+  readonly attribute double playbackTime;
+
+  [Throws]
+  readonly attribute AudioBuffer inputBuffer;
+  [Throws]
+  readonly attribute AudioBuffer outputBuffer;
 
 };