2013-06-11 00:07:55 +04:00
|
|
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
|
|
|
/* vim:set ts=2 sw=2 sts=2 et cindent: */
|
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
|
|
|
#include "ConvolverNode.h"
|
|
|
|
#include "mozilla/dom/ConvolverNodeBinding.h"
|
2016-06-07 23:10:18 +03:00
|
|
|
#include "nsAutoPtr.h"
|
2016-04-13 22:31:50 +03:00
|
|
|
#include "AlignmentUtils.h"
|
2013-06-11 00:07:55 +04:00
|
|
|
#include "AudioNodeEngine.h"
|
|
|
|
#include "AudioNodeStream.h"
|
2013-06-11 00:09:12 +04:00
|
|
|
#include "blink/Reverb.h"
|
2013-08-15 23:44:14 +04:00
|
|
|
#include "PlayingRefChangeHandler.h"
|
2013-06-11 00:07:55 +04:00
|
|
|
|
|
|
|
namespace mozilla {
|
|
|
|
namespace dom {
|
|
|
|
|
2014-04-25 20:49:00 +04:00
|
|
|
// Cycle collection: beyond what AudioNode already traverses, only mBuffer
// (the impulse-response AudioBuffer) is cycle-collected here.
NS_IMPL_CYCLE_COLLECTION_INHERITED(ConvolverNode, AudioNode, mBuffer)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(ConvolverNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

// Reference counting is delegated to AudioNode.
NS_IMPL_ADDREF_INHERITED(ConvolverNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(ConvolverNode, AudioNode)
|
|
|
|
|
2015-04-28 09:42:00 +03:00
|
|
|
// Audio-thread engine backing ConvolverNode.  Owns the WebCore::Reverb
// instance that performs the convolution and tracks how much tail output
// remains so the node can be marked inactive when it falls silent.
class ConvolverNodeEngine final : public AudioNodeEngine {
  typedef PlayingRefChangeHandler PlayingRefChanged;

 public:
  // aWindowID is only used to target developer-console error messages
  // (see SetBuffer below).
  ConvolverNodeEngine(AudioNode* aNode, bool aNormalize, uint64_t aWindowID)
      : AudioNodeEngine(aNode),
        mWindowID(aWindowID),
        mUseBackgroundThreads(!aNode->Context()->IsOffline()),
        mNormalize(aNormalize) {}

  // Indicates how the right output channel is generated.
  enum class RightConvolverMode {
    // A right convolver is always used when there is more than one impulse
    // response channel.
    Always,
    // With a single response channel, the mode may be either Direct or
    // Difference.  The decision on which to use is made when stereo input is
    // received.  Once the right convolver is in use, convolver state is
    // suitable only for the selected mode, and so the mode cannot change
    // until the right convolver contains only silent history.
    //
    // With Direct mode, each convolver processes a corresponding channel.
    // This mode is selected when input is initially stereo or
    // channelInterpretation is "discrete" at the time of starting the right
    // convolver when input changes from non-silent mono to stereo.
    Direct,
    // Difference mode is selected if channelInterpretation is "speakers" at
    // the time of starting the right convolver when the input changes from
    // mono to stereo.
    //
    // When non-silent input is initially mono, with a single response
    // channel, the right output channel is not produced until input becomes
    // stereo.  Only a single convolver is used for mono processing.  When
    // stereo input arrives after mono input, output must be as if the mono
    // signal remaining in the left convolver is up-mixed, but the right
    // convolver has not been initialized with the history of the mono input.
    // Copying the state of the left convolver into the right convolver is not
    // desirable, because there is considerable state to copy, and the
    // different convolvers are intended to process out of phase, which means
    // that state from one convolver would not directly map to state in
    // another convolver.
    //
    // Instead the distributive property of convolution is used to generate
    // the right output channel using information in the left output channel.
    // Using l and r to denote the left and right channel input signals, g the
    // impulse response, and * convolution, the convolution of the right
    // channel can be given by
    //
    //   r * g = (l + (r - l)) * g
    //         = l * g + (r - l) * g
    //
    // The left convolver continues to process the left channel l to produce
    // l * g.  The right convolver processes the difference of input channel
    // signals r - l to produce (r - l) * g.  The outputs of the two
    // convolvers are added to generate the right channel output r * g.
    //
    // The benefit of doing this is that the history of the r - l input for a
    // "speakers" up-mixed mono signal is zero, and so an empty convolver
    // already has exactly the right history for mixing the previous mono
    // signal with the new stereo signal.
    Difference
  };

  enum Parameters { SAMPLE_RATE, NORMALIZE };

  // Receives the NORMALIZE flag forwarded from the main thread.
  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override {
    switch (aIndex) {
      case NORMALIZE:
        mNormalize = !!aParam;
        break;
      default:
        NS_ERROR("Bad ConvolverNodeEngine Int32Parameter");
    }
  }

  // Receives the buffer's sample rate forwarded from the main thread.
  void SetDoubleParameter(uint32_t aIndex, double aParam) override {
    switch (aIndex) {
      case SAMPLE_RATE:
        mSampleRate = aParam;
        // The buffer is passed after the sample rate.
        // mReverb will be set using this sample rate when the buffer is
        // received.
        break;
      default:
        NS_ERROR("Bad ConvolverNodeEngine DoubleParameter");
    }
  }

  // Installs a new impulse response and (re)creates mReverb.  A null chunk
  // or missing sample rate clears the reverb, producing silence.
  void SetBuffer(AudioChunk&& aBuffer) override {
    // Note about empirical tuning (this is copied from Blink)
    // The maximum FFT size affects reverb performance and accuracy.
    // If the reverb is single-threaded and processes entirely in the real-time
    // audio thread, it's important not to make this too high. In this case
    // 8192 is a good value. But, the Reverb object is multi-threaded, so we
    // want this as high as possible without losing too much accuracy. Very
    // large FFTs will have worse phase errors. Given these constraints 32768 is
    // a good compromise.
    const size_t MaxFFTSize = 32768;

    // Reset tail-tracking state for the new response.
    mRemainingLeftOutput = INT32_MIN;
    mRemainingRightOutput = 0;
    mRemainingRightHistory = 0;

    if (aBuffer.IsNull() || !mSampleRate) {
      mReverb = nullptr;
      return;
    }

    // Assume for now that convolution of channel difference is not required.
    // Direct may change to Difference during processing.
    mRightConvolverMode = aBuffer.ChannelCount() == 1
                              ? RightConvolverMode::Direct
                              : RightConvolverMode::Always;

    bool allocationFailure = false;
    mReverb = new WebCore::Reverb(aBuffer, MaxFFTSize, mUseBackgroundThreads,
                                  mNormalize, mSampleRate, &allocationFailure);
    if (allocationFailure) {
      // If the allocation failed, this AudioNodeEngine is going to output
      // silence. This is signaled to developers in the console.
      mReverb = nullptr;
      WebAudioUtils::LogToDeveloperConsole(mWindowID,
                                           "ConvolverNodeAllocationError");
    }
  }

  // Copies aInput into mReverbInput with aInput.mVolume pre-multiplied,
  // padding with silent channels up to aTotalChannelCount.
  void AllocateReverbInput(const AudioBlock& aInput,
                           uint32_t aTotalChannelCount) {
    uint32_t inputChannelCount = aInput.ChannelCount();
    MOZ_ASSERT(inputChannelCount <= aTotalChannelCount);
    mReverbInput.AllocateChannels(aTotalChannelCount);
    // Pre-multiply the input's volume
    for (uint32_t i = 0; i < inputChannelCount; ++i) {
      const float* src = static_cast<const float*>(aInput.mChannelData[i]);
      float* dest = mReverbInput.ChannelFloatsForWrite(i);
      AudioBlockCopyChannelWithScale(src, aInput.mVolume, dest);
    }
    // Fill remaining channels with silence
    for (uint32_t i = inputChannelCount; i < aTotalChannelCount; ++i) {
      float* dest = mReverbInput.ChannelFloatsForWrite(i);
      std::fill_n(dest, WEBAUDIO_BLOCK_SIZE, 0.0f);
    }
  }

  void ProcessBlock(AudioNodeStream* aStream, GraphTime aFrom,
                    const AudioBlock& aInput, AudioBlock* aOutput,
                    bool* aFinished) override;

  // Active while tail output remains; INT32_MIN marks "tail fully handled".
  bool IsActive() const override { return mRemainingLeftOutput != INT32_MIN; }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);

    amount += mReverbInput.SizeOfExcludingThis(aMallocSizeOf, false);

    if (mReverb) {
      amount += mReverb->sizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

 private:
  // Keeping mReverbInput across process calls avoids unnecessary reallocation.
  AudioBlock mReverbInput;
  // The convolution engine; null until a buffer and sample rate have been
  // received, and after an allocation failure.
  nsAutoPtr<WebCore::Reverb> mReverb;
  // Window ID used to target developer-console error messages.
  uint64_t mWindowID;
  // Tracks samples of the tail remaining to be output.  INT32_MIN is a
  // special value to indicate that the end of any previous tail has been
  // handled.
  int32_t mRemainingLeftOutput = INT32_MIN;
  // mRemainingRightOutput and mRemainingRightHistory are only used when
  // mRightOutputMode != Always.  There is no special handling required at the
  // end of tail times and so INT32_MIN is not used.
  // mRemainingRightOutput tracks how much longer this node needs to continue
  // to produce a right output channel.
  int32_t mRemainingRightOutput = 0;
  // mRemainingRightHistory tracks how much silent input would be required to
  // drain the right convolver, which may sometimes be longer than the period
  // a right output channel is required.
  int32_t mRemainingRightHistory = 0;
  // Sample rate for the impulse response; received via SAMPLE_RATE before
  // the buffer itself.  0 means "not yet received".
  float mSampleRate = 0.0f;
  // See RightConvolverMode above.
  RightConvolverMode mRightConvolverMode = RightConvolverMode::Always;
  // Background threads are not used for offline contexts (see constructor).
  bool mUseBackgroundThreads;
  // Whether the impulse response is normalized when creating the Reverb.
  bool mNormalize;
};
|
|
|
|
|
2018-08-06 12:24:15 +03:00
|
|
|
// Accumulates aScale * (channel 0) into channel 1 of aBlock, in place.
// With aScale == -1 this forms the r - l difference signal; with +1 it
// folds the left output back into the right channel.
static void AddScaledLeftToRight(AudioBlock* aBlock, float aScale) {
  const float* leftChannel =
      static_cast<const float*>(aBlock->mChannelData[0]);
  float* rightChannel = aBlock->ChannelFloatsForWrite(1);
  AudioBlockAddChannelWithScale(leftChannel, aScale, rightChannel);
}
|
|
|
|
|
2018-07-16 10:22:15 +03:00
|
|
|
// Processes one WEBAUDIO_BLOCK_SIZE block of input.  Manages the playing
// reference that keeps the node alive while a tail is draining, decides how
// to feed the second (right) convolver when the impulse response is mono
// (see RightConvolverMode), and determines the output channel count.
void ConvolverNodeEngine::ProcessBlock(AudioNodeStream* aStream,
                                       GraphTime aFrom,
                                       const AudioBlock& aInput,
                                       AudioBlock* aOutput, bool* aFinished) {
  if (!mReverb) {
    // No usable impulse response; output silence.
    aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  uint32_t inputChannelCount = aInput.ChannelCount();
  if (aInput.IsNull()) {
    if (mRemainingLeftOutput > 0) {
      // Still draining the tail: feed the reverb explicit silence.
      mRemainingLeftOutput -= WEBAUDIO_BLOCK_SIZE;
      AllocateReverbInput(aInput, 1);  // floats for silence
    } else {
      if (mRemainingLeftOutput != INT32_MIN) {
        // Tail just finished; release the playing reference taken when
        // input became non-silent, and let the stream go inactive.
        mRemainingLeftOutput = INT32_MIN;
        MOZ_ASSERT(mRemainingRightOutput <= 0);
        MOZ_ASSERT(mRemainingRightHistory <= 0);
        aStream->ScheduleCheckForInactive();
        RefPtr<PlayingRefChanged> refchanged =
            new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE);
        aStream->Graph()->DispatchToMainThreadStableState(refchanged.forget());
      }
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }
  } else {
    if (mRemainingLeftOutput <= 0) {
      // Input became non-silent: take a playing reference to keep the node
      // alive while the tail will be draining.
      RefPtr<PlayingRefChanged> refchanged =
          new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
      aStream->Graph()->DispatchToMainThreadStableState(refchanged.forget());
    }

    // Use mVolume as a flag to detect whether AllocateReverbInput() gets
    // called.
    mReverbInput.mVolume = 0.0f;

    // Special handling of input channel count changes is used when there is
    // only a single impulse response channel.  See RightConvolverMode.
    if (mRightConvolverMode != RightConvolverMode::Always) {
      ChannelInterpretation channelInterpretation =
          aStream->GetChannelInterpretation();
      if (inputChannelCount == 2) {
        if (mRemainingRightHistory <= 0) {
          // Will start the second convolver.  Choose to convolve the right
          // channel directly if there is no left tail to up-mix or up-mixing
          // is "discrete".
          mRightConvolverMode =
              (mRemainingLeftOutput <= 0 ||
               channelInterpretation == ChannelInterpretation::Discrete)
                  ? RightConvolverMode::Direct
                  : RightConvolverMode::Difference;
        }
        // The extra WEBAUDIO_BLOCK_SIZE is subtracted below.
        mRemainingRightOutput =
            mReverb->impulseResponseLength() + WEBAUDIO_BLOCK_SIZE;
        mRemainingRightHistory = mRemainingRightOutput;
        if (mRightConvolverMode == RightConvolverMode::Difference) {
          AllocateReverbInput(aInput, 2);
          // Subtract left from right.
          AddScaledLeftToRight(&mReverbInput, -1.0f);
        }
      } else if (mRemainingRightHistory > 0) {
        // There is one channel of input, but a second convolver also
        // requires input.  Up-mix appropriately for the second convolver.
        if ((mRightConvolverMode == RightConvolverMode::Difference) ^
            (channelInterpretation == ChannelInterpretation::Discrete)) {
          MOZ_ASSERT(
              (mRightConvolverMode == RightConvolverMode::Difference &&
               channelInterpretation == ChannelInterpretation::Speakers) ||
              (mRightConvolverMode == RightConvolverMode::Direct &&
               channelInterpretation == ChannelInterpretation::Discrete));
          // The state is one of the following combinations:
          // 1) Difference and speakers.
          //    Up-mixing gives r = l.
          //    The input to the second convolver is r - l.
          // 2) Direct and discrete.
          //    Up-mixing gives r = 0.
          //    The input to the second convolver is r.
          //
          // In each case the input for the second convolver is silence, which
          // will drain the convolver.
          AllocateReverbInput(aInput, 2);
        } else {
          if (channelInterpretation == ChannelInterpretation::Discrete) {
            MOZ_ASSERT(mRightConvolverMode == RightConvolverMode::Difference);
            // channelInterpretation has changed since the second convolver
            // was added.  "discrete" up-mixing of input would produce a
            // silent right channel r = 0, but the second convolver needs
            // r - l for RightConvolverMode::Difference.
            AllocateReverbInput(aInput, 2);
            AddScaledLeftToRight(&mReverbInput, -1.0f);
          } else {
            MOZ_ASSERT(channelInterpretation ==
                       ChannelInterpretation::Speakers);
            MOZ_ASSERT(mRightConvolverMode == RightConvolverMode::Direct);
            // The Reverb will essentially up-mix the single input channel by
            // feeding it into both convolvers.
          }
          // The second convolver does not have silent input, and so it will
          // not drain.  It will need to continue processing up-mixed input
          // because the next input block may be stereo, which would be mixed
          // with the signal remaining in the convolvers.
          // The extra WEBAUDIO_BLOCK_SIZE is subtracted below.
          mRemainingRightHistory =
              mReverb->impulseResponseLength() + WEBAUDIO_BLOCK_SIZE;
        }
      }
    }

    if (mReverbInput.mVolume == 0.0f) {  // not yet set
      if (aInput.mVolume != 1.0f) {
        AllocateReverbInput(aInput, inputChannelCount);  // pre-multiply
      } else {
        // Unit volume: share the input block directly; no copy needed.
        mReverbInput = aInput;
      }
    }

    mRemainingLeftOutput = mReverb->impulseResponseLength();
    MOZ_ASSERT(mRemainingLeftOutput > 0);
  }

  // "The ConvolverNode produces a mono output only in the single case where
  // there is a single input channel and a single-channel buffer."
  uint32_t outputChannelCount = 2;
  uint32_t reverbOutputChannelCount = 2;
  if (mRightConvolverMode != RightConvolverMode::Always) {
    // When the input changes from stereo to mono, the output continues to be
    // stereo for the length of the tail time, during which the two channels
    // may differ.
    if (mRemainingRightOutput > 0) {
      MOZ_ASSERT(mRemainingRightHistory > 0);
      mRemainingRightOutput -= WEBAUDIO_BLOCK_SIZE;
    } else {
      outputChannelCount = 1;
    }
    // The second convolver keeps processing until it drains.
    if (mRemainingRightHistory > 0) {
      mRemainingRightHistory -= WEBAUDIO_BLOCK_SIZE;
    } else {
      reverbOutputChannelCount = 1;
    }
  }

  // If there are two convolvers, then they each need an output buffer, even
  // if the second convolver is only processing to keep history of up-mixed
  // input.
  aOutput->AllocateChannels(reverbOutputChannelCount);

  mReverb->process(&mReverbInput, aOutput);

  if (mRightConvolverMode == RightConvolverMode::Difference &&
      outputChannelCount == 2) {
    // Add left to right.
    AddScaledLeftToRight(aOutput, 1.0f);
  } else {
    // Trim if outputChannelCount < reverbOutputChannelCount
    aOutput->mChannelData.TruncateLength(outputChannelCount);
  }
}
|
|
|
|
|
2013-06-11 00:07:55 +04:00
|
|
|
// Constructs a ConvolverNode with the spec defaults: channelCount 2,
// "clamped-max" count mode, "speakers" interpretation, normalize enabled.
ConvolverNode::ConvolverNode(AudioContext* aContext)
    : AudioNode(aContext, 2, ChannelCountMode::Clamped_max,
                ChannelInterpretation::Speakers),
      mNormalize(true) {
  // The window ID is only used to send allocation-error messages to the
  // developer console.  If there is no parent object the page is being
  // closed, so 0 is fine; the message would not matter much anyway.
  auto* parentObject = aContext->GetParentObject();
  const uint64_t windowID = parentObject ? parentObject->WindowID() : 0;

  ConvolverNodeEngine* engine =
      new ConvolverNodeEngine(this, mNormalize, windowID);
  mStream = AudioNodeStream::Create(
      aContext, engine, AudioNodeStream::NO_STREAM_FLAGS, aContext->Graph());
}
|
|
|
|
|
2019-02-26 01:05:29 +03:00
|
|
|
/* static */
|
|
|
|
already_AddRefed<ConvolverNode> ConvolverNode::Create(
|
2016-12-15 21:24:42 +03:00
|
|
|
JSContext* aCx, AudioContext& aAudioContext,
|
|
|
|
const ConvolverOptions& aOptions, ErrorResult& aRv) {
|
|
|
|
RefPtr<ConvolverNode> audioNode = new ConvolverNode(&aAudioContext);
|
|
|
|
|
|
|
|
audioNode->Initialize(aOptions, aRv);
|
|
|
|
if (NS_WARN_IF(aRv.Failed())) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
// This must be done before setting the buffer.
|
|
|
|
audioNode->SetNormalize(!aOptions.mDisableNormalization);
|
|
|
|
|
|
|
|
if (aOptions.mBuffer.WasPassed()) {
|
|
|
|
MOZ_ASSERT(aCx);
|
|
|
|
audioNode->SetBuffer(aCx, aOptions.mBuffer.Value(), aRv);
|
|
|
|
if (NS_WARN_IF(aRv.Failed())) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return audioNode.forget();
|
2014-07-09 01:23:17 +04:00
|
|
|
}
|
|
|
|
|
2014-04-13 22:08:10 +04:00
|
|
|
// Memory reporting for everything owned by this node except the object
// itself.
size_t ConvolverNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
  size_t total = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  if (mBuffer) {
    // NB: mBuffer might be shared with the associated engine; by convention
    // the AudioNode is the one that reports it.
    total += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
  }
  return total;
}
|
|
|
|
|
|
|
|
// Memory reporting: the object itself plus everything it owns.
size_t ConvolverNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
  const size_t self = aMallocSizeOf(this);
  return self + SizeOfExcludingThis(aMallocSizeOf);
}
|
|
|
|
|
Bug 1117172 part 3. Change the wrappercached WrapObject methods to allow passing in aGivenProto. r=peterv
The only manual changes here are to BindingUtils.h, BindingUtils.cpp,
Codegen.py, Element.cpp, IDBFileRequest.cpp, IDBObjectStore.cpp,
dom/workers/Navigator.cpp, WorkerPrivate.cpp, DeviceStorageRequestChild.cpp,
Notification.cpp, nsGlobalWindow.cpp, MessagePort.cpp, nsJSEnvironment.cpp,
Sandbox.cpp, XPCConvert.cpp, ExportHelpers.cpp, and DataStoreService.cpp. The
rest of this diff was generated by running the following commands:
find . -name "*.h" -o -name "*.cpp" | xargs perl -pi -e 'BEGIN { $/ = undef } s/(WrapObjectInternal\(JSContext *\* *(?:aCx|cx|aContext|aCtx|js))\)/\1, JS::Handle<JSObject*> aGivenProto)/g'
find . -name "*.h" -o -name "*.cpp" | xargs perl -pi -e 'BEGIN { $/ = undef } s/(WrapObjectInternal\((?:aCx|cx|aContext|aCtx|js))\)/\1, aGivenProto)/g'
find . -name "*.h" -o -name "*.cpp" | xargs perl -pi -e 'BEGIN { $/ = undef } s/(WrapNode\(JSContext *\* *(?:aCx|cx|aContext|aCtx|js))\)/\1, JS::Handle<JSObject*> aGivenProto)/g'
find . -name "*.h" -o -name "*.cpp" | xargs perl -pi -e 'BEGIN { $/ = undef } s/(WrapNode\((?:aCx|cx|aContext|aCtx|js))\)/\1, aGivenProto)/g'
find . -name "*.h" -o -name "*.cpp" | xargs perl -pi -e 'BEGIN { $/ = undef } s/(WrapObject\(JSContext *\* *(?:aCx|cx|aContext|aCtx|js))\)/\1, JS::Handle<JSObject*> aGivenProto)/g'
find . -name "*.h" -o -name "*.cpp" | xargs perl -pi -e 'BEGIN { $/ = undef } s/(Binding(?:_workers)?::Wrap\((?:aCx|cx|aContext|aCtx|js), [^,)]+)\)/\1, aGivenProto)/g'
2015-03-19 17:13:33 +03:00
|
|
|
// WebIDL reflection: creates the JS wrapper object for this node.
JSObject* ConvolverNode::WrapObject(JSContext* aCx,
                                    JS::Handle<JSObject*> aGivenProto) {
  return ConvolverNode_Binding::Wrap(aCx, this, aGivenProto);
}
|
|
|
|
|
|
|
|
// Sets the impulse-response buffer (the "buffer" attribute).  Only 1, 2, or
// 4 channel buffers are supported; anything else throws NotSupportedError.
// Int16 sample data is converted to float on the main thread before being
// forwarded to the engine on the graph thread.
void ConvolverNode::SetBuffer(JSContext* aCx, AudioBuffer* aBuffer,
                              ErrorResult& aRv) {
  if (aBuffer) {
    switch (aBuffer->NumberOfChannels()) {
      case 1:
      case 2:
      case 4:
        // Supported number of channels
        break;
      default:
        aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
        return;
    }
  }

  // Send the buffer to the stream
  AudioNodeStream* ns = mStream;
  MOZ_ASSERT(ns, "Why don't we have a stream here?");
  if (aBuffer) {
    AudioChunk data = aBuffer->GetThreadSharedChannelsForRate(aCx);
    if (data.mBufferFormat == AUDIO_FORMAT_S16) {
      // Reverb expects data in float format.
      // Convert on the main thread so as to minimize allocations on the audio
      // thread.
      // Reverb will dispose of the buffer once initialized, so convert here
      // and leave the smaller arrays in the AudioBuffer.
      // There is currently no value in providing 16/32-byte aligned data
      // because PadAndMakeScaledDFT() will copy the data (without SIMD
      // instructions) to aligned arrays for the FFT.
      RefPtr<SharedBuffer> floatBuffer = SharedBuffer::Create(
          sizeof(float) * data.mDuration * data.ChannelCount());
      if (!floatBuffer) {
        aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return;
      }
      auto floatData = static_cast<float*>(floatBuffer->Data());
      for (size_t i = 0; i < data.ChannelCount(); ++i) {
        // Convert each channel and point the chunk at the new float data.
        ConvertAudioSamples(data.ChannelData<int16_t>()[i], floatData,
                            data.mDuration);
        data.mChannelData[i] = floatData;
        floatData += data.mDuration;
      }
      data.mBuffer = std::move(floatBuffer);
      data.mBufferFormat = AUDIO_FORMAT_FLOAT32;
    }
    // The sample rate must be sent before the buffer: the engine creates its
    // Reverb (using the stored rate) when the buffer arrives.
    SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE,
                                aBuffer->SampleRate());
    ns->SetBuffer(std::move(data));
  } else {
    // A null chunk clears the engine's Reverb, producing silence.
    ns->SetBuffer(AudioChunk());
  }

  mBuffer = aBuffer;
}
|
|
|
|
|
|
|
|
// Records the "normalize" attribute locally and forwards it to the engine
// on the graph thread.
void ConvolverNode::SetNormalize(bool aNormalize) {
  mNormalize = aNormalize;
  SendInt32ParameterToStream(ConvolverNodeEngine::NORMALIZE, mNormalize);
}
|
|
|
|
|
2015-07-13 18:25:42 +03:00
|
|
|
} // namespace dom
|
|
|
|
} // namespace mozilla
|