Mirror of https://github.com/mozilla/gecko-dev.git
b=986901 don't assume that DelayNode maxDelayTime is greater than 1 block r=padenot

Also apply DelayNode maxDelayTime before rounding to ticks.

--HG--
extra : transplant_source : %F1i%02%2A%ED%98%95%C9u%60%0B%1A%81A%C2%8E%FB%F3%FA%D5
Parent: 22b3aa9cc0
Commit: d74753f274
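
The crossed-bounds case behind the commit title: when a DelayNode sits in a feedback cycle, the minimum usable delay is one block (WEBAUDIO_BLOCK_SIZE, 128 frames), but an author may choose a maxDelayTime shorter than that, so minDelay can exceed maxDelay. The patch therefore spells the clamp out as std::max(min, std::min(value, max)), where the lower bound is applied last and wins when the bounds cross, instead of using the clamped() helper, which assumes an ordered range. A self-contained sketch with illustrative values (not taken from the patch):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double minDelay = 128.0;  // one block: WEBAUDIO_BLOCK_SIZE frames
      const double maxDelay = 50.0;   // a maxDelayTime shorter than one block
      const double requested = 10.0;  // delay requested via the AudioParam

      // The patched expression: std::min applies the upper bound first, then
      // std::max lets the one-block minimum win even though it exceeds maxDelay.
      double result = std::max(minDelay, std::min(requested, maxDelay));
      std::printf("clamped delay: %g frames\n", result);  // prints 128
      return 0;
    }
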
@@ -17,6 +17,10 @@ DelayBuffer::Write(const AudioChunk& aInputChunk)
 {
   // We must have a reference to the buffer if there are channels
   MOZ_ASSERT(aInputChunk.IsNull() == !aInputChunk.mChannelData.Length());
+#ifdef DEBUG
+  MOZ_ASSERT(!mHaveWrittenBlock);
+  mHaveWrittenBlock = true;
+#endif
 
   if (!EnsureBuffer()) {
     return;
@@ -118,7 +122,7 @@ DelayBuffer::ReadChannels(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
   for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
     double currentDelay = aPerFrameDelays[i];
     MOZ_ASSERT(currentDelay >= 0.0);
-    MOZ_ASSERT(currentDelay <= static_cast<double>(mMaxDelayTicks));
+    MOZ_ASSERT(currentDelay <= (mChunks.Length() - 1) * WEBAUDIO_BLOCK_SIZE);
 
     // Interpolate two input frames in case the read position does not match
     // an integer index.
@@ -226,6 +230,9 @@ DelayBuffer::UpdateUpmixChannels(int aNewReadChunk, uint32_t aChannelCount,
 
   static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {};
 
+  NS_WARN_IF_FALSE(mHaveWrittenBlock || aNewReadChunk != mCurrentChunk,
+                   "Smoothing is making feedback delay too small.");
+
   mLastReadChunk = aNewReadChunk;
   // Missing assignment operator is bug 976927
   mUpmixChannels.ReplaceElementsAt(0, mUpmixChannels.Length(),
@@ -19,25 +19,34 @@ class DelayBuffer {
 public:
   // See WebAudioUtils::ComputeSmoothingRate() for frame to frame exponential
   // |smoothingRate| multiplier.
-  DelayBuffer(int aMaxDelayTicks, double aSmoothingRate)
+  DelayBuffer(double aMaxDelayTicks, double aSmoothingRate)
     : mSmoothingRate(aSmoothingRate)
     , mCurrentDelay(-1.0)
-    , mMaxDelayTicks(aMaxDelayTicks)
+    // Round the maximum delay up to the next tick.
+    , mMaxDelayTicks(ceil(aMaxDelayTicks))
     , mCurrentChunk(0)
     // mLastReadChunk is initialized in EnsureBuffer
+#ifdef DEBUG
+    , mHaveWrittenBlock(false)
+#endif
   {
+    // The 180 second limit in AudioContext::CreateDelay() and the
+    // 1 << MEDIA_TIME_FRAC_BITS limit on sample rate provide a limit on the
+    // maximum delay.
+    MOZ_ASSERT(aMaxDelayTicks <=
+               std::numeric_limits<decltype(mMaxDelayTicks)>::max());
   }
 
   // Write a WEBAUDIO_BLOCK_SIZE block for aChannelCount channels.
   void Write(const AudioChunk& aInputChunk);
 
   // Read a block with an array of delays, in ticks, for each sample frame.
-  // Each delay must be > 0 and < MaxDelayTicks().
+  // Each delay should be >= 0 and <= MaxDelayTicks().
   void Read(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
             AudioChunk* aOutputChunk,
             ChannelInterpretation aChannelInterpretation);
   // Read a block with a constant delay, which will be smoothed with the
-  // previous delay.  The delay must be > 0 and < MaxDelayTicks().
+  // previous delay.  The delay should be >= 0 and <= MaxDelayTicks().
   void Read(double aDelayTicks, AudioChunk* aOutputChunk,
             ChannelInterpretation aChannelInterpretation);
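
The new MOZ_ASSERT in the constructor documents why ceil(aMaxDelayTicks) still fits in mMaxDelayTicks, which stays an int. Assuming MEDIA_TIME_FRAC_BITS is 20 (capping sample rates at 1 << 20 Hz), and given the 180 second cap in AudioContext::CreateDelay(), the worst case is far below INT_MAX; a quick check of that arithmetic:

    #include <cassert>
    #include <limits>

    int main() {
      // Assumption: MEDIA_TIME_FRAC_BITS == 20, capping sample rates at 2^20 Hz.
      const double maxSeconds = 180.0;               // AudioContext::CreateDelay() limit
      const double maxSampleRate = double(1 << 20);  // 1048576 Hz
      const double maxTicks = maxSeconds * maxSampleRate;  // 188743680 ticks

      // Comfortably below INT_MAX (2147483647), so the int member cannot overflow.
      assert(maxTicks <= std::numeric_limits<int>::max());
      return 0;
    }
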
@@ -53,6 +62,10 @@ public:
   void NextBlock()
   {
     mCurrentChunk = (mCurrentChunk + 1) % mChunks.Length();
+#ifdef DEBUG
+    MOZ_ASSERT(mHaveWrittenBlock);
+    mHaveWrittenBlock = false;
+#endif
   }
 
   void Reset() {
@@ -89,6 +102,9 @@ private:
   int mCurrentChunk;
   // The chunk owning the pointers in mUpmixChannels
   int mLastReadChunk;
+#ifdef DEBUG
+  bool mHaveWrittenBlock;
+#endif
 };
 
 } // mozilla
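
Taken together, the DEBUG-only additions in Write(), NextBlock(), and the member list enforce a simple protocol: exactly one Write() per rendered block, followed by NextBlock(). A stripped-down sketch of just that invariant (the real class carries much more state):

    #include <cassert>

    struct DebugDelayBuffer {
      bool mHaveWrittenBlock = false;

      void Write() {
        assert(!mHaveWrittenBlock);  // at most one Write() per block
        mHaveWrittenBlock = true;
      }

      void NextBlock() {
        assert(mHaveWrittenBlock);   // a block must be written before advancing
        mHaveWrittenBlock = false;
      }
    };

    int main() {
      DebugDelayBuffer buffer;
      buffer.Write();      // OK
      buffer.NextBlock();  // OK: one write, then advance
      return 0;
    }
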
@@ -30,16 +30,18 @@ class DelayNodeEngine : public AudioNodeEngine
   typedef PlayingRefChangeHandler PlayingRefChanged;
 public:
   DelayNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination,
-                  int aMaxDelayTicks)
+                  double aMaxDelayTicks)
     : AudioNodeEngine(aNode)
     , mSource(nullptr)
     , mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
     // Keep the default value in sync with the default value in DelayNode::DelayNode.
     , mDelay(0.f)
     // Use a smoothing range of 20ms
-    , mBuffer(aMaxDelayTicks,
+    , mBuffer(std::max(aMaxDelayTicks,
+                       static_cast<double>(WEBAUDIO_BLOCK_SIZE)),
               WebAudioUtils::ComputeSmoothingRate(0.02,
                                                   mDestination->SampleRate()))
+    , mMaxDelay(aMaxDelayTicks)
     , mLastOutputPosition(-1)
     , mLeftOverData(INT32_MIN)
   {
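
This constructor change separates two values that used to be the same: the buffer is now sized for at least one block, so a cycle's one-block minimum delay always fits, while the new mMaxDelay member preserves the author's unrounded maximum for clamping in ProcessBlock(). A sketch with assumed numbers showing how the two diverge:

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Assumed input: a 0.5 ms maxDelayTime at 48 kHz.
      const double maxDelayTicks = 0.0005 * 48000.0;  // 24 ticks
      const double blockSize = 128.0;                 // WEBAUDIO_BLOCK_SIZE

      // Buffer capacity is padded up to one block...
      double bufferTicks = std::max(maxDelayTicks, blockSize);  // 128
      // ...while the clamp limit keeps the author's requested maximum.
      double clampMax = maxDelayTicks;                          // 24

      std::printf("buffer: %g ticks, clamp max: %g ticks\n", bufferTicks, clampMax);
      return 0;
    }
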
@@ -122,15 +124,16 @@ public:
     mLastOutputPosition = tick;
     bool inCycle = mSource->AsProcessedStream()->InCycle();
     double minDelay = inCycle ? static_cast<double>(WEBAUDIO_BLOCK_SIZE) : 0.0;
-    double maxDelay = mBuffer.MaxDelayTicks();
+    double maxDelay = mMaxDelay;
     double sampleRate = mSource->SampleRate();
     ChannelInterpretation channelInterpretation =
       mSource->GetChannelInterpretation();
     if (mDelay.HasSimpleValue()) {
       // If this DelayNode is in a cycle, make sure the delay value is at least
-      // one block.
+      // one block, even if that is greater than maxDelay.
       double delayFrames = mDelay.GetValue() * sampleRate;
-      double delayFramesClamped = clamped(delayFrames, minDelay, maxDelay);
+      double delayFramesClamped =
+        std::max(minDelay, std::min(delayFrames, maxDelay));
       mBuffer.Read(delayFramesClamped, aOutput, channelInterpretation);
     } else {
       // Compute the delay values for the duration of the input AudioChunk
@@ -139,7 +142,8 @@ public:
       double computedDelay[WEBAUDIO_BLOCK_SIZE];
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         double delayAtTick = mDelay.GetValueAtTime(tick, counter) * sampleRate;
-        double delayAtTickClamped = clamped(delayAtTick, minDelay, maxDelay);
+        double delayAtTickClamped =
+          std::max(minDelay, std::min(delayAtTick, maxDelay));
         computedDelay[counter] = delayAtTickClamped;
       }
       mBuffer.Read(computedDelay, aOutput, channelInterpretation);
@@ -159,6 +163,7 @@ public:
   AudioNodeStream* mDestination;
   AudioParamTimeline mDelay;
   DelayBuffer mBuffer;
+  double mMaxDelay;
   TrackTicks mLastOutputPosition;
   // How much data we have in our buffer which needs to be flushed out when our inputs
   // finish.
@@ -175,7 +180,7 @@ DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
 {
   DelayNodeEngine* engine =
     new DelayNodeEngine(this, aContext->Destination(),
-                        ceil(aContext->SampleRate() * aMaxDelay));
+                        aContext->SampleRate() * aMaxDelay);
   mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
   engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
 }
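
This is the "apply maxDelayTime before rounding to ticks" half of the commit message. Previously DelayNode passed ceil(sampleRate * aMaxDelay) to the engine, so the clamp limit itself was rounded up by as much as a tick; now the exact product is used for clamping, and only the buffer capacity is rounded up, by the ceil() added in the DelayBuffer constructor. A worked example with assumed numbers:

    #include <cmath>
    #include <cstdio>

    int main() {
      // Assumed inputs: a 44.1 kHz context and maxDelayTime of 0.1001 s.
      const double sampleRate = 44100.0;
      const double maxDelayTime = 0.1001;
      const double exactTicks = sampleRate * maxDelayTime;  // 4414.41 ticks

      double oldClampMax = std::ceil(exactTicks);  // before: 4415, over the limit
      double newClampMax = exactTicks;             // after: exactly 4414.41
      double capacity = std::ceil(exactTicks);     // buffer still rounds up: 4415

      std::printf("old %g, new %g, capacity %g\n", oldClampMax, newClampMax, capacity);
      return 0;
    }
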
@@ -54,7 +54,7 @@ HRTFPanner::HRTFPanner(float sampleRate, mozilla::TemporaryRef<HRTFDatabaseLoader>
   , m_convolverR1(m_convolverL1.fftSize())
   , m_convolverL2(m_convolverL1.fftSize())
   , m_convolverR2(m_convolverL1.fftSize())
-  , m_delayLine(ceilf(MaxDelayTimeSeconds * sampleRate), 1.0)
+  , m_delayLine(MaxDelayTimeSeconds * sampleRate, 1.0)
 {
   MOZ_ASSERT(m_databaseLoader);
   MOZ_COUNT_CTOR(HRTFPanner);