bug 1199559 write audioprocess input buffer in a format suitable for direct use by AudioBuffer r=padenot

This saves copying.

This also avoids graph thread allocations when input is null.

--HG--
extra : rebase_source : f9d2d987076fd23ecf2ff37ab0008ef7eb50a278
Karl Tomlinson 2015-08-28 18:30:17 +12:00
Parent 604c7a838d
Commit 306e3b5c38
1 changed file with 36 additions and 55 deletions
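
The patch replaces the engine's per-channel nsAutoArrayPtr<float> arrays with a single refcounted ThreadSharedFloatArrayBufferList that the main-thread AudioBuffer can adopt directly. Below is a minimal portable-C++ sketch of that handoff; SharedChannelBuffers and AdoptingBuffer are invented stand-ins for Gecko's ThreadSharedFloatArrayBufferList and AudioBuffer, and std::shared_ptr stands in for nsRefPtr.

    #include <cstdint>
    #include <memory>
    #include <utility>
    #include <vector>

    // Invented stand-in for ThreadSharedFloatArrayBufferList: one
    // refcounted object owning every channel of one input buffer.
    struct SharedChannelBuffers {
      SharedChannelBuffers(uint32_t aChannels, uint32_t aLength)
        : mChannels(aChannels, std::vector<float>(aLength)) {}
      float* GetDataForWrite(uint32_t aChannel) {
        return mChannels[aChannel].data();
      }
      std::vector<std::vector<float>> mChannels;
    };

    // Invented stand-in for AudioBuffer: it adopts the shared storage
    // rather than copying each channel out of it.
    struct AdoptingBuffer {
      explicit AdoptingBuffer(std::shared_ptr<SharedChannelBuffers> aData)
        : mData(std::move(aData)) {}  // refcount bump; no samples copied
      std::shared_ptr<SharedChannelBuffers> mData;
    };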


@@ -240,8 +240,6 @@ private:
 class ScriptProcessorNodeEngine final : public AudioNodeEngine
 {
 public:
-  typedef nsAutoTArray<nsAutoArrayPtr<float>, 2> InputChannels;
-
   ScriptProcessorNodeEngine(ScriptProcessorNode* aNode,
                             AudioDestinationNode* aDestination,
                             uint32_t aBufferSize,
@@ -250,11 +248,9 @@ public:
     , mSource(nullptr)
     , mDestination(aDestination->Stream())
     , mBufferSize(aBufferSize)
+    , mInputChannelCount(aNumberOfInputChannels)
     , mInputWriteIndex(0)
-    , mSeenNonSilenceInput(false)
   {
-    mInputChannels.SetLength(aNumberOfInputChannels);
-    AllocateInputBlock();
   }
 
   void SetSourceStream(AudioNodeStream* aSource)
@@ -294,23 +290,34 @@ public:
     if (!mIsConnected) {
       aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
       mSharedBuffers->Reset();
-      mSeenNonSilenceInput = false;
       mInputWriteIndex = 0;
       return;
     }
 
-    // First, record our input buffer
-    for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
+    // The input buffer is allocated lazily when non-null input is received.
+    if (!aInput.IsNull() && !mInputBuffer) {
+      mInputBuffer = ThreadSharedFloatArrayBufferList::
+        Create(mInputChannelCount, mBufferSize, fallible);
+      if (mInputBuffer && mInputWriteIndex) {
+        // Zero leading for null chunks that were skipped.
+        for (uint32_t i = 0; i < mInputChannelCount; ++i) {
+          float* channelData = mInputBuffer->GetDataForWrite(i);
+          PodZero(channelData, mInputWriteIndex);
+        }
+      }
+    }
+
+    // First, record our input buffer, if its allocation succeeded.
+    uint32_t inputChannelCount = mInputBuffer ? mInputBuffer->GetChannels() : 0;
+    for (uint32_t i = 0; i < inputChannelCount; ++i) {
+      float* writeData = mInputBuffer->GetDataForWrite(i) + mInputWriteIndex;
       if (aInput.IsNull()) {
-        PodZero(mInputChannels[i] + mInputWriteIndex,
-                aInput.GetDuration());
+        PodZero(writeData, aInput.GetDuration());
       } else {
-        mSeenNonSilenceInput = true;
         MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
-        MOZ_ASSERT(aInput.mChannelData.Length() == mInputChannels.Length());
+        MOZ_ASSERT(aInput.mChannelData.Length() == inputChannelCount);
         AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
-                                       aInput.mVolume,
-                                       mInputChannels[i] + mInputWriteIndex);
+                                       aInput.mVolume, writeData);
       }
     }
     mInputWriteIndex += aInput.GetDuration();
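
The hunk above is the allocation-avoidance half of the patch: the buffer is created only once a non-null (audible) chunk arrives, and the region that earlier silent chunks would have written is zero-filled retroactively. A sketch of that pattern in portable C++, using a single channel and memcpy in place of AudioBlockCopyChannelWithScale; all names here are illustrative, not Gecko's.

    #include <cstdint>
    #include <cstring>
    #include <memory>

    constexpr uint32_t kBlockSize = 128;  // WEBAUDIO_BLOCK_SIZE in Gecko

    struct Engine {
      uint32_t mBufferSize = 1024;
      uint32_t mWriteIndex = 0;          // samples consumed so far
      std::unique_ptr<float[]> mBuffer;  // one channel, for brevity

      void ProcessBlock(const float* aInput) {  // null input means silence
        if (aInput && !mBuffer) {
          // First audible block: allocate now (uninitialized, like the
          // fallible Create), then zero what earlier silent blocks skipped.
          mBuffer.reset(new float[mBufferSize]);
          std::memset(mBuffer.get(), 0, mWriteIndex * sizeof(float));
        }
        if (mBuffer) {
          if (aInput) {
            std::memcpy(mBuffer.get() + mWriteIndex, aInput,
                        kBlockSize * sizeof(float));
          } else {
            std::memset(mBuffer.get() + mWriteIndex, 0,
                        kBlockSize * sizeof(float));
          }
        }
        // The index advances even while mBuffer is still null, so an
        // all-silent interval performs no allocation at all.
        mWriteIndex += kBlockSize;
      }
    };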
@@ -323,8 +330,6 @@ public:
     if (mInputWriteIndex >= mBufferSize) {
       SendBuffersToMainThread(aStream);
       mInputWriteIndex -= mBufferSize;
-      mSeenNonSilenceInput = false;
-      AllocateInputBlock();
     }
   }
@@ -335,10 +340,7 @@ public:
     // - mDestination (probably)
     size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
     amount += mSharedBuffers->SizeOfIncludingThis(aMallocSizeOf);
-    amount += mInputChannels.ShallowSizeOfExcludingThis(aMallocSizeOf);
-    for (size_t i = 0; i < mInputChannels.Length(); i++) {
-      amount += mInputChannels[i].SizeOfExcludingThis(aMallocSizeOf);
-    }
+    amount += mInputBuffer->SizeOfIncludingThis(aMallocSizeOf);
     return amount;
   }
@@ -349,15 +351,6 @@ public:
   }
 
 private:
-  void AllocateInputBlock()
-  {
-    for (unsigned i = 0; i < mInputChannels.Length(); ++i) {
-      if (!mInputChannels[i]) {
-        mInputChannels[i] = new float[mBufferSize];
-      }
-    }
-  }
-
   void SendBuffersToMainThread(AudioNodeStream* aStream)
   {
     MOZ_ASSERT(!NS_IsMainThread());
@@ -376,19 +369,12 @@ private:
     {
     public:
       Command(AudioNodeStream* aStream,
-              InputChannels& aInputChannels,
-              double aPlaybackTime,
-              bool aNullInput)
+              already_AddRefed<ThreadSharedFloatArrayBufferList> aInputBuffer,
+              double aPlaybackTime)
         : mStream(aStream)
+        , mInputBuffer(aInputBuffer)
         , mPlaybackTime(aPlaybackTime)
-        , mNullInput(aNullInput)
       {
-        mInputChannels.SetLength(aInputChannels.Length());
-        if (!aNullInput) {
-          for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
-            mInputChannels[i] = aInputChannels[i].forget();
-          }
-        }
       }
 
       NS_IMETHOD Run() override
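
already_AddRefed in the new Command signature hands the reference itself to the runnable: the graph thread gives up ownership at the call site, and no extra AddRef/Release pair has to cross threads. A rough portable-C++ analogue, with std::shared_ptr plus std::move standing in for nsRefPtr plus .forget(); this Command is a simplified invention, not the class in the patch.

    #include <memory>
    #include <utility>
    #include <vector>

    using SharedSamples = std::shared_ptr<std::vector<float>>;

    // Simplified analogue of the Command runnable: taking the buffer by
    // value and moving it in transfers ownership at construction.
    class Command {
    public:
      Command(SharedSamples aInputBuffer, double aPlaybackTime)
        : mInputBuffer(std::move(aInputBuffer))
        , mPlaybackTime(aPlaybackTime) {}

      void Run() {
        // Main-thread work would consume mInputBuffer here; a null
        // buffer means the whole interval was silent, replacing the
        // old explicit mNullInput flag.
      }

      double PlaybackTime() const { return mPlaybackTime; }

    private:
      SharedSamples mInputBuffer;
      double mPlaybackTime;
    };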
@@ -431,22 +417,19 @@ private:
           return nullptr;
         }
         JSContext* cx = jsapi.cx();
+        uint32_t inputChannelCount = aNode->ChannelCount();
 
         // Create the input buffer
         nsRefPtr<AudioBuffer> inputBuffer;
-        if (!mNullInput) {
+        if (mInputBuffer) {
           ErrorResult rv;
           inputBuffer =
-            AudioBuffer::Create(context, mInputChannels.Length(),
-                                aNode->BufferSize(),
-                                context->SampleRate(), cx, rv);
+            AudioBuffer::Create(context, inputChannelCount,
+                                aNode->BufferSize(), context->SampleRate(),
+                                mInputBuffer.forget(), cx, rv);
           if (rv.Failed()) {
             return nullptr;
           }
-          // Put the channel data inside it
-          for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
-            inputBuffer->SetRawChannelContents(i, mInputChannels[i]);
-          }
         }
 
         // Ask content to produce data in the output buffer
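
With the input already sitting in a ThreadSharedFloatArrayBufferList, AudioBuffer::Create can consume the storage itself, which is what removes the old per-channel SetRawChannelContents loop; the commit message's "saving copying" refers to exactly this. A sketch of the two shapes side by side, in portable C++ with invented names:

    #include <cstddef>
    #include <memory>
    #include <vector>

    using Channels = std::vector<std::vector<float>>;

    // Old shape: allocate fresh storage, then copy every channel into
    // it, as the per-channel loop did.
    std::shared_ptr<Channels> CreateByCopy(const Channels& aSrc) {
      auto out = std::make_shared<Channels>(aSrc.size());
      for (std::size_t i = 0; i < aSrc.size(); ++i) {
        (*out)[i] = aSrc[i];  // the copy this patch eliminates
      }
      return out;
    }

    // New shape: adopt the storage the graph thread already filled.
    std::shared_ptr<Channels> CreateByAdoption(std::shared_ptr<Channels> aSrc) {
      return aSrc;  // ownership moves; no samples are touched
    }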
@@ -457,7 +440,7 @@ private:
         nsRefPtr<AudioProcessingEvent> event =
           new AudioProcessingEvent(aNode, nullptr, nullptr);
         event->InitEvent(inputBuffer,
-                         mInputChannels.Length(),
+                         inputChannelCount,
                          context->StreamTimeToDOMTime(mPlaybackTime));
         aNode->DispatchTrustedEvent(event);
@@ -478,14 +461,12 @@ private:
       }
     private:
       nsRefPtr<AudioNodeStream> mStream;
-      InputChannels mInputChannels;
+      nsRefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer;
       double mPlaybackTime;
-      bool mNullInput;
     };
 
-    NS_DispatchToMainThread(new Command(aStream, mInputChannels,
-                                        playbackTime,
-                                        !mSeenNonSilenceInput));
+    NS_DispatchToMainThread(new Command(aStream, mInputBuffer.forget(),
+                                        playbackTime));
   }
 
   friend class ScriptProcessorNode;
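
Note that mInputBuffer.forget() at the dispatch site also nulls the engine's member, so the next audible block re-enters the lazy-allocation path in ProcessBlock, and an interval of pure silence never allocates. A self-contained sketch of that lifecycle, again with std::shared_ptr and std::move as an assumed analogue for nsRefPtr and .forget():

    #include <memory>
    #include <utility>
    #include <vector>

    using SharedSamples = std::shared_ptr<std::vector<float>>;

    int main() {
      SharedSamples inputBuffer = std::make_shared<std::vector<float>>(1024);

      // Like mInputBuffer.forget() in the NS_DispatchToMainThread call:
      // the move empties the engine-side pointer...
      SharedSamples forMainThread = std::move(inputBuffer);

      // ...so the engine starts the next buffer from scratch, allocating
      // only when non-silent input actually arrives.
      return inputBuffer == nullptr ? 0 : 1;
    }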
@@ -493,12 +474,12 @@ private:
   nsAutoPtr<SharedBuffers> mSharedBuffers;
   AudioNodeStream* mSource;
   AudioNodeStream* mDestination;
-  InputChannels mInputChannels;
+  nsRefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer;
   const uint32_t mBufferSize;
+  const uint32_t mInputChannelCount;
   // The write index into the current input buffer
   uint32_t mInputWriteIndex;
   bool mIsConnected = false;
-  bool mSeenNonSilenceInput;
 };
 
 ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,