b=815643 Implement HRTF panner processing based on Blink implementation r=ehsan

--HG--
extra : rebase_source : 197fd6f39d3d45b61d25d9dc399c72828d93655a
Karl Tomlinson 2013-08-09 10:08:06 +12:00
Parent c41d18de82
Commit fb08eba385
3 changed files with 62 additions and 21 deletions

View file

@@ -9,6 +9,10 @@
#include "AudioNodeStream.h"
#include "AudioListener.h"
#include "AudioBufferSourceNode.h"
#include "blink/HRTFPanner.h"
using WebCore::HRTFDatabaseLoader;
using WebCore::HRTFPanner;
namespace mozilla {
namespace dom {
@@ -56,6 +60,10 @@ public:
, mListenerDopplerFactor(0.)
, mListenerSpeedOfSound(0.)
{
// HRTFDatabaseLoader needs to be fetched on the main thread.
TemporaryRef<HRTFDatabaseLoader> loader =
HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(aNode->Context()->SampleRate());
mHRTFPanner = new HRTFPanner(aNode->Context()->SampleRate(), loader);
}
virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) MOZ_OVERRIDE
@@ -131,16 +139,14 @@ public:
AudioChunk* aOutput,
bool *aFinished) MOZ_OVERRIDE
{
if (aInput.IsNull()) {
*aOutput = aInput;
return;
}
(this->*mPanningModelFunction)(aInput, aOutput);
}
void ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation);
void DistanceAndConeGain(AudioChunk* aChunk, float aGain);
float ComputeConeGain();
// Compute how much the distance contributes to the gain reduction.
float ComputeDistanceGain();
void GainMonoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
float aGainL, float aGainR);
@@ -154,6 +160,7 @@ public:
float InverseGainFunction(float aDistance);
float ExponentialGainFunction(float aDistance);
nsAutoPtr<HRTFPanner> mHRTFPanner;
PanningModelType mPanningModel;
typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioChunk& aInput, AudioChunk* aOutput);
PanningModelFunction mPanningModelFunction;
@@ -245,17 +252,43 @@ void
PannerNodeEngine::HRTFPanningFunction(const AudioChunk& aInput,
AudioChunk* aOutput)
{
// not implemented: noop
*aOutput = aInput;
int numChannels = aInput.mChannelData.Length();
// The output of this node is always stereo, no matter what the inputs are.
AllocateAudioBlock(2, aOutput);
float azimuth, elevation;
ComputeAzimuthAndElevation(azimuth, elevation);
AudioChunk input = aInput;
// Gain is applied before the delay and convolution of the HRTF
if (!input.IsNull()) {
float gain = ComputeConeGain() * ComputeDistanceGain() * aInput.mVolume;
if (gain != 1.0f) {
AllocateAudioBlock(numChannels, &input);
for (int i = 0; i < numChannels; ++i) {
const float* src = static_cast<const float*>(aInput.mChannelData[i]);
float* dest =
static_cast<float*>(const_cast<void*>(input.mChannelData[i]));
AudioBlockCopyChannelWithScale(src, gain, dest);
}
}
}
mHRTFPanner->pan(azimuth, elevation, &input, aOutput, WEBAUDIO_BLOCK_SIZE);
}
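An aside on the gain step above: AudioBlockCopyChannelWithScale amounts to a scaled copy of one 128-sample block per channel, applied before the block is handed to the HRTF panner. A minimal scalar sketch of that operation (hypothetical standalone helper, assuming WEBAUDIO_BLOCK_SIZE is 128; the real Gecko routine may be vectorized):

    // Scaled block copy: aDest[i] = aSource[i] * aGain for one 128-sample block.
    // aGain is the combined cone * distance * chunk-volume factor computed above.
    static void ScaleBlock(const float* aSource, float aGain, float* aDest)
    {
      const unsigned blockSize = 128; // WEBAUDIO_BLOCK_SIZE
      for (unsigned i = 0; i < blockSize; ++i) {
        aDest[i] = aSource[i] * aGain;
      }
    }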
void
PannerNodeEngine::EqualPowerPanningFunction(const AudioChunk& aInput,
AudioChunk* aOutput)
{
float azimuth, elevation, gainL, gainR, normalizedAzimuth, distance, distanceGain, coneGain;
if (aInput.IsNull()) {
*aOutput = aInput;
return;
}
float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
int inputChannels = aInput.mChannelData.Length();
ThreeDPoint distanceVec;
// If the source and the listener are at the same spot, and no cone gain is specified,
// this node is a noop.
@@ -294,10 +327,7 @@ PannerNodeEngine::EqualPowerPanningFunction(const AudioChunk& aInput,
}
}
// Compute how much the distance contributes to the gain reduction.
distanceVec = mPosition - mListenerPosition;
distance = sqrt(distanceVec.DotProduct(distanceVec));
distanceGain = (this->*mDistanceModelFunction)(distance);
distanceGain = ComputeDistanceGain();
// Actually compute the left and right gain.
gainL = cos(0.5 * M_PI * normalizedAzimuth) * aInput.mVolume;
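For context on the equal-power path: the cos() term above is paired with a sin() term for the right channel (that line falls outside this hunk), so that gainL^2 + gainR^2 stays constant across the pan range. A small standalone sketch, assuming the standard Web Audio equal-power mapping and placeholder parameter names:

    #include <cmath>
    // Equal-power stereo gains for a normalized azimuth in [0, 1].
    // At 0.5 (source straight ahead) both gains are cos(pi/4) ~= 0.707,
    // so the summed power matches that of a centered source.
    static void EqualPowerGains(float aNormalizedAzimuth, float aVolume,
                                float& aGainL, float& aGainR)
    {
      aGainL = cosf(0.5f * float(M_PI) * aNormalizedAzimuth) * aVolume;
      aGainR = sinf(0.5f * float(M_PI) * aNormalizedAzimuth) * aVolume;
    }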
@@ -442,6 +472,14 @@ PannerNodeEngine::ComputeConeGain()
return gain;
}
float
PannerNodeEngine::ComputeDistanceGain()
{
ThreeDPoint distanceVec = mPosition - mListenerPosition;
float distance = sqrt(distanceVec.DotProduct(distanceVec));
return (this->*mDistanceModelFunction)(distance);
}
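The model invoked through mDistanceModelFunction is one of the Linear/Inverse/ExponentialGainFunction members declared earlier. As an illustration, a sketch of the inverse model as described by the Web Audio spec (hypothetical free function; clamping of the distance to the reference distance is omitted):

    // Inverse distance model (Web Audio spec, sketch):
    //   gain = refDistance / (refDistance + rolloffFactor * (distance - refDistance))
    static float InverseDistanceGain(float aDistance, float aRefDistance,
                                     float aRolloffFactor)
    {
      return aRefDistance /
             (aRefDistance + aRolloffFactor * (aDistance - aRefDistance));
    }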
float
PannerNode::ComputeDopplerShift()
{

View file

@@ -41,7 +41,7 @@ const double MaxDelayTimeSeconds = 0.002;
const int UninitializedAzimuth = -1;
const unsigned RenderingQuantum = 128;
HRTFPanner::HRTFPanner(float sampleRate, HRTFDatabaseLoader* databaseLoader)
HRTFPanner::HRTFPanner(float sampleRate, mozilla::TemporaryRef<HRTFDatabaseLoader> databaseLoader)
: m_databaseLoader(databaseLoader)
, m_sampleRate(sampleRate)
, m_crossfadeSelection(CrossfadeSelection1)
@@ -60,7 +60,9 @@ HRTFPanner::HRTFPanner(float sampleRate, HRTFDatabaseLoader* databaseLoader)
, m_delayLineR(ceilf(MaxDelayTimeSeconds * sampleRate),
WebAudioUtils::ComputeSmoothingRate(0.02, sampleRate))
{
MOZ_ASSERT(databaseLoader);
MOZ_ASSERT(m_databaseLoader);
MOZ_COUNT_CTOR(HRTFPanner);
m_tempL1.SetLength(RenderingQuantum);
m_tempR1.SetLength(RenderingQuantum);
m_tempL2.SetLength(RenderingQuantum);
@@ -69,6 +71,7 @@ HRTFPanner::HRTFPanner(float sampleRate, HRTFDatabaseLoader* databaseLoader)
HRTFPanner::~HRTFPanner()
{
MOZ_COUNT_DTOR(HRTFPanner);
}
void HRTFPanner::reset()
@@ -111,7 +114,7 @@ void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioChunk*
unsigned numInputChannels =
inputBus->IsNull() ? 0 : inputBus->mChannelData.Length();
bool isInputGood = inputBus && numInputChannels >= 1 && numInputChannels <= 2;
bool isInputGood = inputBus && numInputChannels <= 2;
MOZ_ASSERT(isInputGood);
MOZ_ASSERT(framesToProcess <= inputBus->mDuration);
@@ -125,8 +128,7 @@ void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioChunk*
}
HRTFDatabase* database = m_databaseLoader->database();
MOZ_ASSERT(database);
if (!database) {
if (!database) { // not yet loaded
outputBus->SetNull(outputBus->mDuration);
return;
}
@@ -145,7 +147,8 @@ void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioChunk*
// If we have a stereo input, implement stereo panning with left source processed by left HRTF, and right source by right HRTF.
// Get source and destination pointers.
const float* sourceL = static_cast<const float*>(inputBus->mChannelData[0]);
const float* sourceL = numInputChannels > 0 ?
static_cast<const float*>(inputBus->mChannelData[0]) : nullptr;
const float* sourceR = numInputChannels > 1 ?
static_cast<const float*>(inputBus->mChannelData[1]) : sourceL;
float* destinationL =
@@ -225,8 +228,8 @@ void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioChunk*
// Calculate the source and destination pointers for the current segment.
unsigned offset = segment * framesPerSegment;
const float* segmentSourceL = sourceL + offset;
const float* segmentSourceR = sourceR + offset;
const float* segmentSourceL = sourceL ? sourceL + offset : nullptr;
const float* segmentSourceR = sourceR ? sourceR + offset : nullptr;
float* segmentDestinationL = destinationL + offset;
float* segmentDestinationR = destinationR + offset;

View file

@@ -39,7 +39,7 @@ using mozilla::AudioChunk;
class HRTFPanner {
public:
HRTFPanner(float sampleRate, HRTFDatabaseLoader*);
HRTFPanner(float sampleRate, mozilla::TemporaryRef<HRTFDatabaseLoader> databaseLoader);
~HRTFPanner();
// framesToProcess must be a power of 2 and greater than 128
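Putting the pieces together, the intended usage mirrors the PannerNodeEngine constructor in the first file: fetch the loader on the main thread, hand the reference to the panner, then drive pan() per 128-frame block on the rendering thread. A rough sketch, using placeholder variable names and Gecko-internal types (so it only compiles inside the tree):

    // Main thread: kick off the asynchronous HRTF database load and build the panner.
    TemporaryRef<HRTFDatabaseLoader> loader =
      HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
    nsAutoPtr<HRTFPanner> panner(new HRTFPanner(sampleRate, loader));

    // Rendering thread: pan() outputs silence until the database has finished
    // loading, then delays and convolves each block with the selected HRTF kernels.
    panner->pan(azimuth, elevation, &inputChunk, &outputChunk, WEBAUDIO_BLOCK_SIZE);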