Mirror of https://github.com/mozilla/gecko-dev.git

Bug 1200579 - Stop copying AudioParam timelines. r=karlt

--HG--
extra : rebase_source : 3acc85754acb096843c45d5ad12e8e3f7954ecdc

This commit is contained in:
Parent: 227eb62a65
Commit: cb424cc5a1
AudioBufferSourceNode.cpp

@@ -66,18 +66,20 @@ public:
     mSource = aSource;
   }

-  virtual void SetTimelineParameter(uint32_t aIndex,
-                                    const dom::AudioParamTimeline& aValue,
-                                    TrackRate aSampleRate) override
+  virtual void RecvTimelineEvent(uint32_t aIndex,
+                                 dom::AudioTimelineEvent& aEvent) override
  {
    MOZ_ASSERT(mSource && mDestination);
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
    switch (aIndex) {
    case AudioBufferSourceNode::PLAYBACKRATE:
-      mPlaybackRateTimeline = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mPlaybackRateTimeline, mSource, mDestination);
+      mPlaybackRateTimeline.InsertEvent<int64_t>(aEvent);
      break;
    case AudioBufferSourceNode::DETUNE:
-      mDetuneTimeline = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mDetuneTimeline, mSource, mDestination);
+      mDetuneTimeline.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter");
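The engine-side change above is the heart of the patch: RecvTimelineEvent replaces SetTimelineParameter, so instead of overwriting the engine's AudioParamTimeline with a fresh copy and re-converting every stored event to ticks, the engine converts the one incoming event and inserts it into the timeline it already owns. A standalone sketch of the receiving side, with hypothetical names (std::vector standing in for nsTArray; not the Gecko API):

// Minimal sketch of the new pattern: ship one event per automation call
// and insert it into an already-sorted timeline on the receiving side.
#include <algorithm>
#include <vector>

struct Event { double time; float value; };

struct Timeline {
  std::vector<Event> events;  // kept sorted by time
  void InsertEvent(const Event& e) {
    auto it = std::upper_bound(events.begin(), events.end(), e,
        [](const Event& a, const Event& b) { return a.time < b.time; });
    events.insert(it, e);     // one O(n) insert instead of a full timeline copy
  }
};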
@@ -785,23 +787,25 @@ AudioBufferSourceNode::NotifyMainThreadStreamFinished()
 }

 void
-AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode)
+AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode,
+                                                const AudioTimelineEvent& aEvent)
 {
   AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
   if (!This->mStream) {
     return;
   }
-  SendTimelineParameterToStream(This, PLAYBACKRATE, *This->mPlaybackRate);
+  SendTimelineEventToStream(This, PLAYBACKRATE, aEvent);
 }

 void
-AudioBufferSourceNode::SendDetuneToStream(AudioNode* aNode)
+AudioBufferSourceNode::SendDetuneToStream(AudioNode* aNode,
+                                          const AudioTimelineEvent& aEvent)
 {
   AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
   if (!This->mStream) {
     return;
   }
-  SendTimelineParameterToStream(This, DETUNE, *This->mDetune);
+  SendTimelineEventToStream(This, DETUNE, aEvent);
 }

 void
AudioBufferSourceNode.h

@@ -129,8 +129,10 @@ private:
   void SendLoopParametersToStream();
   void SendBufferParameterToStream(JSContext* aCx);
   void SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream);
-  static void SendPlaybackRateToStream(AudioNode* aNode);
-  static void SendDetuneToStream(AudioNode* aNode);
+  static void SendPlaybackRateToStream(AudioNode* aNode,
+                                       const AudioTimelineEvent& aEvent);
+  static void SendDetuneToStream(AudioNode* aNode,
+                                 const AudioTimelineEvent& aEvent);

 private:
   double mLoopStart;
AudioEventTimeline.h

@@ -12,28 +12,34 @@
 #include "mozilla/FloatingPoint.h"
 #include "mozilla/PodOperations.h"

+#include "MainThreadUtils.h"
 #include "nsTArray.h"
 #include "math.h"
 #include "WebAudioUtils.h"

 namespace mozilla {

+class MediaStream;
+
 namespace dom {

 // This is an internal helper class and should not be used outside of this header.
 struct AudioTimelineEvent final
 {
   enum Type : uint32_t
   {
     SetValue,
+    SetValueAtTime,
     LinearRamp,
     ExponentialRamp,
     SetTarget,
-    SetValueCurve
+    SetValueCurve,
+    Stream,
+    Cancel
   };

   AudioTimelineEvent(Type aType, double aTime, float aValue, double aTimeConstant = 0.0,
-                     float aDuration = 0.0, const float* aCurve = nullptr, uint32_t aCurveLength = 0)
+                     float aDuration = 0.0, const float* aCurve = nullptr,
+                     uint32_t aCurveLength = 0)
     : mType(aType)
     , mTimeConstant(aTimeConstant)
     , mDuration(aDuration)
@@ -49,11 +55,23 @@ struct AudioTimelineEvent final
     }
   }

+  explicit AudioTimelineEvent(MediaStream* aStream)
+    : mType(Stream)
+    , mStream(aStream)
+#ifdef DEBUG
+    , mTimeIsInTicks(false)
+#endif
+  {
+  }
+
   AudioTimelineEvent(const AudioTimelineEvent& rhs)
   {
     PodCopy(this, &rhs, 1);
+
     if (rhs.mType == AudioTimelineEvent::SetValueCurve) {
       SetCurveParams(rhs.mCurve, rhs.mCurveLength);
+    } else if (rhs.mType == AudioTimelineEvent::Stream) {
+      new (&mStream) decltype(mStream)(rhs.mStream);
     }
   }
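The copy constructor above first PodCopy's the raw bytes, then re-runs real construction for members whose copy semantics matter: the curve buffer, and now the refcounted mStream via placement new (a plain byte copy of an nsRefPtr would skip the AddRef). A standalone sketch of that tagged-union copy pattern, with std::shared_ptr standing in for nsRefPtr and hypothetical names:

// Sketch: after choosing the active union member by tag, a copy must
// re-construct any non-trivially-copyable member in place so its copy
// semantics (here, ref counting) actually run.
#include <memory>
#include <new>

struct Stream { int id; };

struct Event {
  enum Type { Value, StreamEvent } type;
  union {
    float value;
    std::shared_ptr<Stream> stream;  // non-trivial union member
  };
  Event(float v) : type(Value), value(v) {}
  explicit Event(std::shared_ptr<Stream> s) : type(StreamEvent) {
    new (&stream) std::shared_ptr<Stream>(std::move(s));
  }
  Event(const Event& rhs) : type(rhs.type) {
    if (type == StreamEvent) {
      new (&stream) std::shared_ptr<Stream>(rhs.stream);  // bumps the refcount
    } else {
      value = rhs.value;
    }
  }
  ~Event() {
    if (type == StreamEvent) { stream.~shared_ptr(); }  // destroy active member
  }
};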
@@ -64,30 +82,6 @@ struct AudioTimelineEvent final
     }
   }

-  bool IsValid() const
-  {
-    if (mType == AudioTimelineEvent::SetValueCurve) {
-      if (!mCurve || !mCurveLength) {
-        return false;
-      }
-      for (uint32_t i = 0; i < mCurveLength; ++i) {
-        if (!IsValid(mCurve[i])) {
-          return false;
-        }
-      }
-    }
-
-    if (mType == AudioTimelineEvent::SetTarget &&
-        WebAudioUtils::FuzzyEqual(mTimeConstant, 0.0)) {
-      return false;
-    }
-
-    return IsValid(mTime) &&
-           IsValid(mValue) &&
-           IsValid(mTimeConstant) &&
-           IsValid(mDuration);
-  }
-
   template <class TimeType>
   TimeType Time() const;
@@ -114,6 +108,22 @@ struct AudioTimelineEvent final
     float mValue;
     uint32_t mCurveLength;
   };
+  // mCurve contains a buffer of SetValueCurve samples. We sample the
+  // values in the buffer depending on how far along we are in time.
+  // If we're at time T and the event has started as time T0 and has a
+  // duration of D, we sample the buffer at floor(mCurveLength*(T-T0)/D)
+  // if T<T0+D, and just take the last sample in the buffer otherwise.
+  float* mCurve;
+  nsRefPtr<MediaStream> mStream;
+  double mTimeConstant;
+  double mDuration;
+#ifdef DEBUG
+  bool mTimeIsInTicks;
+#endif
+
+private:
   // This member is accessed using the `Time` method, for safety.
   //
   // The time for an event can either be in absolute value or in ticks.
   // Initially the time of the event is always in absolute value.
   // In order to convert it to ticks, call SetTimeInTicks. Once this
@@ -123,23 +133,6 @@ struct AudioTimelineEvent final
     double mTime;
     int64_t mTimeInTicks;
   };
-  // mCurve contains a buffer of SetValueCurve samples. We sample the
-  // values in the buffer depending on how far along we are in time.
-  // If we're at time T and the event has started as time T0 and has a
-  // duration of D, we sample the buffer at floor(mCurveLength*(T-T0)/D)
-  // if T<T0+D, and just take the last sample in the buffer otherwise.
-  float* mCurve;
-  double mTimeConstant;
-  double mDuration;
-#ifdef DEBUG
-  bool mTimeIsInTicks;
-#endif
-
-private:
-  static bool IsValid(double value)
-  {
-    return mozilla::IsFinite(value);
-  }
 };

 template <>
@@ -152,6 +145,7 @@ inline double AudioTimelineEvent::Time<double>() const
 template <>
 inline int64_t AudioTimelineEvent::Time<int64_t>() const
 {
+  MOZ_ASSERT(!NS_IsMainThread());
   MOZ_ASSERT(mTimeIsInTicks);
   return mTimeInTicks;
 }
@@ -171,7 +165,119 @@ public:
     : mValue(aDefaultValue),
       mComputedValue(aDefaultValue),
       mLastComputedValue(aDefaultValue)
   { }

+  bool ValidateEvent(AudioTimelineEvent& aEvent,
+                     ErrorResult& aRv)
+  {
+    MOZ_ASSERT(NS_IsMainThread());
+
+    // Validate the event itself
+    if (!WebAudioUtils::IsTimeValid(aEvent.template Time<double>()) ||
+        !WebAudioUtils::IsTimeValid(aEvent.mTimeConstant)) {
+      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+      return false;
+    }
+
+    if (aEvent.mType == AudioTimelineEvent::SetValueCurve) {
+      if (!aEvent.mCurve || !aEvent.mCurveLength) {
+        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+        return false;
+      }
+      for (uint32_t i = 0; i < aEvent.mCurveLength; ++i) {
+        if (!IsValid(aEvent.mCurve[i])) {
+          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+          return false;
+        }
+      }
+    }
+
+    if (aEvent.mType == AudioTimelineEvent::SetTarget &&
+        WebAudioUtils::FuzzyEqual(aEvent.mTimeConstant, 0.0)) {
+      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+      return false;
+    }
+
+    bool timeAndValueValid = IsValid(aEvent.mValue) &&
+                             IsValid(aEvent.mDuration);
+    if (!timeAndValueValid) {
+      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+      return false;
+    }
+
+    // Make sure that non-curve events don't fall within the duration of a
+    // curve event.
+    for (unsigned i = 0; i < mEvents.Length(); ++i) {
+      if (mEvents[i].mType == AudioTimelineEvent::SetValueCurve &&
+          mEvents[i].template Time<double>() <= aEvent.template Time<double>() &&
+          (mEvents[i].template Time<double>() + mEvents[i].mDuration) >= aEvent.template Time<double>()) {
+        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+        return false;
+      }
+    }
+
+    // Make sure that curve events don't fall in a range which includes other
+    // events.
+    if (aEvent.mType == AudioTimelineEvent::SetValueCurve) {
+      for (unsigned i = 0; i < mEvents.Length(); ++i) {
+        if (mEvents[i].template Time<double>() > aEvent.template Time<double>() &&
+            mEvents[i].template Time<double>() < (aEvent.template Time<double>() + aEvent.mDuration)) {
+          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+          return false;
+        }
+      }
+    }
+
+    // Make sure that invalid values are not used for exponential curves
+    if (aEvent.mType == AudioTimelineEvent::ExponentialRamp) {
+      if (aEvent.mValue <= 0.f) {
+        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+        return false;
+      }
+      const AudioTimelineEvent* previousEvent = GetPreviousEvent(aEvent.template Time<double>());
+      if (previousEvent) {
+        if (previousEvent->mValue <= 0.f) {
+          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+          return false;
+        }
+      } else {
+        if (mValue <= 0.f) {
+          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  template<typename TimeType>
+  void InsertEvent(const AudioTimelineEvent& aEvent)
+  {
+    for (unsigned i = 0; i < mEvents.Length(); ++i) {
+      if (aEvent.template Time<TimeType>() == mEvents[i].template Time<TimeType>()) {
+        if (aEvent.mType == mEvents[i].mType) {
+          // If times and types are equal, replace the event
+          mEvents.ReplaceElementAt(i, aEvent);
+        } else {
+          // Otherwise, place the element after the last event of another type
+          do {
+            ++i;
+          } while (i < mEvents.Length() &&
+                   aEvent.mType != mEvents[i].mType &&
+                   aEvent.template Time<TimeType>() == mEvents[i].template Time<TimeType>());
+          mEvents.InsertElementAt(i, aEvent);
+        }
+        return;
+      }
+      // Otherwise, place the event right after the latest existing event
+      if (aEvent.template Time<TimeType>() < mEvents[i].template Time<TimeType>()) {
+        mEvents.InsertElementAt(i, aEvent);
+        return;
+      }
+    }
+
+    // If we couldn't find a place for the event, just append it to the list
+    mEvents.AppendElement(aEvent);
+  }

   bool HasSimpleValue() const
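InsertEvent's ordering rules matter because both threads now run the same code on their own copies of the timeline: an event with the same time and type replaces the existing one, an event with an equal time but a different type lands after the run of equal-time events, and everything else is inserted in time order. A small self-contained demo of those rules (hypothetical names, std-only):

// Self-contained demo of the insertion rules sketched above.
#include <cassert>
#include <vector>

enum class Type { SetValue, Ramp };
struct Event { Type type; double time; float value; };

void Insert(std::vector<Event>& events, const Event& e) {
  for (size_t i = 0; i < events.size(); ++i) {
    if (e.time == events[i].time) {
      if (e.type == events[i].type) {
        events[i] = e;                       // same time and type: replace
        return;
      }
      do { ++i; } while (i < events.size() &&
                         e.type != events[i].type &&
                         e.time == events[i].time);
      events.insert(events.begin() + i, e);  // after the equal-time run
      return;
    }
    if (e.time < events[i].time) {
      events.insert(events.begin() + i, e);  // keep the array time-sorted
      return;
    }
  }
  events.push_back(e);                       // later than everything else
}

int main() {
  std::vector<Event> ev;
  Insert(ev, {Type::SetValue, 1.0, 0.5f});
  Insert(ev, {Type::SetValue, 1.0, 0.7f});   // replaces the first event
  Insert(ev, {Type::Ramp, 1.0, 1.0f});       // same time, new type: goes after
  assert(ev.size() == 2 && ev[0].value == 0.7f);
  return 0;
}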
@@ -202,38 +308,58 @@ public:

   void SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::SetValue, aStartTime, aValue), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::SetValueAtTime, aStartTime, aValue);
+
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }

   void LinearRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::LinearRamp, aEndTime, aValue), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::LinearRamp, aEndTime, aValue);
+
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }

   void ExponentialRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::ExponentialRamp, aEndTime, aValue), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::ExponentialRamp, aEndTime, aValue);
+
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }

   void SetTargetAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::SetTarget, aStartTime, aTarget, aTimeConstant), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::SetTarget, aStartTime, aTarget, aTimeConstant);
+
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }

   void SetValueCurveAtTime(const float* aValues, uint32_t aValuesLength, double aStartTime, double aDuration, ErrorResult& aRv)
   {
-    InsertEvent(AudioTimelineEvent(AudioTimelineEvent::SetValueCurve, aStartTime, 0.0f, 0.0f, aDuration, aValues, aValuesLength), aRv);
+    AudioTimelineEvent event(AudioTimelineEvent::SetValueCurve, aStartTime, 0.0f, 0.0f, aDuration, aValues, aValuesLength);
+    if (ValidateEvent(event, aRv)) {
+      InsertEvent<double>(event);
+    }
   }

-  void CancelScheduledValues(double aStartTime)
+  template<typename TimeType>
+  void CancelScheduledValues(TimeType aStartTime)
   {
     for (unsigned i = 0; i < mEvents.Length(); ++i) {
-      if (mEvents[i].mTime >= aStartTime) {
+      if (mEvents[i].template Time<TimeType>() >= aStartTime) {
 #ifdef DEBUG
         // Sanity check: the array should be sorted, so all of the following
         // events should have a time greater than aStartTime too.
         for (unsigned j = i + 1; j < mEvents.Length(); ++j) {
-          MOZ_ASSERT(mEvents[j].mTime >= aStartTime);
+          MOZ_ASSERT(mEvents[j].template Time<TimeType>() >= aStartTime);
         }
 #endif
         mEvents.TruncateLength(i);
@@ -298,7 +424,7 @@ public:

 #ifdef DEBUG
     const AudioTimelineEvent* current = &mEvents[lastEventId];
-    MOZ_ASSERT(current->mType == AudioTimelineEvent::SetValue ||
+    MOZ_ASSERT(current->mType == AudioTimelineEvent::SetValueAtTime ||
               current->mType == AudioTimelineEvent::SetTarget ||
               current->mType == AudioTimelineEvent::LinearRamp ||
               current->mType == AudioTimelineEvent::ExponentialRamp ||
@@ -393,17 +519,6 @@ public:
     return aCurve[uint32_t(aCurveLength * ratio)];
   }

-  void ConvertEventTimesToTicks(int64_t (*aConvertor)(double aTime, void* aClosure), void* aClosure,
-                                int32_t aSampleRate)
-  {
-    for (unsigned i = 0; i < mEvents.Length(); ++i) {
-      mEvents[i].SetTimeInTicks(aConvertor(mEvents[i].template Time<double>(), aClosure));
-      mEvents[i].mTimeConstant *= aSampleRate;
-      mEvents[i].mDuration *= aSampleRate;
-    }
-  }
-
 private:
   template<class TimeType>
   float GetValuesAtTimeHelperInternal(TimeType aTime,
                                       const AudioTimelineEvent* aPrevious,
@@ -433,17 +548,21 @@ private:
     // If the requested time is after all of the existing events
     if (!aNext) {
       switch (aPrevious->mType) {
-      case AudioTimelineEvent::SetValue:
+      case AudioTimelineEvent::SetValueAtTime:
       case AudioTimelineEvent::LinearRamp:
       case AudioTimelineEvent::ExponentialRamp:
         // The value will be constant after the last event
         return aPrevious->mValue;
       case AudioTimelineEvent::SetValueCurve:
         return ExtractValueFromCurve(aPrevious->template Time<TimeType>(),
                                      aPrevious->mCurve, aPrevious->mCurveLength,
                                      aPrevious->mDuration, aTime);
       case AudioTimelineEvent::SetTarget:
         MOZ_ASSERT(false, "unreached");
+      case AudioTimelineEvent::SetValue:
+      case AudioTimelineEvent::Cancel:
+      case AudioTimelineEvent::Stream:
+        MOZ_ASSERT(false, "Should have been handled earlier.");
       }
       MOZ_ASSERT(false, "unreached");
     }
@@ -464,15 +583,19 @@ private:
                                aNext->template Time<TimeType>(),
                                aNext->mValue, aTime);

-    case AudioTimelineEvent::SetValue:
+    case AudioTimelineEvent::SetValueAtTime:
     case AudioTimelineEvent::SetTarget:
     case AudioTimelineEvent::SetValueCurve:
       break;
+    case AudioTimelineEvent::SetValue:
+    case AudioTimelineEvent::Cancel:
+    case AudioTimelineEvent::Stream:
+      MOZ_ASSERT(false, "Should have been handled earlier.");
     }

     // Now handle all other cases
     switch (aPrevious->mType) {
-    case AudioTimelineEvent::SetValue:
+    case AudioTimelineEvent::SetValueAtTime:
     case AudioTimelineEvent::LinearRamp:
     case AudioTimelineEvent::ExponentialRamp:
       // If the next event type is neither linear or exponential ramp, the
@@ -484,6 +607,10 @@ private:
                                    aPrevious->mDuration, aTime);
     case AudioTimelineEvent::SetTarget:
       MOZ_ASSERT(false, "unreached");
+    case AudioTimelineEvent::SetValue:
+    case AudioTimelineEvent::Cancel:
+    case AudioTimelineEvent::Stream:
+      MOZ_ASSERT(false, "Should have been handled earlier.");
     }

     MOZ_ASSERT(false, "unreached");
@@ -498,22 +625,22 @@ private:
     bool bailOut = false;
     for (unsigned i = 0; !bailOut && i < mEvents.Length(); ++i) {
       switch (mEvents[i].mType) {
-      case AudioTimelineEvent::SetValue:
+      case AudioTimelineEvent::SetValueAtTime:
       case AudioTimelineEvent::SetTarget:
       case AudioTimelineEvent::LinearRamp:
       case AudioTimelineEvent::ExponentialRamp:
       case AudioTimelineEvent::SetValueCurve:
-        if (aTime == mEvents[i].mTime) {
+        if (aTime == mEvents[i].template Time<double>()) {
           // Find the last event with the same time
           do {
             ++i;
           } while (i < mEvents.Length() &&
-                   aTime == mEvents[i].mTime);
+                   aTime == mEvents[i].template Time<double>());
           return &mEvents[i - 1];
         }
         previous = next;
         next = &mEvents[i];
-        if (aTime < mEvents[i].mTime) {
+        if (aTime < mEvents[i].template Time<double>()) {
           bailOut = true;
         }
         break;
@@ -528,85 +655,12 @@ private:

     return previous;
   }

-  void InsertEvent(const AudioTimelineEvent& aEvent, ErrorResult& aRv)
-  {
-    if (!aEvent.IsValid()) {
-      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-      return;
-    }
-
-    // Make sure that non-curve events don't fall within the duration of a
-    // curve event.
-    for (unsigned i = 0; i < mEvents.Length(); ++i) {
-      if (mEvents[i].mType == AudioTimelineEvent::SetValueCurve &&
-          mEvents[i].mTime <= aEvent.mTime &&
-          (mEvents[i].mTime + mEvents[i].mDuration) >= aEvent.mTime) {
-        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-        return;
-      }
-    }
-
-    // Make sure that curve events don't fall in a range which includes other
-    // events.
-    if (aEvent.mType == AudioTimelineEvent::SetValueCurve) {
-      for (unsigned i = 0; i < mEvents.Length(); ++i) {
-        if (mEvents[i].mTime > aEvent.mTime &&
-            mEvents[i].mTime < (aEvent.mTime + aEvent.mDuration)) {
-          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-          return;
-        }
-      }
-    }
-
-    // Make sure that invalid values are not used for exponential curves
-    if (aEvent.mType == AudioTimelineEvent::ExponentialRamp) {
-      if (aEvent.mValue <= 0.f) {
-        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-        return;
-      }
-      const AudioTimelineEvent* previousEvent = GetPreviousEvent(aEvent.mTime);
-      if (previousEvent) {
-        if (previousEvent->mValue <= 0.f) {
-          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-          return;
-        }
-      } else {
-        if (mValue <= 0.f) {
-          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-          return;
-        }
-      }
-    }
-
-    for (unsigned i = 0; i < mEvents.Length(); ++i) {
-      if (aEvent.mTime == mEvents[i].mTime) {
-        if (aEvent.mType == mEvents[i].mType) {
-          // If times and types are equal, replace the event
-          mEvents.ReplaceElementAt(i, aEvent);
-        } else {
-          // Otherwise, place the element after the last event of another type
-          do {
-            ++i;
-          } while (i < mEvents.Length() &&
-                   aEvent.mType != mEvents[i].mType &&
-                   aEvent.mTime == mEvents[i].mTime);
-          mEvents.InsertElementAt(i, aEvent);
-        }
-        return;
-      }
-      // Otherwise, place the event right after the latest existing event
-      if (aEvent.mTime < mEvents[i].mTime) {
-        mEvents.InsertElementAt(i, aEvent);
-        return;
-      }
-    }
-
-    // If we couldn't find a place for the event, just append it to the list
-    mEvents.AppendElement(aEvent);
-  }
+private:
+  static bool IsValid(double value)
+  {
+    return mozilla::IsFinite(value);
+  }

 private:
   // This is a sorted array of the events in the timeline. Queries of this
   // data structure should probably be more frequent than modifications to it,
   // and that is the reason why we're using a simple array as the data structure.
AudioNode.cpp

@@ -304,12 +304,12 @@ AudioNode::SendChannelMixingParametersToStream()
 }

 void
-AudioNode::SendTimelineParameterToStream(AudioNode* aNode, uint32_t aIndex,
-                                         const AudioParamTimeline& aValue)
+AudioNode::SendTimelineEventToStream(AudioNode* aNode, uint32_t aIndex,
+                                     const AudioTimelineEvent& aEvent)
 {
   AudioNodeStream* ns = aNode->mStream;
   MOZ_ASSERT(ns, "How come we don't have a stream here?");
-  ns->SetTimelineParameter(aIndex, aValue);
+  ns->SendTimelineEvent(aIndex, aEvent);
 }

 void
AudioNode.h

@@ -28,6 +28,7 @@ class AudioBufferSourceNode;
 class AudioParam;
 class AudioParamTimeline;
 struct ThreeDPoint;
+struct AudioTimelineEvent;

 /**
  * The DOM object representing a Web Audio AudioNode.
@@ -223,8 +224,8 @@ protected:
   void SendInt32ParameterToStream(uint32_t aIndex, int32_t aValue);
   void SendThreeDPointParameterToStream(uint32_t aIndex, const ThreeDPoint& aValue);
   void SendChannelMixingParametersToStream();
-  static void SendTimelineParameterToStream(AudioNode* aNode, uint32_t aIndex,
-                                            const AudioParamTimeline& aValue);
+  static void SendTimelineEventToStream(AudioNode* aNode, uint32_t aIndex,
+                                        const dom::AudioTimelineEvent& aEvent);

 private:
   nsRefPtr<AudioContext> mContext;
AudioNodeEngine.h

@@ -17,6 +17,7 @@ namespace dom {
 struct ThreeDPoint;
 class AudioParamTimeline;
 class DelayNodeEngine;
+struct AudioTimelineEvent;
 } // namespace dom

 class AudioBlock;
@@ -282,11 +283,10 @@ public:
   {
     NS_ERROR("Invalid SetInt32Parameter index");
   }
-  virtual void SetTimelineParameter(uint32_t aIndex,
-                                    const dom::AudioParamTimeline& aValue,
-                                    TrackRate aSampleRate)
+  virtual void RecvTimelineEvent(uint32_t aIndex,
+                                 dom::AudioTimelineEvent& aValue)
   {
-    NS_ERROR("Invalid SetTimelineParameter index");
+    NS_ERROR("Invalid RecvTimelineEvent index");
   }
   virtual void SetThreeDPointParameter(uint32_t aIndex,
                                        const dom::ThreeDPoint& aValue)
AudioNodeStream.cpp

@@ -203,29 +203,29 @@ AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
 }

 void
-AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
-                                      const AudioParamTimeline& aValue)
+AudioNodeStream::SendTimelineEvent(uint32_t aIndex,
+                                   const AudioTimelineEvent& aEvent)
 {
   class Message final : public ControlMessage
   {
   public:
     Message(AudioNodeStream* aStream, uint32_t aIndex,
-            const AudioParamTimeline& aValue)
+            const AudioTimelineEvent& aEvent)
       : ControlMessage(aStream),
-        mValue(aValue),
+        mEvent(aEvent),
        mSampleRate(aStream->SampleRate()),
        mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
-        SetTimelineParameter(mIndex, mValue, mSampleRate);
+        RecvTimelineEvent(mIndex, mEvent);
    }
-    AudioParamTimeline mValue;
+    AudioTimelineEvent mEvent;
    TrackRate mSampleRate;
    uint32_t mIndex;
  };
-  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
+  GraphImpl()->AppendMessage(new Message(this, aIndex, aEvent));
 }

 void
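The delivery mechanism is unchanged here: the main thread appends a ControlMessage to the graph, and Run() later executes on the graph thread. What shrinks is the payload, from a full AudioParamTimeline copy to one small AudioTimelineEvent. A rough std-only sketch of the enqueue-and-run-elsewhere pattern (hypothetical names; the real MediaStreamGraph message queue is considerably more involved than a mutex-guarded queue):

// Sketch: a message captures one small Event by value, not a whole timeline.
#include <functional>
#include <mutex>
#include <queue>
#include <vector>

struct Event { double time; float value; };
struct Timeline { std::vector<Event> events; };

class MessageQueue {
  std::mutex mMutex;
  std::queue<std::function<void()>> mMessages;
public:
  void Append(std::function<void()> aMessage) {   // called on the main thread
    std::lock_guard<std::mutex> lock(mMutex);
    mMessages.push(std::move(aMessage));
  }
  void RunAll() {                                 // called on the audio thread
    std::lock_guard<std::mutex> lock(mMutex);
    while (!mMessages.empty()) {
      mMessages.front()();
      mMessages.pop();
    }
  }
};

void SendTimelineEvent(MessageQueue& aQueue, Timeline& aTimeline, Event aEvent) {
  aQueue.Append([&aTimeline, aEvent] { aTimeline.events.push_back(aEvent); });
}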
AudioNodeStream.h

@@ -14,7 +14,7 @@ namespace mozilla {

 namespace dom {
 struct ThreeDPoint;
-class AudioParamTimeline;
+struct AudioTimelineEvent;
 class AudioContext;
 } // namespace dom
@@ -85,9 +85,10 @@ public:
                            double aStreamTime);
   void SetDoubleParameter(uint32_t aIndex, double aValue);
   void SetInt32Parameter(uint32_t aIndex, int32_t aValue);
-  void SetTimelineParameter(uint32_t aIndex, const dom::AudioParamTimeline& aValue);
   void SetThreeDPointParameter(uint32_t aIndex, const dom::ThreeDPoint& aValue);
   void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer);
+  // This sends a single event to the timeline on the MSG thread side.
+  void SendTimelineEvent(uint32_t aIndex, const dom::AudioTimelineEvent& aEvent);
   // This consumes the contents of aData. aData will be emptied after this returns.
   void SetRawArrayData(nsTArray<float>& aData);
   void SetChannelMixingParameters(uint32_t aNumberOfChannels,
AudioParam.cpp

@@ -118,8 +118,10 @@ AudioParam::Stream()
     nodeStream->AllocateInputPort(mStream, AudioNodeStream::AUDIO_TRACK);
   }

-  // Let the MSG's copy of AudioParamTimeline know about the change in the stream
-  mCallback(mNode);
+  // Send the stream to the timeline on the MSG side.
+  AudioTimelineEvent event(mStream);
+
+  mCallback(mNode, event);

   return mStream;
 }
AudioParam.h

@@ -26,7 +26,7 @@ class AudioParam final : public nsWrapperCache,
   virtual ~AudioParam();

 public:
-  typedef void (*CallbackType)(AudioNode*);
+  typedef void (*CallbackType)(AudioNode* aNode, const AudioTimelineEvent&);

   AudioParam(AudioNode* aNode,
              CallbackType aCallback,
@@ -58,68 +58,86 @@ public:
       return;
     }
     aValues.ComputeLengthAndData();
-    AudioParamTimeline::SetValueCurveAtTime(aValues.Data(), aValues.Length(),
-                                            DOMTimeToStreamTime(aStartTime), aDuration, aRv);
-    mCallback(mNode);
+
+    EventInsertionHelper(aRv, AudioTimelineEvent::SetValueCurve,
+                         aStartTime, 0.0f, 0.0f, aDuration, aValues.Data(),
+                         aValues.Length());
   }

   // We override the rest of the mutating AudioParamTimeline methods in order to make
   // sure that the callback is called every time that this object gets mutated.
   void SetValue(float aValue)
   {
-    // Optimize away setting the same value on an AudioParam
-    if (HasSimpleValue() &&
-        WebAudioUtils::FuzzyEqual(GetValue(), aValue)) {
+    AudioTimelineEvent event(AudioTimelineEvent::SetValue, 0.0f, aValue);
+
+    ErrorResult rv;
+    if (!ValidateEvent(event, rv)) {
+      MOZ_ASSERT(false, "This should not happen, "
+                        "setting the value should always work");
       return;
     }
+
     AudioParamTimeline::SetValue(aValue);
-    mCallback(mNode);
+
+    mCallback(mNode, event);
   }

   void SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::SetValueAtTime(aValue, DOMTimeToStreamTime(aStartTime), aRv);
-    mCallback(mNode);
+    EventInsertionHelper(aRv, AudioTimelineEvent::SetValueAtTime,
+                         aStartTime, aValue);
   }

   void LinearRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aEndTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::LinearRampToValueAtTime(aValue, DOMTimeToStreamTime(aEndTime), aRv);
-    mCallback(mNode);
+    EventInsertionHelper(aRv, AudioTimelineEvent::LinearRamp, aEndTime, aValue);
   }

   void ExponentialRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aEndTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::ExponentialRampToValueAtTime(aValue, DOMTimeToStreamTime(aEndTime), aRv);
-    mCallback(mNode);
+    EventInsertionHelper(aRv, AudioTimelineEvent::ExponentialRamp,
+                         aEndTime, aValue);
   }

-  void SetTargetAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
+  void SetTargetAtTime(float aTarget, double aStartTime,
+                       double aTimeConstant, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime) ||
         !WebAudioUtils::IsTimeValid(aTimeConstant)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::SetTargetAtTime(aTarget, DOMTimeToStreamTime(aStartTime), aTimeConstant, aRv);
-    mCallback(mNode);
+    EventInsertionHelper(aRv, AudioTimelineEvent::SetTarget,
+                         aStartTime, aTarget,
+                         aTimeConstant);
   }

   void CancelScheduledValues(double aStartTime, ErrorResult& aRv)
   {
     if (!WebAudioUtils::IsTimeValid(aStartTime)) {
       aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
       return;
     }
-    AudioParamTimeline::CancelScheduledValues(DOMTimeToStreamTime(aStartTime));
-    mCallback(mNode);
+
+    double streamTime = DOMTimeToStreamTime(aStartTime);
+
+    // Remove some events on the main thread copy.
+    AudioEventTimeline::CancelScheduledValues(streamTime);
+
+    AudioTimelineEvent event(AudioTimelineEvent::Cancel,
+                             streamTime, 0.0f);
+
+    mCallback(mNode, event);
   }

   uint32_t ParentNodeId()
@@ -188,6 +206,27 @@ protected:
   NS_DECL_OWNINGTHREAD

+private:
+  void EventInsertionHelper(ErrorResult& aRv,
+                            AudioTimelineEvent::Type aType,
+                            double aTime, float aValue,
+                            double aTimeConstant = 0.0,
+                            float aDuration = 0.0,
+                            const float* aCurve = nullptr,
+                            uint32_t aCurveLength = 0)
+  {
+    AudioTimelineEvent event(aType,
+                             DOMTimeToStreamTime(aTime), aValue,
+                             aTimeConstant, aDuration, aCurve, aCurveLength);
+
+    if (!ValidateEvent(event, aRv)) {
+      return;
+    }
+
+    AudioEventTimeline::InsertEvent<double>(event);
+
+    mCallback(mNode, event);
+  }
+
   nsRefPtr<AudioNode> mNode;
   // For every InputNode, there is a corresponding entry in mOutputParams of the
   // InputNode's mInputNode.
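EventInsertionHelper is the new single funnel for every automation method: build the event in stream time, validate it once, insert it into the main-thread timeline, then hand the very same event to the callback that forwards it to the graph thread, so both copies stay in sync without wholesale copying. A condensed sketch of that flow (hypothetical names, std-only; error reporting simplified to an exception where the real code uses ErrorResult):

// Condensed sketch of the validate -> insert locally -> notify flow.
#include <functional>
#include <stdexcept>
#include <vector>

struct Event { double time; float value; };

class Param {
  std::vector<Event> mEvents;                 // main-thread copy
  std::function<void(const Event&)> mSend;    // ships the event to the audio thread
public:
  explicit Param(std::function<void(const Event&)> aSend)
    : mSend(std::move(aSend)) {}
  void ScheduleValue(float aValue, double aTime) {
    Event event{aTime, aValue};
    if (!(aTime >= 0)) {
      throw std::invalid_argument("bad time"); // validate exactly once
    }
    mEvents.push_back(event);                  // update the local copy
    mSend(event);                              // notify the other side
  }
};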
AudioParamTimeline.h

@@ -50,6 +50,24 @@ public:
     return GetValueAtTime(aTime, 0);
   }

+  template<typename TimeType>
+  void InsertEvent(const AudioTimelineEvent& aEvent)
+  {
+    if (aEvent.mType == AudioTimelineEvent::Cancel) {
+      CancelScheduledValues(aEvent.template Time<TimeType>());
+      return;
+    }
+    if (aEvent.mType == AudioTimelineEvent::Stream) {
+      mStream = aEvent.mStream;
+      return;
+    }
+    if (aEvent.mType == AudioTimelineEvent::SetValue) {
+      AudioEventTimeline::SetValue(aEvent.mValue);
+      return;
+    }
+    AudioEventTimeline::InsertEvent<TimeType>(aEvent);
+  }
+
   // Get the value of the AudioParam at time aTime + aCounter.
   // aCounter here is an offset to aTime if we try to get the value in ticks,
   // otherwise it should always be zero. aCounter is meant to be used when
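Note that Cancel, Stream, and SetValue never enter the sorted event array; they are commands that travel in the same envelope as ordinary events, which keeps the cross-thread protocol down to one payload type. A sketch of the same idea (hypothetical names, std-only):

// Sketch: one message type, three commands plus ordinary scheduling.
#include <cstddef>
#include <vector>

enum class Kind { SetValue, Schedule, Cancel };
struct Event { Kind kind; double time; float value; };

struct Timeline {
  float current = 0.f;
  std::vector<Event> events;
  void Apply(const Event& e) {
    switch (e.kind) {
      case Kind::SetValue:
        current = e.value;                 // immediate, never stored
        break;
      case Kind::Cancel:                   // drop events at or after e.time
        for (std::size_t i = 0; i < events.size(); ++i) {
          if (events[i].time >= e.time) { events.resize(i); break; }
        }
        break;
      case Kind::Schedule:
        events.push_back(e);               // an ordinary timeline entry
        break;
    }
  }
};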
BiquadFilterNode.cpp

@@ -110,27 +110,27 @@ public:
       NS_ERROR("Bad BiquadFilterNode Int32Parameter");
    }
  }
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
  {
    MOZ_ASSERT(mSource && mDestination);
+
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
    switch (aIndex) {
    case FREQUENCY:
-      mFrequency = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mFrequency, mSource, mDestination);
+      mFrequency.InsertEvent<int64_t>(aEvent);
      break;
    case DETUNE:
-      mDetune = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mDetune, mSource, mDestination);
+      mDetune.InsertEvent<int64_t>(aEvent);
      break;
    case Q:
-      mQ = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mQ, mSource, mDestination);
+      mQ.InsertEvent<int64_t>(aEvent);
      break;
    case GAIN:
-      mGain = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
+      mGain.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad BiquadFilterNodeEngine TimelineParameter");
@@ -347,31 +347,31 @@ BiquadFilterNode::GetFrequencyResponse(const Float32Array& aFrequencyHz,
 }

 void
-BiquadFilterNode::SendFrequencyToStream(AudioNode* aNode)
+BiquadFilterNode::SendFrequencyToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   BiquadFilterNode* This = static_cast<BiquadFilterNode*>(aNode);
-  SendTimelineParameterToStream(This, BiquadFilterNodeEngine::FREQUENCY, *This->mFrequency);
+  SendTimelineEventToStream(This, BiquadFilterNodeEngine::FREQUENCY, aEvent);
 }

 void
-BiquadFilterNode::SendDetuneToStream(AudioNode* aNode)
+BiquadFilterNode::SendDetuneToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   BiquadFilterNode* This = static_cast<BiquadFilterNode*>(aNode);
-  SendTimelineParameterToStream(This, BiquadFilterNodeEngine::DETUNE, *This->mDetune);
+  SendTimelineEventToStream(This, BiquadFilterNodeEngine::DETUNE, aEvent);
 }

 void
-BiquadFilterNode::SendQToStream(AudioNode* aNode)
+BiquadFilterNode::SendQToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   BiquadFilterNode* This = static_cast<BiquadFilterNode*>(aNode);
-  SendTimelineParameterToStream(This, BiquadFilterNodeEngine::Q, *This->mQ);
+  SendTimelineEventToStream(This, BiquadFilterNodeEngine::Q, aEvent);
 }

 void
-BiquadFilterNode::SendGainToStream(AudioNode* aNode)
+BiquadFilterNode::SendGainToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   BiquadFilterNode* This = static_cast<BiquadFilterNode*>(aNode);
-  SendTimelineParameterToStream(This, BiquadFilterNodeEngine::GAIN, *This->mGain);
+  SendTimelineEventToStream(This, BiquadFilterNodeEngine::GAIN, aEvent);
 }

 } // namespace dom
BiquadFilterNode.h

@@ -15,6 +15,7 @@ namespace mozilla {
 namespace dom {

 class AudioContext;
+struct AudioTimelineEvent;

 class BiquadFilterNode final : public AudioNode
 {
@@ -68,10 +69,14 @@ protected:
   virtual ~BiquadFilterNode();

 private:
-  static void SendFrequencyToStream(AudioNode* aNode);
-  static void SendDetuneToStream(AudioNode* aNode);
-  static void SendQToStream(AudioNode* aNode);
-  static void SendGainToStream(AudioNode* aNode);
+  static void SendFrequencyToStream(AudioNode* aNode,
+                                    const AudioTimelineEvent& aEvent);
+  static void SendDetuneToStream(AudioNode* aNode,
+                                 const AudioTimelineEvent& aEvent);
+  static void SendQToStream(AudioNode* aNode,
+                            const AudioTimelineEvent& aEvent);
+  static void SendGainToStream(AudioNode* aNode,
+                               const AudioTimelineEvent& aEvent);

 private:
   BiquadFilterType mType;
DelayNode.cpp

@@ -60,15 +60,17 @@ public:
   enum Parameters {
     DELAY,
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
  {
+    MOZ_ASSERT(mSource && mDestination);
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
    switch (aIndex) {
    case DELAY:
-      MOZ_ASSERT(mSource && mDestination);
-      mDelay = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mDelay, mSource, mDestination);
+      mDelay.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad DelayNodeEngine TimelineParameter");
@@ -237,10 +239,10 @@ DelayNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 }

 void
-DelayNode::SendDelayToStream(AudioNode* aNode)
+DelayNode::SendDelayToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   DelayNode* This = static_cast<DelayNode*>(aNode);
-  SendTimelineParameterToStream(This, DelayNodeEngine::DELAY, *This->mDelay);
+  SendTimelineEventToStream(This, DelayNodeEngine::DELAY, aEvent);
 }

 } // namespace dom
DelayNode.h

@@ -42,7 +42,8 @@ protected:
   virtual ~DelayNode();

 private:
-  static void SendDelayToStream(AudioNode* aNode);
+  static void SendDelayToStream(AudioNode* aNode,
+                                const AudioTimelineEvent& aEvent);
   friend class DelayNodeEngine;

 private:
DynamicsCompressorNode.cpp

@@ -61,31 +61,30 @@ public:
     ATTACK,
     RELEASE
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
  {
    MOZ_ASSERT(mSource && mDestination);
+
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
    switch (aIndex) {
    case THRESHOLD:
-      mThreshold = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mThreshold, mSource, mDestination);
+      mThreshold.InsertEvent<int64_t>(aEvent);
      break;
    case KNEE:
-      mKnee = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mKnee, mSource, mDestination);
+      mKnee.InsertEvent<int64_t>(aEvent);
      break;
    case RATIO:
-      mRatio = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mRatio, mSource, mDestination);
+      mRatio.InsertEvent<int64_t>(aEvent);
      break;
    case ATTACK:
-      mAttack = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mAttack, mSource, mDestination);
+      mAttack.InsertEvent<int64_t>(aEvent);
      break;
    case RELEASE:
-      mRelease = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mRelease, mSource, mDestination);
+      mRelease.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad DynamicsCompresssorNodeEngine TimelineParameter");
@@ -238,38 +237,43 @@ DynamicsCompressorNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenP
 }

 void
-DynamicsCompressorNode::SendThresholdToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendThresholdToStream(AudioNode* aNode,
+                                              const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::THRESHOLD, *This->mThreshold);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::THRESHOLD, aEvent);
 }

 void
-DynamicsCompressorNode::SendKneeToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendKneeToStream(AudioNode* aNode,
+                                         const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::KNEE, *This->mKnee);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::KNEE, aEvent);
 }

 void
-DynamicsCompressorNode::SendRatioToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendRatioToStream(AudioNode* aNode,
+                                          const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::RATIO, *This->mRatio);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::RATIO, aEvent);
 }

 void
-DynamicsCompressorNode::SendAttackToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendAttackToStream(AudioNode* aNode,
+                                           const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::ATTACK, *This->mAttack);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::ATTACK, aEvent);
 }

 void
-DynamicsCompressorNode::SendReleaseToStream(AudioNode* aNode)
+DynamicsCompressorNode::SendReleaseToStream(AudioNode* aNode,
+                                            const AudioTimelineEvent& aEvent)
 {
   DynamicsCompressorNode* This = static_cast<DynamicsCompressorNode*>(aNode);
-  SendTimelineParameterToStream(This, DynamicsCompressorNodeEngine::RELEASE, *This->mRelease);
+  SendTimelineEventToStream(This, DynamicsCompressorNodeEngine::RELEASE, aEvent);
 }

 } // namespace dom
DynamicsCompressorNode.h

@@ -74,11 +74,16 @@ protected:
   virtual ~DynamicsCompressorNode();

 private:
-  static void SendThresholdToStream(AudioNode* aNode);
-  static void SendKneeToStream(AudioNode* aNode);
-  static void SendRatioToStream(AudioNode* aNode);
-  static void SendAttackToStream(AudioNode* aNode);
-  static void SendReleaseToStream(AudioNode* aNode);
+  static void SendThresholdToStream(AudioNode* aNode,
+                                    const AudioTimelineEvent& aEvent);
+  static void SendKneeToStream(AudioNode* aNode,
+                               const AudioTimelineEvent& aEvent);
+  static void SendRatioToStream(AudioNode* aNode,
+                                const AudioTimelineEvent& aEvent);
+  static void SendAttackToStream(AudioNode* aNode,
+                                 const AudioTimelineEvent& aEvent);
+  static void SendReleaseToStream(AudioNode* aNode,
+                                  const AudioTimelineEvent& aEvent);

 private:
   nsRefPtr<AudioParam> mThreshold;
GainNode.cpp

@@ -43,15 +43,17 @@ public:
   enum Parameters {
     GAIN
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
  {
+    MOZ_ASSERT(mSource && mDestination);
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
    switch (aIndex) {
    case GAIN:
-      MOZ_ASSERT(mSource && mDestination);
-      mGain = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
+      mGain.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad GainNodeEngine TimelineParameter");
@@ -159,10 +161,10 @@ GainNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 }

 void
-GainNode::SendGainToStream(AudioNode* aNode)
+GainNode::SendGainToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   GainNode* This = static_cast<GainNode*>(aNode);
-  SendTimelineParameterToStream(This, GainNodeEngine::GAIN, *This->mGain);
+  SendTimelineEventToStream(This, GainNodeEngine::GAIN, aEvent);
 }

 } // namespace dom
GainNode.h

@@ -42,7 +42,7 @@ protected:
   virtual ~GainNode();

 private:
-  static void SendGainToStream(AudioNode* aNode);
+  static void SendGainToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent);

 private:
   nsRefPtr<AudioParam> mGain;
OscillatorNode.cpp

@@ -57,21 +57,23 @@ public:
     START,
     STOP,
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
  {
    mRecomputeParameters = true;
+
+    MOZ_ASSERT(mSource && mDestination);
+
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
    switch (aIndex) {
    case FREQUENCY:
-      MOZ_ASSERT(mSource && mDestination);
-      mFrequency = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mFrequency, mSource, mDestination);
+      mFrequency.InsertEvent<int64_t>(aEvent);
      break;
    case DETUNE:
-      MOZ_ASSERT(mSource && mDestination);
-      mDetune = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mDetune, mSource, mDestination);
+      mDetune.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad OscillatorNodeEngine TimelineParameter");
@@ -440,23 +442,23 @@ OscillatorNode::DestroyMediaStream()
 }

 void
-OscillatorNode::SendFrequencyToStream(AudioNode* aNode)
+OscillatorNode::SendFrequencyToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   OscillatorNode* This = static_cast<OscillatorNode*>(aNode);
   if (!This->mStream) {
     return;
   }
-  SendTimelineParameterToStream(This, OscillatorNodeEngine::FREQUENCY, *This->mFrequency);
+  SendTimelineEventToStream(This, OscillatorNodeEngine::FREQUENCY, aEvent);
 }

 void
-OscillatorNode::SendDetuneToStream(AudioNode* aNode)
+OscillatorNode::SendDetuneToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   OscillatorNode* This = static_cast<OscillatorNode*>(aNode);
   if (!This->mStream) {
     return;
   }
-  SendTimelineParameterToStream(This, OscillatorNodeEngine::DETUNE, *This->mDetune);
+  SendTimelineEventToStream(This, OscillatorNodeEngine::DETUNE, aEvent);
 }

 void
OscillatorNode.h

@@ -87,8 +87,8 @@ protected:
   virtual ~OscillatorNode();

 private:
-  static void SendFrequencyToStream(AudioNode* aNode);
-  static void SendDetuneToStream(AudioNode* aNode);
+  static void SendFrequencyToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent);
+  static void SendDetuneToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent);
   void SendTypeToStream();
   void SendPeriodicWaveToStream();
StereoPannerNode.cpp

@@ -49,15 +49,17 @@ public:
   enum Parameters {
     PAN
   };
-  void SetTimelineParameter(uint32_t aIndex,
-                            const AudioParamTimeline& aValue,
-                            TrackRate aSampleRate) override
+  void RecvTimelineEvent(uint32_t aIndex,
+                         AudioTimelineEvent& aEvent) override
  {
+    MOZ_ASSERT(mSource && mDestination);
+    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
+                                                    mSource,
+                                                    mDestination);
+
    switch (aIndex) {
    case PAN:
-      MOZ_ASSERT(mSource && mDestination);
-      mPan = aValue;
-      WebAudioUtils::ConvertAudioParamToTicks(mPan, mSource, mDestination);
+      mPan.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad StereoPannerNode TimelineParameter");
@@ -212,10 +214,10 @@ StereoPannerNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 }

 void
-StereoPannerNode::SendPanToStream(AudioNode* aNode)
+StereoPannerNode::SendPanToStream(AudioNode* aNode, const AudioTimelineEvent& aEvent)
 {
   StereoPannerNode* This = static_cast<StereoPannerNode*>(aNode);
-  SendTimelineParameterToStream(This, StereoPannerNodeEngine::PAN, *This->mPan);
+  SendTimelineEventToStream(This, StereoPannerNodeEngine::PAN, aEvent);
 }

 } // namespace dom
StereoPannerNode.h

@@ -60,7 +60,8 @@ protected:
   virtual ~StereoPannerNode();

 private:
-  static void SendPanToStream(AudioNode* aNode);
+  static void SendPanToStream(AudioNode* aNode,
+                              const AudioTimelineEvent& aEvent);
   nsRefPtr<AudioParam> mPan;
 };
WebAudioUtils.cpp

@@ -6,37 +6,21 @@

 #include "WebAudioUtils.h"
 #include "AudioNodeStream.h"
-#include "AudioParamTimeline.h"
 #include "blink/HRTFDatabaseLoader.h"

 namespace mozilla {

 namespace dom {

-struct ConvertTimeToTickHelper
-{
-  AudioNodeStream* mSourceStream;
-  AudioNodeStream* mDestinationStream;
-
-  static int64_t Convert(double aTime, void* aClosure)
-  {
-    ConvertTimeToTickHelper* This = static_cast<ConvertTimeToTickHelper*> (aClosure);
-    MOZ_ASSERT(This->mSourceStream->SampleRate() == This->mDestinationStream->SampleRate());
-    return This->mSourceStream->
-      TicksFromDestinationTime(This->mDestinationStream, aTime);
-  }
-};
-
-void
-WebAudioUtils::ConvertAudioParamToTicks(AudioParamTimeline& aParam,
-                                        AudioNodeStream* aSource,
-                                        AudioNodeStream* aDest)
+void WebAudioUtils::ConvertAudioTimelineEventToTicks(AudioTimelineEvent& aEvent,
+                                                     AudioNodeStream* aSource,
+                                                     AudioNodeStream* aDest)
 {
   MOZ_ASSERT(!aSource || aSource->SampleRate() == aDest->SampleRate());
-  ConvertTimeToTickHelper ctth;
-  ctth.mSourceStream = aSource;
-  ctth.mDestinationStream = aDest;
-  aParam.ConvertEventTimesToTicks(ConvertTimeToTickHelper::Convert, &ctth, aDest->SampleRate());
+  aEvent.SetTimeInTicks(
+      aSource->TicksFromDestinationTime(aDest, aEvent.Time<double>()));
+  aEvent.mTimeConstant *= aSource->SampleRate();
+  aEvent.mDuration *= aSource->SampleRate();
 }

 void
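With the callback-plus-closure helper gone, the conversion becomes a straight computation on one event: map its time in seconds to a tick count, and scale the other time-valued fields by the sample rate. A minimal sketch of the arithmetic (hypothetical names; the real code derives the tick count from the source stream's position relative to the destination, not a bare multiplication):

// Sketch: seconds -> ticks at a fixed sample rate.
#include <cstdint>

struct Event {
  double timeSeconds;
  double timeConstant;  // seconds on entry, ticks on exit
  double duration;      // seconds on entry, ticks on exit
  int64_t timeInTicks;
};

void ConvertToTicks(Event& e, int32_t sampleRate) {
  e.timeInTicks = static_cast<int64_t>(e.timeSeconds * sampleRate);
  e.timeConstant *= sampleRate;
  e.duration *= sampleRate;
}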
WebAudioUtils.h

@@ -22,7 +22,7 @@ class AudioNodeStream;

 namespace dom {

-class AudioParamTimeline;
+struct AudioTimelineEvent;

 namespace WebAudioUtils {
   // 32 is the minimum required by the spec for createBuffer() and
@@ -55,17 +55,17 @@ namespace WebAudioUtils {
   }

   /**
-   * Converts AudioParamTimeline floating point time values to tick values
-   * with respect to a source and a destination AudioNodeStream.
+   * Converts an AudioTimelineEvent's floating point time values to tick values
+   * with respect to a destination AudioNodeStream.
    *
-   * This needs to be called for each AudioParamTimeline that gets sent to an
-   * AudioNodeEngine on the engine side where the AudioParamTimeline is
-   * received. This means that such engines need to be aware of their source
-   * and destination streams as well.
+   * This needs to be called for each AudioTimelineEvent that gets sent to an
+   * AudioNodeEngine, on the engine side where the AudioTimelineEvent is
+   * received. This means that such engines need to be aware of their
+   * destination streams as well.
    */
-  void ConvertAudioParamToTicks(AudioParamTimeline& aParam,
-                                AudioNodeStream* aSource,
-                                AudioNodeStream* aDest);
+  void ConvertAudioTimelineEventToTicks(AudioTimelineEvent& aEvent,
+                                        AudioNodeStream* aSource,
+                                        AudioNodeStream* aDest);

   /**
    * Converts a linear value to decibels. Returns aMinDecibels if the linear
TestAudioEventTimeline.cpp

@@ -9,6 +9,17 @@
 #include <sstream>
 #include <limits>

+// Mock the MediaStream class
+namespace mozilla {
+class MediaStream
+{
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaStream)
+private:
+  ~MediaStream() {}
+};
+}
+
 using namespace mozilla;
 using namespace mozilla::dom;
 using std::numeric_limits;