/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsPrintfCString.h"
#include "MediaQueue.h"
#include "DecodedAudioDataSink.h"
#include "VideoUtils.h"

#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"

namespace mozilla {

extern LazyLogModule gMediaDecoderLog;
#define SINK_LOG(msg, ...) \
  MOZ_LOG(gMediaDecoderLog, LogLevel::Debug, \
          ("DecodedAudioDataSink=%p " msg, this, ##__VA_ARGS__))
#define SINK_LOG_V(msg, ...) \
  MOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, \
          ("DecodedAudioDataSink=%p " msg, this, ##__VA_ARGS__))

namespace media {

// The number of audio frames used to fuzz rounding errors.
static const int64_t AUDIO_FUZZ_FRAMES = 1;

DecodedAudioDataSink::DecodedAudioDataSink(MediaQueue<MediaData>& aAudioQueue,
                                           int64_t aStartTime,
                                           const AudioInfo& aInfo,
                                           dom::AudioChannel aChannel)
  : AudioSink(aAudioQueue)
  , mStartTime(aStartTime)
  , mWritten(0)
  , mLastGoodPosition(0)
  , mInfo(aInfo)
  , mChannel(aChannel)
  , mPlaying(true)
{
}

DecodedAudioDataSink::~DecodedAudioDataSink()
{
}

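// Kick off playback: create the end-of-playback promise and initialize the
// underlying AudioStream. The returned promise is rejected if stream
// initialization fails, and resolved later once playback has drained.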
RefPtr<GenericPromise>
DecodedAudioDataSink::Init(const PlaybackParams& aParams)
{
  RefPtr<GenericPromise> p = mEndPromise.Ensure(__func__);
  nsresult rv = InitializeAudioStream(aParams);
  if (NS_FAILED(rv)) {
    mEndPromise.Reject(rv, __func__);
  }
  return p;
}

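// Return the playback position in microseconds, relative to mStartTime.
// The position is clamped so it never moves backwards even if the
// underlying AudioStream briefly reports a smaller value.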
int64_t
DecodedAudioDataSink::GetPosition()
{
  int64_t pos;
  if (mAudioStream &&
      (pos = mAudioStream->GetPosition()) >= 0) {
    NS_ASSERTION(pos >= mLastGoodPosition,
                 "AudioStream position shouldn't go backward");
    // Update the last good position when we got a good one.
    if (pos >= mLastGoodPosition) {
      mLastGoodPosition = pos;
    }
  }

  return mStartTime + mLastGoodPosition;
}

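// True if the AudioStream still holds frames that were written but have not
// been played out yet.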
bool
DecodedAudioDataSink::HasUnplayedFrames()
{
  // Experimentation suggests that GetPositionInFrames() is zero-indexed,
  // so we need to add 1 here before comparing it to mWritten.
  return mAudioStream && mAudioStream->GetPositionInFrames() + 1 < mWritten;
}

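// Tear down the AudioStream and resolve the end-of-playback promise if it is
// still outstanding.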
void
DecodedAudioDataSink::Shutdown()
{
  if (mAudioStream) {
    mAudioStream->Shutdown();
    mAudioStream = nullptr;
  }
  mEndPromise.ResolveIfExists(true, __func__);
}

void
DecodedAudioDataSink::SetVolume(double aVolume)
{
  if (mAudioStream) {
    mAudioStream->SetVolume(aVolume);
  }
}

void
DecodedAudioDataSink::SetPlaybackRate(double aPlaybackRate)
{
  MOZ_ASSERT(aPlaybackRate != 0, "Don't set the playbackRate to 0 on AudioStream");
  if (mAudioStream) {
    mAudioStream->SetPlaybackRate(aPlaybackRate);
  }
}

void
DecodedAudioDataSink::SetPreservesPitch(bool aPreservesPitch)
{
  if (mAudioStream) {
    mAudioStream->SetPreservesPitch(aPreservesPitch);
  }
}

void
DecodedAudioDataSink::SetPlaying(bool aPlaying)
{
  if (!mAudioStream || mPlaying == aPlaying) {
    return;
  }
  // pause/resume AudioStream as necessary.
  if (!aPlaying && !mAudioStream->IsPaused()) {
    mAudioStream->Pause();
  } else if (aPlaying && mAudioStream->IsPaused()) {
    mAudioStream->Resume();
  }
  mPlaying = aPlaying;
}

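// Create the AudioStream, apply the initial playback parameters and start it.
// On failure the stream is shut down and released, leaving the sink without
// an AudioStream.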
nsresult
DecodedAudioDataSink::InitializeAudioStream(const PlaybackParams& aParams)
{
  mAudioStream = new AudioStream(*this);
  nsresult rv = mAudioStream->Init(mInfo.mChannels, mInfo.mRate, mChannel);
  if (NS_FAILED(rv)) {
    mAudioStream->Shutdown();
    mAudioStream = nullptr;
    return rv;
  }

  // Set playback params before calling Start() so they can take effect
  // as soon as the 1st DataCallback of the AudioStream fires.
  mAudioStream->SetVolume(aParams.mVolume);
  mAudioStream->SetPlaybackRate(aParams.mPlaybackRate);
  mAudioStream->SetPreservesPitch(aParams.mPreservesPitch);
  mAudioStream->Start();

  return NS_OK;
}

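// Return the media time (in microseconds) up to which audio has been written
// to the AudioStream, or -1 on integer overflow.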
int64_t
DecodedAudioDataSink::GetEndTime() const
{
  CheckedInt64 playedUsecs = FramesToUsecs(mWritten, mInfo.mRate) + mStartTime;
  if (!playedUsecs.isValid()) {
    NS_WARNING("Int overflow calculating audio end time");
    return -1;
  }
  return playedUsecs.value();
}

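// Data callback: presumably invoked by the AudioStream created in
// InitializeAudioStream() to pull up to aFrames frames of decoded audio.
// Returns silence to cover gaps in the stream and an empty chunk when no
// data is available.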
UniquePtr<AudioStream::Chunk>
DecodedAudioDataSink::PopFrames(uint32_t aFrames)
{
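  // Wraps a decoded AudioData buffer, keeping it alive while the AudioStream
  // consumes its samples.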
  class Chunk : public AudioStream::Chunk {
  public:
    Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
      : mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
    Chunk() : mFrames(0), mData(nullptr) {}
    const AudioDataValue* Data() const { return mData; }
    uint32_t Frames() const { return mFrames; }
    uint32_t Channels() const { return mBuffer ? mBuffer->mChannels : 0; }
    uint32_t Rate() const { return mBuffer ? mBuffer->mRate : 0; }
    AudioDataValue* GetWritable() const { return mData; }
  private:
    const RefPtr<AudioData> mBuffer;
    const uint32_t mFrames;
    AudioDataValue* const mData;
  };

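  // A zero-filled buffer used to pad gaps between the frames already written
  // and the timestamp of the next decoded chunk.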
  class SilentChunk : public AudioStream::Chunk {
  public:
    SilentChunk(uint32_t aFrames, uint32_t aChannels, uint32_t aRate)
      : mFrames(aFrames)
      , mChannels(aChannels)
      , mRate(aRate)
      , mData(MakeUnique<AudioDataValue[]>(aChannels * aFrames)) {
      memset(mData.get(), 0, aChannels * aFrames * sizeof(AudioDataValue));
    }
    const AudioDataValue* Data() const { return mData.get(); }
    uint32_t Frames() const { return mFrames; }
    uint32_t Channels() const { return mChannels; }
    uint32_t Rate() const { return mRate; }
    AudioDataValue* GetWritable() const { return mData.get(); }
  private:
    const uint32_t mFrames;
    const uint32_t mChannels;
    const uint32_t mRate;
    UniquePtr<AudioDataValue[]> mData;
  };

  while (!mCurrentData) {
    // No data in the queue. Return an empty chunk.
    if (AudioQueue().GetSize() == 0) {
      return MakeUnique<Chunk>();
    }

    AudioData* a = AudioQueue().PeekFront()->As<AudioData>();

    // Ignore the element with 0 frames and try next.
    if (a->mFrames == 0) {
      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
      continue;
    }

    // Ignore invalid samples.
    if (a->mRate != mInfo.mRate || a->mChannels != mInfo.mChannels) {
      NS_WARNING(nsPrintfCString(
        "mismatched sample format, data=%p rate=%u channels=%u frames=%u",
        a->mAudioData.get(), a->mRate, a->mChannels, a->mFrames).get());
      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
      continue;
    }

    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);
    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) +
                                static_cast<int64_t>(mWritten);
    CheckedInt64 missingFrames = sampleTime - playedFrames;
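    // For example, with a 48 kHz stream a 10 ms gap between what has been
    // written and the next chunk's timestamp yields 480 missing frames,
    // which are covered by a SilentChunk below.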

    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow in DecodedAudioDataSink");
      mErrored = true;
      return MakeUnique<Chunk>();
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      auto framesToPop = std::min<uint32_t>(missingFrames.value(), aFrames);
      mWritten += framesToPop;
      return MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels, mInfo.mRate);
    }

    mCurrentData = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
    mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
                                            mCurrentData->mChannels,
                                            mCurrentData->mFrames);
    MOZ_ASSERT(mCurrentData->mFrames > 0);
  }

  auto framesToPop = std::min(aFrames, mCursor->Available());

  SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
             mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);

  UniquePtr<AudioStream::Chunk> chunk =
    MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());

  mWritten += framesToPop;
  mCursor->Advance(framesToPop);

  // All frames are popped. Reset mCurrentData so we can pop new elements from
  // the audio queue in next calls to PopFrames().
  if (mCursor->Available() == 0) {
    mCurrentData = nullptr;
  }

  return chunk;
}

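// Presumably queried by the AudioStream to decide when to start draining:
// playback is over once the queue is finished or an error occurred.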
bool
DecodedAudioDataSink::Ended() const
{
  // Return true when error encountered so AudioStream can start draining.
  return AudioQueue().IsFinished() || mErrored;
}

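// Notification that the AudioStream has played everything written to it;
// resolve the end-of-playback promise.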
void
DecodedAudioDataSink::Drained()
{
  SINK_LOG("Drained");
  // FIXME: On OSX the audio backend could trigger Drained() twice, which
  // crashes because the promise has already been resolved and freed.
  // See bug 1246108.
  mEndPromise.ResolveIfExists(true, __func__);
}

} // namespace media
} // namespace mozilla