Mirror of https://github.com/mozilla/gecko-dev.git
Backed out changeset 2e2c930a960c (bug 948269) for android 4.0 mochitest-2 permaorange
This commit is contained in:
Parent: accd5a8842
Commit: 3053bbf32b
@@ -22,8 +22,6 @@ class MediaDecoder;
class AudioAvailableEventManager
{
public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioAvailableEventManager)

  AudioAvailableEventManager(MediaDecoder* aDecoder);
  ~AudioAvailableEventManager();
|
@ -1,413 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim:set ts=2 sw=2 sts=2 et cindent: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
#include "AudioSink.h"
|
||||
#include "MediaDecoderStateMachine.h"
|
||||
#include "AudioStream.h"
|
||||
#include "prenv.h"
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
#ifdef PR_LOGGING
|
||||
extern PRLogModuleInfo* gMediaDecoderLog;
|
||||
#define SINK_LOG(type, msg) PR_LOG(gMediaDecoderLog, type, msg)
|
||||
#else
|
||||
#define SINK_LOG(type, msg)
|
||||
#endif
|
||||
|
||||
AudioSink::AudioSink(MediaDecoderStateMachine* aStateMachine, AudioAvailableEventManager* aEventManager,
|
||||
int64_t aStartTime, AudioInfo aInfo, dom::AudioChannelType aChannelType)
|
||||
: mStateMachine(aStateMachine)
|
||||
, mEventManager(aEventManager)
|
||||
, mStartTime(aStartTime)
|
||||
, mWritten(0)
|
||||
, mInfo(aInfo)
|
||||
, mChannelType(aChannelType)
|
||||
, mVolume(1.0)
|
||||
, mPlaybackRate(1.0)
|
||||
, mPreservesPitch(false)
|
||||
, mStopAudioThread(false)
|
||||
, mSetVolume(false)
|
||||
, mSetPlaybackRate(false)
|
||||
, mSetPreservesPitch(false)
|
||||
, mPlaying(true)
|
||||
{
|
||||
NS_ASSERTION(mStartTime != -1, "Should have audio start time by now");
|
||||
}
|
||||
|
||||
nsresult
|
||||
AudioSink::Init()
|
||||
{
|
||||
nsresult rv = NS_NewNamedThread("Media Audio",
|
||||
getter_AddRefs(mThread),
|
||||
nullptr,
|
||||
MEDIA_THREAD_STACK_SIZE);
|
||||
if (NS_FAILED(rv)) {
|
||||
return rv;
|
||||
}
|
||||
nsCOMPtr<nsIRunnable> event = NS_NewRunnableMethod(this, &AudioSink::AudioLoop);
|
||||
return mThread->Dispatch(event, NS_DISPATCH_NORMAL);
|
||||
}
|
||||
|
||||
int64_t
|
||||
AudioSink::GetPosition()
|
||||
{
|
||||
if (!mAudioStream) {
|
||||
return 0;
|
||||
}
|
||||
return mAudioStream->GetPosition();
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::PrepareToShutdown()
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
mStopAudioThread = true;
|
||||
if (mAudioStream) {
|
||||
mAudioStream->Cancel();
|
||||
}
|
||||
GetReentrantMonitor().NotifyAll();
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::Shutdown()
|
||||
{
|
||||
mThread->Shutdown();
|
||||
mThread = nullptr;
|
||||
MOZ_ASSERT(!mAudioStream);
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::SetVolume(double aVolume)
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
mVolume = aVolume;
|
||||
mSetVolume = true;
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::SetPlaybackRate(double aPlaybackRate)
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
NS_ASSERTION(mPlaybackRate != 0, "Don't set the playbackRate to 0 on AudioStream");
|
||||
mPlaybackRate = aPlaybackRate;
|
||||
mSetPlaybackRate = true;
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::SetPreservesPitch(bool aPreservesPitch)
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
mPreservesPitch = aPreservesPitch;
|
||||
mSetPreservesPitch = true;
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::StartPlayback()
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
mPlaying = true;
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::StopPlayback()
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
mPlaying = false;
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::AudioLoop()
|
||||
{
|
||||
AssertOnAudioThread();
|
||||
SINK_LOG(PR_LOG_DEBUG, ("%p AudioSink: AudioLoop started", this));
|
||||
|
||||
if (NS_FAILED(InitializeAudioStream())) {
|
||||
NS_WARNING("Initializing AudioStream failed.");
|
||||
return;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
WaitForAudioToPlay();
|
||||
if (!IsPlaybackContinuing()) {
|
||||
break;
|
||||
}
|
||||
|
||||
// See if there's a gap in the audio. If there is, push silence into the
|
||||
// audio hardware, so we can play across the gap.
|
||||
// Calculate the timestamp of the next chunk of audio in numbers of
|
||||
// samples.
|
||||
NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
|
||||
CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);
|
||||
|
||||
// Calculate the number of frames that have been pushed onto the audio hardware.
|
||||
CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) + mWritten;
|
||||
|
||||
CheckedInt64 missingFrames = sampleTime - playedFrames;
|
||||
if (!missingFrames.isValid() || !sampleTime.isValid()) {
|
||||
NS_WARNING("Int overflow adding in AudioLoop");
|
||||
break;
|
||||
}
|
||||
|
||||
if (missingFrames.value() > 0) {
|
||||
// The next audio chunk begins some time after the end of the last chunk
|
||||
// we pushed to the audio hardware. We must push silence into the audio
|
||||
// hardware so that the next audio chunk begins playback at the correct
|
||||
// time.
|
||||
missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
|
||||
mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()),
|
||||
playedFrames.value());
|
||||
} else {
|
||||
mWritten += PlayFromAudioQueue(sampleTime.value());
|
||||
}
|
||||
int64_t endTime = GetEndTime();
|
||||
if (endTime != -1) {
|
||||
mStateMachine->OnAudioEndTimeUpdate(endTime);
|
||||
}
|
||||
}
|
||||
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
|
||||
if (AudioQueue().AtEndOfStream() && !mStopAudioThread) {
|
||||
Drain();
|
||||
}
|
||||
SINK_LOG(PR_LOG_DEBUG, ("%p AudioSink: AudioLoop complete", this));
|
||||
Cleanup();
|
||||
SINK_LOG(PR_LOG_DEBUG, ("%p AudioSink: AudioLoop exit", this));
|
||||
}
|
||||
|
||||
nsresult
|
||||
AudioSink::InitializeAudioStream()
|
||||
{
|
||||
// AudioStream initialization can block for extended periods in unusual
|
||||
// circumstances, so we take care to drop the decoder monitor while
|
||||
// initializing.
|
||||
nsAutoPtr<AudioStream> audioStream(new AudioStream());
|
||||
nsresult rv = audioStream->Init(mInfo.mChannels, mInfo.mRate,
|
||||
mChannelType, AudioStream::HighLatency);
|
||||
if (NS_SUCCEEDED(rv)) {
|
||||
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
|
||||
mAudioStream = audioStream;
|
||||
UpdateStreamSettings();
|
||||
}
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::Drain()
|
||||
{
|
||||
MOZ_ASSERT(!mAudioStream->IsPaused());
|
||||
AssertCurrentThreadInMonitor();
|
||||
// If the media was too short to trigger the start of the audio stream,
|
||||
// start it now.
|
||||
mAudioStream->Start();
|
||||
{
|
||||
ReentrantMonitorAutoExit exit(GetReentrantMonitor());
|
||||
mAudioStream->Drain();
|
||||
}
|
||||
// Fire one last event for any extra frames that didn't fill a framebuffer.
|
||||
mEventManager->Drain(GetEndTime());
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::Cleanup()
|
||||
{
|
||||
// Must hold lock while shutting down and anulling the audio stream to prevent
|
||||
// state machine thread trying to use it while we're destroying it.
|
||||
AssertCurrentThreadInMonitor();
|
||||
mAudioStream->Shutdown();
|
||||
mAudioStream = nullptr;
|
||||
mEventManager->Clear();
|
||||
mStateMachine->OnAudioSinkComplete();
|
||||
}
|
||||
|
||||
bool
|
||||
AudioSink::ExpectMoreAudioData()
|
||||
{
|
||||
return AudioQueue().GetSize() == 0 && !AudioQueue().IsFinished();
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::WaitForAudioToPlay()
|
||||
{
|
||||
// Wait while we're not playing, and we're not shutting down, or we're
|
||||
// playing and we've got no audio to play.
|
||||
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
|
||||
while (!mStopAudioThread && (!mPlaying || ExpectMoreAudioData())) {
|
||||
if (!mPlaying && !mAudioStream->IsPaused()) {
|
||||
mAudioStream->Pause();
|
||||
}
|
||||
mon.Wait();
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
AudioSink::IsPlaybackContinuing()
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
|
||||
// If we're shutting down, break out and exit the audio thread.
|
||||
// Also break out if audio is being captured.
|
||||
if (mStopAudioThread || AudioQueue().AtEndOfStream()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (mPlaying && mAudioStream->IsPaused()) {
|
||||
mAudioStream->Resume();
|
||||
}
|
||||
|
||||
UpdateStreamSettings();
|
||||
return true;
|
||||
}
|
||||
|
||||
uint32_t
|
||||
AudioSink::PlaySilence(uint32_t aFrames, uint64_t aFrameOffset)
|
||||
{
|
||||
// Maximum number of bytes we'll allocate and write at once to the audio
|
||||
// hardware when the audio stream contains missing frames and we're
|
||||
// writing silence in order to fill the gap. We limit our silence-writes
|
||||
// to 32KB in order to avoid allocating an impossibly large chunk of
|
||||
// memory if we encounter a large chunk of silence.
|
||||
const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;
|
||||
|
||||
AssertOnAudioThread();
|
||||
NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
|
||||
uint32_t maxFrames = SILENCE_BYTES_CHUNK / mInfo.mChannels / sizeof(AudioDataValue);
|
||||
uint32_t frames = std::min(aFrames, maxFrames);
|
||||
if (!PR_GetEnv("MOZ_QUIET")) {
|
||||
SINK_LOG(PR_LOG_DEBUG, ("%p AudioSink: playing %u frames of silence", this, aFrames));
|
||||
}
|
||||
WriteSilence(frames);
|
||||
// Dispatch events to the DOM for the audio just written.
|
||||
mEventManager->QueueWrittenAudioData(nullptr, frames * mInfo.mChannels,
|
||||
(aFrameOffset + frames) * mInfo.mChannels);
|
||||
return frames;
|
||||
}
|
||||
|
||||
uint32_t
|
||||
AudioSink::PlayFromAudioQueue(uint64_t aFrameOffset)
|
||||
{
|
||||
AssertOnAudioThread();
|
||||
NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
|
||||
nsAutoPtr<AudioData> audio(AudioQueue().PopFront());
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
|
||||
NS_WARN_IF_FALSE(mPlaying, "Should be playing");
|
||||
// Awaken the decode loop if it's waiting for space to free up in the
|
||||
// audio queue.
|
||||
GetReentrantMonitor().NotifyAll();
|
||||
}
|
||||
if (!PR_GetEnv("MOZ_QUIET")) {
|
||||
SINK_LOG(PR_LOG_DEBUG, ("%p AudioSink: playing %u frames of audio at time %lld",
|
||||
this, audio->mFrames, audio->mTime));
|
||||
}
|
||||
mAudioStream->Write(audio->mAudioData, audio->mFrames);
|
||||
|
||||
StartAudioStreamPlaybackIfNeeded();
|
||||
|
||||
int outChan = mAudioStream->GetOutChannels();
|
||||
// Dispatch events to the DOM for the audio just written.
|
||||
mEventManager->QueueWrittenAudioData(audio->mAudioData.get(),
|
||||
audio->mFrames * outChan,
|
||||
(aFrameOffset + audio->mFrames) * outChan);
|
||||
if (audio->mOffset != -1) {
|
||||
mStateMachine->OnPlaybackOffsetUpdate(audio->mOffset);
|
||||
}
|
||||
return audio->mFrames;
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::UpdateStreamSettings()
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
|
||||
bool setVolume = mSetVolume;
|
||||
bool setPlaybackRate = mSetPlaybackRate;
|
||||
bool setPreservesPitch = mSetPreservesPitch;
|
||||
double volume = mVolume;
|
||||
double playbackRate = mPlaybackRate;
|
||||
bool preservesPitch = mPreservesPitch;
|
||||
|
||||
mSetVolume = false;
|
||||
mSetPlaybackRate = false;
|
||||
mSetPreservesPitch = false;
|
||||
|
||||
{
|
||||
ReentrantMonitorAutoExit exit(GetReentrantMonitor());
|
||||
if (setVolume) {
|
||||
mAudioStream->SetVolume(volume);
|
||||
}
|
||||
|
||||
if (setPlaybackRate &&
|
||||
NS_FAILED(mAudioStream->SetPlaybackRate(playbackRate))) {
|
||||
NS_WARNING("Setting the playback rate failed in AudioSink.");
|
||||
}
|
||||
|
||||
if (setPreservesPitch &&
|
||||
NS_FAILED(mAudioStream->SetPreservesPitch(preservesPitch))) {
|
||||
NS_WARNING("Setting the pitch preservation failed in AudioSink.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::StartAudioStreamPlaybackIfNeeded()
|
||||
{
|
||||
// This value has been chosen empirically.
|
||||
const uint32_t MIN_WRITE_BEFORE_START_USECS = 200000;
|
||||
|
||||
// We want to have enough data in the buffer to start the stream.
|
||||
if (static_cast<double>(mAudioStream->GetWritten()) / mAudioStream->GetRate() >=
|
||||
static_cast<double>(MIN_WRITE_BEFORE_START_USECS) / USECS_PER_S) {
|
||||
mAudioStream->Start();
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::WriteSilence(uint32_t aFrames)
|
||||
{
|
||||
uint32_t numSamples = aFrames * mInfo.mChannels;
|
||||
nsAutoTArray<AudioDataValue, 1000> buf;
|
||||
buf.SetLength(numSamples);
|
||||
memset(buf.Elements(), 0, numSamples * sizeof(AudioDataValue));
|
||||
mAudioStream->Write(buf.Elements(), aFrames);
|
||||
|
||||
StartAudioStreamPlaybackIfNeeded();
|
||||
}
|
||||
|
||||
int64_t
|
||||
AudioSink::GetEndTime()
|
||||
{
|
||||
CheckedInt64 playedUsecs = FramesToUsecs(mWritten, mInfo.mRate) + mStartTime;
|
||||
if (!playedUsecs.isValid()) {
|
||||
NS_WARNING("Int overflow calculating audio end time");
|
||||
return -1;
|
||||
}
|
||||
return playedUsecs.value();
|
||||
}
|
||||
|
||||
MediaQueue<AudioData>&
|
||||
AudioSink::AudioQueue()
|
||||
{
|
||||
return mStateMachine->mReader->AudioQueue();
|
||||
}
|
||||
|
||||
ReentrantMonitor&
|
||||
AudioSink::GetReentrantMonitor()
|
||||
{
|
||||
return mStateMachine->mDecoder->GetReentrantMonitor();
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::AssertCurrentThreadInMonitor()
|
||||
{
|
||||
return mStateMachine->AssertCurrentThreadInMonitor();
|
||||
}
|
||||
|
||||
void
|
||||
AudioSink::AssertOnAudioThread()
|
||||
{
|
||||
MOZ_ASSERT(IsCurrentThread(mThread));
|
||||
}
|
||||
|
||||
} // namespace mozilla
|
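The AudioLoop above fills gaps in the audio by comparing the timestamp of the next queued chunk against the number of frames already written. A minimal standalone sketch of that arithmetic, using plain int64_t instead of Gecko's CheckedInt64 (sample values are illustrative, not from the tree):

// Sketch of the gap-filling arithmetic in the audio loop.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static int64_t UsecsToFrames(int64_t aUsecs, int64_t aRate) {
  return aUsecs * aRate / 1000000;  // microseconds -> frame count
}

int main() {
  const int64_t rate = 44100;            // frames per second
  const int64_t startTime = 0;           // presentation time of the first frame (us)
  const int64_t written = 88200;         // frames already pushed to the hardware (2 s)
  const int64_t nextChunkTime = 2500000; // timestamp of the next queued chunk (us)

  const int64_t playedFrames = UsecsToFrames(startTime, rate) + written;
  const int64_t sampleTime = UsecsToFrames(nextChunkTime, rate);
  int64_t missingFrames = sampleTime - playedFrames;

  if (missingFrames > 0) {
    // The next chunk starts after what has been written, so the loop would
    // push this many frames of silence (capped per iteration by the 32 KB limit).
    missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames);
    std::printf("write %lld frames of silence\n", (long long)missingFrames);
  } else {
    std::printf("no gap; play the next chunk directly\n");
  }
  return 0;
}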
|
@ -1,138 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim:set ts=2 sw=2 sts=2 et cindent: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
#if !defined(AudioSink_h__)
|
||||
#define AudioSink_h__
|
||||
|
||||
#include "nsISupportsImpl.h"
|
||||
#include "MediaDecoderReader.h"
|
||||
#include "AudioChannelCommon.h"
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
class AudioAvailableEventManager;
|
||||
class AudioStream;
|
||||
class MediaDecoderStateMachine;
|
||||
|
||||
class AudioSink {
|
||||
public:
|
||||
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioSink)
|
||||
|
||||
AudioSink(MediaDecoderStateMachine* aStateMachine, AudioAvailableEventManager* aEventManager,
|
||||
int64_t aStartTime, AudioInfo aInfo, dom::AudioChannelType aChannelType);
|
||||
|
||||
nsresult Init();
|
||||
|
||||
int64_t GetPosition();
|
||||
|
||||
// Tell the AudioSink to stop processing and initiate shutdown. Must be
|
||||
// called with the decoder monitor held.
|
||||
void PrepareToShutdown();
|
||||
|
||||
// Shut down the AudioSink's resources. The decoder monitor must not be
|
||||
// held during this call, as it may block processing thread event queues.
|
||||
void Shutdown();
|
||||
|
||||
void SetVolume(double aVolume);
|
||||
void SetPlaybackRate(double aPlaybackRate);
|
||||
void SetPreservesPitch(bool aPreservesPitch);
|
||||
|
||||
void StartPlayback();
|
||||
void StopPlayback();
|
||||
|
||||
private:
|
||||
// The main loop for the audio thread. Sent to the thread as
|
||||
// an nsRunnableMethod. This continually does blocking writes to
|
||||
// to audio stream to play audio data.
|
||||
void AudioLoop();
|
||||
|
||||
// Allocate and initialize mAudioStream. Returns NS_OK on success.
|
||||
nsresult InitializeAudioStream();
|
||||
|
||||
void Drain();
|
||||
|
||||
void Cleanup();
|
||||
|
||||
bool ExpectMoreAudioData();
|
||||
|
||||
// Wait on the decoder monitor until playback is ready or the sink is told to shut down.
|
||||
void WaitForAudioToPlay();
|
||||
|
||||
// Check if the sink has been told to shut down, resuming mAudioStream if
|
||||
// not. Returns true if processing should continue, false if AudioLoop
|
||||
// should shutdown.
|
||||
bool IsPlaybackContinuing();
|
||||
|
||||
// Write aFrames of audio frames of silence to the audio hardware. Returns
|
||||
// the number of frames actually written. The write size is capped at
|
||||
// SILENCE_BYTES_CHUNK (32kB), so must be called in a loop to write the
|
||||
// desired number of frames. This ensures that the playback position
|
||||
// advances smoothly, and guarantees that we don't try to allocate an
|
||||
// impossibly large chunk of memory in order to play back silence. Called
|
||||
// on the audio thread.
|
||||
uint32_t PlaySilence(uint32_t aFrames, uint64_t aFrameOffset);
|
||||
|
||||
// Pops an audio chunk from the front of the audio queue, and pushes its
|
||||
// audio data to the audio hardware. MozAudioAvailable data is also queued
|
||||
// here. Called on the audio thread.
|
||||
uint32_t PlayFromAudioQueue(uint64_t aFrameOffset);
|
||||
|
||||
void UpdateStreamSettings();
|
||||
|
||||
// If we have already written enough frames to the AudioStream, start the
|
||||
// playback.
|
||||
void StartAudioStreamPlaybackIfNeeded();
|
||||
void WriteSilence(uint32_t aFrames);
|
||||
|
||||
int64_t GetEndTime();
|
||||
|
||||
MediaQueue<AudioData>& AudioQueue();
|
||||
|
||||
ReentrantMonitor& GetReentrantMonitor();
|
||||
void AssertCurrentThreadInMonitor();
|
||||
void AssertOnAudioThread();
|
||||
|
||||
nsRefPtr<MediaDecoderStateMachine> mStateMachine;
|
||||
nsRefPtr<AudioAvailableEventManager> mEventManager;
|
||||
|
||||
// Thread for pushing audio onto the audio hardware.
|
||||
// The "audio push thread".
|
||||
nsCOMPtr<nsIThread> mThread;
|
||||
|
||||
// The audio stream resource. Used on the state machine, and audio threads.
|
||||
// This is created and destroyed on the audio thread, while holding the
|
||||
// decoder monitor, so if this is used off the audio thread, you must
|
||||
// first acquire the decoder monitor and check that it is non-null.
|
||||
nsAutoPtr<AudioStream> mAudioStream;
|
||||
|
||||
// The presentation time of the first audio frame that was played in
|
||||
// microseconds. We can add this to the audio stream position to determine
|
||||
// the current audio time. Accessed on audio and state machine thread.
|
||||
// Synchronized by decoder monitor.
|
||||
int64_t mStartTime;
|
||||
|
||||
// PCM frames written to the stream so far.
|
||||
int64_t mWritten;
|
||||
|
||||
AudioInfo mInfo;
|
||||
|
||||
dom::AudioChannelType mChannelType;
|
||||
|
||||
double mVolume;
|
||||
double mPlaybackRate;
|
||||
bool mPreservesPitch;
|
||||
|
||||
bool mStopAudioThread;
|
||||
|
||||
bool mSetVolume;
|
||||
bool mSetPlaybackRate;
|
||||
bool mSetPreservesPitch;
|
||||
|
||||
bool mPlaying;
|
||||
};
|
||||
|
||||
} // namespace mozilla
|
||||
|
||||
#endif
|
|
@@ -525,14 +525,6 @@ AudioStream::SetVolume(double aVolume)
  mVolume = aVolume;
}

void
AudioStream::Cancel()
{
  MonitorAutoLock mon(mMonitor);
  mState = ERRORED;
  mon.NotifyAll();
}

void
AudioStream::Drain()
{
|
|
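AudioStream::Cancel() above wakes any thread blocked in Drain() by flagging the stream as errored and notifying the monitor. A standalone sketch of that wake-up pattern with the C++ standard library (Gecko's Monitor is replaced by std::mutex/std::condition_variable; names are illustrative):

// Sketch: a blocking Drain() that a concurrent Cancel() can interrupt.
#include <condition_variable>
#include <mutex>
#include <thread>

class ToyStream {
public:
  void Drain() {
    std::unique_lock<std::mutex> lock(mMutex);
    // Wait until playback finishes or the stream is errored/cancelled.
    mCond.wait(lock, [this] { return mDrained || mErrored; });
  }

  void Cancel() {
    std::lock_guard<std::mutex> lock(mMutex);
    mErrored = true;     // corresponds to mState = ERRORED
    mCond.notify_all();  // corresponds to mon.NotifyAll()
  }

private:
  std::mutex mMutex;
  std::condition_variable mCond;
  bool mDrained = false;
  bool mErrored = false;
};

int main() {
  ToyStream s;
  std::thread waiter([&] { s.Drain(); });  // would block forever without Cancel()
  s.Cancel();
  waiter.join();
  return 0;
}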
@@ -221,9 +221,6 @@ public:
  // Block until buffered audio data has been consumed.
  void Drain();

  // Break any blocking operation and set the stream to shutdown.
  void Cancel();

  // Start the stream.
  void Start();
|
|
|
@@ -13,7 +13,7 @@
#include <stdint.h>

#include "MediaDecoderStateMachine.h"
#include "AudioSink.h"
#include "AudioStream.h"
#include "nsTArray.h"
#include "MediaDecoder.h"
#include "MediaDecoderReader.h"
|
@@ -62,6 +62,13 @@ static const uint32_t LOW_AUDIO_USECS = 300000;
// less than the low audio threshold.
const int64_t AMPLE_AUDIO_USECS = 1000000;

// Maximum number of bytes we'll allocate and write at once to the audio
// hardware when the audio stream contains missing frames and we're
// writing silence in order to fill the gap. We limit our silence-writes
// to 32KB in order to avoid allocating an impossibly large chunk of
// memory if we encounter a large chunk of silence.
const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;

// If we have fewer than LOW_VIDEO_FRAMES decoded frames, and
// we're not "pumping video", we'll skip the video up to the next keyframe
// which is at or after the current playback position.
|
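For scale, the 32KB cap above becomes a per-write frame limit via SILENCE_BYTES_CHUNK / channels / sizeof(AudioDataValue), as used in PlaySilence(). A small standalone calculation, assuming stereo output and a 16-bit AudioDataValue (one of its build-time configurations):

// How many frames of silence fit in one 32 KiB write, and how long that is.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;
  const uint32_t channels = 2;                     // stereo, for example
  const uint32_t bytesPerSample = sizeof(int16_t); // assumes 16-bit AudioDataValue
  const uint32_t rate = 44100;                     // frames per second

  const uint32_t maxFrames = SILENCE_BYTES_CHUNK / channels / bytesPerSample;
  std::printf("max %u frames per write (~%.0f ms at %u Hz)\n",
              maxFrames, 1000.0 * maxFrames / rate, rate);
  return 0;
}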
@@ -112,6 +119,9 @@ static const uint32_t QUICK_BUFFERING_LOW_DATA_USECS = 1000000;
static_assert(QUICK_BUFFERING_LOW_DATA_USECS <= AMPLE_AUDIO_USECS,
              "QUICK_BUFFERING_LOW_DATA_USECS is too large");

// This value has been chosen empirically.
static const uint32_t AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS = 200000;

// The amount of instability we tollerate in calls to
// MediaDecoderStateMachine::UpdateEstimatedDuration(); changes of duration
// less than this are ignored, as they're assumed to be the result of
|
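The 200 ms constant above is the threshold behind StartAudioStreamPlaybackIfNeeded(): the stream is started only once the frames written cover at least that much playback time. A standalone sketch of the comparison (illustrative values):

#include <cstdint>
#include <cstdio>

// Returns true once enough audio has been written to start the stream.
static bool ShouldStartStream(int64_t framesWritten, uint32_t rate) {
  const uint32_t MIN_WRITE_BEFORE_START_USECS = 200000; // 200 ms
  const double USECS_PER_S = 1e6;
  return static_cast<double>(framesWritten) / rate >=
         MIN_WRITE_BEFORE_START_USECS / USECS_PER_S;
}

int main() {
  std::printf("%d\n", ShouldStartStream(4410, 44100));  // ~100 ms buffered -> 0
  std::printf("%d\n", ShouldStartStream(9000, 44100));  // ~204 ms buffered -> 1
  return 0;
}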
@ -361,9 +371,9 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
|
|||
bool aRealTime) :
|
||||
mDecoder(aDecoder),
|
||||
mState(DECODER_STATE_DECODING_METADATA),
|
||||
mResetPlayStartTime(false),
|
||||
mSyncPointInMediaStream(-1),
|
||||
mSyncPointInDecodedStream(-1),
|
||||
mResetPlayStartTime(false),
|
||||
mPlayDuration(0),
|
||||
mStartTime(-1),
|
||||
mEndTime(-1),
|
||||
|
@@ -396,7 +406,7 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
  mDidThrottleAudioDecoding(false),
  mDidThrottleVideoDecoding(false),
  mRequestedNewDecodeThread(false),
  mEventManager(new AudioAvailableEventManager(aDecoder)),
  mEventManager(aDecoder),
  mLastFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED)
{
  MOZ_COUNT_CTOR(MediaDecoderStateMachine);
|
@ -550,7 +560,7 @@ void MediaDecoderStateMachine::SendStreamAudio(AudioData* aAudio,
|
|||
aStream->mLastAudioPacketTime = aAudio->mTime;
|
||||
aStream->mLastAudioPacketEndTime = aAudio->GetEndTime();
|
||||
|
||||
// This logic has to mimic AudioSink closely to make sure we write
|
||||
// This logic has to mimic AudioLoop closely to make sure we write
|
||||
// the exact same silences
|
||||
CheckedInt64 audioWrittenOffset = UsecsToFrames(mInfo.mAudio.mRate,
|
||||
aStream->mInitialTime + mStartTime) + aStream->mAudioFramesWritten;
|
||||
|
@@ -619,11 +629,11 @@ void MediaDecoderStateMachine::SendStreamData()
  if (mState == DECODER_STATE_DECODING_METADATA)
    return;

  // If there's still an audio sink alive, then we can't send any stream
  // data yet since both SendStreamData and the audio sink want to be in
  // charge of popping the audio queue. We're waiting for the audio sink
  // If there's still an audio thread alive, then we can't send any stream
  // data yet since both SendStreamData and the audio thread want to be in
  // charge of popping the audio queue. We're waiting for the audio thread
  // to die before sending anything to our stream.
  if (mAudioSink)
  if (mAudioThread)
    return;

  int64_t minLastAudioPacketTime = INT64_MAX;
|
@ -738,7 +748,7 @@ void MediaDecoderStateMachine::SendStreamData()
|
|||
mReader->AudioQueue().PushFront(a.forget());
|
||||
break;
|
||||
}
|
||||
OnAudioEndTimeUpdate(std::max(mAudioEndTime, a->GetEndTime()));
|
||||
mAudioEndTime = std::max(mAudioEndTime, a->GetEndTime());
|
||||
}
|
||||
|
||||
if (finished) {
|
||||
|
@ -936,7 +946,7 @@ void MediaDecoderStateMachine::DecodeLoop()
|
|||
|
||||
SendStreamData();
|
||||
|
||||
// Notify to ensure that the AudioSink is not waiting, in case it was
|
||||
// Notify to ensure that the AudioLoop() is not waiting, in case it was
|
||||
// waiting for more audio to be decoded.
|
||||
mDecoder->GetReentrantMonitor().NotifyAll();
|
||||
|
||||
|
@ -991,6 +1001,300 @@ bool MediaDecoderStateMachine::IsPlaying()
|
|||
return !mPlayStartTime.IsNull();
|
||||
}
|
||||
|
||||
// If we have already written enough frames to the AudioStream, start the
|
||||
// playback.
|
||||
static void
|
||||
StartAudioStreamPlaybackIfNeeded(AudioStream* aStream)
|
||||
{
|
||||
// We want to have enough data in the buffer to start the stream.
|
||||
if (static_cast<double>(aStream->GetWritten()) / aStream->GetRate() >=
|
||||
static_cast<double>(AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS) / USECS_PER_S) {
|
||||
aStream->Start();
|
||||
}
|
||||
}
|
||||
|
||||
static void WriteSilence(AudioStream* aStream, uint32_t aFrames)
|
||||
{
|
||||
uint32_t numSamples = aFrames * aStream->GetChannels();
|
||||
nsAutoTArray<AudioDataValue, 1000> buf;
|
||||
buf.SetLength(numSamples);
|
||||
memset(buf.Elements(), 0, numSamples * sizeof(AudioDataValue));
|
||||
aStream->Write(buf.Elements(), aFrames);
|
||||
|
||||
StartAudioStreamPlaybackIfNeeded(aStream);
|
||||
}
|
||||
|
||||
void MediaDecoderStateMachine::AudioLoop()
|
||||
{
|
||||
NS_ASSERTION(OnAudioThread(), "Should be on audio thread.");
|
||||
DECODER_LOG(PR_LOG_DEBUG, ("%p Begun audio thread/loop", mDecoder.get()));
|
||||
int64_t audioDuration = 0;
|
||||
int64_t audioStartTime = -1;
|
||||
uint32_t channels, rate;
|
||||
double volume = -1;
|
||||
bool setVolume;
|
||||
double playbackRate = -1;
|
||||
bool setPlaybackRate;
|
||||
bool preservesPitch;
|
||||
bool setPreservesPitch;
|
||||
AudioChannelType audioChannelType;
|
||||
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
mAudioCompleted = false;
|
||||
audioStartTime = mAudioStartTime;
|
||||
NS_ASSERTION(audioStartTime != -1, "Should have audio start time by now");
|
||||
channels = mInfo.mAudio.mChannels;
|
||||
rate = mInfo.mAudio.mRate;
|
||||
|
||||
audioChannelType = mDecoder->GetAudioChannelType();
|
||||
volume = mVolume;
|
||||
preservesPitch = mPreservesPitch;
|
||||
playbackRate = mPlaybackRate;
|
||||
}
|
||||
|
||||
{
|
||||
// AudioStream initialization can block for extended periods in unusual
|
||||
// circumstances, so we take care to drop the decoder monitor while
|
||||
// initializing.
|
||||
nsAutoPtr<AudioStream> audioStream(new AudioStream());
|
||||
audioStream->Init(channels, rate, audioChannelType, AudioStream::HighLatency);
|
||||
audioStream->SetVolume(volume);
|
||||
if (audioStream->SetPreservesPitch(preservesPitch) != NS_OK) {
|
||||
NS_WARNING("Setting the pitch preservation failed at AudioLoop start.");
|
||||
}
|
||||
if (playbackRate != 1.0) {
|
||||
NS_ASSERTION(playbackRate != 0,
|
||||
"Don't set the playbackRate to 0 on an AudioStream.");
|
||||
if (audioStream->SetPlaybackRate(playbackRate) != NS_OK) {
|
||||
NS_WARNING("Setting the playback rate failed at AudioLoop start.");
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
mAudioStream = audioStream;
|
||||
}
|
||||
}
|
||||
|
||||
while (1) {
|
||||
// Wait while we're not playing, and we're not shutting down, or we're
|
||||
// playing and we've got no audio to play.
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
NS_ASSERTION(mState != DECODER_STATE_DECODING_METADATA,
|
||||
"Should have meta data before audio started playing.");
|
||||
while (mState != DECODER_STATE_SHUTDOWN &&
|
||||
!mStopAudioThread &&
|
||||
(!IsPlaying() ||
|
||||
mState == DECODER_STATE_BUFFERING ||
|
||||
(mReader->AudioQueue().GetSize() == 0 &&
|
||||
!mReader->AudioQueue().AtEndOfStream())))
|
||||
{
|
||||
if (!IsPlaying() && !mAudioStream->IsPaused()) {
|
||||
mAudioStream->Pause();
|
||||
}
|
||||
mon.Wait();
|
||||
}
|
||||
|
||||
// If we're shutting down, break out and exit the audio thread.
|
||||
// Also break out if audio is being captured.
|
||||
if (mState == DECODER_STATE_SHUTDOWN ||
|
||||
mStopAudioThread ||
|
||||
mReader->AudioQueue().AtEndOfStream())
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
// We only want to go to the expense of changing the volume if
|
||||
// the volume has changed.
|
||||
setVolume = volume != mVolume;
|
||||
volume = mVolume;
|
||||
|
||||
// Same for the playbackRate.
|
||||
setPlaybackRate = playbackRate != mPlaybackRate;
|
||||
playbackRate = mPlaybackRate;
|
||||
|
||||
// Same for the pitch preservation.
|
||||
setPreservesPitch = preservesPitch != mPreservesPitch;
|
||||
preservesPitch = mPreservesPitch;
|
||||
|
||||
if (IsPlaying() && mAudioStream->IsPaused()) {
|
||||
mAudioStream->Resume();
|
||||
}
|
||||
}
|
||||
|
||||
if (setVolume) {
|
||||
mAudioStream->SetVolume(volume);
|
||||
}
|
||||
if (setPlaybackRate) {
|
||||
NS_ASSERTION(playbackRate != 0,
|
||||
"Don't set the playbackRate to 0 in the AudioStreams");
|
||||
if (mAudioStream->SetPlaybackRate(playbackRate) != NS_OK) {
|
||||
NS_WARNING("Setting the playback rate failed in AudioLoop.");
|
||||
}
|
||||
}
|
||||
if (setPreservesPitch) {
|
||||
if (mAudioStream->SetPreservesPitch(preservesPitch) != NS_OK) {
|
||||
NS_WARNING("Setting the pitch preservation failed in AudioLoop.");
|
||||
}
|
||||
}
|
||||
NS_ASSERTION(mReader->AudioQueue().GetSize() > 0,
|
||||
"Should have data to play");
|
||||
// See if there's a gap in the audio. If there is, push silence into the
|
||||
// audio hardware, so we can play across the gap.
|
||||
const AudioData* s = mReader->AudioQueue().PeekFront();
|
||||
|
||||
// Calculate the number of frames that have been pushed onto the audio
|
||||
// hardware.
|
||||
CheckedInt64 playedFrames = UsecsToFrames(audioStartTime, rate) +
|
||||
audioDuration;
|
||||
// Calculate the timestamp of the next chunk of audio in numbers of
|
||||
// samples.
|
||||
CheckedInt64 sampleTime = UsecsToFrames(s->mTime, rate);
|
||||
CheckedInt64 missingFrames = sampleTime - playedFrames;
|
||||
if (!missingFrames.isValid() || !sampleTime.isValid()) {
|
||||
NS_WARNING("Int overflow adding in AudioLoop()");
|
||||
break;
|
||||
}
|
||||
|
||||
int64_t framesWritten = 0;
|
||||
if (missingFrames.value() > 0) {
|
||||
// The next audio chunk begins some time after the end of the last chunk
|
||||
// we pushed to the audio hardware. We must push silence into the audio
|
||||
// hardware so that the next audio chunk begins playback at the correct
|
||||
// time.
|
||||
missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
|
||||
DECODER_LOG(PR_LOG_DEBUG, ("%p Decoder playing %d frames of silence",
|
||||
mDecoder.get(), int32_t(missingFrames.value())));
|
||||
framesWritten = PlaySilence(static_cast<uint32_t>(missingFrames.value()),
|
||||
channels, playedFrames.value());
|
||||
} else {
|
||||
framesWritten = PlayFromAudioQueue(sampleTime.value(), channels);
|
||||
}
|
||||
audioDuration += framesWritten;
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
CheckedInt64 playedUsecs = FramesToUsecs(audioDuration, rate) + audioStartTime;
|
||||
if (!playedUsecs.isValid()) {
|
||||
NS_WARNING("Int overflow calculating audio end time");
|
||||
break;
|
||||
}
|
||||
mAudioEndTime = playedUsecs.value();
|
||||
}
|
||||
}
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
if (mReader->AudioQueue().AtEndOfStream() &&
|
||||
mState != DECODER_STATE_SHUTDOWN &&
|
||||
!mStopAudioThread)
|
||||
{
|
||||
// If the media was too short to trigger the start of the audio stream,
|
||||
// start it now.
|
||||
mAudioStream->Start();
|
||||
// Last frame pushed to audio hardware, wait for the audio to finish,
|
||||
// before the audio thread terminates.
|
||||
bool seeking = false;
|
||||
{
|
||||
int64_t oldPosition = -1;
|
||||
int64_t position = GetMediaTime();
|
||||
while (oldPosition != position &&
|
||||
mAudioEndTime - position > 0 &&
|
||||
mState != DECODER_STATE_SEEKING &&
|
||||
mState != DECODER_STATE_SHUTDOWN)
|
||||
{
|
||||
const int64_t DRAIN_BLOCK_USECS = 100000;
|
||||
Wait(std::min(mAudioEndTime - position, DRAIN_BLOCK_USECS));
|
||||
oldPosition = position;
|
||||
position = GetMediaTime();
|
||||
}
|
||||
seeking = mState == DECODER_STATE_SEEKING;
|
||||
}
|
||||
|
||||
if (!seeking && !mAudioStream->IsPaused()) {
|
||||
{
|
||||
ReentrantMonitorAutoExit exit(mDecoder->GetReentrantMonitor());
|
||||
mAudioStream->Drain();
|
||||
}
|
||||
// Fire one last event for any extra frames that didn't fill a framebuffer.
|
||||
mEventManager.Drain(mAudioEndTime);
|
||||
}
|
||||
}
|
||||
}
|
||||
DECODER_LOG(PR_LOG_DEBUG, ("%p Reached audio stream end.", mDecoder.get()));
|
||||
{
|
||||
// Must hold lock while shutting down and anulling the audio stream to prevent
|
||||
// state machine thread trying to use it while we're destroying it.
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
mAudioStream->Shutdown();
|
||||
mAudioStream = nullptr;
|
||||
mEventManager.Clear();
|
||||
if (!mAudioCaptured) {
|
||||
mAudioCompleted = true;
|
||||
UpdateReadyState();
|
||||
// Kick the decode thread; it may be sleeping waiting for this to finish.
|
||||
mDecoder->GetReentrantMonitor().NotifyAll();
|
||||
}
|
||||
}
|
||||
|
||||
DECODER_LOG(PR_LOG_DEBUG, ("%p Audio stream finished playing, audio thread exit", mDecoder.get()));
|
||||
}
|
||||
|
||||
uint32_t MediaDecoderStateMachine::PlaySilence(uint32_t aFrames,
|
||||
uint32_t aChannels,
|
||||
uint64_t aFrameOffset)
|
||||
|
||||
{
|
||||
NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
|
||||
NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
|
||||
uint32_t maxFrames = SILENCE_BYTES_CHUNK / aChannels / sizeof(AudioDataValue);
|
||||
uint32_t frames = std::min(aFrames, maxFrames);
|
||||
WriteSilence(mAudioStream, frames);
|
||||
// Dispatch events to the DOM for the audio just written.
|
||||
mEventManager.QueueWrittenAudioData(nullptr, frames * aChannels,
|
||||
(aFrameOffset + frames) * aChannels);
|
||||
return frames;
|
||||
}
|
||||
|
||||
uint32_t MediaDecoderStateMachine::PlayFromAudioQueue(uint64_t aFrameOffset,
|
||||
uint32_t aChannels)
|
||||
{
|
||||
NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
|
||||
NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
|
||||
nsAutoPtr<AudioData> audio(mReader->AudioQueue().PopFront());
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
NS_WARN_IF_FALSE(IsPlaying(), "Should be playing");
|
||||
// Awaken the decode loop if it's waiting for space to free up in the
|
||||
// audio queue.
|
||||
mDecoder->GetReentrantMonitor().NotifyAll();
|
||||
}
|
||||
int64_t offset = -1;
|
||||
uint32_t frames = 0;
|
||||
if (!PR_GetEnv("MOZ_QUIET")) {
|
||||
DECODER_LOG(PR_LOG_DEBUG, ("%p Decoder playing %d frames of data to stream for AudioData at %lld",
|
||||
mDecoder.get(), audio->mFrames, audio->mTime));
|
||||
}
|
||||
mAudioStream->Write(audio->mAudioData,
|
||||
audio->mFrames);
|
||||
|
||||
aChannels = mAudioStream->GetOutChannels();
|
||||
|
||||
StartAudioStreamPlaybackIfNeeded(mAudioStream);
|
||||
|
||||
offset = audio->mOffset;
|
||||
frames = audio->mFrames;
|
||||
|
||||
// Dispatch events to the DOM for the audio just written.
|
||||
mEventManager.QueueWrittenAudioData(audio->mAudioData.get(),
|
||||
audio->mFrames * aChannels,
|
||||
(aFrameOffset + frames) * aChannels);
|
||||
if (offset != -1) {
|
||||
mDecoder->UpdatePlaybackOffset(offset);
|
||||
}
|
||||
return frames;
|
||||
}
|
||||
|
||||
nsresult MediaDecoderStateMachine::Init(MediaDecoderStateMachine* aCloneDonor)
|
||||
{
|
||||
MediaDecoderReader* cloneReader = nullptr;
|
||||
|
@ -1010,9 +1314,9 @@ void MediaDecoderStateMachine::StopPlayback()
|
|||
|
||||
if (IsPlaying()) {
|
||||
mPlayDuration += DurationToUsecs(TimeStamp::Now() - mPlayStartTime);
|
||||
SetPlayStartTime(TimeStamp());
|
||||
mPlayStartTime = TimeStamp();
|
||||
}
|
||||
// Notify the audio sink, so that it notices that we've stopped playing,
|
||||
// Notify the audio thread, so that it notices that we've stopped playing,
|
||||
// so it can pause audio playback.
|
||||
mDecoder->GetReentrantMonitor().NotifyAll();
|
||||
NS_ASSERTION(!IsPlaying(), "Should report not playing at end of StopPlayback()");
|
||||
|
@ -1040,7 +1344,7 @@ void MediaDecoderStateMachine::StartPlayback()
|
|||
AssertCurrentThreadInMonitor();
|
||||
|
||||
mDecoder->NotifyPlaybackStarted();
|
||||
SetPlayStartTime(TimeStamp::Now());
|
||||
mPlayStartTime = TimeStamp::Now();
|
||||
|
||||
NS_ASSERTION(IsPlaying(), "Should report playing by end of StartPlayback()");
|
||||
if (NS_FAILED(StartAudioThread())) {
|
||||
|
@ -1082,7 +1386,7 @@ void MediaDecoderStateMachine::UpdatePlaybackPosition(int64_t aTime)
|
|||
}
|
||||
|
||||
// Notify DOM of any queued up audioavailable events
|
||||
mEventManager->DispatchPendingEvents(GetMediaTime());
|
||||
mEventManager.DispatchPendingEvents(GetMediaTime());
|
||||
|
||||
mMetadataManager.DispatchMetadataIfNeeded(mDecoder, aTime);
|
||||
|
||||
|
@ -1115,9 +1419,6 @@ void MediaDecoderStateMachine::SetVolume(double volume)
|
|||
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
mVolume = volume;
|
||||
if (mAudioSink) {
|
||||
mAudioSink->SetVolume(mVolume);
|
||||
}
|
||||
}
|
||||
|
||||
void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured)
|
||||
|
@ -1126,8 +1427,8 @@ void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured)
|
|||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
if (!mAudioCaptured && aCaptured && !mStopAudioThread) {
|
||||
// Make sure the state machine runs as soon as possible. That will
|
||||
// stop the audio sink.
|
||||
// If mStopAudioThread is true then we're already stopping the audio sink
|
||||
// stop the audio thread.
|
||||
// If mStopAudioThread is true then we're already stopping the audio thread
|
||||
// and since we set mAudioCaptured to true, nothing can start it again.
|
||||
ScheduleStateMachine();
|
||||
}
|
||||
|
@ -1255,9 +1556,6 @@ void MediaDecoderStateMachine::Shutdown()
|
|||
DECODER_LOG(PR_LOG_DEBUG, ("%p Changed state to SHUTDOWN", mDecoder.get()));
|
||||
ScheduleStateMachine();
|
||||
mState = DECODER_STATE_SHUTDOWN;
|
||||
if (mAudioSink) {
|
||||
mAudioSink->PrepareToShutdown();
|
||||
}
|
||||
mDecoder->GetReentrantMonitor().NotifyAll();
|
||||
}
|
||||
|
||||
|
@@ -1423,16 +1721,15 @@ void MediaDecoderStateMachine::StopAudioThread()

  mStopAudioThread = true;
  mDecoder->GetReentrantMonitor().NotifyAll();
  if (mAudioSink) {
  if (mAudioThread) {
    DECODER_LOG(PR_LOG_DEBUG, ("%p Shutdown audio thread", mDecoder.get()));
    mAudioSink->PrepareToShutdown();
    {
      ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
      mAudioSink->Shutdown();
      mAudioThread->Shutdown();
    }
    mAudioSink = nullptr;
    // Now that the audio sink is dead, try sending data to our MediaStream(s).
    // That may have been waiting for the audio sink to stop.
    mAudioThread = nullptr;
    // Now that the audio thread is dead, try sending data to our MediaStream(s).
    // That may have been waiting for the audio thread to stop.
    SendStreamData();
  }
}
|
@ -1516,20 +1813,20 @@ MediaDecoderStateMachine::StartAudioThread()
|
|||
}
|
||||
|
||||
mStopAudioThread = false;
|
||||
if (HasAudio() && !mAudioSink) {
|
||||
mAudioCompleted = false;
|
||||
mAudioSink = new AudioSink(this, mEventManager,
|
||||
mAudioStartTime, mInfo.mAudio, mDecoder->GetAudioChannelType());
|
||||
nsresult rv = mAudioSink->Init();
|
||||
if (HasAudio() && !mAudioThread) {
|
||||
nsresult rv = NS_NewNamedThread("Media Audio",
|
||||
getter_AddRefs(mAudioThread),
|
||||
nullptr,
|
||||
MEDIA_THREAD_STACK_SIZE);
|
||||
if (NS_FAILED(rv)) {
|
||||
DECODER_LOG(PR_LOG_DEBUG, ("%p Changed state to SHUTDOWN because failed to create audio sink", mDecoder.get()));
|
||||
DECODER_LOG(PR_LOG_DEBUG, ("%p Changed state to SHUTDOWN because failed to create audio thread", mDecoder.get()));
|
||||
mState = DECODER_STATE_SHUTDOWN;
|
||||
return rv;
|
||||
}
|
||||
|
||||
mAudioSink->SetVolume(mVolume);
|
||||
mAudioSink->SetPlaybackRate(mPlaybackRate);
|
||||
mAudioSink->SetPreservesPitch(mPreservesPitch);
|
||||
nsCOMPtr<nsIRunnable> event =
|
||||
NS_NewRunnableMethod(this, &MediaDecoderStateMachine::AudioLoop);
|
||||
mAudioThread->Dispatch(event, NS_DISPATCH_NORMAL);
|
||||
}
|
||||
return NS_OK;
|
||||
}
|
||||
|
@ -1595,7 +1892,7 @@ void MediaDecoderStateMachine::SetFrameBufferLength(uint32_t aLength)
|
|||
NS_ASSERTION(aLength >= 512 && aLength <= 16384,
|
||||
"The length must be between 512 and 16384");
|
||||
AssertCurrentThreadInMonitor();
|
||||
mEventManager->SetSignalBufferLength(aLength);
|
||||
mEventManager.SetSignalBufferLength(aLength);
|
||||
}
|
||||
|
||||
nsresult MediaDecoderStateMachine::DecodeMetadata()
|
||||
|
@ -1666,7 +1963,7 @@ nsresult MediaDecoderStateMachine::DecodeMetadata()
|
|||
// if there is audio, let the MozAudioAvailable event manager know about
|
||||
// the metadata.
|
||||
if (HasAudio()) {
|
||||
mEventManager->Init(mInfo.mAudio.mChannels, mInfo.mAudio.mRate);
|
||||
mEventManager.Init(mInfo.mAudio.mChannels, mInfo.mAudio.mRate);
|
||||
// Set the buffer length at the decoder level to be able, to be able
|
||||
// to retrive the value via media element method. The RequestFrameBufferLength
|
||||
// will call the MediaDecoderStateMachine::SetFrameBufferLength().
|
||||
|
@ -1743,7 +2040,7 @@ void MediaDecoderStateMachine::DecodeSeek()
|
|||
if (currentTimeChanged) {
|
||||
// The seek target is different than the current playback position,
|
||||
// we'll need to seek the playback position, so shutdown our decode
|
||||
// thread and audio sink.
|
||||
// and audio threads.
|
||||
StopAudioThread();
|
||||
ResetPlayback();
|
||||
nsresult res;
|
||||
|
@ -1876,11 +2173,11 @@ nsresult MediaDecoderStateMachine::RunStateMachine()
|
|||
StopPlayback();
|
||||
}
|
||||
StopAudioThread();
|
||||
// If mAudioSink is non-null after StopAudioThread completes, we are
|
||||
// If mAudioThread is non-null after StopAudioThread completes, we are
|
||||
// running in a nested event loop waiting for Shutdown() on
|
||||
// mAudioSink to complete. Return to the event loop and let it
|
||||
// mAudioThread to complete. Return to the event loop and let it
|
||||
// finish processing before continuing with shutdown.
|
||||
if (mAudioSink) {
|
||||
if (mAudioThread) {
|
||||
MOZ_ASSERT(mStopAudioThread);
|
||||
return NS_OK;
|
||||
}
|
||||
|
@ -2074,7 +2371,7 @@ nsresult MediaDecoderStateMachine::RunStateMachine()
|
|||
StopAudioThread();
|
||||
if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING) {
|
||||
int64_t videoTime = HasVideo() ? mVideoFrameEndTime : 0;
|
||||
int64_t clockTime = std::max(mEndTime, videoTime);
|
||||
int64_t clockTime = std::max(mEndTime, std::max(videoTime, GetAudioClock()));
|
||||
UpdatePlaybackPosition(clockTime);
|
||||
nsCOMPtr<nsIRunnable> event =
|
||||
NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackEnded);
|
||||
|
@ -2114,16 +2411,16 @@ MediaDecoderStateMachine::GetAudioClock()
|
|||
{
|
||||
NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread.");
|
||||
// We must hold the decoder monitor while using the audio stream off the
|
||||
// audio sink to ensure that it doesn't get destroyed on the audio sink
|
||||
// audio thread to ensure that it doesn't get destroyed on the audio thread
|
||||
// while we're using it.
|
||||
AssertCurrentThreadInMonitor();
|
||||
if (!HasAudio() || mAudioCaptured)
|
||||
return -1;
|
||||
if (!mAudioSink) {
|
||||
// Audio sink hasn't played any data yet.
|
||||
if (!mAudioStream) {
|
||||
// Audio thread hasn't played any data yet.
|
||||
return mAudioStartTime;
|
||||
}
|
||||
int64_t t = mAudioSink->GetPosition();
|
||||
int64_t t = mAudioStream->GetPosition();
|
||||
return (t == -1) ? -1 : t + mAudioStartTime;
|
||||
}
|
||||
|
||||
|
@ -2137,7 +2434,7 @@ int64_t MediaDecoderStateMachine::GetVideoStreamPosition()
|
|||
|
||||
// The playbackRate has been just been changed, reset the playstartTime.
|
||||
if (mResetPlayStartTime) {
|
||||
SetPlayStartTime(TimeStamp::Now());
|
||||
mPlayStartTime = TimeStamp::Now();
|
||||
mResetPlayStartTime = false;
|
||||
}
|
||||
|
||||
|
@ -2170,7 +2467,7 @@ int64_t MediaDecoderStateMachine::GetClock() {
|
|||
// Resync against the audio clock, while we're trusting the
|
||||
// audio clock. This ensures no "drift", particularly on Linux.
|
||||
mPlayDuration = clock_time - mStartTime;
|
||||
SetPlayStartTime(TimeStamp::Now());
|
||||
mPlayStartTime = TimeStamp::Now();
|
||||
} else {
|
||||
// Audio is disabled on this system. Sync to the system clock.
|
||||
clock_time = GetVideoStreamPosition();
|
||||
|
@ -2226,7 +2523,7 @@ void MediaDecoderStateMachine::AdvanceFrame()
|
|||
// Notify the decode thread that the video queue's buffers may have
|
||||
// free'd up space for more frames.
|
||||
mDecoder->GetReentrantMonitor().NotifyAll();
|
||||
OnPlaybackOffsetUpdate(frame->mOffset);
|
||||
mDecoder->UpdatePlaybackOffset(frame->mOffset);
|
||||
if (mReader->VideoQueue().GetSize() == 0)
|
||||
break;
|
||||
frame = mReader->VideoQueue().PeekFront();
|
||||
|
@ -2315,6 +2612,25 @@ void MediaDecoderStateMachine::AdvanceFrame()
|
|||
ScheduleStateMachine(remainingTime);
|
||||
}
|
||||
|
||||
void MediaDecoderStateMachine::Wait(int64_t aUsecs) {
|
||||
NS_ASSERTION(OnAudioThread(), "Only call on the audio thread");
|
||||
AssertCurrentThreadInMonitor();
|
||||
TimeStamp end = TimeStamp::Now() + UsecsToDuration(std::max<int64_t>(USECS_PER_MS, aUsecs));
|
||||
TimeStamp now;
|
||||
while ((now = TimeStamp::Now()) < end &&
|
||||
mState != DECODER_STATE_SHUTDOWN &&
|
||||
mState != DECODER_STATE_SEEKING &&
|
||||
!mStopAudioThread &&
|
||||
IsPlaying())
|
||||
{
|
||||
int64_t ms = static_cast<int64_t>(NS_round((end - now).ToSeconds() * 1000));
|
||||
if (ms == 0 || ms > UINT32_MAX) {
|
||||
break;
|
||||
}
|
||||
mDecoder->GetReentrantMonitor().Wait(PR_MillisecondsToInterval(static_cast<uint32_t>(ms)));
|
||||
}
|
||||
}
|
||||
|
||||
VideoData* MediaDecoderStateMachine::FindStartTime()
|
||||
{
|
||||
NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
|
||||
|
@ -2432,20 +2748,6 @@ bool MediaDecoderStateMachine::IsPausedAndDecoderWaiting() {
|
|||
(mState == DECODER_STATE_DECODING || mState == DECODER_STATE_BUFFERING);
|
||||
}
|
||||
|
||||
void MediaDecoderStateMachine::SetPlayStartTime(const TimeStamp& aTimeStamp)
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
mPlayStartTime = aTimeStamp;
|
||||
if (!mAudioSink) {
|
||||
return;
|
||||
}
|
||||
if (!mPlayStartTime.IsNull()) {
|
||||
mAudioSink->StartPlayback();
|
||||
} else {
|
||||
mAudioSink->StopPlayback();
|
||||
}
|
||||
}
|
||||
|
||||
nsresult MediaDecoderStateMachine::Run()
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
|
@ -2466,7 +2768,7 @@ nsresult MediaDecoderStateMachine::CallRunStateMachine()
|
|||
// This flag prevents us from dispatching
|
||||
mDispatchedRunEvent = false;
|
||||
|
||||
// If audio is being captured, stop the audio sink if it's running
|
||||
// If audio is being captured, stop the audio thread if it's running
|
||||
if (mAudioCaptured) {
|
||||
StopAudioThread();
|
||||
}
|
||||
|
@ -2588,7 +2890,7 @@ nsIThread* MediaDecoderStateMachine::GetStateMachineThread()
|
|||
void MediaDecoderStateMachine::NotifyAudioAvailableListener()
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
mEventManager->NotifyAudioAvailableListener();
|
||||
mEventManager.NotifyAudioAvailableListener();
|
||||
}
|
||||
|
||||
void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate)
|
||||
|
@ -2600,7 +2902,7 @@ void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate)
|
|||
|
||||
// We don't currently support more than two channels when changing playback
|
||||
// rate.
|
||||
if (mInfo.mAudio.mChannels > 2) {
|
||||
if (mAudioStream && mAudioStream->GetChannels() > 2) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2618,13 +2920,10 @@ void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate)
|
|||
}
|
||||
mPlayDuration = mBasePosition;
|
||||
mResetPlayStartTime = true;
|
||||
SetPlayStartTime(TimeStamp::Now());
|
||||
mPlayStartTime = TimeStamp::Now();
|
||||
}
|
||||
|
||||
mPlaybackRate = aPlaybackRate;
|
||||
if (mAudioSink) {
|
||||
mAudioSink->SetPlaybackRate(mPlaybackRate);
|
||||
}
|
||||
}
|
||||
|
||||
void MediaDecoderStateMachine::SetPreservesPitch(bool aPreservesPitch)
|
||||
|
@ -2633,9 +2932,8 @@ void MediaDecoderStateMachine::SetPreservesPitch(bool aPreservesPitch)
|
|||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
|
||||
mPreservesPitch = aPreservesPitch;
|
||||
if (mAudioSink) {
|
||||
mAudioSink->SetPreservesPitch(mPreservesPitch);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
bool MediaDecoderStateMachine::IsShutdown()
|
||||
|
@ -2663,29 +2961,5 @@ void MediaDecoderStateMachine::QueueMetadata(int64_t aPublishTime,
|
|||
mMetadataManager.QueueMetadata(metadata);
|
||||
}
|
||||
|
||||
void MediaDecoderStateMachine::OnAudioEndTimeUpdate(int64_t aAudioEndTime)
|
||||
{
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
MOZ_ASSERT(aAudioEndTime >= mAudioEndTime);
|
||||
mAudioEndTime = aAudioEndTime;
|
||||
}
|
||||
|
||||
void MediaDecoderStateMachine::OnPlaybackOffsetUpdate(int64_t aPlaybackOffset)
|
||||
{
|
||||
mDecoder->UpdatePlaybackOffset(aPlaybackOffset);
|
||||
}
|
||||
|
||||
void MediaDecoderStateMachine::OnAudioSinkComplete()
|
||||
{
|
||||
AssertCurrentThreadInMonitor();
|
||||
if (mAudioCaptured) {
|
||||
return;
|
||||
}
|
||||
mAudioCompleted = true;
|
||||
UpdateReadyState();
|
||||
// Kick the decode thread; it may be sleeping waiting for this to finish.
|
||||
mDecoder->GetReentrantMonitor().NotifyAll();
|
||||
}
|
||||
|
||||
} // namespace mozilla
|
||||
|
||||
|
|
|
@ -92,8 +92,6 @@ namespace mozilla {
|
|||
class AudioSegment;
|
||||
class VideoSegment;
|
||||
|
||||
class AudioSink;
|
||||
|
||||
/*
|
||||
The state machine class. This manages the decoding and seeking in the
|
||||
MediaDecoderReader on the decode thread, and A/V sync on the shared
|
||||
|
@ -108,8 +106,6 @@ class AudioSink;
|
|||
*/
|
||||
class MediaDecoderStateMachine : public nsRunnable
|
||||
{
|
||||
friend class AudioSink;
|
||||
|
||||
public:
|
||||
typedef MediaDecoder::DecodedStreamData DecodedStreamData;
|
||||
MediaDecoderStateMachine(MediaDecoder* aDecoder,
|
||||
|
@ -177,6 +173,9 @@ public:
|
|||
return IsCurrentThread(mDecodeThread);
|
||||
}
|
||||
bool OnStateMachineThread() const;
|
||||
bool OnAudioThread() const {
|
||||
return IsCurrentThread(mAudioThread);
|
||||
}
|
||||
|
||||
MediaDecoderOwner::NextFrameStatus GetNextFrameStatus();
|
||||
|
||||
|
@ -422,6 +421,16 @@ private:
|
|||
// Returns true if we recently exited "quick buffering" mode.
|
||||
bool JustExitedQuickBuffering();
|
||||
|
||||
// Waits on the decoder ReentrantMonitor for aUsecs microseconds. If the decoder
|
||||
// monitor is awoken by a Notify() call, we'll continue waiting, unless
|
||||
// we've moved into shutdown state. This enables us to ensure that we
|
||||
// wait for a specified time, and that the myriad of Notify()s we do on
|
||||
// the decoder monitor don't cause the audio thread to be starved. aUsecs
|
||||
// values of less than 1 millisecond are rounded up to 1 millisecond
|
||||
// (see bug 651023). The decoder monitor must be held. Called only on the
|
||||
// audio thread.
|
||||
void Wait(int64_t aUsecs);
|
||||
|
||||
// Dispatches an asynchronous event to update the media element's ready state.
|
||||
void UpdateReadyState();
|
||||
|
||||
|
@ -465,6 +474,22 @@ private:
|
|||
// state machine thread.
|
||||
void AdvanceFrame();
|
||||
|
||||
// Write aFrames of audio frames of silence to the audio hardware. Returns
|
||||
// the number of frames actually written. The write size is capped at
|
||||
// SILENCE_BYTES_CHUNK (32kB), so must be called in a loop to write the
|
||||
// desired number of frames. This ensures that the playback position
|
||||
// advances smoothly, and guarantees that we don't try to allocate an
|
||||
// impossibly large chunk of memory in order to play back silence. Called
|
||||
// on the audio thread.
|
||||
uint32_t PlaySilence(uint32_t aFrames,
|
||||
uint32_t aChannels,
|
||||
uint64_t aFrameOffset);
|
||||
|
||||
// Pops an audio chunk from the front of the audio queue, and pushes its
|
||||
// audio data to the audio hardware. MozAudioAvailable data is also queued
|
||||
// here. Called on the audio thread.
|
||||
uint32_t PlayFromAudioQueue(uint64_t aFrameOffset, uint32_t aChannels);
|
||||
|
||||
// Stops the decode thread, and if we have a pending request for a new
|
||||
// decode thread it is canceled. The decoder monitor must be held with exactly
|
||||
// one lock count. Called on the state machine thread.
|
||||
|
@ -484,6 +509,11 @@ private:
|
|||
// one lock count. Called on the state machine thread.
|
||||
nsresult StartAudioThread();
|
||||
|
||||
// The main loop for the audio thread. Sent to the thread as
|
||||
// an nsRunnableMethod. This continually does blocking writes to
|
||||
// to audio stream to play audio data.
|
||||
void AudioLoop();
|
||||
|
||||
// Sets internal state which causes playback of media to pause.
|
||||
// The decoder monitor must be held.
|
||||
void StopPlayback();
|
||||
|
@ -558,20 +588,6 @@ private:
|
|||
// case as it may not be needed again.
|
||||
bool IsPausedAndDecoderWaiting();
|
||||
|
||||
// Set the time that playback started from the system clock.
|
||||
// Can only be called on the state machine thread.
|
||||
void SetPlayStartTime(const TimeStamp& aTimeStamp);
|
||||
|
||||
// Update mAudioEndTime.
|
||||
void OnAudioEndTimeUpdate(int64_t aAudioEndTime);
|
||||
|
||||
// Update mDecoder's playback offset.
|
||||
void OnPlaybackOffsetUpdate(int64_t aPlaybackOffset);
|
||||
|
||||
// Called by the AudioSink to signal that all outstanding work is complete
|
||||
// and the sink is shutting down.
|
||||
void OnAudioSinkComplete();
|
||||
|
||||
// The decoder object that created this state machine. The state machine
|
||||
// holds a strong reference to the decoder to ensure that the decoder stays
|
||||
// alive once media element has started the decoder shutdown process, and has
|
||||
|
@ -589,6 +605,10 @@ private:
|
|||
// Accessed on state machine, audio, main, and AV thread.
|
||||
State mState;
|
||||
|
||||
// Thread for pushing audio onto the audio hardware.
|
||||
// The "audio push thread".
|
||||
nsCOMPtr<nsIThread> mAudioThread;
|
||||
|
||||
// Thread for decoding video in background. The "decode thread".
|
||||
nsCOMPtr<nsIThread> mDecodeThread;
|
||||
|
||||
|
@ -603,20 +623,20 @@ private:
|
|||
|
||||
// The time that playback started from the system clock. This is used for
|
||||
// timing the presentation of video frames when there's no audio.
|
||||
// Accessed only via the state machine thread. Must be set via SetPlayStartTime.
|
||||
// Accessed only via the state machine thread.
|
||||
TimeStamp mPlayStartTime;
|
||||
|
||||
// When the playbackRate changes, and there is no audio clock, it is necessary
|
||||
// to reset the mPlayStartTime. This is done next time the clock is queried,
|
||||
// when this member is true. Access protected by decoder monitor.
|
||||
bool mResetPlayStartTime;
|
||||
|
||||
// When we start writing decoded data to a new DecodedDataStream, or we
|
||||
// restart writing due to PlaybackStarted(), we record where we are in the
|
||||
// MediaStream and what that corresponds to in the media.
|
||||
StreamTime mSyncPointInMediaStream;
|
||||
int64_t mSyncPointInDecodedStream; // microseconds
|
||||
|
||||
// When the playbackRate changes, and there is no audio clock, it is necessary
|
||||
// to reset the mPlayStartTime. This is done next time the clock is queried,
|
||||
// when this member is true. Access protected by decoder monitor.
|
||||
bool mResetPlayStartTime;
|
||||
|
||||
// The amount of time we've spent playing already the media. The current
|
||||
// playback position is therefore |Now() - mPlayStartTime +
|
||||
// mPlayDuration|, which must be adjusted by mStartTime if used with media
|
||||
|
@ -647,7 +667,11 @@ private:
|
|||
// Media Fragment end time in microseconds. Access controlled by decoder monitor.
|
||||
int64_t mFragmentEndTime;
|
||||
|
||||
nsRefPtr<AudioSink> mAudioSink;
|
||||
// The audio stream resource. Used on the state machine, and audio threads.
|
||||
// This is created and destroyed on the audio thread, while holding the
|
||||
// decoder monitor, so if this is used off the audio thread, you must
|
||||
// first acquire the decoder monitor and check that it is non-null.
|
||||
nsAutoPtr<AudioStream> mAudioStream;
|
||||
|
||||
// The reader, don't call its methods with the decoder monitor held.
|
||||
// This is created in the play state machine's constructor, and destroyed
|
||||
|
@ -802,7 +826,7 @@ private:
|
|||
// Manager for queuing and dispatching MozAudioAvailable events. The
|
||||
// event manager is accessed from the state machine and audio threads,
|
||||
// and takes care of synchronizing access to its internal queue.
|
||||
nsRefPtr<AudioAvailableEventManager> mEventManager;
|
||||
AudioAvailableEventManager mEventManager;
|
||||
|
||||
// Stores presentation info required for playback. The decoder monitor
|
||||
// must be held when accessing this.
|
||||
|
|
|
@@ -111,7 +111,6 @@ UNIFIED_SOURCES += [
    'AudioNodeExternalInputStream.cpp',
    'AudioNodeStream.cpp',
    'AudioSegment.cpp',
    'AudioSink.cpp',
    'AudioStream.cpp',
    'AudioStreamTrack.cpp',
    'BufferDecoder.cpp',