Mirror of https://github.com/mozilla/gecko-dev.git
Backing out parts 6 and 7 of bug 779715 (5a87f1d1807d, 5a87f1d1807d) due to crashtest orange.
--HG-- extra : rebase_source : bb7acb9958452850bd9d36f8e58f956d065d4935
This commit is contained in:
Parent: 7614e7603f
Commit: 9152e8f482
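For context: the two patches being backed out had moved media-element capture onto a single decoder-owned SourceMediaStream (the DecodedStreamData in the hunks below), fanned out to one ProcessedMediaStream per consumer through a MediaInputPort. The backout returns to copying decoder output directly into one SourceMediaStream per consumer. A rough standalone sketch of the restored shape, with stub types standing in for the Gecko classes (illustration only, not Gecko code):

  #include <vector>

  // Stand-ins for Gecko types; only the shape matters here.
  struct MediaSegment {};
  struct SourceMediaStream {
    void AppendToTrack(int /*aTrackId*/, const MediaSegment& /*aSegment*/) {}
  };

  // Restored design: the decoder keeps a list of per-consumer output
  // streams and copies each decoded chunk into every one of them
  // (compare SendOutputStreamData in the state-machine hunks below).
  struct Decoder {
    std::vector<SourceMediaStream*> mOutputStreams;

    void AddOutputStream(SourceMediaStream* aStream, bool /*aFinishWhenEnded*/) {
      mOutputStreams.push_back(aStream);
    }

    void SendOutputStreamData(const MediaSegment& aChunk) {
      for (SourceMediaStream* s : mOutputStreams) {
        s->AppendToTrack(1, aChunk);  // TRACK_AUDIO == 1 in the patch below
      }
    }
  };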
@@ -553,6 +553,12 @@ void nsHTMLMediaElement::ShutdownDecoder()
   NS_ASSERTION(mDecoder, "Must have decoder to shut down");
   mDecoder->Shutdown();
   mDecoder = nullptr;
+  // Discard all output streams. mDecoder->Shutdown() will have finished all
+  // its output streams.
+  // XXX For now we ignore mFinishWhenEnded. We'll fix this later. The
+  // immediate goal is to not crash when reloading a media element with
+  // output streams.
+  mOutputStreams.Clear();
 }
 
 void nsHTMLMediaElement::AbortExistingLoads()
@@ -1518,20 +1524,16 @@ already_AddRefed<nsDOMMediaStream>
 nsHTMLMediaElement::CaptureStreamInternal(bool aFinishWhenEnded)
 {
   OutputMediaStream* out = mOutputStreams.AppendElement();
-  out->mStream = nsDOMMediaStream::CreateTrackUnionStream();
+  out->mStream = nsDOMMediaStream::CreateInputStream();
   nsRefPtr<nsIPrincipal> principal = GetCurrentPrincipal();
   out->mStream->CombineWithPrincipal(principal);
   out->mFinishWhenEnded = aFinishWhenEnded;
 
   mAudioCaptured = true;
-  // Block the output stream initially.
-  // Decoders are responsible for removing the block while they are playing
-  // back into the output stream.
-  out->mStream->GetStream()->ChangeExplicitBlockerCount(1);
   if (mDecoder) {
     mDecoder->SetAudioCaptured(true);
     mDecoder->AddOutputStream(
-      out->mStream->GetStream()->AsProcessedStream(), aFinishWhenEnded);
+      out->mStream->GetStream()->AsSourceStream(), aFinishWhenEnded);
   }
   nsRefPtr<nsDOMMediaStream> result = out->mStream;
   return result.forget();
@@ -2474,7 +2476,7 @@ nsresult nsHTMLMediaElement::FinishDecoderSetup(nsMediaDecoder* aDecoder,
   aDecoder->SetVolume(mMuted ? 0.0 : mVolume);
   for (PRUint32 i = 0; i < mOutputStreams.Length(); ++i) {
     OutputMediaStream* ms = &mOutputStreams[i];
-    aDecoder->AddOutputStream(ms->mStream->GetStream()->AsProcessedStream(),
+    aDecoder->AddOutputStream(ms->mStream->GetStream()->AsSourceStream(),
                               ms->mFinishWhenEnded);
   }
 
@@ -2826,14 +2828,6 @@ void nsHTMLMediaElement::PlaybackEnded()
 
   NS_ASSERTION(!mDecoder || mDecoder->IsEnded(),
                "Decoder fired ended, but not in ended state");
-
-  // Discard all output streams that have finished now.
-  for (PRInt32 i = mOutputStreams.Length() - 1; i >= 0; --i) {
-    if (mOutputStreams[i].mFinishWhenEnded) {
-      mOutputStreams.RemoveElementAt(i);
-    }
-  }
-
   if (mSrcStream || (mDecoder && mDecoder->IsInfinite())) {
     LOG(PR_LOG_DEBUG, ("%p, got duration by reaching the end of the resource", this));
     DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
@@ -58,126 +58,14 @@ void nsBuiltinDecoder::SetAudioCaptured(bool aCaptured)
   }
 }
 
-void nsBuiltinDecoder::ConnectDecodedStreamToOutputStream(OutputStreamData* aStream)
-{
-  NS_ASSERTION(!aStream->mPort, "Already connected?");
-
-  // The output stream must stay in sync with the decoded stream, so if
-  // either stream is blocked, we block the other.
-  aStream->mPort = aStream->mStream->AllocateInputPort(mDecodedStream->mStream,
-      MediaInputPort::FLAG_BLOCK_INPUT | MediaInputPort::FLAG_BLOCK_OUTPUT);
-  // Unblock the output stream now. While it's connected to mDecodedStream,
-  // mDecodedStream is responsible for controlling blocking.
-  aStream->mStream->ChangeExplicitBlockerCount(-1);
-}
-
-nsBuiltinDecoder::DecodedStreamData::DecodedStreamData(nsBuiltinDecoder* aDecoder,
-                                                       PRInt64 aInitialTime,
-                                                       SourceMediaStream* aStream)
-  : mLastAudioPacketTime(-1),
-    mLastAudioPacketEndTime(-1),
-    mAudioFramesWritten(0),
-    mInitialTime(aInitialTime),
-    mNextVideoTime(aInitialTime),
-    mStreamInitialized(false),
-    mHaveSentFinish(false),
-    mHaveSentFinishAudio(false),
-    mHaveSentFinishVideo(false),
-    mStream(aStream),
-    mMainThreadListener(new DecodedStreamMainThreadListener(aDecoder)),
-    mHaveBlockedForPlayState(false)
-{
-  mStream->AddMainThreadListener(mMainThreadListener);
-}
-
-nsBuiltinDecoder::DecodedStreamData::~DecodedStreamData()
-{
-  mStream->RemoveMainThreadListener(mMainThreadListener);
-  mStream->Destroy();
-}
-
-void nsBuiltinDecoder::DestroyDecodedStream()
+void nsBuiltinDecoder::AddOutputStream(SourceMediaStream* aStream, bool aFinishWhenEnded)
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
-  mReentrantMonitor.AssertCurrentThreadIn();
-
-  // All streams are having their SourceMediaStream disconnected, so they
-  // need to be explicitly blocked again.
-  for (PRUint32 i = 0; i < mOutputStreams.Length(); ++i) {
-    OutputStreamData& os = mOutputStreams[i];
-    // During cycle collection, nsDOMMediaStream can be destroyed and send
-    // its Destroy message before this decoder is destroyed. So we have to
-    // be careful not to send any messages after the Destroy().
-    if (!os.mStream->IsDestroyed()) {
-      os.mStream->ChangeExplicitBlockerCount(1);
-    }
-    // Explicitly remove all existing ports. This is not strictly necessary but it's
-    // good form.
-    os.mPort->Destroy();
-    os.mPort = nullptr;
-  }
-
-  mDecodedStream = nullptr;
-}
-
-void nsBuiltinDecoder::RecreateDecodedStream(PRInt64 aStartTimeUSecs)
-{
-  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
-  mReentrantMonitor.AssertCurrentThreadIn();
-  LOG(PR_LOG_DEBUG, ("nsBuiltinDecoder::RecreateDecodedStream this=%p aStartTimeUSecs=%lld!",
-                     this, (long long)aStartTimeUSecs));
-
-  DestroyDecodedStream();
-
-  mDecodedStream = new DecodedStreamData(this, aStartTimeUSecs,
-      MediaStreamGraph::GetInstance()->CreateInputStream(nullptr));
-
-  // Note that the delay between removing ports in DestroyDecodedStream
-  // and adding new ones won't cause a glitch since all graph operations
-  // between main-thread stable states take effect atomically.
-  for (PRUint32 i = 0; i < mOutputStreams.Length(); ++i) {
-    ConnectDecodedStreamToOutputStream(&mOutputStreams[i]);
-  }
-
-  mDecodedStream->mHaveBlockedForPlayState = mPlayState != PLAY_STATE_PLAYING;
-  if (mDecodedStream->mHaveBlockedForPlayState) {
-    mDecodedStream->mStream->ChangeExplicitBlockerCount(1);
-  }
-}
-
-void nsBuiltinDecoder::NotifyDecodedStreamMainThreadStateChanged()
-{
-  if (mTriggerPlaybackEndedWhenSourceStreamFinishes && mDecodedStream &&
-      mDecodedStream->mStream->IsFinished()) {
-    mTriggerPlaybackEndedWhenSourceStreamFinishes = false;
-    if (GetState() == PLAY_STATE_PLAYING) {
-      nsCOMPtr<nsIRunnable> event =
-        NS_NewRunnableMethod(this, &nsBuiltinDecoder::PlaybackEnded);
-      NS_DispatchToCurrentThread(event);
-    }
-  }
-}
-
-void nsBuiltinDecoder::AddOutputStream(ProcessedMediaStream* aStream,
-                                       bool aFinishWhenEnded)
-{
-  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
   LOG(PR_LOG_DEBUG, ("nsBuiltinDecoder::AddOutputStream this=%p aStream=%p!",
                      this, aStream));
 
   {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
-    if (!mDecodedStream) {
-      RecreateDecodedStream(mDecoderStateMachine ?
-          PRInt64(mDecoderStateMachine->GetCurrentTime()*USECS_PER_S) : 0);
-    }
-    OutputStreamData* os = mOutputStreams.AppendElement();
-    os->Init(aStream, aFinishWhenEnded);
-    ConnectDecodedStreamToOutputStream(os);
-    if (aFinishWhenEnded) {
-      // Ensure that aStream finishes the moment mDecodedStream does.
-      aStream->SetAutofinish(true);
-    }
+    OutputMediaStream* ms = mOutputStreams.AppendElement();
+    ms->Init(PRInt64(mCurrentTime*USECS_PER_S), aStream, aFinishWhenEnded);
   }
 
   // This can be called before Load(), in which case our mDecoderStateMachine
@@ -227,8 +115,7 @@ nsBuiltinDecoder::nsBuiltinDecoder() :
   mNextState(PLAY_STATE_PAUSED),
   mResourceLoaded(false),
   mIgnoreProgressData(false),
-  mInfiniteStream(false),
-  mTriggerPlaybackEndedWhenSourceStreamFinishes(false)
+  mInfiniteStream(false)
 {
   MOZ_COUNT_CTOR(nsBuiltinDecoder);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
@@ -258,11 +145,6 @@ void nsBuiltinDecoder::Shutdown()
 
   mShuttingDown = true;
 
-  {
-    ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-    DestroyDecodedStream();
-  }
-
   // This changes the decoder state to SHUTDOWN and does other things
   // necessary to unblock the state machine thread if it's blocked, so
   // the asynchronous shutdown in nsDestroyStateMachine won't deadlock.
@@ -660,36 +542,10 @@ bool nsBuiltinDecoder::IsEnded() const
 
 void nsBuiltinDecoder::PlaybackEnded()
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   if (mShuttingDown || mPlayState == nsBuiltinDecoder::PLAY_STATE_SEEKING)
     return;
 
-  {
-    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
-
-    if (mDecodedStream && !mDecodedStream->mStream->IsFinished()) {
-      // Wait for it to finish before firing PlaybackEnded()
-      mTriggerPlaybackEndedWhenSourceStreamFinishes = true;
-      return;
-    }
-
-    for (PRInt32 i = mOutputStreams.Length() - 1; i >= 0; --i) {
-      OutputStreamData& os = mOutputStreams[i];
-      if (os.mFinishWhenEnded) {
-        // Shouldn't really be needed since mDecodedStream should already have
-        // finished, but doesn't hurt.
-        os.mStream->Finish();
-        os.mPort->Destroy();
-        os.mPort = nullptr;
-        // Not really needed but it keeps the invariant that a stream not
-        // connected to mDecodedStream is explicity blocked.
-        os.mStream->ChangeExplicitBlockerCount(1);
-        mOutputStreams.RemoveElementAt(i);
-      }
-    }
-  }
-
+  printf("nsBuiltinDecoder::PlaybackEnded mPlayState=%d\n", mPlayState);
   PlaybackPositionChanged();
   ChangeState(PLAY_STATE_ENDED);
@@ -908,6 +764,7 @@ void nsBuiltinDecoder::SeekingStopped()
       seekWasAborted = true;
     } else {
       UnpinForSeek();
+      printf("nsBuiltinDecoder::SeekingStopped, next state=%d\n", mNextState);
       ChangeState(mNextState);
     }
   }
@@ -984,13 +841,6 @@ void nsBuiltinDecoder::ChangeState(PlayState aState)
     return;
   }
 
-  if (mDecodedStream) {
-    bool blockForPlayState = aState != PLAY_STATE_PLAYING;
-    if (mDecodedStream->mHaveBlockedForPlayState != blockForPlayState) {
-      mDecodedStream->mStream->ChangeExplicitBlockerCount(blockForPlayState ? 1 : -1);
-      mDecodedStream->mHaveBlockedForPlayState = blockForPlayState;
-    }
-  }
   mPlayState = aState;
   if (mDecoderStateMachine) {
     switch (aState) {
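The nsBuiltinDecoder methods above lean on two conventions: NS_ASSERTION(NS_IsMainThread(), ...) for thread affinity, and the RAII guard ReentrantMonitorAutoEnter for locking. If the guard is unfamiliar, a rough portable analogue (an illustration, not Gecko's implementation) looks like this:

  #include <mutex>

  class ReentrantMonitor {
  public:
    void Enter() { mMutex.lock(); }
    void Exit() { mMutex.unlock(); }
  private:
    std::recursive_mutex mMutex;  // same thread may re-enter without deadlock
  };

  class ReentrantMonitorAutoEnter {
  public:
    explicit ReentrantMonitorAutoEnter(ReentrantMonitor& aMonitor)
      : mMonitor(aMonitor) { mMonitor.Enter(); }      // lock on construction
    ~ReentrantMonitorAutoEnter() { mMonitor.Exit(); } // unlock when scope ends
  private:
    ReentrantMonitor& mMonitor;
  };

Entering the monitor in a scoped block, as AddOutputStream does above, guarantees the lock is released on every exit path.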
@@ -332,7 +332,6 @@ class nsBuiltinDecoder : public nsMediaDecoder
 {
 public:
   typedef mozilla::MediaChannelStatistics MediaChannelStatistics;
-  class DecodedStreamMainThreadListener;
 
   NS_DECL_ISUPPORTS
   NS_DECL_NSIOBSERVER
@@ -378,27 +377,29 @@ public:
   virtual void SetVolume(double aVolume);
   virtual void SetAudioCaptured(bool aCaptured);
 
-  // All MediaStream-related data is protected by mReentrantMonitor.
-  // We have at most one DecodedStreamData per nsBuiltinDecoder. Its stream
-  // is used as the input for each ProcessedMediaStream created by calls to
-  // captureStream(UntilEnded). Seeking creates a new source stream, as does
-  // replaying after the input as ended. In the latter case, the new source is
-  // not connected to streams created by captureStreamUntilEnded.
-
-  struct DecodedStreamData {
-    DecodedStreamData(nsBuiltinDecoder* aDecoder,
-                      PRInt64 aInitialTime, SourceMediaStream* aStream);
-    ~DecodedStreamData();
-
-    // The following group of fields are protected by the decoder's monitor
-    // and can be read or written on any thread.
+  virtual void AddOutputStream(SourceMediaStream* aStream, bool aFinishWhenEnded);
+  // Protected by mReentrantMonitor. All decoder output is copied to these streams.
+  struct OutputMediaStream {
+    void Init(PRInt64 aInitialTime, SourceMediaStream* aStream, bool aFinishWhenEnded)
+    {
+      mLastAudioPacketTime = -1;
+      mLastAudioPacketEndTime = -1;
+      mAudioFramesWrittenBaseTime = aInitialTime;
+      mAudioFramesWritten = 0;
+      mNextVideoTime = aInitialTime;
+      mStream = aStream;
+      mStreamInitialized = false;
+      mFinishWhenEnded = aFinishWhenEnded;
+      mHaveSentFinish = false;
+      mHaveSentFinishAudio = false;
+      mHaveSentFinishVideo = false;
+    }
     PRInt64 mLastAudioPacketTime; // microseconds
     PRInt64 mLastAudioPacketEndTime; // microseconds
     // Count of audio frames written to the stream
     PRInt64 mAudioFramesWritten;
-    // Saved value of aInitialTime. Timestamp of the first audio and/or
-    // video packet written.
-    PRInt64 mInitialTime; // microseconds
+    // Timestamp of the first audio packet whose frames we wrote.
+    PRInt64 mAudioFramesWrittenBaseTime; // microseconds
     // mNextVideoTime is the end timestamp for the last packet sent to the stream.
     // Therefore video packets starting at or after this time need to be copied
     // to the output stream.
@@ -406,78 +407,21 @@ public:
     // The last video image sent to the stream. Useful if we need to replicate
     // the image.
     nsRefPtr<Image> mLastVideoImage;
+    nsRefPtr<SourceMediaStream> mStream;
     gfxIntSize mLastVideoImageDisplaySize;
     // This is set to true when the stream is initialized (audio and
     // video tracks added).
     bool mStreamInitialized;
+    bool mFinishWhenEnded;
     bool mHaveSentFinish;
     bool mHaveSentFinishAudio;
     bool mHaveSentFinishVideo;
-
-    // The decoder is responsible for calling Destroy() on this stream.
-    // Can be read from any thread.
-    const nsRefPtr<SourceMediaStream> mStream;
-    // A listener object that receives notifications when mStream's
-    // main-thread-visible state changes. Used on the main thread only.
-    const nsRefPtr<DecodedStreamMainThreadListener> mMainThreadListener;
-    // True when we've explicitly blocked this stream because we're
-    // not in PLAY_STATE_PLAYING. Used on the main thread only.
-    bool mHaveBlockedForPlayState;
   };
-  struct OutputStreamData {
-    void Init(ProcessedMediaStream* aStream, bool aFinishWhenEnded)
-    {
-      mStream = aStream;
-      mFinishWhenEnded = aFinishWhenEnded;
-    }
-    nsRefPtr<ProcessedMediaStream> mStream;
-    // mPort connects mDecodedStream->mStream to our mStream.
-    nsRefPtr<MediaInputPort> mPort;
-    bool mFinishWhenEnded;
-  };
-  /**
-   * Connects mDecodedStream->mStream to aStream->mStream.
-   */
-  void ConnectDecodedStreamToOutputStream(OutputStreamData* aStream);
-  /**
-   * Disconnects mDecodedStream->mStream from all outputs and clears
-   * mDecodedStream.
-   */
-  void DestroyDecodedStream();
-  /**
-   * Recreates mDecodedStream. Call this to create mDecodedStream at first,
-   * and when seeking, to ensure a new stream is set up with fresh buffers.
-   * aStartTimeUSecs is relative to the state machine's mStartTime.
-   */
-  void RecreateDecodedStream(PRInt64 aStartTimeUSecs);
-  /**
-   * Called when the state of mDecodedStream as visible on the main thread
-   * has changed. In particular we want to know when the stream has finished
-   * so we can call PlaybackEnded.
-   */
-  void NotifyDecodedStreamMainThreadStateChanged();
-  nsTArray<OutputStreamData>& OutputStreams()
+  nsTArray<OutputMediaStream>& OutputStreams()
   {
     GetReentrantMonitor().AssertCurrentThreadIn();
     return mOutputStreams;
   }
-  DecodedStreamData* GetDecodedStream()
-  {
-    GetReentrantMonitor().AssertCurrentThreadIn();
-    return mDecodedStream;
-  }
-  class DecodedStreamMainThreadListener : public MainThreadMediaStreamListener {
-  public:
-    DecodedStreamMainThreadListener(nsBuiltinDecoder* aDecoder)
-      : mDecoder(aDecoder) {}
-    virtual void NotifyMainThreadStateChanged()
-    {
-      mDecoder->NotifyDecodedStreamMainThreadStateChanged();
-    }
-    nsBuiltinDecoder* mDecoder;
-  };
 
-  virtual void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
-
   virtual double GetDuration();
@@ -774,13 +718,7 @@ public:
   ReentrantMonitor mReentrantMonitor;
 
   // Data about MediaStreams that are being fed by this decoder.
-  nsTArray<OutputStreamData> mOutputStreams;
-  // The SourceMediaStream we are using to feed the mOutputStreams. This stream
-  // is never exposed outside the decoder.
-  // Only written on the main thread while holding the monitor. Therefore it
-  // can be read on any thread while holding the monitor, or on the main thread
-  // without holding the monitor.
-  nsAutoPtr<DecodedStreamData> mDecodedStream;
+  nsTArray<OutputMediaStream> mOutputStreams;
 
   // Set to one of the valid play states.
   // This can only be changed on the main thread while holding the decoder
@@ -812,10 +750,6 @@ public:
 
   // True if the stream is infinite (e.g. a webradio).
   bool mInfiniteStream;
-
-  // True if NotifyDecodedStreamMainThreadStateChanged should retrigger
-  // PlaybackEnded when mDecodedStream->mStream finishes.
-  bool mTriggerPlaybackEndedWhenSourceStreamFinishes;
 };
 
 #endif
@@ -427,8 +427,6 @@ nsBuiltinDecoderStateMachine::~nsBuiltinDecoderStateMachine()
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
   MOZ_COUNT_DTOR(nsBuiltinDecoderStateMachine);
-  NS_ASSERTION(!mPendingWakeDecoder.get(),
-               "WakeDecoder should have been revoked already");
   NS_ASSERTION(!StateMachineTracker::Instance().IsQueued(this),
                "Should not have a pending request for a new decode thread");
   NS_ASSERTION(!mRequestedNewDecodeThread,
@@ -497,11 +495,10 @@ void nsBuiltinDecoderStateMachine::DecodeThreadRun()
   LOG(PR_LOG_DEBUG, ("%p Decode thread finished", mDecoder.get()));
 }
 
-void nsBuiltinDecoderStateMachine::SendStreamAudio(AudioData* aAudio,
-                                                   DecodedStreamData* aStream,
-                                                   AudioSegment* aOutput)
+void nsBuiltinDecoderStateMachine::SendOutputStreamAudio(AudioData* aAudio,
+                                                         OutputMediaStream* aStream,
+                                                         AudioSegment* aOutput)
 {
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
 
   if (aAudio->mTime <= aStream->mLastAudioPacketTime) {
@@ -517,7 +514,7 @@ void nsBuiltinDecoderStateMachine::SendStreamAudio(AudioData* aAudio,
   // This logic has to mimic AudioLoop closely to make sure we write
   // the exact same silences
   CheckedInt64 audioWrittenOffset = UsecsToFrames(mInfo.mAudioRate,
-      aStream->mInitialTime + mStartTime) + aStream->mAudioFramesWritten;
+      aStream->mAudioFramesWrittenBaseTime + mStartTime) + aStream->mAudioFramesWritten;
   CheckedInt64 frameOffset = UsecsToFrames(mInfo.mAudioRate, aAudio->mTime);
   if (!audioWrittenOffset.isValid() || !frameOffset.isValid())
     return;
@@ -567,112 +564,111 @@ static const TrackID TRACK_AUDIO = 1;
 static const TrackID TRACK_VIDEO = 2;
 static const TrackRate RATE_VIDEO = USECS_PER_S;
 
-void nsBuiltinDecoderStateMachine::SendStreamData()
+void nsBuiltinDecoderStateMachine::SendOutputStreamData()
 {
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
 
-  DecodedStreamData* stream = mDecoder->GetDecodedStream();
-  if (!stream)
-    return;
-
   if (mState == DECODER_STATE_DECODING_METADATA)
     return;
 
+  nsTArray<OutputMediaStream>& streams = mDecoder->OutputStreams();
   PRInt64 minLastAudioPacketTime = PR_INT64_MAX;
-  SourceMediaStream* mediaStream = stream->mStream;
-  StreamTime endPosition = 0;
-
-  if (!stream->mStreamInitialized) {
-    if (mInfo.mHasAudio) {
-      AudioSegment* audio = new AudioSegment();
-      audio->Init(mInfo.mAudioChannels);
-      mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudioRate, 0, audio);
-    }
-    if (mInfo.mHasVideo) {
-      VideoSegment* video = new VideoSegment();
-      mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video);
-    }
-    stream->mStreamInitialized = true;
-  }
-
-  if (mInfo.mHasAudio) {
-    nsAutoTArray<AudioData*,10> audio;
-    // It's OK to hold references to the AudioData because while audio
-    // is captured, only the decoder thread pops from the queue (see below).
-    mReader->mAudioQueue.GetElementsAfter(stream->mLastAudioPacketTime, &audio);
-    AudioSegment output;
-    output.Init(mInfo.mAudioChannels);
-    for (PRUint32 i = 0; i < audio.Length(); ++i) {
-      SendStreamAudio(audio[i], stream, &output);
-    }
-    if (output.GetDuration() > 0) {
-      mediaStream->AppendToTrack(TRACK_AUDIO, &output);
-    }
-    if (mReader->mAudioQueue.IsFinished() && !stream->mHaveSentFinishAudio) {
-      mediaStream->EndTrack(TRACK_AUDIO);
-      stream->mHaveSentFinishAudio = true;
-    }
-    minLastAudioPacketTime = NS_MIN(minLastAudioPacketTime, stream->mLastAudioPacketTime);
-    endPosition = NS_MAX(endPosition,
-        TicksToTimeRoundDown(mInfo.mAudioRate, stream->mAudioFramesWritten));
-  }
-
-  if (mInfo.mHasVideo) {
-    nsAutoTArray<VideoData*,10> video;
-    // It's OK to hold references to the VideoData only the decoder thread
-    // pops from the queue.
-    mReader->mVideoQueue.GetElementsAfter(stream->mNextVideoTime + mStartTime, &video);
-    VideoSegment output;
-    for (PRUint32 i = 0; i < video.Length(); ++i) {
-      VideoData* v = video[i];
-      if (stream->mNextVideoTime + mStartTime < v->mTime) {
-        LOG(PR_LOG_DEBUG, ("%p Decoder writing last video to MediaStream %p for %lld ms",
-                           mDecoder.get(), mediaStream,
-                           v->mTime - (stream->mNextVideoTime + mStartTime)));
-        // Write last video frame to catch up. mLastVideoImage can be null here
-        // which is fine, it just means there's no video.
-        WriteVideoToMediaStream(stream->mLastVideoImage,
-            v->mTime - (stream->mNextVideoTime + mStartTime), stream->mLastVideoImageDisplaySize,
-            &output);
-        stream->mNextVideoTime = v->mTime - mStartTime;
-      }
-      if (stream->mNextVideoTime + mStartTime < v->mEndTime) {
-        LOG(PR_LOG_DEBUG, ("%p Decoder writing video frame %lld to MediaStream %p for %lld ms",
-                           mDecoder.get(), v->mTime, mediaStream,
-                           v->mEndTime - (stream->mNextVideoTime + mStartTime)));
-        WriteVideoToMediaStream(v->mImage,
-            v->mEndTime - (stream->mNextVideoTime + mStartTime), v->mDisplay,
-            &output);
-        stream->mNextVideoTime = v->mEndTime - mStartTime;
-        stream->mLastVideoImage = v->mImage;
-        stream->mLastVideoImageDisplaySize = v->mDisplay;
-      } else {
-        LOG(PR_LOG_DEBUG, ("%p Decoder skipping writing video frame %lld to MediaStream",
-                           mDecoder.get(), v->mTime));
-      }
-    }
-    if (output.GetDuration() > 0) {
-      mediaStream->AppendToTrack(TRACK_VIDEO, &output);
-    }
-    if (mReader->mVideoQueue.IsFinished() && !stream->mHaveSentFinishVideo) {
-      mediaStream->EndTrack(TRACK_VIDEO);
-      stream->mHaveSentFinishVideo = true;
-    }
-    endPosition = NS_MAX(endPosition,
-        TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime - stream->mInitialTime));
-  }
-
-  if (!stream->mHaveSentFinish) {
-    stream->mStream->AdvanceKnownTracksTime(endPosition);
-  }
 
   bool finished =
     (!mInfo.mHasAudio || mReader->mAudioQueue.IsFinished()) &&
     (!mInfo.mHasVideo || mReader->mVideoQueue.IsFinished());
-  if (finished && !stream->mHaveSentFinish) {
-    stream->mHaveSentFinish = true;
-    stream->mStream->Finish();
+
+  for (PRUint32 i = 0; i < streams.Length(); ++i) {
+    OutputMediaStream* stream = &streams[i];
+    SourceMediaStream* mediaStream = stream->mStream;
+    StreamTime endPosition = 0;
+
+    if (!stream->mStreamInitialized) {
+      if (mInfo.mHasAudio) {
+        AudioSegment* audio = new AudioSegment();
+        audio->Init(mInfo.mAudioChannels);
+        mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudioRate, 0, audio);
+      }
+      if (mInfo.mHasVideo) {
+        VideoSegment* video = new VideoSegment();
+        mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video);
+      }
+      stream->mStreamInitialized = true;
+    }
+
+    if (mInfo.mHasAudio) {
+      nsAutoTArray<AudioData*,10> audio;
+      // It's OK to hold references to the AudioData because while audio
+      // is captured, only the decoder thread pops from the queue (see below).
+      mReader->mAudioQueue.GetElementsAfter(stream->mLastAudioPacketTime, &audio);
+      AudioSegment output;
+      output.Init(mInfo.mAudioChannels);
+      for (PRUint32 i = 0; i < audio.Length(); ++i) {
+        SendOutputStreamAudio(audio[i], stream, &output);
+      }
+      if (output.GetDuration() > 0) {
+        mediaStream->AppendToTrack(TRACK_AUDIO, &output);
+      }
+      if (mReader->mAudioQueue.IsFinished() && !stream->mHaveSentFinishAudio) {
+        mediaStream->EndTrack(TRACK_AUDIO);
+        stream->mHaveSentFinishAudio = true;
+      }
+      minLastAudioPacketTime = NS_MIN(minLastAudioPacketTime, stream->mLastAudioPacketTime);
+      endPosition = NS_MAX(endPosition,
+          TicksToTimeRoundDown(mInfo.mAudioRate, stream->mAudioFramesWritten));
+    }
+
+    if (mInfo.mHasVideo) {
+      nsAutoTArray<VideoData*,10> video;
+      // It's OK to hold references to the VideoData only the decoder thread
+      // pops from the queue.
+      mReader->mVideoQueue.GetElementsAfter(stream->mNextVideoTime + mStartTime, &video);
+      VideoSegment output;
+      for (PRUint32 i = 0; i < video.Length(); ++i) {
+        VideoData* v = video[i];
+        if (stream->mNextVideoTime + mStartTime < v->mTime) {
+          LOG(PR_LOG_DEBUG, ("%p Decoder writing last video to MediaStream for %lld ms",
+                             mDecoder.get(), v->mTime - (stream->mNextVideoTime + mStartTime)));
+          // Write last video frame to catch up. mLastVideoImage can be null here
+          // which is fine, it just means there's no video.
+          WriteVideoToMediaStream(stream->mLastVideoImage,
+              v->mTime - (stream->mNextVideoTime + mStartTime), stream->mLastVideoImageDisplaySize,
+              &output);
+          stream->mNextVideoTime = v->mTime - mStartTime;
+        }
+        if (stream->mNextVideoTime + mStartTime < v->mEndTime) {
+          LOG(PR_LOG_DEBUG, ("%p Decoder writing video frame %lld to MediaStream",
+                             mDecoder.get(), v->mTime));
+          WriteVideoToMediaStream(v->mImage,
+              v->mEndTime - (stream->mNextVideoTime + mStartTime), v->mDisplay,
+              &output);
+          stream->mNextVideoTime = v->mEndTime - mStartTime;
+          stream->mLastVideoImage = v->mImage;
+          stream->mLastVideoImageDisplaySize = v->mDisplay;
+        } else {
+          LOG(PR_LOG_DEBUG, ("%p Decoder skipping writing video frame %lld to MediaStream",
+                             mDecoder.get(), v->mTime));
+        }
+      }
+      if (output.GetDuration() > 0) {
+        mediaStream->AppendToTrack(TRACK_VIDEO, &output);
+      }
+      if (mReader->mVideoQueue.IsFinished() && !stream->mHaveSentFinishVideo) {
+        mediaStream->EndTrack(TRACK_VIDEO);
+        stream->mHaveSentFinishVideo = true;
+      }
+      endPosition = NS_MAX(endPosition,
+          TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime));
+    }
+
+    if (!stream->mHaveSentFinish) {
+      stream->mStream->AdvanceKnownTracksTime(endPosition);
+    }
+
+    if (finished && !stream->mHaveSentFinish) {
+      stream->mHaveSentFinish = true;
+      stream->mStream->Finish();
+    }
   }
 
   if (mAudioCaptured) {
@@ -702,15 +698,31 @@ void nsBuiltinDecoderStateMachine::SendStreamData()
   }
 }
 
-nsBuiltinDecoderStateMachine::WakeDecoderRunnable*
-nsBuiltinDecoderStateMachine::GetWakeDecoderRunnable()
+void nsBuiltinDecoderStateMachine::FinishOutputStreams()
 {
   mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
 
-  if (!mPendingWakeDecoder.get()) {
-    mPendingWakeDecoder = new WakeDecoderRunnable(this);
+  // Tell all our output streams that all tracks have ended and we've
+  // finished.
+  nsTArray<OutputMediaStream>& streams = mDecoder->OutputStreams();
+  for (PRUint32 i = 0; i < streams.Length(); ++i) {
+    OutputMediaStream* stream = &streams[i];
+    if (!stream->mStreamInitialized) {
+      continue;
+    }
+    SourceMediaStream* mediaStream = stream->mStream;
+    if (mInfo.mHasAudio && !stream->mHaveSentFinishAudio) {
+      mediaStream->EndTrack(TRACK_AUDIO);
+      stream->mHaveSentFinishAudio = true;
+    }
+    if (mInfo.mHasVideo && !stream->mHaveSentFinishVideo) {
+      mediaStream->EndTrack(TRACK_VIDEO);
+      stream->mHaveSentFinishVideo = true;
+    }
+    // XXX ignoring mFinishWhenEnded for now. Immediate goal is to not crash.
+    if (!stream->mHaveSentFinish) {
+      mediaStream->Finish();
+      stream->mHaveSentFinish = true;
+    }
   }
-  return mPendingWakeDecoder.get();
 }
 
 bool nsBuiltinDecoderStateMachine::HaveEnoughDecodedAudio(PRInt64 aAmpleAudioUSecs)
@@ -725,15 +737,24 @@ bool nsBuiltinDecoderStateMachine::HaveEnoughDecodedAudio(PRInt64 aAmpleAudioUSe
     return true;
   }
 
-  DecodedStreamData* stream = mDecoder->GetDecodedStream();
-  if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishAudio) {
-    if (!stream->mStream->HaveEnoughBuffered(TRACK_AUDIO)) {
+  nsTArray<OutputMediaStream>& streams = mDecoder->OutputStreams();
+  for (PRUint32 i = 0; i < streams.Length(); ++i) {
+    OutputMediaStream* stream = &streams[i];
+    if (stream->mStreamInitialized && !stream->mHaveSentFinishAudio &&
+        !stream->mStream->HaveEnoughBuffered(TRACK_AUDIO)) {
      return false;
    }
-    stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO,
-        GetStateMachineThread(), GetWakeDecoderRunnable());
  }
+
+  nsIThread* thread = GetStateMachineThread();
+  nsCOMPtr<nsIRunnable> callback = NS_NewRunnableMethod(this,
+    &nsBuiltinDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder);
+  for (PRUint32 i = 0; i < streams.Length(); ++i) {
+    OutputMediaStream* stream = &streams[i];
+    if (stream->mStreamInitialized && !stream->mHaveSentFinishAudio) {
+      stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO, thread, callback);
+    }
+  }
   return true;
 }
@@ -745,15 +766,28 @@ bool nsBuiltinDecoderStateMachine::HaveEnoughDecodedVideo()
     return false;
   }
 
-  DecodedStreamData* stream = mDecoder->GetDecodedStream();
-  if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishVideo) {
-    if (!stream->mStream->HaveEnoughBuffered(TRACK_VIDEO)) {
-      return false;
-    }
-    stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO,
-        GetStateMachineThread(), GetWakeDecoderRunnable());
+  nsTArray<OutputMediaStream>& streams = mDecoder->OutputStreams();
+  if (streams.IsEmpty()) {
+    return true;
+  }
+
+  for (PRUint32 i = 0; i < streams.Length(); ++i) {
+    OutputMediaStream* stream = &streams[i];
+    if (stream->mStreamInitialized && !stream->mHaveSentFinishVideo &&
+        !stream->mStream->HaveEnoughBuffered(TRACK_VIDEO)) {
+      return false;
+    }
   }
+
+  nsIThread* thread = GetStateMachineThread();
+  nsCOMPtr<nsIRunnable> callback = NS_NewRunnableMethod(this,
+    &nsBuiltinDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder);
+  for (PRUint32 i = 0; i < streams.Length(); ++i) {
+    OutputMediaStream* stream = &streams[i];
+    if (stream->mStreamInitialized && !stream->mHaveSentFinishVideo) {
+      stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO, thread, callback);
+    }
+  }
   return true;
 }
@@ -880,7 +914,7 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
       audioPlaying = mReader->DecodeAudioData();
     }
 
-    SendStreamData();
+    SendOutputStreamData();
 
     // Notify to ensure that the AudioLoop() is not waiting, in case it was
    // waiting for more audio to be decoded.
@@ -1490,7 +1524,6 @@ void nsBuiltinDecoderStateMachine::Seek(double aTime)
   mSeekTime = NS_MAX(mStartTime, mSeekTime);
   LOG(PR_LOG_DEBUG, ("%p Changed state to SEEKING (to %f)", mDecoder.get(), aTime));
   mState = DECODER_STATE_SEEKING;
-  mDecoder->RecreateDecodedStream(mSeekTime - mStartTime);
   ScheduleStateMachine();
 }
 
@@ -1967,11 +2000,12 @@ nsresult nsBuiltinDecoderStateMachine::RunStateMachine()
       }
       StopAudioThread();
      StopDecodeThread();
-      // Now that those threads are stopped, there's no possibility of
-      // mPendingWakeDecoder being needed again. Revoke it.
-      mPendingWakeDecoder = nullptr;
      NS_ASSERTION(mState == DECODER_STATE_SHUTDOWN,
                   "How did we escape from the shutdown state?");
+      // Need to call this before dispatching nsDispatchDisposeEvent below, to
+      // ensure that any notifications dispatched by the stream graph
+      // will run before nsDispatchDisposeEvent below.
+      FinishOutputStreams();
      // We must daisy-chain these events to destroy the decoder. We must
      // destroy the decoder on the main thread, but we can't destroy the
      // decoder while this thread holds the decoder monitor. We can't
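The audioWrittenOffset/frameOffset computation in the SendOutputStreamAudio hunk above decides how much silence must be written when a decoded packet starts later than the frames already sent to the stream. A minimal standalone rendition of that arithmetic (illustrative values; Gecko's version wraps the multiplications in CheckedInt64 to catch overflow):

  #include <cstdint>
  #include <cstdio>

  int64_t UsecsToFrames(int64_t aRate, int64_t aUsecs) {
    return aUsecs * aRate / 1000000;  // microseconds -> sample frames
  }

  int main() {
    const int64_t rate = 44100;        // audio sample rate (frames per second)
    int64_t baseTimeUsecs = 0;         // timestamp where writing began
    int64_t framesWritten = 22050;     // frames already written: 0.5 s
    int64_t packetTimeUsecs = 750000;  // next decoded packet starts at 0.75 s

    int64_t audioWrittenOffset = UsecsToFrames(rate, baseTimeUsecs) + framesWritten;
    int64_t frameOffset = UsecsToFrames(rate, packetTimeUsecs);
    if (frameOffset > audioWrittenOffset) {
      // Pad the gap with silence so stream time stays aligned with packet time.
      printf("insert %lld frames of silence\n",
             (long long)(frameOffset - audioWrittenOffset));  // 11025 here
    }
    return 0;
  }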
@@ -106,7 +106,7 @@ public:
   typedef mozilla::TimeStamp TimeStamp;
   typedef mozilla::TimeDuration TimeDuration;
   typedef mozilla::VideoFrameContainer VideoFrameContainer;
-  typedef nsBuiltinDecoder::DecodedStreamData DecodedStreamData;
+  typedef nsBuiltinDecoder::OutputMediaStream OutputMediaStream;
   typedef mozilla::SourceMediaStream SourceMediaStream;
   typedef mozilla::AudioSegment AudioSegment;
   typedef mozilla::VideoSegment VideoSegment;
@@ -260,46 +260,12 @@ public:
 
   // Copy queued audio/video data in the reader to any output MediaStreams that
   // need it.
-  void SendStreamData();
-  void FinishStreamData();
+  void SendOutputStreamData();
+  void FinishOutputStreams();
   bool HaveEnoughDecodedAudio(PRInt64 aAmpleAudioUSecs);
   bool HaveEnoughDecodedVideo();
 
 protected:
-  class WakeDecoderRunnable : public nsRunnable {
-  public:
-    WakeDecoderRunnable(nsBuiltinDecoderStateMachine* aSM)
-      : mMutex("WakeDecoderRunnable"), mStateMachine(aSM) {}
-    NS_IMETHOD Run()
-    {
-      nsRefPtr<nsBuiltinDecoderStateMachine> stateMachine;
-      {
-        // Don't let Run() (called by media stream graph thread) race with
-        // Revoke() (called by decoder state machine thread)
-        MutexAutoLock lock(mMutex);
-        if (!mStateMachine)
-          return NS_OK;
-        stateMachine = mStateMachine;
-      }
-      stateMachine->ScheduleStateMachineWithLockAndWakeDecoder();
-      return NS_OK;
-    }
-    void Revoke()
-    {
-      MutexAutoLock lock(mMutex);
-      mStateMachine = nullptr;
-    }
-
-    Mutex mMutex;
-    // Protected by mMutex.
-    // We don't use an owning pointer here, because keeping mStateMachine alive
-    // would mean in some cases we'd have to destroy mStateMachine from this
-    // object, which would be problematic since nsBuiltinDecoderStateMachine can
-    // only be destroyed on the main thread whereas this object can be destroyed
-    // on the media stream graph thread.
-    nsBuiltinDecoderStateMachine* mStateMachine;
-  };
-  WakeDecoderRunnable* GetWakeDecoderRunnable();
 
   // Returns true if we've got less than aAudioUsecs microseconds of decoded
   // and playable data. The decoder monitor must be held.
@@ -464,8 +430,8 @@ protected:
 
   // Copy audio from an AudioData packet to aOutput. This may require
   // inserting silence depending on the timing of the audio packet.
-  void SendStreamAudio(AudioData* aAudio, DecodedStreamData* aStream,
-                       AudioSegment* aOutput);
+  void SendOutputStreamAudio(AudioData* aAudio, OutputMediaStream* aStream,
+                             AudioSegment* aOutput);
 
   // State machine thread run function. Defers to RunStateMachine().
   nsresult CallRunStateMachine();
@@ -564,13 +530,6 @@ protected:
   // in the play state machine's destructor.
   nsAutoPtr<nsBuiltinDecoderReader> mReader;
 
-  // Accessed only on the state machine thread.
-  // Not an nsRevocableEventPtr since we must Revoke() it well before
-  // this object is destroyed, anyway.
-  // Protected by decoder monitor except during the SHUTDOWN state after the
-  // decoder thread has been stopped.
-  nsRefPtr<WakeDecoderRunnable> mPendingWakeDecoder;
-
   // The time of the current frame in microseconds. This is referenced from
   // 0 which is the initial playback position. Set by the state machine
   // thread, and read-only from the main thread to get the current
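The WakeDecoderRunnable class removed in the header hunk above exists so the media graph thread can wake the state machine without racing its teardown: Run() and Revoke() both take a mutex, and Run() grabs a strong reference before leaving the critical section. The same pattern in portable C++ (a sketch only; Gecko's version deliberately keeps a non-owning raw pointer because the state machine may only be destroyed on the main thread, whereas this sketch uses shared_ptr for simplicity):

  #include <memory>
  #include <mutex>

  class StateMachine {
  public:
    void ScheduleWithLockAndWakeDecoder() { /* wake the decoder */ }
  };

  class WakeRunnable {
  public:
    explicit WakeRunnable(std::shared_ptr<StateMachine> aSM)
      : mStateMachine(std::move(aSM)) {}

    void Run() {  // called on the graph thread
      std::shared_ptr<StateMachine> sm;
      {
        // Don't let Run() race with Revoke() on another thread.
        std::lock_guard<std::mutex> lock(mMutex);
        if (!mStateMachine)
          return;            // revoked: target must not be touched
        sm = mStateMachine;  // strong ref keeps the target alive past the lock
      }
      sm->ScheduleWithLockAndWakeDecoder();
    }

    void Revoke() {  // called on the state machine thread
      std::lock_guard<std::mutex> lock(mMutex);
      mStateMachine.reset();
    }

  private:
    std::mutex mMutex;
    std::shared_ptr<StateMachine> mStateMachine;  // protected by mMutex
  };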
@@ -51,15 +51,6 @@ nsDOMMediaStream::CreateInputStream()
   return stream.forget();
 }
 
-already_AddRefed<nsDOMMediaStream>
-nsDOMMediaStream::CreateTrackUnionStream()
-{
-  nsRefPtr<nsDOMMediaStream> stream = new nsDOMMediaStream();
-  MediaStreamGraph* gm = MediaStreamGraph::GetInstance();
-  stream->mStream = gm->CreateTrackUnionStream(stream);
-  return stream.forget();
-}
-
 bool
 nsDOMMediaStream::CombineWithPrincipal(nsIPrincipal* aPrincipal)
 {
@@ -55,11 +55,6 @@ public:
    */
   static already_AddRefed<nsDOMMediaStream> CreateInputStream();
 
-  /**
-   * Create an nsDOMMediaStream whose underlying stream is a TrackUnionStream.
-   */
-  static already_AddRefed<nsDOMMediaStream> CreateTrackUnionStream();
-
 protected:
   // MediaStream is owned by the graph, but we tell it when to die, and it won't
   // die until we let it.
@@ -41,9 +41,6 @@ public:
   typedef mozilla::MediaResource MediaResource;
   typedef mozilla::ReentrantMonitor ReentrantMonitor;
   typedef mozilla::SourceMediaStream SourceMediaStream;
-  typedef mozilla::ProcessedMediaStream ProcessedMediaStream;
-  typedef mozilla::MediaInputPort MediaInputPort;
-  typedef mozilla::MainThreadMediaStreamListener MainThreadMediaStreamListener;
   typedef mozilla::TimeStamp TimeStamp;
   typedef mozilla::TimeDuration TimeDuration;
   typedef mozilla::VideoFrameContainer VideoFrameContainer;
@@ -105,10 +102,7 @@ public:
   virtual void SetAudioCaptured(bool aCaptured) = 0;
 
   // Add an output stream. All decoder output will be sent to the stream.
-  // The stream is initially blocked. The decoder is responsible for unblocking
-  // it while it is playing back.
-  virtual void AddOutputStream(ProcessedMediaStream* aStream,
-                               bool aFinishWhenEnded) = 0;
+  virtual void AddOutputStream(SourceMediaStream* aStream, bool aFinishWhenEnded) = 0;
 
   // Start playback of a video. 'Load' must have previously been
   // called.
@@ -9,16 +9,14 @@
 <body>
 <video id="v"></video>
 <video id="vout"></video>
-<video id="vout_untilended"></video>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 SimpleTest.waitForExplicitFinish();
 
 var v = document.getElementById('v');
 var vout = document.getElementById('vout');
-var vout_untilended = document.getElementById('vout_untilended');
-vout.src = v.mozCaptureStream();
-vout_untilended.src = v.mozCaptureStreamUntilEnded();
+var stream = v.mozCaptureStream();
+vout.src = stream;
 
 function dumpEvent(event) {
   dump("GOT EVENT " + event.type + " currentTime=" + event.target.currentTime +
@@ -29,62 +27,41 @@ var events = ["timeupdate", "seeking", "seeked", "ended", "playing", "pause"];
 for (var i = 0; i < events.length; ++i) {
   v.addEventListener(events[i], dumpEvent, false);
 }
-function isWithinEps(a, b, msg) {
-  ok(Math.abs(a - b) < 0.01,
-     "Got " + a + ", expected " + b + "; " + msg);
-}
 
 function startTest(test) {
   var seekTime = test.duration/2;
 
-  function endedAfterReplay() {
-    isWithinEps(v.currentTime, test.duration, "checking v.currentTime at third 'ended' event");
-    isWithinEps(vout.currentTime, (v.currentTime - seekTime) + test.duration*2,
-                "checking vout.currentTime after seeking, playing through and reloading");
+  function ended() {
+    ok(true, "Final ended after changing src");
     SimpleTest.finish();
   };
-  function endedAfterSeek() {
-    isWithinEps(v.currentTime, test.duration, "checking v.currentTime at second 'ended' event");
-    isWithinEps(vout.currentTime, (v.currentTime - seekTime) + test.duration,
-                "checking vout.currentTime after seeking and playing through again");
-    v.removeEventListener("ended", endedAfterSeek, false);
-    v.addEventListener("ended", endedAfterReplay, false);
+  function timeupdateAfterSeek() {
+    if (v.currentTime < seekTime + 0.001)
+      return;
+    ok(true, "timeupdate after seek");
+    v.removeEventListener("timeupdate", timeupdateAfterSeek, false);
     v.src = test.name + "?1";
     v.play();
+    v.addEventListener("ended", ended, false);
   };
   function seeked() {
-    isWithinEps(v.currentTime, seekTime, "Finished seeking");
-    isWithinEps(vout.currentTime, test.duration,
-                "checking vout.currentTime has not changed after seeking");
+    ok(true, "Finished seeking");
     v.removeEventListener("seeked", seeked, false);
-    function dontPlayAgain() {
-      ok(false, "vout_untilended should not play again");
-    }
-    vout_untilended.addEventListener("playing", dontPlayAgain, false);
-    vout_untilended.addEventListener("ended", dontPlayAgain, false);
-    v.addEventListener("ended", endedAfterSeek, false);
     v.play();
+    v.addEventListener("timeupdate", timeupdateAfterSeek, false);
   };
-  function ended() {
-    isWithinEps(vout.currentTime, test.duration, "checking vout.currentTime at first 'ended' event");
-    isWithinEps(v.currentTime, test.duration, "checking v.currentTime at first 'ended' event");
-    is(vout.ended, false, "checking vout has not ended");
-    is(vout_untilended.ended, true, "checking vout_untilended has actually ended");
-    vout_untilended.removeEventListener("ended", ended, false);
-    v.pause();
+  function timeupdate() {
+    if (v.currentTime == 0)
+      return;
+    ok(true, "Initial timeupdate");
+    v.removeEventListener("timeupdate", timeupdate, false);
     v.currentTime = seekTime;
     v.addEventListener("seeked", seeked, false);
   };
-  vout_untilended.addEventListener("ended", ended, false);
+  v.addEventListener("timeupdate", timeupdate, false);
 
   v.src = test.name;
   v.play();
+  function checkNoEnded() {
+    ok(false, "ended event received unexpectedly");
+  };
+  vout.addEventListener("ended", checkNoEnded, false);
   vout.play();
-  vout_untilended.play();
 }
 
 var testVideo = getPlayableVideo(gSmallTests);