Bug 664918. Part 8: Add mozCaptureStream()/mozCaptureStreamUntilEnded() APIs to HTML media elements, returning a MediaStream representing the contents of the media element. r=cpearce,jesup

This is currently not fully functional. The MediaStream always ends when the underlying resource ends. You can't use these APIs on a media element
whose src is a MediaStream. Seeking or pausing the resource will cause problems. The media element does not play back in sync with the MediaStream.
Robert O'Callahan 2012-04-30 15:12:42 +12:00
Parent 53a92c834a
Commit 9eef1b9f61
17 changed files with 691 additions and 62 deletions
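In web content, the new API surfaces as mozCaptureStream() and mozCaptureStreamUntilEnded() on <audio>/<video> elements, each returning a MediaStream that mirrors the element's output. A minimal hedged sketch of the C++ side of that contract, using only the XPCOM interface added below (the helper name CaptureUntilEnded is illustrative, not part of the patch):

#include "nsIDOMHTMLMediaElement.h"
#include "nsIDOMMediaStream.h"

// Capture an element's output until its resource ends. The "UntilEnded"
// variant finishes the returned MediaStream when playback ends; plain
// MozCaptureStream() leaves the stream open indefinitely.
static nsresult
CaptureUntilEnded(nsIDOMHTMLMediaElement* aElement,
                  nsIDOMMediaStream** aResult)
{
  return aElement->MozCaptureStreamUntilEnded(aResult);
}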

View file

@ -265,6 +265,9 @@ public:
// Returns null if nothing is playing.
already_AddRefed<nsIPrincipal> GetCurrentPrincipal();
// Called to notify that the principal of the decoder's media resource has changed.
void NotifyDecoderPrincipalChanged();
// Update the visual size of the media. Called from the decoder on the
// main thread when/if the size changes.
void UpdateMediaSize(nsIntSize size);
@ -416,6 +419,15 @@ protected:
*/
void EndMediaStreamPlayback();
/**
* Returns an nsDOMMediaStream containing the played contents of this
* element. When aFinishWhenEnded is true, we will finish the stream and
* stop playing into it when this element ends playback.
* When aFinishWhenEnded is false, ending playback does not finish the
* stream; the stream will never finish.
*/
already_AddRefed<nsDOMMediaStream> CaptureStreamInternal(bool aFinishWhenEnded);
/**
* Create a decoder for the given aMIMEType. Returns null if we
* were unable to create the decoder.
@ -633,6 +645,14 @@ protected:
// At most one of mDecoder and mStream can be non-null.
nsRefPtr<nsDOMMediaStream> mStream;
// Holds references to the DOM wrappers for the MediaStreams that we're
// writing to.
struct OutputMediaStream {
nsRefPtr<nsDOMMediaStream> mStream;
bool mFinishWhenEnded;
};
nsTArray<OutputMediaStream> mOutputStreams;
// Holds a reference to the MediaStreamListener attached to mStream. STRONG!
StreamListener* mStreamListener;
@ -769,9 +789,12 @@ protected:
// 'Pause' method, or playback not yet having started.
bool mPaused;
// True if the sound is muted.
bool mMuted;
// True if the sound is being captured.
bool mAudioCaptured;
// If TRUE then the media element was actively playing before the currently
// in progress seeking. If FALSE then the media element is either not seeking
// or was not actively playing before the current seek. Used to decide whether

View file

@ -427,6 +427,9 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(nsHTMLMediaElement, nsGenericH
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_NSCOMPTR(mSourcePointer)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_NSCOMPTR(mLoadBlockedDoc)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_NSCOMPTR(mSourceLoadCandidate)
for (PRUint32 i = 0; i < tmp->mOutputStreams.Length(); ++i) {
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_NSCOMPTR(mOutputStreams[i].mStream);
}
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(nsHTMLMediaElement, nsGenericHTMLElement)
@ -439,6 +442,9 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(nsHTMLMediaElement, nsGenericHTM
NS_IMPL_CYCLE_COLLECTION_UNLINK_NSCOMPTR(mSourcePointer)
NS_IMPL_CYCLE_COLLECTION_UNLINK_NSCOMPTR(mLoadBlockedDoc)
NS_IMPL_CYCLE_COLLECTION_UNLINK_NSCOMPTR(mSourceLoadCandidate)
for (PRUint32 i = 0; i < tmp->mOutputStreams.Length(); ++i) {
NS_IMPL_CYCLE_COLLECTION_UNLINK_NSCOMPTR(mOutputStreams[i].mStream);
}
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(nsHTMLMediaElement)
@ -1422,6 +1428,43 @@ NS_IMETHODIMP nsHTMLMediaElement::SetMuted(bool aMuted)
return NS_OK;
}
already_AddRefed<nsDOMMediaStream>
nsHTMLMediaElement::CaptureStreamInternal(bool aFinishWhenEnded)
{
OutputMediaStream* out = mOutputStreams.AppendElement();
out->mStream = nsDOMMediaStream::CreateInputStream();
nsRefPtr<nsIPrincipal> principal = GetCurrentPrincipal();
out->mStream->CombineWithPrincipal(principal);
out->mFinishWhenEnded = aFinishWhenEnded;
mAudioCaptured = true;
if (mDecoder) {
mDecoder->SetAudioCaptured(true);
mDecoder->AddOutputStream(
out->mStream->GetStream()->AsSourceStream(), aFinishWhenEnded);
}
nsRefPtr<nsDOMMediaStream> result = out->mStream;
return result.forget();
}
NS_IMETHODIMP nsHTMLMediaElement::MozCaptureStream(nsIDOMMediaStream** aStream)
{
*aStream = CaptureStreamInternal(false).get();
return NS_OK;
}
NS_IMETHODIMP nsHTMLMediaElement::MozCaptureStreamUntilEnded(nsIDOMMediaStream** aStream)
{
*aStream = CaptureStreamInternal(true).get();
return NS_OK;
}
NS_IMETHODIMP nsHTMLMediaElement::GetMozAudioCaptured(bool *aCaptured)
{
*aCaptured = mAudioCaptured;
return NS_OK;
}
class MediaElementSetForURI : public nsURIHashKey {
public:
MediaElementSetForURI(const nsIURI* aKey) : nsURIHashKey(aKey) {}
@ -1548,6 +1591,7 @@ nsHTMLMediaElement::nsHTMLMediaElement(already_AddRefed<nsINodeInfo> aNodeInfo)
mAutoplayEnabled(true),
mPaused(true),
mMuted(false),
mAudioCaptured(false),
mPlayingBeforeSeek(false),
mPausedForInactiveDocument(false),
mWaitingFired(false),
@ -2267,7 +2311,13 @@ nsresult nsHTMLMediaElement::FinishDecoderSetup(nsMediaDecoder* aDecoder,
// The new stream has not been suspended by us.
mPausedForInactiveDocument = false;
aDecoder->SetAudioCaptured(mAudioCaptured);
aDecoder->SetVolume(mMuted ? 0.0 : mVolume);
for (PRUint32 i = 0; i < mOutputStreams.Length(); ++i) {
OutputMediaStream* ms = &mOutputStreams[i];
aDecoder->AddOutputStream(ms->mStream->GetStream()->AsSourceStream(),
ms->mFinishWhenEnded);
}
nsresult rv = aDecoder->Load(aStream, aListener, aCloneDonor);
if (NS_FAILED(rv)) {
@ -2281,6 +2331,7 @@ nsresult nsHTMLMediaElement::FinishDecoderSetup(nsMediaDecoder* aDecoder,
mDecoder = aDecoder;
AddMediaElementToURITable();
NotifyDecoderPrincipalChanged();
// We may want to suspend the new stream now.
// This will also do an AddRemoveSelfReference.
@ -2390,6 +2441,8 @@ void nsHTMLMediaElement::SetupMediaStreamPlayback()
NS_ASSERTION(!mStream && !mStreamListener, "Should have been ended already");
mStream = mSrcAttrStream;
// XXX if we ever support capturing the output of a media element which is
// playing a stream, we'll need to add a CombineWithPrincipal call here.
mStreamListener = new StreamListener(this);
NS_ADDREF(mStreamListener);
GetMediaStream()->AddListener(mStreamListener);
@ -2911,6 +2964,15 @@ already_AddRefed<nsIPrincipal> nsHTMLMediaElement::GetCurrentPrincipal()
return nsnull;
}
void nsHTMLMediaElement::NotifyDecoderPrincipalChanged()
{
for (PRUint32 i = 0; i < mOutputStreams.Length(); ++i) {
OutputMediaStream* ms = &mOutputStreams[i];
nsRefPtr<nsIPrincipal> principal = GetCurrentPrincipal();
ms->mStream->CombineWithPrincipal(principal);
}
}
void nsHTMLMediaElement::UpdateMediaSize(nsIntSize size)
{
mMediaSize = size;

View file

@ -762,6 +762,14 @@ ChannelMediaResource::CacheClientNotifyDataEnded(nsresult aStatus)
NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
}
void
ChannelMediaResource::CacheClientNotifyPrincipalChanged()
{
NS_ASSERTION(NS_IsMainThread(), "Don't call on non-main thread");
mDecoder->NotifyPrincipalChanged();
}
nsresult
ChannelMediaResource::CacheClientSeek(PRInt64 aOffset, bool aResume)
{

View file

@ -375,6 +375,8 @@ public:
// if this stream didn't read any data, since another stream might have
// received data for the same resource.
void CacheClientNotifyDataEnded(nsresult aStatus);
// Notify that the principal for the cached resource changed.
void CacheClientNotifyPrincipalChanged();
// These are called on the main thread by nsMediaCache. These shouldn't block,
// but they may grab locks --- the media cache is not holding its lock

View file

@ -161,10 +161,16 @@ void nsAudioAvailableEventManager::QueueWrittenAudioData(AudioDataValue* aAudioD
// Fill the signalBuffer.
PRUint32 i;
float *signalBuffer = mSignalBuffer.get() + mSignalBufferPosition;
if (audioData) {
for (i = 0; i < signalBufferTail; ++i) {
signalBuffer[i] = MOZ_CONVERT_AUDIO_SAMPLE(audioData[i]);
}
} else {
memset(signalBuffer, 0, signalBufferTail*sizeof(signalBuffer[0]));
}
if (audioData) {
audioData += signalBufferTail;
}
NS_ASSERTION(audioDataLength >= signalBufferTail,
"audioDataLength about to wrap past zero to +infinity!");
@ -204,8 +210,12 @@ void nsAudioAvailableEventManager::QueueWrittenAudioData(AudioDataValue* aAudioD
// Add data to the signalBuffer.
PRUint32 i;
float *signalBuffer = mSignalBuffer.get() + mSignalBufferPosition;
if (audioData) {
for (i = 0; i < audioDataLength; ++i) {
signalBuffer[i] = MOZ_CONVERT_AUDIO_SAMPLE(audioData[i]);
}
} else {
memset(signalBuffer, 0, audioDataLength*sizeof(signalBuffer[0]));
}
mSignalBufferPosition += audioDataLength;
}

View file

@ -82,6 +82,30 @@ void nsBuiltinDecoder::SetVolume(double aVolume)
}
}
void nsBuiltinDecoder::SetAudioCaptured(bool aCaptured)
{
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
mInitialAudioCaptured = aCaptured;
if (mDecoderStateMachine) {
mDecoderStateMachine->SetAudioCaptured(aCaptured);
}
}
void nsBuiltinDecoder::AddOutputStream(SourceMediaStream* aStream, bool aFinishWhenEnded)
{
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
{
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
OutputMediaStream* ms = mOutputStreams.AppendElement();
ms->Init(PRInt64(mCurrentTime*USECS_PER_S), aStream, aFinishWhenEnded);
}
// Make sure the state machine thread runs so that any buffered data
// is fed into our stream.
ScheduleStateMachineThread();
}
double nsBuiltinDecoder::GetDuration()
{
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
@ -692,6 +716,13 @@ void nsBuiltinDecoder::NotifyDownloadEnded(nsresult aStatus)
UpdateReadyStateForData();
}
void nsBuiltinDecoder::NotifyPrincipalChanged()
{
if (mElement) {
mElement->NotifyDecoderPrincipalChanged();
}
}
void nsBuiltinDecoder::NotifyBytesConsumed(PRInt64 aBytes)
{
ReentrantMonitorAutoEnter mon(mReentrantMonitor);

View file

@ -262,6 +262,7 @@ public:
// Set the audio volume. The decoder monitor must be obtained before
// calling this.
virtual void SetVolume(double aVolume) = 0;
virtual void SetAudioCaptured(bool aCapture) = 0;
virtual void Shutdown() = 0;
@ -397,6 +398,54 @@ public:
virtual void Pause();
virtual void SetVolume(double aVolume);
virtual void SetAudioCaptured(bool aCaptured);
virtual void AddOutputStream(SourceMediaStream* aStream, bool aFinishWhenEnded);
// Protected by mReentrantMonitor. All decoder output is copied to these streams.
struct OutputMediaStream {
void Init(PRInt64 aInitialTime, SourceMediaStream* aStream, bool aFinishWhenEnded)
{
mLastAudioPacketTime = -1;
mLastAudioPacketEndTime = -1;
mAudioFramesWrittenBaseTime = aInitialTime;
mAudioFramesWritten = 0;
mNextVideoTime = aInitialTime;
mStream = aStream;
mStreamInitialized = false;
mFinishWhenEnded = aFinishWhenEnded;
mHaveSentFinish = false;
mHaveSentFinishAudio = false;
mHaveSentFinishVideo = false;
}
PRInt64 mLastAudioPacketTime; // microseconds
PRInt64 mLastAudioPacketEndTime; // microseconds
// Count of audio frames written to the stream
PRInt64 mAudioFramesWritten;
// Timestamp of the first audio packet whose frames we wrote.
PRInt64 mAudioFramesWrittenBaseTime; // microseconds
// mNextVideoTime is the end timestamp for the last packet sent to the stream.
// Therefore video packets starting at or after this time need to be copied
// to the output stream.
PRInt64 mNextVideoTime; // microseconds
// The last video image sent to the stream. Useful if we need to replicate
// the image.
nsRefPtr<Image> mLastVideoImage;
nsRefPtr<SourceMediaStream> mStream;
gfxIntSize mLastVideoImageDisplaySize;
// This is set to true when the stream is initialized (audio and
// video tracks added).
bool mStreamInitialized;
bool mFinishWhenEnded;
bool mHaveSentFinish;
bool mHaveSentFinishAudio;
bool mHaveSentFinishVideo;
};
nsTArray<OutputMediaStream>& OutputStreams()
{
GetReentrantMonitor().AssertCurrentThreadIn();
return mOutputStreams;
}
virtual double GetDuration();
virtual void SetInfinite(bool aInfinite);
@ -408,6 +457,7 @@ public:
virtual void NotifySuspendedStatusChanged();
virtual void NotifyBytesDownloaded();
virtual void NotifyDownloadEnded(nsresult aStatus);
virtual void NotifyPrincipalChanged();
// Called by the decode thread to keep track of the number of bytes read
// from the resource.
void NotifyBytesConsumed(PRInt64 aBytes);
@ -663,6 +713,9 @@ public:
// only.
PRInt64 mDuration;
// True when playback should start with audio captured (not playing).
bool mInitialAudioCaptured;
// True if the media resource is seekable (server supports byte range
// requests).
bool mSeekable;
@ -686,6 +739,9 @@ public:
// state change.
ReentrantMonitor mReentrantMonitor;
// Data about MediaStreams that are being fed by this decoder.
nsTArray<OutputMediaStream> mOutputStreams;
// Set to one of the valid play states. It is protected by the
// monitor mReentrantMonitor. This monitor must be acquired when reading or
// writing the state. Any change to the state on the main thread

View file

@ -71,6 +71,21 @@ extern PRLogModuleInfo* gBuiltinDecoderLog;
#define SEEK_LOG(type, msg)
#endif
void
AudioData::EnsureAudioBuffer()
{
if (mAudioBuffer)
return;
mAudioBuffer = SharedBuffer::Create(mFrames*mChannels*sizeof(AudioDataValue));
AudioDataValue* data = static_cast<AudioDataValue*>(mAudioBuffer->Data());
for (PRUint32 i = 0; i < mFrames; ++i) {
for (PRUint32 j = 0; j < mChannels; ++j) {
data[j*mFrames + i] = mAudioData[i*mChannels + j];
}
}
}
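EnsureAudioBuffer() reorders the interleaved mAudioData layout (mFrames frames, each holding mChannels samples) into the channel-major layout stored in mAudioBuffer (mChannels runs of mFrames samples), the form the MediaStream code consumes. A self-contained sketch of the same reindexing, assuming 16-bit samples and illustrative names:

#include <cstdint>
#include <vector>

// Convert frame-major (interleaved) samples to channel-major.
// Input:  frame0[ch0, ch1, ...], frame1[ch0, ch1, ...], ...
// Output: ch0[frame0 .. frameN], ch1[frame0 .. frameN], ...
std::vector<int16_t> Deinterleave(const int16_t* aInterleaved,
                                  uint32_t aFrames, uint32_t aChannels)
{
  std::vector<int16_t> out(size_t(aFrames) * aChannels);
  for (uint32_t i = 0; i < aFrames; ++i) {
    for (uint32_t j = 0; j < aChannels; ++j) {
      out[size_t(j) * aFrames + i] = aInterleaved[size_t(i) * aChannels + j];
    }
  }
  return out;
}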
static bool
ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane)
{
@ -115,7 +130,15 @@ VideoData* VideoData::Create(nsVideoInfo& aInfo,
nsIntRect aPicture)
{
if (!aContainer) {
// Create a dummy VideoData with no image. This gives us something to
// send to media streams if necessary.
nsAutoPtr<VideoData> v(new VideoData(aOffset,
aTime,
aEndTime,
aKeyframe,
aTimecode,
aInfo.mDisplay));
return v.forget();
}
// The following situation should never happen unless there is a bug

View file

@ -43,13 +43,15 @@
#include "ImageLayers.h"
#include "nsSize.h"
#include "mozilla/ReentrantMonitor.h"
#include "MediaStreamGraph.h"
#include "SharedBuffer.h"
// Stores info relevant to presenting media frames.
class nsVideoInfo {
public:
nsVideoInfo()
: mAudioRate(44100),
mAudioChannels(2),
mDisplay(0,0),
mStereoMode(mozilla::layers::STEREO_MODE_MONO),
mHasAudio(false),
@ -113,6 +115,8 @@ typedef float AudioDataValue;
// Holds a chunk of decoded audio frames.
class AudioData {
public:
typedef mozilla::SharedBuffer SharedBuffer;
AudioData(PRInt64 aOffset,
PRInt64 aTime,
PRInt64 aDuration,
@ -134,6 +138,11 @@ public:
MOZ_COUNT_DTOR(AudioData);
}
// If mAudioBuffer is null, creates it from mAudioData.
void EnsureAudioBuffer();
PRInt64 GetEnd() { return mTime + mDuration; }
// Approximate byte offset of the end of the page on which this chunk
// ends.
const PRInt64 mOffset;
@ -142,6 +151,10 @@ public:
const PRInt64 mDuration; // In usecs.
const PRUint32 mFrames;
const PRUint32 mChannels;
// At least one of mAudioBuffer/mAudioData must be non-null.
// mChannels channels, each with mFrames frames
nsRefPtr<SharedBuffer> mAudioBuffer;
// mFrames frames, each with mChannels values
nsAutoArrayPtr<AudioDataValue> mAudioData;
};
@ -198,6 +211,8 @@ public:
MOZ_COUNT_DTOR(VideoData);
}
PRInt64 GetEnd() { return mEndTime; }
// Dimensions at which to display the video frame. The picture region
// will be scaled to this size. This should be the picture region's
// dimensions scaled with respect to its aspect ratio.
@ -370,6 +385,25 @@ template <class T> class MediaQueue : private nsDeque {
ForEach(aFunctor);
}
// Extracts elements from the queue into aResult, in order.
// Elements that end before aTime are ignored.
void GetElementsAfter(PRInt64 aTime, nsTArray<T*>* aResult) {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
if (!GetSize())
return;
PRInt32 i;
for (i = GetSize() - 1; i > 0; --i) {
T* v = static_cast<T*>(ObjectAt(i));
if (v->GetEnd() < aTime)
break;
}
// Elements before index i have an end time before aTime. It's also possible
// that the element at i has an end time before aTime, but that's OK.
for (; i < GetSize(); ++i) {
aResult->AppendElement(static_cast<T*>(ObjectAt(i)));
}
}
private:
mutable ReentrantMonitor mReentrantMonitor;
@ -408,7 +442,7 @@ public:
// than aTimeThreshold will be decoded (unless they're not keyframes
// and aKeyframeSkip is true), but will not be added to the queue.
virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
PRInt64 aTimeThreshold) = 0;
virtual bool HasAudio() = 0;
virtual bool HasVideo() = 0;

View file

@ -46,6 +46,8 @@
#include "VideoUtils.h"
#include "nsTimeRanges.h"
#include "nsDeque.h"
#include "AudioSegment.h"
#include "VideoSegment.h"
#include "mozilla/Preferences.h"
#include "mozilla/StandardInteger.h"
@ -420,6 +422,7 @@ nsBuiltinDecoderStateMachine::nsBuiltinDecoderStateMachine(nsBuiltinDecoder* aDe
mAudioEndTime(-1),
mVideoFrameEndTime(-1),
mVolume(1.0),
mAudioCaptured(false),
mSeekable(true),
mPositionChangeQueued(false),
mAudioCompleted(false),
@ -434,6 +437,8 @@ nsBuiltinDecoderStateMachine::nsBuiltinDecoderStateMachine(nsBuiltinDecoder* aDe
mDecodeThreadWaiting(false),
mRealTime(aRealTime),
mRequestedNewDecodeThread(false),
mDidThrottleAudioDecoding(false),
mDidThrottleVideoDecoding(false),
mEventManager(aDecoder)
{
MOZ_COUNT_CTOR(nsBuiltinDecoderStateMachine);
@ -521,6 +526,276 @@ void nsBuiltinDecoderStateMachine::DecodeThreadRun()
LOG(PR_LOG_DEBUG, ("%p Decode thread finished", mDecoder.get()));
}
void nsBuiltinDecoderStateMachine::SendOutputStreamAudio(AudioData* aAudio,
OutputMediaStream* aStream,
AudioSegment* aOutput)
{
mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
if (aAudio->mTime <= aStream->mLastAudioPacketTime) {
// Ignore packets that we've already processed.
return;
}
aStream->mLastAudioPacketTime = aAudio->mTime;
aStream->mLastAudioPacketEndTime = aAudio->GetEnd();
NS_ASSERTION(aOutput->GetChannels() == aAudio->mChannels,
"Wrong number of channels");
// This logic has to mimic AudioLoop closely to make sure we write
// the exact same silences
CheckedInt64 audioWrittenOffset = UsecsToFrames(mInfo.mAudioRate,
aStream->mAudioFramesWrittenBaseTime + mStartTime) + aStream->mAudioFramesWritten;
CheckedInt64 frameOffset = UsecsToFrames(mInfo.mAudioRate, aAudio->mTime);
if (!audioWrittenOffset.valid() || !frameOffset.valid())
return;
if (audioWrittenOffset.value() < frameOffset.value()) {
// Write silence to catch up
LOG(PR_LOG_DEBUG, ("%p Decoder writing %d frames of silence to MediaStream",
mDecoder.get(), PRInt32(frameOffset.value() - audioWrittenOffset.value())));
AudioSegment silence;
silence.InitFrom(*aOutput);
silence.InsertNullDataAtStart(frameOffset.value() - audioWrittenOffset.value());
aStream->mAudioFramesWritten += silence.GetDuration();
aOutput->AppendFrom(&silence);
}
PRInt64 offset;
if (aStream->mAudioFramesWritten == 0) {
NS_ASSERTION(frameOffset.value() <= audioWrittenOffset.value(),
"Otherwise we'd have taken the write-silence path");
// We're starting in the middle of a packet. Split the packet.
offset = audioWrittenOffset.value() - frameOffset.value();
} else {
// Write the entire packet.
offset = 0;
}
if (offset >= aAudio->mFrames)
return;
aAudio->EnsureAudioBuffer();
nsRefPtr<SharedBuffer> buffer = aAudio->mAudioBuffer;
aOutput->AppendFrames(buffer.forget(), aAudio->mFrames, PRInt32(offset), aAudio->mFrames,
MOZ_AUDIO_DATA_FORMAT);
LOG(PR_LOG_DEBUG, ("%p Decoder writing %d frames of data to MediaStream for AudioData at %lld",
mDecoder.get(), aAudio->mFrames - PRInt32(offset), aAudio->mTime));
aStream->mAudioFramesWritten += aAudio->mFrames - PRInt32(offset);
}
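The catch-up logic above compares how many frames the stream should already contain (audioWrittenOffset) against the incoming packet's frame position (frameOffset), padding with silence when the stream is behind and splitting the first packet when it starts early. A self-contained toy of that arithmetic with concrete numbers; plain integer math stands in for Gecko's UsecsToFrames/CheckedInt64:

#include <cstdint>
#include <cstdio>

static int64_t ToFrames(int64_t aRate, int64_t aUsecs)
{
  return aUsecs * aRate / 1000000; // Gecko uses CheckedInt64 to catch overflow
}

int main()
{
  const int64_t rate = 44100;     // mInfo.mAudioRate
  int64_t baseTimeUs = 0;         // mAudioFramesWrittenBaseTime + mStartTime
  int64_t framesWritten = 0;      // mAudioFramesWritten
  int64_t packetTimeUs = 100000;  // aAudio->mTime: packet starts at 100ms

  int64_t writtenOffset = ToFrames(rate, baseTimeUs) + framesWritten;
  int64_t packetOffset = ToFrames(rate, packetTimeUs);
  if (writtenOffset < packetOffset) {
    // The stream is behind the packet: pad with silence, as the decoder does.
    printf("insert %lld frames of silence\n",
           (long long)(packetOffset - writtenOffset)); // prints 4410
  }
  return 0;
}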
static void WriteVideoToMediaStream(Image* aImage,
PRInt64 aDuration, const gfxIntSize& aIntrinsicSize,
VideoSegment* aOutput)
{
nsRefPtr<Image> image = aImage;
aOutput->AppendFrame(image.forget(), aDuration, aIntrinsicSize);
}
static const TrackID TRACK_AUDIO = 1;
static const TrackID TRACK_VIDEO = 2;
static const TrackRate RATE_VIDEO = USECS_PER_S;
void nsBuiltinDecoderStateMachine::SendOutputStreamData()
{
mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
if (mState == DECODER_STATE_DECODING_METADATA)
return;
nsTArray<OutputMediaStream>& streams = mDecoder->OutputStreams();
PRInt64 minLastAudioPacketTime = PR_INT64_MAX;
bool finished =
(!mInfo.mHasAudio || mReader->mAudioQueue.IsFinished()) &&
(!mInfo.mHasVideo || mReader->mVideoQueue.IsFinished());
for (PRUint32 i = 0; i < streams.Length(); ++i) {
OutputMediaStream* stream = &streams[i];
SourceMediaStream* mediaStream = stream->mStream;
StreamTime endPosition = 0;
if (!stream->mStreamInitialized) {
if (mInfo.mHasAudio) {
AudioSegment* audio = new AudioSegment();
audio->Init(mInfo.mAudioChannels);
mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudioRate, 0, audio);
}
if (mInfo.mHasVideo) {
VideoSegment* video = new VideoSegment();
mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video);
}
stream->mStreamInitialized = true;
}
if (mInfo.mHasAudio) {
nsAutoTArray<AudioData*,10> audio;
// It's OK to hold references to the AudioData because while audio
// is captured, only the decoder thread pops from the queue (see below).
mReader->mAudioQueue.GetElementsAfter(stream->mLastAudioPacketTime, &audio);
AudioSegment output;
output.Init(mInfo.mAudioChannels);
for (PRUint32 i = 0; i < audio.Length(); ++i) {
SendOutputStreamAudio(audio[i], stream, &output);
}
if (output.GetDuration() > 0) {
mediaStream->AppendToTrack(TRACK_AUDIO, &output);
}
if (mReader->mAudioQueue.IsFinished() && !stream->mHaveSentFinishAudio) {
mediaStream->EndTrack(TRACK_AUDIO);
stream->mHaveSentFinishAudio = true;
}
minLastAudioPacketTime = NS_MIN(minLastAudioPacketTime, stream->mLastAudioPacketTime);
endPosition = NS_MAX(endPosition,
TicksToTimeRoundDown(mInfo.mAudioRate, stream->mAudioFramesWritten));
}
if (mInfo.mHasVideo) {
nsAutoTArray<VideoData*,10> video;
// It's OK to hold references to the VideoData because only the decoder
// thread pops from the queue.
mReader->mVideoQueue.GetElementsAfter(stream->mNextVideoTime + mStartTime, &video);
VideoSegment output;
for (PRUint32 i = 0; i < video.Length(); ++i) {
VideoData* v = video[i];
if (stream->mNextVideoTime + mStartTime < v->mTime) {
LOG(PR_LOG_DEBUG, ("%p Decoder writing last video to MediaStream for %lld ms",
mDecoder.get(), v->mTime - (stream->mNextVideoTime + mStartTime)));
// Write last video frame to catch up. mLastVideoImage can be null here,
// which is fine; it just means there's no video.
WriteVideoToMediaStream(stream->mLastVideoImage,
v->mTime - (stream->mNextVideoTime + mStartTime), stream->mLastVideoImageDisplaySize,
&output);
stream->mNextVideoTime = v->mTime - mStartTime;
}
if (stream->mNextVideoTime + mStartTime < v->mEndTime) {
LOG(PR_LOG_DEBUG, ("%p Decoder writing video frame %lld to MediaStream",
mDecoder.get(), v->mTime));
WriteVideoToMediaStream(v->mImage,
v->mEndTime - (stream->mNextVideoTime + mStartTime), v->mDisplay,
&output);
stream->mNextVideoTime = v->mEndTime - mStartTime;
stream->mLastVideoImage = v->mImage;
stream->mLastVideoImageDisplaySize = v->mDisplay;
} else {
LOG(PR_LOG_DEBUG, ("%p Decoder skipping writing video frame %lld to MediaStream",
mDecoder.get(), v->mTime));
}
}
if (output.GetDuration() > 0) {
mediaStream->AppendToTrack(TRACK_VIDEO, &output);
}
if (mReader->mVideoQueue.IsFinished() && !stream->mHaveSentFinishVideo) {
mediaStream->EndTrack(TRACK_VIDEO);
stream->mHaveSentFinishVideo = true;
}
endPosition = NS_MAX(endPosition,
TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime));
}
if (!stream->mHaveSentFinish) {
stream->mStream->AdvanceKnownTracksTime(endPosition);
}
if (finished && !stream->mHaveSentFinish) {
stream->mHaveSentFinish = true;
stream->mStream->Finish();
}
}
if (mAudioCaptured) {
// Discard audio packets that are no longer needed.
PRInt64 audioPacketTimeToDiscard =
NS_MIN(minLastAudioPacketTime, mStartTime + mCurrentFrameTime);
while (true) {
nsAutoPtr<AudioData> a(mReader->mAudioQueue.PopFront());
if (!a)
break;
// Packet times are not 100% reliable so this may discard packets that
// actually contain data for mCurrentFrameTime. This means that if someone
// creates a new output stream, we might not actually have the audio for
// its very start. That's OK; we'll play silence instead for a brief
// moment. Seeking to this time would have a similar issue for such badly
// muxed resources.
if (a->GetEnd() >= audioPacketTimeToDiscard) {
mReader->mAudioQueue.PushFront(a.forget());
break;
}
}
if (finished) {
mAudioCompleted = true;
UpdateReadyState();
}
}
}
bool nsBuiltinDecoderStateMachine::HaveEnoughDecodedAudio(PRInt64 aAmpleAudioUSecs)
{
mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
if (mReader->mAudioQueue.GetSize() == 0 ||
GetDecodedAudioDuration() < aAmpleAudioUSecs) {
return false;
}
if (!mAudioCaptured) {
return true;
}
nsTArray<OutputMediaStream>& streams = mDecoder->OutputStreams();
for (PRUint32 i = 0; i < streams.Length(); ++i) {
OutputMediaStream* stream = &streams[i];
if (!stream->mHaveSentFinishAudio &&
!stream->mStream->HaveEnoughBuffered(TRACK_AUDIO)) {
return false;
}
}
nsIThread* thread = GetStateMachineThread();
nsCOMPtr<nsIRunnable> callback = NS_NewRunnableMethod(this,
&nsBuiltinDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder);
for (PRUint32 i = 0; i < streams.Length(); ++i) {
OutputMediaStream* stream = &streams[i];
if (!stream->mHaveSentFinishAudio) {
stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO, thread, callback);
}
}
return true;
}
bool nsBuiltinDecoderStateMachine::HaveEnoughDecodedVideo()
{
mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
if (static_cast<PRUint32>(mReader->mVideoQueue.GetSize()) < AMPLE_VIDEO_FRAMES) {
return false;
}
nsTArray<OutputMediaStream>& streams = mDecoder->OutputStreams();
if (streams.IsEmpty()) {
return true;
}
for (PRUint32 i = 0; i < streams.Length(); ++i) {
OutputMediaStream* stream = &streams[i];
if (!stream->mHaveSentFinishVideo &&
!stream->mStream->HaveEnoughBuffered(TRACK_VIDEO)) {
return false;
}
}
nsIThread* thread = GetStateMachineThread();
nsCOMPtr<nsIRunnable> callback = NS_NewRunnableMethod(this,
&nsBuiltinDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder);
for (PRUint32 i = 0; i < streams.Length(); ++i) {
OutputMediaStream* stream = &streams[i];
if (!stream->mHaveSentFinishVideo) {
stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO, thread, callback);
}
}
return true;
}
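These two predicates drive the throttling changes to DecodeLoop() below: mDidThrottleAudioDecoding and mDidThrottleVideoDecoding remember the previous iteration's decision, and a throttled-to-not-throttled transition sets the pump flags so the low-data heuristic doesn't immediately trigger a decode-ahead. A toy, self-contained model of that hysteresis (names are illustrative, not Gecko code):

// One decode-loop iteration's worth of throttle bookkeeping.
struct ThrottleState {
  bool didThrottle = false; // last iteration's decision
  bool pump = false;        // suppress low-data checks after un-throttling
};

static void Step(ThrottleState& aState, bool aHaveEnough)
{
  bool throttle = aHaveEnough;
  if (aState.didThrottle && !throttle) {
    // Just left the throttled state: decode eagerly for a while before
    // letting the low-data threshold influence decisions again.
    aState.pump = true;
  }
  aState.didThrottle = throttle;
}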
void nsBuiltinDecoderStateMachine::DecodeLoop()
{
LOG(PR_LOG_DEBUG, ("%p Start DecodeLoop()", mDecoder.get()));
@ -558,7 +833,6 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
PRInt64 ampleAudioThreshold = AMPLE_AUDIO_USECS;
MediaQueue<VideoData>& videoQueue = mReader->mVideoQueue;
MediaQueue<AudioData>& audioQueue = mReader->mAudioQueue;
// Main decode loop.
bool videoPlaying = HasVideo();
@ -592,10 +866,9 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
if (mState == DECODER_STATE_DECODING &&
!skipToNextKeyframe &&
videoPlaying &&
((!audioPump && audioPlaying && !mDidThrottleAudioDecoding && GetDecodedAudioDuration() < lowAudioThreshold) ||
(!videoPump && videoPlaying && !mDidThrottleVideoDecoding &&
static_cast<PRUint32>(videoQueue.GetSize()) < LOW_VIDEO_FRAMES)) &&
!HasLowUndecodedData())
{
@ -604,8 +877,12 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
}
// Video decode.
bool throttleVideoDecoding = !videoPlaying || HaveEnoughDecodedVideo();
if (mDidThrottleVideoDecoding && !throttleVideoDecoding) {
videoPump = true;
}
mDidThrottleVideoDecoding = throttleVideoDecoding;
if (!throttleVideoDecoding)
{
// Time the video decode, so that if it's slow, we can increase our low
// audio threshold to reduce the chance of an audio underrun while we're
@ -632,13 +909,18 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
}
// Audio decode.
bool throttleAudioDecoding = !audioPlaying || HaveEnoughDecodedAudio(ampleAudioThreshold);
if (mDidThrottleAudioDecoding && !throttleAudioDecoding) {
audioPump = true;
}
mDidThrottleAudioDecoding = throttleAudioDecoding;
if (!mDidThrottleAudioDecoding) {
ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
audioPlaying = mReader->DecodeAudioData();
}
SendOutputStreamData();
// Notify to ensure that the AudioLoop() is not waiting, in case it was
// waiting for more audio to be decoded.
mDecoder->GetReentrantMonitor().NotifyAll();
@ -650,11 +932,7 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
if ((mState == DECODER_STATE_DECODING || mState == DECODER_STATE_BUFFERING) &&
!mStopDecodeThread &&
(videoPlaying || audioPlaying) &&
throttleAudioDecoding && throttleVideoDecoding)
{
// All active bitstreams' decode is well ahead of the playback
// position, we may as well wait for the playback to catch up. Note the
@ -697,6 +975,15 @@ bool nsBuiltinDecoderStateMachine::IsPlaying()
return !mPlayStartTime.IsNull();
}
static void WriteSilence(nsAudioStream* aStream, PRUint32 aFrames)
{
PRUint32 numSamples = aFrames * aStream->GetChannels();
nsAutoTArray<AudioDataValue, 1000> buf;
buf.SetLength(numSamples);
memset(buf.Elements(), 0, numSamples * sizeof(AudioDataValue));
aStream->Write(buf.Elements(), aFrames);
}
void nsBuiltinDecoderStateMachine::AudioLoop()
{
NS_ASSERTION(OnAudioThread(), "Should be on audio thread.");
@ -760,6 +1047,7 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
}
// If we're shutting down, break out and exit the audio thread.
// Also break out if audio is being captured.
if (mState == DECODER_STATE_SHUTDOWN ||
mStopAudioThread ||
mReader->mAudioQueue.AtEndOfStream())
@ -813,6 +1101,8 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
// time.
missingFrames = NS_MIN(static_cast<PRInt64>(PR_UINT32_MAX),
missingFrames.value());
LOG(PR_LOG_DEBUG, ("%p Decoder playing %d frames of silence",
mDecoder.get(), PRInt32(missingFrames.value())));
framesWritten = PlaySilence(static_cast<PRUint32>(missingFrames.value()),
channels, playedFrames.value());
} else {
@ -850,10 +1140,7 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
if (framesToWrite < PR_UINT32_MAX / channels) {
// Write silence manually rather than using PlaySilence(), so that
// the AudioAPI doesn't get a copy of the audio frames.
WriteSilence(mAudioStream, framesToWrite);
}
}
@ -885,10 +1172,12 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
mAudioStream = nsnull;
mEventManager.Clear();
if (!mAudioCaptured) {
mAudioCompleted = true;
UpdateReadyState();
// Kick the decode thread; it may be sleeping waiting for this to finish.
mDecoder->GetReentrantMonitor().NotifyAll();
}
}
// Must not hold the decoder monitor while we shutdown the audio stream, as
@ -908,12 +1197,9 @@ PRUint32 nsBuiltinDecoderStateMachine::PlaySilence(PRUint32 aFrames,
NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
PRUint32 maxFrames = SILENCE_BYTES_CHUNK / aChannels / sizeof(AudioDataValue);
PRUint32 frames = NS_MIN(aFrames, maxFrames);
WriteSilence(mAudioStream, frames);
// Dispatch events to the DOM for the audio just written.
mEventManager.QueueWrittenAudioData(nsnull, frames * aChannels,
(aFrameOffset + frames) * aChannels);
return frames;
}
@ -927,6 +1213,7 @@ PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue(PRUint64 aFrameOffset,
{
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
NS_WARN_IF_FALSE(IsPlaying(), "Should be playing");
NS_ASSERTION(!mAudioCaptured, "Audio cannot be captured here!");
// Awaken the decode loop if it's waiting for space to free up in the
// audio queue.
mDecoder->GetReentrantMonitor().NotifyAll();
@ -941,6 +1228,8 @@ PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue(PRUint64 aFrameOffset,
// able to acquire the audio monitor in order to resume or destroy the
// audio stream.
if (!mAudioStream->IsPaused()) {
LOG(PR_LOG_DEBUG, ("%p Decoder playing %d frames of data to stream for AudioData at %lld",
mDecoder.get(), audio->mFrames, audio->mTime));
mAudioStream->Write(audio->mAudioData,
audio->mFrames);
@ -1077,6 +1366,16 @@ void nsBuiltinDecoderStateMachine::SetVolume(double volume)
mVolume = volume;
}
void nsBuiltinDecoderStateMachine::SetAudioCaptured(bool aCaptured)
{
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
if (!mAudioCaptured && aCaptured) {
StopAudioThread();
}
mAudioCaptured = aCaptured;
}
double nsBuiltinDecoderStateMachine::GetCurrentTime() const
{
NS_ASSERTION(NS_IsMainThread() ||
@ -1363,7 +1662,7 @@ nsBuiltinDecoderStateMachine::StartAudioThread()
"Should be on state machine or decode thread.");
mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
mStopAudioThread = false;
if (HasAudio() && !mAudioThread && !mAudioCaptured) {
nsresult rv = NS_NewThread(getter_AddRefs(mAudioThread),
nsnull,
MEDIA_THREAD_STACK_SIZE);
@ -1542,6 +1841,9 @@ void nsBuiltinDecoderStateMachine::DecodeSeek()
NS_ASSERTION(mState == DECODER_STATE_SEEKING,
"Only call when in seeking state");
mDidThrottleAudioDecoding = false;
mDidThrottleVideoDecoding = false;
// During the seek, don't hold a lock on the decoder state;
// otherwise long seek operations can block the main thread.
// The events dispatched to the main thread are SYNC calls.
@ -1596,7 +1898,7 @@ void nsBuiltinDecoderStateMachine::DecodeSeek()
mAudioStartTime = startTime;
mPlayDuration = startTime - mStartTime;
if (HasVideo()) {
VideoData* video = mReader->mVideoQueue.PeekFront();
if (video) {
NS_ASSERTION(video->mTime <= seekTime && seekTime <= video->mEndTime,
"Seek target should lie inside the first frame after seek");
@ -1604,7 +1906,6 @@ void nsBuiltinDecoderStateMachine::DecodeSeek()
ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
RenderVideoFrame(video, TimeStamp::Now());
}
nsCOMPtr<nsIRunnable> event =
NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::Invalidate);
NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
@ -1882,6 +2183,9 @@ void nsBuiltinDecoderStateMachine::RenderVideoFrame(VideoData* aData,
return;
}
LOG(PR_LOG_DEBUG, ("%p Decoder playing video frame %lld",
mDecoder.get(), aData->mTime));
VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
if (container) {
container->SetCurrentFrame(aData->mDisplay, aData->mImage, aTarget);
@ -1893,7 +2197,7 @@ nsBuiltinDecoderStateMachine::GetAudioClock()
{
NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread.");
mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
if (!HasAudio() || mAudioCaptured)
return -1;
// We must hold the decoder monitor while using the audio stream off the
// audio thread to ensure that it doesn't get destroyed on the audio thread
@ -1953,6 +2257,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
while (mRealTime || clock_time >= frame->mTime) {
mVideoFrameEndTime = frame->mEndTime;
currentFrame = frame;
LOG(PR_LOG_DEBUG, ("%p Decoder discarding video frame %lld", mDecoder.get(), frame->mTime));
mReader->mVideoQueue.PopFront();
// Notify the decode thread that the video queue's buffers may have
// free'd up space for more frames.
@ -2240,6 +2545,12 @@ nsresult nsBuiltinDecoderStateMachine::ScheduleStateMachine() {
return ScheduleStateMachine(0);
}
void nsBuiltinDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder() {
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
mon.NotifyAll();
ScheduleStateMachine(0);
}
nsresult nsBuiltinDecoderStateMachine::ScheduleStateMachine(PRInt64 aUsecs) {
mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
NS_ABORT_IF_FALSE(GetStateMachineThread(),

View file

@ -117,6 +117,8 @@ hardware (via nsAudioStream and libsydneyaudio).
#include "nsHTMLMediaElement.h"
#include "mozilla/ReentrantMonitor.h"
#include "nsITimer.h"
#include "AudioSegment.h"
#include "VideoSegment.h"
/*
The state machine class. This manages the decoding and seeking in the
@ -137,6 +139,10 @@ public:
typedef mozilla::TimeStamp TimeStamp;
typedef mozilla::TimeDuration TimeDuration;
typedef mozilla::VideoFrameContainer VideoFrameContainer;
typedef nsBuiltinDecoder::OutputMediaStream OutputMediaStream;
typedef mozilla::SourceMediaStream SourceMediaStream;
typedef mozilla::AudioSegment AudioSegment;
typedef mozilla::VideoSegment VideoSegment;
nsBuiltinDecoderStateMachine(nsBuiltinDecoder* aDecoder, nsBuiltinDecoderReader* aReader, bool aRealTime = false);
~nsBuiltinDecoderStateMachine();
@ -149,6 +155,7 @@ public:
return mState;
}
virtual void SetVolume(double aVolume);
virtual void SetAudioCaptured(bool aCapture);
virtual void Shutdown();
virtual PRInt64 GetDuration();
virtual void SetDuration(PRInt64 aDuration);
@ -249,6 +256,10 @@ public:
// machine again.
nsresult ScheduleStateMachine();
// Calls ScheduleStateMachine() after taking the decoder lock. Also
// notifies the decoder thread in case it's waiting on the decoder lock.
void ScheduleStateMachineWithLockAndWakeDecoder();
// Schedules the shared state machine thread to run the state machine
// in aUsecs microseconds from now, if it's not already scheduled to run
// earlier, in which case the request is discarded.
@ -273,6 +284,12 @@ public:
// element. Called on the main thread.
void NotifyAudioAvailableListener();
// Copy queued audio/video data in the reader to any output MediaStreams that
// need it.
void SendOutputStreamData();
bool HaveEnoughDecodedAudio(PRInt64 aAmpleAudioUSecs);
bool HaveEnoughDecodedVideo();
protected:
// Returns true if we've got less than aAudioUsecs microseconds of decoded
@ -436,6 +453,11 @@ protected:
// to call.
void DecodeThreadRun();
// Copy audio from an AudioData packet to aOutput. This may require
// inserting silence depending on the timing of the audio packet.
void SendOutputStreamAudio(AudioData* aAudio, OutputMediaStream* aStream,
AudioSegment* aOutput);
// State machine thread run function. Defers to RunStateMachine().
nsresult CallRunStateMachine();
@ -569,6 +591,10 @@ protected:
// Time at which we started decoding. Synchronised via decoder monitor.
TimeStamp mDecodeStartTime;
// True if we shouldn't play our audio (but still write it to any capturing
// streams).
bool mAudioCaptured;
// True if the media resource can be seeked. Accessed from the state
// machine and main threads. Synchronised via decoder monitor.
bool mSeekable;
@ -636,6 +662,12 @@ protected:
// True if we are decoding a realtime stream, like a camera stream
bool mRealTime;
// Record whether audio and video decoding were throttled during the
// previous iteration of DecodeLoop. When we transition from
// throttled to not-throttled we need to pump decoding.
bool mDidThrottleAudioDecoding;
bool mDidThrottleVideoDecoding;
// True if we've requested a new decode thread, but it has not yet been
// created. Synchronized by the decoder monitor.
bool mRequestedNewDecodeThread;

View file

@ -239,6 +239,7 @@ public:
/**
* An iterator that makes it easy to iterate through all streams that
* have a given resource ID and are not closed.
* Can be used on the main thread or while holding the media cache lock.
*/
class ResourceStreamIterator {
public:
@ -351,13 +352,14 @@ protected:
// This member is main-thread only. It's used to allocate unique
// resource IDs to streams.
PRInt64 mNextResourceID;
// The monitor protects all the data members here. Also, off-main-thread
// readers that need to block will Wait() on this monitor. When new
// data becomes available in the cache, we NotifyAll() on this monitor.
ReentrantMonitor mReentrantMonitor;
// This is only written while on the main thread and the monitor is held.
// Thus, it can be safely read from the main thread or while holding the monitor.
nsTArray<nsMediaCacheStream*> mStreams;
// The Blocks describing the cache entries.
nsTArray<Block> mIndex;
// Writer which performs IO, asynchronously writing cache blocks.
@ -1703,10 +1705,10 @@ nsMediaCacheStream::NotifyDataStarted(PRInt64 aOffset)
}
}
bool
nsMediaCacheStream::UpdatePrincipal(nsIPrincipal* aPrincipal)
{
return nsContentUtils::CombineResourcePrincipals(&mPrincipal, aPrincipal);
}
void
@ -1715,6 +1717,20 @@ nsMediaCacheStream::NotifyDataReceived(PRInt64 aSize, const char* aData,
{
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
// Update principals before putting the data in the cache. This is
// important: we want to make sure all principals are updated before any
// consumer can see the new data.
// We do this without holding the cache monitor, in case the client wants
// to do something that takes a lock.
{
nsMediaCache::ResourceStreamIterator iter(mResourceID);
while (nsMediaCacheStream* stream = iter.Next()) {
if (stream->UpdatePrincipal(aPrincipal)) {
stream->mClient->CacheClientNotifyPrincipalChanged();
}
}
}
ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
PRInt64 size = aSize;
const char* data = aData;
@ -1769,7 +1785,6 @@ nsMediaCacheStream::NotifyDataReceived(PRInt64 aSize, const char* aData,
// The stream is at least as long as what we've read
stream->mStreamLength = NS_MAX(stream->mStreamLength, mChannelOffset);
}
stream->mClient->CacheClientNotifyDataReceived();
}

View file

@ -230,10 +230,10 @@ public:
// aClient provides the underlying transport that cache will use to read
// data for this stream.
nsMediaCacheStream(ChannelMediaResource* aClient)
: mClient(aClient), mInitialized(false),
mHasHadUpdate(false),
mClosed(false),
mDidNotifyDataEnded(false), mResourceID(0),
mIsSeekable(false), mCacheSuspended(false),
mChannelEnded(false),
mChannelOffset(0), mStreamLength(-1),
@ -326,7 +326,8 @@ public:
// If we've successfully read data beyond the originally reported length,
// we return the end of the data we've read.
PRInt64 GetLength();
// Returns the unique resource ID. Call only on the main thread or while
// holding the media cache lock.
PRInt64 GetResourceID() { return mResourceID; }
// Returns the end of the bytes starting at the given offset
// which are in cache.
@ -459,15 +460,11 @@ private:
// blocked on reading from this stream.
void CloseInternal(ReentrantMonitorAutoEnter& aReentrantMonitor);
// Update mPrincipal given that data has been received from aPrincipal
bool UpdatePrincipal(nsIPrincipal* aPrincipal);
// These fields are main-thread-only.
ChannelMediaResource* mClient;
nsCOMPtr<nsIPrincipal> mPrincipal;
// Set to true when Init or InitAsClone has been called
bool mInitialized;
// Set to true when nsMediaCache::Update() has finished while this stream
@ -479,9 +476,14 @@ private:
// True if CacheClientNotifyDataEnded has been called for this stream.
bool mDidNotifyDataEnded;
// The following fields must be written holding the cache's monitor and
// only on the main thread, thus can be read either on the main thread
// or while holding the cache's monitor.
// This is a unique ID representing the resource we're loading.
// All streams with the same mResourceID are loading the same
// underlying resource and should share data.
PRInt64 mResourceID;
// The last reported seekability state for the underlying channel
bool mIsSeekable;
// True if the cache has suspended our channel because the cache is

View file

@ -41,6 +41,7 @@
#include "ImageLayers.h"
#include "mozilla/ReentrantMonitor.h"
#include "VideoFrameContainer.h"
#include "MediaStreamGraph.h"
class nsHTMLMediaElement;
class nsIStreamListener;
@ -69,13 +70,14 @@ static const PRUint32 FRAMEBUFFER_LENGTH_MAX = 16384;
class nsMediaDecoder : public nsIObserver
{
public:
typedef mozilla::MediaResource MediaResource;
typedef mozilla::ReentrantMonitor ReentrantMonitor;
typedef mozilla::SourceMediaStream SourceMediaStream;
typedef mozilla::TimeStamp TimeStamp;
typedef mozilla::TimeDuration TimeDuration;
typedef mozilla::VideoFrameContainer VideoFrameContainer;
typedef mozilla::layers::Image Image;
typedef mozilla::layers::ImageContainer ImageContainer;
nsMediaDecoder();
virtual ~nsMediaDecoder();
@ -129,6 +131,13 @@ public:
// Set the audio volume. It should be a value from 0 to 1.0.
virtual void SetVolume(double aVolume) = 0;
// Sets whether audio is being captured. If it is, we won't play any
// of our audio.
virtual void SetAudioCaptured(bool aCaptured) = 0;
// Add an output stream. All decoder output will be sent to the stream.
virtual void AddOutputStream(SourceMediaStream* aStream, bool aFinishWhenEnded) = 0;
// Start playback of a video. 'Load' must have previously been
// called.
virtual nsresult Play() = 0;
@ -331,6 +340,10 @@ public:
// the result from OnStopRequest.
virtual void NotifyDownloadEnded(nsresult aStatus) = 0;
// Called by MediaResource when the principal of the resource has
// changed. Called on main thread only.
virtual void NotifyPrincipalChanged() = 0;
// Called as data arrives on the stream and is read into the cache. Called
// on the main thread only.
virtual void NotifyDataArrived(const char* aBuffer, PRUint32 aLength, PRInt64 aOffset) = 0;

View file

@ -52,7 +52,7 @@
* @status UNDER_DEVELOPMENT
*/
[scriptable, uuid(32c54e30-5063-4e35-8fc9-890e50fed147)]
interface nsIDOMHTMLAudioElement : nsIDOMHTMLMediaElement
{
// Setup the audio stream for writing

View file

@ -40,6 +40,8 @@
#include "nsIDOMMediaError.idl"
#include "nsIDOMTimeRanges.idl"
interface nsIDOMMediaStream;
/**
* The nsIDOMHTMLMediaElement interface is an interface to be implemented by the HTML
* <audio> and <video> elements.
@ -57,7 +59,7 @@
#endif
%}
[scriptable, uuid(6b938133-a8c2-424a-9401-a631f74aeff5)]
interface nsIDOMHTMLMediaElement : nsIDOMHTMLElement
{
// error state
@ -105,6 +107,11 @@ interface nsIDOMHTMLMediaElement : nsIDOMHTMLElement
attribute boolean muted;
attribute boolean defaultMuted;
// Mozilla extension: stream capture
nsIDOMMediaStream mozCaptureStream();
nsIDOMMediaStream mozCaptureStreamUntilEnded();
readonly attribute boolean mozAudioCaptured;
// Mozilla extension: extra stream metadata information, used as part
// of MozAudioAvailable events and the mozWriteAudio() method. The
// mozFrameBufferLength method allows for the size of the framebuffer

View file

@ -48,7 +48,7 @@
* @status UNDER_DEVELOPMENT
*/
[scriptable, uuid(e43f61e3-9c67-4e78-8534-3399d7f192b9)]
interface nsIDOMHTMLVideoElement : nsIDOMHTMLMediaElement
{
attribute long width;