Bug 1514158 - Bring MediaRecorder to spec on new bitrate handling. r=bryce

This brings the MediaRecorder constructor and start() to spec in most respects,
except for mime type handling (bug 1512175). The flow is also restructured to
follow the spec's algorithms more closely.

Differential Revision: https://phabricator.services.mozilla.com/D41585

--HG--
extra : moz-landing-system : lando
Andreas Pehrson 2019-10-04 08:54:16 +00:00
Parent 180cc1d596
Commit 7bdf7c09ae
2 changed files with 425 additions and 315 deletions

View file

@ -43,6 +43,14 @@
mozilla::LazyLogModule gMediaRecorderLog("MediaRecorder");
#define LOG(type, msg) MOZ_LOG(gMediaRecorderLog, type, msg)
#define MIN_VIDEO_BITRATE_BPS 10e3 // 10kbps
#define DEFAULT_VIDEO_BITRATE_BPS 2500e3 // 2.5Mbps
#define MAX_VIDEO_BITRATE_BPS 100e6 // 100Mbps
#define MIN_AUDIO_BITRATE_BPS 500 // 500bps
#define DEFAULT_AUDIO_BITRATE_BPS 128e3 // 128kbps
#define MAX_AUDIO_BITRATE_BPS 512e3 // 512kbps
namespace mozilla {
namespace dom {
@ -141,7 +149,7 @@ NS_IMPL_CYCLE_COLLECTION_CLASS(MediaRecorder)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(MediaRecorder,
DOMEventTargetHelper)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDOMStream)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mStream)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mAudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSecurityDomException)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mUnknownDomException)
@ -150,7 +158,7 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(MediaRecorder,
DOMEventTargetHelper)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mDOMStream)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mStream)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mAudioNode)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mSecurityDomException)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mUnknownDomException)
@ -165,6 +173,81 @@ NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
NS_IMPL_ADDREF_INHERITED(MediaRecorder, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(MediaRecorder, DOMEventTargetHelper)
static bool PrincipalSubsumes(MediaRecorder* aRecorder,
nsIPrincipal* aPrincipal) {
if (!aRecorder->GetOwner()) {
return false;
}
nsCOMPtr<Document> doc = aRecorder->GetOwner()->GetExtantDoc();
if (!doc) {
return false;
}
if (!aPrincipal) {
return false;
}
bool subsumes;
if (NS_FAILED(doc->NodePrincipal()->Subsumes(aPrincipal, &subsumes))) {
return false;
}
return subsumes;
}
static bool MediaStreamTracksPrincipalSubsumes(
MediaRecorder* aRecorder,
const nsTArray<RefPtr<MediaStreamTrack>>& aTracks) {
nsCOMPtr<nsIPrincipal> principal = nullptr;
for (const auto& track : aTracks) {
nsContentUtils::CombineResourcePrincipals(&principal,
track->GetPrincipal());
}
return PrincipalSubsumes(aRecorder, principal);
}
static bool AudioNodePrincipalSubsumes(MediaRecorder* aRecorder,
AudioNode* aAudioNode) {
MOZ_ASSERT(aAudioNode);
Document* doc =
aAudioNode->GetOwner() ? aAudioNode->GetOwner()->GetExtantDoc() : nullptr;
nsCOMPtr<nsIPrincipal> principal = doc ? doc->NodePrincipal() : nullptr;
return PrincipalSubsumes(aRecorder, principal);
}
static void SelectBitrates(uint32_t aBitsPerSecond, uint8_t aNumVideoTracks,
uint32_t* aOutVideoBps, uint8_t aNumAudioTracks,
uint32_t* aOutAudioBps) {
uint32_t vbps = 0;
uint32_t abps = 0;
const uint32_t minVideoBps = MIN_VIDEO_BITRATE_BPS * aNumVideoTracks;
const uint32_t maxVideoBps = MAX_VIDEO_BITRATE_BPS * aNumVideoTracks;
const uint32_t minAudioBps = MIN_AUDIO_BITRATE_BPS * aNumAudioTracks;
const uint32_t maxAudioBps = MAX_AUDIO_BITRATE_BPS * aNumAudioTracks;
if (aNumVideoTracks == 0) {
MOZ_DIAGNOSTIC_ASSERT(aNumAudioTracks > 0);
abps = std::min(maxAudioBps, std::max(minAudioBps, aBitsPerSecond));
} else if (aNumAudioTracks == 0) {
vbps = std::min(maxVideoBps, std::max(minVideoBps, aBitsPerSecond));
} else {
// Scale the bits so that video gets 20 times the bits of audio.
// Since we must account for varying number of tracks of each type we weight
// them by type; video = weight 20, audio = weight 1.
const uint32_t videoWeight = aNumVideoTracks * 20;
const uint32_t audioWeight = aNumAudioTracks;
const uint32_t totalWeights = audioWeight + videoWeight;
const uint32_t videoBitrate =
uint64_t(aBitsPerSecond) * videoWeight / totalWeights;
const uint32_t audioBitrate =
uint64_t(aBitsPerSecond) * audioWeight / totalWeights;
vbps = std::min(maxVideoBps, std::max(minVideoBps, videoBitrate));
abps = std::min(maxAudioBps, std::max(minAudioBps, audioBitrate));
}
*aOutVideoBps = vbps;
*aOutAudioBps = abps;
}
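As an illustration (not part of this patch): a minimal standalone C++ sketch of the weighted split above, assuming one audio and one video track and a hypothetical 2.5 Mbps total budget. Both results fall inside their per-type [MIN, MAX] ranges, so no clamping applies.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t total = 2'500'000;  // hypothetical aBitsPerSecond
  const uint32_t videoWeight = 1 * 20;                      // one video track
  const uint32_t audioWeight = 1;                           // one audio track
  const uint32_t totalWeights = videoWeight + audioWeight;  // 21
  const uint32_t videoBitrate =
      uint64_t(total) * videoWeight / totalWeights;  // 2'380'952 bps
  const uint32_t audioBitrate =
      uint64_t(total) * audioWeight / totalWeights;  // 119'047 bps
  std::printf("video=%u audio=%u\n", videoBitrate, audioBitrate);
  return 0;
}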
/**
* Session is an object to represent a single recording event.
* In original design, all recording context is stored in MediaRecorder, which
@ -308,11 +391,16 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
};
public:
Session(MediaRecorder* aRecorder, uint32_t aTimeSlice)
Session(MediaRecorder* aRecorder,
nsTArray<RefPtr<MediaStreamTrack>> aMediaStreamTracks,
TimeDuration aTimeslice, uint32_t aVideoBitsPerSecond,
uint32_t aAudioBitsPerSecond)
: mRecorder(aRecorder),
mMediaStreamReady(false),
mMediaStreamTracks(std::move(aMediaStreamTracks)),
mMainThread(mRecorder->GetOwner()->EventTargetFor(TaskCategory::Other)),
mTimeSlice(aTimeSlice),
mTimeslice(aTimeslice),
mVideoBitsPerSecond(aVideoBitsPerSecond),
mAudioBitsPerSecond(aAudioBitsPerSecond),
mStartTime(TimeStamp::Now()),
mRunningState(RunningState::Idling) {
MOZ_ASSERT(NS_IsMainThread());
@ -327,7 +415,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
void PrincipalChanged(MediaStreamTrack* aTrack) override {
NS_ASSERTION(mMediaStreamTracks.Contains(aTrack),
"Principal changed for unrecorded track");
if (!MediaStreamTracksPrincipalSubsumes()) {
if (!MediaStreamTracksPrincipalSubsumes(mRecorder, mMediaStreamTracks)) {
DoSessionEndTask(NS_ERROR_DOM_SECURITY_ERR);
}
}
@ -336,22 +424,10 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
LOG(LogLevel::Warning,
("Session.NotifyTrackAdded %p Raising error due to track set change",
this));
if (mMediaStreamReady) {
DoSessionEndTask(NS_ERROR_ABORT);
}
NS_DispatchToMainThread(
NewRunnableMethod("MediaRecorder::Session::MediaStreamReady", this,
&Session::MediaStreamReady));
return;
DoSessionEndTask(NS_ERROR_ABORT);
}
void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override {
if (!mMediaStreamReady) {
// We haven't chosen the track set to record yet.
return;
}
if (aTrack->Ended()) {
// TrackEncoder will pickup tracks that end itself.
return;
@ -372,32 +448,51 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
LOG(LogLevel::Debug, ("Session.Start %p", this));
MOZ_ASSERT(NS_IsMainThread());
DOMMediaStream* domStream = mRecorder->Stream();
if (domStream) {
// The callback reports back when tracks are available and can be
// attached to MediaEncoder. This allows `recorder.start()` before any
// tracks are available. We have supported this historically and have
// mochitests assuming this behavior.
mMediaStream = domStream;
if (mRecorder->mStream) {
// The TrackListener reports back when tracks are added or removed from
// the MediaStream.
mMediaStream = mRecorder->mStream;
mMediaStream->RegisterTrackListener(this);
nsTArray<RefPtr<MediaStreamTrack>> tracks(2);
mMediaStream->GetTracks(tracks);
for (const auto& track : tracks) {
// Notify of existing tracks, as the stream doesn't do this by itself.
NotifyTrackAdded(track);
uint8_t trackTypes = 0;
int32_t audioTracks = 0;
int32_t videoTracks = 0;
for (const auto& track : mMediaStreamTracks) {
if (track->AsAudioStreamTrack()) {
++audioTracks;
trackTypes |= ContainerWriter::CREATE_AUDIO_TRACK;
} else if (track->AsVideoStreamTrack()) {
++videoTracks;
trackTypes |= ContainerWriter::CREATE_VIDEO_TRACK;
} else {
MOZ_CRASH("Unexpected track type");
}
}
if (audioTracks > 1 || videoTracks > 1) {
// When MediaRecorder supports multiple tracks, we should set up a
// single MediaInputPort from the input stream, and let main thread
// check track principals async later.
nsPIDOMWindowInner* window = mRecorder->GetOwner();
Document* document = window ? window->GetExtantDoc() : nullptr;
nsContentUtils::ReportToConsole(nsIScriptError::errorFlag,
NS_LITERAL_CSTRING("Media"), document,
nsContentUtils::eDOM_PROPERTIES,
"MediaRecorderMultiTracksNotSupported");
DoSessionEndTask(NS_ERROR_ABORT);
return;
}
for (const auto& t : mMediaStreamTracks) {
t->AddPrincipalChangeObserver(this);
}
LOG(LogLevel::Debug, ("Session.Start track types = (%d)", trackTypes));
InitEncoder(trackTypes, mMediaStreamTracks[0]->Graph()->GraphRate());
return;
}
if (mRecorder->mAudioNode) {
// Check that we may access the audio node's content.
if (!AudioNodePrincipalSubsumes()) {
LOG(LogLevel::Warning,
("Session.Start AudioNode principal check failed"));
DoSessionEndTask(NS_ERROR_DOM_SECURITY_ERR);
return;
}
TrackRate trackRate =
mRecorder->mAudioNode->Context()->Graph()->GraphRate();
@ -424,8 +519,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
}
{
auto tracks(std::move(mMediaStreamTracks));
for (RefPtr<MediaStreamTrack>& track : tracks) {
for (const auto& track : mMediaStreamTracks) {
track->RemovePrincipalChangeObserver(this);
}
}
@ -618,8 +712,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
// Whether push encoded data back to onDataAvailable automatically or we
// need a flush.
bool pushBlob = aForceFlush;
if (!pushBlob && mTimeSlice > 0 &&
(TimeStamp::Now() - mLastBlobTimeStamp).ToMilliseconds() > mTimeSlice) {
if (!pushBlob && (TimeStamp::Now() - mLastBlobTimeStamp) > mTimeslice) {
pushBlob = true;
}
if (pushBlob) {
@ -644,129 +737,6 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
}
}
void MediaStreamReady() {
if (!mMediaStream) {
// Already shut down. This can happen because MediaStreamReady is async.
return;
}
if (mMediaStreamReady) {
return;
}
if (!mRunningState.isOk() ||
mRunningState.inspect() != RunningState::Idling) {
return;
}
nsTArray<RefPtr<mozilla::dom::MediaStreamTrack>> tracks;
mMediaStream->GetTracks(tracks);
uint8_t trackTypes = 0;
int32_t audioTracks = 0;
int32_t videoTracks = 0;
for (auto& track : tracks) {
if (track->Ended()) {
continue;
}
ConnectMediaStreamTrack(*track);
if (track->AsAudioStreamTrack()) {
++audioTracks;
trackTypes |= ContainerWriter::CREATE_AUDIO_TRACK;
} else if (track->AsVideoStreamTrack()) {
++videoTracks;
trackTypes |= ContainerWriter::CREATE_VIDEO_TRACK;
} else {
MOZ_CRASH("Unexpected track type");
}
}
if (trackTypes == 0) {
MOZ_ASSERT(audioTracks == 0);
MOZ_ASSERT(videoTracks == 0);
return;
}
mMediaStreamReady = true;
if (audioTracks > 1 || videoTracks > 1) {
// When MediaRecorder supports multiple tracks, we should set up a single
// MediaInputPort from the input stream, and let main thread check
// track principals async later.
nsPIDOMWindowInner* window = mRecorder->GetOwner();
Document* document = window ? window->GetExtantDoc() : nullptr;
nsContentUtils::ReportToConsole(nsIScriptError::errorFlag,
NS_LITERAL_CSTRING("Media"), document,
nsContentUtils::eDOM_PROPERTIES,
"MediaRecorderMultiTracksNotSupported");
DoSessionEndTask(NS_ERROR_ABORT);
return;
}
// Check that we may access the tracks' content.
if (!MediaStreamTracksPrincipalSubsumes()) {
LOG(LogLevel::Warning, ("Session.MediaTracksReady MediaStreamTracks "
"principal check failed"));
DoSessionEndTask(NS_ERROR_DOM_SECURITY_ERR);
return;
}
LOG(LogLevel::Debug,
("Session.MediaTracksReady track type = (%d)", trackTypes));
InitEncoder(trackTypes, mMediaStreamTracks[0]->Graph()->GraphRate());
}
void ConnectMediaStreamTrack(MediaStreamTrack& aTrack) {
for (auto& track : mMediaStreamTracks) {
if (track->AsAudioStreamTrack() && aTrack.AsAudioStreamTrack()) {
// We only allow one audio track. See bug 1276928.
return;
}
if (track->AsVideoStreamTrack() && aTrack.AsVideoStreamTrack()) {
// We only allow one video track. See bug 1276928.
return;
}
}
mMediaStreamTracks.AppendElement(&aTrack);
aTrack.AddPrincipalChangeObserver(this);
}
bool PrincipalSubsumes(nsIPrincipal* aPrincipal) {
if (!mRecorder->GetOwner()) return false;
nsCOMPtr<Document> doc = mRecorder->GetOwner()->GetExtantDoc();
if (!doc) {
return false;
}
if (!aPrincipal) {
return false;
}
bool subsumes;
if (NS_FAILED(doc->NodePrincipal()->Subsumes(aPrincipal, &subsumes))) {
return false;
}
return subsumes;
}
bool MediaStreamTracksPrincipalSubsumes() {
MOZ_ASSERT(mRecorder->mDOMStream);
nsCOMPtr<nsIPrincipal> principal = nullptr;
for (RefPtr<MediaStreamTrack>& track : mMediaStreamTracks) {
nsContentUtils::CombineResourcePrincipals(&principal,
track->GetPrincipal());
}
return PrincipalSubsumes(principal);
}
bool AudioNodePrincipalSubsumes() {
MOZ_ASSERT(mRecorder->mAudioNode);
Document* doc = mRecorder->mAudioNode->GetOwner()
? mRecorder->mAudioNode->GetOwner()->GetExtantDoc()
: nullptr;
nsCOMPtr<nsIPrincipal> principal = doc ? doc->NodePrincipal() : nullptr;
return PrincipalSubsumes(principal);
}
void InitEncoder(uint8_t aTrackTypes, TrackRate aTrackRate) {
LOG(LogLevel::Debug, ("Session.InitEncoder %p", this));
MOZ_ASSERT(NS_IsMainThread());
@ -835,40 +805,9 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
gSessions.PutEntry(this);
uint32_t audioBitrate = mRecorder->AudioBitsPerSecond();
uint32_t videoBitrate = mRecorder->VideoBitsPerSecond();
uint32_t bitrate = mRecorder->BitsPerSecond();
if (bitrate > 0) {
// There's a total cap set. We have to make sure the type-specific limits
// are within range.
if ((aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) &&
(aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK) &&
audioBitrate + videoBitrate > bitrate) {
LOG(LogLevel::Info, ("Session.InitEncoder Bitrates higher than total "
"cap. Recalculating."));
double factor =
bitrate / static_cast<double>(audioBitrate + videoBitrate);
audioBitrate = static_cast<uint32_t>(audioBitrate * factor);
videoBitrate = static_cast<uint32_t>(videoBitrate * factor);
} else if ((aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) &&
!(aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK)) {
audioBitrate = std::min(audioBitrate, bitrate);
videoBitrate = 0;
} else if (!(aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) &&
(aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK)) {
audioBitrate = 0;
videoBitrate = std::min(videoBitrate, bitrate);
}
MOZ_ASSERT(audioBitrate + videoBitrate <= bitrate);
}
// Allocate encoder and bind with union stream.
// At this stage, the API doesn't allow UA to choose the output mimeType
// format.
mEncoder =
MediaEncoder::CreateEncoder(mEncoderThread, mMimeType, audioBitrate,
videoBitrate, aTrackTypes, aTrackRate);
mEncoder = MediaEncoder::CreateEncoder(
mEncoderThread, mMimeType, mAudioBitsPerSecond, mVideoBitsPerSecond,
aTrackTypes, aTrackRate);
if (!mEncoder) {
LOG(LogLevel::Error, ("Session.InitEncoder !mEncoder %p", this));
@ -889,13 +828,16 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
mRecorder->mAudioNodeOutput);
}
for (auto& track : mMediaStreamTracks) {
for (const auto& track : mMediaStreamTracks) {
mEncoder->ConnectMediaStreamTrack(track);
}
// If user defines timeslice interval for video blobs we have to set
// appropriate video keyframe interval defined in milliseconds.
mEncoder->SetVideoKeyFrameInterval(mTimeSlice);
// If a timeslice is defined we set an appropriate video keyframe interval.
// This allows users to get blobs regularly when the timeslice interval is
// shorter than the default key frame interval, as we'd normally wait for a
// key frame before sending data to the blob.
mEncoder->SetVideoKeyFrameInterval(
std::max(TimeDuration::FromSeconds(1), mTimeslice).ToMilliseconds());
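As an aside (not part of this patch), the clamp above simply means the key frame interval follows the timeslice but never drops below one second. A hypothetical standalone sketch on plain milliseconds rather than TimeDuration:

#include <algorithm>
#include <cstdio>

// Mirrors the clamp above: key frame interval = max(1 s, timeslice).
static double KeyFrameIntervalMs(double timesliceMs) {
  return std::max(1000.0, timesliceMs);
}

int main() {
  std::printf("%.0f\n", KeyFrameIntervalMs(250));   // 1000: short timeslice is raised
  std::printf("%.0f\n", KeyFrameIntervalMs(5000));  // 5000: longer timeslice wins
  return 0;
}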
// Set mRunningState to Running so that DoSessionEndTask will
// take the responsibility to end the session.
@ -942,7 +884,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
if (mRecorder->mSessions.LastElement() == this) {
// Set state to inactive, but only if the recorder is not
// controlled by another session already.
mRecorder->ForceInactive();
mRecorder->Inactivate();
}
if (needsStartEvent) {
@ -1144,9 +1086,6 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
// Stream currently recorded.
RefPtr<DOMMediaStream> mMediaStream;
// True after we have decided on the track set to use for the recording.
bool mMediaStreamReady;
// Tracks currently recorded. This should be a subset of mMediaStream's track
// set.
nsTArray<RefPtr<MediaStreamTrack>> mMediaStreamTracks;
@ -1174,7 +1113,11 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
TimeStamp mLastBlobTimeStamp;
// The interval of passing encoded data from MutableBlobStorage to
// onDataAvailable handler.
const uint32_t mTimeSlice;
const TimeDuration mTimeslice;
// The video bitrate the recorder was configured with.
const uint32_t mVideoBitsPerSecond;
// The audio bitrate the recorder was configured with.
const uint32_t mAudioBitsPerSecond;
// The time this session started, for telemetry.
const TimeStamp mStartTime;
// The session's current main thread state. The error type gets set when
@ -1188,32 +1131,9 @@ MediaRecorder::~MediaRecorder() {
UnRegisterActivityObserver();
}
MediaRecorder::MediaRecorder(DOMMediaStream& aSourceMediaTrack,
nsPIDOMWindowInner* aOwnerWindow)
: DOMEventTargetHelper(aOwnerWindow),
mAudioNodeOutput(0),
mState(RecordingState::Inactive),
mAudioBitsPerSecond(0),
mVideoBitsPerSecond(0),
mBitsPerSecond(0) {
MediaRecorder::MediaRecorder(nsPIDOMWindowInner* aOwnerWindow)
: DOMEventTargetHelper(aOwnerWindow) {
MOZ_ASSERT(aOwnerWindow);
mDOMStream = &aSourceMediaTrack;
RegisterActivityObserver();
}
MediaRecorder::MediaRecorder(AudioNode& aSrcAudioNode, uint32_t aSrcOutput,
nsPIDOMWindowInner* aOwnerWindow)
: DOMEventTargetHelper(aOwnerWindow),
mAudioNodeOutput(aSrcOutput),
mState(RecordingState::Inactive),
mAudioBitsPerSecond(0),
mVideoBitsPerSecond(0),
mBitsPerSecond(0) {
MOZ_ASSERT(aOwnerWindow);
mAudioNode = &aSrcAudioNode;
RegisterActivityObserver();
}
@ -1240,42 +1160,133 @@ void MediaRecorder::SetMimeType(const nsString& aMimeType) {
void MediaRecorder::GetMimeType(nsString& aMimeType) { aMimeType = mMimeType; }
void MediaRecorder::Start(const Optional<uint32_t>& aTimeSlice,
void MediaRecorder::Start(const Optional<uint32_t>& aTimeslice,
ErrorResult& aResult) {
LOG(LogLevel::Debug, ("MediaRecorder.Start %p", this));
InitializeDomExceptions();
// When a MediaRecorder object's start() method is invoked, the UA MUST run
// the following steps:
// 1. Let recorder be the MediaRecorder object on which the method was
// invoked.
// 2. Let timeslice be the method's first argument, if provided, or undefined.
TimeDuration timeslice =
aTimeslice.WasPassed()
? TimeDuration::FromMilliseconds(aTimeslice.Value())
: TimeDuration::Forever();
// 3. Let stream be the value of recorder's stream attribute.
// 4. Let tracks be the set of live tracks in stream's track set.
nsTArray<RefPtr<MediaStreamTrack>> tracks;
if (mStream) {
mStream->GetTracks(tracks);
}
for (const auto& t : nsTArray<RefPtr<MediaStreamTrack>>(tracks)) {
if (t->Ended()) {
tracks.RemoveElement(t);
}
}
// 5. If the value of recorder's state attribute is not inactive, throw an
// InvalidStateError DOMException and abort these steps.
if (mState != RecordingState::Inactive) {
aResult.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return;
}
nsTArray<RefPtr<MediaStreamTrack>> tracks;
if (mDOMStream) {
mDOMStream->GetTracks(tracks);
}
if (!tracks.IsEmpty()) {
// If there are tracks already available that we're not allowed
// to record, we should throw a security error.
RefPtr<nsIPrincipal> streamPrincipal = mDOMStream->GetPrincipal();
bool subsumes = false;
nsPIDOMWindowInner* window;
Document* doc;
if (!(window = GetOwner()) || !(doc = window->GetExtantDoc()) ||
NS_FAILED(doc->NodePrincipal()->Subsumes(streamPrincipal, &subsumes)) ||
!subsumes) {
// 6. If the isolation properties of stream disallow access from recorder,
// throw a SecurityError DOMException and abort these steps.
if (mStream) {
RefPtr<nsIPrincipal> streamPrincipal = mStream->GetPrincipal();
if (!PrincipalSubsumes(this, streamPrincipal)) {
aResult.Throw(NS_ERROR_DOM_SECURITY_ERR);
return;
}
}
if (mAudioNode && !AudioNodePrincipalSubsumes(this, mAudioNode)) {
LOG(LogLevel::Warning,
("MediaRecorder %p Start AudioNode principal check failed", this));
aResult.Throw(NS_ERROR_DOM_SECURITY_ERR);
return;
}
uint32_t timeSlice = aTimeSlice.WasPassed() ? aTimeSlice.Value() : 0;
MediaRecorderReporter::AddMediaRecorder(this);
// 7. If stream is inactive, throw a NotSupportedError DOMException and abort
// these steps.
if (mStream && !mStream->Active()) {
aResult.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return;
}
// --- these are not yet implemented to spec
// 8. If the [[ConstrainedMimeType]] slot specifies a media type, container,
// or codec, then run the following sub steps:
// 1. Constrain the configuration of recorder to the media type, container,
// and codec specified in the [[ConstrainedMimeType]] slot.
// 2. For each track in tracks, if the User Agent cannot record the track
// using the current configuration, then throw a NotSupportedError
// DOMException and abort all steps.
// 9. Let extendedMimeType be the value of recorder's [[ConstrainedMimeType]]
// slot.
// 10. Modify extendedMimeType by adding media type, subtype and codecs
// parameter reflecting the configuration used by the MediaRecorder to
// record all tracks in tracks, if not already present. This MAY include
// the profiles parameter [RFC6381] or further codec-specific parameters.
// 11. Set recorder's mimeType attribute to extendedMimeType.
// ---
// 12. If recorder's [[ConstrainedBitsPerSecond]] slot is not undefined, set
// recorder's videoBitsPerSecond and audioBitsPerSecond attributes to
// values the User Agent deems reasonable for the respective media types,
// for recording all tracks in tracks, such that the sum of
// videoBitsPerSecond and audioBitsPerSecond is close to the value of
// recorder's [[ConstrainedBitsPerSecond]] slot.
uint8_t numVideoTracks = 0;
uint8_t numAudioTracks = 0;
for (const auto& t : tracks) {
if (t->AsVideoStreamTrack() && numVideoTracks < UINT8_MAX) {
++numVideoTracks;
} else if (t->AsAudioStreamTrack() && numAudioTracks < UINT8_MAX) {
++numAudioTracks;
}
}
if (mAudioNode) {
MOZ_DIAGNOSTIC_ASSERT(!mStream);
++numAudioTracks;
}
if (mConstrainedBitsPerSecond) {
SelectBitrates(*mConstrainedBitsPerSecond, numVideoTracks,
&mVideoBitsPerSecond, numAudioTracks, &mAudioBitsPerSecond);
}
// 13. Let videoBitrate be the value of recorder's videoBitsPerSecond
// attribute, and constrain the configuration of recorder to target an
// aggregate bitrate of videoBitrate bits per second for all video tracks
// recorder will be recording. videoBitrate is a hint for the encoder and
// the value might be surpassed, not achieved, or only be achieved over a
// long period of time.
const uint32_t videoBitrate = mVideoBitsPerSecond;
// 14. Let audioBitrate be the value of recorder's audioBitsPerSecond
// attribute, and constrain the configuration of recorder to target an
// aggregate bitrate of audioBitrate bits per second for all audio tracks
// recorder will be recording. audioBitrate is a hint for the encoder and
// the value might be surpassed, not achieved, or only be achieved over a
// long period of time.
const uint32_t audioBitrate = mAudioBitsPerSecond;
// 15. Set recorder's state to recording
mState = RecordingState::Recording;
MediaRecorderReporter::AddMediaRecorder(this);
// Start a session.
mSessions.AppendElement();
mSessions.LastElement() = new Session(this, timeSlice);
mSessions.LastElement() = new Session(this, std::move(tracks), timeslice,
videoBitrate, audioBitrate);
mSessions.LastElement()->Start();
}
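One consequence of steps 12-14 above, shown with hypothetical numbers (not part of this patch): at start() time the constrained total is re-split using the actual live track counts rather than the constructor-time assumption of one track of each kind, so an audio-only recording gets the whole budget, clamped to the audio range.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Audio-only stream started with a hypothetical options.bitsPerSecond of 1'000'000.
  const uint32_t minAudioBps = 500;      // MIN_AUDIO_BITRATE_BPS
  const uint32_t maxAudioBps = 512'000;  // MAX_AUDIO_BITRATE_BPS
  const uint32_t requested = 1'000'000;
  const uint32_t abps = std::min(maxAudioBps, std::max(minAudioBps, requested));
  std::printf("audioBitsPerSecond=%u videoBitsPerSecond=0\n", abps);  // 512000
  return 0;
}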
@ -1285,7 +1296,7 @@ void MediaRecorder::Stop(ErrorResult& aResult) {
if (mState == RecordingState::Inactive) {
return;
}
mState = RecordingState::Inactive;
Inactivate();
MOZ_ASSERT(mSessions.Length() > 0);
mSessions.LastElement()->Stop();
}
@ -1349,7 +1360,7 @@ JSObject* MediaRecorder::WrapObject(JSContext* aCx,
/* static */
already_AddRefed<MediaRecorder> MediaRecorder::Constructor(
const GlobalObject& aGlobal, DOMMediaStream& aStream,
const MediaRecorderOptions& aInitDict, ErrorResult& aRv) {
const MediaRecorderOptions& aOptions, ErrorResult& aRv) {
nsCOMPtr<nsPIDOMWindowInner> ownerWindow =
do_QueryInterface(aGlobal.GetAsSupports());
if (!ownerWindow) {
@ -1357,20 +1368,75 @@ already_AddRefed<MediaRecorder> MediaRecorder::Constructor(
return nullptr;
}
if (!IsTypeSupported(aInitDict.mMimeType)) {
// When the MediaRecorder() constructor is invoked, the User Agent MUST run
// the following steps:
// 1. Let stream be the constructor's first argument.
// 2. Let options be the constructor's second argument.
// 3. If invoking is type supported with options' mimeType member as its
// argument returns false, throw a NotSupportedError DOMException and abort
// these steps.
if (!IsTypeSupported(aOptions.mMimeType)) {
aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return nullptr;
}
RefPtr<MediaRecorder> object = new MediaRecorder(aStream, ownerWindow);
object->SetOptions(aInitDict);
return object.forget();
// 4. Let recorder be a newly constructed MediaRecorder object.
RefPtr<MediaRecorder> recorder = new MediaRecorder(ownerWindow);
// MimeType handling (points 5 and 8) not yet implemented to spec.
recorder->SetMimeType(aOptions.mMimeType);
// 6. Let recorder have a [[ConstrainedBitsPerSecond]] internal slot,
// initialized to the value of options' bitsPerSecond member, if it is
// present, otherwise undefined.
recorder->mConstrainedBitsPerSecond =
aOptions.mBitsPerSecond.WasPassed()
? Some(aOptions.mBitsPerSecond.Value())
: Nothing();
// 7. Initialize recorder's stream attribute to stream.
recorder->mStream = &aStream;
// 9. Initialize recorder's state attribute to inactive.
recorder->mState = RecordingState::Inactive;
// 10. Initialize recorder's videoBitsPerSecond attribute to the value of
// options' videoBitsPerSecond member, if it is present. Otherwise, choose
// a target value the User Agent deems reasonable for video.
recorder->mVideoBitsPerSecond = aOptions.mVideoBitsPerSecond.WasPassed()
? aOptions.mVideoBitsPerSecond.Value()
: DEFAULT_VIDEO_BITRATE_BPS;
// 11. Initialize recorder's audioBitsPerSecond attribute to the value of
// options' audioBitsPerSecond member, if it is present. Otherwise, choose
// a target value the User Agent deems reasonable for audio.
recorder->mAudioBitsPerSecond = aOptions.mAudioBitsPerSecond.WasPassed()
? aOptions.mAudioBitsPerSecond.Value()
: DEFAULT_AUDIO_BITRATE_BPS;
// 12. If recorder's [[ConstrainedBitsPerSecond]] slot is not undefined, set
// recorder's videoBitsPerSecond and audioBitsPerSecond attributes to
// values the User Agent deems reasonable for the respective media types,
// such that the sum of videoBitsPerSecond and audioBitsPerSecond is close
// to the value of recorder's [[ConstrainedBitsPerSecond]] slot.
if (recorder->mConstrainedBitsPerSecond) {
SelectBitrates(*recorder->mConstrainedBitsPerSecond, 1,
&recorder->mVideoBitsPerSecond, 1,
&recorder->mAudioBitsPerSecond);
}
// 13. Return recorder.
return recorder.forget();
}
/* static */
already_AddRefed<MediaRecorder> MediaRecorder::Constructor(
const GlobalObject& aGlobal, AudioNode& aSrcAudioNode, uint32_t aSrcOutput,
const MediaRecorderOptions& aInitDict, ErrorResult& aRv) {
const GlobalObject& aGlobal, AudioNode& aAudioNode,
uint32_t aAudioNodeOutput, const MediaRecorderOptions& aOptions,
ErrorResult& aRv) {
// Allow recording from audio node only when pref is on.
if (!Preferences::GetBool("media.recorder.audio_node.enabled", false)) {
// Pretending that this constructor is not defined.
@ -1387,47 +1453,80 @@ already_AddRefed<MediaRecorder> MediaRecorder::Constructor(
return nullptr;
}
// aSrcOutput doesn't matter to destination node because it has no output.
if (aSrcAudioNode.NumberOfOutputs() > 0 &&
aSrcOutput >= aSrcAudioNode.NumberOfOutputs()) {
// aAudioNodeOutput doesn't matter to destination node because it has no
// output.
if (aAudioNode.NumberOfOutputs() > 0 &&
aAudioNodeOutput >= aAudioNode.NumberOfOutputs()) {
aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
return nullptr;
}
if (!IsTypeSupported(aInitDict.mMimeType)) {
// When the MediaRecorder() constructor is invoked, the User Agent MUST run
// the following steps:
// 1. Let stream be the constructor's first argument. (we'll let audioNode be
// the first arg, and audioNodeOutput the second)
// 2. Let options be the constructor's second argument. (we'll let options be
// the third arg)
// 3. If invoking is type supported with options' mimeType member as its
// argument returns false, throw a NotSupportedError DOMException and abort
// these steps.
if (!IsTypeSupported(aOptions.mMimeType)) {
aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return nullptr;
}
RefPtr<MediaRecorder> object =
new MediaRecorder(aSrcAudioNode, aSrcOutput, ownerWindow);
object->SetOptions(aInitDict);
return object.forget();
}
// 4. Let recorder be a newly constructed MediaRecorder object.
RefPtr<MediaRecorder> recorder = new MediaRecorder(ownerWindow);
void MediaRecorder::SetOptions(const MediaRecorderOptions& aInitDict) {
SetMimeType(aInitDict.mMimeType);
mAudioBitsPerSecond = aInitDict.mAudioBitsPerSecond.WasPassed()
? aInitDict.mAudioBitsPerSecond.Value()
: 0;
mVideoBitsPerSecond = aInitDict.mVideoBitsPerSecond.WasPassed()
? aInitDict.mVideoBitsPerSecond.Value()
: 0;
mBitsPerSecond = aInitDict.mBitsPerSecond.WasPassed()
? aInitDict.mBitsPerSecond.Value()
: 0;
// We're not handling dynamic changes yet. Eventually we'll handle
// setting audio, video and/or total -- and anything that isn't set,
// we'll derive. Calculated versions require querying bitrates after
// the encoder is Init()ed. This happens only after data is
// available and thus requires dynamic changes.
//
// Until dynamic changes are supported, I prefer to be safe and err
// slightly high
if (aInitDict.mBitsPerSecond.WasPassed() &&
!aInitDict.mVideoBitsPerSecond.WasPassed()) {
mVideoBitsPerSecond = mBitsPerSecond;
// MimeType handling (points 5 and 8) not yet implemented to spec.
recorder->SetMimeType(aOptions.mMimeType);
// 6. Let recorder have a [[ConstrainedBitsPerSecond]] internal slot,
// initialized to the value of options' bitsPerSecond member, if it is
// present, otherwise undefined.
recorder->mConstrainedBitsPerSecond =
aOptions.mBitsPerSecond.WasPassed()
? Some(aOptions.mBitsPerSecond.Value())
: Nothing();
// 7. Initialize recorder's stream attribute to stream. (make that the
// audioNode and audioNodeOutput equivalents)
recorder->mAudioNode = &aAudioNode;
recorder->mAudioNodeOutput = aAudioNodeOutput;
// 9. Initialize recorder's state attribute to inactive.
recorder->mState = RecordingState::Inactive;
// 10. Initialize recorder's videoBitsPerSecond attribute to the value of
// options' videoBitsPerSecond member, if it is present. Otherwise, choose
// a target value the User Agent deems reasonable for video.
recorder->mVideoBitsPerSecond = aOptions.mVideoBitsPerSecond.WasPassed()
? aOptions.mVideoBitsPerSecond.Value()
: DEFAULT_VIDEO_BITRATE_BPS;
// 11. Initialize recorder's audioBitsPerSecond attribute to the value of
// options' audioBitsPerSecond member, if it is present. Otherwise, choose
// a target value the User Agent deems reasonable for audio.
recorder->mAudioBitsPerSecond = aOptions.mAudioBitsPerSecond.WasPassed()
? aOptions.mAudioBitsPerSecond.Value()
: DEFAULT_AUDIO_BITRATE_BPS;
// 12. If recorder's [[ConstrainedBitsPerSecond]] slot is not undefined, set
// recorder's videoBitsPerSecond and audioBitsPerSecond attributes to
// values the User Agent deems reasonable for the respective media types,
// such that the sum of videoBitsPerSecond and audioBitsPerSecond is close
// to the value of recorders [[ConstrainedBitsPerSecond]] slot.
if (recorder->mConstrainedBitsPerSecond) {
SelectBitrates(*recorder->mConstrainedBitsPerSecond, 1,
&recorder->mVideoBitsPerSecond, 1,
&recorder->mAudioBitsPerSecond);
}
// 13. Return recorder.
return recorder.forget();
}
static char const* const gWebMVideoEncoderCodecs[4] = {
@ -1616,9 +1715,27 @@ void MediaRecorder::NotifyOwnerDocumentActivityChanged() {
}
}
void MediaRecorder::ForceInactive() {
LOG(LogLevel::Debug, ("MediaRecorder.ForceInactive %p", this));
void MediaRecorder::Inactivate() {
LOG(LogLevel::Debug, ("MediaRecorder.Inactivate %p", this));
// The Inactivate the recorder algorithm given a recorder, is as follows:
// --- Not implemented
// 1. Set recorder's mimeType attribute to the value of the
// [[ConstrainedMimeType]] slot.
// ---
// 2. Set recorder's state attribute to inactive.
mState = RecordingState::Inactive;
// 3. If recorder's [[ConstrainedBitsPerSecond]] slot is not undefined, set
// recorder's videoBitsPerSecond and audioBitsPerSecond attributes to
// values the User Agent deems reasonable for the respective media types,
// such that the sum of videoBitsPerSecond and audioBitsPerSecond is close
// to the value of recorder's [[ConstrainedBitsPerSecond]] slot.
if (mConstrainedBitsPerSecond) {
SelectBitrates(*mConstrainedBitsPerSecond, 1, &mVideoBitsPerSecond, 1,
&mAudioBitsPerSecond);
}
}
void MediaRecorder::InitializeDomExceptions() {

View file

@ -49,10 +49,7 @@ class MediaRecorder final : public DOMEventTargetHelper,
public:
class Session;
MediaRecorder(DOMMediaStream& aSourceMediaTrack,
nsPIDOMWindowInner* aOwnerWindow);
MediaRecorder(AudioNode& aSrcAudioNode, uint32_t aSrcOutput,
nsPIDOMWindowInner* aOwnerWindow);
explicit MediaRecorder(nsPIDOMWindowInner* aOwnerWindow);
static nsTArray<RefPtr<Session>> GetSessions();
@ -79,7 +76,7 @@ class MediaRecorder final : public DOMEventTargetHelper,
// Extract encoded data Blob from MutableBlobStorage.
void RequestData(ErrorResult& aResult);
// Return the DOMMediaStream passed from UA.
DOMMediaStream* Stream() const { return mDOMStream; }
DOMMediaStream* Stream() const { return mStream; }
// Return the current encoding MIME type selected by the MediaEncoder.
void GetMimeType(nsString& aMimeType);
// The current state of the MediaRecorder object.
@ -91,11 +88,11 @@ class MediaRecorder final : public DOMEventTargetHelper,
// Construct a recorder with a DOM media stream object as its source.
static already_AddRefed<MediaRecorder> Constructor(
const GlobalObject& aGlobal, DOMMediaStream& aStream,
const MediaRecorderOptions& aInitDict, ErrorResult& aRv);
const MediaRecorderOptions& aOptions, ErrorResult& aRv);
// Construct a recorder with a Web Audio destination node as its source.
static already_AddRefed<MediaRecorder> Constructor(
const GlobalObject& aGlobal, AudioNode& aSrcAudioNode,
uint32_t aSrcOutput, const MediaRecorderOptions& aInitDict,
const GlobalObject& aGlobal, AudioNode& aAudioNode,
uint32_t aAudioNodeOutput, const MediaRecorderOptions& aOptions,
ErrorResult& aRv);
/*
@ -115,9 +112,8 @@ class MediaRecorder final : public DOMEventTargetHelper,
NS_DECL_NSIDOCUMENTACTIVITY
uint32_t AudioBitsPerSecond() { return mAudioBitsPerSecond; }
uint32_t VideoBitsPerSecond() { return mVideoBitsPerSecond; }
uint32_t BitsPerSecond() { return mBitsPerSecond; }
uint32_t AudioBitsPerSecond() const { return mAudioBitsPerSecond; }
uint32_t VideoBitsPerSecond() const { return mVideoBitsPerSecond; }
protected:
virtual ~MediaRecorder();
@ -131,7 +127,6 @@ class MediaRecorder final : public DOMEventTargetHelper,
void NotifyError(nsresult aRv);
// Set encoded MIME type.
void SetMimeType(const nsString& aMimeType);
void SetOptions(const MediaRecorderOptions& aInitDict);
MediaRecorder(const MediaRecorder& x) = delete; // prevent bad usage
// Remove session pointer.
@ -142,23 +137,21 @@ class MediaRecorder final : public DOMEventTargetHelper,
// available at the time the error event is fired. Note, depending on when
// this is called there may not be a JS stack to capture.
void InitializeDomExceptions();
// Set the recorder state to inactive. This is needed to handle error states
// in the recorder where state must transition to inactive before full
// stoppage can be reached.
void ForceInactive();
// Runs the "Inactivate the recorder" algorithm.
void Inactivate();
// Stop the recorder and its internal session. This should be used by
// sessions that are in the process of being destroyed.
void StopForSessionDestruction();
// DOM wrapper for source media stream. Will be null when input is audio node.
RefPtr<DOMMediaStream> mDOMStream;
RefPtr<DOMMediaStream> mStream;
// Source audio node. Will be null when input is a media stream.
RefPtr<AudioNode> mAudioNode;
// Source audio node's output index. Will be zero when input is a media
// stream.
const uint32_t mAudioNodeOutput;
uint32_t mAudioNodeOutput = 0;
// The current state of the MediaRecorder object.
RecordingState mState;
RecordingState mState = RecordingState::Inactive;
// Hold the sessions reference and clean it when the DestroyRunnable for a
// session is running.
nsTArray<RefPtr<Session>> mSessions;
@ -169,9 +162,9 @@ class MediaRecorder final : public DOMEventTargetHelper,
// formats.
nsString mMimeType;
uint32_t mAudioBitsPerSecond;
uint32_t mVideoBitsPerSecond;
uint32_t mBitsPerSecond;
uint32_t mAudioBitsPerSecond = 0;
uint32_t mVideoBitsPerSecond = 0;
Maybe<uint32_t> mConstrainedBitsPerSecond;
// DOMExceptions that are created early and possibly thrown in NotifyError.
// Creating them early allows us to capture the JS stack for which cannot be