Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1908245 - Add support for missing requestVideoFrameCallback parameters. r=media-playback-reviewers,webidl,padenot,saschanaz
This patch adds support for exposing the capture time, receive time, processing duration, and RTP timestamp parameters on the requestVideoFrameCallback (rVFC) callback for WebRTC-related video elements. It also improves the accuracy of the media time parameter for normal playback. Differential Revision: https://phabricator.services.mozilla.com/D221671
Parent: f7fd4872b8
Commit: 29f6d0c8fe
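The mediaTime accuracy improvement works by carrying each frame's own presentation time with the image as it is handed to the compositor, and only falling back to the element clock when that is invalid (see the HTMLVideoElement hunk below). A minimal sketch of the difference, using hypothetical simplified types rather than Gecko's:

// Sketch: why a per-frame media time beats sampling the element clock.
// Hypothetical types; not part of the patch.
#include <cstdio>

struct Frame {
  double mediaTimeSeconds;  // presentation time carried by the frame itself
  bool mediaTimeValid;
};

double reportedMediaTime(const Frame& f, double elementClockSeconds) {
  // Prefer the frame's own media time; the element clock may already have
  // advanced past the frame that is actually being presented.
  return f.mediaTimeValid ? f.mediaTimeSeconds : elementClockSeconds;
}

int main() {
  Frame f{1.200, true};
  printf("%f\n", reportedMediaTime(f, 1.233));  // reports 1.200, not 1.233
}
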
@@ -9,6 +9,9 @@
 #include "mozilla/AppShutdown.h"
 #include "mozilla/AsyncEventDispatcher.h"
 #include "mozilla/dom/HTMLVideoElementBinding.h"
+#ifdef MOZ_WEBRTC
+#  include "mozilla/dom/RTCStatsReport.h"
+#endif
 #include "nsGenericHTMLElement.h"
 #include "nsGkAtoms.h"
 #include "nsSize.h"
@@ -712,29 +715,25 @@ void HTMLVideoElement::TakeVideoFrameRequestCallbacks(
     return;
   }

-  gfx::IntSize frameSize;
-  ImageContainer::FrameID frameID = layers::kContainerFrameID_Invalid;
-  bool composited = false;
-
   // We are guaranteed that the images are in timestamp order. It is possible we
   // are already behind if the compositor notifications have not been processed
   // yet, so as per the standard, this is a best effort attempt at synchronizing
   // with the state of the GPU process.
+  const ImageContainer::OwningImage* selected = nullptr;
+  bool composited = false;
   for (const auto& image : images) {
     if (image.mTimeStamp <= aNowTime) {
       // Image should already have been composited. Because we might not be in
       // the display list, we cannot rely upon its mComposited status, and
       // should just assume it has indeed been composited.
-      frameSize = image.mImage->GetSize();
-      frameID = image.mFrameID;
+      selected = &image;
       composited = true;
     } else if (!aNextTickTime || image.mTimeStamp <= aNextTickTime.ref()) {
       // Image should be the next to be composited. mComposited will be false
       // if the compositor hasn't rendered the frame yet or notified us of the
       // render yet, but it is in progress. If it is true, then we know the
       // next vsync will display the frame.
-      frameSize = image.mImage->GetSize();
-      frameID = image.mFrameID;
+      selected = &image;
       composited = false;
     } else {
       // Image is for a future composition.
@@ -744,14 +743,15 @@ void HTMLVideoElement::TakeVideoFrameRequestCallbacks(

   // If all of the available images are for future compositions, we must have
   // fired too early. Wait for the next invalidation.
-  if (frameID == layers::kContainerFrameID_Invalid ||
-      frameID == mLastPresentedFrameID) {
+  if (!selected || selected->mFrameID == layers::kContainerFrameID_Invalid ||
+      selected->mFrameID == mLastPresentedFrameID) {
     return;
   }

   // If we have got a dummy frame, then we must have suspended decoding and have
   // no actual frame to present. This should only happen if we raced on
   // requesting a callback, and the media state machine advancing.
+  gfx::IntSize frameSize = selected->mImage->GetSize();
   if (NS_WARN_IF(frameSize.IsEmpty())) {
     return;
   }
@@ -767,20 +767,78 @@ void HTMLVideoElement::TakeVideoFrameRequestCallbacks(

   aMd.mWidth = frameSize.width;
   aMd.mHeight = frameSize.height;
-  aMd.mMediaTime = CurrentTime();
+
+  // If we were not provided a valid media time, then we need to estimate based
+  // on the CurrentTime from the element.
+  aMd.mMediaTime = selected->mMediaTime.IsValid()
+                       ? selected->mMediaTime.ToSeconds()
+                       : CurrentTime();
+
+  // If we have a processing duration, we need to round it.
+  //
+  // https://wicg.github.io/video-rvfc/#security-and-privacy
+  //
+  // 5. Security and Privacy Considerations.
+  // ... processingDuration exposes some under-the-hood performance information
+  // about the video pipeline ... We therefore propose a resolution of 100μs,
+  // which is still useful for automated quality analysis, but doesn’t offer any
+  // new sources of high resolution information.
+  if (selected->mProcessingDuration.IsValid()) {
+    aMd.mProcessingDuration.Construct(
+        selected->mProcessingDuration.ToBase(10000).ToSeconds());
+  }
+
+#ifdef MOZ_WEBRTC
+  // If given, this is the RTP timestamp from the last packet for the frame.
+  if (selected->mRtpTimestamp) {
+    aMd.mRtpTimestamp.Construct(*selected->mRtpTimestamp);
+  }
+
+  // For remote sources, the capture and receive time are represented as WebRTC
+  // timestamps relative to an origin that is specific to the WebRTC session.
+  bool hasCaptureTimeNtp = selected->mWebrtcCaptureTime.is<int64_t>();
+  bool hasReceiveTimeReal = selected->mWebrtcReceiveTime.isSome();
+  if (hasCaptureTimeNtp || hasReceiveTimeReal) {
+    if (const auto* timestampMaker =
+            mSelectedVideoStreamTrack->GetTimestampMaker()) {
+      if (hasCaptureTimeNtp) {
+        aMd.mCaptureTime.Construct(
+            RTCStatsTimestamp::FromNtp(
+                *timestampMaker,
+                webrtc::Timestamp::Micros(
+                    selected->mWebrtcCaptureTime.as<int64_t>()))
+                .ToDom());
+      }
+      if (hasReceiveTimeReal) {
+        aMd.mReceiveTime.Construct(
+            RTCStatsTimestamp::FromRealtime(
+                *timestampMaker,
+                webrtc::Timestamp::Micros(*selected->mWebrtcReceiveTime))
+                .ToDom());
+      }
+    }
+  }
+
+  // Otherwise, the capture time may be a high resolution timestamp from the
+  // camera pipeline indicating when the sample was captured.
+  if (selected->mWebrtcCaptureTime.is<TimeStamp>()) {
+    if (nsPIDOMWindowInner* win = OwnerDoc()->GetInnerWindow()) {
+      if (Performance* perf = win->GetPerformance()) {
+        aMd.mCaptureTime.Construct(perf->TimeStampToDOMHighResForRendering(
+            selected->mWebrtcCaptureTime.as<TimeStamp>()));
+      }
+    }
+  }
+#endif

   // Presented frames is a bit of a misnomer from a rendering perspective,
   // because we still need to advance regardless of composition. Video elements
   // that are outside of the DOM, or are not visible, still advance the video in
   // the background, and presumably the caller still needs some way to know how
   // many frames we have advanced.
-  aMd.mPresentedFrames = frameID;
+  aMd.mPresentedFrames = selected->mFrameID;

-  // TODO(Bug 1908246): We should set processingDuration.
-  // TODO(Bug 1908245): We should set captureTime, receiveTime and rtpTimestamp
-  // for WebRTC.
-
-  mLastPresentedFrameID = frameID;
+  mLastPresentedFrameID = selected->mFrameID;
   mVideoFrameRequestManager.Take(aCallbacks);

   NS_DispatchToMainThread(NewRunnableMethod(
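The ToBase(10000) call above rebases the duration onto a 10000 ticks-per-second timeline, i.e. 100 µs granularity, before converting to seconds, which implements the spec's proposed resolution cap. A standalone sketch of the arithmetic, with plain integers standing in for media::TimeUnit (whose truncating rebase this assumes):

// Sketch: coarsening a duration to 100 µs resolution, as the rVFC
// security-and-privacy section suggests. Plain integer math standing in
// for media::TimeUnit::ToBase(10000).
#include <cstdint>
#include <cstdio>

int main() {
  int64_t processingUs = 3141;              // 3.141 ms measured internally
  int64_t ticks = processingUs / 100;       // 31 ticks of 100 µs each
  double exposedSeconds = ticks / 10000.0;  // 0.0031 s reported to script
  printf("processingDuration = %.4f s\n", exposedSeconds);
}
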
@@ -237,6 +237,8 @@ LOCAL_INCLUDES += [
     "/layout/xul",
     "/netwerk/base",
     "/parser/htmlparser",
+    "/third_party/libwebrtc",
+    "/third_party/libwebrtc/third_party/abseil-cpp",
 ]

 FINAL_LIBRARY = "xul"
@@ -977,10 +977,12 @@ void ExternalEngineStateMachine::OnRequestVideo() {
         // Send image to PIP window.
         if (mSecondaryVideoContainer.Ref()) {
           mSecondaryVideoContainer.Ref()->SetCurrentFrame(
-              mVideoDisplay, aVideo->mImage, TimeStamp::Now());
+              mVideoDisplay, aVideo->mImage, TimeStamp::Now(),
+              media::TimeUnit::Invalid(), aVideo->mTime);
         } else {
           mVideoFrameContainer->SetCurrentFrame(
-              mVideoDisplay, aVideo->mImage, TimeStamp::Now());
+              mVideoDisplay, aVideo->mImage, TimeStamp::Now(),
+              media::TimeUnit::Invalid(), aVideo->mTime);
         }
       },
       [this, self](const MediaResult& aError) {
@@ -40,6 +40,7 @@ namespace dom {

 class AudioStreamTrack;
 class VideoStreamTrack;
+class RTCStatsTimestampMaker;
 enum class CallerType : uint32_t;

 /**
@@ -140,6 +141,14 @@ class MediaStreamTrackSource : public nsISupports {
    */
   virtual const PeerIdentity* GetPeerIdentity() const { return nullptr; }

+  /**
+   * This is used in WebRTC. The timestampMaker can convert between different
+   * timestamp types used during the session.
+   */
+  virtual const RTCStatsTimestampMaker* GetTimestampMaker() const {
+    return nullptr;
+  }
+
   /**
    * MediaStreamTrack::GetLabel (see spec) calls through to here.
    */
@@ -459,6 +468,13 @@ class MediaStreamTrack : public DOMEventTargetHelper, public SupportsWeakPtr {
     return GetSource().GetPeerIdentity();
   }

+  /**
+   * Get this track's RTCStatsTimestampMaker.
+   */
+  const RTCStatsTimestampMaker* GetTimestampMaker() const {
+    return GetSource().GetTimestampMaker();
+  }
+
   ProcessedMediaTrack* GetTrack() const;
   MediaTrackGraph* Graph() const;
   MediaTrackGraphImpl* GraphImpl() const;
@@ -2932,10 +2932,7 @@ static void MoveToSegment(SourceMediaTrack* aTrack, MediaSegment* aIn,
     if (!last || last->mTimeStamp.IsNull()) {
       // This is the first frame, or the last frame pushed to `out` has been
       // all consumed. Just append and we deal with its duration later.
-      out->AppendFrame(do_AddRef(c->mFrame.GetImage()),
-                       c->mFrame.GetIntrinsicSize(),
-                       c->mFrame.GetPrincipalHandle(),
-                       c->mFrame.GetForceBlack(), c->mTimeStamp);
+      out->AppendFrame(*c);
       if (c->GetDuration() > 0) {
         out->ExtendLastFrameBy(c->GetDuration());
       }
@@ -2952,10 +2949,7 @@ static void MoveToSegment(SourceMediaTrack* aTrack, MediaSegment* aIn,
     }

     // Append the current frame (will have duration 0).
-    out->AppendFrame(do_AddRef(c->mFrame.GetImage()),
-                     c->mFrame.GetIntrinsicSize(),
-                     c->mFrame.GetPrincipalHandle(),
-                     c->mFrame.GetForceBlack(), c->mTimeStamp);
+    out->AppendFrame(*c);
     if (c->GetDuration() > 0) {
       out->ExtendLastFrameBy(c->GetDuration());
     }
@@ -3139,10 +3133,7 @@ void SourceMediaTrack::AddDirectListenerImpl(
         continue;
       }
       ++videoFrames;
-      bufferedData.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
-                               iter->mFrame.GetIntrinsicSize(),
-                               iter->mFrame.GetPrincipalHandle(),
-                               iter->mFrame.GetForceBlack(), iter->mTimeStamp);
+      bufferedData.AppendFrame(*iter);
     }

     VideoSegment& video = static_cast<VideoSegment&>(*mUpdateTrack->mData);
@@ -3150,10 +3141,7 @@ void SourceMediaTrack::AddDirectListenerImpl(
          iter.Next()) {
       ++videoFrames;
       MOZ_ASSERT(!iter->mTimeStamp.IsNull());
-      bufferedData.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
-                               iter->mFrame.GetIntrinsicSize(),
-                               iter->mFrame.GetPrincipalHandle(),
-                               iter->mFrame.GetForceBlack(), iter->mTimeStamp);
+      bufferedData.AppendFrame(*iter);
     }

     LOG(LogLevel::Info,
@@ -25,9 +25,7 @@ void DirectMediaTrackListener::MirrorAndDisableSegment(
     VideoSegment& aFrom, VideoSegment& aTo, DisabledTrackMode aMode) {
   if (aMode == DisabledTrackMode::SILENCE_BLACK) {
     for (VideoSegment::ChunkIterator it(aFrom); !it.IsEnded(); it.Next()) {
-      aTo.AppendFrame(do_AddRef(it->mFrame.GetImage()),
-                      it->mFrame.GetIntrinsicSize(), it->GetPrincipalHandle(),
-                      true);
+      aTo.AppendFrame(*it, Some(true));
       aTo.ExtendLastFrameBy(it->GetDuration());
     }
   } else if (aMode == DisabledTrackMode::SILENCE_FREEZE) {
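The call sites above all switch from the long parameter-list AppendFrame to a chunk-copying overload (defined later in VideoSegment.cpp), so the new per-chunk metadata cannot be silently dropped at a call site. A sketch of the hazard being removed, with hypothetical standalone types rather than Gecko's:

// Sketch: field-by-field copying loses any metadata the parameter list does
// not mention; copying the whole chunk carries future fields automatically.
// Hypothetical types, not Gecko's.
#include <cstdint>
#include <optional>

struct Chunk {
  int64_t timeStampUs = 0;
  std::optional<uint32_t> rtpTimestamp;  // metadata added by this patch
};

// Old style: only the listed fields survive; rtpTimestamp is dropped.
Chunk appendByFields(int64_t timeStampUs) { return Chunk{timeStampUs, {}}; }

// New style: the whole chunk is copied, metadata included.
Chunk appendByChunk(const Chunk& aChunk) { return aChunk; }
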
@@ -95,17 +95,18 @@ static void NotifySetCurrent(Image* aImage) {
 }
 #endif

-void VideoFrameContainer::SetCurrentFrame(const gfx::IntSize& aIntrinsicSize,
-                                          Image* aImage,
-                                          const TimeStamp& aTargetTime) {
+void VideoFrameContainer::SetCurrentFrame(
+    const gfx::IntSize& aIntrinsicSize, Image* aImage,
+    const TimeStamp& aTargetTime, const media::TimeUnit& aProcessingDuration,
+    const media::TimeUnit& aMediaTime) {
 #ifdef MOZ_WIDGET_ANDROID
   NotifySetCurrent(aImage);
 #endif
   if (aImage) {
     MutexAutoLock lock(mMutex);
     AutoTArray<ImageContainer::NonOwningImage, 1> imageList;
-    imageList.AppendElement(
-        ImageContainer::NonOwningImage(aImage, aTargetTime, ++mFrameID));
+    imageList.AppendElement(ImageContainer::NonOwningImage(
+        aImage, aTargetTime, ++mFrameID, 0, aProcessingDuration, aMediaTime));
     SetCurrentFramesLocked(aIntrinsicSize, imageList);
   } else {
     ClearCurrentFrame(aIntrinsicSize);
@@ -203,16 +204,18 @@ void VideoFrameContainer::ClearFutureFrames(TimeStamp aNow) {

   if (!kungFuDeathGrip.IsEmpty()) {
     AutoTArray<ImageContainer::NonOwningImage, 1> currentFrame;
-    ImageContainer::OwningImage& img = kungFuDeathGrip[0];
+    const ImageContainer::OwningImage* img = &kungFuDeathGrip[0];
     // Find the current image in case there are several.
     for (const auto& image : kungFuDeathGrip) {
       if (image.mTimeStamp > aNow) {
         break;
       }
-      img = image;
+      img = &image;
     }
     currentFrame.AppendElement(ImageContainer::NonOwningImage(
-        img.mImage, img.mTimeStamp, img.mFrameID, img.mProducerID));
+        img->mImage, img->mTimeStamp, img->mFrameID, img->mProducerID,
+        img->mProcessingDuration, img->mMediaTime, img->mWebrtcCaptureTime,
+        img->mWebrtcReceiveTime, img->mRtpTimestamp));
     mImageContainer->SetCurrentImages(currentFrame);
   }
 }
@@ -41,7 +41,9 @@ class VideoFrameContainer {
       already_AddRefed<ImageContainer> aContainer);

   void SetCurrentFrame(const gfx::IntSize& aIntrinsicSize, Image* aImage,
-                       const TimeStamp& aTargetTime);
+                       const TimeStamp& aTargetTime,
+                       const media::TimeUnit& aProcessingDuration,
+                       const media::TimeUnit& aMediaTime);
   // Returns the last principalHandle we notified mElement about.
   PrincipalHandle GetLastPrincipalHandle();
   PrincipalHandle GetLastPrincipalHandleLocked() MOZ_REQUIRES(mMutex);
@@ -100,8 +100,11 @@ class VideoOutput : public DirectMediaTrackListener {
         // We ignore null images.
         continue;
       }
-      images.AppendElement(ImageContainer::NonOwningImage(
-          image, chunk.mTimeStamp, frameId, mProducerID));
+      ImageContainer::NonOwningImage nonOwningImage(
+          image, chunk.mTimeStamp, frameId, mProducerID,
+          chunk.mProcessingDuration, chunk.mMediaTime, chunk.mWebrtcCaptureTime,
+          chunk.mWebrtcReceiveTime, chunk.mRtpTimestamp);
+      images.AppendElement(std::move(nonOwningImage));

       lastPrincipalHandle = chunk.GetPrincipalHandle();
@@ -281,10 +284,7 @@ class FirstFrameVideoOutput : public VideoOutput {

     // Pick the first frame and run it through the rendering code.
     VideoSegment segment;
-    segment.AppendFrame(do_AddRef(c->mFrame.GetImage()),
-                        c->mFrame.GetIntrinsicSize(),
-                        c->mFrame.GetPrincipalHandle(),
-                        c->mFrame.GetForceBlack(), c->mTimeStamp);
+    segment.AppendFrame(*c);
     VideoOutput::NotifyRealtimeTrackData(aGraph, aTrackOffset, segment);
     return;
   }
@@ -84,12 +84,74 @@ already_AddRefed<Image> VideoFrame::CreateBlackImage(
   return image.forget();
 }

+void VideoSegment::AppendFrame(const VideoChunk& aChunk,
+                               const Maybe<bool>& aForceBlack,
+                               const Maybe<TimeStamp>& aTimeStamp) {
+  VideoChunk* chunk = AppendChunk(0);
+  chunk->mTimeStamp = aTimeStamp ? *aTimeStamp : aChunk.mTimeStamp;
+  chunk->mProcessingDuration = aChunk.mProcessingDuration;
+  chunk->mMediaTime = aChunk.mMediaTime;
+  chunk->mWebrtcCaptureTime = aChunk.mWebrtcCaptureTime;
+  chunk->mWebrtcReceiveTime = aChunk.mWebrtcReceiveTime;
+  chunk->mRtpTimestamp = aChunk.mRtpTimestamp;
+  VideoFrame frame(do_AddRef(aChunk.mFrame.GetImage()),
+                   aChunk.mFrame.GetIntrinsicSize());
+  MOZ_ASSERT_IF(!IsNull(), !aChunk.mTimeStamp.IsNull());
+  frame.SetForceBlack(aForceBlack ? *aForceBlack
+                                  : aChunk.mFrame.GetForceBlack());
+  frame.SetPrincipalHandle(aChunk.mFrame.GetPrincipalHandle());
+  chunk->mFrame.TakeFrom(&frame);
+}
+
 void VideoSegment::AppendFrame(already_AddRefed<Image>&& aImage,
                                const IntSize& aIntrinsicSize,
                                const PrincipalHandle& aPrincipalHandle,
-                               bool aForceBlack, TimeStamp aTimeStamp) {
+                               bool aForceBlack, TimeStamp aTimeStamp,
+                               media::TimeUnit aProcessingDuration,
+                               media::TimeUnit aMediaTime) {
   VideoChunk* chunk = AppendChunk(0);
   chunk->mTimeStamp = aTimeStamp;
+  chunk->mProcessingDuration = aProcessingDuration;
+  chunk->mMediaTime = aMediaTime;
   VideoFrame frame(std::move(aImage), aIntrinsicSize);
   MOZ_ASSERT_IF(!IsNull(), !aTimeStamp.IsNull());
   frame.SetForceBlack(aForceBlack);
   frame.SetPrincipalHandle(aPrincipalHandle);
   chunk->mFrame.TakeFrom(&frame);
 }
+
+void VideoSegment::AppendWebrtcRemoteFrame(
+    already_AddRefed<Image>&& aImage, const IntSize& aIntrinsicSize,
+    const PrincipalHandle& aPrincipalHandle, bool aForceBlack,
+    TimeStamp aTimeStamp, media::TimeUnit aProcessingDuration,
+    uint32_t aRtpTimestamp, int64_t aWebrtcCaptureTimeNtp,
+    int64_t aWebrtcReceiveTimeUs) {
+  VideoChunk* chunk = AppendChunk(0);
+  chunk->mTimeStamp = aTimeStamp;
+  chunk->mProcessingDuration = aProcessingDuration;
+  if (aWebrtcCaptureTimeNtp > 0) {
+    chunk->mWebrtcCaptureTime = AsVariant(aWebrtcCaptureTimeNtp);
+  }
+  if (aWebrtcReceiveTimeUs > 0) {
+    chunk->mWebrtcReceiveTime = Some(aWebrtcReceiveTimeUs);
+  }
+  chunk->mRtpTimestamp = Some(aRtpTimestamp);
+  VideoFrame frame(std::move(aImage), aIntrinsicSize);
+  MOZ_ASSERT_IF(!IsNull(), !aTimeStamp.IsNull());
+  frame.SetForceBlack(aForceBlack);
+  frame.SetPrincipalHandle(aPrincipalHandle);
+  chunk->mFrame.TakeFrom(&frame);
+}
+
+void VideoSegment::AppendWebrtcLocalFrame(
+    already_AddRefed<Image>&& aImage, const IntSize& aIntrinsicSize,
+    const PrincipalHandle& aPrincipalHandle, bool aForceBlack,
+    TimeStamp aTimeStamp, media::TimeUnit aProcessingDuration,
+    TimeStamp aWebrtcCaptureTime) {
+  VideoChunk* chunk = AppendChunk(0);
+  chunk->mTimeStamp = aTimeStamp;
+  chunk->mProcessingDuration = aProcessingDuration;
+  chunk->mWebrtcCaptureTime = AsVariant(aWebrtcCaptureTime);
+  VideoFrame frame(std::move(aImage), aIntrinsicSize);
+  MOZ_ASSERT_IF(!IsNull(), !aTimeStamp.IsNull());
+  frame.SetForceBlack(aForceBlack);
+  frame.SetPrincipalHandle(aPrincipalHandle);
+  chunk->mFrame.TakeFrom(&frame);
+}
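AppendWebrtcRemoteFrame treats non-positive NTP and receive times as "unknown" and leaves the corresponding Maybe/Variant empty; this assumes libwebrtc's sentinel convention where a non-positive ntp_time_ms() means no estimate is available. A sketch of that convention with a hypothetical standalone helper:

// Sketch: mapping "non-positive means unknown" sentinels into optionals,
// mirroring the guards in AppendWebrtcRemoteFrame. Hypothetical helper,
// assumed sentinel semantics.
#include <cstdint>
#include <optional>

std::optional<int64_t> knownTimestamp(int64_t sentinelValue) {
  // A non-positive value is the "no estimate" sentinel, so map it to an
  // empty optional instead of exposing a bogus timestamp downstream.
  return sentinelValue > 0 ? std::optional<int64_t>(sentinelValue)
                           : std::nullopt;
}
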
@@ -10,6 +10,7 @@
 #include "nsCOMPtr.h"
 #include "gfxPoint.h"
 #include "ImageContainer.h"
+#include "TimeUnits.h"

 namespace mozilla {

@@ -93,6 +94,11 @@ struct VideoChunk {
   TrackTime mDuration;
   VideoFrame mFrame;
   TimeStamp mTimeStamp;
+  media::TimeUnit mProcessingDuration;
+  media::TimeUnit mMediaTime;
+  layers::ContainerCaptureTime mWebrtcCaptureTime = AsVariant(Nothing());
+  layers::ContainerReceiveTime mWebrtcReceiveTime;
+  layers::ContainerRtpTimestamp mRtpTimestamp;
 };

 class VideoSegment : public MediaSegmentBase<VideoSegment, VideoChunk> {
@@ -108,11 +114,29 @@ class VideoSegment : public MediaSegmentBase<VideoSegment, VideoChunk> {

   ~VideoSegment();

-  void AppendFrame(already_AddRefed<Image>&& aImage,
-                   const IntSize& aIntrinsicSize,
-                   const PrincipalHandle& aPrincipalHandle,
-                   bool aForceBlack = false,
-                   TimeStamp aTimeStamp = TimeStamp::Now());
+  void AppendFrame(const VideoChunk& aChunk,
+                   const Maybe<bool>& aForceBlack = Nothing(),
+                   const Maybe<TimeStamp>& aTimeStamp = Nothing());
+  void AppendFrame(
+      already_AddRefed<Image>&& aImage, const IntSize& aIntrinsicSize,
+      const PrincipalHandle& aPrincipalHandle, bool aForceBlack = false,
+      TimeStamp aTimeStamp = TimeStamp::Now(),
+      media::TimeUnit aProcessingDuration = media::TimeUnit::Invalid(),
+      media::TimeUnit aMediaTime = media::TimeUnit::Invalid());
+  void AppendWebrtcRemoteFrame(already_AddRefed<Image>&& aImage,
+                               const IntSize& aIntrinsicSize,
+                               const PrincipalHandle& aPrincipalHandle,
+                               bool aForceBlack, TimeStamp aTimeStamp,
+                               media::TimeUnit aProcessingDuration,
+                               uint32_t aRtpTimestamp,
+                               int64_t aWebrtcCaptureTimeNtp,
+                               int64_t aWebrtcReceiveTimeUs);
+  void AppendWebrtcLocalFrame(already_AddRefed<Image>&& aImage,
+                              const IntSize& aIntrinsicSize,
+                              const PrincipalHandle& aPrincipalHandle,
+                              bool aForceBlack, TimeStamp aTimeStamp,
+                              media::TimeUnit aProcessingDuration,
+                              TimeStamp aWebrtcCaptureTime);
   void ExtendLastFrameBy(TrackTime aDuration) {
     if (aDuration <= 0) {
       return;
@@ -847,7 +847,7 @@ void DecodedStreamData::WriteVideoToSegment(
     double aPlaybackRate) {
   RefPtr<layers::Image> image = aImage;
   aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
-                       aTimeStamp);
+                       aTimeStamp, media::TimeUnit::Invalid(), aStart);
   // Extend this so we get accurate durations for all frames.
   // Because this track is pushed, we need durations so the graph can track
   // when playout of the track has finished.
@@ -349,9 +349,12 @@ void VideoSink::Redraw(const VideoInfo& aInfo) {
       video->mImage = mBlankImage;
     }
     video->MarkSentToCompositor();
-    mContainer->SetCurrentFrame(video->mDisplay, video->mImage, now);
+    mContainer->SetCurrentFrame(video->mDisplay, video->mImage, now,
+                                media::TimeUnit::Invalid(), video->mTime);
     if (mSecondaryContainer) {
-      mSecondaryContainer->SetCurrentFrame(video->mDisplay, video->mImage, now);
+      mSecondaryContainer->SetCurrentFrame(video->mDisplay, video->mImage, now,
+                                           media::TimeUnit::Invalid(),
+                                           video->mTime);
     }
     return;
   }
@@ -362,10 +365,14 @@ void VideoSink::Redraw(const VideoInfo& aInfo) {

     RefPtr<Image> blank =
         mContainer->GetImageContainer()->CreatePlanarYCbCrImage();
-    mContainer->SetCurrentFrame(aInfo.mDisplay, blank, now);
+    mContainer->SetCurrentFrame(aInfo.mDisplay, blank, now,
+                                media::TimeUnit::Invalid(),
+                                media::TimeUnit::Invalid());

     if (mSecondaryContainer) {
-      mSecondaryContainer->SetCurrentFrame(aInfo.mDisplay, blank, now);
+      mSecondaryContainer->SetCurrentFrame(aInfo.mDisplay, blank, now,
+                                           media::TimeUnit::Invalid(),
+                                           media::TimeUnit::Invalid());
     }
   }

@@ -476,6 +483,7 @@ void VideoSink::RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime,
     }
     img->mFrameID = frame->mFrameID;
     img->mProducerID = mProducerID;
+    img->mMediaTime = frame->mTime;

     VSINK_LOG_V("playing video frame %" PRId64
                 " (id=%x, vq-queued=%zu, clock=%" PRId64 ")",
@@ -672,11 +680,14 @@ void VideoSink::SetSecondaryVideoContainer(VideoFrameContainer* aSecondary) {
     // that in the secondary container as well.
     AutoLockImage lockImage(mainImageContainer);
     TimeStamp now = TimeStamp::Now();
-    if (RefPtr<Image> image = lockImage.GetImage(now)) {
+    if (const auto* owningImage = lockImage.GetOwningImage(now)) {
       AutoTArray<ImageContainer::NonOwningImage, 1> currentFrame;
       currentFrame.AppendElement(ImageContainer::NonOwningImage(
-          image, now, /* frameID */ 1,
-          /* producerId */ ImageContainer::AllocateProducerID()));
+          owningImage->mImage, now, /* frameID */ 1,
+          /* producerId */ ImageContainer::AllocateProducerID(),
+          owningImage->mProcessingDuration, owningImage->mMediaTime,
+          owningImage->mWebrtcCaptureTime, owningImage->mWebrtcReceiveTime,
+          owningImage->mRtpTimestamp));
       secondaryImageContainer->SetCurrentImages(currentFrame);
     }
   }
@@ -7,7 +7,11 @@ include protocol PBackground;

 include PBackgroundSharedTypes;

+include "mozilla/dom/MediaIPCUtils.h";
+
 using mozilla::camera::CaptureEngine from "mozilla/media/CamerasTypes.h";
+using mozilla::TimeStamp from "mozilla/TimeStamp.h";
+using mozilla::media::TimeUnit from "TimeUnits.h";

 namespace mozilla {
 namespace camera {
@@ -28,13 +32,15 @@ struct VideoCaptureCapability
 // See VideoFrameUtils.h
 struct VideoFrameProperties
 {
+  TimeStamp captureTime;
   // Size of image data within the ShMem,
   // the ShMem is at least this large
   uint32_t bufferSize;
   // From webrtc::VideoFrame
-  uint32_t timeStamp;
+  uint32_t rtpTimeStamp;
   int64_t ntpTimeMs;
   int64_t renderTimeMs;
+  TimeUnit processingDuration;
   // See webrtc/**/rotation.h
   int rotation;
   int yAllocatedSize;
@@ -24,14 +24,23 @@ uint32_t VideoFrameUtils::TotalRequiredBufferSize(
 void VideoFrameUtils::InitFrameBufferProperties(
     const webrtc::VideoFrame& aVideoFrame,
     camera::VideoFrameProperties& aDestProps) {
+  aDestProps.captureTime() = TimeStamp::Now();
+
   // The VideoFrameBuffer image data stored in the accompanying buffer
   // the buffer is at least this size of larger.
   aDestProps.bufferSize() = TotalRequiredBufferSize(aVideoFrame);

-  aDestProps.timeStamp() = aVideoFrame.timestamp();
+  aDestProps.rtpTimeStamp() = aVideoFrame.rtp_timestamp();
   aDestProps.ntpTimeMs() = aVideoFrame.ntp_time_ms();
   aDestProps.renderTimeMs() = aVideoFrame.render_time_ms();

+  if (aVideoFrame.processing_time()) {
+    aDestProps.processingDuration() = media::TimeUnit::FromMicroseconds(
+        aVideoFrame.processing_time()->Elapsed().us());
+  } else {
+    aDestProps.processingDuration() = media::TimeUnit::Invalid();
+  }
+
   aDestProps.rotation() = aVideoFrame.rotation();

   auto i420 = aVideoFrame.video_frame_buffer()->ToI420();
@@ -543,11 +543,11 @@ int MediaEngineRemoteVideoSource::DeliverFrame(
 #ifdef DEBUG
   static uint32_t frame_num = 0;
   LOG_FRAME(
-      "frame %d (%dx%d)->(%dx%d); rotation %d, timeStamp %u, ntpTimeMs %" PRIu64
-      ", renderTimeMs %" PRIu64,
+      "frame %d (%dx%d)->(%dx%d); rotation %d, rtpTimeStamp %u, ntpTimeMs "
+      "%" PRIu64 ", renderTimeMs %" PRIu64 " processingDuration %" PRIi64 "us",
       frame_num++, aProps.width(), aProps.height(), dst_width, dst_height,
-      aProps.rotation(), aProps.timeStamp(), aProps.ntpTimeMs(),
-      aProps.renderTimeMs());
+      aProps.rotation(), aProps.rtpTimeStamp(), aProps.ntpTimeMs(),
+      aProps.renderTimeMs(), aProps.processingDuration().ToMicroseconds());
 #endif

   if (mImageSize.width != dst_width || mImageSize.height != dst_height) {
@@ -572,7 +572,9 @@ int MediaEngineRemoteVideoSource::DeliverFrame(
   MOZ_ASSERT(mState == kStarted);
   VideoSegment segment;
   mImageSize = image->GetSize();
-  segment.AppendFrame(image.forget(), mImageSize, mPrincipal);
+  segment.AppendWebrtcLocalFrame(
+      image.forget(), mImageSize, mPrincipal, /* aForceBlack */ false,
+      TimeStamp::Now(), aProps.processingDuration(), aProps.captureTime());
   mTrack->AppendData(&segment);
 }

@@ -1076,6 +1076,13 @@ void RTCRtpReceiver::RequestKeyFrame() {
   });
 }

+const RTCStatsTimestampMaker* RTCRtpReceiver::GetTimestampMaker() const {
+  if (!mPc) {
+    return nullptr;
+  }
+  return &mPc->GetTimestampMaker();
+}
+
 }  // namespace mozilla::dom

 #undef LOGTAG
@@ -40,6 +40,7 @@ struct RTCRtpContributingSource;
 struct RTCRtpSynchronizationSource;
 class RTCRtpTransceiver;
 class RTCRtpScriptTransform;
+class RTCStatsTimestampMaker;

 class RTCRtpReceiver : public nsISupports,
                        public nsWrapperCache,
@@ -163,6 +164,8 @@ class RTCRtpReceiver : public nsISupports,
     return mFrameTransformerProxy;
   }

+  const RTCStatsTimestampMaker* GetTimestampMaker() const;
+
  private:
   virtual ~RTCRtpReceiver();

@@ -70,4 +70,12 @@ void RemoteTrackSource::ForceEnded() { OverrideEnded(); }

 SourceMediaTrack* RemoteTrackSource::Stream() const { return mStream; }

+const dom::RTCStatsTimestampMaker* RemoteTrackSource::GetTimestampMaker()
+    const {
+  if (!mReceiver) {
+    return nullptr;
+  }
+  return mReceiver->GetTimestampMaker();
+}
+
 }  // namespace mozilla
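Together with the MediaStreamTrackSource default added earlier, this gives the video element a null-propagating path to the WebRTC session clock: element → track → source → receiver → peer connection, with local (non-WebRTC) sources falling off at the base class. A sketch of the chain with minimal hypothetical types:

// Sketch: the null-propagating lookup chain for the session clock.
// Minimal hypothetical types, not Gecko's classes.
struct TimestampMaker {};

struct PeerConnection {
  TimestampMaker mMaker;
};

struct Receiver {
  PeerConnection* mPc = nullptr;
  const TimestampMaker* GetTimestampMaker() const {
    return mPc ? &mPc->mMaker : nullptr;  // no peer connection, no clock
  }
};

struct TrackSource {
  // Base-class behavior: local sources have no session clock.
  virtual const TimestampMaker* GetTimestampMaker() const { return nullptr; }
  virtual ~TrackSource() = default;
};

struct RemoteTrackSourceSketch : TrackSource {
  Receiver* mReceiver = nullptr;
  const TimestampMaker* GetTimestampMaker() const override {
    return mReceiver ? mReceiver->GetTimestampMaker() : nullptr;
  }
};
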
@@ -52,6 +52,8 @@ class RemoteTrackSource : public dom::MediaStreamTrackSource {

   SourceMediaTrack* Stream() const;

+  const dom::RTCStatsTimestampMaker* GetTimestampMaker() const override;
+
  private:
   virtual ~RemoteTrackSource();

@@ -79,19 +79,14 @@ class VideoRenderer {

   /**
    * Callback Function reporting decoded frame for processing.
-   * @param buffer: reference to decoded video frame
-   * @param buffer_size: size of the decoded frame
-   * @param time_stamp: Decoder timestamp, typically 90KHz as per RTP
-   * @render_time: Wall-clock time at the decoder for synchronization
-   * purposes in milliseconds
+   * @param video_frame: reference to decoded video frame
    * NOTE: If decoded video frame is passed through buffer , it is the
    * responsibility of the concrete implementations of this class to own copy
    * of the frame if needed for time longer than scope of this callback.
    * Such implementations should be quick in processing the frames and return
    * immediately.
    */
-  virtual void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
-                                uint32_t time_stamp, int64_t render_time) = 0;
+  virtual void RenderVideoFrame(const webrtc::VideoFrame& video_frame) = 0;

   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoRenderer)
 };
@@ -1868,9 +1868,7 @@ void WebrtcVideoConduit::OnFrame(const webrtc::VideoFrame& video_frame) {
   }
 #endif

-  mRenderer->RenderVideoFrame(*video_frame.video_frame_buffer(),
-                              video_frame.timestamp(),
-                              video_frame.render_time_ms());
+  mRenderer->RenderVideoFrame(video_frame);
 }

 bool WebrtcVideoConduit::AddFrameHistory(
@@ -1421,8 +1421,7 @@ class MediaPipelineReceiveVideo::PipelineListener
     mForceDropFrames = false;
   }

-  void RenderVideoFrame(const webrtc::VideoFrameBuffer& aBuffer,
-                        uint32_t aTimeStamp, int64_t aRenderTime) {
+  void RenderVideoFrame(const webrtc::VideoFrame& aVideoFrame) {
     PrincipalHandle principal;
     {
       MutexAutoLock lock(mMutex);
@@ -1432,16 +1431,16 @@ class MediaPipelineReceiveVideo::PipelineListener
       principal = mPrincipalHandle;
     }
     RefPtr<Image> image;
-    if (aBuffer.type() == webrtc::VideoFrameBuffer::Type::kNative) {
+    const webrtc::VideoFrameBuffer& buffer = *aVideoFrame.video_frame_buffer();
+    if (buffer.type() == webrtc::VideoFrameBuffer::Type::kNative) {
       // We assume that only native handles are used with the
       // WebrtcMediaDataCodec decoder.
-      const ImageBuffer* imageBuffer =
-          static_cast<const ImageBuffer*>(&aBuffer);
+      const ImageBuffer* imageBuffer = static_cast<const ImageBuffer*>(&buffer);
       image = imageBuffer->GetNativeImage();
     } else {
-      MOZ_ASSERT(aBuffer.type() == webrtc::VideoFrameBuffer::Type::kI420);
+      MOZ_ASSERT(buffer.type() == webrtc::VideoFrameBuffer::Type::kI420);
       rtc::scoped_refptr<const webrtc::I420BufferInterface> i420(
-          aBuffer.GetI420());
+          buffer.GetI420());

       MOZ_ASSERT(i420->DataY());
       // Create a video frame using |buffer|.
@@ -1475,9 +1474,25 @@ class MediaPipelineReceiveVideo::PipelineListener
       image = std::move(yuvImage);
     }

+    Maybe<webrtc::Timestamp> receiveTime;
+    for (const auto& packet : aVideoFrame.packet_infos()) {
+      if (!receiveTime || *receiveTime < packet.receive_time()) {
+        receiveTime = Some(packet.receive_time());
+      }
+    }
+
     VideoSegment segment;
     auto size = image->GetSize();
-    segment.AppendFrame(image.forget(), size, principal);
+    auto processingDuration =
+        aVideoFrame.processing_time()
+            ? media::TimeUnit::FromMicroseconds(
+                  aVideoFrame.processing_time()->Elapsed().us())
+            : media::TimeUnit::Invalid();
+    segment.AppendWebrtcRemoteFrame(
+        image.forget(), size, principal,
+        /* aForceBlack */ false, TimeStamp::Now(), processingDuration,
+        aVideoFrame.rtp_timestamp(), aVideoFrame.ntp_time_ms(),
+        receiveTime ? receiveTime->us() : 0);
     mSource->AppendData(&segment);
   }

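A frame can be assembled from several RTP packets, so the listener above takes the latest packet arrival as the frame's receive time. The same scan with plain types (a hypothetical stand-in for webrtc::RtpPacketInfos):

// Sketch: a frame's receive time is the maximum arrival time over the RTP
// packets that carried it. Plain vector standing in for packet_infos().
#include <cstdint>
#include <optional>
#include <vector>

std::optional<int64_t> frameReceiveTimeUs(
    const std::vector<int64_t>& packetArrivalUs) {
  std::optional<int64_t> receiveTime;
  for (int64_t t : packetArrivalUs) {
    if (!receiveTime || *receiveTime < t) {
      receiveTime = t;  // keep the latest arrival
    }
  }
  return receiveTime;
}
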
@@ -1501,9 +1516,8 @@ class MediaPipelineReceiveVideo::PipelineRenderer

   // Implement VideoRenderer
   void FrameSizeChange(unsigned int aWidth, unsigned int aHeight) override {}
-  void RenderVideoFrame(const webrtc::VideoFrameBuffer& aBuffer,
-                        uint32_t aTimeStamp, int64_t aRenderTime) override {
-    mPipeline->mListener->RenderVideoFrame(aBuffer, aTimeStamp, aRenderTime);
+  void RenderVideoFrame(const webrtc::VideoFrame& aVideoFrame) override {
+    mPipeline->mListener->RenderVideoFrame(aVideoFrame);
   }

  private:
@@ -4,7 +4,8 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/.
  *
  * The origin of this IDL file is
- * http://www.whatwg.org/specs/web-apps/current-work/#the-video-element
+ * https://html.spec.whatwg.org/multipage/media.html#the-video-element
+ * https://wicg.github.io/video-rvfc/
  *
  * © Copyright 2004-2011 Apple Computer, Inc., Mozilla Foundation, and
  * Opera Software ASA. You are granted a license to use, reproduce
@@ -21,13 +22,10 @@ dictionary VideoFrameCallbackMetadata {

   required unsigned long presentedFrames;

-  //TODO(Bug 1908246)
-  //double processingDuration;
-
-  //TODO(Bug 1908245)
-  //DOMHighResTimeStamp captureTime;
-  //DOMHighResTimeStamp receiveTime;
-  //unsigned long rtpTimestamp;
+  double processingDuration;
+  DOMHighResTimeStamp captureTime;
+  DOMHighResTimeStamp receiveTime;
+  unsigned long rtpTimestamp;
 };

 callback VideoFrameRequestCallback =
@@ -329,6 +329,11 @@ void ImageContainer::SetCurrentImageInternal(
     OwningImage* img = newImages.AppendElement();
     img->mImage = aImages[i].mImage;
     img->mTimeStamp = aImages[i].mTimeStamp;
+    img->mProcessingDuration = aImages[i].mProcessingDuration;
+    img->mMediaTime = aImages[i].mMediaTime;
+    img->mWebrtcCaptureTime = aImages[i].mWebrtcCaptureTime;
+    img->mWebrtcReceiveTime = aImages[i].mWebrtcReceiveTime;
+    img->mRtpTimestamp = aImages[i].mRtpTimestamp;
     img->mFrameID = aImages[i].mFrameID;
     img->mProducerID = aImages[i].mProducerID;
     for (const auto& oldImg : mCurrentImages) {
@@ -31,6 +31,7 @@
 #include "mozilla/EnumeratedArray.h"
 #include "mozilla/UniquePtr.h"
 #include "nsTHashMap.h"
+#include "TimeUnits.h"

 #ifdef XP_WIN
 struct ID3D10Texture2D;
@@ -328,8 +329,11 @@ class ImageContainer final : public SupportsThreadSafeWeakPtr<ImageContainer> {

   ~ImageContainer();

-  typedef ContainerFrameID FrameID;
-  typedef ContainerProducerID ProducerID;
+  using FrameID = ContainerFrameID;
+  using ProducerID = ContainerProducerID;
+  using CaptureTime = ContainerCaptureTime;
+  using ReceiveTime = ContainerReceiveTime;
+  using RtpTimestamp = ContainerRtpTimestamp;

   RefPtr<PlanarYCbCrImage> CreatePlanarYCbCrImage();

@@ -337,17 +341,32 @@ class ImageContainer final : public SupportsThreadSafeWeakPtr<ImageContainer> {
   RefPtr<SharedRGBImage> CreateSharedRGBImage();

   struct NonOwningImage {
-    explicit NonOwningImage(Image* aImage = nullptr,
-                            TimeStamp aTimeStamp = TimeStamp(),
-                            FrameID aFrameID = 0, ProducerID aProducerID = 0)
+    explicit NonOwningImage(
+        Image* aImage = nullptr, TimeStamp aTimeStamp = TimeStamp(),
+        FrameID aFrameID = 0, ProducerID aProducerID = 0,
+        media::TimeUnit aProcessingDuration = media::TimeUnit::Invalid(),
+        media::TimeUnit aMediaTime = media::TimeUnit::Invalid(),
+        const CaptureTime& aWebrtcCaptureTime = AsVariant(Nothing()),
+        const ReceiveTime& aWebrtcReceiveTime = Nothing(),
+        const RtpTimestamp& aRtpTimestamp = Nothing())
         : mImage(aImage),
           mTimeStamp(aTimeStamp),
           mFrameID(aFrameID),
-          mProducerID(aProducerID) {}
+          mProducerID(aProducerID),
+          mProcessingDuration(aProcessingDuration),
+          mMediaTime(aMediaTime),
+          mWebrtcCaptureTime(aWebrtcCaptureTime),
+          mWebrtcReceiveTime(aWebrtcReceiveTime),
+          mRtpTimestamp(aRtpTimestamp) {}
     Image* mImage;
     TimeStamp mTimeStamp;
     FrameID mFrameID;
     ProducerID mProducerID;
+    media::TimeUnit mProcessingDuration = media::TimeUnit::Invalid();
+    media::TimeUnit mMediaTime = media::TimeUnit::Invalid();
+    CaptureTime mWebrtcCaptureTime = AsVariant(Nothing());
+    ReceiveTime mWebrtcReceiveTime;
+    RtpTimestamp mRtpTimestamp;
   };
   /**
    * Set aImages as the list of timestamped to display. The Images must have
@@ -440,12 +459,16 @@ class ImageContainer final : public SupportsThreadSafeWeakPtr<ImageContainer> {
   bool HasCurrentImage();

   struct OwningImage {
-    OwningImage() : mFrameID(0), mProducerID(0), mComposited(false) {}
     RefPtr<Image> mImage;
     TimeStamp mTimeStamp;
-    FrameID mFrameID;
-    ProducerID mProducerID;
-    bool mComposited;
+    media::TimeUnit mProcessingDuration = media::TimeUnit::Invalid();
+    media::TimeUnit mMediaTime = media::TimeUnit::Invalid();
+    CaptureTime mWebrtcCaptureTime = AsVariant(Nothing());
+    ReceiveTime mWebrtcReceiveTime;
+    RtpTimestamp mRtpTimestamp;
+    FrameID mFrameID = 0;
+    ProducerID mProducerID = 0;
+    bool mComposited = false;
   };
   /**
    * Copy the current Image list to aImages.
@@ -679,7 +702,8 @@ class AutoLockImage {
     return mImages.IsEmpty() ? nullptr : mImages[0].mImage.get();
   }

-  Image* GetImage(TimeStamp aTimeStamp) const {
+  const ImageContainer::OwningImage* GetOwningImage(
+      TimeStamp aTimeStamp) const {
     if (mImages.IsEmpty()) {
       return nullptr;
     }
@@ -692,7 +716,14 @@ class AutoLockImage {
       ++chosenIndex;
     }

-    return mImages[chosenIndex].mImage.get();
+    return &mImages[chosenIndex];
+  }
+
+  Image* GetImage(TimeStamp aTimeStamp) const {
+    if (const auto* owningImage = GetOwningImage(aTimeStamp)) {
+      return owningImage->mImage.get();
+    }
+    return nullptr;
   }

  private:
@@ -8,6 +8,9 @@
 #define GFX_IMAGETYPES_H

 #include <stdint.h>  // for uint32_t
+#include "mozilla/Maybe.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/Variant.h"

 namespace mozilla {

@@ -112,12 +115,20 @@ enum class StereoMode {

 namespace layers {

-typedef uint32_t ContainerFrameID;
+using ContainerFrameID = uint32_t;
 constexpr ContainerFrameID kContainerFrameID_Invalid = 0;

-typedef uint32_t ContainerProducerID;
+using ContainerProducerID = uint32_t;
 constexpr ContainerProducerID kContainerProducerID_Invalid = 0;

+// int64_t represents a WebRTC NTP timestamp.
+using ContainerCaptureTime = Variant<Nothing, TimeStamp, int64_t>;
+
+// int64_t represents a WebRTC Realtime timestamp.
+using ContainerReceiveTime = Maybe<int64_t>;
+
+using ContainerRtpTimestamp = Maybe<uint32_t>;
+
 }  // namespace layers

 }  // namespace mozilla
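ContainerCaptureTime lets one field carry either a local monotonic TimeStamp (camera capture) or an int64_t WebRTC NTP timestamp (remote capture), which is exactly how HTMLVideoElement dispatches on it above. A sketch of the consuming side, using std::variant as a stand-in for mozilla::Variant:

// Sketch: dispatching on a capture time that is either local (TimeStamp),
// remote (NTP int64_t), or absent. std::variant stands in for
// mozilla::Variant; the types are simplified.
#include <cstdint>
#include <cstdio>
#include <variant>

struct Nothing {};
struct TimeStamp { double ms; };  // stand-in for mozilla::TimeStamp
using ContainerCaptureTime = std::variant<Nothing, TimeStamp, int64_t>;

void describe(const ContainerCaptureTime& aTime) {
  if (const auto* ntp = std::get_if<int64_t>(&aTime)) {
    // Remote capture: needs the session clock to convert to DOM time.
    printf("remote capture, NTP: %lld\n", (long long)*ntp);
  } else if (const auto* local = std::get_if<TimeStamp>(&aTime)) {
    // Local capture: convertible directly for rendering.
    printf("local capture: %.3f ms\n", local->ms);
  } else {
    printf("capture time unknown\n");
  }
}
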
@@ -1,3 +0,0 @@
-[request-video-frame-callback-webrtc.https.html]
-  [Test video.requestVideoFrameCallback() parameters for WebRTC applications.]
-    expected: FAIL