Mirror of https://github.com/mozilla/gecko-dev.git

Bug 1892299 - Vendor libwebrtc from 0fa90887c5

Upstream commit: https://webrtc.googlesource.com/src/+/0fa90887c5bf15aa6e73c2df78cae31feb82fa54

    Deprecate VideoFrame::timestamp() and set_timestamp.
    Instead, add rtp_timestamp and set_rtp_timestamp.

    Bug: webrtc:13756
    Change-Id: Ic4266394003e0d49e525d71f4d830f5e518299cc
    Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/342781
    Commit-Queue: Per Kjellander <perkj@webrtc.org>
    Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
    Reviewed-by: Erik Språng <sprang@webrtc.org>
    Reviewed-by: Markus Handell <handellm@webrtc.org>
    Cr-Commit-Position: refs/heads/main@{#41894}

Parent: a6adaa0729
Commit: 69d58eaca0
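The substance of the change is a straight rename on webrtc::VideoFrame: the 90 kHz RTP clock is now exposed as rtp_timestamp()/set_rtp_timestamp(), with the old timestamp()/set_timestamp() names kept (and marked for deprecation) as aliases of the same field. A minimal sketch of a call-site migration; the accessor names come from the diff below, while the buffer size and tick values are illustrative only:

  #include "api/video/i420_buffer.h"
  #include "api/video/video_frame.h"

  webrtc::VideoFrame MakeFrame(uint32_t rtp_ticks) {
    return webrtc::VideoFrame::Builder()
        .set_video_frame_buffer(webrtc::I420Buffer::Create(640, 480))
        .set_rtp_timestamp(rtp_ticks)  // new name for set_timestamp_rtp()
        .set_timestamp_us(0)
        .build();
  }

  void AdvanceOneFrameAt30Fps(webrtc::VideoFrame& frame) {
    // Before: frame.set_timestamp(frame.timestamp() + 3000);
    // After (same 90 kHz clock, clearer name):
    frame.set_rtp_timestamp(frame.rtp_timestamp() + 3000);
  }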
@@ -29436,3 +29436,6 @@ b8abf5199a
 # MOZ_LIBWEBRTC_SRC=/Users/ng/dev/elm/.moz-fast-forward/moz-libwebrtc MOZ_LIBWEBRTC_BRANCH=mozpatches bash dom/media/webrtc/third_party_build/fast-forward-libwebrtc.sh
 # base of lastest vendoring
 2725317b1f
+# MOZ_LIBWEBRTC_SRC=/Users/ng/dev/elm/.moz-fast-forward/moz-libwebrtc MOZ_LIBWEBRTC_BRANCH=mozpatches bash dom/media/webrtc/third_party_build/fast-forward-libwebrtc.sh
+# base of lastest vendoring
+0fa90887c5

@@ -19648,3 +19648,5 @@ libwebrtc updated from /Users/ng/dev/elm/.moz-fast-forward/moz-libwebrtc commit
 libwebrtc updated from /Users/ng/dev/elm/.moz-fast-forward/moz-libwebrtc commit mozpatches on 2024-05-02T08:03:45.145242.
 # ./mach python dom/media/webrtc/third_party_build/vendor-libwebrtc.py --from-local /Users/ng/dev/elm/.moz-fast-forward/moz-libwebrtc --commit mozpatches libwebrtc
 libwebrtc updated from /Users/ng/dev/elm/.moz-fast-forward/moz-libwebrtc commit mozpatches on 2024-05-02T08:04:58.566561.
+# ./mach python dom/media/webrtc/third_party_build/vendor-libwebrtc.py --from-local /Users/ng/dev/elm/.moz-fast-forward/moz-libwebrtc --commit mozpatches libwebrtc
+libwebrtc updated from /Users/ng/dev/elm/.moz-fast-forward/moz-libwebrtc commit mozpatches on 2024-05-02T08:12:44.039710.
@@ -199,6 +199,12 @@ VideoFrame::Builder& VideoFrame::Builder::set_reference_time(
   return *this;
 }
 
+VideoFrame::Builder& VideoFrame::Builder::set_rtp_timestamp(
+    uint32_t rtp_timestamp) {
+  timestamp_rtp_ = rtp_timestamp;
+  return *this;
+}
+
 VideoFrame::Builder& VideoFrame::Builder::set_timestamp_rtp(
     uint32_t timestamp_rtp) {
   timestamp_rtp_ = timestamp_rtp;

@@ -111,6 +111,8 @@ class RTC_EXPORT VideoFrame {
         const absl::optional<Timestamp>& capture_time_identifier);
     Builder& set_reference_time(
         const absl::optional<Timestamp>& reference_time);
+    Builder& set_rtp_timestamp(uint32_t rtp_timestamp);
+    // TODO(https://bugs.webrtc.org/13756): Deprecate and use set_rtp_timestamp.
     Builder& set_timestamp_rtp(uint32_t timestamp_rtp);
     Builder& set_ntp_time_ms(int64_t ntp_time_ms);
     Builder& set_rotation(VideoRotation rotation);

@@ -188,9 +190,15 @@ class RTC_EXPORT VideoFrame {
   }
 
   // Set frame timestamp (90kHz).
+  void set_rtp_timestamp(uint32_t rtp_timestamp) {
+    timestamp_rtp_ = rtp_timestamp;
+  }
+  // TODO(https://bugs.webrtc.org/13756): Deprecate and use set_rtp_timestamp.
   void set_timestamp(uint32_t timestamp) { timestamp_rtp_ = timestamp; }
 
   // Get frame timestamp (90kHz).
+  uint32_t rtp_timestamp() const { return timestamp_rtp_; }
+  // TODO(https://bugs.webrtc.org/13756): Deprecate and use rtp_timestamp.
   uint32_t timestamp() const { return timestamp_rtp_; }
 
   // Set capture ntp time in milliseconds.
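Because both names read and write the same timestamp_rtp_ member, mixed old and new call sites stay consistent during the transition. A small illustrative check (the test name is hypothetical and not part of this commit; the aliasing itself follows directly from the header hunk above):

  #include "api/video/i420_buffer.h"
  #include "api/video/video_frame.h"
  #include "test/gtest.h"

  TEST(VideoFrameRenameSketch, OldAndNewAccessorsAlias) {
    webrtc::VideoFrame frame =
        webrtc::VideoFrame::Builder()
            .set_video_frame_buffer(webrtc::I420Buffer::Create(2, 2))
            .set_rtp_timestamp(90000)
            .build();
    EXPECT_EQ(frame.timestamp(), frame.rtp_timestamp());
    frame.set_timestamp(180000);  // deprecated name, same storage
    EXPECT_EQ(180000u, frame.rtp_timestamp());
  }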
@@ -376,7 +376,7 @@ TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
 
   // Encoding a frame using the fallback should arrive at the new callback.
   std::vector<VideoFrameType> types(1, VideoFrameType::kVideoFrameKey);
-  frame_->set_timestamp(frame_->timestamp() + 1000);
+  frame_->set_rtp_timestamp(frame_->rtp_timestamp() + 1000);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, fallback_wrapper_->Encode(*frame_, &types));
   EXPECT_EQ(callback2.callback_count_, 1);
 

@@ -384,7 +384,7 @@ TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
   InitEncode();
   EXPECT_EQ(&callback2, fake_encoder_->encode_complete_callback_);
 
-  frame_->set_timestamp(frame_->timestamp() + 2000);
+  frame_->set_rtp_timestamp(frame_->rtp_timestamp() + 2000);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, fallback_wrapper_->Encode(*frame_, &types));
   EXPECT_EQ(callback2.callback_count_, 2);
 }
@@ -456,7 +456,7 @@ void CallPerfTest::TestCaptureNtpTime(
     }
 
     FrameCaptureTimeList::iterator iter =
-        capture_time_list_.find(video_frame.timestamp());
+        capture_time_list_.find(video_frame.rtp_timestamp());
     EXPECT_TRUE(iter != capture_time_list_.end());
 
     // The real capture time has been wrapped to uint32_t before converted
@@ -211,8 +211,8 @@ TEST(TestVideoFrame, WidthHeightValues) {
   const int valid_value = 10;
   EXPECT_EQ(valid_value, frame.width());
   EXPECT_EQ(valid_value, frame.height());
-  frame.set_timestamp(123u);
-  EXPECT_EQ(123u, frame.timestamp());
+  frame.set_rtp_timestamp(123u);
+  EXPECT_EQ(123u, frame.rtp_timestamp());
   frame.set_ntp_time_ms(456);
   EXPECT_EQ(456, frame.ntp_time_ms());
   EXPECT_EQ(789, frame.render_time_ms());

@@ -246,7 +246,7 @@ TEST(TestVideoFrame, ShallowCopy) {
                           .set_rotation(kRotation)
                           .set_timestamp_us(0)
                           .build();
-  frame1.set_timestamp(timestamp);
+  frame1.set_rtp_timestamp(timestamp);
   frame1.set_ntp_time_ms(ntp_time_ms);
   frame1.set_timestamp_us(timestamp_us);
   VideoFrame frame2(frame1);

@@ -260,17 +260,17 @@ TEST(TestVideoFrame, ShallowCopy) {
   EXPECT_EQ(yuv1->DataU(), yuv2->DataU());
   EXPECT_EQ(yuv1->DataV(), yuv2->DataV());
 
-  EXPECT_EQ(frame2.timestamp(), frame1.timestamp());
+  EXPECT_EQ(frame2.rtp_timestamp(), frame1.rtp_timestamp());
   EXPECT_EQ(frame2.ntp_time_ms(), frame1.ntp_time_ms());
   EXPECT_EQ(frame2.timestamp_us(), frame1.timestamp_us());
   EXPECT_EQ(frame2.rotation(), frame1.rotation());
 
-  frame2.set_timestamp(timestamp + 1);
+  frame2.set_rtp_timestamp(timestamp + 1);
   frame2.set_ntp_time_ms(ntp_time_ms + 1);
   frame2.set_timestamp_us(timestamp_us + 1);
   frame2.set_rotation(kVideoRotation_90);
 
-  EXPECT_NE(frame2.timestamp(), frame1.timestamp());
+  EXPECT_NE(frame2.rtp_timestamp(), frame1.rtp_timestamp());
   EXPECT_NE(frame2.ntp_time_ms(), frame1.ntp_time_ms());
   EXPECT_NE(frame2.timestamp_us(), frame1.timestamp_us());
   EXPECT_NE(frame2.rotation(), frame1.rotation());

@@ -281,14 +281,14 @@ TEST(TestVideoFrame, TextureInitialValues) {
       640, 480, 100, 10, webrtc::kVideoRotation_0);
   EXPECT_EQ(640, frame.width());
   EXPECT_EQ(480, frame.height());
-  EXPECT_EQ(100u, frame.timestamp());
+  EXPECT_EQ(100u, frame.rtp_timestamp());
   EXPECT_EQ(10, frame.render_time_ms());
   ASSERT_TRUE(frame.video_frame_buffer() != nullptr);
   EXPECT_TRUE(frame.video_frame_buffer()->type() ==
               VideoFrameBuffer::Type::kNative);
 
-  frame.set_timestamp(200);
-  EXPECT_EQ(200u, frame.timestamp());
+  frame.set_rtp_timestamp(200);
+  EXPECT_EQ(200u, frame.rtp_timestamp());
   frame.set_timestamp_us(20);
   EXPECT_EQ(20, frame.timestamp_us());
 }
@@ -534,7 +534,7 @@ int SimulcastEncoderAdapter::Encode(
 
   // Convert timestamp from RTP 90kHz clock.
   const Timestamp frame_timestamp =
-      Timestamp::Micros((1000 * input_image.timestamp()) / 90);
+      Timestamp::Micros((1000 * input_image.rtp_timestamp()) / 90);
 
   // If adapter is passed through and only one sw encoder does simulcast,
   // frame types for all streams should be passed to the encoder unchanged.
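The expression above converts the 90 kHz RTP clock to microseconds: 90 ticks equal one millisecond, so (1000 * ticks) / 90 is media time in microseconds. A sketch of the arithmetic (the helper name and the int64_t widening are mine, not part of the diff):

  #include <cstdint>

  int64_t RtpTicksToMicros(uint32_t rtp_ticks) {
    // Multiply before dividing to keep sub-millisecond precision; widen to
    // 64 bits first so the product cannot wrap for large tick values.
    return (1000 * static_cast<int64_t>(rtp_ticks)) / 90;
  }
  // RtpTicksToMicros(180000) == 2'000'000, i.e. two seconds of media time.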
@@ -672,7 +672,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
   rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
   VideoFrame input_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(buffer)
-                               .set_timestamp_rtp(100)
+                               .set_rtp_timestamp(100)
                                .set_timestamp_ms(1000)
                                .set_rotation(kVideoRotation_180)
                                .build();

@@ -1046,7 +1046,7 @@ TEST_F(TestSimulcastEncoderAdapterFake,
       /*allow_to_i420=*/false));
   VideoFrame input_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(buffer)
-                               .set_timestamp_rtp(100)
+                               .set_rtp_timestamp(100)
                                .set_timestamp_ms(1000)
                                .set_rotation(kVideoRotation_180)
                                .build();

@@ -1083,7 +1083,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, NativeHandleForwardingOnlyIfSupported) {
       /*allow_to_i420=*/true));
   VideoFrame input_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(buffer)
-                               .set_timestamp_rtp(100)
+                               .set_rtp_timestamp(100)
                                .set_timestamp_ms(1000)
                                .set_rotation(kVideoRotation_180)
                                .build();

@@ -1141,7 +1141,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, GeneratesKeyFramesOnRequestedLayers) {
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
   VideoFrame first_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(buffer)
-                               .set_timestamp_rtp(0)
+                               .set_rtp_timestamp(0)
                                .set_timestamp_ms(0)
                                .build();
   EXPECT_EQ(0, adapter_->Encode(first_frame, &frame_types));

@@ -1161,7 +1161,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, GeneratesKeyFramesOnRequestedLayers) {
   frame_types[2] = VideoFrameType::kVideoFrameDelta;
   VideoFrame second_frame = VideoFrame::Builder()
                                 .set_video_frame_buffer(buffer)
-                                .set_timestamp_rtp(10000)
+                                .set_rtp_timestamp(10000)
                                 .set_timestamp_ms(100000)
                                 .build();
   EXPECT_EQ(0, adapter_->Encode(second_frame, &frame_types));

@@ -1181,7 +1181,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, GeneratesKeyFramesOnRequestedLayers) {
   frame_types[2] = VideoFrameType::kVideoFrameDelta;
   VideoFrame third_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(buffer)
-                               .set_timestamp_rtp(20000)
+                               .set_rtp_timestamp(20000)
                                .set_timestamp_ms(200000)
                                .build();
   EXPECT_EQ(0, adapter_->Encode(third_frame, &frame_types));

@@ -1205,7 +1205,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
   input_buffer->InitializeData();
   VideoFrame input_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(input_buffer)
-                               .set_timestamp_rtp(0)
+                               .set_rtp_timestamp(0)
                                .set_timestamp_us(0)
                                .set_rotation(kVideoRotation_0)
                                .build();

@@ -1310,7 +1310,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ActivatesCorrectStreamsInInitEncode) {
   rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
   VideoFrame input_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(buffer)
-                               .set_timestamp_rtp(100)
+                               .set_rtp_timestamp(100)
                                .set_timestamp_ms(1000)
                                .set_rotation(kVideoRotation_180)
                                .build();

@@ -1348,7 +1348,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, TrustedRateControl) {
   rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
   VideoFrame input_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(buffer)
-                               .set_timestamp_rtp(100)
+                               .set_rtp_timestamp(100)
                                .set_timestamp_ms(1000)
                                .set_rotation(kVideoRotation_180)
                                .build();

@@ -1655,7 +1655,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, SupportsSimulcast) {
   rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
   VideoFrame input_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(buffer)
-                               .set_timestamp_rtp(100)
+                               .set_rtp_timestamp(100)
                                .set_timestamp_ms(1000)
                                .set_rotation(kVideoRotation_180)
                                .build();

@@ -1706,7 +1706,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, SupportsFallback) {
   rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
   VideoFrame input_frame = VideoFrame::Builder()
                                .set_video_frame_buffer(buffer)
-                               .set_timestamp_rtp(100)
+                               .set_rtp_timestamp(100)
                                .set_timestamp_ms(1000)
                                .set_rotation(kVideoRotation_180)
                                .build();
@@ -4270,7 +4270,7 @@ TEST_F(WebRtcVideoChannelTest, EstimatesNtpStartTimeCorrectly) {
   webrtc::VideoFrame video_frame =
       webrtc::VideoFrame::Builder()
           .set_video_frame_buffer(CreateBlackFrameBuffer(4, 4))
-          .set_timestamp_rtp(kInitialTimestamp)
+          .set_rtp_timestamp(kInitialTimestamp)
           .set_timestamp_us(0)
           .set_rotation(webrtc::kVideoRotation_0)
           .build();

@@ -4284,7 +4284,7 @@ TEST_F(WebRtcVideoChannelTest, EstimatesNtpStartTimeCorrectly) {
   // triggers a constant-overflow warning, hence we're calculating it explicitly
   // here.
   time_controller_.AdvanceTime(webrtc::TimeDelta::Millis(kFrameOffsetMs));
-  video_frame.set_timestamp(kFrameOffsetMs * 90 - 1);
+  video_frame.set_rtp_timestamp(kFrameOffsetMs * 90 - 1);
   video_frame.set_ntp_time_ms(kInitialNtpTimeMs + kFrameOffsetMs);
   stream->InjectFrame(video_frame);
 

@@ -7542,7 +7542,7 @@ TEST_F(WebRtcVideoChannelTest, ReceiveDifferentUnsignaledSsrc) {
   webrtc::VideoFrame video_frame =
       webrtc::VideoFrame::Builder()
           .set_video_frame_buffer(CreateBlackFrameBuffer(4, 4))
-          .set_timestamp_rtp(100)
+          .set_rtp_timestamp(100)
           .set_timestamp_us(0)
           .set_rotation(webrtc::kVideoRotation_0)
           .build();

@@ -7561,7 +7561,7 @@ TEST_F(WebRtcVideoChannelTest, ReceiveDifferentUnsignaledSsrc) {
   webrtc::VideoFrame video_frame2 =
       webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(CreateBlackFrameBuffer(4, 4))
-          .set_timestamp_rtp(200)
+          .set_rtp_timestamp(200)
           .set_timestamp_us(0)
           .set_rotation(webrtc::kVideoRotation_0)
           .build();

@@ -7581,7 +7581,7 @@ TEST_F(WebRtcVideoChannelTest, ReceiveDifferentUnsignaledSsrc) {
   webrtc::VideoFrame video_frame3 =
       webrtc::VideoFrame::Builder()
           .set_video_frame_buffer(CreateBlackFrameBuffer(4, 4))
-          .set_timestamp_rtp(300)
+          .set_rtp_timestamp(300)
           .set_timestamp_us(0)
           .set_rotation(webrtc::kVideoRotation_0)
           .build();
@@ -247,7 +247,7 @@ int32_t VideoCaptureImpl::IncomingFrame(uint8_t* videoFrame,
   VideoFrame captureFrame =
       VideoFrame::Builder()
           .set_video_frame_buffer(buffer)
-          .set_timestamp_rtp(0)
+          .set_rtp_timestamp(0)
           .set_timestamp_ms(rtc::TimeMillis())
           .set_rotation(!apply_rotation_ ? _rotateFrame : kVideoRotation_0)
           .build();
@@ -186,7 +186,7 @@ int32_t Dav1dDecoder::Decode(const EncodedImage& encoded_image,
   VideoFrame decoded_frame =
       VideoFrame::Builder()
           .set_video_frame_buffer(wrapped_buffer)
-          .set_timestamp_rtp(encoded_image.RtpTimestamp())
+          .set_rtp_timestamp(encoded_image.RtpTimestamp())
           .set_ntp_time_ms(encoded_image.ntp_time_ms_)
           .set_color_space(encoded_image.ColorSpace())
           .build();
@@ -730,7 +730,7 @@ int32_t LibaomAv1Encoder::Encode(
     encoded_image._frameType = layer_frame->IsKeyframe()
                                    ? VideoFrameType::kVideoFrameKey
                                    : VideoFrameType::kVideoFrameDelta;
-    encoded_image.SetRtpTimestamp(frame.timestamp());
+    encoded_image.SetRtpTimestamp(frame.rtp_timestamp());
     encoded_image.SetCaptureTimeIdentifier(frame.capture_time_identifier());
     encoded_image.capture_time_ms_ = frame.render_time_ms();
     encoded_image.rotation_ = frame.rotation();

@@ -426,7 +426,7 @@ TEST(LibaomAv1EncoderTest, AdheresToTargetBitrateDespiteUnevenFrameTiming) {
     VideoFrame frame = VideoFrame::Builder()
                            .set_video_frame_buffer(
                                frame_buffer_generator->NextFrame().buffer)
-                           .set_timestamp_rtp(rtp_timestamp)
+                           .set_rtp_timestamp(rtp_timestamp)
                            .build();
 
     RTC_CHECK_EQ(encoder->Encode(frame, &frame_types), WEBRTC_VIDEO_CODEC_OK);
@@ -616,7 +616,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
 
   VideoFrame decoded_frame = VideoFrame::Builder()
                                  .set_video_frame_buffer(cropped_buffer)
-                                 .set_timestamp_rtp(input_image.RtpTimestamp())
+                                 .set_rtp_timestamp(input_image.RtpTimestamp())
                                  .set_color_space(color_space)
                                  .build();
 

@@ -544,7 +544,7 @@ int32_t H264EncoderImpl::Encode(
 
     encoded_images_[i]._encodedWidth = configurations_[i].width;
     encoded_images_[i]._encodedHeight = configurations_[i].height;
-    encoded_images_[i].SetRtpTimestamp(input_frame.timestamp());
+    encoded_images_[i].SetRtpTimestamp(input_frame.rtp_timestamp());
     encoded_images_[i].SetColorSpace(input_frame.color_space());
     encoded_images_[i]._frameType = ConvertToVideoFrameType(info.eFrameType);
     encoded_images_[i].SetSimulcastIndex(configurations_[i].simulcast_idx);
@@ -61,7 +61,7 @@ EncodedVideoFrameProducer::Encode() {
     VideoFrame frame =
         VideoFrame::Builder()
             .set_video_frame_buffer(frame_buffer_generator->NextFrame().buffer)
-            .set_timestamp_rtp(rtp_timestamp_)
+            .set_rtp_timestamp(rtp_timestamp_)
             .set_capture_time_identifier(capture_time_identifier_)
             .build();
     rtp_timestamp_ += rtp_tick;
@@ -110,7 +110,7 @@ VideoFrame VideoCodecUnitTest::NextInputFrame() {
   const uint32_t timestamp =
       last_input_frame_timestamp_ +
       kVideoPayloadTypeFrequency / codec_settings_.maxFramerate;
-  input_frame.set_timestamp(timestamp);
+  input_frame.set_rtp_timestamp(timestamp);
   input_frame.set_timestamp_us(timestamp * (1000 / 90));
 
   last_input_frame_timestamp_ = timestamp;
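One detail worth noting in the context line above: (1000 / 90) is integer division in C++ and evaluates to 11, so the derived timestamp_us advances by 11 us per RTP tick rather than the exact 11.11... That behavior predates this commit and is unchanged by it; the check below is only an editor's illustration of the arithmetic:

  static_assert(1000 / 90 == 11, "integer division truncates");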
@@ -254,7 +254,7 @@ void VideoProcessor::ProcessFrame() {
   VideoFrame input_frame =
       VideoFrame::Builder()
           .set_video_frame_buffer(buffer)
-          .set_timestamp_rtp(static_cast<uint32_t>(timestamp))
+          .set_rtp_timestamp(static_cast<uint32_t>(timestamp))
           .set_timestamp_ms(static_cast<int64_t>(timestamp / kMsToRtpTimestamp))
           .set_rotation(webrtc::kVideoRotation_0)
           .build();

@@ -352,7 +352,7 @@ int32_t VideoProcessor::VideoProcessorDecodeCompleteCallback::Decoded(
           .set_timestamp_us(image.timestamp_us())
           .set_id(image.id())
           .build();
-  copy.set_timestamp(image.timestamp());
+  copy.set_rtp_timestamp(image.rtp_timestamp());
 
   task_queue_->PostTask([this, copy]() {
     video_processor_->FrameDecoded(copy, simulcast_svc_idx_);

@@ -555,7 +555,7 @@ void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame,
   const int64_t decode_stop_ns = rtc::TimeNanos();
 
   FrameStatistics* frame_stat =
-      stats_->GetFrameWithTimestamp(decoded_frame.timestamp(), spatial_idx);
+      stats_->GetFrameWithTimestamp(decoded_frame.rtp_timestamp(), spatial_idx);
   const size_t frame_number = frame_stat->frame_number;
 
   if (!first_decoded_frame_[spatial_idx]) {
@@ -105,15 +105,15 @@ TEST_F(VideoProcessorTest, ProcessFrames_FixedFramerate) {
 
   EXPECT_CALL(frame_reader_mock_, PullFrame(_, _, _))
       .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));
-  EXPECT_CALL(
-      encoder_mock_,
-      Encode(Property(&VideoFrame::timestamp, 1 * 90000 / kFramerateFps), _))
+  EXPECT_CALL(encoder_mock_, Encode(Property(&VideoFrame::rtp_timestamp,
+                                             1 * 90000 / kFramerateFps),
+                                    _))
       .Times(1);
   q_.SendTask([this] { video_processor_->ProcessFrame(); });
 
-  EXPECT_CALL(
-      encoder_mock_,
-      Encode(Property(&VideoFrame::timestamp, 2 * 90000 / kFramerateFps), _))
+  EXPECT_CALL(encoder_mock_, Encode(Property(&VideoFrame::rtp_timestamp,
+                                             2 * 90000 / kFramerateFps),
+                                    _))
       .Times(1);
   q_.SendTask([this] { video_processor_->ProcessFrame(); });
 

@@ -135,7 +135,7 @@ TEST_F(VideoProcessorTest, ProcessFrames_VariableFramerate) {
   EXPECT_CALL(frame_reader_mock_, PullFrame(_, _, _))
       .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));
   EXPECT_CALL(encoder_mock_,
-              Encode(Property(&VideoFrame::timestamp, kStartTimestamp), _))
+              Encode(Property(&VideoFrame::rtp_timestamp, kStartTimestamp), _))
       .Times(1);
   q_.SendTask([this] { video_processor_->ProcessFrame(); });
 

@@ -149,7 +149,7 @@ TEST_F(VideoProcessorTest, ProcessFrames_VariableFramerate) {
       [=] { video_processor_->SetRates(kBitrateKbps, kNewFramerateFps); });
 
   EXPECT_CALL(encoder_mock_,
-              Encode(Property(&VideoFrame::timestamp,
+              Encode(Property(&VideoFrame::rtp_timestamp,
                               kStartTimestamp + 90000 / kNewFramerateFps),
                      _))
       .Times(1);
@@ -302,7 +302,7 @@ int LibvpxVp8Decoder::ReturnFrame(
 
   VideoFrame decoded_image = VideoFrame::Builder()
                                  .set_video_frame_buffer(buffer)
-                                 .set_timestamp_rtp(timestamp)
+                                 .set_rtp_timestamp(timestamp)
                                  .set_color_space(explicit_color_space)
                                  .build();
   decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);
@@ -1039,11 +1039,12 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
   if (frame.update_rect().IsEmpty() && num_steady_state_frames_ >= 3 &&
       !key_frame_requested) {
     if (variable_framerate_experiment_.enabled &&
-        framerate_controller_.DropFrame(frame.timestamp() / kRtpTicksPerMs) &&
+        framerate_controller_.DropFrame(frame.rtp_timestamp() /
+                                        kRtpTicksPerMs) &&
         frame_drop_overrides_.empty()) {
       return WEBRTC_VIDEO_CODEC_OK;
     }
-    framerate_controller_.AddFrame(frame.timestamp() / kRtpTicksPerMs);
+    framerate_controller_.AddFrame(frame.rtp_timestamp() / kRtpTicksPerMs);
   }
 
   bool send_key_frame = key_frame_requested;

@@ -1052,7 +1053,7 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
   Vp8FrameConfig tl_configs[kMaxSimulcastStreams];
   for (size_t i = 0; i < encoders_.size(); ++i) {
     tl_configs[i] =
-        frame_buffer_controller_->NextFrameConfig(i, frame.timestamp());
+        frame_buffer_controller_->NextFrameConfig(i, frame.rtp_timestamp());
     send_key_frame |= tl_configs[i].IntraFrame();
     drop_frame |= tl_configs[i].drop_frame;
     RTC_DCHECK(i == 0 ||

@@ -1255,7 +1256,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image,
         encoded_images_[encoder_idx].set_size(encoded_pos);
         encoded_images_[encoder_idx].SetSimulcastIndex(stream_idx);
         PopulateCodecSpecific(&codec_specific, *pkt, stream_idx, encoder_idx,
-                              input_image.timestamp());
+                              input_image.rtp_timestamp());
         if (codec_specific.codecSpecific.VP8.temporalIdx != kNoTemporalIdx) {
           encoded_images_[encoder_idx].SetTemporalIndex(
               codec_specific.codecSpecific.VP8.temporalIdx);

@@ -1263,7 +1264,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image,
         break;
       }
     }
-    encoded_images_[encoder_idx].SetRtpTimestamp(input_image.timestamp());
+    encoded_images_[encoder_idx].SetRtpTimestamp(input_image.rtp_timestamp());
     encoded_images_[encoder_idx].SetCaptureTimeIdentifier(
         input_image.capture_time_identifier());
     encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());

@@ -1301,7 +1302,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image,
       if (encoded_images_[encoder_idx].size() == 0) {
         // Dropped frame that will be re-encoded.
         frame_buffer_controller_->OnFrameDropped(stream_idx,
-                                                 input_image.timestamp());
+                                                 input_image.rtp_timestamp());
       }
     }
   }
@@ -253,7 +253,7 @@ TEST_F(TestVp8Impl, Configure) {
 TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
   constexpr Timestamp kCaptureTimeIdentifier = Timestamp::Micros(1000);
   VideoFrame input_frame = NextInputFrame();
-  input_frame.set_timestamp(kInitialTimestampRtp);
+  input_frame.set_rtp_timestamp(kInitialTimestampRtp);
   input_frame.set_timestamp_us(kInitialTimestampMs *
                                rtc::kNumMicrosecsPerMillisec);
   input_frame.set_capture_time_identifier(kCaptureTimeIdentifier);

@@ -493,7 +493,7 @@ TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
 #endif
 TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   VideoFrame input_frame = NextInputFrame();
-  input_frame.set_timestamp(kInitialTimestampRtp);
+  input_frame.set_rtp_timestamp(kInitialTimestampRtp);
   input_frame.set_timestamp_us(kInitialTimestampMs *
                                rtc::kNumMicrosecsPerMillisec);
   EncodedImage encoded_frame;

@@ -511,7 +511,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   ASSERT_TRUE(decoded_frame);
   // Compute PSNR on all planes (faster than SSIM).
   EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36);
-  EXPECT_EQ(kInitialTimestampRtp, decoded_frame->timestamp());
+  EXPECT_EQ(kInitialTimestampRtp, decoded_frame->rtp_timestamp());
 }
 
 TEST_F(TestVp8Impl, EncoderWith2TemporalLayers) {
|
@ -345,7 +345,7 @@ int LibvpxVp9Decoder::ReturnFrame(
|
|||
|
||||
auto builder = VideoFrame::Builder()
|
||||
.set_video_frame_buffer(img_wrapped_buffer)
|
||||
.set_timestamp_rtp(timestamp);
|
||||
.set_rtp_timestamp(timestamp);
|
||||
if (explicit_color_space) {
|
||||
builder.set_color_space(*explicit_color_space);
|
||||
} else {
|
||||
|
|
|
@ -1057,7 +1057,7 @@ int LibvpxVp9Encoder::Encode(const VideoFrame& input_image,
|
|||
|
||||
if (codec_.mode == VideoCodecMode::kScreensharing) {
|
||||
const uint32_t frame_timestamp_ms =
|
||||
1000 * input_image.timestamp() / kVideoPayloadTypeFrequency;
|
||||
1000 * input_image.rtp_timestamp() / kVideoPayloadTypeFrequency;
|
||||
|
||||
// To ensure that several rate-limiters with different limits don't
|
||||
// interfere, they must be queried in order of increasing limit.
|
||||
|
@ -1818,7 +1818,7 @@ void LibvpxVp9Encoder::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
|
|||
UpdateReferenceBuffers(*pkt, pics_since_key_);
|
||||
|
||||
TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
|
||||
encoded_image_.SetRtpTimestamp(input_image_->timestamp());
|
||||
encoded_image_.SetRtpTimestamp(input_image_->rtp_timestamp());
|
||||
encoded_image_.SetCaptureTimeIdentifier(
|
||||
input_image_->capture_time_identifier());
|
||||
encoded_image_.SetColorSpace(input_image_->color_space());
|
||||
|
|
|
@ -1940,9 +1940,9 @@ TEST_F(TestVp9ImplFrameDropping, PreEncodeFrameDropping) {
|
|||
VideoFrame input_frame = NextInputFrame();
|
||||
for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
|
||||
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
|
||||
const size_t timestamp = input_frame.timestamp() +
|
||||
const size_t timestamp = input_frame.rtp_timestamp() +
|
||||
kVideoPayloadTypeFrequency / input_framerate_fps;
|
||||
input_frame.set_timestamp(static_cast<uint32_t>(timestamp));
|
||||
input_frame.set_rtp_timestamp(static_cast<uint32_t>(timestamp));
|
||||
}
|
||||
|
||||
const size_t num_encoded_frames = GetNumEncodedFrames();
|
||||
|
@ -1992,9 +1992,9 @@ TEST_F(TestVp9ImplFrameDropping, DifferentFrameratePerSpatialLayer) {
|
|||
VideoFrame input_frame = NextInputFrame();
|
||||
for (size_t frame_num = 0; frame_num < num_input_frames; ++frame_num) {
|
||||
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr));
|
||||
const size_t timestamp = input_frame.timestamp() +
|
||||
const size_t timestamp = input_frame.rtp_timestamp() +
|
||||
kVideoPayloadTypeFrequency / input_framerate_fps;
|
||||
input_frame.set_timestamp(static_cast<uint32_t>(timestamp));
|
||||
input_frame.set_rtp_timestamp(static_cast<uint32_t>(timestamp));
|
||||
}
|
||||
|
||||
std::vector<EncodedImage> encoded_frames;
|
||||
|
|
|
@ -104,7 +104,7 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
|
|||
absl::optional<uint8_t> qp) {
|
||||
RTC_DCHECK(_receiveCallback) << "Callback must not be null at this point";
|
||||
TRACE_EVENT_INSTANT1("webrtc", "VCMDecodedFrameCallback::Decoded",
|
||||
"timestamp", decodedImage.timestamp());
|
||||
"timestamp", decodedImage.rtp_timestamp());
|
||||
// TODO(holmer): We should improve this so that we can handle multiple
|
||||
// callbacks from one call to Decode().
|
||||
absl::optional<FrameInfo> frame_info;
|
||||
|
@ -113,7 +113,7 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
|
|||
{
|
||||
MutexLock lock(&lock_);
|
||||
std::tie(frame_info, dropped_frames) =
|
||||
FindFrameInfo(decodedImage.timestamp());
|
||||
FindFrameInfo(decodedImage.rtp_timestamp());
|
||||
timestamp_map_size = frame_infos_.size();
|
||||
}
|
||||
if (dropped_frames > 0) {
|
||||
|
@ -123,7 +123,7 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
|
|||
if (!frame_info) {
|
||||
RTC_LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
|
||||
"frame with timestamp "
|
||||
<< decodedImage.timestamp();
|
||||
<< decodedImage.rtp_timestamp();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -203,7 +203,7 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
|
|||
timing_frame_info.decode_finish_ms = now.ms();
|
||||
timing_frame_info.render_time_ms =
|
||||
frame_info->render_time ? frame_info->render_time->ms() : -1;
|
||||
timing_frame_info.rtp_timestamp = decodedImage.timestamp();
|
||||
timing_frame_info.rtp_timestamp = decodedImage.rtp_timestamp();
|
||||
timing_frame_info.receive_start_ms = frame_info->timing.receive_start_ms;
|
||||
timing_frame_info.receive_finish_ms = frame_info->timing.receive_finish_ms;
|
||||
RTC_HISTOGRAM_COUNTS_1000(
|
||||
|
|
|
@ -118,7 +118,7 @@ TEST_F(GenericDecoderTest, FrameDroppedIfTooManyFramesInFlight) {
|
|||
ASSERT_EQ(10U, frames.size());
|
||||
// Expect that the first frame was dropped since all decodes released at the
|
||||
// same time and the oldest frame info is the first one dropped.
|
||||
EXPECT_EQ(frames[0].timestamp(), 90000u);
|
||||
EXPECT_EQ(frames[0].rtp_timestamp(), 90000u);
|
||||
EXPECT_EQ(1u, user_callback_.frames_dropped());
|
||||
}
|
||||
|
||||
|
|
|
@ -314,11 +314,11 @@ void SimulcastTestFixtureImpl::RunActiveStreamsTest(
|
|||
SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
|
||||
|
||||
ExpectStreams(VideoFrameType::kVideoFrameKey, active_streams);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
ExpectStreams(VideoFrameType::kVideoFrameDelta, active_streams);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
}
|
||||
|
||||
|
@ -399,32 +399,32 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
|
|||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
frame_types[0] = VideoFrameType::kVideoFrameKey;
|
||||
ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
std::fill(frame_types.begin(), frame_types.end(),
|
||||
VideoFrameType::kVideoFrameDelta);
|
||||
frame_types[1] = VideoFrameType::kVideoFrameKey;
|
||||
ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
std::fill(frame_types.begin(), frame_types.end(),
|
||||
VideoFrameType::kVideoFrameDelta);
|
||||
frame_types[2] = VideoFrameType::kVideoFrameKey;
|
||||
ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
std::fill(frame_types.begin(), frame_types.end(),
|
||||
VideoFrameType::kVideoFrameDelta);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
}
|
||||
|
||||
|
@ -438,14 +438,14 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnSpecificStreams() {
|
|||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
frame_types[0] = VideoFrameType::kVideoFrameKey;
|
||||
ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[0]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[1]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[2]);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
std::fill(frame_types.begin(), frame_types.end(),
|
||||
|
@ -454,7 +454,7 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnSpecificStreams() {
|
|||
ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[0]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[1]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[2]);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
std::fill(frame_types.begin(), frame_types.end(),
|
||||
|
@ -463,7 +463,7 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnSpecificStreams() {
|
|||
ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[0]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[1]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[2]);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
std::fill(frame_types.begin(), frame_types.end(),
|
||||
|
@ -473,7 +473,7 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnSpecificStreams() {
|
|||
ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[0]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameDelta, kScaleResolutionDownBy[1]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[2]);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
std::fill(frame_types.begin(), frame_types.end(),
|
||||
|
@ -481,13 +481,13 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnSpecificStreams() {
|
|||
ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[0]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[1]);
|
||||
ExpectStream(VideoFrameType::kVideoFrameKey, kScaleResolutionDownBy[2]);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
std::fill(frame_types.begin(), frame_types.end(),
|
||||
VideoFrameType::kVideoFrameDelta);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
}
|
||||
|
||||
|
@@ -500,7 +500,7 @@ void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
 

@@ -513,7 +513,7 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
 

@@ -527,7 +527,7 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
 

@@ -540,7 +540,7 @@ void SimulcastTestFixtureImpl::TestPaddingOneStream() {
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
 

@@ -554,7 +554,7 @@ void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
 

@@ -567,7 +567,7 @@ void SimulcastTestFixtureImpl::TestSendAllStreams() {
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
 

@@ -580,40 +580,40 @@ void SimulcastTestFixtureImpl::TestDisablingStreams() {
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We should only get two streams and padding for one.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We should only get the first stream and padding for two.
   SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We don't have enough bitrate for the thumbnail stream, but we should get
   // it anyway with current configuration.
   SetRates(kTargetBitrates[0] - 1, 30);
   ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We should only get two streams and padding for one.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
   // We get a key frame because a new stream is being enabled.
   ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
   // We should get all three streams.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
   // We get a key frame because a new stream is being enabled.
   ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }
 
@@ -747,7 +747,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #1.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
   SetExpectedValues3<bool>(true, true, true, expected_layer_sync);

@@ -755,7 +755,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #2.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
   SetExpectedValues3<bool>(true, true, true, expected_layer_sync);

@@ -763,7 +763,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #3.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);

@@ -771,7 +771,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #4.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);

@@ -779,7 +779,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #5.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
   SetExpectedValues3<bool>(is_h264, is_h264, is_h264, expected_layer_sync);

@@ -817,7 +817,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #1.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
   SetExpectedValues3<bool>(true, true, false, expected_layer_sync);

@@ -825,7 +825,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #2.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
   SetExpectedValues3<bool>(true, false, false, expected_layer_sync);

@@ -833,7 +833,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #3.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);

@@ -841,7 +841,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #4.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);

@@ -849,7 +849,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
 
   // Next frame: #5.
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
   SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
   SetExpectedValues3<bool>(false, true, false, expected_layer_sync);

@@ -891,7 +891,7 @@ void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
   plane_offset[kUPlane] += 1;
   plane_offset[kVPlane] += 1;
   CreateImage(input_buffer_, plane_offset);
-  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+  input_frame_->set_rtp_timestamp(input_frame_->rtp_timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
 
   EncodedImage encoded_frame;
@@ -1347,7 +1347,7 @@ index 563ef5abd2..e085ac2df8 100644
 
  VideoCaptureModule::DeviceInfo* VideoCaptureFactory::CreateDeviceInfo(
 diff --git a/modules/video_capture/video_capture_impl.cc b/modules/video_capture/video_capture_impl.cc
-index 428253bf23..66fd0a0ebe 100644
+index 5588fae161..51282183f5 100644
 --- a/modules/video_capture/video_capture_impl.cc
 +++ b/modules/video_capture/video_capture_impl.cc
 @@ -77,7 +77,6 @@ VideoCaptureImpl::VideoCaptureImpl()

@@ -1454,7 +1454,7 @@ index 5ec1fd4a83..e46e050609 100644
 
  int64_t _lastProcessFrameTimeNanos RTC_GUARDED_BY(capture_checker_);
 diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
-index 56267be98d..b2cf68e249 100644
+index 24d862c4c2..13c8b87c38 100644
 --- a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
 +++ b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
 @@ -270,6 +270,7 @@ LibvpxVp9Encoder::LibvpxVp9Encoder(std::unique_ptr<LibvpxInterface> interface,

@@ -14,7 +14,7 @@ Mercurial Revision: https://hg.mozilla.org/mozilla-central/rev/c56cb66f86518dfb0
  1 file changed, 10 insertions(+), 1 deletion(-)
 
 diff --git a/modules/video_capture/video_capture_impl.cc b/modules/video_capture/video_capture_impl.cc
-index 66fd0a0ebe..7071776d0a 100644
+index 51282183f5..afd6ba4d0a 100644
 --- a/modules/video_capture/video_capture_impl.cc
 +++ b/modules/video_capture/video_capture_impl.cc
 @@ -218,12 +218,21 @@ int32_t VideoCaptureImpl::IncomingFrame(uint8_t* videoFrame,

@@ -10,7 +10,7 @@ Mercurial Revision: https://hg.mozilla.org/mozilla-central/rev/51d12094d825c4c44
  1 file changed, 3 insertions(+), 2 deletions(-)
 
 diff --git a/modules/video_capture/video_capture_impl.cc b/modules/video_capture/video_capture_impl.cc
-index 7071776d0a..02404697ad 100644
+index afd6ba4d0a..1a12020b64 100644
 --- a/modules/video_capture/video_capture_impl.cc
 +++ b/modules/video_capture/video_capture_impl.cc
 @@ -180,8 +180,6 @@ int32_t VideoCaptureImpl::IncomingFrame(uint8_t* videoFrame,

@@ -10,7 +10,7 @@ Mercurial Revision: https://hg.mozilla.org/mozilla-central/rev/1387b2c480b55ecca
  2 files changed, 7 insertions(+), 3 deletions(-)
 
 diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
-index 3eed384716..5f61478111 100644
+index 9a7a6f1498..86ec13fc1f 100644
 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
 +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
 @@ -47,7 +47,7 @@ const char kVp8PostProcArmFieldTrial[] = "WebRTC-VP8-Postproc-Config-Arm";

@@ -23,7 +23,7 @@ index 3eed384716..5f61478111 100644
  #else
  constexpr bool kIsArm = false;
 diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
-index 3368b07946..512755f3b1 100644
+index a6befaf33b..6205ab74cf 100644
 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
 +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
 @@ -757,7 +757,7 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,

@@ -10,7 +10,7 @@ Mercurial Revision: https://hg.mozilla.org/mozilla-central/rev/a7179d8d75313b6c9
  1 file changed, 1 insertion(+), 1 deletion(-)
 
 diff --git a/video/video_stream_encoder.cc b/video/video_stream_encoder.cc
-index c28d086fa4..3a51357e58 100644
+index daa2976807..719d0d82a6 100644
 --- a/video/video_stream_encoder.cc
 +++ b/video/video_stream_encoder.cc
 @@ -1388,7 +1388,7 @@ void VideoStreamEncoder::ReconfigureEncoder() {

@@ -17,7 +17,7 @@ Mercurial Revision: https://hg.mozilla.org/mozilla-central/rev/56ff441b644400f09
  1 file changed, 7 insertions(+), 4 deletions(-)
 
 diff --git a/modules/video_capture/video_capture_impl.cc b/modules/video_capture/video_capture_impl.cc
-index 02404697ad..1bddaf824d 100644
+index 1a12020b64..dad8bee1f8 100644
 --- a/modules/video_capture/video_capture_impl.cc
 +++ b/modules/video_capture/video_capture_impl.cc
 @@ -119,11 +119,14 @@ void VideoCaptureImpl::DeRegisterCaptureDataCallback(

@@ -13,7 +13,7 @@ Mercurial Revision: https://hg.mozilla.org/mozilla-central/rev/26c84d214137a1b0d
  1 file changed, 1 insertion(+), 1 deletion(-)
 
 diff --git a/modules/video_capture/video_capture_impl.cc b/modules/video_capture/video_capture_impl.cc
-index 1bddaf824d..15dfb7fe1f 100644
+index dad8bee1f8..46fff89a52 100644
 --- a/modules/video_capture/video_capture_impl.cc
 +++ b/modules/video_capture/video_capture_impl.cc
 @@ -134,7 +134,7 @@ int32_t VideoCaptureImpl::DeliverCapturedFrame(VideoFrame& captureFrame) {
@@ -544,7 +544,7 @@ int main(int argc, char* argv[]) {
     webrtc::VideoFrame frame =
         webrtc::VideoFrame::Builder()
             .set_video_frame_buffer(frame_buffer_generator->NextFrame().buffer)
-            .set_timestamp_rtp(rtp_timestamp)
+            .set_rtp_timestamp(rtp_timestamp)
             .build();
     ret = video_encoder->Encode(frame, &frame_types);
     RTC_CHECK_EQ(ret, WEBRTC_VIDEO_CODEC_OK);
@@ -218,7 +218,7 @@ class FileRenderPassthrough : public rtc::VideoSinkInterface<VideoFrame> {
       return;
 
     std::stringstream filename;
-    filename << basename_ << count_++ << "_" << video_frame.timestamp()
+    filename << basename_ << count_++ << "_" << video_frame.rtp_timestamp()
              << ".jpg";
 
     test::JpegFrameWriter frame_writer(filename.str());
@@ -55,15 +55,12 @@ class ObjCVideoDecoder : public VideoDecoder {
   int32_t RegisterDecodeCompleteCallback(DecodedImageCallback *callback) override {
     [decoder_ setCallback:^(RTC_OBJC_TYPE(RTCVideoFrame) * frame) {
       const auto buffer = rtc::make_ref_counted<ObjCFrameBuffer>(frame.buffer);
-      VideoFrame videoFrame =
-          VideoFrame::Builder()
-              .set_video_frame_buffer(buffer)
-              .set_timestamp_rtp((uint32_t)(frame.timeStampNs / rtc::kNumNanosecsPerMicrosec))
-              .set_timestamp_ms(0)
-              .set_rotation((VideoRotation)frame.rotation)
-              .build();
-      videoFrame.set_timestamp(frame.timeStamp);
+      VideoFrame videoFrame = VideoFrame::Builder()
+                                  .set_video_frame_buffer(buffer)
+                                  .set_rtp_timestamp(frame.timeStamp)
+                                  .set_timestamp_ms(0)
+                                  .set_rotation((VideoRotation)frame.rotation)
+                                  .build();
 
       callback->Decoded(videoFrame);
     }];
 

@@ -20,7 +20,7 @@ RTC_OBJC_TYPE(RTCVideoFrame) * ToObjCVideoFrame(const VideoFrame &frame) {
       initWithBuffer:ToObjCVideoFrameBuffer(frame.video_frame_buffer())
             rotation:RTCVideoRotation(frame.rotation())
          timeStampNs:frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec];
-  videoFrame.timeStamp = frame.timestamp();
+  videoFrame.timeStamp = frame.rtp_timestamp();
 
   return videoFrame;
 }
@@ -53,7 +53,7 @@ int32_t ConfigurableFrameSizeEncoder::Encode(
   encodedImage._encodedHeight = inputImage.height();
   encodedImage._encodedWidth = inputImage.width();
   encodedImage._frameType = VideoFrameType::kVideoFrameKey;
-  encodedImage.SetRtpTimestamp(inputImage.timestamp());
+  encodedImage.SetRtpTimestamp(inputImage.rtp_timestamp());
   encodedImage.capture_time_ms_ = inputImage.render_time_ms();
   CodecSpecificInfo specific{};
   specific.codecType = codec_type_;
@@ -54,7 +54,7 @@ int32_t FakeDecoder::Decode(const EncodedImage& input,
                          .set_rotation(webrtc::kVideoRotation_0)
                          .set_timestamp_ms(render_time_ms)
                          .build();
-  frame.set_timestamp(input.RtpTimestamp());
+  frame.set_rtp_timestamp(input.RtpTimestamp());
   frame.set_ntp_time_ms(input.ntp_time_ms_);
 
   if (decode_delay_ms_ == 0 || !task_queue_) {
@@ -141,7 +141,7 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image,
       EncodedImage encoded;
       encoded.SetEncodedData(buffer);
 
-      encoded.SetRtpTimestamp(input_image.timestamp());
+      encoded.SetRtpTimestamp(input_image.rtp_timestamp());
       encoded._frameType = frame_info.keyframe ? VideoFrameType::kVideoFrameKey
                                                : VideoFrameType::kVideoFrameDelta;
       encoded._encodedWidth = simulcast_streams[i].width;
@@ -23,7 +23,7 @@ VideoFrame FakeNativeBuffer::CreateFrame(int width,
   return VideoFrame::Builder()
       .set_video_frame_buffer(
          rtc::make_ref_counted<FakeNativeBuffer>(width, height))
-      .set_timestamp_rtp(timestamp)
+      .set_rtp_timestamp(timestamp)
      .set_timestamp_ms(render_time_ms)
      .set_rotation(rotation)
      .build();
@@ -57,7 +57,7 @@ int32_t FakeVp8Decoder::Decode(const EncodedImage& input,
                          .set_rotation(webrtc::kVideoRotation_0)
                          .set_timestamp_ms(render_time_ms)
                          .build();
-  frame.set_timestamp(input.RtpTimestamp());
+  frame.set_rtp_timestamp(input.RtpTimestamp());
   frame.set_ntp_time_ms(input.ntp_time_ms_);
 
   callback_->Decoded(frame, /*decode_time_ms=*/absl::nullopt,
@@ -36,7 +36,7 @@ bool EqualPlane(const uint8_t* data1,
 }
 
 bool FramesEqual(const webrtc::VideoFrame& f1, const webrtc::VideoFrame& f2) {
-  if (f1.timestamp() != f2.timestamp() ||
+  if (f1.rtp_timestamp() != f2.rtp_timestamp() ||
       f1.ntp_time_ms() != f2.ntp_time_ms() ||
       f1.render_time_ms() != f2.render_time_ms()) {
     return false;
@@ -49,7 +49,7 @@ VideoFrame CreateMappableNativeFrame(int64_t ntp_time_ms,
       VideoFrame::Builder()
          .set_video_frame_buffer(rtc::make_ref_counted<MappableNativeBuffer>(
              mappable_type, width, height))
-          .set_timestamp_rtp(99)
+          .set_rtp_timestamp(99)
          .set_timestamp_ms(99)
          .set_rotation(kVideoRotation_0)
          .build();
@@ -72,7 +72,7 @@ EncodedImage FakeEncode(const VideoFrame& frame) {
   packet_infos.push_back(RtpPacketInfo(
       /*ssrc=*/1,
       /*csrcs=*/{},
-      /*rtp_timestamp=*/frame.timestamp(),
+      /*rtp_timestamp=*/frame.rtp_timestamp(),
       /*receive_time=*/Timestamp::Micros(frame.timestamp_us() + 10000)));
   image.SetPacketInfos(RtpPacketInfos(packet_infos));
   return image;
@@ -81,7 +81,7 @@ EncodedImage FakeEncode(const VideoFrame& frame) {
   packet_infos.push_back(RtpPacketInfo(
       /*ssrc=*/1,
       /*csrcs=*/{},
-      /*rtp_timestamp=*/frame.timestamp(),
+      /*rtp_timestamp=*/frame.rtp_timestamp(),
       /*receive_time=*/Timestamp::Micros(frame.timestamp_us() + 10000)));
   image.SetPacketInfos(RtpPacketInfos(packet_infos));
   return image;
@@ -192,7 +192,7 @@ QualityAnalyzingVideoDecoder::DecoderCallback::IrrelevantSimulcastStreamDecoded(
   webrtc::VideoFrame dummy_frame =
       webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(GetDummyFrameBuffer())
-          .set_timestamp_rtp(timestamp_ms)
+          .set_rtp_timestamp(timestamp_ms)
          .set_id(frame_id)
          .build();
   MutexLock lock(&callback_mutex_);
@@ -218,19 +218,19 @@ void QualityAnalyzingVideoDecoder::OnFrameDecoded(
   std::string codec_name;
   {
     MutexLock lock(&mutex_);
-    auto it = timestamp_to_frame_id_.find(frame->timestamp());
+    auto it = timestamp_to_frame_id_.find(frame->rtp_timestamp());
     if (it == timestamp_to_frame_id_.end()) {
       // Ensure, that we have info about this frame. It can happen that for some
       // reasons decoder response, that it failed to decode, when we were
       // posting frame to it, but then call the callback for this frame.
       RTC_LOG(LS_ERROR) << "QualityAnalyzingVideoDecoder::OnFrameDecoded: No "
                            "frame id for frame for frame->timestamp()="
-                        << frame->timestamp();
+                        << frame->rtp_timestamp();
       return;
     }
     frame_id = it->second;
     timestamp_to_frame_id_.erase(it);
-    decoding_images_.erase(frame->timestamp());
+    decoding_images_.erase(frame->rtp_timestamp());
    codec_name = codec_name_;
   }
   // Set frame id to the value, that was extracted from corresponding encoded
@@ -142,7 +142,7 @@ int32_t QualityAnalyzingVideoEncoder::Encode(
   {
     MutexLock lock(&mutex_);
     // Store id to be able to retrieve it in analyzing callback.
-    timestamp_to_frame_id_list_.push_back({frame.timestamp(), frame.id()});
+    timestamp_to_frame_id_list_.push_back({frame.rtp_timestamp(), frame.id()});
     // If this list is growing, it means that we are not receiving new encoded
     // images from encoder. So it should be a bug in setup on in the encoder.
     RTC_DCHECK_LT(timestamp_to_frame_id_list_.size(), kMaxFrameInPipelineCount);
@@ -159,7 +159,7 @@ int32_t QualityAnalyzingVideoEncoder::Encode(
     auto it = timestamp_to_frame_id_list_.end();
     while (it != timestamp_to_frame_id_list_.begin()) {
       --it;
-      if (it->first == frame.timestamp()) {
+      if (it->first == frame.rtp_timestamp()) {
        timestamp_to_frame_id_list_.erase(it);
        break;
      }
@@ -146,7 +146,7 @@ class IvfVideoFrameGeneratorTest : public ::testing::Test {
     const uint32_t timestamp =
         last_frame_timestamp +
        kVideoPayloadTypeFrequency / codec_settings.maxFramerate;
-    frame.set_timestamp(timestamp);
+    frame.set_rtp_timestamp(timestamp);
 
     last_frame_timestamp = timestamp;
 
@@ -108,7 +108,7 @@ class VideoSource {
     frame_num_[timestamp_rtp] = frame_num;
     return VideoFrame::Builder()
        .set_video_frame_buffer(buffer)
-        .set_timestamp_rtp(timestamp_rtp)
+        .set_rtp_timestamp(timestamp_rtp)
        .set_timestamp_us((timestamp_rtp / k90kHz).us())
        .build();
   }
@@ -125,7 +125,7 @@ class VideoSource {
        frame_reader_->ReadFrame(frame_num_.at(timestamp_rtp), resolution);
     return VideoFrame::Builder()
        .set_video_frame_buffer(buffer)
-        .set_timestamp_rtp(timestamp_rtp)
+        .set_rtp_timestamp(timestamp_rtp)
        .build();
   }
 
@@ -331,7 +331,7 @@ class VideoCodecAnalyzer : public VideoCodecTester::VideoCodecStats {
   void StartEncode(const VideoFrame& video_frame,
                    const EncodingSettings& encoding_settings) {
     int64_t encode_start_us = rtc::TimeMicros();
-    task_queue_.PostTask([this, timestamp_rtp = video_frame.timestamp(),
+    task_queue_.PostTask([this, timestamp_rtp = video_frame.rtp_timestamp(),
                           encoding_settings, encode_start_us]() {
       RTC_CHECK(frames_.find(timestamp_rtp) == frames_.end())
          << "Duplicate frame. Frame with timestamp " << timestamp_rtp
@@ -418,7 +418,7 @@ class VideoCodecAnalyzer : public VideoCodecTester::VideoCodecStats {
 
   void FinishDecode(const VideoFrame& decoded_frame, int spatial_idx) {
     int64_t decode_finished_us = rtc::TimeMicros();
-    task_queue_.PostTask([this, timestamp_rtp = decoded_frame.timestamp(),
+    task_queue_.PostTask([this, timestamp_rtp = decoded_frame.rtp_timestamp(),
                           spatial_idx, width = decoded_frame.width(),
                           height = decoded_frame.height(),
                           decode_finished_us]() {
@@ -439,7 +439,7 @@ class VideoCodecAnalyzer : public VideoCodecTester::VideoCodecStats {
        decoded_frame.video_frame_buffer()->ToI420();
 
     task_queue_.PostTask([this, decoded_buffer,
-                          timestamp_rtp = decoded_frame.timestamp(),
+                          timestamp_rtp = decoded_frame.rtp_timestamp(),
                           spatial_idx]() {
       VideoFrame ref_frame = video_source_->ReadFrame(
          timestamp_rtp, {.width = decoded_buffer->width(),
@@ -926,10 +926,11 @@ class Encoder : public EncodedImageCallback {
                      EncodeCallback callback) {
     {
       MutexLock lock(&mutex_);
-      callbacks_[input_frame.timestamp()] = std::move(callback);
+      callbacks_[input_frame.rtp_timestamp()] = std::move(callback);
     }
 
-    Timestamp pts = Timestamp::Micros((input_frame.timestamp() / k90kHz).us());
+    Timestamp pts =
+        Timestamp::Micros((input_frame.rtp_timestamp() / k90kHz).us());
 
     task_queue_.PostScheduledTask(
        [this, input_frame, encoding_settings] {
@@ -943,8 +944,9 @@ class Encoder : public EncodedImageCallback {
 
           int error = encoder_->Encode(input_frame, /*frame_types=*/nullptr);
           if (error != 0) {
-            RTC_LOG(LS_WARNING) << "Encode failed with error code " << error
-                                << " RTP timestamp " << input_frame.timestamp();
+            RTC_LOG(LS_WARNING)
+                << "Encode failed with error code " << error
+                << " RTP timestamp " << input_frame.rtp_timestamp();
           }
         },
         pacer_.Schedule(pts));
@@ -125,7 +125,7 @@ class TestVideoEncoder : public MockVideoEncoder {
       encoded_frame.SetFrameType(frame.keyframe
                                      ? VideoFrameType::kVideoFrameKey
                                      : VideoFrameType::kVideoFrameDelta);
-      encoded_frame.SetRtpTimestamp(input_frame.timestamp());
+      encoded_frame.SetRtpTimestamp(input_frame.rtp_timestamp());
       encoded_frame.SetSpatialIndex(frame.layer_id.spatial_idx);
       encoded_frame.SetTemporalIndex(frame.layer_id.temporal_idx);
       encoded_frame.SetEncodedData(
@@ -161,7 +161,7 @@ class TestVideoDecoder : public MockVideoDecoder {
     VideoFrame decoded_frame =
         VideoFrame::Builder()
            .set_video_frame_buffer(frame_buffer)
-            .set_timestamp_rtp(encoded_frame.RtpTimestamp())
+            .set_rtp_timestamp(encoded_frame.RtpTimestamp())
            .build();
     callback_->Decoded(decoded_frame);
     frame_sizes_.push_back(DataSize::Bytes(encoded_frame.size()));
@@ -105,8 +105,8 @@ class SendProcessingUsage1 : public OveruseFrameDetector::ProcessingUsage {
    if (last_capture_time_us != -1)
      AddCaptureSample(1e-3 * (time_when_first_seen_us - last_capture_time_us));
 
-    frame_timing_.push_back(FrameTiming(frame.timestamp_us(), frame.timestamp(),
-                                        time_when_first_seen_us));
+    frame_timing_.push_back(FrameTiming(
+        frame.timestamp_us(), frame.rtp_timestamp(), time_when_first_seen_us));
   }
 
   absl::optional<int> FrameSent(
@@ -105,7 +105,7 @@ class OveruseFrameDetectorTest : public ::testing::Test,
                            .build();
     uint32_t timestamp = 0;
     while (num_frames-- > 0) {
-      frame.set_timestamp(timestamp);
+      frame.set_rtp_timestamp(timestamp);
       int64_t capture_time_us = rtc::TimeMicros();
       overuse_detector_->FrameCaptured(frame, capture_time_us);
       clock_.AdvanceTime(TimeDelta::Micros(delay_us));
@@ -131,7 +131,7 @@ class OveruseFrameDetectorTest : public ::testing::Test,
                            .build();
     uint32_t timestamp = 0;
     while (num_frames-- > 0) {
-      frame.set_timestamp(timestamp);
+      frame.set_rtp_timestamp(timestamp);
       int64_t capture_time_us = rtc::TimeMicros();
       overuse_detector_->FrameCaptured(frame, capture_time_us);
       int max_delay_us = 0;
@@ -166,7 +166,7 @@ class OveruseFrameDetectorTest : public ::testing::Test,
                            .build();
     uint32_t timestamp = 0;
     while (num_frames-- > 0) {
-      frame.set_timestamp(timestamp);
+      frame.set_rtp_timestamp(timestamp);
       int interval_us = random.Rand(min_interval_us, max_interval_us);
       int64_t capture_time_us = rtc::TimeMicros();
       overuse_detector_->FrameCaptured(frame, capture_time_us);
@@ -381,7 +381,7 @@ TEST_F(OveruseFrameDetectorTest, MeasuresMultipleConcurrentSamples) {
                          .build();
   for (size_t i = 0; i < 1000; ++i) {
     // Unique timestamps.
-    frame.set_timestamp(static_cast<uint32_t>(i));
+    frame.set_rtp_timestamp(static_cast<uint32_t>(i));
     int64_t capture_time_us = rtc::TimeMicros();
     overuse_detector_->FrameCaptured(frame, capture_time_us);
     clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs));
@@ -408,7 +408,7 @@ TEST_F(OveruseFrameDetectorTest, UpdatesExistingSamples) {
                          .build();
   uint32_t timestamp = 0;
   for (size_t i = 0; i < 1000; ++i) {
-    frame.set_timestamp(timestamp);
+    frame.set_rtp_timestamp(timestamp);
    int64_t capture_time_us = rtc::TimeMicros();
    overuse_detector_->FrameCaptured(frame, capture_time_us);
    // Encode and send first parts almost instantly.
@@ -863,7 +863,7 @@ TEST_F(OveruseFrameDetectorTest2, MeasuresMultipleConcurrentSamples) {
                          .build();
   for (size_t i = 0; i < 1000; ++i) {
     // Unique timestamps.
-    frame.set_timestamp(static_cast<uint32_t>(i));
+    frame.set_rtp_timestamp(static_cast<uint32_t>(i));
     int64_t capture_time_us = rtc::TimeMicros();
     overuse_detector_->FrameCaptured(frame, capture_time_us);
     clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs));
@@ -890,7 +890,7 @@ TEST_F(OveruseFrameDetectorTest2, UpdatesExistingSamples) {
                          .build();
   uint32_t timestamp = 0;
   for (size_t i = 0; i < 1000; ++i) {
-    frame.set_timestamp(timestamp);
+    frame.set_rtp_timestamp(timestamp);
    int64_t capture_time_us = rtc::TimeMicros();
    overuse_detector_->FrameCaptured(frame, capture_time_us);
    // Encode and send first parts almost instantly.
@@ -110,7 +110,7 @@ TEST_F(FecEndToEndTest, ReceivesUlpfec) {
       MutexLock lock(&mutex_);
       // Rendering frame with timestamp of packet that was dropped -> FEC
       // protection worked.
-      auto it = dropped_timestamps_.find(video_frame.timestamp());
+      auto it = dropped_timestamps_.find(video_frame.rtp_timestamp());
       if (it != dropped_timestamps_.end()) {
         observation_complete_.Set();
       }
@@ -289,7 +289,7 @@ class FlexfecRenderObserver : public test::EndToEndTest,
       MutexLock lock(&mutex_);
       // Rendering frame with timestamp of packet that was dropped -> FEC
       // protection worked.
-      auto it = dropped_timestamps_.find(video_frame.timestamp());
+      auto it = dropped_timestamps_.find(video_frame.rtp_timestamp());
       if (it != dropped_timestamps_.end()) {
         if (!expect_flexfec_rtcp_ || received_flexfec_rtcp_) {
           observation_complete_.Set();
@@ -107,11 +107,11 @@ class FrameObserver : public test::RtpRtcpObserver,
   // Verifies that all sent frames are decoded and rendered.
   void OnFrame(const VideoFrame& rendered_frame) override {
     MutexLock lock(&mutex_);
-    EXPECT_THAT(sent_timestamps_, Contains(rendered_frame.timestamp()));
+    EXPECT_THAT(sent_timestamps_, Contains(rendered_frame.rtp_timestamp()));
 
     // Remove old timestamps too, only the newest decoded frame is rendered.
     num_rendered_frames_ +=
-        RemoveOlderOrEqual(rendered_frame.timestamp(), &sent_timestamps_);
+        RemoveOlderOrEqual(rendered_frame.rtp_timestamp(), &sent_timestamps_);
 
     if (num_rendered_frames_ >= kFramesToObserve) {
       EXPECT_TRUE(sent_timestamps_.empty()) << "All sent frames not decoded.";
@@ -316,7 +316,7 @@ void RetransmissionEndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) {
     void OnFrame(const VideoFrame& video_frame) override {
       MutexLock lock(&mutex_);
       if (received_pli_ &&
-          video_frame.timestamp() > highest_dropped_timestamp_) {
+          video_frame.rtp_timestamp() > highest_dropped_timestamp_) {
         observation_complete_.Set();
       }
       if (!received_pli_)
@@ -412,7 +412,7 @@ void RetransmissionEndToEndTest::DecodesRetransmittedFrame(bool enable_rtx,
       EXPECT_EQ(kVideoRotation_90, frame.rotation());
       {
         MutexLock lock(&mutex_);
-        if (frame.timestamp() == retransmitted_timestamp_)
+        if (frame.rtp_timestamp() == retransmitted_timestamp_)
          observation_complete_.Set();
       }
       orig_renderer_->OnFrame(frame);
@@ -99,7 +99,7 @@ void FrameEncodeMetadataWriter::OnEncodeStarted(const VideoFrame& frame) {
 
   timing_frames_info_.resize(num_spatial_layers_);
   FrameMetadata metadata;
-  metadata.rtp_timestamp = frame.timestamp();
+  metadata.rtp_timestamp = frame.rtp_timestamp();
   metadata.encode_start_time_ms = rtc::TimeMillis();
   metadata.ntp_time_ms = frame.ntp_time_ms();
   metadata.timestamp_us = frame.timestamp_us();
@@ -92,7 +92,7 @@ std::vector<std::vector<FrameType>> GetTimingFrames(
   for (int i = 0; i < num_frames; ++i) {
     current_timestamp += 1;
     VideoFrame frame = VideoFrame::Builder()
-                           .set_timestamp_rtp(current_timestamp * 90)
+                           .set_rtp_timestamp(current_timestamp * 90)
                            .set_timestamp_ms(current_timestamp)
                            .set_video_frame_buffer(kFrameBuffer)
                            .build();
@@ -213,7 +213,7 @@ TEST(FrameEncodeMetadataWriterTest, NoTimingFrameIfNoEncodeStartTime) {
   // Verify a single frame works with encode start time set.
   VideoFrame frame = VideoFrame::Builder()
                          .set_timestamp_ms(timestamp)
-                         .set_timestamp_rtp(timestamp * 90)
+                         .set_rtp_timestamp(timestamp * 90)
                          .set_video_frame_buffer(kFrameBuffer)
                          .build();
   encode_timer.OnEncodeStarted(frame);
@@ -244,14 +244,14 @@ TEST(FrameEncodeMetadataWriterTest, NotifiesAboutDroppedFrames) {
 
   EncodedImage image;
   VideoFrame frame = VideoFrame::Builder()
-                         .set_timestamp_rtp(kTimestampMs1 * 90)
+                         .set_rtp_timestamp(kTimestampMs1 * 90)
                          .set_timestamp_ms(kTimestampMs1)
                          .set_video_frame_buffer(kFrameBuffer)
                          .build();
 
   image.capture_time_ms_ = kTimestampMs1;
   image.SetRtpTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
-  frame.set_timestamp(image.capture_time_ms_ * 90);
+  frame.set_rtp_timestamp(image.capture_time_ms_ * 90);
   frame.set_timestamp_us(image.capture_time_ms_ * 1000);
   encode_timer.OnEncodeStarted(frame);
 
@@ -261,7 +261,7 @@ TEST(FrameEncodeMetadataWriterTest, NotifiesAboutDroppedFrames) {
   image.capture_time_ms_ = kTimestampMs2;
   image.SetRtpTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
   image.timing_ = EncodedImage::Timing();
-  frame.set_timestamp(image.capture_time_ms_ * 90);
+  frame.set_rtp_timestamp(image.capture_time_ms_ * 90);
   frame.set_timestamp_us(image.capture_time_ms_ * 1000);
   encode_timer.OnEncodeStarted(frame);
   // No OnEncodedImageCall for timestamp2. Yet, at this moment it's not known
@@ -271,7 +271,7 @@ TEST(FrameEncodeMetadataWriterTest, NotifiesAboutDroppedFrames) {
   image.capture_time_ms_ = kTimestampMs3;
   image.SetRtpTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
   image.timing_ = EncodedImage::Timing();
-  frame.set_timestamp(image.capture_time_ms_ * 90);
+  frame.set_rtp_timestamp(image.capture_time_ms_ * 90);
   frame.set_timestamp_us(image.capture_time_ms_ * 1000);
   encode_timer.OnEncodeStarted(frame);
   encode_timer.FillTimingInfo(0, &image);
@@ -280,7 +280,7 @@ TEST(FrameEncodeMetadataWriterTest, NotifiesAboutDroppedFrames) {
   image.capture_time_ms_ = kTimestampMs4;
   image.SetRtpTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
   image.timing_ = EncodedImage::Timing();
-  frame.set_timestamp(image.capture_time_ms_ * 90);
+  frame.set_rtp_timestamp(image.capture_time_ms_ * 90);
   frame.set_timestamp_us(image.capture_time_ms_ * 1000);
   encode_timer.OnEncodeStarted(frame);
   encode_timer.FillTimingInfo(0, &image);
@@ -303,7 +303,7 @@ TEST(FrameEncodeMetadataWriterTest, RestoresCaptureTimestamps) {
   image.SetRtpTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
   VideoFrame frame = VideoFrame::Builder()
                          .set_timestamp_ms(image.capture_time_ms_)
-                         .set_timestamp_rtp(image.capture_time_ms_ * 90)
+                         .set_rtp_timestamp(image.capture_time_ms_ * 90)
                          .set_video_frame_buffer(kFrameBuffer)
                          .build();
   encode_timer.OnEncodeStarted(frame);
@@ -327,7 +327,7 @@ TEST(FrameEncodeMetadataWriterTest, CopiesRotation) {
   image.SetRtpTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
   VideoFrame frame = VideoFrame::Builder()
                          .set_timestamp_ms(kTimestampMs)
-                         .set_timestamp_rtp(kTimestampMs * 90)
+                         .set_rtp_timestamp(kTimestampMs * 90)
                          .set_rotation(kVideoRotation_180)
                          .set_video_frame_buffer(kFrameBuffer)
                          .build();
@@ -353,7 +353,7 @@ TEST(FrameEncodeMetadataWriterTest, SetsContentType) {
   image.SetRtpTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
   VideoFrame frame = VideoFrame::Builder()
                          .set_timestamp_ms(kTimestampMs)
-                         .set_timestamp_rtp(kTimestampMs * 90)
+                         .set_rtp_timestamp(kTimestampMs * 90)
                          .set_rotation(kVideoRotation_180)
                          .set_video_frame_buffer(kFrameBuffer)
                          .build();
@@ -379,7 +379,7 @@ TEST(FrameEncodeMetadataWriterTest, CopiesColorSpace) {
   image.SetRtpTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
   VideoFrame frame = VideoFrame::Builder()
                          .set_timestamp_ms(kTimestampMs)
-                         .set_timestamp_rtp(kTimestampMs * 90)
+                         .set_rtp_timestamp(kTimestampMs * 90)
                          .set_color_space(color_space)
                          .set_video_frame_buffer(kFrameBuffer)
                          .build();
@@ -405,7 +405,7 @@ TEST(FrameEncodeMetadataWriterTest, CopiesPacketInfos) {
   image.SetRtpTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
   VideoFrame frame = VideoFrame::Builder()
                          .set_timestamp_ms(kTimestampMs)
-                         .set_timestamp_rtp(kTimestampMs * 90)
+                         .set_rtp_timestamp(kTimestampMs * 90)
                          .set_packet_infos(packet_infos)
                          .set_video_frame_buffer(kFrameBuffer)
                          .build();
@@ -83,7 +83,7 @@ class ReceiveStatisticsProxyTest : public ::testing::Test {
     VideoFrame frame =
         VideoFrame::Builder()
            .set_video_frame_buffer(I420Buffer::Create(width, height))
-           .set_timestamp_rtp(0)
+           .set_rtp_timestamp(0)
           .set_timestamp_ms(render_time_ms)
           .set_rotation(kVideoRotation_0)
          .build();
@@ -55,14 +55,15 @@ int32_t VideoRenderFrames::AddFrame(VideoFrame&& new_frame) {
   // really slow system never renders any frames.
   if (!incoming_frames_.empty() &&
       new_frame.render_time_ms() + kOldRenderTimestampMS < time_now) {
-    RTC_LOG(LS_WARNING) << "Too old frame, timestamp=" << new_frame.timestamp();
+    RTC_LOG(LS_WARNING) << "Too old frame, timestamp="
+                        << new_frame.rtp_timestamp();
     ++frames_dropped_;
     return -1;
   }
 
   if (new_frame.render_time_ms() > time_now + kFutureRenderTimestampMS) {
     RTC_LOG(LS_WARNING) << "Frame too long into the future, timestamp="
-                        << new_frame.timestamp();
+                        << new_frame.rtp_timestamp();
     ++frames_dropped_;
     return -1;
   }
@@ -256,12 +256,12 @@ void VideoAnalyzer::DeliverRtpPacket(
 void VideoAnalyzer::PreEncodeOnFrame(const VideoFrame& video_frame) {
   MutexLock lock(&lock_);
   if (!first_encoded_timestamp_) {
-    while (frames_.front().timestamp() != video_frame.timestamp()) {
+    while (frames_.front().rtp_timestamp() != video_frame.rtp_timestamp()) {
       ++dropped_frames_before_first_encode_;
       frames_.pop_front();
       RTC_CHECK(!frames_.empty());
     }
-    first_encoded_timestamp_ = video_frame.timestamp();
+    first_encoded_timestamp_ = video_frame.rtp_timestamp();
   }
 }
 
@@ -317,9 +317,10 @@ void VideoAnalyzer::OnFrame(const VideoFrame& video_frame) {
   StartExcludingCpuThreadTime();
 
   int64_t send_timestamp =
-      wrap_handler_.Unwrap(video_frame.timestamp() - rtp_timestamp_delta_);
+      wrap_handler_.Unwrap(video_frame.rtp_timestamp() - rtp_timestamp_delta_);
 
-  while (wrap_handler_.Unwrap(frames_.front().timestamp()) < send_timestamp) {
+  while (wrap_handler_.Unwrap(frames_.front().rtp_timestamp()) <
+         send_timestamp) {
     if (!last_rendered_frame_) {
       // No previous frame rendered, this one was dropped after sending but
       // before rendering.
@@ -335,7 +336,7 @@ void VideoAnalyzer::OnFrame(const VideoFrame& video_frame) {
   VideoFrame reference_frame = frames_.front();
   frames_.pop_front();
   int64_t reference_timestamp =
-      wrap_handler_.Unwrap(reference_frame.timestamp());
+      wrap_handler_.Unwrap(reference_frame.rtp_timestamp());
   if (send_timestamp == reference_timestamp - 1) {
     // TODO(ivica): Make this work for > 2 streams.
     // Look at RTPSender::BuildRTPHeader.
@@ -906,7 +907,7 @@ void VideoAnalyzer::AddFrameComparison(const VideoFrame& reference,
                                        const VideoFrame& render,
                                        bool dropped,
                                        int64_t render_time_ms) {
-  int64_t reference_timestamp = wrap_handler_.Unwrap(reference.timestamp());
+  int64_t reference_timestamp = wrap_handler_.Unwrap(reference.rtp_timestamp());
   int64_t send_time_ms = send_times_[reference_timestamp];
   send_times_.erase(reference_timestamp);
   int64_t recv_time_ms = recv_times_[reference_timestamp];
@@ -1011,10 +1012,10 @@ void VideoAnalyzer::CapturedFrameForwarder::OnFrame(
   VideoFrame copy = video_frame;
   // Frames from the capturer does not have a rtp timestamp.
   // Create one so it can be used for comparison.
-  RTC_DCHECK_EQ(0, video_frame.timestamp());
+  RTC_DCHECK_EQ(0, video_frame.rtp_timestamp());
   if (video_frame.ntp_time_ms() == 0)
     copy.set_ntp_time_ms(clock_->CurrentNtpInMilliseconds());
-  copy.set_timestamp(copy.ntp_time_ms() * 90);
+  copy.set_rtp_timestamp(copy.ntp_time_ms() * 90);
   analyzer_->AddCapturedFrameForComparison(copy);
   MutexLock lock(&lock_);
   ++captured_frames_;
@@ -61,7 +61,7 @@ class CallStats;
 // multiple calls to clock->Now().
 struct VideoFrameMetaData {
   VideoFrameMetaData(const webrtc::VideoFrame& frame, Timestamp now)
-      : rtp_timestamp(frame.timestamp()),
+      : rtp_timestamp(frame.rtp_timestamp()),
         timestamp_us(frame.timestamp_us()),
         ntp_time_ms(frame.ntp_time_ms()),
         width(frame.width()),
@@ -126,10 +126,10 @@ class FakeVideoRenderer : public rtc::VideoSinkInterface<VideoFrame> {
 
   void OnFrame(const VideoFrame& frame) override {
     RTC_LOG(LS_VERBOSE) << "Received frame with timestamp="
-                        << frame.timestamp();
+                        << frame.rtp_timestamp();
     if (!last_frame_.empty()) {
       RTC_LOG(LS_INFO) << "Already had frame queue with timestamp="
-                       << last_frame_.back().timestamp();
+                       << last_frame_.back().rtp_timestamp();
     }
     last_frame_.push_back(frame);
   }
@@ -164,9 +164,9 @@ MATCHER_P2(MatchResolution, w, h, "") {
 }
 
 MATCHER_P(RtpTimestamp, timestamp, "") {
-  if (arg.timestamp() != timestamp) {
+  if (arg.rtp_timestamp() != timestamp) {
     *result_listener->stream()
-        << "rtp timestamp was " << arg.timestamp() << " != " << timestamp;
+        << "rtp timestamp was " << arg.rtp_timestamp() << " != " << timestamp;
     return false;
   }
   return true;
@@ -2915,7 +2915,7 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) {
       auto buffer = EncodedImageBuffer::Create(16);
       memset(buffer->data(), 0, 16);
       encoded.SetEncodedData(buffer);
-      encoded.SetRtpTimestamp(input_image.timestamp());
+      encoded.SetRtpTimestamp(input_image.rtp_timestamp());
       encoded.capture_time_ms_ = input_image.render_time_ms();
 
       for (size_t i = 0; i < kNumStreams; ++i) {
@@ -1520,7 +1520,7 @@ void VideoStreamEncoder::OnFrame(Timestamp post_time,
 
   // Convert NTP time, in ms, to RTP timestamp.
   const int kMsToRtpTimestamp = 90;
-  incoming_frame.set_timestamp(
+  incoming_frame.set_rtp_timestamp(
       kMsToRtpTimestamp * static_cast<uint32_t>(incoming_frame.ntp_time_ms()));
 
   // Identifier should remain the same for newly produced incoming frame and the
@@ -2015,7 +2015,7 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
                       << out_frame.width() << "x" << out_frame.height();
 
   TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp",
-               out_frame.timestamp());
+               out_frame.rtp_timestamp());
 
   frame_encode_metadata_writer_.OnEncodeStarted(out_frame);
 
@@ -1231,17 +1231,18 @@ class VideoStreamEncoderTest : public ::testing::Test {
     {
       MutexLock lock(&local_mutex_);
      if (expect_null_frame_) {
-        EXPECT_EQ(input_image.timestamp(), 0u);
+        EXPECT_EQ(input_image.rtp_timestamp(), 0u);
         EXPECT_EQ(input_image.width(), 1);
         last_frame_types_ = *frame_types;
         expect_null_frame_ = false;
       } else {
-        EXPECT_GT(input_image.timestamp(), timestamp_);
+        EXPECT_GT(input_image.rtp_timestamp(), timestamp_);
         EXPECT_GT(input_image.ntp_time_ms(), ntp_time_ms_);
-        EXPECT_EQ(input_image.timestamp(), input_image.ntp_time_ms() * 90);
+        EXPECT_EQ(input_image.rtp_timestamp(),
+                  input_image.ntp_time_ms() * 90);
       }
 
-      timestamp_ = input_image.timestamp();
+      timestamp_ = input_image.rtp_timestamp();
      ntp_time_ms_ = input_image.ntp_time_ms();
      last_input_width_ = input_image.width();
      last_input_height_ = input_image.height();
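Every hunk above is the same mechanical rename. As a hedged illustration only — not part of the diff, with a made-up helper name and an arbitrary 320x180 frame size — calling code migrates roughly as follows; both the old and the new accessors read and write the same 90 kHz RTP timestamp field on webrtc::VideoFrame:

// Hedged sketch: MakeExampleFrame and the frame size are invented for
// illustration; only the set_rtp_timestamp()/rtp_timestamp() calls mirror
// the renamed API exercised by the hunks above.
#include "api/video/i420_buffer.h"
#include "api/video/video_frame.h"

webrtc::VideoFrame MakeExampleFrame(uint32_t rtp_timestamp) {
  return webrtc::VideoFrame::Builder()
      .set_video_frame_buffer(webrtc::I420Buffer::Create(320, 180))
      .set_rtp_timestamp(rtp_timestamp)  // was .set_timestamp_rtp(...)
      .set_timestamp_ms(0)
      .build();
}

void UseExampleFrame() {
  webrtc::VideoFrame frame = MakeExampleFrame(/*rtp_timestamp=*/90000);
  uint32_t ts = frame.rtp_timestamp();  // was frame.timestamp()
  frame.set_rtp_timestamp(ts + 3000);   // was frame.set_timestamp(...)
}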