Bug 1583867 - add rtpTimestamp field to RTP sync/contrib sources; r=jib,smaug

Differential Revision: https://phabricator.services.mozilla.com/D47096

--HG--
extra : moz-landing-system : lando
Nico Grunbaum 2019-10-02 21:55:33 +00:00
Parent 69224531f0
Commit 6722b1c2c1
14 changed files with 116 additions and 78 deletions
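For context, a minimal sketch (not part of the patch), assuming an established RTCPeerConnection named pc that is receiving audio, of how page script would observe the new field once this lands:

// Sketch: rtpTimestamp is now reported alongside timestamp and audioLevel.
const [receiver] = pc.getReceivers();
for (const src of receiver.getContributingSources()) {
  // src.timestamp is the wall-clock receive time; src.rtpTimestamp is the
  // media timestamp taken from the RTP packet header.
  console.log(src.source, src.timestamp, src.rtpTimestamp, src.audioLevel);
}
for (const src of receiver.getSynchronizationSources()) {
  console.log(src.source, src.rtpTimestamp, src.voiceActivityFlag);
}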

View file

@@ -1721,6 +1721,7 @@ class RTCPeerConnection {
receiver,
source,
timestamp,
rtpTimestamp,
hasLevel,
level
) {
@@ -1728,6 +1729,7 @@ class RTCPeerConnection {
receiver.track,
source,
timestamp,
rtpTimestamp,
hasLevel,
level
);
@@ -2499,6 +2501,7 @@ class RTCRtpReceiver {
const newEntry = {
source: e.source,
timestamp: e.timestamp + e.sourceClockOffset,
rtpTimestamp: e.rtpTimestamp,
audioLevel: e.audioLevel,
};
if (e.voiceActivityFlag !== undefined) {

View file

@@ -24,8 +24,8 @@
// much of the functionality of getContributingSources as the implementation
// is shared.
var testGetContributingSources = async (test) => {
let remoteReceiver = test.pcRemote.getReceivers()[0];
let localReceiver = test.pcLocal.getReceivers()[0];
const remoteReceiver = test.pcRemote.getReceivers()[0];
const localReceiver = test.pcLocal.getReceivers()[0];
// Check that getContributingSources is empty as there is no MCU
is(remoteReceiver.getContributingSources().length, 0,
@@ -35,75 +35,84 @@
// Wait for the next JS event loop iteration, to clear the cache
await Promise.resolve().then();
// Insert new entries as if there were an MCU
let csrc0 = 124756;
let timestamp0 = SpWrap(test.pcRemote).mozGetNowInRtpSourceReferenceTime();
let timestampOffset = new Date().getTime() - timestamp0;
let hasAudioLevel0 = true;
const csrc0 = 124756;
const timestamp0 = SpWrap(test.pcRemote).mozGetNowInRtpSourceReferenceTime();
const rtpTimestamp0 = 11111;
const timestampOffset = new Date().getTime() - timestamp0;
const hasAudioLevel0 = true;
// Audio level as expected to be received in RTP
let audioLevel0 = 34;
const audioLevel0 = 34;
// Audio level as expected to be returned
let expectedAudioLevel0 = 10 ** (-audioLevel0 / 20);
const expectedAudioLevel0 = 10 ** (-audioLevel0 / 20);
SpWrap(test.pcRemote).mozInsertAudioLevelForContributingSource(
remoteReceiver,
csrc0,
timestamp0,
rtpTimestamp0,
hasAudioLevel0,
audioLevel0);
let csrc1 = 5786;
let timestamp1 = timestamp0 - 200;
let hasAudioLevel1 = false;
let audioLevel1 = 0;
const csrc1 = 5786;
const timestamp1 = timestamp0 - 200;
const rtpTimestamp1 = 22222;
const hasAudioLevel1 = false;
const audioLevel1 = 0;
SpWrap(test.pcRemote).mozInsertAudioLevelForContributingSource(
remoteReceiver,
csrc1,
timestamp1,
rtpTimestamp1,
hasAudioLevel1,
audioLevel1);
let csrc2 = 93487;
let timestamp2 = timestamp0 - 200;
let hasAudioLevel2 = true;
let audioLevel2 = 127;
const csrc2 = 93487;
const timestamp2 = timestamp0 - 200;
const rtpTimestamp2 = 333333;
const hasAudioLevel2 = true;
const audioLevel2 = 127;
SpWrap(test.pcRemote).mozInsertAudioLevelForContributingSource(
remoteReceiver,
csrc2,
timestamp2,
rtpTimestamp2,
hasAudioLevel2,
audioLevel2);
let contributingSources = remoteReceiver.getContributingSources();
const contributingSources = remoteReceiver.getContributingSources();
is(contributingSources.length, 3,
"Expected number of contributing sources");
// Check that both inserted were returned
let source0 = contributingSources.find(c => c.source == csrc0);
const source0 = contributingSources.find(c => c.source == csrc0);
ok(source0, "first csrc was found");
let source1 = contributingSources.find(c => c.source == csrc1);
const source1 = contributingSources.find(c => c.source == csrc1);
ok(source1, "second csrsc was found");
// Add a small margin of error in the timestamps
let compareTimestamps = (ts1, ts2) => Math.abs(ts1 - ts2) < 100;
const compareTimestamps = (ts1, ts2) => Math.abs(ts1 - ts2) < 100;
// Check the CSRC with audioLevel
let isWithinErr = Math.abs(source0.audioLevel - expectedAudioLevel0)
const isWithinErr = Math.abs(source0.audioLevel - expectedAudioLevel0)
< expectedAudioLevel0 / 50;
ok(isWithinErr,
`Contributing source has correct audio level. (${source0.audioLevel})`);
ok(compareTimestamps(source0.timestamp, timestamp0 + timestampOffset),
`Contributing source has correct timestamp (${source0.timestamp})`);
is(source0.rtpTimestamp, rtpTimestamp0,
`Contributing source has correct RTP timestamp (${source0.rtpTimestamp})`);
// Check the CSRC without audioLevel
is(source1.audioLevel, undefined,
`Contributing source has no audio level. (${source1.audioLevel})`);
ok(compareTimestamps(source1.timestamp, timestamp1 + timestampOffset),
`Contributing source has correct timestamp (${source1.timestamp})`);
is(source1.rtpTimestamp, rtpTimestamp1,
`Contributing source has correct RTP timestamp (${source1.rtpTimestamp})`);
// Check that a received RTP audio level 127 is exactly 0
let source2 = contributingSources.find(c => c.source == csrc2);
const source2 = contributingSources.find(c => c.source == csrc2);
ok(source2, "third csrc was found");
is(source2.audioLevel, 0,
`Contributing source has audio level of 0 when RTP audio level is 127`);
@@ -113,6 +122,7 @@
"getContributingSources is cached");
// Check that sources are sorted in descending order by time stamp
const timestamp3 = SpWrap(test.pcLocal).mozGetNowInRtpSourceReferenceTime();
const rtpTimestamp3 = 44444;
// Larger offsets are further back in time
const testOffsets = [3, 7, 5, 6, 1, 4];
for (const offset of testOffsets) {
@@ -120,6 +130,7 @@
localReceiver,
offset, // Using offset for SSRC for convenience
timestamp3 - offset,
rtpTimestamp3,
true,
offset);
}
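As a side note on the audio-level checks in the test above: RTP carries audio levels as dBov values in the range 0 to 127, while the API reports a linear value between 0 and 1, with 127 (silence) mapped to exactly 0. A small sketch of the conversion the assertions rely on:

// Sketch of the dBov-to-linear conversion the test expects; 127 means
// silence and is reported as exactly 0.
const rtpLevelToLinear = dBov => (dBov === 127 ? 0 : 10 ** (-dBov / 20));
// e.g. rtpLevelToLinear(34) is about 0.02, matching expectedAudioLevel0
// within the 2% tolerance the test allows.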

View file

@@ -75,6 +75,7 @@ interface PeerConnectionImpl {
void insertAudioLevelForContributingSource(MediaStreamTrack recvTrack,
unsigned long source,
DOMHighResTimeStamp timestamp,
unsigned long rtpTimestamp,
boolean hasLevel,
byte level);

View file

@@ -146,6 +146,7 @@ interface RTCPeerConnection : EventTarget {
void mozInsertAudioLevelForContributingSource(RTCRtpReceiver receiver,
unsigned long source,
DOMHighResTimeStamp timestamp,
unsigned long rtpTimestamp,
boolean hasLevel,
byte level);
[ChromeOnly]

View file

@@ -9,8 +9,9 @@
dictionary RTCRtpContributingSource {
required DOMHighResTimeStamp timestamp;
required unsigned long source;
double audioLevel;
required unsigned long source;
double audioLevel;
required unsigned long rtpTimestamp;
};
dictionary RTCRtpSynchronizationSource : RTCRtpContributingSource {

View file

@@ -33,7 +33,7 @@ class RtpSourcesTest : public ::testing::Test {
const int64_t times[] = {100, 120, 140, 160, 180, 200, 220};
const size_t numEntries = sizeof(times) / sizeof(times[0]);
for (auto i : times) {
history.Insert(i, i + jitter, hasAudioLevel, audioLevel);
history.Insert(i, i + jitter, i, hasAudioLevel, audioLevel);
}
ASSERT_EQ(history.mDetailedHistory.size(), numEntries);
for (auto i : times) {
@@ -66,25 +66,25 @@ class RtpSourcesTest : public ::testing::Test {
constexpr int64_t pruneTime1 = time2 + (10 * 1000) + 1;
// time0
history.Insert(timeNow, time0, true, 0);
history.Insert(timeNow, time0, 0, true, 0);
EXPECT_TRUE(history.Empty());
EXPECT_FALSE(history.mHasEvictedEntry);
// time1
history.Insert(timeNow, time1, true, 0);
history.Insert(timeNow, time1, 1, true, 0);
// Check that the jitter window buffer hasn't been used
EXPECT_TRUE(history.Empty());
ASSERT_EQ(history.mLatestEviction.jitterAdjustedTimestamp, time1);
EXPECT_TRUE(history.mHasEvictedEntry);
// time2
history.Insert(timeNow, time2, true, 0);
history.Insert(timeNow, time2, 2, true, 0);
EXPECT_TRUE(history.Empty());
ASSERT_EQ(history.mLatestEviction.jitterAdjustedTimestamp, time2);
EXPECT_TRUE(history.mHasEvictedEntry);
// time3
history.Insert(timeNow, time3, true, 0);
history.Insert(timeNow, time3, 3, true, 0);
EXPECT_TRUE(history.Empty());
ASSERT_EQ(history.mLatestEviction.jitterAdjustedTimestamp, time2);
EXPECT_TRUE(history.mHasEvictedEntry);
@@ -116,7 +116,7 @@ class RtpSourcesTest : public ::testing::Test {
constexpr int64_t timeNow2 = time1 + jitterWindow + 1;
// time0
history.Insert(timeNow0, time0, false, 1);
history.Insert(timeNow0, time0, 0, false, 1);
EXPECT_FALSE(history.Empty());
// Jitter window should not have grown
ASSERT_EQ(history.mMaxJitterWindow, jitterWindow);
@@ -124,7 +124,7 @@ class RtpSourcesTest : public ::testing::Test {
EXPECT_FALSE(history.mHasEvictedEntry);
// time1
history.Insert(timeNow0, time1, true, 2);
history.Insert(timeNow0, time1, 1, true, 2);
ASSERT_EQ(history.mMaxJitterWindow, jitterWindow);
EXPECT_EQ(history.mDetailedHistory.size(), static_cast<size_t>(2));
EXPECT_FALSE(history.mHasEvictedEntry);
@@ -153,6 +153,7 @@ class RtpSourcesTest : public ::testing::Test {
RtpSourceHistory history;
constexpr int64_t timeNow = 0;
const int64_t jitterAdjusted = timeNow + 10;
const uint32_t ntpTimestamp = 0;
const bool hasAudioLevel = true;
const uint8_t audioLevel0 = 127;
// should result in hasAudioLevel = false
@@ -161,7 +162,8 @@ class RtpSourcesTest : public ::testing::Test {
const uint8_t audioLevel2 = 128;
// audio level 0
history.Insert(timeNow, jitterAdjusted, hasAudioLevel, audioLevel0);
history.Insert(timeNow, jitterAdjusted, ntpTimestamp, hasAudioLevel,
audioLevel0);
ASSERT_FALSE(history.mHasEvictedEntry);
EXPECT_EQ(history.mDetailedHistory.size(), static_cast<size_t>(1));
{
@@ -171,7 +173,8 @@ class RtpSourcesTest : public ::testing::Test {
EXPECT_EQ(entry->audioLevel, audioLevel0);
}
// audio level 1
history.Insert(timeNow, jitterAdjusted, hasAudioLevel, audioLevel1);
history.Insert(timeNow, jitterAdjusted, ntpTimestamp, hasAudioLevel,
audioLevel1);
ASSERT_FALSE(history.mHasEvictedEntry);
EXPECT_EQ(history.mDetailedHistory.size(), static_cast<size_t>(1));
{
@@ -181,7 +184,8 @@ class RtpSourcesTest : public ::testing::Test {
EXPECT_EQ(entry->audioLevel, audioLevel1);
}
// audio level 2
history.Insert(timeNow, jitterAdjusted, hasAudioLevel, audioLevel2);
history.Insert(timeNow, jitterAdjusted, ntpTimestamp, hasAudioLevel,
audioLevel2);
ASSERT_FALSE(history.mHasEvictedEntry);
EXPECT_EQ(history.mDetailedHistory.size(), static_cast<size_t>(1));
{
@@ -207,8 +211,9 @@ class RtpSourcesTest : public ::testing::Test {
constexpr int64_t timeNow = 10000;
constexpr int64_t jitter = RtpSourceHistory::kMinJitterWindow / 2;
const int64_t jitterAdjusted = timeNow + jitter;
const uint32_t ntpTimestamp = 0;
history.Insert(timeNow, jitterAdjusted, 0, false);
history.Insert(timeNow, jitterAdjusted, ntpTimestamp, false, 0);
history.Prune(timeNow + (jitter * 3) + 1);
EXPECT_EQ(history.mDetailedHistory.size(), static_cast<size_t>(0));
EXPECT_TRUE(history.mHasEvictedEntry);

View file

@@ -287,22 +287,26 @@ void WebrtcAudioConduit::GetRtpSources(
// test-only: inserts a CSRC entry in a RtpSourceObserver's history for
// getContributingSources mochitests
void InsertAudioLevelForContributingSource(RtpSourceObserver& observer,
uint32_t aCsrcSource,
int64_t aTimestamp,
bool aHasAudioLevel,
uint8_t aAudioLevel) {
const uint32_t aCsrcSource,
const int64_t aTimestamp,
const uint32_t aRtpTimestamp,
const bool aHasAudioLevel,
const uint8_t aAudioLevel) {
using EntryType = dom::RTCRtpSourceEntryType;
auto key = RtpSourceObserver::GetKey(aCsrcSource, EntryType::Contributing);
auto& hist = observer.mRtpSources[key];
hist.Insert(aTimestamp, aTimestamp, aHasAudioLevel, aAudioLevel);
hist.Insert(aTimestamp, aTimestamp, aRtpTimestamp, aHasAudioLevel,
aAudioLevel);
}
void WebrtcAudioConduit::InsertAudioLevelForContributingSource(
uint32_t aCsrcSource, int64_t aTimestamp, bool aHasAudioLevel,
uint8_t aAudioLevel) {
const uint32_t aCsrcSource, const int64_t aTimestamp,
const uint32_t aRtpTimestamp, const bool aHasAudioLevel,
const uint8_t aAudioLevel) {
MOZ_ASSERT(NS_IsMainThread());
mozilla::InsertAudioLevelForContributingSource(
mRtpSourceObserver, aCsrcSource, aTimestamp, aHasAudioLevel, aAudioLevel);
mRtpSourceObserver, aCsrcSource, aTimestamp, aRtpTimestamp,
aHasAudioLevel, aAudioLevel);
}
/*

View file

@@ -242,9 +242,11 @@ class WebrtcAudioConduit : public AudioSessionConduit,
const int64_t aTimestamp, const uint32_t aJitter) override;
// test-only: inserts fake CSRCs and audio level data
void InsertAudioLevelForContributingSource(uint32_t aSource,
int64_t aTimestamp, bool aHasLevel,
uint8_t aLevel);
void InsertAudioLevelForContributingSource(const uint32_t aCsrcSource,
const int64_t aTimestamp,
const uint32_t aRtpTimestamp,
const bool aHasAudioLevel,
const uint8_t aAudioLevel);
bool IsSamplingFreqSupported(int freq) const override;

View file

@@ -36,8 +36,8 @@ void RtpSourceObserver::OnRtpPacket(const webrtc::RTPHeader& aHeader,
auto& hist = mRtpSources[GetKey(aHeader.ssrc, EntryType::Synchronization)];
hist.Prune(aTimestamp);
// ssrc-audio-level handling
hist.Insert(aTimestamp, jitterAdjusted, aHeader.extension.hasAudioLevel,
aHeader.extension.audioLevel);
hist.Insert(aTimestamp, jitterAdjusted, aHeader.timestamp,
aHeader.extension.hasAudioLevel, aHeader.extension.audioLevel);
// csrc-audio-level handling
const auto& list = aHeader.extension.csrcAudioLevels;
@@ -47,7 +47,8 @@ void RtpSourceObserver::OnRtpPacket(const webrtc::RTPHeader& aHeader,
hist.Prune(aTimestamp);
bool hasLevel = i < list.numAudioLevels;
uint8_t level = hasLevel ? list.arrOfAudioLevels[i] : 0;
hist.Insert(aTimestamp, jitterAdjusted, hasLevel, level);
hist.Insert(aTimestamp, jitterAdjusted, aHeader.timestamp, hasLevel,
level);
}
}
}
@@ -64,6 +65,7 @@ void RtpSourceObserver::GetRtpSources(
domEntry.mSource = GetSourceFromKey(it.first);
domEntry.mSourceType = GetTypeFromKey(it.first);
domEntry.mTimestamp = entry->jitterAdjustedTimestamp;
domEntry.mRtpTimestamp = entry->rtpTimestamp;
if (entry->hasAudioLevel) {
domEntry.mAudioLevel.Construct(entry->ToLinearAudioLevel());
}
@@ -136,9 +138,11 @@ void RtpSourceObserver::RtpSourceHistory::Prune(const int64_t aTimeNow) {
void RtpSourceObserver::RtpSourceHistory::Insert(const int64_t aTimeNow,
const int64_t aTimestamp,
const uint32_t aRtpTimestamp,
const bool aHasAudioLevel,
const uint8_t aAudioLevel) {
Insert(aTimeNow, aTimestamp).Update(aTimestamp, aHasAudioLevel, aAudioLevel);
Insert(aTimeNow, aTimestamp)
.Update(aTimestamp, aRtpTimestamp, aHasAudioLevel, aAudioLevel);
}
RtpSourceObserver::RtpSourceEntry& RtpSourceObserver::RtpSourceHistory::Insert(

View file

@@ -57,9 +57,10 @@ class RtpSourceObserver : public webrtc::RtpPacketObserver {
// Note: these are pool allocated
struct RtpSourceEntry {
RtpSourceEntry() = default;
void Update(const int64_t aTimestamp, const bool aHasAudioLevel,
const uint8_t aAudioLevel) {
void Update(const int64_t aTimestamp, const uint32_t aRtpTimestamp,
const bool aHasAudioLevel, const uint8_t aAudioLevel) {
jitterAdjustedTimestamp = aTimestamp;
rtpTimestamp = aRtpTimestamp;
// Audio level range is 0 - 127 inclusive
hasAudioLevel = aHasAudioLevel && !(aAudioLevel & 0x80);
audioLevel = aAudioLevel;
@@ -69,6 +70,8 @@ class RtpSourceObserver : public webrtc::RtpPacketObserver {
double ToLinearAudioLevel() const;
// Time this information was received + jitter
int64_t jitterAdjustedTimestamp = 0;
// The original RTP timestamp in the received packet
uint32_t rtpTimestamp = 0;
bool hasAudioLevel = false;
uint8_t audioLevel = 0;
};
@@ -103,7 +106,8 @@ class RtpSourceObserver : public webrtc::RtpPacketObserver {
const RtpSourceEntry* FindClosestNotAfter(int64_t aTime) const;
// Inserts data into the history, may silently drop data if it is too old
void Insert(const int64_t aTimeNow, const int64_t aTimestamp,
const bool aHasAudioLevel, const uint8_t aAudioLevel);
const uint32_t aRtpTimestamp, const bool aHasAudioLevel,
const uint8_t aAudioLevel);
// Removes aged-out entries from the jitter window
void Prune(const int64_t aTimeNow);
// Set Source
@@ -168,11 +172,10 @@ class RtpSourceObserver : public webrtc::RtpPacketObserver {
// Testing only
// Inserts additional csrc audio levels for mochitests
friend void InsertAudioLevelForContributingSource(RtpSourceObserver& observer,
uint32_t aCsrcSource,
int64_t aTimestamp,
bool aHasAudioLevel,
uint8_t aAudioLevel);
friend void InsertAudioLevelForContributingSource(
RtpSourceObserver& observer, const uint32_t aCsrcSource,
const int64_t aTimestamp, const uint32_t aRtpTimestamp,
const bool aHasAudioLevel, const uint8_t aAudioLevel);
};
} // namespace mozilla
#undef NG

View file

@@ -1741,15 +1741,16 @@ DOMHighResTimeStamp PeerConnectionImpl::GetNowInRtpSourceReferenceTime() {
// test-only: adds fake CSRCs and audio data
nsresult PeerConnectionImpl::InsertAudioLevelForContributingSource(
dom::MediaStreamTrack& aRecvTrack, unsigned long aSource,
DOMHighResTimeStamp aTimestamp, bool aHasLevel, uint8_t aLevel) {
const dom::MediaStreamTrack& aRecvTrack, const unsigned long aSource,
const DOMHighResTimeStamp aTimestamp, const unsigned long aRtpTimestamp,
const bool aHasLevel, const uint8_t aLevel) {
PC_AUTO_ENTER_API_CALL(true);
std::vector<RefPtr<TransceiverImpl>>& transceivers =
mMedia->GetTransceivers();
for (RefPtr<TransceiverImpl>& transceiver : transceivers) {
if (transceiver->HasReceiveTrack(&aRecvTrack)) {
transceiver->InsertAudioLevelForContributingSource(aSource, aTimestamp,
aHasLevel, aLevel);
transceiver->InsertAudioLevelForContributingSource(
aSource, aTimestamp, aRtpTimestamp, aHasLevel, aLevel);
break;
}
}

View file

@@ -345,12 +345,13 @@ class PeerConnectionImpl final
// test-only: called from contributing sources mochitests.
NS_IMETHODIMP_TO_ERRORRESULT(InsertAudioLevelForContributingSource,
ErrorResult& rv,
dom::MediaStreamTrack& aRecvTrack,
unsigned long aSource,
DOMHighResTimeStamp aTimestamp, bool aHasLevel,
uint8_t aLevel) {
rv = InsertAudioLevelForContributingSource(aRecvTrack, aSource, aTimestamp,
aHasLevel, aLevel);
const dom::MediaStreamTrack& aRecvTrack,
const unsigned long aSource,
const DOMHighResTimeStamp aTimestamp,
const unsigned long aRtpTimestamp,
const bool aHasLevel, const uint8_t aLevel) {
rv = InsertAudioLevelForContributingSource(
aRecvTrack, aSource, aTimestamp, aRtpTimestamp, aHasLevel, aLevel);
}
// test-only: called from simulcast mochitests.

View file

@@ -991,17 +991,16 @@ void TransceiverImpl::GetRtpSources(
audio_conduit->GetRtpSources(aTimeNow, outSources);
}
void TransceiverImpl::InsertAudioLevelForContributingSource(uint32_t aSource,
int64_t aTimestamp,
bool aHasLevel,
uint8_t aLevel) {
void TransceiverImpl::InsertAudioLevelForContributingSource(
const uint32_t aSource, const int64_t aTimestamp,
const uint32_t aRtpTimestamp, const bool aHasLevel, const uint8_t aLevel) {
if (!IsValid() || IsVideo()) {
return;
}
WebrtcAudioConduit* audio_conduit =
static_cast<WebrtcAudioConduit*>(mConduit.get());
audio_conduit->InsertAudioLevelForContributingSource(aSource, aTimestamp,
aHasLevel, aLevel);
audio_conduit->InsertAudioLevelForContributingSource(
aSource, aTimestamp, aRtpTimestamp, aHasLevel, aLevel);
}
} // namespace mozilla

View file

@@ -123,9 +123,11 @@ class TransceiverImpl : public nsISupports {
nsTArray<dom::RTCRtpSourceEntry>& outSources) const;
// test-only: insert fake CSRCs and audio levels for testing
void InsertAudioLevelForContributingSource(uint32_t aSource,
int64_t aTimestamp, bool aHasLevel,
uint8_t aLevel);
void InsertAudioLevelForContributingSource(const uint32_t aSource,
const int64_t aTimestamp,
const uint32_t aRtpTimestamp,
const bool aHasLevel,
const uint8_t aLevel);
NS_DECL_THREADSAFE_ISUPPORTS