Backed out 4 changesets (bug 1437366) for mochitest failures on /test_peerConnection_basicAudio_forced_higher_rate.html

Backed out changeset 57826a5b03e8 (bug 1437366)
Backed out changeset a26852df062b (bug 1437366)
Backed out changeset abca0eb36d33 (bug 1437366)
Backed out changeset 41bb4a676575 (bug 1437366)

--HG--
extra : rebase_source : d81a0833db5d331fe928100cf88d1b9cc9253a1b
Narcis Beleuzu 2018-03-08 19:27:25 +02:00
Parent d1e89266c4
Commit e73ecbc91f
7 changed files with 56 additions and 269 deletions

View file

@@ -33,9 +33,6 @@
#define PREF_CUBEB_BACKEND "media.cubeb.backend"
#define PREF_CUBEB_LATENCY_PLAYBACK "media.cubeb_latency_playback_ms"
#define PREF_CUBEB_LATENCY_MSG "media.cubeb_latency_msg_frames"
// Allows getting something non-default for the preferred sample-rate, to allow
// troubleshooting in the field and testing.
#define PREF_CUBEB_FORCE_SAMPLE_RATE "media.cubeb.force_sample_rate"
#define PREF_CUBEB_LOGGING_LEVEL "media.cubeb.logging_level"
#define PREF_CUBEB_SANDBOX "media.cubeb.sandbox"
@@ -125,10 +122,6 @@ cubeb* sCubebContext;
double sVolumeScale = 1.0;
uint32_t sCubebPlaybackLatencyInMilliseconds = 100;
uint32_t sCubebMSGLatencyInFrames = 512;
// If sCubebForcedSampleRate is zero, PreferredSampleRate will return the
// preferred sample-rate for the audio backend in use. Otherwise, it will be
// used as the preferred sample-rate.
uint32_t sCubebForcedSampleRate = 0;
bool sCubebPlaybackLatencyPrefSet = false;
bool sCubebMSGLatencyPrefSet = false;
bool sAudioStreamInitEverSucceeded = false;
@@ -244,9 +237,6 @@ void PrefChanged(const char* aPref, void* aClosure)
// We don't want to limit the upper limit too much, so that people can
// experiment.
sCubebMSGLatencyInFrames = std::min<uint32_t>(std::max<uint32_t>(value, 128), 1e6);
} else if (strcmp(aPref, PREF_CUBEB_FORCE_SAMPLE_RATE) == 0) {
StaticMutexAutoLock lock(sMutex);
sCubebForcedSampleRate = Preferences::GetUint(aPref);
} else if (strcmp(aPref, PREF_CUBEB_LOGGING_LEVEL) == 0) {
nsAutoCString value;
Preferences::GetCString(aPref, value);
@@ -333,9 +323,6 @@ bool InitPreferredSampleRate()
uint32_t PreferredSampleRate()
{
if (sCubebForcedSampleRate) {
return sCubebForcedSampleRate;
}
if (!InitPreferredSampleRate()) {
return 44100;
}
@@ -576,7 +563,6 @@ void InitLibrary()
Preferences::RegisterCallbackAndCall(PrefChanged, PREF_VOLUME_SCALE);
Preferences::RegisterCallbackAndCall(PrefChanged, PREF_CUBEB_LATENCY_PLAYBACK);
Preferences::RegisterCallbackAndCall(PrefChanged, PREF_CUBEB_LATENCY_MSG);
Preferences::RegisterCallback(PrefChanged, PREF_CUBEB_FORCE_SAMPLE_RATE);
Preferences::RegisterCallbackAndCall(PrefChanged, PREF_CUBEB_BACKEND);
Preferences::RegisterCallbackAndCall(PrefChanged, PREF_CUBEB_SANDBOX);
if (MOZ_LOG_TEST(gCubebLog, LogLevel::Verbose)) {
@@ -606,7 +592,6 @@ void ShutdownLibrary()
Preferences::UnregisterCallback(PrefChanged, PREF_CUBEB_SANDBOX);
Preferences::UnregisterCallback(PrefChanged, PREF_CUBEB_BACKEND);
Preferences::UnregisterCallback(PrefChanged, PREF_CUBEB_LATENCY_PLAYBACK);
Preferences::UnregisterCallback(PrefChanged, PREF_CUBEB_FORCE_SAMPLE_RATE);
Preferences::UnregisterCallback(PrefChanged, PREF_CUBEB_LATENCY_MSG);
Preferences::UnregisterCallback(PrefChanged, PREF_CUBEB_LOGGING_LEVEL);
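
For reference, the CubebUtils.cpp mechanism removed by this backout is spread across the hunks above; condensed, it amounts to the sketch below (identifiers taken from the diff, unrelated code elided, so this is an illustrative reconstruction rather than the literal file contents):

#define PREF_CUBEB_FORCE_SAMPLE_RATE "media.cubeb.force_sample_rate"

// 0 means "use the backend's preferred rate"; any non-zero value is reported
// as the preferred sample-rate instead, for field troubleshooting and testing.
uint32_t sCubebForcedSampleRate = 0;

void PrefChanged(const char* aPref, void* aClosure)
{
  // ... handling of the other media.cubeb.* prefs elided ...
  if (strcmp(aPref, PREF_CUBEB_FORCE_SAMPLE_RATE) == 0) {
    StaticMutexAutoLock lock(sMutex);
    sCubebForcedSampleRate = Preferences::GetUint(aPref);
  }
}

uint32_t PreferredSampleRate()
{
  if (sCubebForcedSampleRate) {
    return sCubebForcedSampleRate; // forced rate wins over the backend's choice
  }
  if (!InitPreferredSampleRate()) {
    return 44100;                  // fallback when the backend cannot be queried
  }
  // ... otherwise return the rate obtained from the backend ...
}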

View file

@@ -15,7 +15,6 @@ support-files =
sdpUtils.js
addTurnsSelfsignedCert.js
parser_rtp.js
peerconnection_audio_forced_sample_rate.js
!/dom/canvas/test/captureStream_common.js
!/dom/canvas/test/webgl-mochitest/webgl-util.js
!/dom/media/test/manifest.js
@@ -105,8 +104,6 @@ skip-if = (android_version == '18') # android(Bug 1189784, timeouts on 4.3 emula
skip-if = (android_version == '18') # android(Bug 1189784, timeouts on 4.3 emulator)
[test_peerConnection_basicAudio.html]
skip-if = (android_version == '18') # android(Bug 1189784, timeouts on 4.3 emulator)
[test_peerConnection_basicAudio_forced_lower_rate.html]
[test_peerConnection_basicAudio_forced_higher_rate.html]
[test_peerConnection_audioSynchronizationSources.html]
skip-if = (android_version == '18') # android(Bug 1189784, timeouts on 4.3 emulator)
[test_peerConnection_audioSynchronizationSourcesUnidirectional.html]
@@ -328,4 +325,3 @@ skip-if = (android_version == '18') # android(Bug 1189784, timeouts on 4.3 emula
[test_peerConnection_verifyDescriptions.html]
skip-if = (android_version == '18')
[test_fingerprinting_resistance.html]
[test_forceSampleRate.html]

View file

@@ -1,38 +0,0 @@
// This function takes a sample-rate, and tests that audio flows correctly when
// the sampling-rate at which the MSG runs is not one of the sampling-rates that
// the MediaPipeline can work with.
// It is in a separate file because we have an MSG per document, and we want to
// test multiple sample-rates, so we include it in multiple HTML mochitest
// files.
function test_peerconnection_audio_forced_sample_rate(forcedSampleRate) {
scriptsReady.then(function () {
pushPrefs(
["media.cubeb.force_sample_rate", forcedSampleRate]
).then(function () {
runNetworkTest(function (options) {
let test = new PeerConnectionTest(options);
let ac = new AudioContext();
test.setMediaConstraints([{
audio: true
}], []);
test.chain.replace("PC_LOCAL_GUM", [
function PC_LOCAL_WEBAUDIO_SOURCE(test) {
let oscillator = ac.createOscillator();
oscillator.type = 'sine';
oscillator.frequency.value = 700;
oscillator.start();
let dest = ac.createMediaStreamDestination();
oscillator.connect(dest);
test.pcLocal.attachLocalStream(dest.stream);
}
]);
test.chain.append([
function CHECK_REMOTE_AUDIO_FLOW(test) {
return test.pcRemote.checkReceivingToneFrom(ac, test.pcLocal);
}
]);
test.run();
});
});
})
}

View file

@@ -1,23 +0,0 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test the pref media.cubeb.force_sample_rate</title>
<script type="text/javascript" src="/MochiKit/MochiKit.js"></script>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<script class="testbody" type="text/javascript">
const WEIRD_SAMPLE_RATE = 44101;
SimpleTest.waitForExplicitFinish();
SpecialPowers.pushPrefEnv({"set": [
["media.cubeb.force_sample_rate", WEIRD_SAMPLE_RATE]
]}).then(function() {
var ac = new AudioContext();
is(ac.sampleRate, WEIRD_SAMPLE_RATE, "Forced sample-rate set successfully.");
SimpleTest.finish();
});
</script>
</pre>
</body>

View file

@@ -1,20 +0,0 @@
<!DOCTYPE HTML>
<html>
<head>
<script type="application/javascript" src="pc.js"></script>
<script type="application/javascript" src="peerconnection_audio_forced_sample_rate.js"></script>
</head>
<body>
<pre id="test">
<script type="application/javascript">
createHTML({
bug: "1437366",
title: "Basic audio-only peer connection, with the MSG running at a rate not supported by the MediaPipeline (96000Hz)"
});
test_peerconnection_audio_forced_sample_rate(96000);
</script>
</pre>
</body>
</html>

View file

@@ -1,20 +0,0 @@
<!DOCTYPE HTML>
<html>
<head>
<script type="application/javascript" src="pc.js"></script>
<script type="application/javascript" src="peerconnection_audio_forced_sample_rate.js"></script>
</head>
<body>
<pre id="test">
<script type="application/javascript">
createHTML({
bug: "1437366",
title: "Basic audio-only peer connection, with the MSG running at a rate not supported by the MediaPipeline (24000Hz)"
});
test_peerconnection_audio_forced_sample_rate(24000);
</script>
</pre>
</body>
</html>

View file

@@ -11,7 +11,6 @@
#include <math.h>
#include "AudioSegment.h"
#include "AudioConverter.h"
#include "AutoTaskQueue.h"
#include "CSFLog.h"
#include "DOMMediaStream.h"
@@ -488,156 +487,76 @@ public:
, mTaskQueue(
new AutoTaskQueue(GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
"AudioProxy"))
, mAudioConverter(nullptr)
{
MOZ_ASSERT(mConduit);
MOZ_COUNT_CTOR(AudioProxyThread);
}
// This function is the identity if aInputRate is supported.
// Otherwise, it returns a supported rate that ensures no loss in audio
// quality: the sampling-rate returned is always greater than the input
// sampling-rate, if they differ.
uint32_t AppropriateSendingRateForInputRate(uint32_t aInputRate)
{
AudioSessionConduit* conduit =
static_cast<AudioSessionConduit*>(mConduit.get());
if (conduit->IsSamplingFreqSupported(aInputRate)) {
return aInputRate;
}
if (aInputRate < 16000) {
return 16000;
} else if (aInputRate < 32000) {
return 32000;
} else if (aInputRate < 44100) {
return 44100;
} else {
return 48000;
}
}
// From an arbitrary AudioChunk at sampling-rate aRate, process the audio into
// something the conduit can work with (or send silence if the track is not
// enabled), and send the audio in 10ms chunks to the conduit.
void InternalProcessAudioChunk(TrackRate aRate,
const AudioChunk& aChunk,
bool aEnabled)
void InternalProcessAudioChunk(TrackRate rate,
const AudioChunk& chunk,
bool enabled)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
// Convert to interleaved 16-bits integer audio, with a maximum of two
// Convert to interleaved, 16-bits integer audio, with a maximum of two
// channels (since the WebRTC.org code below makes the assumption that the
// input audio is either mono or stereo), with a sample-rate rate that is
// 16, 32, 44.1, or 48kHz.
uint32_t outputChannels = aChunk.ChannelCount() == 1 ? 1 : 2;
int32_t transmissionRate = AppropriateSendingRateForInputRate(aRate);
// input audio is either mono or stereo).
uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
const int16_t* samples = nullptr;
UniquePtr<int16_t[]> convertedSamples;
// We take advantage of the fact that in the common case (microphone directly
// to PeerConnection, that is, a normal call) the samples are already
// 16-bit mono, so the interleaved and planar representations are the
// same, and we can just use that.
if (aEnabled &&
outputChannels == 1 &&
aChunk.mBufferFormat == AUDIO_FORMAT_S16 &&
transmissionRate == aRate) {
const int16_t* samples = aChunk.ChannelData<int16_t>().Elements()[0];
PacketizeAndSend(samples,
transmissionRate,
outputChannels,
aChunk.mDuration);
return;
}
uint32_t sampleCount = aChunk.mDuration * outputChannels;
if (mInterleavedAudio.Length() < sampleCount) {
mInterleavedAudio.SetLength(sampleCount);
}
if (!aEnabled || aChunk.mBufferFormat == AUDIO_FORMAT_SILENCE) {
PodZero(mInterleavedAudio.Elements(), sampleCount);
} else if (aChunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
DownmixAndInterleave(aChunk.ChannelData<float>(),
aChunk.mDuration,
aChunk.mVolume,
outputChannels,
mInterleavedAudio.Elements());
} else if (aChunk.mBufferFormat == AUDIO_FORMAT_S16) {
DownmixAndInterleave(aChunk.ChannelData<int16_t>(),
aChunk.mDuration,
aChunk.mVolume,
outputChannels,
mInterleavedAudio.Elements());
}
int16_t* inputAudio = mInterleavedAudio.Elements();
size_t inputAudioFrameCount = aChunk.mDuration;
AudioConfig inputConfig(AudioConfig::ChannelLayout(outputChannels),
aRate,
AudioConfig::FORMAT_S16);
AudioConfig outputConfig(AudioConfig::ChannelLayout(outputChannels),
transmissionRate,
AudioConfig::FORMAT_S16);
// Resample to an acceptable sample-rate for the sending side
if (!mAudioConverter ||
mAudioConverter->InputConfig() != inputConfig ||
mAudioConverter->OutputConfig() != outputConfig) {
mAudioConverter = MakeUnique<AudioConverter>(inputConfig, outputConfig);
}
int16_t* processedAudio = nullptr;
size_t framesProcessed =
mAudioConverter->Process(inputAudio, inputAudioFrameCount);
if (framesProcessed == 0) {
// In place conversion not possible, use a buffer.
framesProcessed =
mAudioConverter->Process(mOutputAudio,
inputAudio,
inputAudioFrameCount);
processedAudio = mOutputAudio.Data();
if (enabled && outputChannels == 1 &&
chunk.mBufferFormat == AUDIO_FORMAT_S16) {
samples = chunk.ChannelData<int16_t>().Elements()[0];
} else {
processedAudio = inputAudio;
convertedSamples =
MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);
if (!enabled || chunk.mBufferFormat == AUDIO_FORMAT_SILENCE) {
PodZero(convertedSamples.get(), chunk.mDuration * outputChannels);
} else if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
DownmixAndInterleave(chunk.ChannelData<float>(),
chunk.mDuration,
chunk.mVolume,
outputChannels,
convertedSamples.get());
} else if (chunk.mBufferFormat == AUDIO_FORMAT_S16) {
DownmixAndInterleave(chunk.ChannelData<int16_t>(),
chunk.mDuration,
chunk.mVolume,
outputChannels,
convertedSamples.get());
}
samples = convertedSamples.get();
}
PacketizeAndSend(processedAudio,
transmissionRate,
outputChannels,
framesProcessed);
}
MOZ_ASSERT(!(rate % 100)); // rate should be a multiple of 100
// This packetizes aAudioData in 10ms chunks and sends it.
// aAudioData is interleaved audio data at a rate and with a channel count
// that is appropriate to send with the conduit.
void PacketizeAndSend(const int16_t* aAudioData,
uint32_t aRate,
uint32_t aChannels,
uint32_t aFrameCount)
{
MOZ_ASSERT(AppropriateSendingRateForInputRate(aRate) == aRate);
MOZ_ASSERT(aChannels == 1 || aChannels == 2);
MOZ_ASSERT(aAudioData);
// Check if the rate or the number of channels has changed since the last
// time we came through. I realize it may be overkill to check if the rate
// has changed, but I believe it is possible (e.g. if we change sources) and
// it costs us very little to handle this case.
uint32_t audio_10ms = aRate / 100;
uint32_t audio_10ms = rate / 100;
if (!mPacketizer || mPacketizer->PacketSize() != audio_10ms ||
mPacketizer->Channels() != aChannels) {
// It's the right thing to drop the bit of audio still in the packetizer:
// we don't want to send the conduit audio at two different rates while
// telling it that the rate is constant.
mPacketizer->Channels() != outputChannels) {
// It's ok to drop the audio still in the packetizer here.
mPacketizer = MakeUnique<AudioPacketizer<int16_t, int16_t>>(
audio_10ms, aChannels);
mPacket = MakeUnique<int16_t[]>(audio_10ms * aChannels);
audio_10ms, outputChannels);
mPacket = MakeUnique<int16_t[]>(audio_10ms * outputChannels);
}
mPacketizer->Input(aAudioData, aFrameCount);
mPacketizer->Input(samples, chunk.mDuration);
while (mPacketizer->PacketsAvailable()) {
mPacketizer->Output(mPacket.get());
mConduit->SendAudioFrame(mPacket.get(),
mPacketizer->PacketSize(),
aRate,
mPacketizer->Channels(),
0);
mConduit->SendAudioFrame(
mPacket.get(), mPacketizer->PacketSize(), rate, mPacketizer->Channels(), 0);
}
}
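
As a worked example of the packetization arithmetic in the hunk above (a sketch reusing the names from the diff, not code from the tree):

// A 10 ms packet holds rate / 100 frames per channel, which is why the code
// asserts !(rate % 100): every rate the conduit accepts divides evenly by 100.
//   16000 Hz -> 160 frames   32000 Hz -> 320 frames
//   44100 Hz -> 441 frames   48000 Hz -> 480 frames
uint32_t audio_10ms = rate / 100;
// mPacket holds one packet of interleaved samples, so for stereo at 48000 Hz
// it is sized for 480 * 2 = 960 int16_t values.
size_t packetSamples = audio_10ms * outputChannels;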
@@ -669,9 +588,6 @@ protected:
UniquePtr<AudioPacketizer<int16_t, int16_t>> mPacketizer;
// A buffer to hold a single packet of audio.
UniquePtr<int16_t[]> mPacket;
nsTArray<int16_t> mInterleavedAudio;
AlignedShortBuffer mOutputAudio;
UniquePtr<AudioConverter> mAudioConverter;
};
static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
@@ -2049,24 +1965,14 @@ public:
, mMaybeTrackNeedsUnmute(true)
{
MOZ_RELEASE_ASSERT(mSource, "Must be used with a SourceMediaStream");
}
virtual ~GenericReceiveListener()
{
NS_ReleaseOnMainThreadSystemGroup(
"GenericReceiveListener::track_", mTrack.forget());
}
void AddTrackToSource(uint32_t aRate = 0)
{
MOZ_ASSERT((aRate != 0 && mTrack->AsAudioStreamTrack()) ||
mTrack->AsVideoStreamTrack());
if (mTrack->AsAudioStreamTrack()) {
mSource->AddAudioTrack(
mTrackId, aRate, 0, new AudioSegment());
mTrackId, mSource->GraphRate(), 0, new AudioSegment());
} else if (mTrack->AsVideoStreamTrack()) {
mSource->AddTrack(mTrackId, 0, new VideoSegment());
} else {
MOZ_ASSERT_UNREACHABLE("Unknown track type");
}
CSFLogDebug(
LOGTAG,
@@ -2080,6 +1986,12 @@ public:
mSource->AddListener(this);
}
virtual ~GenericReceiveListener()
{
NS_ReleaseOnMainThreadSystemGroup(
"GenericReceiveListener::track_", mTrack.forget());
}
void AddSelf()
{
if (!mListening) {
@@ -2203,7 +2115,6 @@ public:
"AudioPipelineListener"))
, mLastLog(0)
{
AddTrackToSource(mRate);
}
// Implement MediaStreamListener
@@ -2235,13 +2146,10 @@ private:
void NotifyPullImpl(StreamTime aDesiredTime)
{
uint32_t samplesPer10ms = mRate / 100;
// mSource's rate is not necessarily the same as the graph rate, since there
// are sample-rate constraints on the inbound audio: only 16, 32, 44.1 and
// 48kHz are supported. The audio frames we get here is going to be
// resampled when inserted into the graph.
TrackTicks desired = mSource->TimeToTicksRoundUp(mRate, aDesiredTime);
TrackTicks framesNeeded = desired - mPlayedTicks;
// Determine how many frames we need.
// As we get frames from conduit_ at the same rate as the graph's rate,
// the number of frames needed is straightforward to determine.
TrackTicks framesNeeded = aDesiredTime - mPlayedTicks;
while (framesNeeded >= 0) {
const int scratchBufferLength =
@@ -2405,7 +2313,6 @@ public:
LayerManager::CreateImageContainer(ImageContainer::ASYNCHRONOUS))
, mMutex("Video PipelineListener")
{
AddTrackToSource();
}
// Implement MediaStreamListener