Bug 1658353 - Add global mute/unmute capability for microphones, with tests. r=jib

Depends on D87679

Differential Revision: https://phabricator.services.mozilla.com/D86718
Mike Conley 2020-08-26 21:35:46 +00:00
Parent b89cef39b8
Commit 90e18673c3
5 changed files with 424 additions and 15 deletions

View file

@@ -136,6 +136,20 @@ class WebRTCChild extends JSWindowActorChild {
aMessage.data
);
break;
case "webrtc:MuteMicrophone":
Services.obs.notifyObservers(
null,
"getUserMedia:muteAudio",
aMessage.data
);
break;
case "webrtc:UnmuteMicrophone":
Services.obs.notifyObservers(
null,
"getUserMedia:unmuteAudio",
aMessage.data
);
break;
}
}
}
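
A minimal sketch of the chrome-side counterpart, for context: the patch above only adds the child-side handling, so the helper below (its name, the actor lookup, and the empty message payload) is an assumption for illustration, not code from this commit.

// Hypothetical parent-process helper (not in this patch): ask the WebRTC
// actor of every tab to forward a global microphone mute into its content
// process, where the handler above turns it into the
// "getUserMedia:muteAudio" / "getUserMedia:unmuteAudio" notifications.
function setAllMicrophonesMuted(mute) {
  for (const browser of gBrowser.browsers) {
    const actor = browser.browsingContext?.currentWindowGlobal?.getActor(
      "WebRTC"
    );
    actor?.sendAsyncMessage(
      mute ? "webrtc:MuteMicrophone" : "webrtc:UnmuteMicrophone",
      {}
    );
  }
}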

View file

@@ -1,15 +1,27 @@
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
async function setCameraMuted(mute) {
function setCameraMuted(mute) {
return sendObserverNotification(
mute ? "getUserMedia:muteVideo" : "getUserMedia:unmuteVideo"
);
}
function setMicrophoneMuted(mute) {
return sendObserverNotification(
mute ? "getUserMedia:muteAudio" : "getUserMedia:unmuteAudio"
);
}
function sendObserverNotification(topic) {
const windowId = gBrowser.selectedBrowser.innerWindowID;
return SpecialPowers.spawn(
gBrowser.selectedBrowser,
[{ mute, windowId }],
[{ topic, windowId }],
function(args) {
Services.obs.notifyObservers(
content.window,
args.mute ? "getUserMedia:muteVideo" : "getUserMedia:unmuteVideo",
args.topic,
JSON.stringify(args.windowId)
);
}
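
Usage of these helpers is straightforward; a sketch of the calling pattern, as the tests further down actually use it:

// Mute, then unmute, the microphone for the selected tab. Each call fires
// the matching "getUserMedia:muteAudio" / "getUserMedia:unmuteAudio"
// observer notification inside the content process.
await setMicrophoneMuted(true);
await setMicrophoneMuted(false);
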
@@ -48,6 +60,22 @@ async function getVideoTrackEvents() {
);
}
async function getAudioTrackMuted() {
return SpecialPowers.spawn(
gBrowser.selectedBrowser,
[],
() => content.wrappedJSObject.gStreams[0].getAudioTracks()[0].muted
);
}
async function getAudioTrackEvents() {
return SpecialPowers.spawn(
gBrowser.selectedBrowser,
[],
() => content.wrappedJSObject.gAudioEvents
);
}
function cloneTracks(audio, video) {
return SpecialPowers.spawn(
gBrowser.selectedBrowser,
@@ -358,7 +386,7 @@ var gTests = [
{
desc:
"getUserMedia audio+video: muting the camera shows the muted indicator",
run: async function checkMuted() {
run: async function checkCameraMuted() {
let observerPromise = expectObserverCalled("getUserMedia:request");
let promise = promisePopupNotificationShown("webRTC-shareDevices");
await promiseRequestDevice(true, true);
@@ -443,6 +471,94 @@ var gTests = [
},
},
{
desc:
"getUserMedia audio+video: muting the microphone shows the muted indicator",
run: async function checkMicrophoneMuted() {
let observerPromise = expectObserverCalled("getUserMedia:request");
let promise = promisePopupNotificationShown("webRTC-shareDevices");
await promiseRequestDevice(true, true);
await promise;
await observerPromise;
checkDeviceSelectors(true, true);
let indicator = promiseIndicatorWindow();
let observerPromise1 = expectObserverCalled(
"getUserMedia:response:allow"
);
let observerPromise2 = expectObserverCalled("recording-device-events");
await promiseMessage("ok", () => {
PopupNotifications.panel.firstElementChild.button.click();
});
await observerPromise1;
await observerPromise2;
Assert.deepEqual(
await getMediaCaptureState(),
{ audio: true, video: true },
"expected camera and microphone to be shared"
);
await indicator;
await checkSharingUI({
video: STATE_CAPTURE_ENABLED,
audio: STATE_CAPTURE_ENABLED,
});
is(await getAudioTrackMuted(), false, "audio track starts unmuted");
Assert.deepEqual(
await getAudioTrackEvents(),
[],
"no audio track events fired yet"
);
// Mute microphone.
observerPromise = expectObserverCalled("recording-device-events");
await setMicrophoneMuted(true);
// Wait for capture state to propagate to the UI asynchronously.
await BrowserTestUtils.waitForCondition(
() =>
window.gIdentityHandler._sharingState.webRTC.microphone ==
STATE_CAPTURE_DISABLED,
"audio should be muted"
);
await observerPromise;
// The identity UI should show only microphone as disabled.
await checkSharingUI({
video: STATE_CAPTURE_ENABLED,
audio: STATE_CAPTURE_DISABLED,
});
is(await getAudioTrackMuted(), true, "audio track is muted");
Assert.deepEqual(await getAudioTrackEvents(), ["mute"], "mute fired");
// Unmute audio again.
observerPromise = expectObserverCalled("recording-device-events");
await setMicrophoneMuted(false);
await BrowserTestUtils.waitForCondition(
() =>
window.gIdentityHandler._sharingState.webRTC.microphone ==
STATE_CAPTURE_ENABLED,
"audio should be enabled"
);
await observerPromise;
// Both streams should show as running.
await checkSharingUI({
video: STATE_CAPTURE_ENABLED,
audio: STATE_CAPTURE_ENABLED,
});
is(await getAudioTrackMuted(), false, "audio track is unmuted");
Assert.deepEqual(
await getAudioTrackEvents(),
["mute", "unmute"],
"unmute fired"
);
await closeStream();
},
},
{
desc: "getUserMedia audio+video: disabling & muting camera in combination",
// Test the following combinations of disabling and muting camera:
@@ -659,6 +775,224 @@ var gTests = [
await closeStream();
},
},
{
desc:
"getUserMedia audio+video: disabling & muting microphone in combination",
// Test the following combinations of disabling and muting microphone:
// 1. Disable audio track only.
// 2. Mute microphone & disable video (to have a condition to wait for)
// 3. Enable both audio and video tracks (only video should flow).
// 4. Unmute microphone again (audio should flow).
// 5. Mute microphone & disable both tracks.
// 6. Unmute microphone & enable video (only video should flow)
// 7. Enable audio track again (audio should flow).
run: async function checkDisabledMutedCombination() {
let observerPromise = expectObserverCalled("getUserMedia:request");
let promise = promisePopupNotificationShown("webRTC-shareDevices");
await promiseRequestDevice(true, true);
await promise;
await observerPromise;
checkDeviceSelectors(true, true);
let indicator = promiseIndicatorWindow();
let observerPromise1 = expectObserverCalled(
"getUserMedia:response:allow"
);
let observerPromise2 = expectObserverCalled("recording-device-events");
await promiseMessage("ok", () => {
PopupNotifications.panel.firstElementChild.button.click();
});
await observerPromise1;
await observerPromise2;
Assert.deepEqual(
await getMediaCaptureState(),
{ audio: true, video: true },
"expected camera and microphone to be shared"
);
await indicator;
await checkSharingUI({
video: STATE_CAPTURE_ENABLED,
audio: STATE_CAPTURE_ENABLED,
});
// 1. Disable audio track only.
observerPromise = expectObserverCalled("recording-device-events");
await setTrackEnabled(false, null);
// Wait for capture state to propagate to the UI asynchronously.
await BrowserTestUtils.waitForCondition(
() =>
window.gIdentityHandler._sharingState.webRTC.microphone ==
STATE_CAPTURE_DISABLED,
"audio should be disabled"
);
await observerPromise;
// The identity UI should show only audio as disabled.
await checkSharingUI({
video: STATE_CAPTURE_ENABLED,
audio: STATE_CAPTURE_DISABLED,
});
is(await getAudioTrackMuted(), false, "audio track still unmuted");
Assert.deepEqual(
await getAudioTrackEvents(),
[],
"no audio track events fired yet"
);
// 2. Mute microphone & disable video (to have a condition to wait for)
observerPromise = expectObserverCalled("recording-device-events", 2);
await setMicrophoneMuted(true);
await setTrackEnabled(null, false);
await BrowserTestUtils.waitForCondition(
() =>
window.gIdentityHandler._sharingState.webRTC.camera ==
STATE_CAPTURE_DISABLED,
"camera should be disabled"
);
await observerPromise;
// The identity UI should show both as disabled.
await checkSharingUI({
video: STATE_CAPTURE_DISABLED,
audio: STATE_CAPTURE_DISABLED,
});
is(await getAudioTrackMuted(), true, "audio track is muted");
Assert.deepEqual(
await getAudioTrackEvents(),
["mute"],
"mute is still fired even though track was disabled"
);
// 3. Enable both audio and video tracks (only video should flow).
observerPromise = expectObserverCalled("recording-device-events", 2);
await setTrackEnabled(true, true);
await BrowserTestUtils.waitForCondition(
() =>
window.gIdentityHandler._sharingState.webRTC.camera ==
STATE_CAPTURE_ENABLED,
"video should be enabled"
);
await observerPromise;
// The identity UI should show only video as enabled, as audio is muted.
await checkSharingUI({
video: STATE_CAPTURE_ENABLED,
audio: STATE_CAPTURE_DISABLED,
});
is(await getAudioTrackMuted(), true, "audio track is still muted");
Assert.deepEqual(await getAudioTrackEvents(), ["mute"], "no new events");
// 4. Unmute microphone again (audio should flow).
observerPromise = expectObserverCalled("recording-device-events");
await setMicrophoneMuted(false);
await BrowserTestUtils.waitForCondition(
() =>
window.gIdentityHandler._sharingState.webRTC.microphone ==
STATE_CAPTURE_ENABLED,
"audio should be enabled"
);
await observerPromise;
// Both streams should show as running.
await checkSharingUI({
video: STATE_CAPTURE_ENABLED,
audio: STATE_CAPTURE_ENABLED,
});
is(await getAudioTrackMuted(), false, "audio track is unmuted");
Assert.deepEqual(
await getAudioTrackEvents(),
["mute", "unmute"],
"unmute fired"
);
// 5. Mute microphone & disable both tracks.
observerPromise = expectObserverCalled("recording-device-events", 3);
await setMicrophoneMuted(true);
await setTrackEnabled(false, false);
await BrowserTestUtils.waitForCondition(
() =>
window.gIdentityHandler._sharingState.webRTC.microphone ==
STATE_CAPTURE_DISABLED,
"audio should be disabled"
);
await observerPromise;
// The identity UI should show both as disabled.
await checkSharingUI({
video: STATE_CAPTURE_DISABLED,
audio: STATE_CAPTURE_DISABLED,
});
is(await getAudioTrackMuted(), true, "audio track is muted");
Assert.deepEqual(
await getAudioTrackEvents(),
["mute", "unmute", "mute"],
"mute fired again"
);
// 6. Unmute microphone & enable video (only video should flow)
observerPromise = expectObserverCalled("recording-device-events", 2);
await setMicrophoneMuted(false);
await setTrackEnabled(null, true);
await BrowserTestUtils.waitForCondition(
() =>
window.gIdentityHandler._sharingState.webRTC.camera ==
STATE_CAPTURE_ENABLED,
"video should be enabled"
);
await observerPromise;
// Only video should show as running, as audio track is still disabled.
await checkSharingUI({
video: STATE_CAPTURE_ENABLED,
audio: STATE_CAPTURE_DISABLED,
});
is(await getAudioTrackMuted(), false, "audio track is unmuted");
Assert.deepEqual(
await getAudioTrackEvents(),
["mute", "unmute", "mute", "unmute"],
"unmute fired even though track is disabled"
);
// 7. Enable audio track again (audio should flow).
observerPromise = expectObserverCalled("recording-device-events");
await setTrackEnabled(true, null);
await BrowserTestUtils.waitForCondition(
() =>
window.gIdentityHandler._sharingState.webRTC.microphone ==
STATE_CAPTURE_ENABLED,
"audio should be enabled"
);
await observerPromise;
// The identity UI should show both as running again.
await checkSharingUI({
video: STATE_CAPTURE_ENABLED,
audio: STATE_CAPTURE_ENABLED,
});
is(await getAudioTrackMuted(), false, "audio track remains unmuted");
Assert.deepEqual(
await getAudioTrackEvents(),
["mute", "unmute", "mute", "unmute"],
"no new events fired"
);
await closeStream();
},
},
];
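
The combination tests above lean on a distinction worth keeping in mind: track.enabled is page-controlled, track.muted is UA-controlled, and audio flows only when a track is both enabled and unmuted. A standalone sketch, outside the test harness:

// Illustrative only; run inside an async function in a page with
// microphone permission.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const [audioTrack] = stream.getAudioTracks();
audioTrack.enabled = false;  // page-side pause; fires no "mute" event
console.assert(audioTrack.muted === false);
// If the UA now mutes (here, via "getUserMedia:muteAudio"), audioTrack.muted
// flips to true and a "mute" event fires even while the track is disabled,
// which is exactly what step 2 of the combination test asserts.
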
add_task(async function test() {

View file

@@ -25,6 +25,7 @@ function message(m) {
var gStreams = [];
var gVideoEvents = [];
var gAudioEvents = [];
async function requestDevice(aAudio, aVideo, aShare, aBadDevice = false) {
const opts = {video: aVideo, audio: aAudio};
@@ -52,10 +53,18 @@ async function requestDevice(aAudio, aVideo, aShare, aBadDevice = false) {
try {
const stream = await navigator.mediaDevices.getUserMedia(opts)
gStreams.push(stream);
const track = stream.getVideoTracks()[0];
if (track) {
const videoTrack = stream.getVideoTracks()[0];
if (videoTrack) {
for (const name of ["mute", "unmute", "ended"]) {
track.addEventListener(name, () => gVideoEvents.push(name));
videoTrack.addEventListener(name, () => gVideoEvents.push(name));
}
}
const audioTrack = stream.getAudioTracks()[0];
if (audioTrack) {
for (const name of ["mute", "unmute", "ended"]) {
audioTrack.addEventListener(name, () => gAudioEvents.push(name));
}
}
message("ok");
@@ -74,6 +83,7 @@ function closeStream() {
}
gStreams = [];
gVideoEvents = [];
gAudioEvents = [];
message("closed");
}
</script>
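
Reduced to its essence, the audio event bookkeeping added to this content file follows the standard MediaStreamTrack event pattern; a minimal, self-contained sketch (variable names here are illustrative):

// Record UA-driven mute/unmute transitions on a microphone track.
// Run inside an async function in a page with microphone permission.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const [track] = stream.getAudioTracks();
const audioEvents = [];
for (const name of ["mute", "unmute", "ended"]) {
  track.addEventListener(name, () => audioEvents.push(name));
}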

View file

@@ -409,6 +409,7 @@ class SourceListener : public SupportsWeakPtr {
* Mutes or unmutes the associated video device if it is a camera.
*/
void MuteOrUnmuteCamera(bool aMute);
void MuteOrUnmuteMicrophone(bool aMute);
MediaDevice* GetAudioDevice() const {
return mAudioDeviceState ? mAudioDeviceState->mDevice.get() : nullptr;
@@ -529,7 +530,7 @@ class GetUserMediaWindowListener {
mInactiveListeners.RemoveElement(aListener);
aListener->Activate(std::move(aAudioDevice), std::move(aAudioTrackSource),
std::move(aVideoDevice), std::move(aVideoTrackSource),
mCamerasAreMuted, /* aStartAudioMuted */ false);
mCamerasAreMuted, mMicrophonesAreMuted);
mActiveListeners.AppendElement(std::move(aListener));
}
@@ -675,6 +676,7 @@ class GetUserMediaWindowListener {
void StopRawID(const nsString& removedDeviceID);
void MuteOrUnmuteCameras(bool aMute);
void MuteOrUnmuteMicrophones(bool aMute);
/**
* Called by one of our SourceListeners when one of its tracks has changed so
@@ -750,11 +752,12 @@ class GetUserMediaWindowListener {
nsTArray<RefPtr<SourceListener>> mInactiveListeners;
nsTArray<RefPtr<SourceListener>> mActiveListeners;
// Whether camera access in this window is currently User Agent (UA) muted.
// When true, new camera tracks must start out muted, to avoid JS
// circumventing UA mute by calling getUserMedia again.
// Per-camera UA muting is not supported.
// Whether camera and microphone access in this window are currently
// User Agent (UA) muted. When true, new and cloned tracks must start
// out muted, to avoid JS circumventing UA mute. Per-camera and
// per-microphone UA muting is not supported.
bool mCamerasAreMuted = false;
bool mMicrophonesAreMuted = false;
};
class LocalTrackSource : public MediaStreamTrackSource {
@@ -2051,8 +2054,8 @@ MediaManager* MediaManager::Get() {
obs->AddObserver(sSingleton, "getUserMedia:revoke", false);
obs->AddObserver(sSingleton, "getUserMedia:muteVideo", false);
obs->AddObserver(sSingleton, "getUserMedia:unmuteVideo", false);
obs->AddObserver(sSingleton, "application-background", false);
obs->AddObserver(sSingleton, "application-foreground", false);
obs->AddObserver(sSingleton, "getUserMedia:muteAudio", false);
obs->AddObserver(sSingleton, "getUserMedia:unmuteAudio", false);
}
// else MediaManager won't work properly and will leak (see bug 837874)
nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
@@ -3461,6 +3464,16 @@ void MediaManager::OnCameraMute(bool aMute) {
}
}
void MediaManager::OnMicrophoneMute(bool aMute) {
MOZ_ASSERT(NS_IsMainThread());
LOG("OnMicrophoneMute for all windows");
// This is safe since we're on main-thread, and the windowlist can only
// be added to from the main-thread
for (auto iter = mActiveWindows.Iter(); !iter.Done(); iter.Next()) {
iter.UserData()->MuteOrUnmuteMicrophones(aMute);
}
}
void MediaManager::AddWindowID(uint64_t aWindowId,
RefPtr<GetUserMediaWindowListener> aListener) {
MOZ_ASSERT(NS_IsMainThread());
@@ -3591,6 +3604,8 @@ void MediaManager::Shutdown() {
obs->RemoveObserver(this, "getUserMedia:revoke");
obs->RemoveObserver(this, "getUserMedia:muteVideo");
obs->RemoveObserver(this, "getUserMedia:unmuteVideo");
obs->RemoveObserver(this, "getUserMedia:muteAudio");
obs->RemoveObserver(this, "getUserMedia:unmuteAudio");
obs->RemoveObserver(this, "application-background");
obs->RemoveObserver(this, "application-foreground");
@@ -3878,6 +3893,10 @@ nsresult MediaManager::Observe(nsISupports* aSubject, const char* aTopic,
!strcmp(aTopic, "getUserMedia:unmuteVideo")) {
OnCameraMute(!strcmp(aTopic, "getUserMedia:muteVideo"));
return NS_OK;
} else if (!strcmp(aTopic, "getUserMedia:muteAudio") ||
!strcmp(aTopic, "getUserMedia:unmuteAudio")) {
OnMicrophoneMute(!strcmp(aTopic, "getUserMedia:muteAudio"));
return NS_OK;
} else if ((!strcmp(aTopic, "application-background") ||
!strcmp(aTopic, "application-foreground")) &&
StaticPrefs::media_getusermedia_camera_background_mute_enabled()) {
@@ -3889,8 +3908,8 @@ nsresult MediaManager::Observe(nsISupports* aSubject, const char* aTopic,
// NOTE: If a mobile device ever wants to implement "getUserMedia:muteVideo"
// as well, it'd need to update this code to handle & test the combinations.
OnCameraMute(!strcmp(aTopic, "application-background"));
return NS_OK;
}
return NS_OK;
}
@@ -4674,6 +4693,22 @@ void SourceListener::MuteOrUnmuteCamera(bool aMute) {
}
}
void SourceListener::MuteOrUnmuteMicrophone(bool aMute) {
MOZ_ASSERT(NS_IsMainThread());
if (mStopped) {
return;
}
MOZ_RELEASE_ASSERT(mWindowListener);
LOG("SourceListener %p MuteOrUnmuteMicrophone", this);
if (mAudioDeviceState && (mAudioDeviceState->mDevice->GetMediaSource() ==
MediaSourceEnum::Microphone)) {
SetMutedFor(mAudioDeviceState->mTrackSource, aMute);
}
}
bool SourceListener::CapturingVideo() const {
MOZ_ASSERT(NS_IsMainThread());
return Activated() && mVideoDeviceState && !mVideoDeviceState->mStopped &&
@@ -4838,6 +4873,21 @@ void GetUserMediaWindowListener::MuteOrUnmuteCameras(bool aMute) {
}
}
void GetUserMediaWindowListener::MuteOrUnmuteMicrophones(bool aMute) {
MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
if (mMicrophonesAreMuted == aMute) {
return;
}
mMicrophonesAreMuted = aMute;
for (auto& source : mActiveListeners) {
if (source->GetAudioDevice()) {
source->MuteOrUnmuteMicrophone(aMute);
}
}
}
void GetUserMediaWindowListener::ChromeAffectingStateChanged() {
MOZ_ASSERT(NS_IsMainThread());
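
Taken together, the C++ changes wire a single observer topic to a per-window fan-out: MediaManager::Observe() maps "getUserMedia:muteAudio" / "getUserMedia:unmuteAudio" to OnMicrophoneMute(), which walks the active windows; each GetUserMediaWindowListener records the new state and forwards it to its active listeners; and each SourceListener mutes only tracks whose source is a Microphone. The whole chain can be driven from JS in the content process with a single notification, which is how the tests above exercise it via SpecialPowers.spawn. A minimal sketch (the null subject and data are acceptable here, since the handler shown above mutes all windows regardless of the payload):

// Fired inside the content process, this reaches MediaManager::Observe and
// ultimately sets the muted state of every active microphone track.
Services.obs.notifyObservers(null, "getUserMedia:muteAudio", null);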

View file

@@ -255,6 +255,7 @@ class MediaManager final : public nsIMediaManagerService, public nsIObserver {
void OnNavigation(uint64_t aWindowID);
void OnCameraMute(bool aMute);
void OnMicrophoneMute(bool aMute);
bool IsActivelyCapturingOrHasAPermission(uint64_t aWindowId);
MediaEventSource<void>& DeviceListChangeEvent() {