2014-04-25 18:09:30 +04:00
|
|
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
2018-04-12 18:51:35 +03:00
|
|
|
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
2014-04-25 18:09:30 +04:00
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
|
|
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
#include "MediaTrackGraphImpl.h"
|
2015-11-27 07:40:30 +03:00
|
|
|
#include "mozilla/dom/AudioContext.h"
|
2018-08-27 16:11:41 +03:00
|
|
|
#include "mozilla/dom/AudioDeviceInfo.h"
|
2018-12-20 10:58:13 +03:00
|
|
|
#include "mozilla/dom/WorkletThread.h"
|
2016-01-21 19:51:36 +03:00
|
|
|
#include "mozilla/SharedThreadPool.h"
|
|
|
|
#include "mozilla/ClearOnShutdown.h"
|
2016-08-23 07:09:32 +03:00
|
|
|
#include "mozilla/Unused.h"
|
2019-07-29 15:50:39 +03:00
|
|
|
#include "mozilla/MathAlgorithms.h"
|
2019-03-19 13:40:12 +03:00
|
|
|
#include "CubebDeviceEnumerator.h"
|
2018-04-12 18:51:35 +03:00
|
|
|
#include "Tracing.h"
|
2014-04-25 18:09:30 +04:00
|
|
|
|
2016-03-15 18:11:30 +03:00
|
|
|
#ifdef MOZ_WEBRTC
|
2016-03-08 20:11:09 +03:00
|
|
|
# include "webrtc/MediaEngineWebRTC.h"
|
2016-03-15 18:11:30 +03:00
|
|
|
#endif
|
2016-03-08 20:11:09 +03:00
|
|
|
|
2014-08-26 19:02:31 +04:00
|
|
|
#ifdef XP_MACOSX
|
|
|
|
# include <sys/sysctl.h>
|
|
|
|
#endif
|
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
extern mozilla::LazyLogModule gMediaTrackGraphLog;
|
2017-02-06 18:22:36 +03:00
|
|
|
#ifdef LOG
|
|
|
|
# undef LOG
|
|
|
|
#endif // LOG
|
2019-10-02 13:23:02 +03:00
|
|
|
#define LOG(type, msg) MOZ_LOG(gMediaTrackGraphLog, type, msg)
|
2014-08-31 16:19:48 +04:00
|
|
|
|
2014-04-25 18:09:30 +04:00
|
|
|
namespace mozilla {
|
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
// A GraphDriver is tied to one MediaTrackGraphImpl for its whole lifetime;
// the raw back-pointer is stored here and never reseated.
GraphDriver::GraphDriver(MediaTrackGraphImpl* aGraphImpl)
    : mGraphImpl(aGraphImpl) {}
|
|
|
|
|
|
|
|
// Transfers iteration timing state from the driver being replaced
// (aPreviousDriver) into this driver, and records aPreviousDriver as our
// predecessor. Must be called with the graph monitor held, either on the
// graph thread or before this driver's thread is running.
void GraphDriver::SetState(GraphDriver* aPreviousDriver,
                           GraphTime aIterationStart, GraphTime aIterationEnd,
                           GraphTime aStateComputedTime) {
  MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
  GraphImpl()->GetMonitor().AssertCurrentThreadOwns();

  // Adopt the predecessor's clock position so graph time is continuous
  // across the driver switch.
  mIterationStart = aIterationStart;
  mIterationEnd = aIterationEnd;
  mStateComputedTime = aStateComputedTime;

  // A previous driver may only be installed once, and the caller must hand
  // over the driver that is currently running the graph.
  MOZ_ASSERT(!PreviousDriver());
  MOZ_ASSERT(aPreviousDriver);
  MOZ_DIAGNOSTIC_ASSERT(GraphImpl()->CurrentDriver() == aPreviousDriver);

  LOG(LogLevel::Debug,
      ("%p: Setting previous driver: %p (%s)", GraphImpl(), aPreviousDriver,
       aPreviousDriver->AsAudioCallbackDriver() ? "AudioCallbackDriver"
                                                : "SystemClockDriver"));

  SetPreviousDriver(aPreviousDriver);
}
|
|
|
|
|
|
|
|
// Requests that the graph switch to aNextDriver when the current iteration
// completes. Graph-thread only, with the monitor held. If a different next
// driver was already queued (and isn't the currently-running driver), it is
// silently replaced — only logged here; SetNextDriver does the overwrite.
void GraphDriver::SwitchAtNextIteration(GraphDriver* aNextDriver) {
  MOZ_ASSERT(OnGraphThread());
  MOZ_ASSERT(aNextDriver);
  GraphImpl()->GetMonitor().AssertCurrentThreadOwns();

  LOG(LogLevel::Debug,
      ("%p: Switching to new driver: %p (%s)", GraphImpl(), aNextDriver,
       aNextDriver->AsAudioCallbackDriver() ? "AudioCallbackDriver"
                                            : "SystemClockDriver"));
  // Note that a pending switch target is being discarded before it ever ran.
  if (mNextDriver && mNextDriver != GraphImpl()->CurrentDriver()) {
    LOG(LogLevel::Debug,
        ("%p: Discarding previous next driver: %p (%s)", GraphImpl(),
         mNextDriver.get(),
         mNextDriver->AsAudioCallbackDriver() ? "AudioCallbackDriver"
                                              : "SystemClockDriver"));
  }
  SetNextDriver(aNextDriver);
}
|
|
|
|
|
2019-03-06 23:12:25 +03:00
|
|
|
#ifdef DEBUG
|
|
|
|
bool GraphDriver::OnGraphThread() {
|
|
|
|
return GraphImpl()->RunByGraphDriver(this);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2015-12-01 13:47:31 +03:00
|
|
|
bool GraphDriver::Switching() {
|
2019-03-06 23:12:25 +03:00
|
|
|
MOZ_ASSERT(OnGraphThread());
|
2015-12-01 13:47:31 +03:00
|
|
|
GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
|
|
|
|
return mNextDriver || mPreviousDriver;
|
|
|
|
}
|
|
|
|
|
2018-05-07 20:35:56 +03:00
|
|
|
// Performs the actual hand-off to the queued next driver. The order here is
// deliberate: first seed the successor with our timing state (it records us
// as its previous driver), then publish it as the graph's current driver,
// then start it, and finally clear our queued-successor slot.
// Requires the graph monitor; callable on the graph thread or before this
// driver's thread runs.
void GraphDriver::SwitchToNextDriver() {
  MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
  GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
  MOZ_ASSERT(NextDriver());

  NextDriver()->SetState(this, mIterationStart, mIterationEnd,
                         mStateComputedTime);
  GraphImpl()->SetCurrentDriver(NextDriver());
  NextDriver()->Start();
  SetNextDriver(nullptr);
}
|
|
|
|
|
2015-12-01 13:47:31 +03:00
|
|
|
// Accessor for the queued successor driver (may be null). Requires the graph
// monitor; callable on the graph thread or before this driver's thread runs.
GraphDriver* GraphDriver::NextDriver() {
  MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
  GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
  return mNextDriver.get();
}
|
|
|
|
|
|
|
|
// Accessor for the predecessor driver awaiting release (may be null).
// Requires the graph monitor; callable on the graph thread or before this
// driver's thread runs.
GraphDriver* GraphDriver::PreviousDriver() {
  MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
  GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
  return mPreviousDriver.get();
}
|
|
|
|
|
|
|
|
// Installs (or clears, with nullptr) the queued successor driver. Requires
// the graph monitor; callable on the graph thread or before this driver's
// thread runs. A driver may not queue itself, and re-setting the same value
// is disallowed.
void GraphDriver::SetNextDriver(GraphDriver* aNextDriver) {
  MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
  GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
  MOZ_ASSERT(aNextDriver != this);
  MOZ_ASSERT(aNextDriver != mNextDriver);

  // If a previously-queued successor never got to run, log its discard.
  if (mNextDriver && mNextDriver != GraphImpl()->CurrentDriver()) {
    LOG(LogLevel::Debug,
        ("Discarding previous next driver: %p (%s)", mNextDriver.get(),
         mNextDriver->AsAudioCallbackDriver() ? "AudioCallbackDriver"
                                              : "SystemClockDriver"));
  }

  mNextDriver = aNextDriver;
}
|
|
|
|
|
|
|
|
// Installs (or clears, with nullptr) the predecessor driver that still needs
// to be shut down/released. Requires the graph monitor; callable on the
// graph thread or before this driver's thread runs.
void GraphDriver::SetPreviousDriver(GraphDriver* aPreviousDriver) {
  MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
  GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
  mPreviousDriver = aPreviousDriver;
}
|
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
// A ThreadedDriver drives the graph from a dedicated nsIThread (created in
// Start()); mThreadRunning is flipped by RunThread() on that thread.
ThreadedDriver::ThreadedDriver(MediaTrackGraphImpl* aGraphImpl)
    : GraphDriver(aGraphImpl), mThreadRunning(false) {}
|
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
// Main-thread runnable that shuts down a driver thread. nsIThread::Shutdown()
// must not be called from the thread being shut down, so ~ThreadedDriver
// hands its thread to this runnable and dispatches it to the main thread.
class MediaTrackGraphShutdownThreadRunnable : public Runnable {
 public:
  // Takes ownership of the thread to shut down.
  explicit MediaTrackGraphShutdownThreadRunnable(
      already_AddRefed<nsIThread> aThread)
      : Runnable("MediaTrackGraphShutdownThreadRunnable"), mThread(aThread) {}
  NS_IMETHOD Run() override {
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_ASSERT(mThread);

    // Synchronously spins down the thread, then drops our reference.
    mThread->Shutdown();
    mThread = nullptr;
    return NS_OK;
  }

 private:
  nsCOMPtr<nsIThread> mThread;
};
|
|
|
|
|
2014-09-03 17:52:43 +04:00
|
|
|
// If the driver thread still exists at destruction time, its shutdown is
// deferred to the main thread (nsIThread::Shutdown must not run on the
// thread itself); the runnable takes over the thread reference.
ThreadedDriver::~ThreadedDriver() {
  if (mThread) {
    nsCOMPtr<nsIRunnable> event =
        new MediaTrackGraphShutdownThreadRunnable(mThread.forget());
    SystemGroup::Dispatch(TaskCategory::Other, event.forget());
  }
}
|
2017-07-18 12:26:58 +03:00
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
// First runnable dispatched to a freshly-created ThreadedDriver thread: it
// releases any predecessor AudioCallbackDriver and then enters the driver's
// main loop (RunThread), which does not return until the driver stops or
// switches away.
class MediaTrackGraphInitThreadRunnable : public Runnable {
 public:
  explicit MediaTrackGraphInitThreadRunnable(ThreadedDriver* aDriver)
      : Runnable("MediaTrackGraphInitThreadRunnable"), mDriver(aDriver) {}
  NS_IMETHOD Run() override {
    MOZ_ASSERT(!mDriver->ThreadRunning());
    LOG(LogLevel::Debug, ("Starting a new system driver for graph %p",
                          mDriver->mGraphImpl.get()));

    // Snapshot the previous driver under the monitor, then operate on it
    // outside the lock.
    RefPtr<GraphDriver> previousDriver;
    {
      MonitorAutoLock mon(mDriver->mGraphImpl->GetMonitor());
      previousDriver = mDriver->PreviousDriver();
    }
    if (previousDriver) {
      LOG(LogLevel::Debug,
          ("%p releasing an AudioCallbackDriver(%p), for graph %p",
           mDriver.get(), previousDriver.get(), mDriver->GraphImpl()));
      MOZ_ASSERT(!mDriver->AsAudioCallbackDriver());
      // Audio-stream teardown happens asynchronously on the cubeb task
      // queue, not on this thread.
      RefPtr<AsyncCubebTask> releaseEvent =
          new AsyncCubebTask(previousDriver->AsAudioCallbackDriver(),
                             AsyncCubebOperation::SHUTDOWN);
      releaseEvent->Dispatch();

      // Re-take the monitor to clear the predecessor link.
      MonitorAutoLock mon(mDriver->mGraphImpl->GetMonitor());
      mDriver->SetPreviousDriver(nullptr);
    }

    // Blocks here for the lifetime of this driver's iteration loop.
    mDriver->RunThread();
    return NS_OK;
  }

 private:
  RefPtr<ThreadedDriver> mDriver;
};
|
|
|
|
|
2014-04-25 20:04:53 +04:00
|
|
|
// Creates the driver thread and dispatches the init runnable to it. The
// driver is considered started once the runnable begins executing; if thread
// creation fails, nothing is dispatched (failure is silently dropped here).
void ThreadedDriver::Start() {
  MOZ_ASSERT(!ThreadRunning());
  LOG(LogLevel::Debug,
      ("Starting thread for a SystemClockDriver %p", mGraphImpl.get()));
  Unused << NS_WARN_IF(mThread);
  MOZ_ASSERT(!mThread);  // Ensure we haven't already started it

  nsCOMPtr<nsIRunnable> event = new MediaTrackGraphInitThreadRunnable(this);
  // Note: mThread may be null during event->Run() if we pass to NewNamedThread!
  // See AudioInitTask
  nsresult rv = NS_NewNamedThread("MediaTrackGrph", getter_AddRefs(mThread));
  if (NS_SUCCEEDED(rv)) {
    mThread->EventTarget()->Dispatch(event.forget(), NS_DISPATCH_NORMAL);
  }
}
|
|
|
|
|
2017-09-28 05:05:36 +03:00
|
|
|
// Synchronously shuts down the driver thread from the main thread. Safe
// because by the time this is called the graph is no longer iterating on
// that thread.
void ThreadedDriver::Shutdown() {
  NS_ASSERTION(NS_IsMainThread(), "Must be called on main thread");
  // mGraph's thread is not running so it's OK to do whatever here
  LOG(LogLevel::Debug, ("Stopping threads for MediaTrackGraph %p", this));

  if (mThread) {
    LOG(LogLevel::Debug,
        ("%p: Stopping ThreadedDriver's %p thread", GraphImpl(), this));
    mThread->Shutdown();
    mThread = nullptr;
  }
}
|
|
|
|
|
2019-12-19 01:48:50 +03:00
|
|
|
// System-clock-based driver. All three timestamps start at "now"; aFallback
// marks drivers created as a temporary stand-in (e.g. while an audio driver
// starts up — see IsFallback()).
SystemClockDriver::SystemClockDriver(MediaTrackGraphImpl* aGraphImpl,
                                     FallbackMode aFallback)
    : ThreadedDriver(aGraphImpl),
      mInitialTimeStamp(TimeStamp::Now()),
      mCurrentTimeStamp(TimeStamp::Now()),
      mLastTimeStamp(TimeStamp::Now()),
      mIsFallback(aFallback == FallbackMode::Fallback) {}
|
2016-07-27 16:18:17 +03:00
|
|
|
|
2014-04-25 20:04:53 +04:00
|
|
|
// Nothing to release beyond the base class; use the compiler-generated
// destructor (clang-tidy: modernize-use-equals-default).
SystemClockDriver::~SystemClockDriver() = default;
|
2018-11-19 16:25:37 +03:00
|
|
|
|
2016-07-27 16:18:17 +03:00
|
|
|
// True when this driver was constructed with FallbackMode::Fallback.
bool SystemClockDriver::IsFallback() {
  return mIsFallback;
}
|
2018-11-19 16:25:37 +03:00
|
|
|
|
2014-04-25 20:04:53 +04:00
|
|
|
// Main iteration loop of a threaded driver; runs on the driver thread until
// the graph finishes processing or a driver switch is requested. Each pass:
// advance the iteration interval, clamp it against the already-computed
// state time, process one graph iteration, then sleep until the next one.
void ThreadedDriver::RunThread() {
  mThreadRunning = true;
  while (true) {
    mIterationStart = IterationEnd();
    mIterationEnd += GetIntervalForIteration();

    // The clock got ahead of how far the graph's state has been computed:
    // don't iterate past what has been computed.
    if (mStateComputedTime < mIterationEnd) {
      LOG(LogLevel::Warning, ("%p: Global underrun detected", GraphImpl()));
      mIterationEnd = mStateComputedTime;
    }

    if (mIterationStart >= mIterationEnd) {
      NS_ASSERTION(mIterationStart == mIterationEnd,
                   "Time can't go backwards!");
      // This could happen due to low clock resolution, maybe?
      LOG(LogLevel::Debug, ("%p: Time did not advance", GraphImpl()));
    }

    // Compute state AUDIO_TARGET_MS ahead of the iteration end, rounded to
    // a full audio block boundary.
    GraphTime nextStateComputedTime = GraphImpl()->RoundUpToEndOfAudioBlock(
        mIterationEnd + GraphImpl()->MillisecondsToMediaTime(AUDIO_TARGET_MS));
    if (nextStateComputedTime < mStateComputedTime) {
      // A previous driver may have been processing further ahead of
      // iterationEnd.
      LOG(LogLevel::Warning,
          ("%p: Prevent state from going backwards. interval[%ld; %ld] "
           "state[%ld; "
           "%ld]",
           GraphImpl(), (long)mIterationStart, (long)mIterationEnd,
           (long)mStateComputedTime, (long)nextStateComputedTime));
      nextStateComputedTime = mStateComputedTime;
    }
    LOG(LogLevel::Verbose,
        ("%p: interval[%ld; %ld] state[%ld; %ld]", GraphImpl(),
         (long)mIterationStart, (long)mIterationEnd, (long)mStateComputedTime,
         (long)nextStateComputedTime));

    bool stillProcessing = GraphImpl()->OneIteration(nextStateComputedTime);

    if (!stillProcessing) {
      // Enter shutdown mode. The stable-state handler will detect this and
      // complete shutdown.
      dom::WorkletThread::DeleteCycleCollectedJSContext();
      GraphImpl()->SignalMainThreadCleanup();
      break;
    }
    mStateComputedTime = nextStateComputedTime;
    // Sleep first, then check for a pending driver switch under the monitor
    // (the lock is held for the remainder of the loop body).
    WaitForNextIteration();
    MonitorAutoLock lock(GraphImpl()->GetMonitor());
    if (NextDriver()) {
      LOG(LogLevel::Debug,
          ("%p: Switching to AudioCallbackDriver", GraphImpl()));
      SwitchToNextDriver();
      break;
    }
  }
  mThreadRunning = false;
}
|
|
|
|
|
2015-07-23 08:15:49 +03:00
|
|
|
// Returns the media-time interval covered since the last call, measured
// against the wall clock, and advances mCurrentTimeStamp to "now".
MediaTime SystemClockDriver::GetIntervalForIteration() {
  TimeStamp now = TimeStamp::Now();
  MediaTime interval =
      GraphImpl()->SecondsToMediaTime((now - mCurrentTimeStamp).ToSeconds());
  mCurrentTimeStamp = now;

  MOZ_LOG(
      gMediaTrackGraphLog, LogLevel::Verbose,
      ("%p: Updating current time to %f (real %f, StateComputedTime() %f)",
       GraphImpl(), GraphImpl()->MediaTimeToSeconds(IterationEnd() + interval),
       (now - mInitialTimeStamp).ToSeconds(),
       GraphImpl()->MediaTimeToSeconds(mStateComputedTime)));

  return interval;
}
|
|
|
|
|
2019-12-19 01:49:52 +03:00
|
|
|
// Forwards to the wait helper so that a sleeping driver thread is woken for
// (at least) one more iteration.
void ThreadedDriver::EnsureNextIteration() {
  mWaitHelper.EnsureNextIteration();
}
|
|
|
|
|
2019-12-19 01:49:52 +03:00
|
|
|
// Blocks the driver thread until the next iteration is due, sleeping at
// least WaitInterval(). Must run on the driver's own thread.
void ThreadedDriver::WaitForNextIteration() {
  MOZ_ASSERT(mThread);
  MOZ_ASSERT(OnThread());
  mWaitHelper.WaitForNextIterationAtLeast(WaitInterval());
}
|
|
|
|
|
2018-10-30 12:48:08 +03:00
|
|
|
// Computes how long the driver thread should sleep before the next
// iteration: the target period minus the time already spent since the last
// iteration's timestamp, clamped to [0, 60s]. Driver-thread only.
TimeDuration SystemClockDriver::WaitInterval() {
  MOZ_ASSERT(mThread);
  MOZ_ASSERT(OnThread());
  TimeStamp now = TimeStamp::Now();
  // Can go negative if the last iteration overran the target period.
  int64_t timeoutMS = MEDIA_GRAPH_TARGET_PERIOD_MS -
                      int64_t((now - mCurrentTimeStamp).ToMilliseconds());
  // Make sure timeoutMS doesn't overflow 32 bits by waking up at
  // least once a minute, if we need to wake up at all
  timeoutMS = std::max<int64_t>(0, std::min<int64_t>(timeoutMS, 60 * 1000));
  LOG(LogLevel::Verbose,
      ("%p: Waiting for next iteration; at %f, timeout=%f", GraphImpl(),
       (now - mInitialTimeStamp).ToSeconds(), timeoutMS / 1000.0));

  return TimeDuration::FromMilliseconds(timeoutMS);
}
|
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
// Driver for offline (faster-than-realtime) rendering: each iteration
// advances by a fixed slice (aSlice, in the unit GetIntervalForIteration
// converts from — milliseconds, per MillisecondsToMediaTime) instead of
// wall-clock time.
OfflineClockDriver::OfflineClockDriver(MediaTrackGraphImpl* aGraphImpl,
                                       GraphTime aSlice)
    : ThreadedDriver(aGraphImpl), mSlice(aSlice) {}
|
2014-04-25 18:09:30 +04:00
|
|
|
|
|
|
|
// Nothing to release beyond the base class; use the compiler-generated
// destructor (clang-tidy: modernize-use-equals-default).
OfflineClockDriver::~OfflineClockDriver() = default;
|
|
|
|
|
2015-07-23 08:15:49 +03:00
|
|
|
// Offline rendering always advances by the fixed slice configured at
// construction, independent of wall-clock time.
MediaTime OfflineClockDriver::GetIntervalForIteration() {
  const GraphTime sliceMs = mSlice;
  return GraphImpl()->MillisecondsToMediaTime(sliceMs);
}
|
|
|
|
|
2017-06-12 22:34:10 +03:00
|
|
|
// Runnable wrapping an asynchronous cubeb operation (INIT or SHUTDOWN) on an
// AudioCallbackDriver. mShutdownGrip keeps the graph alive for the duration
// of the task. Only INIT may legitimately run before an audio stream exists.
AsyncCubebTask::AsyncCubebTask(AudioCallbackDriver* aDriver,
                               AsyncCubebOperation aOperation)
    : Runnable("AsyncCubebTask"),
      mDriver(aDriver),
      mOperation(aOperation),
      mShutdownGrip(aDriver->GraphImpl()) {
  NS_WARNING_ASSERTION(
      mDriver->mAudioStream || aOperation == AsyncCubebOperation::INIT,
      "No audio stream!");
}
|
|
|
|
|
|
|
|
// Members clean themselves up (RefPtr/grip); use the compiler-generated
// destructor (clang-tidy: modernize-use-equals-default).
AsyncCubebTask::~AsyncCubebTask() = default;
|
2014-04-25 18:09:30 +04:00
|
|
|
|
2016-01-21 19:51:36 +03:00
|
|
|
NS_IMETHODIMP
|
|
|
|
AsyncCubebTask::Run() {
|
2014-08-26 19:02:07 +04:00
|
|
|
MOZ_ASSERT(mDriver);
|
|
|
|
|
|
|
|
switch (mOperation) {
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
case AsyncCubebOperation::INIT: {
|
2018-04-17 18:11:13 +03:00
|
|
|
LOG(LogLevel::Debug, ("%p: AsyncCubebOperation::INIT driver=%p",
|
|
|
|
mDriver->GraphImpl(), mDriver.get()));
|
2017-01-11 22:51:23 +03:00
|
|
|
if (!mDriver->Init()) {
|
2018-03-16 22:09:55 +03:00
|
|
|
LOG(LogLevel::Warning,
|
|
|
|
("AsyncCubebOperation::INIT failed for driver=%p", mDriver.get()));
|
2017-01-11 22:51:23 +03:00
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
mDriver->CompleteAudioContextOperations(mOperation);
|
2014-08-26 19:02:07 +04:00
|
|
|
break;
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
case AsyncCubebOperation::SHUTDOWN: {
|
2018-04-17 18:11:13 +03:00
|
|
|
LOG(LogLevel::Debug, ("%p: AsyncCubebOperation::SHUTDOWN driver=%p",
|
|
|
|
mDriver->GraphImpl(), mDriver.get()));
|
2014-08-26 19:02:07 +04:00
|
|
|
mDriver->Stop();
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
|
|
|
|
mDriver->CompleteAudioContextOperations(mOperation);
|
|
|
|
|
2014-08-26 19:02:07 +04:00
|
|
|
mDriver = nullptr;
|
2014-09-03 17:52:43 +04:00
|
|
|
mShutdownGrip = nullptr;
|
2014-08-26 19:02:07 +04:00
|
|
|
break;
|
2014-08-26 19:02:08 +04:00
|
|
|
}
|
2014-08-26 19:02:07 +04:00
|
|
|
default:
|
|
|
|
MOZ_CRASH("Operation not implemented.");
|
|
|
|
}
|
|
|
|
|
2016-01-21 19:51:36 +03:00
|
|
|
// The thread will kill itself after a bit
|
2014-08-26 19:02:07 +04:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
// Bundles a track with the (type-erased, main-thread-only) promise and the
// AudioContext operation that should resolve it once the driver has executed
// the operation. The promise is stored as void* because dom::Promise is not
// thread-safe; it is only cast back on the main thread.
TrackAndPromiseForOperation::TrackAndPromiseForOperation(
    MediaTrack* aTrack, void* aPromise, dom::AudioContextOperation aOperation,
    dom::AudioContextOperationFlags aFlags)
    : mTrack(aTrack),
      mPromise(aPromise),
      mOperation(aOperation),
      mFlags(aFlags) {}
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
AudioCallbackDriver::AudioCallbackDriver(MediaTrackGraphImpl* aGraphImpl,
                                         uint32_t aOutputChannelCount,
                                         uint32_t aInputChannelCount,
                                         AudioInputType aAudioInputType)
    : GraphDriver(aGraphImpl),
      mOutputChannels(aOutputChannelCount),
      mSampleRate(0),
      mInputChannelCount(aInputChannelCount),
      mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS),
      mStarted(false),
      mInitShutdownThread(
          SharedThreadPool::Get(NS_LITERAL_CSTRING("CubebOperation"), 1)),
      mAddedMixer(false),
      mAudioThreadId(std::thread::id()),
      mAudioThreadRunning(false),
      mShouldFallbackIfError(false),
      mFromFallback(false) {
  LOG(LogLevel::Debug, ("%p: AudioCallbackDriver ctor", GraphImpl()));

  // Let the "CubebOperation" thread die quickly when it is idle; blocking
  // cubeb init/shutdown work is only occasional.
  constexpr uint32_t kIdleThreadTimeoutMs = 2000;
  mInitShutdownThread->SetIdleThreadTimeout(
      PR_MillisecondsToInterval(kIdleThreadTimeoutMs));

#if defined(XP_WIN)
  // Content processes need to be told about default-device changes by the
  // parent; matched by Unregister() in the destructor.
  if (XRE_IsContentProcess()) {
    audio::AudioNotificationReceiver::Register(this);
  }
#endif
  // Voice input selects cubeb's voice-processing device preference and flips
  // the platform "in communication" state (undone in the destructor).
  if (aAudioInputType == AudioInputType::Voice) {
    LOG(LogLevel::Debug, ("VOICE."));
    mInputDevicePreference = CUBEB_DEVICE_PREF_VOICE;
    CubebUtils::SetInCommunication(true);
  } else {
    mInputDevicePreference = CUBEB_DEVICE_PREF_ALL;
  }
}
|
|
|
|
|
|
|
|
AudioCallbackDriver::~AudioCallbackDriver() {
  // Every AudioContext operation promise must have been completed (see
  // CompleteAudioContextOperations) before the driver is destroyed.
  MOZ_ASSERT(mPromisesForOperation.IsEmpty());
  // RemoveMixerCallback() must have run by now.
  MOZ_ASSERT(!mAddedMixer);
#if defined(XP_WIN)
  // Mirrors the Register() call in the constructor.
  if (XRE_IsContentProcess()) {
    audio::AudioNotificationReceiver::Unregister(this);
  }
#endif
  // Mirrors SetInCommunication(true) done in the constructor for voice input.
  if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) {
    CubebUtils::SetInCommunication(false);
  }
}
|
2014-08-26 19:01:33 +04:00
|
|
|
|
2016-10-14 19:35:35 +03:00
|
|
|
/* Returns true when running on an Apple MacBook or MacBook Air (but not a
 * MacBook Pro); false on other models and on non-macOS platforms. Used by
 * Init() to cap the minimum audio latency on machines that cannot sustain
 * very low-latency callbacks. */
bool IsMacbookOrMacbookAir() {
#ifdef XP_MACOSX
  size_t len = 0;
  // First call with a null buffer only queries the length of the model
  // string (including the NUL terminator).
  sysctlbyname("hw.model", NULL, &len, NULL, 0);
  if (len) {
    UniquePtr<char[]> model(new char[len]);
    // This string can be
    // MacBook%d,%d for a normal MacBook
    // MacBookPro%d,%d for a MacBook Pro
    // MacBookAir%d,%d for a Macbook Air
    sysctlbyname("hw.model", model.get(), &len, NULL, 0);
    char* substring = strstr(model.get(), "MacBook");
    if (substring) {
      const size_t offset = strlen("MacBook");
      // A plain MacBook has a digit immediately after the "MacBook" prefix
      // (e.g. "MacBook9,1"). The previous code tested model[offset + 1],
      // which skipped that first character and so missed models with a
      // single-digit major version; it also indexed from the buffer start
      // rather than the strstr() match. Index from the match, and cast to
      // unsigned char before isdigit() to avoid UB on negative char values.
      if (!strncmp(substring + offset, "Air", 3) ||
          isdigit(static_cast<unsigned char>(substring[offset]))) {
        return true;
      }
    }
  }
#endif
  return false;
}
|
|
|
|
|
2014-08-26 19:02:07 +04:00
|
|
|
bool AudioCallbackDriver::Init() {
|
2016-08-31 03:20:10 +03:00
|
|
|
cubeb* cubebContext = CubebUtils::GetCubebContext();
|
|
|
|
if (!cubebContext) {
|
|
|
|
NS_WARNING("Could not get cubeb context.");
|
2018-03-19 21:46:36 +03:00
|
|
|
LOG(LogLevel::Warning, ("%s: Could not get cubeb context", __func__));
|
2016-09-16 03:54:24 +03:00
|
|
|
if (!mFromFallback) {
|
|
|
|
CubebUtils::ReportCubebStreamInitFailure(true);
|
|
|
|
}
|
2018-03-19 21:46:36 +03:00
|
|
|
FallbackToSystemClockDriver();
|
|
|
|
return true;
|
2016-08-31 03:20:10 +03:00
|
|
|
}
|
|
|
|
|
2016-01-21 19:51:36 +03:00
|
|
|
cubeb_stream_params output;
|
|
|
|
cubeb_stream_params input;
|
2016-06-23 18:50:52 +03:00
|
|
|
bool firstStream = CubebUtils::GetFirstStream();
|
2014-08-26 19:01:33 +04:00
|
|
|
|
2014-08-26 19:02:07 +04:00
|
|
|
MOZ_ASSERT(!NS_IsMainThread(),
|
|
|
|
"This is blocking and should never run on the main thread.");
|
|
|
|
|
2018-04-03 20:02:15 +03:00
|
|
|
mSampleRate = output.rate = mGraphImpl->GraphRate();
|
2014-08-26 19:01:33 +04:00
|
|
|
|
|
|
|
if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
|
2016-01-21 19:51:36 +03:00
|
|
|
output.format = CUBEB_SAMPLE_S16NE;
|
2014-08-26 19:01:33 +04:00
|
|
|
} else {
|
2016-01-21 19:51:36 +03:00
|
|
|
output.format = CUBEB_SAMPLE_FLOAT32NE;
|
2014-08-26 19:01:33 +04:00
|
|
|
}
|
|
|
|
|
2017-11-28 13:57:02 +03:00
|
|
|
if (!mOutputChannels) {
|
|
|
|
LOG(LogLevel::Warning, ("Output number of channels is 0."));
|
|
|
|
FallbackToSystemClockDriver();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-08-01 15:16:30 +03:00
|
|
|
CubebUtils::AudioDeviceID forcedOutputDeviceId = nullptr;
|
|
|
|
|
|
|
|
char* forcedOutputDeviceName = CubebUtils::GetForcedOutputDevice();
|
|
|
|
if (forcedOutputDeviceName) {
|
2019-03-19 13:40:12 +03:00
|
|
|
RefPtr<CubebDeviceEnumerator> enumerator = Enumerator::GetInstance();
|
|
|
|
RefPtr<AudioDeviceInfo> device = enumerator->DeviceInfoFromName(
|
|
|
|
NS_ConvertUTF8toUTF16(forcedOutputDeviceName), EnumeratorSide::OUTPUT);
|
2019-03-25 15:18:25 +03:00
|
|
|
if (device && device->DeviceID()) {
|
2019-03-19 13:40:12 +03:00
|
|
|
forcedOutputDeviceId = device->DeviceID();
|
2018-08-01 15:16:30 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-28 17:57:46 +03:00
|
|
|
mBuffer = AudioCallbackBufferWrapper<AudioDataValue>(mOutputChannels);
|
|
|
|
mScratchBuffer =
|
|
|
|
SpillBuffer<AudioDataValue, WEBAUDIO_BLOCK_SIZE * 2>(mOutputChannels);
|
|
|
|
|
2017-09-26 13:05:01 +03:00
|
|
|
output.channels = mOutputChannels;
|
2019-11-19 21:23:21 +03:00
|
|
|
AudioConfig::ChannelLayout::ChannelMap channelMap =
|
|
|
|
AudioConfig::ChannelLayout(mOutputChannels).Map();
|
|
|
|
|
|
|
|
output.layout = static_cast<uint32_t>(channelMap);
|
2018-04-30 10:59:32 +03:00
|
|
|
output.prefs = CubebUtils::GetDefaultStreamPrefs();
|
2019-05-07 18:19:32 +03:00
|
|
|
#if !defined(XP_WIN)
|
2019-04-16 18:42:42 +03:00
|
|
|
if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) {
|
|
|
|
output.prefs |= static_cast<cubeb_stream_prefs>(CUBEB_STREAM_PREF_VOICE);
|
|
|
|
}
|
2019-05-07 18:19:32 +03:00
|
|
|
#endif
|
2017-01-20 17:54:00 +03:00
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
uint32_t latencyFrames = CubebUtils::GetCubebMTGLatencyInFrames(&output);
|
2014-08-26 19:01:33 +04:00
|
|
|
|
2016-10-14 19:35:35 +03:00
|
|
|
// Macbook and MacBook air don't have enough CPU to run very low latency
|
2019-10-02 13:23:02 +03:00
|
|
|
// MediaTrackGraphs, cap the minimal latency to 512 frames int this case.
|
2016-10-14 19:35:35 +03:00
|
|
|
if (IsMacbookOrMacbookAir()) {
|
2019-07-29 15:50:44 +03:00
|
|
|
latencyFrames = std::max((uint32_t)512, latencyFrames);
|
2016-10-14 19:35:35 +03:00
|
|
|
}
|
|
|
|
|
2019-07-29 15:50:39 +03:00
|
|
|
// On OSX, having a latency that is lower than 10ms is very common. It's
|
|
|
|
// not very useful when doing voice, because all the WebRTC code deal in 10ms
|
|
|
|
// chunks of audio. Take the first power of two above 10ms at the current
|
|
|
|
// rate in this case. It's probably 512, for common rates.
|
|
|
|
#if defined(XP_MACOSX)
|
|
|
|
if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) {
|
2019-07-29 15:50:44 +03:00
|
|
|
if (latencyFrames < mSampleRate / 100) {
|
|
|
|
latencyFrames = mozilla::RoundUpPow2(mSampleRate / 100);
|
2019-07-29 15:50:39 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2019-07-29 15:50:44 +03:00
|
|
|
LOG(LogLevel::Debug, ("Effective latency in frames: %d", latencyFrames));
|
2019-07-29 15:50:39 +03:00
|
|
|
|
2017-06-01 19:26:17 +03:00
|
|
|
input = output;
|
2018-04-30 17:01:56 +03:00
|
|
|
input.channels = mInputChannelCount;
|
2017-06-02 09:11:56 +03:00
|
|
|
input.layout = CUBEB_LAYOUT_UNDEFINED;
|
|
|
|
|
2016-03-08 20:11:09 +03:00
|
|
|
cubeb_stream* stream = nullptr;
|
2018-04-30 17:01:56 +03:00
|
|
|
bool inputWanted = mInputChannelCount > 0;
|
2019-07-29 15:50:44 +03:00
|
|
|
CubebUtils::AudioDeviceID outputId = GraphImpl()->mOutputDeviceID;
|
|
|
|
CubebUtils::AudioDeviceID inputId = GraphImpl()->mInputDeviceID;
|
2018-04-30 17:01:56 +03:00
|
|
|
|
|
|
|
// XXX Only pass input input if we have an input listener. Always
|
|
|
|
// set up output because it's easier, and it will just get silence.
|
2019-07-29 15:50:44 +03:00
|
|
|
if (cubeb_stream_init(cubebContext, &stream, "AudioCallbackDriver", inputId,
|
2018-04-30 17:01:56 +03:00
|
|
|
inputWanted ? &input : nullptr,
|
2019-07-29 15:50:44 +03:00
|
|
|
forcedOutputDeviceId ? forcedOutputDeviceId : outputId,
|
|
|
|
&output, latencyFrames, DataCallback_s, StateCallback_s,
|
|
|
|
this) == CUBEB_OK) {
|
2018-04-30 17:01:56 +03:00
|
|
|
mAudioStream.own(stream);
|
|
|
|
DebugOnly<int> rv =
|
|
|
|
cubeb_stream_set_volume(mAudioStream, CubebUtils::GetVolumeScale());
|
|
|
|
NS_WARNING_ASSERTION(
|
|
|
|
rv == CUBEB_OK,
|
|
|
|
"Could not set the audio stream volume in GraphDriver.cpp");
|
|
|
|
CubebUtils::ReportCubebBackendUsed();
|
|
|
|
} else {
|
|
|
|
NS_WARNING(
|
2019-10-02 13:23:02 +03:00
|
|
|
"Could not create a cubeb stream for MediaTrackGraph, falling "
|
2018-04-30 17:01:56 +03:00
|
|
|
"back to a SystemClockDriver");
|
|
|
|
// Only report failures when we're not coming from a driver that was
|
|
|
|
// created itself as a fallback driver because of a previous audio driver
|
|
|
|
// failure.
|
|
|
|
if (!mFromFallback) {
|
|
|
|
CubebUtils::ReportCubebStreamInitFailure(firstStream);
|
2016-04-01 07:18:13 +03:00
|
|
|
}
|
2018-04-30 17:01:56 +03:00
|
|
|
FallbackToSystemClockDriver();
|
|
|
|
return true;
|
2014-08-26 19:02:07 +04:00
|
|
|
}
|
2016-04-13 21:31:35 +03:00
|
|
|
|
2018-04-30 17:01:56 +03:00
|
|
|
#ifdef XP_MACOSX
|
|
|
|
PanOutputIfNeeded(inputWanted);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
cubeb_stream_register_device_changed_callback(
|
|
|
|
mAudioStream, AudioCallbackDriver::DeviceChangedCallback_s);
|
2014-08-26 19:02:31 +04:00
|
|
|
|
2017-01-11 22:51:23 +03:00
|
|
|
if (!StartStream()) {
|
2018-04-30 17:01:56 +03:00
|
|
|
LOG(LogLevel::Warning,
|
|
|
|
("%p: AudioCallbackDriver couldn't start a cubeb stream.",
|
|
|
|
GraphImpl()));
|
2017-01-11 22:51:23 +03:00
|
|
|
return false;
|
|
|
|
}
|
2014-08-26 19:02:07 +04:00
|
|
|
|
2018-04-30 17:01:56 +03:00
|
|
|
LOG(LogLevel::Debug, ("%p: AudioCallbackDriver started.", GraphImpl()));
|
2017-01-11 22:51:23 +03:00
|
|
|
return true;
|
2014-08-26 19:01:33 +04:00
|
|
|
}
|
|
|
|
|
2014-08-26 19:02:07 +04:00
|
|
|
void AudioCallbackDriver::Start() {
|
2018-05-25 11:58:45 +03:00
|
|
|
MOZ_ASSERT(!IsStarted());
|
|
|
|
MOZ_ASSERT(NS_IsMainThread() || OnCubebOperationThread() ||
|
2019-03-06 23:12:25 +03:00
|
|
|
(PreviousDriver() && PreviousDriver()->OnGraphThread()));
|
2016-01-21 19:51:36 +03:00
|
|
|
if (mPreviousDriver) {
|
|
|
|
if (mPreviousDriver->AsAudioCallbackDriver()) {
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug, ("Releasing audio driver off main thread."));
|
2016-01-21 19:51:36 +03:00
|
|
|
RefPtr<AsyncCubebTask> releaseEvent =
|
|
|
|
new AsyncCubebTask(mPreviousDriver->AsAudioCallbackDriver(),
|
|
|
|
AsyncCubebOperation::SHUTDOWN);
|
|
|
|
releaseEvent->Dispatch();
|
|
|
|
mPreviousDriver = nullptr;
|
|
|
|
} else {
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug,
|
|
|
|
("Dropping driver reference for SystemClockDriver."));
|
2016-07-27 16:18:17 +03:00
|
|
|
MOZ_ASSERT(mPreviousDriver->AsSystemClockDriver());
|
|
|
|
mFromFallback = mPreviousDriver->AsSystemClockDriver()->IsFallback();
|
2016-01-21 19:51:36 +03:00
|
|
|
mPreviousDriver = nullptr;
|
2016-01-22 04:28:23 +03:00
|
|
|
}
|
|
|
|
}
|
2016-01-21 19:51:36 +03:00
|
|
|
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug, ("Starting new audio driver off main thread, "
|
|
|
|
"to ensure it runs after previous shutdown."));
|
2016-01-21 19:51:36 +03:00
|
|
|
RefPtr<AsyncCubebTask> initEvent =
|
|
|
|
new AsyncCubebTask(AsAudioCallbackDriver(), AsyncCubebOperation::INIT);
|
2017-11-04 09:00:46 +03:00
|
|
|
initEvent->Dispatch();
|
2014-08-26 19:01:33 +04:00
|
|
|
}
|
|
|
|
|
2014-08-26 19:02:30 +04:00
|
|
|
// Start the underlying cubeb stream.
// Returns false (and leaves mStarted false) if cubeb refuses to start.
// Must run on the cubeb operation thread.
bool AudioCallbackDriver::StartStream() {
  MOZ_ASSERT(!IsStarted() && OnCubebOperationThread());
  // If the stream later errors out, fall back to a system clock driver
  // unless something clears this flag first.
  mShouldFallbackIfError = true;
  // Set mStarted before cubeb_stream_start, since starting the cubeb stream can
  // result in a callback (that may read mStarted) before mStarted would
  // otherwise be set to true.
  mStarted = true;
  if (cubeb_stream_start(mAudioStream) != CUBEB_OK) {
    NS_WARNING("Could not start cubeb stream for MTG.");
    // Undo the optimistic flip above; no callback will fire.
    mStarted = false;
    return false;
  }

  return true;
}
|
|
|
|
|
2014-08-26 19:01:33 +04:00
|
|
|
// Stop the cubeb stream. Must run on the cubeb operation thread.
// mStarted is cleared even if cubeb_stream_stop fails, so IsStarted()
// reflects the intended (stopped) state either way.
void AudioCallbackDriver::Stop() {
  MOZ_ASSERT(OnCubebOperationThread());
  if (cubeb_stream_stop(mAudioStream) != CUBEB_OK) {
    NS_WARNING("Could not stop cubeb stream for MTG.");
  }
  mStarted = false;
}
|
|
|
|
|
2018-06-29 11:05:56 +03:00
|
|
|
void AudioCallbackDriver::RemoveMixerCallback() {
|
2019-03-06 23:12:25 +03:00
|
|
|
MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
|
2018-06-29 11:05:56 +03:00
|
|
|
|
2016-01-21 19:51:36 +03:00
|
|
|
if (mAddedMixer) {
|
2018-04-17 18:11:13 +03:00
|
|
|
GraphImpl()->mMixer.RemoveCallback(this);
|
2016-01-21 19:51:36 +03:00
|
|
|
mAddedMixer = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-29 11:05:56 +03:00
|
|
|
void AudioCallbackDriver::AddMixerCallback() {
|
2019-03-06 23:12:25 +03:00
|
|
|
MOZ_ASSERT(OnGraphThread());
|
2018-06-29 11:05:56 +03:00
|
|
|
|
|
|
|
if (!mAddedMixer) {
|
2019-10-31 13:01:53 +03:00
|
|
|
mGraphImpl->mMixer.AddCallback(WrapNotNull(this));
|
2018-06-29 11:05:56 +03:00
|
|
|
mAddedMixer = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-28 05:05:36 +03:00
|
|
|
// Synchronously shut down the cubeb stream from the main thread.
// NOTE(review): NS_DISPATCH_SYNC blocks the main thread until the cubeb
// operation thread has completed the SHUTDOWN task.
void AudioCallbackDriver::Shutdown() {
  MOZ_ASSERT(NS_IsMainThread());
  LOG(LogLevel::Debug,
      ("%p: Releasing audio driver off main thread (GraphDriver::Shutdown).",
       GraphImpl()));
  RefPtr<AsyncCubebTask> releaseEvent =
      new AsyncCubebTask(this, AsyncCubebOperation::SHUTDOWN);
  releaseEvent->Dispatch(NS_DISPATCH_SYNC);
}
|
|
|
|
|
2017-08-29 12:45:44 +03:00
|
|
|
#if defined(XP_WIN)
|
|
|
|
void AudioCallbackDriver::ResetDefaultDevice() {
|
|
|
|
if (cubeb_stream_reset_default_device(mAudioStream) != CUBEB_OK) {
|
|
|
|
NS_WARNING("Could not reset cubeb stream to default output device.");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-02-26 01:05:29 +03:00
|
|
|
/* static */
|
|
|
|
long AudioCallbackDriver::DataCallback_s(cubeb_stream* aStream, void* aUser,
|
|
|
|
const void* aInputBuffer,
|
|
|
|
void* aOutputBuffer, long aFrames) {
|
2014-08-26 19:01:33 +04:00
|
|
|
AudioCallbackDriver* driver = reinterpret_cast<AudioCallbackDriver*>(aUser);
|
2016-01-21 19:51:36 +03:00
|
|
|
return driver->DataCallback(static_cast<const AudioDataValue*>(aInputBuffer),
|
2016-01-21 19:51:36 +03:00
|
|
|
static_cast<AudioDataValue*>(aOutputBuffer),
|
|
|
|
aFrames);
|
2014-08-26 19:01:33 +04:00
|
|
|
}
|
|
|
|
|
2019-02-26 01:05:29 +03:00
|
|
|
/* static */
|
|
|
|
void AudioCallbackDriver::StateCallback_s(cubeb_stream* aStream, void* aUser,
|
|
|
|
cubeb_state aState) {
|
2014-08-26 19:01:33 +04:00
|
|
|
AudioCallbackDriver* driver = reinterpret_cast<AudioCallbackDriver*>(aUser);
|
|
|
|
driver->StateCallback(aState);
|
|
|
|
}
|
|
|
|
|
2019-02-26 01:05:29 +03:00
|
|
|
/* static */
|
|
|
|
void AudioCallbackDriver::DeviceChangedCallback_s(void* aUser) {
|
2014-08-26 19:02:31 +04:00
|
|
|
AudioCallbackDriver* driver = reinterpret_cast<AudioCallbackDriver*>(aUser);
|
|
|
|
driver->DeviceChangedCallback();
|
|
|
|
}
|
|
|
|
|
2014-08-26 19:01:35 +04:00
|
|
|
// RAII marker: records the current thread as the driver's audio thread
// for the duration of a data callback (used by OnGraphThread checks).
AudioCallbackDriver::AutoInCallback::AutoInCallback(
    AudioCallbackDriver* aDriver)
    : mDriver(aDriver) {
  mDriver->mAudioThreadId = std::this_thread::get_id();
}
|
|
|
|
|
|
|
|
// Clear the recorded audio thread id when leaving the callback; a
// default-constructed std::thread::id matches no thread.
AudioCallbackDriver::AutoInCallback::~AutoInCallback() {
  mDriver->mAudioThreadId = std::thread::id();
}
|
|
|
|
|
2016-01-21 19:51:36 +03:00
|
|
|
// Real-time audio callback, invoked by cubeb on its audio thread.
// Consumes aInputBuffer (microphone), runs one graph iteration if needed,
// and fills aOutputBuffer with aFrames frames of mixed output.
// Returning exactly aFrames keeps the stream alive; returning less drains
// and stops it (used for shutdown and driver switching).
long AudioCallbackDriver::DataCallback(const AudioDataValue* aInputBuffer,
                                       AudioDataValue* aOutputBuffer,
                                       long aFrames) {
  TRACE_AUDIO_CALLBACK_BUDGET(aFrames, mSampleRate);
  TRACE_AUDIO_CALLBACK();

#ifdef DEBUG
  // Mark this thread as the graph/audio thread for assertion checks.
  AutoInCallback aic(this);
#endif

  // Don't add the callback until we're inited and ready
  AddMixerCallback();

  uint32_t durationMS = aFrames * 1000 / mSampleRate;

  // For now, simply average the duration with the previous
  // duration so there is some damping against sudden changes.
  if (!mIterationDurationMS) {
    mIterationDurationMS = durationMS;
  } else {
    // Exponential smoothing: 3 parts previous estimate, 1 part new sample.
    mIterationDurationMS = (mIterationDurationMS * 3) + durationMS;
    mIterationDurationMS /= 4;
  }

  mBuffer.SetBuffer(aOutputBuffer, aFrames);
  // fill part or all with leftover data from last iteration (since we
  // align to Audio blocks)
  mScratchBuffer.Empty(mBuffer);

  // State computed time is decided by the audio callback's buffer length. We
  // compute the iteration start and end from there, trying to keep the amount
  // of buffering in the graph constant.
  GraphTime nextStateComputedTime = GraphImpl()->RoundUpToEndOfAudioBlock(
      mStateComputedTime + mBuffer.Available());

  mIterationStart = mIterationEnd;
  // inGraph is the number of audio frames there is between the state time and
  // the current time, i.e. the maximum theoretical length of the interval we
  // could use as [mIterationStart; mIterationEnd].
  GraphTime inGraph = mStateComputedTime - mIterationStart;
  // We want the interval [mIterationStart; mIterationEnd] to be before the
  // interval [mStateComputedTime; nextStateComputedTime]. We also want
  // the distance between these intervals to be roughly equivalent each time, to
  // ensure there is no clock drift between current time and state time. Since
  // we can't act on the state time because we have to fill the audio buffer, we
  // reclock the current time against the state time, here.
  mIterationEnd = mIterationStart + 0.8 * inGraph;

  LOG(LogLevel::Verbose,
      ("%p: interval[%ld; %ld] state[%ld; %ld] (frames: %ld) (durationMS: %u) "
       "(duration ticks: %ld)",
       GraphImpl(), (long)mIterationStart, (long)mIterationEnd,
       (long)mStateComputedTime, (long)nextStateComputedTime, (long)aFrames,
       (uint32_t)durationMS,
       (long)(nextStateComputedTime - mStateComputedTime)));

  if (mStateComputedTime < mIterationEnd) {
    // The iteration interval has overtaken the state time: a global underrun.
    LOG(LogLevel::Error,
        ("%p: Media graph global underrun detected", GraphImpl()));
    MOZ_ASSERT_UNREACHABLE("We should not underrun in full duplex");
    mIterationEnd = mStateComputedTime;
  }

  // Process mic data if any/needed
  if (aInputBuffer && mInputChannelCount > 0) {
    GraphImpl()->NotifyInputData(aInputBuffer, static_cast<size_t>(aFrames),
                                 mSampleRate, mInputChannelCount);
  }

  bool stillProcessing;
  if (mBuffer.Available()) {
    // We totally filled the buffer (and mScratchBuffer isn't empty).
    // We don't need to run an iteration and if we do so we may overflow.
    stillProcessing = GraphImpl()->OneIteration(nextStateComputedTime);
    if (stillProcessing) {
      // Only advance the state time if the graph actually processed.
      mStateComputedTime = nextStateComputedTime;
    }
  } else {
    LOG(LogLevel::Verbose,
        ("%p: DataCallback buffer filled entirely from scratch "
         "buffer, skipping iteration.",
         GraphImpl()));
    stillProcessing = true;
  }

  mBuffer.BufferFilled();

  // Callback any observers for the AEC speaker data. Note that one
  // (maybe) of these will be full-duplex, the others will get their input
  // data off separate cubeb callbacks. Take care with how stuff is
  // removed/added to this list and TSAN issues, but input and output will
  // use separate callback methods.
  GraphImpl()->NotifyOutputData(aOutputBuffer, static_cast<size_t>(aFrames),
                                mSampleRate, mOutputChannels);

#ifdef XP_MACOSX
  // This only happens when the output is on a macbookpro's external speaker,
  // that are stereo, but let's just be safe.
  if (mNeedsPanning && mOutputChannels == 2) {
    // hard pan to the right
    for (uint32_t i = 0; i < aFrames * 2; i += 2) {
      aOutputBuffer[i + 1] += aOutputBuffer[i];
      aOutputBuffer[i] = 0.0;
    }
  }
#endif

  if (!stillProcessing) {
    // About to hand over control of the graph. Do not start a new driver if
    // StateCallback() receives an error for this stream while the main thread
    // or another driver has control of the graph.
    mShouldFallbackIfError = false;
    RemoveMixerCallback();
    // Update the flag before handing over the graph and going to drain.
    mAudioThreadRunning = false;
    // Enter shutdown mode. The stable-state handler will detect this
    // and complete shutdown if the graph does not get restarted.
    mGraphImpl->SignalMainThreadCleanup();
    // Returning less than aFrames requested drains the stream.
    return aFrames - 1;
  }

  bool switching = false;
  {
    MonitorAutoLock mon(GraphImpl()->GetMonitor());
    switching = !!NextDriver();
  }

  if (switching) {
    mShouldFallbackIfError = false;
    // If the audio stream has not been started by the previous driver or
    // the graph itself, keep it alive.
    MonitorAutoLock mon(GraphImpl()->GetMonitor());
    if (!IsStarted()) {
      return aFrames;
    }
    LOG(LogLevel::Debug, ("%p: Switching to system driver.", GraphImpl()));
    RemoveMixerCallback();
    mAudioThreadRunning = false;
    SwitchToNextDriver();
    // Returning less than aFrames starts the draining and eventually stops the
    // audio thread. This function will never get called again.
    return aFrames - 1;
  }

  return aFrames;
}
|
|
|
|
|
2019-05-14 19:00:22 +03:00
|
|
|
// Map a cubeb stream state to a human-readable name for logging.
// Crashes on any value outside the four known states.
static const char* StateToString(cubeb_state aState) {
  if (aState == CUBEB_STATE_STARTED) {
    return "STARTED";
  }
  if (aState == CUBEB_STATE_STOPPED) {
    return "STOPPED";
  }
  if (aState == CUBEB_STATE_DRAINED) {
    return "DRAINED";
  }
  if (aState == CUBEB_STATE_ERROR) {
    return "ERROR";
  }
  MOZ_CRASH("Unexpected state!");
}
|
|
|
|
|
2014-08-26 19:01:33 +04:00
|
|
|
// Called by cubeb (off the graph thread) when the stream changes state.
// Keeps mAudioThreadRunning in sync and falls back to a system clock
// driver on unexpected errors.
void AudioCallbackDriver::StateCallback(cubeb_state aState) {
  MOZ_ASSERT(!OnGraphThread());
  LOG(LogLevel::Debug,
      ("AudioCallbackDriver State: %s", StateToString(aState)));

  // Clear the flag for the not running
  // states: stopped, drained, error.
  mAudioThreadRunning = (aState == CUBEB_STATE_STARTED);

  if (aState == CUBEB_STATE_ERROR && mShouldFallbackIfError) {
    // The stream died while this driver still owned the graph: hand the
    // graph to a SystemClockDriver so it keeps iterating.
    MOZ_ASSERT(!ThreadRunning());
    mShouldFallbackIfError = false;
    RemoveMixerCallback();
    FallbackToSystemClockDriver();
  } else if (aState == CUBEB_STATE_STOPPED) {
    MOZ_ASSERT(!ThreadRunning());
    RemoveMixerCallback();
  }
}
|
|
|
|
|
|
|
|
// Receives mixed audio from the graph's mixer (on the graph thread) and
// copies it into the cubeb output buffer for the current data callback.
// Frames that do not fit (the buffer is block-aligned, the callback is
// not) are stashed in mScratchBuffer for the next callback.
//
// aMixedBuffer: interleaved mixed samples, aFrames frames of aChannels.
// aFormat/aSampleRate are part of the mixer callback signature but unused
// here.
void AudioCallbackDriver::MixerCallback(AudioDataValue* aMixedBuffer,
                                        AudioSampleFormat aFormat,
                                        uint32_t aChannels, uint32_t aFrames,
                                        uint32_t aSampleRate) {
  MOZ_ASSERT(OnGraphThread());
  // Snapshot how much room the output buffer has before we write into it.
  uint32_t toWrite = mBuffer.Available();

  if (!mBuffer.Available()) {
    NS_WARNING("DataCallback buffer full, expect frame drops.");
  }

  MOZ_ASSERT(mBuffer.Available() <= aFrames);

  // Fill the callback's output buffer completely...
  mBuffer.WriteFrames(aMixedBuffer, mBuffer.Available());
  MOZ_ASSERT(mBuffer.Available() == 0,
             "Missing frames to fill audio callback's buffer.");

  // ...and spill the remaining frames into the scratch buffer for the
  // next callback. `written` may be short if the scratch buffer is full.
  DebugOnly<uint32_t> written = mScratchBuffer.Fill(
      aMixedBuffer + toWrite * aChannels, aFrames - toWrite);
  NS_WARNING_ASSERTION(written == aFrames - toWrite, "Dropping frames.");
}
|
|
|
|
|
2014-08-26 19:02:31 +04:00
|
|
|
// On MacBook Pro, decide whether output should be hard-panned to the
// right channel (done in DataCallback via mNeedsPanning) to keep the
// left speaker — which sits next to the microphone — quiet while the
// mic is active. No-op on non-macOS platforms.
void AudioCallbackDriver::PanOutputIfNeeded(bool aMicrophoneActive) {
#ifdef XP_MACOSX
  cubeb_device* out = nullptr;
  int rv;
  char name[128];
  size_t length = sizeof(name);

  // Only MacBook Pro hardware needs this workaround; read the model name.
  rv = sysctlbyname("hw.model", name, &length, NULL, 0);
  if (rv) {
    return;
  }

  if (!strncmp(name, "MacBookPro", 10)) {
    if (cubeb_stream_get_current_device(mAudioStream, &out) == CUBEB_OK) {
      MOZ_ASSERT(out);
      // Check which device we are outputting to: "ispk" is the built-in
      // (internal) speakers.
      if (out->output_name && !strcmp(out->output_name, "ispk")) {
        // Pan everything to the right speaker.
        LOG(LogLevel::Debug, ("Using the built-in speakers, with%s audio input",
                              aMicrophoneActive ? "" : "out"));
        mNeedsPanning = aMicrophoneActive;
      } else {
        LOG(LogLevel::Debug, ("Using an external output device"));
        mNeedsPanning = false;
      }
      // The device description is owned by cubeb; release it.
      cubeb_stream_device_destroy(mAudioStream, out);
    }
  }
#endif
}
|
|
|
|
|
2018-05-25 11:58:45 +03:00
|
|
|
// Called by cubeb (off the graph thread) when the audio device changes.
void AudioCallbackDriver::DeviceChangedCallback() {
  MOZ_ASSERT(!OnGraphThread());
  // Tell the audio engine the device has changed, it might want to reset some
  // state.
  MonitorAutoLock mon(mGraphImpl->GetMonitor());
  GraphImpl()->DeviceChanged();
#ifdef XP_MACOSX
  // Re-evaluate speaker panning on a background thread; PanOutputIfNeeded
  // queries sysctl and the cubeb device, which we don't want to do here.
  RefPtr<AudioCallbackDriver> self(this);
  bool hasInput = mInputChannelCount;
  NS_DispatchBackgroundTask(NS_NewRunnableFunction(
      "PanOutputIfNeeded", [self{std::move(self)}, hasInput]() {
        self->PanOutputIfNeeded(hasInput);
      }));
#endif
}
|
2014-08-26 19:01:33 +04:00
|
|
|
|
|
|
|
// Return the smoothed duration (milliseconds) of one audio callback,
// maintained by DataCallback's running average.
uint32_t AudioCallbackDriver::IterationDuration() {
  MOZ_ASSERT(OnGraphThread());
  // The real fix would be to have an API in cubeb to give us the number. Short
  // of that, we approximate it here. bug 1019507
  return mIterationDurationMS;
}
|
|
|
|
|
|
|
|
// True between a successful StartStream() and the matching Stop().
bool AudioCallbackDriver::IsStarted() { return mStarted; }
|
|
|
|
|
2019-10-02 13:23:02 +03:00
|
|
|
// Queue an AudioContext suspend/resume/close operation so its promise can
// be resolved once the cubeb operation completes (see
// CompleteAudioContextOperations).
//
// aTrack: the destination track of the AudioContext being operated on.
// aPromise: opaque main-thread Promise pointer (moved across threads as
//           void*); only present when a state-change notification is
//           requested.
// aOperation: the AudioContextOperation being performed.
// aFlags: whether the main thread wants a state-change callback; the
//         promise is only recorded in that case.
void AudioCallbackDriver::EnqueueTrackAndPromiseForOperation(
    MediaTrack* aTrack, void* aPromise, dom::AudioContextOperation aOperation,
    dom::AudioContextOperationFlags aFlags) {
  MOZ_ASSERT(OnGraphThread() || !ThreadRunning());
  MonitorAutoLock mon(mGraphImpl->GetMonitor());
  // A promise may only be supplied together with SendStateChange. The
  // previous form `(aFlags | SendStateChange) || !aPromise` OR-ed the flag
  // in, making the condition always true and the assertion vacuous.
  MOZ_ASSERT(aFlags == dom::AudioContextOperationFlags::SendStateChange ||
             !aPromise);
  if (aFlags == dom::AudioContextOperationFlags::SendStateChange) {
    mPromisesForOperation.AppendElement(
        TrackAndPromiseForOperation(aTrack, aPromise, aOperation, aFlags));
  }
}
|
|
|
|
|
|
|
|
void AudioCallbackDriver::CompleteAudioContextOperations(
|
|
|
|
AsyncCubebOperation aOperation) {
|
2018-05-25 11:58:45 +03:00
|
|
|
MOZ_ASSERT(OnCubebOperationThread());
|
2019-10-02 13:23:02 +03:00
|
|
|
AutoTArray<TrackAndPromiseForOperation, 1> array;
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
|
|
|
|
// We can't lock for the whole function because AudioContextOperationCompleted
|
|
|
|
// will grab the monitor
|
|
|
|
{
|
|
|
|
MonitorAutoLock mon(GraphImpl()->GetMonitor());
|
|
|
|
array.SwapElements(mPromisesForOperation);
|
|
|
|
}
|
|
|
|
|
2015-04-29 12:02:57 +03:00
|
|
|
for (uint32_t i = 0; i < array.Length(); i++) {
|
2019-10-02 13:23:02 +03:00
|
|
|
TrackAndPromiseForOperation& s = array[i];
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback starts running; this allows authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has completed (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
if ((aOperation == AsyncCubebOperation::INIT &&
|
2015-05-10 06:38:15 +03:00
|
|
|
s.mOperation == dom::AudioContextOperation::Resume) ||
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
(aOperation == AsyncCubebOperation::SHUTDOWN &&
|
2015-05-10 06:38:15 +03:00
|
|
|
s.mOperation != dom::AudioContextOperation::Resume)) {
|
2019-04-02 14:10:02 +03:00
|
|
|
MOZ_ASSERT(s.mFlags == dom::AudioContextOperationFlags::SendStateChange);
|
2019-10-02 13:23:02 +03:00
|
|
|
GraphImpl()->AudioContextOperationCompleted(s.mTrack, s.mPromise,
|
2019-04-02 14:10:02 +03:00
|
|
|
s.mOperation, s.mFlags);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
array.RemoveElementAt(i);
|
2015-04-29 12:02:57 +03:00
|
|
|
i--;
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!array.IsEmpty()) {
|
|
|
|
MonitorAutoLock mon(GraphImpl()->GetMonitor());
|
|
|
|
mPromisesForOperation.AppendElements(array);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-12 14:28:03 +03:00
|
|
|
TimeDuration AudioCallbackDriver::AudioOutputLatency() {
  // Ask cubeb for the current output latency of the stream, in frames.
  // Value-initialize so the variable can never be read indeterminate if the
  // cubeb call fails without writing to the out-param.
  uint32_t latencyFrames{0};
  int rv = cubeb_stream_get_latency(mAudioStream, &latencyFrames);
  if (rv || mSampleRate == 0) {
    // Either cubeb could not report a latency, or the sample rate is not
    // known yet; report zero latency instead of dividing by zero below.
    return TimeDuration::FromSeconds(0.0);
  }
  // Convert frames to seconds at the stream's sample rate.
  return TimeDuration::FromSeconds(static_cast<double>(latencyFrames) /
                                   mSampleRate);
}
|
|
|
|
|
2017-11-28 13:57:02 +03:00
|
|
|
// Switch the graph from this (failed) audio-callback driver to a timer-based
// SystemClockDriver. Called after the audio stream has reported an error
// state, once the audio callback thread is no longer running.
void AudioCallbackDriver::FallbackToSystemClockDriver() {
  // The audio callback thread must already be stopped; no callback can race
  // with the driver switch below.
  MOZ_ASSERT(!ThreadRunning());
  // FallbackMode::Fallback — presumably marks this driver as a replacement
  // for a broken audio stream rather than a normal switch; TODO confirm
  // against SystemClockDriver's constructor.
  SystemClockDriver* nextDriver =
      new SystemClockDriver(GraphImpl(), FallbackMode::Fallback);
  // Hold the graph monitor while installing and starting the next driver.
  MonitorAutoLock lock(GraphImpl()->GetMonitor());
  SetNextDriver(nextDriver);
  // We're not using SwitchAtNextIteration here, because there
  // won't be a next iteration if we don't restart things manually:
  // the audio stream just signaled that it's in error state.
  SwitchToNextDriver();
}
|
2014-04-25 18:09:30 +04:00
|
|
|
|
2015-07-13 18:25:42 +03:00
|
|
|
} // namespace mozilla
|
2017-03-24 06:17:17 +03:00
|
|
|
|
|
|
|
// avoid redefined macro in unified build
|
|
|
|
#undef LOG
|