Backed out 6 changesets (bug 1531833) for geckoview failures on PermissionDelegateTest.media CLOSED TREE

Backed out changeset f90ad6bb8ebd (bug 1531833)
Backed out changeset 465570a54b46 (bug 1531833)
Backed out changeset e725253ee976 (bug 1531833)
Backed out changeset 74ad8e7a722b (bug 1531833)
Backed out changeset b1268e5f7023 (bug 1531833)
Backed out changeset e3ec78b2db1f (bug 1531833)

--HG--
extra : amend_source : 81aa19c352e72cac2369e014d19ec5a896538b21
This commit is contained in:
Oana Pop Rus 2019-04-11 21:16:55 +03:00
Parent 7977731cfd
Commit 212a653d81
18 changed files with 103 additions and 402 deletions

View file

@ -147,14 +147,6 @@ size_t sAudioIPCStackSize;
StaticAutoPtr<char> sBrandName;
StaticAutoPtr<char> sCubebBackendName;
StaticAutoPtr<char> sCubebOutputDeviceName;
#ifdef MOZ_WIDGET_ANDROID
// Counts the number of time a request for switching to global "communication
// mode" has been received. If this is > 0, global communication mode is to be
// enabled. If it is 0, the global communication mode is to be disabled.
// This allows to correctly track the global behaviour to adopt accross
// asynchronous GraphDriver changes, on Android.
int sInCommunicationCount = 0;
#endif
const char kBrandBundleURL[] = "chrome://branding/locale/brand.properties";
@ -317,24 +309,6 @@ void ForceSetCubebContext(cubeb* aCubebContext) {
sCubebState = CubebState::Initialized;
}
void SetInCommunication(bool aInCommunication) {
#ifdef MOZ_WIDGET_ANDROID
StaticMutexAutoLock lock(sMutex);
if (aInCommunication) {
sInCommunicationCount++;
} else {
MOZ_ASSERT(sInCommunicationCount > 0);
sInCommunicationCount--;
}
if (sInCommunicationCount == 1) {
java::GeckoAppShell::SetCommunicationAudioModeOn(true);
} else if (sInCommunicationCount == 0) {
java::GeckoAppShell::SetCommunicationAudioModeOn(false);
}
#endif
}
bool InitPreferredSampleRate() {
StaticMutexAutoLock lock(sMutex);
if (sPreferredSampleRate != 0) {
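Context for the block removed above: the counter (rather than a boolean) exists so that overlapping requests from asynchronously switching GraphDrivers cannot toggle Android's global communication mode too early; the mode should flip only on the 0->1 and 1->0 transitions. A minimal sketch of that pattern, with a hypothetical PlatformSetCommunicationMode() standing in for java::GeckoAppShell::SetCommunicationAudioModeOn():

    // Sketch only: refcounted global toggle under a single lock.
    #include <cassert>
    #include <cstdio>
    #include <mutex>

    static std::mutex sLock;
    static int sInCommunicationCount = 0;

    // Hypothetical stand-in for java::GeckoAppShell::SetCommunicationAudioModeOn().
    static void PlatformSetCommunicationMode(bool aOn) {
      std::printf("communication mode %s\n", aOn ? "on" : "off");
    }

    void SetInCommunication(bool aInCommunication) {
      std::lock_guard<std::mutex> lock(sLock);
      if (aInCommunication) {
        if (++sInCommunicationCount == 1) {
          PlatformSetCommunicationMode(true);   // 0 -> 1: first user enables
        }
      } else {
        assert(sInCommunicationCount > 0);
        if (--sInCommunicationCount == 0) {
          PlatformSetCommunicationMode(false);  // 1 -> 0: last user disables
        }
      }
    }

With a plain boolean, the first driver to release the mode would disable it for every other driver still using it.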

View file

@ -13,8 +13,6 @@
class AudioDeviceInfo;
MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(cubeb_stream_prefs)
namespace mozilla {
namespace CubebUtils {
@ -47,10 +45,6 @@ bool CubebLatencyPrefSet();
void GetCurrentBackend(nsAString& aBackend);
cubeb_stream_prefs GetDefaultStreamPrefs();
char* GetForcedOutputDevice();
// No-op on all platforms but Android, where it tells the device's AudioManager
// to switch to "communication mode", which might change audio routing,
// bluetooth communication type, etc.
void SetInCommunication(bool aInCommunication);
# ifdef MOZ_WIDGET_ANDROID
uint32_t AndroidGetAudioOutputSampleRate();

View file

@ -469,8 +469,7 @@ StreamAndPromiseForOperation::StreamAndPromiseForOperation(
mFlags(aFlags) {}
AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl,
uint32_t aInputChannelCount,
AudioInputType aAudioInputType)
uint32_t aInputChannelCount)
: GraphDriver(aGraphImpl),
mOutputChannels(0),
mSampleRate(0),
@ -495,13 +494,6 @@ AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl,
audio::AudioNotificationReceiver::Register(this);
}
#endif
if (aAudioInputType == AudioInputType::Voice) {
LOG(LogLevel::Debug, ("VOICE."));
mInputDevicePreference = CUBEB_DEVICE_PREF_VOICE;
CubebUtils::SetInCommunication(true);
} else {
mInputDevicePreference = CUBEB_DEVICE_PREF_ALL;
}
}
AudioCallbackDriver::~AudioCallbackDriver() {
@ -512,9 +504,6 @@ AudioCallbackDriver::~AudioCallbackDriver() {
audio::AudioNotificationReceiver::Unregister(this);
}
#endif
if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) {
CubebUtils::SetInCommunication(false);
}
}
bool IsMacbookOrMacbookAir() {
@ -600,9 +589,6 @@ bool AudioCallbackDriver::Init() {
output.channels = mOutputChannels;
output.layout = CUBEB_LAYOUT_UNDEFINED;
output.prefs = CubebUtils::GetDefaultStreamPrefs();
if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) {
output.prefs |= static_cast<cubeb_stream_prefs>(CUBEB_STREAM_PREF_VOICE);
}
uint32_t latency_frames = CubebUtils::GetCubebMSGLatencyInFrames(&output);
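For reference, the removed constructor logic above is where a voice-typed input turned into CUBEB_DEVICE_PREF_VOICE and a CUBEB_STREAM_PREF_VOICE bit on the output stream. A hedged sketch (not the Gecko code path itself) of how such a preference bit is OR'd into the stream parameters before stream creation:

    // Sketch only; assumes cubeb_stream_prefs is used as a bit mask, as its
    // declaration in cubeb.h indicates.
    #include "cubeb/cubeb.h"

    void ApplyVoicePreference(cubeb_stream_params& aOutput, bool aIsVoice) {
      if (aIsVoice) {
        // Lets the backend pick voice-appropriate routing and quality
        // (e.g. bluetooth SCO), per the CUBEB_STREAM_PREF_VOICE documentation.
        aOutput.prefs = static_cast<cubeb_stream_prefs>(
            aOutput.prefs | CUBEB_STREAM_PREF_VOICE);
      }
    }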

View file

@ -323,7 +323,6 @@ struct StreamAndPromiseForOperation {
};
enum AsyncCubebOperation { INIT, SHUTDOWN };
enum class AudioInputType { Unknown, Voice };
/**
* This is a graph driver that is based on callback functions called by the
@ -355,8 +354,7 @@ class AudioCallbackDriver : public GraphDriver,
public:
/** If aInputChannelCount is zero, then this driver is output-only. */
AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl,
uint32_t aInputChannelCount,
AudioInputType aAudioInputType);
uint32_t aInputChannelCount);
virtual ~AudioCallbackDriver();
void Start() override;
@ -404,13 +402,6 @@ class AudioCallbackDriver : public GraphDriver,
uint32_t InputChannelCount() { return mInputChannelCount; }
AudioInputType InputDevicePreference() {
if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) {
return AudioInputType::Voice;
}
return AudioInputType::Unknown;
}
/* Enqueue a promise that is going to be resolved when a specific operation
* occurs on the cubeb stream. */
void EnqueueStreamAndPromiseForOperation(
@ -512,12 +503,12 @@ class AudioCallbackDriver : public GraphDriver,
const RefPtr<SharedThreadPool> mInitShutdownThread;
/* This must be accessed with the graph monitor held. */
AutoTArray<StreamAndPromiseForOperation, 1> mPromisesForOperation;
cubeb_device_pref mInputDevicePreference;
/* This is used to signal adding the mixer callback on first run
* of audio callback. This is atomic because it is touched from different
* threads, the audio callback thread and the state change thread. However,
* the order of the threads does not allow concurent access. */
Atomic<bool> mAddedMixer;
/* Contains the id of the audio thread for as long as the callback
* is taking place, after that it is reseted to an invalid value. */
std::atomic<std::thread::id> mAudioThreadId;

View file

@ -368,8 +368,8 @@ void MediaStreamGraphImpl::UpdateStreamOrder() {
!CurrentDriver()->AsAudioCallbackDriver() && !switching) {
MonitorAutoLock mon(mMonitor);
if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, AudioInputChannelCount(), AudioInputDevicePreference());
AudioCallbackDriver* driver =
new AudioCallbackDriver(this, AudioInputChannelCount());
CurrentDriver()->SwitchAtNextIteration(driver);
}
}
@ -612,8 +612,8 @@ void MediaStreamGraphImpl::CreateOrDestroyAudioStreams(MediaStream* aStream) {
if (!CurrentDriver()->AsAudioCallbackDriver() && !switching) {
MonitorAutoLock mon(mMonitor);
if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, AudioInputChannelCount(), AudioInputDevicePreference());
AudioCallbackDriver* driver =
new AudioCallbackDriver(this, AudioInputChannelCount());
CurrentDriver()->SwitchAtNextIteration(driver);
}
}
@ -746,8 +746,8 @@ void MediaStreamGraphImpl::OpenAudioInputImpl(CubebUtils::AudioDeviceID aID,
// Switch Drivers since we're adding input (to input-only or full-duplex)
MonitorAutoLock mon(mMonitor);
if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, AudioInputChannelCount(), AudioInputDevicePreference());
AudioCallbackDriver* driver =
new AudioCallbackDriver(this, AudioInputChannelCount());
LOG(LogLevel::Debug,
("%p OpenAudioInput: starting new AudioCallbackDriver(input) %p",
this, driver));
@ -830,8 +830,7 @@ void MediaStreamGraphImpl::CloseAudioInputImpl(
LOG(LogLevel::Debug,
("%p: CloseInput: output present (AudioCallback)", this));
driver = new AudioCallbackDriver(this, AudioInputChannelCount(),
AudioInputDevicePreference());
driver = new AudioCallbackDriver(this, AudioInputChannelCount());
CurrentDriver()->SwitchAtNextIteration(driver);
} else if (CurrentDriver()->AsAudioCallbackDriver()) {
LOG(LogLevel::Debug,
@ -982,10 +981,6 @@ void MediaStreamGraphImpl::ReevaluateInputDevice() {
if (audioCallbackDriver->InputChannelCount() != AudioInputChannelCount()) {
needToSwitch = true;
}
if (audioCallbackDriver->InputDevicePreference() !=
AudioInputDevicePreference()) {
needToSwitch = true;
}
} else {
// We're already in the process of switching to a audio callback driver,
// which will happen at the next iteration.
@ -998,8 +993,8 @@ void MediaStreamGraphImpl::ReevaluateInputDevice() {
needToSwitch = true;
}
if (needToSwitch) {
AudioCallbackDriver* newDriver = new AudioCallbackDriver(
this, AudioInputChannelCount(), AudioInputDevicePreference());
AudioCallbackDriver* newDriver =
new AudioCallbackDriver(this, AudioInputChannelCount());
{
MonitorAutoLock lock(mMonitor);
CurrentDriver()->SwitchAtNextIteration(newDriver);
@ -3158,9 +3153,8 @@ MediaStreamGraphImpl::MediaStreamGraphImpl(GraphDriverType aDriverRequested,
mMainThreadGraphTime(0, "MediaStreamGraphImpl::mMainThreadGraphTime") {
if (mRealtime) {
if (aDriverRequested == AUDIO_THREAD_DRIVER) {
// Always start with zero input channels, and no particular preferences
// for the input channel.
mDriver = new AudioCallbackDriver(this, 0, AudioInputType::Unknown);
// Always start with zero input channels.
mDriver = new AudioCallbackDriver(this, 0);
} else {
mDriver = new SystemClockDriver(this);
}
@ -3651,8 +3645,7 @@ void MediaStreamGraphImpl::ApplyAudioContextOperationImpl(
MOZ_ASSERT(nextDriver->AsAudioCallbackDriver());
driver = nextDriver->AsAudioCallbackDriver();
} else {
driver = new AudioCallbackDriver(this, AudioInputChannelCount(),
AudioInputDevicePreference());
driver = new AudioCallbackDriver(this, AudioInputChannelCount());
MonitorAutoLock lock(mMonitor);
CurrentDriver()->SwitchAtNextIteration(driver);
}
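Every hunk in this file reduces the AudioCallbackDriver construction to the two-argument form; the surrounding switching pattern is unchanged: decide that a switch is needed, build the replacement driver, and ask the current driver to hand over at the next iteration. A simplified sketch of that decision, with placeholder types standing in for the real graph internals:

    // Sketch only; GraphImpl and AudioCallbackDriver here are placeholders
    // for MediaStreamGraphImpl and its driver, not the real classes.
    struct AudioCallbackDriver {
      explicit AudioCallbackDriver(unsigned aInputChannels)
          : mInputChannels(aInputChannels) {}
      unsigned InputChannelCount() const { return mInputChannels; }
      unsigned mInputChannels;
    };

    struct GraphImpl {
      AudioCallbackDriver* mCurrentDriver = nullptr;
      AudioCallbackDriver* mNextDriver = nullptr;
      unsigned AudioInputChannelCount() const { return 2; }  // assumed value
      void SwitchAtNextIteration(AudioCallbackDriver* aNext) { mNextDriver = aNext; }
    };

    void ReevaluateInputDevice(GraphImpl& aGraph) {
      if (!aGraph.mCurrentDriver) {
        return;
      }
      // After the backout only the channel count is compared; the voice
      // preference no longer forces a driver switch.
      bool needToSwitch = aGraph.mCurrentDriver->InputChannelCount() !=
                          aGraph.AudioInputChannelCount();
      if (needToSwitch) {
        auto* next = new AudioCallbackDriver(aGraph.AudioInputChannelCount());
        aGraph.SwitchAtNextIteration(next);  // old driver hands over later
      }
    }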

View file

@ -122,10 +122,6 @@ class AudioDataListenerInterface {
*/
virtual uint32_t RequestedInputChannelCount(MediaStreamGraphImpl* aGraph) = 0;
/**
* Whether the underlying audio device is used for voice input.
*/
virtual bool IsVoiceInput(MediaStreamGraphImpl* aGraph) const = 0;
/**
* Called when the underlying audio device has changed.
*/

View file

@ -398,7 +398,6 @@ class MediaStreamGraphImpl : public MediaStreamGraph,
* reevaluated, for example, if the channel count of the input stream should
* be changed. */
void ReevaluateInputDevice();
/* Called on the graph thread when there is new output data for listeners.
* This is the mixed audio output of this MediaStreamGraph. */
void NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
@ -486,29 +485,6 @@ class MediaStreamGraphImpl : public MediaStreamGraph,
return maxInputChannels;
}
AudioInputType AudioInputDevicePreference() {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
if (!mInputDeviceUsers.GetValue(mInputDeviceID)) {
return AudioInputType::Unknown;
}
bool voiceInput = false;
// When/if we decide to support multiple input device per graph, this needs
// loop over them.
nsTArray<RefPtr<AudioDataListener>>* listeners =
mInputDeviceUsers.GetValue(mInputDeviceID);
MOZ_ASSERT(listeners);
// If at least one stream is considered to be voice,
for (const auto& listener : *listeners) {
voiceInput |= listener->IsVoiceInput(this);
}
if (voiceInput) {
return AudioInputType::Voice;
}
return AudioInputType::Unknown;
}
CubebUtils::AudioDeviceID InputDeviceID() { return mInputDeviceID; }
double MediaTimeToSeconds(GraphTime aTime) const {
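The removed AudioInputDevicePreference() classifies the whole input device as a voice device as soon as any registered listener reports voice input. For illustration, the same check written with std::any_of over the listener array used above (Gecko's nsTArray/RefPtr types assumed):

    // Restatement of the removed loop, for illustration only.
    #include <algorithm>

    AudioInputType AudioInputDevicePreference() {
      nsTArray<RefPtr<AudioDataListener>>* listeners =
          mInputDeviceUsers.GetValue(mInputDeviceID);
      if (!listeners) {
        return AudioInputType::Unknown;
      }
      bool voiceInput = std::any_of(
          listeners->begin(), listeners->end(),
          [this](const RefPtr<AudioDataListener>& aListener) {
            return aListener->IsVoiceInput(this);
          });
      return voiceInput ? AudioInputType::Voice : AudioInputType::Unknown;
    }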

View file

@ -180,14 +180,9 @@ void MediaEngineWebRTC::EnumerateMicrophoneDevices(
if (!foundPreferredDevice) {
foundPreferredDevice = true;
} else {
// This is possible on windows, there is a default communication
// device, and a default device:
// See https://bugzilla.mozilla.org/show_bug.cgi?id=1542739
#ifndef XP_WIN
MOZ_ASSERT(!foundPreferredDevice,
"Found more than one preferred audio input device"
"while enumerating");
#endif
}
#endif
aDevices->InsertElementAt(0, device);

View file

@ -160,12 +160,6 @@ class AudioInputProcessing : public AudioDataListener {
void NotifyInputData(MediaStreamGraphImpl* aGraph,
const AudioDataValue* aBuffer, size_t aFrames,
TrackRate aRate, uint32_t aChannels) override;
bool IsVoiceInput(MediaStreamGraphImpl* aGraph) const override {
// If we're passing data directly without AEC or any other process, this
// means that all voice-processing has been disabled intentionaly. In this
// case, consider that the device is not used for voice input.
return !PassThrough(aGraph);
}
void Start();
void Stop();

View file

@ -1,3 +1,34 @@
diff --git a/media/libcubeb/include/cubeb.h b/media/libcubeb/include/cubeb.h
--- a/media/libcubeb/include/cubeb.h
+++ b/media/libcubeb/include/cubeb.h
@@ -216,20 +216,23 @@ enum {
CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT |
CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT,
};
/** Miscellaneous stream preferences. */
typedef enum {
CUBEB_STREAM_PREF_NONE = 0x00, /**< No stream preferences are requested. */
- CUBEB_STREAM_PREF_LOOPBACK = 0x01 /**< Request a loopback stream. Should be
- specified on the input params and an
- output device to loopback from should
- be passed in place of an input device. */
+ CUBEB_STREAM_PREF_LOOPBACK = 0x01, /**< Request a loopback stream. Should be
+ specified on the input params and an
+ output device to loopback from should
+ be passed in place of an input device. */
+ CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING = 0x02, /**< Disable switching
+ default device on OS
+ changes. */
} cubeb_stream_prefs;
/** Stream format initialization parameters. */
typedef struct {
cubeb_sample_format format; /**< Requested sample format. One of
#cubeb_sample_format. */
uint32_t rate; /**< Requested sample rate. Valid range is [1000, 192000]. */
uint32_t channels; /**< Requested channel count. Valid range is [1, 8]. */
diff --git a/media/libcubeb/src/cubeb_wasapi.cpp b/media/libcubeb/src/cubeb_wasapi.cpp
--- a/media/libcubeb/src/cubeb_wasapi.cpp
+++ b/media/libcubeb/src/cubeb_wasapi.cpp
@ -54,26 +85,3 @@ diff --git a/media/libcubeb/src/cubeb_wasapi.cpp b/media/libcubeb/src/cubeb_wasa
// The variables intialized in wasapi_stream_init,
// must be destroyed in wasapi_stream_destroy.
stm->linear_input_buffer.reset();
diff --git a/media/libcubeb/include/cubeb.h b/media/libcubeb/include/cubeb.h
--- a/media/libcubeb/include/cubeb.h
+++ a/media/libcubeb/include/cubeb.h
@@ -222,16 +222,19 @@
/** Miscellaneous stream preferences. */
typedef enum {
CUBEB_STREAM_PREF_NONE = 0x00, /**< No stream preferences are requested. */
CUBEB_STREAM_PREF_LOOPBACK = 0x01, /**< Request a loopback stream. Should be
specified on the input params and an
output device to loopback from should
be passed in place of an input device. */
+ CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING = 0x02, /**< Disable switching
+ default device on OS
+ changes. */
CUBEB_STREAM_PREF_VOICE = 0x04 /**< This stream is going to transport voice data.
Depending on the backend and platform, this can
change the audio input or output devices
selected, as well as the quality of the stream,
for example to accomodate bluetooth SCO modes on
bluetooth devices. */
} cubeb_stream_prefs;
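The cubeb_stream_prefs values are powers of two so they can be combined and tested as a bit mask (the Gecko side enables the operators with MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS in CubebUtils.h). A short hedged example of a caller combining and testing them:

    // Sketch: combining and querying cubeb_stream_prefs bits.
    #include "cubeb/cubeb.h"
    #include <cstdio>

    int main() {
      cubeb_stream_params params = {};
      params.prefs = static_cast<cubeb_stream_prefs>(
          CUBEB_STREAM_PREF_LOOPBACK | CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING);

      if (params.prefs & CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING) {
        std::printf("backend should not follow OS default-device changes\n");
      }
      return 0;
    }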

View file

@ -224,18 +224,12 @@ enum {
typedef enum {
CUBEB_STREAM_PREF_NONE = 0x00, /**< No stream preferences are requested. */
CUBEB_STREAM_PREF_LOOPBACK = 0x01, /**< Request a loopback stream. Should be
specified on the input params and an
output device to loopback from should
be passed in place of an input device. */
specified on the input params and an
output device to loopback from should
be passed in place of an input device. */
CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING = 0x02, /**< Disable switching
default device on OS
changes. */
CUBEB_STREAM_PREF_VOICE = 0x04 /**< This stream is going to transport voice data.
Depending on the backend and platform, this can
change the audio input or output devices
selected, as well as the quality of the stream,
for example to accomodate bluetooth SCO modes on
bluetooth devices. */
} cubeb_stream_prefs;
/** Stream format initialization parameters. */

View file

@ -19,5 +19,5 @@ origin:
license: "ISC"
# update.sh will update this value
release: "c0a71704ae935666ecbd0e8a61345de583139607 (2019-04-10 18:19:29 +0200)"
release: "66d9c48d916f00c396482f9c5075feacc2bc0db8 (2019-04-03 12:41:20 +0300)"

View file

@ -43,9 +43,10 @@
#define SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION ((SLuint32) 0x00000003)
/** uses the main microphone tuned for audio communications */
#define SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION ((SLuint32) 0x00000004)
/** uses the main microphone unprocessed */
#define SL_ANDROID_RECORDING_PRESET_UNPROCESSED ((SLuint32) 0x00000005)
/** Audio recording get session ID (read only) */
/** Audio recording get session ID key */
#define SL_ANDROID_KEY_RECORDING_SESSION_ID ((const SLchar*) "androidRecordingSessionId")
/*---------------------------------------------------------------------------*/
/* Android AudioPlayer configuration */
@ -68,35 +69,9 @@
#define SL_ANDROID_STREAM_ALARM ((SLint32) 0x00000004)
/* same as android.media.AudioManager.STREAM_NOTIFICATION */
#define SL_ANDROID_STREAM_NOTIFICATION ((SLint32) 0x00000005)
/*---------------------------------------------------------------------------*/
/* Android AudioPlayer and AudioRecorder configuration */
/*---------------------------------------------------------------------------*/
/** Audio Performance mode.
* Performance mode tells the framework how to configure the audio path
* for a player or recorder according to application performance and
* functional requirements.
* It affects the output or input latency based on acceptable tradeoffs on
* battery drain and use of pre or post processing effects.
* Performance mode should be set before realizing the object and should be
* read after realizing the object to check if the requested mode could be
* granted or not.
*/
/** Audio Performance mode key */
#define SL_ANDROID_KEY_PERFORMANCE_MODE ((const SLchar*) "androidPerformanceMode")
/** Audio performance values */
/* No specific performance requirement. Allows HW and SW pre/post processing. */
#define SL_ANDROID_PERFORMANCE_NONE ((SLuint32) 0x00000000)
/* Priority given to latency. No HW or software pre/post processing.
* This is the default if no performance mode is specified. */
#define SL_ANDROID_PERFORMANCE_LATENCY ((SLuint32) 0x00000001)
/* Priority given to latency while still allowing HW pre and post processing. */
#define SL_ANDROID_PERFORMANCE_LATENCY_EFFECTS ((SLuint32) 0x00000002)
/* Priority given to power saving if latency is not a concern.
* Allows HW and SW pre/post processing. */
#define SL_ANDROID_PERFORMANCE_POWER_SAVING ((SLuint32) 0x00000003)
/* same as android.media.AudioManager.STREAM_BLUETOOTH_SCO */
#define SL_ANDROID_STREAM_BLUETOOTH_SCO ((SLint32) 0x00000006)
/* same as android.media.AudioManager.STREAM_SYSTEM_ENFORCED */
#define SL_ANDROID_STREAM_SYSTEM_ENFORCED ((SLint32) 0x00000007)
#endif /* OPENSL_ES_ANDROIDCONFIGURATION_H_ */
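The performance-mode comment removed above notes that the mode must be requested before Realize() and read back afterwards to learn whether it was granted. A hedged sketch of that read-back, reusing the SLAndroidConfigurationItf GetConfiguration call that appears elsewhere in this patch (playerConfig is assumed to be a realized player's configuration interface):

    // Sketch only: check which performance mode the framework actually granted.
    SLuint32 granted = 0;
    SLuint32 valueSize = sizeof(granted);
    SLresult res = (*playerConfig)->GetConfiguration(playerConfig,
                                                     SL_ANDROID_KEY_PERFORMANCE_MODE,
                                                     &valueSize,
                                                     &granted);
    if (res == SL_RESULT_SUCCESS && granted != SL_ANDROID_PERFORMANCE_POWER_SAVING) {
      // Request was downgraded or ignored; adjust buffer sizing expectations.
    }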

View file

@ -868,7 +868,7 @@ audiounit_reinit_stream_async(cubeb_stream * stm, device_flags_value flags)
{
if (std::atomic_exchange(&stm->reinit_pending, true)) {
// A reinit task is already pending, nothing more to do.
ALOG("(%p) re-init stream task already pending, cancelling request", stm);
ALOG("(%p) re-init stream task already pending, cancelling request ", stm);
return;
}
@ -944,7 +944,7 @@ audiounit_property_listener_callback(AudioObjectID id, UInt32 address_count,
}
break;
case kAudioDevicePropertyDataSource: {
LOG("Event[%u] - mSelector == kAudioDevicePropertyDataSource for id=%d", (unsigned int) i, id);
LOG("Event[%u] - mSelector == kAudioHardwarePropertyDataSource for id=%d", (unsigned int) i, id);
}
break;
default:
@ -1314,9 +1314,9 @@ audiounit_get_preferred_sample_rate(cubeb * /* ctx */, uint32_t * rate)
static cubeb_channel_layout
audiounit_convert_channel_layout(AudioChannelLayout * layout)
{
// When having one or two channel, force mono or stereo. Some devices (namely,
// Bose QC35, mark 1 and 2), expose a single channel mapped to the right for
// some reason.
// When having on or two channel, force mono or stereo. Some devices (namely,
// Bose QC35, mark 1 and 2), expose a single channel mapped to the right for
// some reason.
if (layout->mNumberChannelDescriptions == 1) {
return CUBEB_LAYOUT_MONO;
} else if (layout->mNumberChannelDescriptions == 2) {
@ -1611,7 +1611,7 @@ audiounit_create_blank_aggregate_device(AudioObjectID * plugin_id, AudioDeviceID
0, NULL,
&size);
if (r != noErr) {
LOG("AudioObjectGetPropertyDataSize/kAudioHardwarePropertyPlugInForBundleID, rv=%d", r);
LOG("AudioHardwareGetPropertyInfo/kAudioHardwarePropertyPlugInForBundleID, rv=%d", r);
return CUBEB_ERROR;
}
@ -1629,7 +1629,7 @@ audiounit_create_blank_aggregate_device(AudioObjectID * plugin_id, AudioDeviceID
&size,
&translation_value);
if (r != noErr) {
LOG("AudioObjectGetPropertyData/kAudioHardwarePropertyPlugInForBundleID, rv=%d", r);
LOG("AudioHardwareGetProperty/kAudioHardwarePropertyPlugInForBundleID, rv=%d", r);
return CUBEB_ERROR;
}
@ -2072,23 +2072,23 @@ audiounit_create_unit(AudioUnit * unit, device_info * device)
if (device->flags & DEV_INPUT) {
r = audiounit_enable_unit_scope(unit, io_side::INPUT, ENABLE);
if (r != CUBEB_OK) {
LOG("Failed to enable audiounit input scope");
LOG("Failed to enable audiounit input scope ");
return r;
}
r = audiounit_enable_unit_scope(unit, io_side::OUTPUT, DISABLE);
if (r != CUBEB_OK) {
LOG("Failed to disable audiounit output scope");
LOG("Failed to disable audiounit output scope ");
return r;
}
} else if (device->flags & DEV_OUTPUT) {
r = audiounit_enable_unit_scope(unit, io_side::OUTPUT, ENABLE);
if (r != CUBEB_OK) {
LOG("Failed to enable audiounit output scope");
LOG("Failed to enable audiounit output scope ");
return r;
}
r = audiounit_enable_unit_scope(unit, io_side::INPUT, DISABLE);
if (r != CUBEB_OK) {
LOG("Failed to disable audiounit input scope");
LOG("Failed to disable audiounit input scope ");
return r;
}
} else {
@ -2588,16 +2588,16 @@ audiounit_setup_stream(cubeb_stream * stm)
}
/* Latency cannot change if another stream is operating in parallel. In this case
* latency is set to the other stream value. */
* latecy is set to the other stream value. */
if (audiounit_active_streams(stm->context) > 1) {
LOG("(%p) More than one active stream, use global latency.", stm);
stm->latency_frames = stm->context->global_latency_frames;
} else {
/* Silently clamp the latency down to the platform default, because we
* synthetize the clock from the callbacks, and we want the clock to update
* often. */
* synthetize the clock from the callbacks, and we want the clock to update
* often. */
stm->latency_frames = audiounit_clamp_latency(stm, stm->latency_frames);
assert(stm->latency_frames); // Ugly error check
assert(stm->latency_frames); // Ungly error check
audiounit_set_global_latency(stm->context, stm->latency_frames);
}
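The latency handling shown above (the backout only reflows its comments) shares a single latency across concurrently active streams, because the clock is synthesized from the callbacks and must tick at one rate: the first stream's clamped request becomes the global value and later streams inherit it. A hedged sketch of that policy with placeholder bounds:

    // Sketch only; the real logic lives in audiounit_setup_stream().
    #include <algorithm>
    #include <cstdint>

    struct Context {
      int active_streams = 0;
      uint32_t global_latency_frames = 0;
    };

    // Assumed bounds; cubeb derives the real ones from the default device.
    constexpr uint32_t kMinLatencyFrames = 128;
    constexpr uint32_t kMaxLatencyFrames = 4096;

    uint32_t ChooseLatencyFrames(Context& aCtx, uint32_t aRequestedFrames) {
      if (aCtx.active_streams > 1) {
        // Another stream already fixed the latency; reuse it.
        return aCtx.global_latency_frames;
      }
      uint32_t clamped =
          std::clamp(aRequestedFrames, kMinLatencyFrames, kMaxLatencyFrames);
      aCtx.global_latency_frames = clamped;  // later streams inherit this
      return clamped;
    }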

View file

@ -63,11 +63,6 @@
#define DEFAULT_SAMPLE_RATE 48000
#define DEFAULT_NUM_OF_FRAMES 480
// If the latency requested is above this threshold, this stream is considered
// intended for playback (vs. real-time). Tell Android it should favor saving
// power over performance or latency.
// This is around 100ms at 44100 or 48000
#define POWERSAVE_LATENCY_FRAMES_THRESHOLD 4000
static struct cubeb_ops const opensl_ops;
@ -89,7 +84,7 @@ struct cubeb {
};
#define NELEMS(A) (sizeof(A) / sizeof A[0])
#define NBUFS 2
#define NBUFS 4
struct cubeb_stream {
/* Note: Must match cubeb_stream layout in cubeb.c. */
@ -160,13 +155,10 @@ struct cubeb_stream {
cubeb_resampler * resampler;
unsigned int user_output_rate;
unsigned int output_configured_rate;
unsigned int buffer_size_frames;
// Audio output latency used in cubeb_stream_get_position().
unsigned int output_latency_ms;
unsigned int latency_frames;
int64_t lastPosition;
int64_t lastPositionTimeStamp;
int64_t lastCompensativePosition;
int voice;
};
/* Forward declaration. */
@ -854,17 +846,16 @@ opensl_configure_capture(cubeb_stream * stm, cubeb_stream_params * params)
lDataSource.pLocator = &lDataLocatorIn;
lDataSource.pFormat = NULL;
const SLuint32 lSoundRecorderIIDCount = 2;
const SLInterfaceID lSoundRecorderIIDs[] = { stm->context->SL_IID_RECORD,
stm->context->SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
stm->context->SL_IID_ANDROIDCONFIGURATION };
const SLboolean lSoundRecorderReqs[] = { SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
stm->context->SL_IID_ANDROIDSIMPLEBUFFERQUEUE };
const SLboolean lSoundRecorderReqs[] = { SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
// create the audio recorder abstract object
SLresult res = (*stm->context->eng)->CreateAudioRecorder(stm->context->eng,
&stm->recorderObj,
&lDataSource,
&lDataSink,
NELEMS(lSoundRecorderIIDs),
lSoundRecorderIIDCount,
lSoundRecorderIIDs,
lSoundRecorderReqs);
// Sample rate not supported. Try again with default sample rate!
@ -883,7 +874,7 @@ opensl_configure_capture(cubeb_stream * stm, cubeb_stream_params * params)
&stm->recorderObj,
&lDataSource,
&lDataSink,
NELEMS(lSoundRecorderIIDs),
lSoundRecorderIIDCount,
lSoundRecorderIIDs,
lSoundRecorderReqs);
@ -893,35 +884,6 @@ opensl_configure_capture(cubeb_stream * stm, cubeb_stream_params * params)
}
}
SLAndroidConfigurationItf recorderConfig;
res = (*stm->recorderObj)
->GetInterface(stm->recorderObj,
stm->context->SL_IID_ANDROIDCONFIGURATION,
&recorderConfig);
if (res != SL_RESULT_SUCCESS) {
LOG("Failed to get the android configuration interface for recorder. Error "
"code: %lu",
res);
return CUBEB_ERROR;
}
// Voice recognition is the lowest latency, according to the docs. Camcorder
// uses a microphone that is in the same direction as the camera.
SLint32 streamType = stm->voice ? SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION
: SL_ANDROID_RECORDING_PRESET_CAMCORDER;
res = (*recorderConfig)
->SetConfiguration(recorderConfig, SL_ANDROID_KEY_RECORDING_PRESET,
&streamType, sizeof(SLint32));
if (res != SL_RESULT_SUCCESS) {
LOG("Failed to set the android configuration to VOICE for the recorder. "
"Error code: %lu",
res);
return CUBEB_ERROR;
}
// realize the audio recorder
res = (*stm->recorderObj)->Realize(stm->recorderObj, SL_BOOLEAN_FALSE);
if (res != SL_RESULT_SUCCESS) {
@ -975,7 +937,7 @@ opensl_configure_capture(cubeb_stream * stm, cubeb_stream_params * params)
// Calculate length of input buffer according to requested latency
stm->input_frame_size = params->channels * sizeof(int16_t);
stm->input_buffer_length = (stm->input_frame_size * stm->buffer_size_frames);
stm->input_buffer_length = (stm->input_frame_size * stm->latency_frames);
// Calculate the capacity of input array
stm->input_array_capacity = NBUFS;
@ -1086,7 +1048,7 @@ opensl_configure_playback(cubeb_stream * stm, cubeb_stream_params * params) {
stm->output_configured_rate = preferred_sampling_rate;
stm->bytespersec = stm->output_configured_rate * stm->framesize;
stm->queuebuf_len = stm->framesize * stm->buffer_size_frames;
stm->queuebuf_len = stm->framesize * stm->latency_frames;
// Calculate the capacity of input array
stm->queuebuf_capacity = NBUFS;
@ -1101,80 +1063,12 @@ opensl_configure_playback(cubeb_stream * stm, cubeb_stream_params * params) {
assert(stm->queuebuf[i]);
}
SLAndroidConfigurationItf playerConfig;
res = (*stm->playerObj)
->GetInterface(stm->playerObj,
stm->context->SL_IID_ANDROIDCONFIGURATION,
&playerConfig);
if (res != SL_RESULT_SUCCESS) {
LOG("Failed to get Android configuration interface. Error code: %lu", res);
return CUBEB_ERROR;
}
SLint32 streamType = SL_ANDROID_STREAM_MEDIA;
if (stm->voice) {
streamType = SL_ANDROID_STREAM_VOICE;
}
res = (*playerConfig)->SetConfiguration(playerConfig,
SL_ANDROID_KEY_STREAM_TYPE,
&streamType,
sizeof(streamType));
if (res != SL_RESULT_SUCCESS) {
LOG("Failed to set Android configuration to %d Error code: %lu",
streamType, res);
return CUBEB_ERROR;
}
SLuint32 performanceMode = SL_ANDROID_PERFORMANCE_LATENCY;
if (stm->buffer_size_frames > POWERSAVE_LATENCY_FRAMES_THRESHOLD) {
performanceMode = SL_ANDROID_PERFORMANCE_POWER_SAVING;
}
res = (*playerConfig)->SetConfiguration(playerConfig,
SL_ANDROID_KEY_PERFORMANCE_MODE,
&performanceMode,
sizeof(performanceMode));
if (res != SL_RESULT_SUCCESS) {
LOG("Failed to set Android performance mode to %d Error code: %lu. This is"
" not fatal", performanceMode, res);
}
res = (*stm->playerObj)->Realize(stm->playerObj, SL_BOOLEAN_FALSE);
if (res != SL_RESULT_SUCCESS) {
LOG("Failed to realize player object. Error code: %lu", res);
return CUBEB_ERROR;
}
// There are two ways of getting the audio output latency:
// - a configuration value, only available on some devices (notably devices
// running FireOS)
// - A Java method, that we call using JNI.
//
// The first method is prefered, if available, because it can account for more
// latency causes, and is more precise.
// Latency has to be queried after the realization of the interface, when
// using SL_IID_ANDROIDCONFIGURATION.
SLuint32 audioLatency = 0;
SLuint32 paramSize = sizeof(SLuint32);
// The reported latency is in milliseconds.
res = (*playerConfig)->GetConfiguration(playerConfig,
(const SLchar *)"androidGetAudioLatency",
&paramSize,
&audioLatency);
if (res == SL_RESULT_SUCCESS) {
LOG("Got playback latency using android configuration extension");
stm->output_latency_ms = audioLatency;
} else if (cubeb_output_latency_method_is_loaded(stm->context->p_output_latency_function)) {
LOG("Got playback latency using JNI");
stm->output_latency_ms = cubeb_get_output_latency(stm->context->p_output_latency_function);
} else {
LOG("No alternate latency querying method loaded, A/V sync will be off.");
stm->output_latency_ms = 0;
}
LOG("Audio output latency: %dms", stm->output_latency_ms);
res = (*stm->playerObj)->GetInterface(stm->playerObj,
stm->context->SL_IID_PLAY,
&stm->play);
@ -1254,14 +1148,6 @@ opensl_validate_stream_param(cubeb_stream_params * stream_params)
return CUBEB_OK;
}
int has_pref_set(cubeb_stream_params* input_params,
cubeb_stream_params* output_params,
cubeb_stream_prefs pref)
{
return (input_params && input_params->prefs & pref) ||
(output_params && output_params->prefs & pref);
}
static int
opensl_stream_init(cubeb * ctx, cubeb_stream ** stream, char const * stream_name,
cubeb_devid input_device,
@ -1299,14 +1185,10 @@ opensl_stream_init(cubeb * ctx, cubeb_stream ** stream, char const * stream_name
stm->data_callback = data_callback;
stm->state_callback = state_callback;
stm->user_ptr = user_ptr;
stm->buffer_size_frames = latency_frames ? latency_frames : DEFAULT_NUM_OF_FRAMES;
stm->latency_frames = latency_frames ? latency_frames : DEFAULT_NUM_OF_FRAMES;
stm->input_enabled = (input_stream_params) ? 1 : 0;
stm->output_enabled = (output_stream_params) ? 1 : 0;
stm->shutdown = 1;
stm->voice = has_pref_set(input_stream_params, output_stream_params, CUBEB_STREAM_PREF_VOICE);
LOG("cubeb stream prefs: voice: %s", stm->voice ? "true" : "false");
#ifdef DEBUG
pthread_mutexattr_t attr;
@ -1321,7 +1203,7 @@ opensl_stream_init(cubeb * ctx, cubeb_stream ** stream, char const * stream_name
if (output_stream_params) {
LOG("Playback params: Rate %d, channels %d, format %d, latency in frames %d.",
output_stream_params->rate, output_stream_params->channels,
output_stream_params->format, stm->buffer_size_frames);
output_stream_params->format, stm->latency_frames);
r = opensl_configure_playback(stm, output_stream_params);
if (r != CUBEB_OK) {
opensl_stream_destroy(stm);
@ -1332,7 +1214,7 @@ opensl_stream_init(cubeb * ctx, cubeb_stream ** stream, char const * stream_name
if (input_stream_params) {
LOG("Capture params: Rate %d, channels %d, format %d, latency in frames %d.",
input_stream_params->rate, input_stream_params->channels,
input_stream_params->format, stm->buffer_size_frames);
input_stream_params->format, stm->latency_frames);
r = opensl_configure_capture(stm, input_stream_params);
if (r != CUBEB_OK) {
opensl_stream_destroy(stm);
@ -1570,6 +1452,10 @@ opensl_stream_get_position(cubeb_stream * stm, uint64_t * position)
uint32_t compensation_msec = 0;
SLresult res;
if (!cubeb_output_latency_method_is_loaded(stm->context->p_output_latency_function)) {
return CUBEB_ERROR_NOT_SUPPORTED;
}
res = (*stm->play)->GetPosition(stm->play, &msec);
if (res != SL_RESULT_SUCCESS)
return CUBEB_ERROR;
@ -1585,22 +1471,22 @@ opensl_stream_get_position(cubeb_stream * stm, uint64_t * position)
}
uint64_t samplerate = stm->user_output_rate;
uint32_t output_latency = stm->output_latency_ms;
uint32_t mixer_latency = cubeb_get_output_latency(stm->context->p_output_latency_function);
pthread_mutex_lock(&stm->mutex);
int64_t maximum_position = stm->written * (int64_t)stm->user_output_rate / stm->output_configured_rate;
pthread_mutex_unlock(&stm->mutex);
assert(maximum_position >= 0);
if (msec > output_latency) {
if (msec > mixer_latency) {
int64_t unadjusted_position;
if (stm->lastCompensativePosition > msec + compensation_msec) {
// Over compensation, use lastCompensativePosition.
unadjusted_position =
samplerate * (stm->lastCompensativePosition - output_latency) / 1000;
samplerate * (stm->lastCompensativePosition - mixer_latency) / 1000;
} else {
unadjusted_position =
samplerate * (msec - output_latency + compensation_msec) / 1000;
samplerate * (msec - mixer_latency + compensation_msec) / 1000;
stm->lastCompensativePosition = msec + compensation_msec;
}
*position = unadjusted_position < maximum_position ?
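The backed-out position code subtracts the reported output latency (in milliseconds) before converting the play head into frames, then clamps against the number of frames actually written so the position can never run ahead of the submitted data. A worked sketch of that arithmetic with illustrative numbers (all values are assumptions):

    // Sketch of the position math only; numbers are made up for illustration.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t user_rate = 48000;        // rate the caller asked for
      const uint64_t configured_rate = 48000;  // rate the device runs at
      const int64_t written_frames = 96000;    // frames handed to the backend
      const uint32_t head_ms = 2100;           // play head from GetPosition()
      const uint32_t output_latency_ms = 80;   // from config key or JNI fallback

      const int64_t maximum_position =
          written_frames * (int64_t)user_rate / (int64_t)configured_rate;

      int64_t position = 0;
      if (head_ms > output_latency_ms) {
        // Frames that have actually reached the output, latency removed.
        position = (int64_t)(user_rate * (head_ms - output_latency_ms) / 1000);
      }
      position = std::min(position, maximum_position);  // 96960 clamps to 96000
      std::printf("position: %lld frames\n", (long long)position);
      return 0;
    }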

View file

@ -141,7 +141,6 @@ int wasapi_stream_stop(cubeb_stream * stm);
int wasapi_stream_start(cubeb_stream * stm);
void close_wasapi_stream(cubeb_stream * stm);
int setup_wasapi_stream(cubeb_stream * stm);
ERole pref_to_role(cubeb_stream_prefs param);
static char const * wstr_to_utf8(wchar_t const * str);
static std::unique_ptr<wchar_t const []> utf8_to_wstr(char const * str);
@ -192,8 +191,6 @@ struct cubeb_stream {
* and what will be presented in the callback. */
cubeb_stream_params input_stream_params = { CUBEB_SAMPLE_FLOAT32NE, 0, 0, CUBEB_LAYOUT_UNDEFINED, CUBEB_STREAM_PREF_NONE };
cubeb_stream_params output_stream_params = { CUBEB_SAMPLE_FLOAT32NE, 0, 0, CUBEB_LAYOUT_UNDEFINED, CUBEB_STREAM_PREF_NONE };
/* A MMDevice role for this stream: either communication or console here. */
ERole role;
/* The input and output device, or NULL for default. */
std::unique_ptr<const wchar_t[]> input_device;
std::unique_ptr<const wchar_t[]> output_device;
@ -568,10 +565,9 @@ public:
return S_OK;
}
wasapi_endpoint_notification_client(HANDLE event, ERole role)
wasapi_endpoint_notification_client(HANDLE event)
: ref_count(1)
, reconfigure_event(event)
, role(role)
{ }
virtual ~wasapi_endpoint_notification_client()
@ -583,7 +579,7 @@ public:
LOG("endpoint: Audio device default changed.");
/* we only support a single stream type for now. */
if (flow != eRender && role != this->role) {
if (flow != eRender && role != eConsole) {
return S_OK;
}
@ -626,7 +622,6 @@ private:
/* refcount for this instance, necessary to implement MSCOM semantics. */
LONG ref_count;
HANDLE reconfigure_event;
ERole role;
};
namespace {
@ -1261,7 +1256,7 @@ HRESULT register_notification_client(cubeb_stream * stm)
return hr;
}
stm->notification_client.reset(new wasapi_endpoint_notification_client(stm->reconfigure_event, stm->role));
stm->notification_client.reset(new wasapi_endpoint_notification_client(stm->reconfigure_event));
hr = stm->device_enumerator->RegisterEndpointNotificationCallback(stm->notification_client.get());
if (FAILED(hr)) {
@ -1356,7 +1351,7 @@ HRESULT unregister_collection_notification_client(cubeb * context)
return hr;
}
HRESULT get_default_endpoint(com_ptr<IMMDevice> & device, EDataFlow direction, ERole role)
HRESULT get_default_endpoint(com_ptr<IMMDevice> & device, EDataFlow direction)
{
com_ptr<IMMDeviceEnumerator> enumerator;
HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
@ -1366,7 +1361,7 @@ HRESULT get_default_endpoint(com_ptr<IMMDevice> & device, EDataFlow direction, E
LOG("Could not get device enumerator: %lx", hr);
return hr;
}
hr = enumerator->GetDefaultAudioEndpoint(direction, role, device.receive());
hr = enumerator->GetDefaultAudioEndpoint(direction, eConsole, device.receive());
if (FAILED(hr)) {
LOG("Could not get default audio endpoint: %lx", hr);
return hr;
@ -1452,11 +1447,11 @@ int wasapi_init(cubeb ** context, char const * context_name)
so that this backend is not incorrectly enabled on platforms that don't
support WASAPI. */
com_ptr<IMMDevice> device;
HRESULT hr = get_default_endpoint(device, eRender, eConsole);
HRESULT hr = get_default_endpoint(device, eRender);
if (FAILED(hr)) {
XASSERT(hr != CO_E_NOTINITIALIZED);
LOG("It wasn't able to find a default rendering device: %lx", hr);
hr = get_default_endpoint(device, eCapture, eConsole);
hr = get_default_endpoint(device, eCapture);
if (FAILED(hr)) {
LOG("It wasn't able to find a default capture device: %lx", hr);
return CUBEB_ERROR;
@ -1555,7 +1550,7 @@ wasapi_get_max_channel_count(cubeb * ctx, uint32_t * max_channels)
XASSERT(ctx && max_channels);
com_ptr<IMMDevice> device;
HRESULT hr = get_default_endpoint(device, eRender, eConsole);
HRESULT hr = get_default_endpoint(device, eRender);
if (FAILED(hr)) {
return CUBEB_ERROR;
}
@ -1587,10 +1582,8 @@ wasapi_get_min_latency(cubeb * ctx, cubeb_stream_params params, uint32_t * laten
return CUBEB_ERROR_INVALID_FORMAT;
}
ERole role = pref_to_role(params.prefs);
com_ptr<IMMDevice> device;
HRESULT hr = get_default_endpoint(device, eRender, role);
HRESULT hr = get_default_endpoint(device, eRender);
if (FAILED(hr)) {
LOG("Could not get default endpoint: %lx", hr);
return CUBEB_ERROR;
@ -1630,7 +1623,7 @@ int
wasapi_get_preferred_sample_rate(cubeb * ctx, uint32_t * rate)
{
com_ptr<IMMDevice> device;
HRESULT hr = get_default_endpoint(device, eRender, eConsole);
HRESULT hr = get_default_endpoint(device, eRender);
if (FAILED(hr)) {
return CUBEB_ERROR;
}
@ -1762,7 +1755,7 @@ int setup_wasapi_stream_one_side(cubeb_stream * stm,
// If caller has requested loopback but not specified a device, look for
// the default render device. Otherwise look for the default device
// appropriate to the direction.
hr = get_default_endpoint(device, is_loopback ? eRender : direction, pref_to_role(stream_params->prefs));
hr = get_default_endpoint(device, is_loopback ? eRender : direction);
if (FAILED(hr)) {
if (is_loopback) {
LOG("Could not get default render endpoint for loopback, error: %lx\n", hr);
@ -2064,16 +2057,6 @@ int setup_wasapi_stream(cubeb_stream * stm)
return CUBEB_OK;
}
ERole
pref_to_role(cubeb_stream_prefs prefs)
{
if (prefs & CUBEB_STREAM_PREF_VOICE) {
return eCommunications;
}
return eConsole;
}
int
wasapi_stream_init(cubeb * context, cubeb_stream ** stream,
char const * stream_name,
@ -2099,14 +2082,6 @@ wasapi_stream_init(cubeb * context, cubeb_stream ** stream,
stm->data_callback = data_callback;
stm->state_callback = state_callback;
stm->user_ptr = user_ptr;
if (stm->output_stream_params.prefs & CUBEB_STREAM_PREF_VOICE ||
stm->input_stream_params.prefs & CUBEB_STREAM_PREF_VOICE) {
stm->role = eCommunications;
} else {
stm->role = eConsole;
}
if (input_stream_params) {
stm->input_stream_params = *input_stream_params;
stm->input_device = utf8_to_wstr(reinterpret_cast<char const *>(input_device));
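On WASAPI the backed-out code mapped CUBEB_STREAM_PREF_VOICE to the eCommunications MMDevice role, which both selects the default communication device and filters default-device-changed notifications by that role. A hedged sketch of the mapping and how it feeds GetDefaultAudioEndpoint (error handling elided):

    // Sketch only, Windows-specific; mirrors the removed pref_to_role() /
    // get_default_endpoint() pairing.
    #include <mmdeviceapi.h>
    #include "cubeb/cubeb.h"

    static ERole RoleForPrefs(cubeb_stream_prefs prefs) {
      return (prefs & CUBEB_STREAM_PREF_VOICE) ? eCommunications : eConsole;
    }

    static HRESULT DefaultEndpointFor(IMMDeviceEnumerator* enumerator,
                                      EDataFlow direction,
                                      cubeb_stream_prefs prefs,
                                      IMMDevice** device) {
      // eCommunications may resolve to a different physical device than
      // eConsole (e.g. a headset instead of the speakers).
      return enumerator->GetDefaultAudioEndpoint(direction, RoleForPrefs(prefs),
                                                 device);
    }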

View file

@ -5,7 +5,6 @@
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE"/>
<uses-permission android:name="android.permission.INTERNET"/>
<uses-permission android:name="android.permission.WAKE_LOCK"/>
<uses-permission android:name="android.permission.MODIFY_AUDIO_SETTINGS" />
<uses-feature
android:name="android.hardware.location"

View file

@ -1996,41 +1996,6 @@ public class GeckoAppShell {
return Integer.parseInt(prop);
}
static private int sPreviousAudioMode = -2;
@WrapForJNI(calledFrom = "any")
public static void setCommunicationAudioModeOn(final boolean on) {
final AudioManager am = (AudioManager)getApplicationContext()
.getSystemService(Context.AUDIO_SERVICE);
if (am == null) {
return;
}
if (sPreviousAudioMode == AudioManager.MODE_INVALID) {
sPreviousAudioMode = am.getMode();
}
try {
if (on) {
Log.e(LOGTAG, "Setting communication mode ON");
sPreviousAudioMode = am.getMode();
am.startBluetoothSco();
am.setBluetoothScoOn(true);
am.setMode(AudioManager.MODE_IN_COMMUNICATION);
} else {
Log.e(LOGTAG, "Setting communication mode OFF");
am.setMode(sPreviousAudioMode);
sPreviousAudioMode = AudioManager.MODE_INVALID;
am.stopBluetoothSco();
am.setBluetoothScoOn(false);
}
} catch (SecurityException e) {
Log.e(LOGTAG, "could not set communication mode", e);
}
am.setSpeakerphoneOn(!on);
}
private static String getLanguageTag(final Locale locale) {
final StringBuilder out = new StringBuilder(locale.getLanguage());
final String country = locale.getCountry();