/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <stdio.h>
|
|
|
|
#include <math.h>
|
|
|
|
#include "prlog.h"
|
2011-05-19 01:12:25 +04:00
|
|
|
#include "prdtoa.h"
|
2012-11-14 23:46:40 +04:00
|
|
|
#include "AudioStream.h"
|
2011-04-14 02:12:23 +04:00
|
|
|
#include "VideoUtils.h"
|
2012-11-19 04:54:29 +04:00
|
|
|
#include "mozilla/Monitor.h"
|
2011-05-19 01:12:25 +04:00
|
|
|
#include "mozilla/Mutex.h"
|
2013-01-15 16:22:03 +04:00
|
|
|
#include <algorithm>
|
2011-05-28 11:03:00 +04:00
|
|
|
#include "mozilla/Preferences.h"
|
2013-09-06 00:25:17 +04:00
|
|
|
#include "soundtouch/SoundTouch.h"
|
2013-01-28 22:22:37 +04:00
|
|
|
#include "Latency.h"
|
2011-05-28 11:03:00 +04:00
|
|
|
|
2012-11-14 23:45:33 +04:00
|
|
|
namespace mozilla {
|
2010-04-02 07:03:07 +04:00
|
|
|
|
2014-04-09 23:59:07 +04:00
|
|
|
#ifdef LOG
|
|
|
|
#undef LOG
|
|
|
|
#endif
|
|
|
|
|
2008-07-30 10:50:14 +04:00
|
|
|
#ifdef PR_LOGGING
|
2012-07-30 18:20:58 +04:00
|
|
|
PRLogModuleInfo* gAudioStreamLog = nullptr;
|
2014-04-09 23:59:07 +04:00
|
|
|
// For simple logs
|
|
|
|
#define LOG(x) PR_LOG(gAudioStreamLog, PR_LOG_DEBUG, x)
|
|
|
|
#else
|
|
|
|
#define LOG(x)
|
2008-07-30 10:50:14 +04:00
|
|
|
#endif
|
|
|
|
|
2013-04-04 03:12:27 +04:00
|
|
|
/**
|
|
|
|
* When MOZ_DUMP_AUDIO is set in the environment (to anything),
|
|
|
|
* we'll drop a series of files in the current working directory named
|
2013-11-28 09:09:08 +04:00
|
|
|
* dumped-audio-<nnn>.wav, one per AudioStream created, containing
|
2013-04-04 03:12:27 +04:00
|
|
|
* the audio for the stream including any skips due to underruns.
|
|
|
|
*/
|
|
|
|
static int gDumpedAudioCount = 0;
|
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
#define PREF_VOLUME_SCALE "media.volume_scale"
|
|
|
|
#define PREF_CUBEB_LATENCY "media.cubeb_latency_ms"
|
|
|
|
|
|
|
|
static const uint32_t CUBEB_NORMAL_LATENCY_MS = 100;
|
|
|
|
|
|
|
|
StaticMutex AudioStream::sMutex;
|
|
|
|
cubeb* AudioStream::sCubebContext;
|
|
|
|
uint32_t AudioStream::sPreferredSampleRate;
|
|
|
|
double AudioStream::sVolumeScale;
|
|
|
|
uint32_t AudioStream::sCubebLatency;
|
|
|
|
bool AudioStream::sCubebLatencyPrefSet;
|
|
|
|
|
2013-12-11 03:10:01 +04:00
|
|
|
// Preference observer callback. Re-reads the pref named by aPref and updates
// the corresponding cached static under sMutex. Registered in InitLibrary()
// for PREF_VOLUME_SCALE and PREF_CUBEB_LATENCY.
/*static*/ void AudioStream::PrefChanged(const char* aPref, void* aClosure)
{
  if (strcmp(aPref, PREF_VOLUME_SCALE) == 0) {
    // Global volume multiplier, parsed from a string pref; an empty pref
    // means "no scaling" (1.0). Negative values are clamped to 0.
    nsAdoptingString value = Preferences::GetString(aPref);
    StaticMutexAutoLock lock(sMutex);
    if (value.IsEmpty()) {
      sVolumeScale = 1.0;
    } else {
      NS_ConvertUTF16toUTF8 utf8(value);
      sVolumeScale = std::max<double>(0, PR_strtod(utf8.get(), nullptr));
    }
  } else if (strcmp(aPref, PREF_CUBEB_LATENCY) == 0) {
    // Arbitrary default stream latency of 100ms.  The higher this
    // value, the longer stream volume changes will take to become
    // audible.
    sCubebLatencyPrefSet = Preferences::HasUserValue(aPref);
    uint32_t value = Preferences::GetUint(aPref, CUBEB_NORMAL_LATENCY_MS);
    StaticMutexAutoLock lock(sMutex);
    // Clamp the requested latency to [1, 1000] ms.
    sCubebLatency = std::min<uint32_t>(std::max<uint32_t>(value, 1), 1000);
  }
}
|
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
// Returns the cached global volume multiplier (see PREF_VOLUME_SCALE),
// read under sMutex for a consistent value across threads.
/*static*/ double AudioStream::GetVolumeScale()
{
  StaticMutexAutoLock lock(sMutex);
  return sVolumeScale;
}
|
2010-11-17 07:14:19 +03:00
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
// Locking wrapper around GetCubebContextUnlocked(); returns the shared
// cubeb context, initializing it on first use, or nullptr on failure.
/*static*/ cubeb* AudioStream::GetCubebContext()
{
  StaticMutexAutoLock lock(sMutex);
  return GetCubebContextUnlocked();
}
|
2012-04-16 07:00:12 +04:00
|
|
|
|
2013-12-09 23:54:49 +04:00
|
|
|
/*static*/ void AudioStream::InitPreferredSampleRate()
|
|
|
|
{
|
|
|
|
StaticMutexAutoLock lock(sMutex);
|
2014-02-03 08:40:03 +04:00
|
|
|
if (sPreferredSampleRate == 0 &&
|
|
|
|
cubeb_get_preferred_sample_rate(GetCubebContextUnlocked(),
|
|
|
|
&sPreferredSampleRate) != CUBEB_OK) {
|
2013-12-09 23:54:49 +04:00
|
|
|
sPreferredSampleRate = 44100;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
// Returns the shared cubeb context, lazily created on first call.
// Caller must hold sMutex. Returns nullptr if cubeb_init fails.
/*static*/ cubeb* AudioStream::GetCubebContextUnlocked()
{
  sMutex.AssertCurrentThreadOwns();
  if (!sCubebContext &&
      cubeb_init(&sCubebContext, "AudioStream") != CUBEB_OK) {
    NS_WARNING("cubeb_init failed");
    return nullptr;
  }
  return sCubebContext;
}
|
2012-05-03 08:48:54 +04:00
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
// Returns the cached stream latency in milliseconds (clamped to [1, 1000]
// by PrefChanged), read under sMutex.
/*static*/ uint32_t AudioStream::GetCubebLatency()
{
  StaticMutexAutoLock lock(sMutex);
  return sCubebLatency;
}
|
2013-10-17 17:44:52 +04:00
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
// True if the user has explicitly set the PREF_CUBEB_LATENCY pref;
// used by OpenCubeb() to decide whether to request minimal latency.
/*static*/ bool AudioStream::CubebLatencyPrefSet()
{
  StaticMutexAutoLock lock(sMutex);
  return sCubebLatencyPrefSet;
}
|
2012-01-13 01:20:36 +04:00
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
#if defined(__ANDROID__) && defined(MOZ_B2G)
|
2014-04-10 21:39:20 +04:00
|
|
|
static cubeb_stream_type ConvertChannelToCubebType(dom::AudioChannel aChannel)
|
2013-03-12 07:46:32 +04:00
|
|
|
{
|
2014-04-10 21:39:20 +04:00
|
|
|
switch(aChannel) {
|
|
|
|
case dom::AudioChannel::Normal:
|
2013-03-12 07:46:32 +04:00
|
|
|
return CUBEB_STREAM_TYPE_SYSTEM;
|
2014-04-10 21:39:20 +04:00
|
|
|
case dom::AudioChannel::Content:
|
2013-03-12 07:46:32 +04:00
|
|
|
return CUBEB_STREAM_TYPE_MUSIC;
|
2014-04-10 21:39:20 +04:00
|
|
|
case dom::AudioChannel::Notification:
|
2013-03-12 07:46:32 +04:00
|
|
|
return CUBEB_STREAM_TYPE_NOTIFICATION;
|
2014-04-10 21:39:20 +04:00
|
|
|
case dom::AudioChannel::Alarm:
|
2013-03-12 07:46:32 +04:00
|
|
|
return CUBEB_STREAM_TYPE_ALARM;
|
2014-04-10 21:39:20 +04:00
|
|
|
case dom::AudioChannel::Telephony:
|
2013-03-12 07:46:32 +04:00
|
|
|
return CUBEB_STREAM_TYPE_VOICE_CALL;
|
2014-04-10 21:39:20 +04:00
|
|
|
case dom::AudioChannel::Ringer:
|
2013-03-12 07:46:32 +04:00
|
|
|
return CUBEB_STREAM_TYPE_RING;
|
|
|
|
// Currently Android openSLES library doesn't support FORCE_AUDIBLE yet.
|
2014-04-10 21:39:20 +04:00
|
|
|
case dom::AudioChannel::Publicnotification:
|
2013-03-12 07:46:32 +04:00
|
|
|
default:
|
2014-04-10 21:39:20 +04:00
|
|
|
NS_ERROR("The value of AudioChannel is invalid");
|
2013-03-12 07:46:32 +04:00
|
|
|
return CUBEB_STREAM_TYPE_MAX;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-11-22 14:38:28 +04:00
|
|
|
// Construct a stream in the INITIALIZED state. Rates, channel counts and
// the cubeb stream itself are configured later in Init().
AudioStream::AudioStream()
  : mMonitor("AudioStream")
  , mInRate(0)
  , mOutRate(0)
  , mChannels(0)
  , mOutChannels(0)
  , mWritten(0)
  , mAudioClock(MOZ_THIS_IN_INITIALIZER_LIST())
  , mLatencyRequest(HighLatency)
  , mReadPoint(0)
  , mLostFrames(0)
  , mDumpFile(nullptr)
  , mVolume(1.0)
  , mBytesPerFrame(0)
  , mState(INITIALIZED)
  , mNeedsStart(false)
{
  // keep a ref in case we shut down later than nsLayoutStatics
  mLatencyLog = AsyncLatencyLogger::Get(true);
}
|
2013-11-28 09:09:08 +04:00
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
AudioStream::~AudioStream()
{
  LOG(("AudioStream: delete %p, state %d", this, mState));
  // Stop playback and release the cubeb stream before tearing down
  // anything else.
  Shutdown();
  if (mDumpFile) {
    // Close the MOZ_DUMP_AUDIO debug dump file, if one was opened.
    fclose(mDumpFile);
  }
}
|
|
|
|
|
2014-04-13 22:08:10 +04:00
|
|
|
size_t
|
|
|
|
AudioStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
|
|
|
|
{
|
|
|
|
size_t amount = aMallocSizeOf(this);
|
|
|
|
|
|
|
|
// Possibly add in the future:
|
|
|
|
// - mTimeStretcher
|
|
|
|
// - mLatencyLog
|
|
|
|
// - mCubebStream
|
|
|
|
|
|
|
|
amount += mInserts.SizeOfExcludingThis(aMallocSizeOf);
|
|
|
|
amount += mBuffer.SizeOfExcludingThis(aMallocSizeOf);
|
|
|
|
|
|
|
|
return amount;
|
|
|
|
}
|
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
// One-time process-level setup: create the log module, seed the cached
// pref values, and register PrefChanged() as an observer for both prefs.
/*static*/ void AudioStream::InitLibrary()
{
#ifdef PR_LOGGING
  gAudioStreamLog = PR_NewLogModule("AudioStream");
#endif
  PrefChanged(PREF_VOLUME_SCALE, nullptr);
  Preferences::RegisterCallback(PrefChanged, PREF_VOLUME_SCALE);
  PrefChanged(PREF_CUBEB_LATENCY, nullptr);
  Preferences::RegisterCallback(PrefChanged, PREF_CUBEB_LATENCY);
}
|
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
// Process-level teardown: unregister the pref observers and destroy the
// shared cubeb context, if one was created.
/*static*/ void AudioStream::ShutdownLibrary()
{
  Preferences::UnregisterCallback(PrefChanged, PREF_VOLUME_SCALE);
  Preferences::UnregisterCallback(PrefChanged, PREF_CUBEB_LATENCY);

  StaticMutexAutoLock lock(sMutex);
  if (sCubebContext) {
    cubeb_destroy(sCubebContext);
    sCubebContext = nullptr;
  }
}
|
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
// Locking wrapper around EnsureTimeStretcherInitializedUnlocked().
nsresult AudioStream::EnsureTimeStretcherInitialized()
{
  MonitorAutoLock mon(mMonitor);
  return EnsureTimeStretcherInitializedUnlocked();
}
|
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
// Lazily create the SoundTouch time stretcher, configured for this
// stream's input rate and output channel count. Caller must hold mMonitor.
nsresult AudioStream::EnsureTimeStretcherInitializedUnlocked()
{
  mMonitor.AssertCurrentThreadOwns();
  if (mTimeStretcher) {
    // Already created.
    return NS_OK;
  }
  mTimeStretcher = new soundtouch::SoundTouch();
  mTimeStretcher->setSampleRate(mInRate);
  mTimeStretcher->setChannels(mOutChannels);
  mTimeStretcher->setPitch(1.0);
  return NS_OK;
}
|
|
|
|
|
|
|
|
// Change the playback rate. Depending on the pitch-preservation setting,
// the rate change is applied either as a SoundTouch tempo change
// (pitch preserved) or a raw rate change (pitch varies).
nsresult AudioStream::SetPlaybackRate(double aPlaybackRate)
{
  NS_ASSERTION(aPlaybackRate > 0.0,
    "Can't handle negative or null playbackrate in the AudioStream.");
  // Avoid instantiating the resampler if we are not changing the playback rate.
  if (aPlaybackRate == mAudioClock.GetPlaybackRate()) {
    return NS_OK;
  }

  if (EnsureTimeStretcherInitialized() != NS_OK) {
    return NS_ERROR_FAILURE;
  }

  mAudioClock.SetPlaybackRate(aPlaybackRate);
  // NOTE: integer division — mOutRate is truncated toward zero.
  mOutRate = mInRate / aPlaybackRate;

  if (mAudioClock.GetPreservesPitch()) {
    // Preserve pitch: express the change as tempo only.
    mTimeStretcher->setTempo(aPlaybackRate);
    mTimeStretcher->setRate(1.0f);
  } else {
    // Let pitch follow the rate change.
    mTimeStretcher->setTempo(1.0f);
    mTimeStretcher->setRate(aPlaybackRate);
  }
  return NS_OK;
}
|
|
|
|
|
|
|
|
// Toggle pitch preservation. When preserving pitch, the current playback
// rate is applied as a SoundTouch tempo change; otherwise as a raw rate
// change.
nsresult AudioStream::SetPreservesPitch(bool aPreservesPitch)
{
  // Avoid instantiating the timestretcher instance if not needed.
  if (aPreservesPitch == mAudioClock.GetPreservesPitch()) {
    return NS_OK;
  }

  if (EnsureTimeStretcherInitialized() != NS_OK) {
    return NS_ERROR_FAILURE;
  }

  const double rate = mAudioClock.GetPlaybackRate();
  if (aPreservesPitch) {
    mTimeStretcher->setTempo(rate);
    mTimeStretcher->setRate(1.0f);
  } else {
    mTimeStretcher->setTempo(1.0f);
    mTimeStretcher->setRate(rate);
  }

  mAudioClock.SetPreservesPitch(aPreservesPitch);

  return NS_OK;
}
|
|
|
|
|
2012-11-26 18:13:08 +04:00
|
|
|
// Total number of frames accepted by Write() since this stream was created.
int64_t AudioStream::GetWritten()
{
  return mWritten;
}
|
|
|
|
|
2013-11-28 09:09:08 +04:00
|
|
|
/*static*/ int AudioStream::MaxNumberOfChannels()
|
2013-11-29 06:50:16 +04:00
|
|
|
{
|
2013-11-28 09:09:08 +04:00
|
|
|
cubeb* cubebContext = GetCubebContext();
|
2013-10-23 19:44:13 +04:00
|
|
|
uint32_t maxNumberOfChannels;
|
2013-11-28 09:09:08 +04:00
|
|
|
if (cubebContext &&
|
|
|
|
cubeb_get_max_channel_count(cubebContext,
|
2013-10-23 19:44:13 +04:00
|
|
|
&maxNumberOfChannels) == CUBEB_OK) {
|
|
|
|
return static_cast<int>(maxNumberOfChannels);
|
2013-06-10 21:32:28 +04:00
|
|
|
}
|
|
|
|
|
2013-10-23 19:44:13 +04:00
|
|
|
return 0;
|
2013-06-10 21:32:28 +04:00
|
|
|
}
|
|
|
|
|
2013-12-09 02:49:25 +04:00
|
|
|
// Returns the backend's preferred sample rate. InitPreferredSampleRate()
// must have been called first.
/*static*/ int AudioStream::PreferredSampleRate()
{
  MOZ_ASSERT(sPreferredSampleRate,
             "sPreferredSampleRate has not been initialized!");
  return sPreferredSampleRate;
}
|
|
|
|
|
2013-06-13 05:26:59 +04:00
|
|
|
// Store a 16-bit value at aDest in little-endian byte order, independent
// of host endianness.
static void SetUint16LE(uint8_t* aDest, uint16_t aValue)
{
  aDest[0] = static_cast<uint8_t>(aValue);
  aDest[1] = static_cast<uint8_t>(aValue >> 8);
}
|
|
|
|
|
2013-06-13 05:26:59 +04:00
|
|
|
// Store a 32-bit value at aDest in little-endian byte order, independent
// of host endianness.
static void SetUint32LE(uint8_t* aDest, uint32_t aValue)
{
  for (int i = 0; i < 4; ++i) {
    aDest[i] = static_cast<uint8_t>((aValue >> (8 * i)) & 0xFF);
  }
}
|
|
|
|
|
|
|
|
static FILE*
|
|
|
|
OpenDumpFile(AudioStream* aStream)
|
|
|
|
{
|
|
|
|
if (!getenv("MOZ_DUMP_AUDIO"))
|
|
|
|
return nullptr;
|
|
|
|
char buf[100];
|
|
|
|
sprintf(buf, "dumped-audio-%d.wav", gDumpedAudioCount);
|
|
|
|
FILE* f = fopen(buf, "wb");
|
|
|
|
if (!f)
|
|
|
|
return nullptr;
|
|
|
|
++gDumpedAudioCount;
|
|
|
|
|
2013-06-13 05:26:59 +04:00
|
|
|
uint8_t header[] = {
|
2013-04-04 03:12:27 +04:00
|
|
|
// RIFF header
|
|
|
|
0x52, 0x49, 0x46, 0x46, 0x00, 0x00, 0x00, 0x00, 0x57, 0x41, 0x56, 0x45,
|
|
|
|
// fmt chunk. We always write 16-bit samples.
|
|
|
|
0x66, 0x6d, 0x74, 0x20, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0xFF, 0xFF,
|
|
|
|
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x10, 0x00,
|
|
|
|
// data chunk
|
|
|
|
0x64, 0x61, 0x74, 0x61, 0xFE, 0xFF, 0xFF, 0x7F
|
|
|
|
};
|
|
|
|
static const int CHANNEL_OFFSET = 22;
|
|
|
|
static const int SAMPLE_RATE_OFFSET = 24;
|
|
|
|
static const int BLOCK_ALIGN_OFFSET = 32;
|
|
|
|
SetUint16LE(header + CHANNEL_OFFSET, aStream->GetChannels());
|
|
|
|
SetUint32LE(header + SAMPLE_RATE_OFFSET, aStream->GetRate());
|
|
|
|
SetUint16LE(header + BLOCK_ALIGN_OFFSET, aStream->GetChannels()*2);
|
|
|
|
fwrite(header, sizeof(header), 1, f);
|
|
|
|
|
|
|
|
return f;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2013-06-13 05:26:59 +04:00
|
|
|
WriteDumpFile(FILE* aDumpFile, AudioStream* aStream, uint32_t aFrames,
|
2013-04-04 03:12:27 +04:00
|
|
|
void* aBuffer)
|
|
|
|
{
|
|
|
|
if (!aDumpFile)
|
|
|
|
return;
|
|
|
|
|
2013-09-26 23:06:59 +04:00
|
|
|
uint32_t samples = aStream->GetOutChannels()*aFrames;
|
2013-04-04 03:12:27 +04:00
|
|
|
if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
|
|
|
|
fwrite(aBuffer, 2, samples, aDumpFile);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
NS_ASSERTION(AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_FLOAT32, "bad format");
|
2013-06-13 05:26:59 +04:00
|
|
|
nsAutoTArray<uint8_t, 1024*2> buf;
|
2013-04-04 03:12:27 +04:00
|
|
|
buf.SetLength(samples*2);
|
|
|
|
float* input = static_cast<float*>(aBuffer);
|
2013-06-13 05:26:59 +04:00
|
|
|
uint8_t* output = buf.Elements();
|
|
|
|
for (uint32_t i = 0; i < samples; ++i) {
|
|
|
|
SetUint16LE(output + i*2, int16_t(input[i]*32767.0f));
|
2013-04-04 03:12:27 +04:00
|
|
|
}
|
|
|
|
fwrite(output, 2, samples, aDumpFile);
|
|
|
|
fflush(aDumpFile);
|
|
|
|
}
|
|
|
|
|
2014-04-09 23:59:07 +04:00
|
|
|
// NOTE: this must not block a LowLatency stream for any significant amount
|
|
|
|
// of time, or it will block the entirety of MSG
|
2013-03-04 18:48:58 +04:00
|
|
|
// Configure the stream (rates, channels, cubeb parameters) and open the
// underlying cubeb stream. For LowLatency requests the cubeb open is
// dispatched to a background AudioInitTask and this returns immediately;
// for HighLatency the open happens synchronously here.
nsresult
AudioStream::Init(int32_t aNumChannels, int32_t aRate,
                  const dom::AudioChannel aAudioChannel,
                  LatencyRequest aLatencyRequest)
{
  if (!GetCubebContext() || aNumChannels < 0 || aRate < 0) {
    return NS_ERROR_FAILURE;
  }

  PR_LOG(gAudioStreamLog, PR_LOG_DEBUG,
    ("%s channels: %d, rate: %d for %p", __FUNCTION__, aNumChannels, aRate, this));
  mInRate = mOutRate = aRate;
  mChannels = aNumChannels;
  // Output is capped at stereo; Write() downmixes >2-channel input.
  mOutChannels = (aNumChannels > 2) ? 2 : aNumChannels;
  mLatencyRequest = aLatencyRequest;

  mDumpFile = OpenDumpFile(this);

  cubeb_stream_params params;
  params.rate = aRate;
  params.channels = mOutChannels;
#if defined(__ANDROID__)
#if defined(MOZ_B2G)
  params.stream_type = ConvertChannelToCubebType(aAudioChannel);
#else
  params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
#endif

  if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
    return NS_ERROR_INVALID_ARG;
  }
#endif
  if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
    params.format = CUBEB_SAMPLE_S16NE;
  } else {
    params.format = CUBEB_SAMPLE_FLOAT32NE;
  }
  mBytesPerFrame = sizeof(AudioDataValue) * mOutChannels;

  mAudioClock.Init();

  // Size mBuffer for one second of audio. This value is arbitrary, and was
  // selected based on the observed behaviour of the existing AudioStream
  // implementations.
  uint32_t bufferLimit = FramesToBytes(aRate);
  NS_ABORT_IF_FALSE(bufferLimit % mBytesPerFrame == 0, "Must buffer complete frames");
  mBuffer.SetCapacity(bufferLimit);

  if (aLatencyRequest == LowLatency) {
    // Don't block this thread to initialize a cubeb stream.
    // When this is done, it will start callbacks from Cubeb.  Those will
    // cause us to move from INITIALIZED to RUNNING.  Until then, we
    // can't access any cubeb functions.
    // Use a RefPtr to avoid leaks if Dispatch fails
    RefPtr<AudioInitTask> init = new AudioInitTask(this, aLatencyRequest, params);
    init->Dispatch();
    return NS_OK;
  }
  // High latency - open synchronously
  nsresult rv = OpenCubeb(params, aLatencyRequest);
  // See if we need to start() the stream, since we must do that from this
  // thread for now (cubeb API issue)
  CheckForStart();
  return rv;
}
|
|
|
|
|
|
|
|
// This code used to live inside AudioStream::Init(), but on Mac (others?)
|
|
|
|
// it has been known to take 300-800 (or even 8500) ms to execute(!)
|
|
|
|
// Open the underlying cubeb stream with the given parameters. May run on
// the main thread (high latency) or on an AudioInitTask thread (low
// latency). On failure, mState moves to ERRORED under the monitor.
nsresult
AudioStream::OpenCubeb(cubeb_stream_params &aParams,
                       LatencyRequest aLatencyRequest)
{
  cubeb* cubebContext = GetCubebContext();
  if (!cubebContext) {
    MonitorAutoLock mon(mMonitor);
    mState = AudioStream::ERRORED;
    return NS_ERROR_FAILURE;
  }

  // If the latency pref is set, use it. Otherwise, if this stream is intended
  // for low latency playback, try to get the lowest latency possible.
  // Otherwise, for normal streams, use 100ms.
  uint32_t latency;
  if (aLatencyRequest == LowLatency && !CubebLatencyPrefSet()) {
    if (cubeb_get_min_latency(cubebContext, aParams, &latency) != CUBEB_OK) {
      latency = GetCubebLatency();
    }
  } else {
    latency = GetCubebLatency();
  }

  {
    cubeb_stream* stream;
    if (cubeb_stream_init(cubebContext, &stream, "AudioStream", aParams,
                          latency, DataCallback_S, StateCallback_S, this) == CUBEB_OK) {
      MonitorAutoLock mon(mMonitor);
      mCubebStream.own(stream);
      // Make sure we weren't shut down while in flight!
      if (mState == SHUTDOWN) {
        mCubebStream.reset();
        LOG(("AudioStream::OpenCubeb() %p Shutdown while opening cubeb", this));
        return NS_ERROR_FAILURE;
      }

      // We can't cubeb_stream_start() the thread from a transient thread due to
      // cubeb API requirements (init can be called from another thread, but
      // not start/stop/destroy/etc)
    } else {
      MonitorAutoLock mon(mMonitor);
      mState = ERRORED;
      LOG(("AudioStream::OpenCubeb() %p failed to init cubeb", this));
      return NS_ERROR_FAILURE;
    }
  }

  return NS_OK;
}
|
|
|
|
|
|
|
|
// If the stream is still INITIALIZED, start it when appropriate: always
// for low-latency streams, or when a Start() was queued (mNeedsStart)
// while the cubeb stream was still being opened.
void
AudioStream::CheckForStart()
{
  if (mState == INITIALIZED) {
    // Start the stream right away when low latency has been requested. This means
    // that the DataCallback will feed silence to cubeb, until the first frames
    // are written to this AudioStream.  Also start if a start has been queued.
    if (mLatencyRequest == LowLatency || mNeedsStart) {
      StartUnlocked(); // mState = STARTED or ERRORED
      mNeedsStart = false;
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("Started waiting %s-latency stream",
              mLatencyRequest == LowLatency ? "low" : "high"));
    } else {
      // high latency, not full - OR Pause() was called before we got here
      PR_LOG(gAudioStreamLog, PR_LOG_DEBUG,
             ("Not starting waiting %s-latency stream",
              mLatencyRequest == LowLatency ? "low" : "high"));
    }
  }
}
|
|
|
|
|
|
|
|
// Runs twice: first on the AudioInitTask thread (opens cubeb, then
// re-dispatches itself to the main thread), then on the main thread
// (shuts down the task thread).
NS_IMETHODIMP
AudioInitTask::Run()
{
  MOZ_ASSERT(mThread);
  if (NS_IsMainThread()) {
    mThread->Shutdown(); // can't Shutdown from the thread itself, darn
    // Don't null out mThread!
    // See bug 999104.  We must hold a ref to the thread across Dispatch()
    // since the internal mThread ref could be released while processing
    // the Dispatch(), and Dispatch/PutEvent itself doesn't hold a ref; it
    // assumes the caller does.
    return NS_OK;
  }

  nsresult rv = mAudioStream->OpenCubeb(mParams, mLatencyRequest);

  // and now kill this thread
  NS_DispatchToMainThread(this);
  return rv;
}
|
|
|
|
|
2013-10-26 02:13:42 +04:00
|
|
|
// aTime is the time in ms the samples were inserted into MediaStreamGraph
|
2012-01-13 01:20:36 +04:00
|
|
|
// Queue aFrames frames of audio for playback. For low-latency streams that
// are not yet RUNNING, excess data is dropped rather than blocking the
// caller (MediaStreamGraph); otherwise this blocks on the monitor until
// buffer space frees up. aTime, when non-null, is the insertion timestamp
// used for latency logging.
nsresult
AudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames, TimeStamp *aTime)
{
  MonitorAutoLock mon(mMonitor);
  if (mState == ERRORED) {
    return NS_ERROR_FAILURE;
  }
  NS_ASSERTION(mState == INITIALIZED || mState == STARTED || mState == RUNNING,
    "Stream write in unexpected state.");

  // See if we need to start() the stream, since we must do that from this thread
  CheckForStart();

  // Downmix to Stereo.
  if (mChannels > 2 && mChannels <= 8) {
    DownmixAudioToStereo(const_cast<AudioDataValue*> (aBuf), mChannels, aFrames);
  }
  else if (mChannels > 8) {
    return NS_ERROR_FAILURE;
  }

  const uint8_t* src = reinterpret_cast<const uint8_t*>(aBuf);
  uint32_t bytesToCopy = FramesToBytes(aFrames);

  // XXX this will need to change if we want to enable this on-the-fly!
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
    // Record the position and time this data was inserted
    int64_t timeMs;
    if (aTime && !aTime->IsNull()) {
      if (mStartTime.IsNull()) {
        AsyncLatencyLogger::Get(true)->GetStartTime(mStartTime);
      }
      timeMs = (*aTime - mStartTime).ToMilliseconds();
    } else {
      timeMs = 0;
    }
    struct Inserts insert = { timeMs, aFrames};
    mInserts.AppendElement(insert);
  }

  while (bytesToCopy > 0) {
    uint32_t available = std::min(bytesToCopy, mBuffer.Available());
    NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0,
        "Must copy complete frames.");

    mBuffer.AppendElements(src, available);
    src += available;
    bytesToCopy -= available;

    if (bytesToCopy > 0) {
      // Careful - the CubebInit thread may not have gotten to STARTED yet
      if ((mState == INITIALIZED || mState == STARTED) && mLatencyRequest == LowLatency) {
        // don't ever block MediaStreamGraph low-latency streams
        uint32_t remains = 0; // we presume the buffer is full
        if (mBuffer.Length() > bytesToCopy) {
          remains = mBuffer.Length() - bytesToCopy; // Free up just enough space
        }
        // account for dropping samples
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING, ("Stream %p dropping %u bytes (%u frames)in Write()",
            this, mBuffer.Length() - remains, BytesToFrames(mBuffer.Length() - remains)));
        mReadPoint += BytesToFrames(mBuffer.Length() - remains);
        mBuffer.ContractTo(remains);
      } else { // RUNNING or high latency
        // If we are not playing, but our buffer is full, start playing to make
        // room for soon-to-be-decoded data.
        if (mState != STARTED && mState != RUNNING) {
          PR_LOG(gAudioStreamLog, PR_LOG_WARNING, ("Starting stream %p in Write (%u waiting)",
                                                 this, bytesToCopy));
          StartUnlocked();
          if (mState == ERRORED) {
            return NS_ERROR_FAILURE;
          }
        }
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING, ("Stream %p waiting in Write() (%u waiting)",
                                                 this, bytesToCopy));
        // Wait for the data callback to consume buffered data and signal us.
        mon.Wait();
      }
    }
  }

  mWritten += aFrames;
  return NS_OK;
}
|
|
|
|
|
2012-08-22 19:56:38 +04:00
|
|
|
// Number of frames of free space in the internal buffer, i.e. how many
// frames Write() can currently accept without waiting.
uint32_t
AudioStream::Available()
{
  MonitorAutoLock mon(mMonitor);
  NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Buffer invariant violated.");
  return BytesToFrames(mBuffer.Available());
}
|
|
|
|
|
|
|
|
// Set the per-stream volume, in [0.0, 1.0].
void
AudioStream::SetVolume(double aVolume)
{
  MonitorAutoLock mon(mMonitor);
  NS_ABORT_IF_FALSE(aVolume >= 0.0 && aVolume <= 1.0, "Invalid volume");
  mVolume = aVolume;
}
|
|
|
|
|
|
|
|
// Block the caller until all queued audio has been played out (mState
// leaves DRAINING). No-op unless the stream is currently playing.
void
AudioStream::Drain()
{
  MonitorAutoLock mon(mMonitor);
  LOG(("AudioStream::Drain() for %p, state %d, avail %u", this, mState, mBuffer.Available()));
  if (mState != STARTED && mState != RUNNING) {
    NS_ASSERTION(mState == ERRORED || mBuffer.Available() == 0, "Draining without full buffer of unplayed audio");
    return;
  }
  mState = DRAINING;
  // Wait until another thread moves us out of DRAINING and signals the
  // monitor.
  while (mState == DRAINING) {
    mon.Wait();
  }
}
|
|
|
|
|
2013-01-23 09:53:10 +04:00
|
|
|
// Public entry point: take the monitor and start (or queue a start of)
// playback.
void
AudioStream::Start()
{
  MonitorAutoLock mon(mMonitor);
  StartUnlocked();
}
|
|
|
|
|
2013-01-23 09:53:10 +04:00
|
|
|
// Start playback. Caller must hold mMonitor. If the cubeb stream is still
// being opened asynchronously, just record that a start is wanted;
// CheckForStart() will pick it up once the stream exists.
void
AudioStream::StartUnlocked()
{
  mMonitor.AssertCurrentThreadOwns();
  if (!mCubebStream) {
    mNeedsStart = true;
    return;
  }
  // Release the monitor while calling into cubeb.
  MonitorAutoUnlock mon(mMonitor);
  if (mState == INITIALIZED) {
    int r = cubeb_stream_start(mCubebStream);
    mState = r == CUBEB_OK ? STARTED : ERRORED;
    LOG(("AudioStream: started %p, state %s", this, mState == STARTED ? "STARTED" : "ERRORED"));
  }
}
|
|
|
|
|
2012-01-13 01:20:36 +04:00
|
|
|
// Stop playback. If the stream isn't playing (or the cubeb stream isn't
// open yet), just cancel any queued start and mark the stream STOPPED.
void
AudioStream::Pause()
{
  MonitorAutoLock mon(mMonitor);
  if (!mCubebStream || (mState != STARTED && mState != RUNNING)) {
    mNeedsStart = false;
    mState = STOPPED; // which also tells async OpenCubeb not to start, just init
    return;
  }

  int r;
  {
    // Release the monitor while calling into cubeb.
    MonitorAutoUnlock mon(mMonitor);
    r = cubeb_stream_stop(mCubebStream);
  }
  if (mState != ERRORED && r == CUBEB_OK) {
    mState = STOPPED;
  }
}
|
|
|
|
|
|
|
|
void
|
2013-11-28 09:09:08 +04:00
|
|
|
AudioStream::Resume()
|
2012-01-13 01:20:36 +04:00
|
|
|
{
|
|
|
|
MonitorAutoLock mon(mMonitor);
|
|
|
|
if (!mCubebStream || mState != STOPPED) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-06-01 08:45:01 +04:00
|
|
|
int r;
|
|
|
|
{
|
|
|
|
MonitorAutoUnlock mon(mMonitor);
|
|
|
|
r = cubeb_stream_start(mCubebStream);
|
|
|
|
}
|
|
|
|
if (mState != ERRORED && r == CUBEB_OK) {
|
2012-01-13 01:20:36 +04:00
|
|
|
mState = STARTED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-09 23:59:07 +04:00
|
|
|
void
|
|
|
|
AudioStream::Shutdown()
|
|
|
|
{
|
|
|
|
LOG(("AudioStream: Shutdown %p, state %d", this, mState));
|
|
|
|
{
|
|
|
|
MonitorAutoLock mon(mMonitor);
|
|
|
|
if (mState == STARTED || mState == RUNNING) {
|
|
|
|
MonitorAutoUnlock mon(mMonitor);
|
|
|
|
Pause();
|
|
|
|
}
|
|
|
|
MOZ_ASSERT(mState != STARTED && mState != RUNNING); // paranoia
|
|
|
|
mState = SHUTDOWN;
|
|
|
|
}
|
|
|
|
// Must not try to shut down cubeb from within the lock! wasapi may still
|
|
|
|
// call our callback after Pause()/stop()!?! Bug 996162
|
|
|
|
if (mCubebStream) {
|
|
|
|
mCubebStream.reset();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-22 19:56:38 +04:00
|
|
|
int64_t
AudioStream::GetPosition()
{
  // Playback position as tracked by the rate-change-aware audio clock (in
  // the clock's USECS_PER_S-based units). Note AudioClock::GetPosition()
  // returns uint64_t and yields UINT64_MAX on failure, which converts to -1
  // in this signed return type.
  return mAudioClock.GetPosition();
}
|
|
|
|
|
2012-07-17 01:21:04 +04:00
|
|
|
// This function is miscompiled by PGO with MSVC 2010. See bug 768333.
#ifdef _MSC_VER
#pragma optimize("", off)
#endif
int64_t
AudioStream::GetPositionInFrames()
{
  // Playback position expressed in frames, delegated to the rate-aware
  // audio clock.
  return mAudioClock.GetPositionInFrames();
}
#ifdef _MSC_VER
#pragma optimize("", on)
#endif
|
2012-01-13 01:20:36 +04:00
|
|
|
|
2012-11-22 14:38:28 +04:00
|
|
|
int64_t
AudioStream::GetPositionInFramesInternal()
{
  // Locking wrapper around GetPositionInFramesUnlocked().
  MonitorAutoLock mon(mMonitor);
  return GetPositionInFramesUnlocked();
}
|
|
|
|
|
2012-08-22 19:56:38 +04:00
|
|
|
int64_t
AudioStream::GetPositionInFramesUnlocked()
{
  // Returns the backend playback position in frames, adjusted for silent
  // frames injected during underruns, or -1 on failure. Caller must hold
  // mMonitor.
  mMonitor.AssertCurrentThreadOwns();

  if (!mCubebStream || mState == ERRORED) {
    return -1;
  }

  uint64_t position = 0;
  {
    // Release the monitor while querying cubeb, matching the pattern used
    // for all other cubeb calls in this file.
    MonitorAutoUnlock mon(mMonitor);
    if (cubeb_stream_get_position(mCubebStream, &position) != CUBEB_OK) {
      return -1;
    }
  }

  // Adjust the reported position by the number of silent frames written
  // during stream underruns.
  uint64_t adjustedPosition = 0;
  if (position >= mLostFrames) {
    adjustedPosition = position - mLostFrames;
  }
  // Clamp to INT64_MAX so the value fits the signed return type.
  return std::min<uint64_t>(adjustedPosition, INT64_MAX);
}
|
|
|
|
|
2013-09-17 10:39:30 +04:00
|
|
|
int64_t
|
2013-11-28 09:09:08 +04:00
|
|
|
AudioStream::GetLatencyInFrames()
|
2013-09-17 10:39:30 +04:00
|
|
|
{
|
|
|
|
uint32_t latency;
|
2013-11-28 09:09:08 +04:00
|
|
|
if (cubeb_stream_get_latency(mCubebStream, &latency)) {
|
2013-09-17 10:39:30 +04:00
|
|
|
NS_WARNING("Could not get cubeb latency.");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return static_cast<int64_t>(latency);
|
|
|
|
}
|
|
|
|
|
2012-01-13 01:20:36 +04:00
|
|
|
bool
AudioStream::IsPaused()
{
  // A stream is "paused" iff it has been explicitly stopped (see Pause()).
  MonitorAutoLock mon(mMonitor);
  return mState == STOPPED;
}
|
|
|
|
|
2013-10-26 02:13:42 +04:00
|
|
|
void
|
2013-11-28 09:09:08 +04:00
|
|
|
AudioStream::GetBufferInsertTime(int64_t &aTimeMs)
|
2013-10-26 02:13:42 +04:00
|
|
|
{
|
|
|
|
if (mInserts.Length() > 0) {
|
|
|
|
// Find the right block, but don't leave the array empty
|
|
|
|
while (mInserts.Length() > 1 && mReadPoint >= mInserts[0].mFrames) {
|
|
|
|
mReadPoint -= mInserts[0].mFrames;
|
|
|
|
mInserts.RemoveElementAt(0);
|
|
|
|
}
|
|
|
|
// offset for amount already read
|
|
|
|
// XXX Note: could misreport if we couldn't find a block in the right timeframe
|
|
|
|
aTimeMs = mInserts[0].mTimeMs + ((mReadPoint * 1000) / mOutRate);
|
|
|
|
} else {
|
|
|
|
aTimeMs = INT64_MAX;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-11-22 14:38:28 +04:00
|
|
|
long
AudioStream::GetUnprocessed(void* aBuffer, long aFrames, int64_t &aTimeMs)
{
  // Copy up to aFrames frames of buffered audio into aBuffer without time
  // stretching. Returns the number of frames written; aTimeMs receives the
  // insertion time of the data (for latency logging).
  uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);

  // Flush the timestretcher pipeline, if we were playing using a playback rate
  // other than 1.0.
  uint32_t flushedFrames = 0;
  if (mTimeStretcher && mTimeStretcher->numSamples()) {
    flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
    wpos += FramesToBytes(flushedFrames);
  }
  // Remaining space after whatever the stretcher flushed, capped at what the
  // buffer actually holds.
  uint32_t toPopBytes = FramesToBytes(aFrames - flushedFrames);
  uint32_t available = std::min(toPopBytes, mBuffer.Length());

  // PopElements may hand the data back as two segments; copy both.
  void* input[2];
  uint32_t input_size[2];
  mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]);
  memcpy(wpos, input[0], input_size[0]);
  wpos += input_size[0];
  memcpy(wpos, input[1], input_size[1]);

  // First time block now has our first returned sample
  mReadPoint += BytesToFrames(available);
  GetBufferInsertTime(aTimeMs);

  return BytesToFrames(available) + flushedFrames;
}
|
|
|
|
|
2013-11-19 01:43:15 +04:00
|
|
|
// Get unprocessed samples, and pad the beginning of the buffer with silence if
// there is not enough data.
long
AudioStream::GetUnprocessedWithSilencePadding(void* aBuffer, long aFrames, int64_t& aTimeMs)
{
  uint32_t toPopBytes = FramesToBytes(aFrames);
  uint32_t available = std::min(toPopBytes, mBuffer.Length());
  // Any shortfall becomes leading silence, so the real data ends flush with
  // the end of the callback buffer.
  uint32_t silenceOffset = toPopBytes - available;

  uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);

  memset(wpos, 0, silenceOffset);
  wpos += silenceOffset;

  // PopElements may hand the data back as two segments; copy both.
  void* input[2];
  uint32_t input_size[2];
  mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]);
  memcpy(wpos, input[0], input_size[0]);
  wpos += input_size[0];
  memcpy(wpos, input[1], input_size[1]);

  GetBufferInsertTime(aTimeMs);

  // Always reports a full buffer: leading silence plus real data.
  return aFrames;
}
|
|
|
|
|
2012-11-22 14:38:28 +04:00
|
|
|
long
AudioStream::GetTimeStretched(void* aBuffer, long aFrames, int64_t &aTimeMs)
{
  // Pop buffered audio through the time stretcher (used when the playback
  // rate differs from 1.0) and write up to aFrames output frames to aBuffer.
  // Returns the number of frames produced; aTimeMs receives the insertion
  // time of the data.
  long processedFrames = 0;

  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return 0;
  }

  uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
  double playbackRate = static_cast<double>(mInRate) / mOutRate;
  // Scale the amount of input to pop by the effective playback rate.
  uint32_t toPopBytes = FramesToBytes(ceil(aFrames / playbackRate));
  uint32_t available = 0;
  bool lowOnBufferedData = false;
  do {
    // Check if we already have enough data in the time stretcher pipeline.
    if (mTimeStretcher->numSamples() <= static_cast<uint32_t>(aFrames)) {
      void* input[2];
      uint32_t input_size[2];
      available = std::min(mBuffer.Length(), toPopBytes);
      // A short pop means the buffer is nearly empty; exit after this pass
      // rather than spinning.
      if (available != toPopBytes) {
        lowOnBufferedData = true;
      }
      mBuffer.PopElements(available, &input[0], &input_size[0],
                          &input[1], &input_size[1]);
      mReadPoint += BytesToFrames(available);
      // Feed both segments of the (possibly split) pop into the stretcher.
      for(uint32_t i = 0; i < 2; i++) {
        mTimeStretcher->putSamples(reinterpret_cast<AudioDataValue*>(input[i]), BytesToFrames(input_size[i]));
      }
    }
    uint32_t receivedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames - processedFrames);
    wpos += FramesToBytes(receivedFrames);
    processedFrames += receivedFrames;
  } while (processedFrames < aFrames && !lowOnBufferedData);

  GetBufferInsertTime(aTimeMs);

  return processedFrames;
}
|
|
|
|
|
2012-01-13 01:20:36 +04:00
|
|
|
long
AudioStream::DataCallback(void* aBuffer, long aFrames)
{
  // cubeb data callback: fill aBuffer with up to aFrames frames of audio,
  // applying volume scaling and (when rates differ) time stretching. Frames
  // we cannot service are zero-filled (except while DRAINING) and counted as
  // lost. Returns the number of frames "serviced" (audio + silence).
  MonitorAutoLock mon(mMonitor);
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;
  // Set by the Get* helpers (or GetBufferInsertTime below); INT64_MAX means
  // "no buffered data to attribute a time to".
  int64_t insertTime;

  // NOTE: wasapi (others?) can call us back *after* stop()/Shutdown() (mState == SHUTDOWN)
  // Bug 996162

  // callback tells us cubeb succeeded initializing
  if (mState == STARTED) {
    // For low-latency streams, we want to minimize any built-up data when
    // we start getting callbacks.
    // Simple version - contract on first callback only.
    if (mLatencyRequest == LowLatency) {
#ifdef PR_LOGGING
      uint32_t old_len = mBuffer.Length();
#endif
      available = mBuffer.ContractTo(FramesToBytes(aFrames));
#ifdef PR_LOGGING
      TimeStamp now = TimeStamp::Now();
      if (!mStartTime.IsNull()) {
        int64_t timeMs = (now - mStartTime).ToMilliseconds();
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("Stream took %lldms to start after first Write() @ %u", timeMs, mOutRate));
      } else {
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("Stream started before Write() @ %u", mOutRate));
      }

      if (old_len != available) {
        // Note that we may have dropped samples in Write() as well!
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("AudioStream %p dropped %u + %u initial frames @ %u", this,
                mReadPoint, BytesToFrames(old_len - available), mOutRate));
        mReadPoint += BytesToFrames(old_len - available);
      }
#endif
    }
    mState = RUNNING;
  }

  if (available) {
    // When we are playing a low latency stream, and it is the first time we are
    // getting data from the buffer, we prefer to add the silence for an
    // underrun at the beginning of the buffer, so the first buffer is not cut
    // in half by the silence inserted to compensate for the underrun.
    if (mInRate == mOutRate) {
      if (mLatencyRequest == LowLatency && !mWritten) {
        servicedFrames = GetUnprocessedWithSilencePadding(output, aFrames, insertTime);
      } else {
        servicedFrames = GetUnprocessed(output, aFrames, insertTime);
      }
    } else {
      // Rates differ: the playback rate is not 1.0, so time-stretch.
      servicedFrames = GetTimeStretched(output, aFrames, insertTime);
    }
    // Combine the global volume scale with this stream's volume.
    float scaled_volume = float(GetVolumeScale() * mVolume);

    ScaleAudioSamples(output, aFrames * mOutChannels, scaled_volume);

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  } else {
    GetBufferInsertTime(insertTime);
  }

  underrunFrames = aFrames - servicedFrames;

  // Zero-fill the unserviced tail and account for it, unless we're draining
  // (while DRAINING, returning fewer frames signals end-of-stream to cubeb).
  if (mState != DRAINING) {
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
    if (underrunFrames) {
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("AudioStream %p lost %d frames", this, underrunFrames));
    }
    mLostFrames += underrunFrames;
    servicedFrames += underrunFrames;
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
  // Don't log if we're not interested or if the stream is inactive
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG) &&
      mState != SHUTDOWN &&
      insertTime != INT64_MAX && servicedFrames > underrunFrames) {
    uint32_t latency = UINT32_MAX;
    if (cubeb_stream_get_latency(mCubebStream, &latency)) {
      NS_WARNING("Could not get latency from cubeb.");
    }
    TimeStamp now = TimeStamp::Now();

    mLatencyLog->Log(AsyncLatencyLogger::AudioStream, reinterpret_cast<uint64_t>(this),
                     insertTime, now);
    mLatencyLog->Log(AsyncLatencyLogger::Cubeb, reinterpret_cast<uint64_t>(mCubebStream.get()),
                     (latency * 1000) / mOutRate, now);
  }

  mAudioClock.UpdateWritePosition(servicedFrames);
  return servicedFrames;
}
|
|
|
|
|
2012-07-17 01:15:24 +04:00
|
|
|
void
|
2013-11-28 09:09:08 +04:00
|
|
|
AudioStream::StateCallback(cubeb_state aState)
|
2012-01-13 01:20:36 +04:00
|
|
|
{
|
2012-06-01 08:45:01 +04:00
|
|
|
MonitorAutoLock mon(mMonitor);
|
2012-01-13 01:20:36 +04:00
|
|
|
if (aState == CUBEB_STATE_DRAINED) {
|
|
|
|
mState = DRAINED;
|
2012-04-16 07:00:40 +04:00
|
|
|
} else if (aState == CUBEB_STATE_ERROR) {
|
2014-04-09 23:59:07 +04:00
|
|
|
LOG(("AudioStream::StateCallback() state %d cubeb error", mState));
|
2012-04-16 07:00:40 +04:00
|
|
|
mState = ERRORED;
|
2012-01-13 01:20:36 +04:00
|
|
|
}
|
2012-06-01 08:45:01 +04:00
|
|
|
mon.NotifyAll();
|
2012-01-13 01:20:36 +04:00
|
|
|
}
|
2012-11-22 14:38:28 +04:00
|
|
|
|
|
|
|
// Tracks playback position across playback-rate changes. Rates are zeroed
// here and set for real in Init(), once the stream's sample rate is known.
AudioClock::AudioClock(AudioStream* aStream)
 :mAudioStream(aStream),
  mOldOutRate(0),
  mBasePosition(0),
  mBaseOffset(0),
  mOldBaseOffset(0),
  mOldBasePosition(0),
  mPlaybackRateChangeOffset(0),
  mPreviousPosition(0),
  mWritten(0),
  mOutRate(0),
  mInRate(0),
  mPreservesPitch(true),
  mCompensatingLatency(false)
{}
|
|
|
|
|
|
|
|
void AudioClock::Init()
|
|
|
|
{
|
|
|
|
mOutRate = mAudioStream->GetRate();
|
|
|
|
mInRate = mAudioStream->GetRate();
|
2012-12-11 00:43:04 +04:00
|
|
|
mOldOutRate = mOutRate;
|
2012-11-22 14:38:28 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void AudioClock::UpdateWritePosition(uint32_t aCount)
{
  // Account for aCount more frames handed to the output backend — this
  // includes silence written for underruns (see AudioStream::DataCallback).
  mWritten += aCount;
}
|
|
|
|
|
|
|
|
uint64_t AudioClock::GetPosition()
{
  // Playback position in USECS_PER_S-based units, compensating for playback
  // rate changes whose effect hasn't reached the backend yet. Returns
  // UINT64_MAX if the stream cannot report a position.
  int64_t position = mAudioStream->GetPositionInFramesInternal();
  int64_t diffOffset;
  NS_ASSERTION(position < 0 || (mInRate != 0 && mOutRate != 0), "AudioClock not initialized.");
  if (position >= 0) {
    if (position < mPlaybackRateChangeOffset) {
      // See if we are still playing frames pushed with the old playback rate in
      // the backend. If we are, use the old output rate to compute the
      // position.
      mCompensatingLatency = true;
      diffOffset = position - mOldBaseOffset;
      position = static_cast<uint64_t>(mOldBasePosition +
        static_cast<float>(USECS_PER_S * diffOffset) / mOldOutRate);
      // Remember the value so the transition out of compensation can use it
      // as the new base.
      mPreviousPosition = position;
      return position;
    }

    if (mCompensatingLatency) {
      // First query past the rate-change boundary: rebase on the last
      // position reported during compensation, so the clock stays monotonic.
      diffOffset = position - mPlaybackRateChangeOffset;
      mCompensatingLatency = false;
      mBasePosition = mPreviousPosition;
    } else {
      diffOffset = position - mPlaybackRateChangeOffset;
    }
    position = static_cast<uint64_t>(mBasePosition +
      (static_cast<float>(USECS_PER_S * diffOffset) / mOutRate));
    return position;
  }
  // Position unavailable (stream missing or errored).
  return UINT64_MAX;
}
|
|
|
|
|
|
|
|
uint64_t AudioClock::GetPositionInFrames()
{
  // Convert the time-based position back into frames at the current output
  // rate.
  return (GetPosition() * mOutRate) / USECS_PER_S;
}
|
|
|
|
|
|
|
|
void AudioClock::SetPlaybackRate(double aPlaybackRate)
{
  // Change the effective playback rate. The mOld*/mBase* bookkeeping lets
  // GetPosition() keep reporting correct values while frames pushed at the
  // previous rate are still draining out of the backend. NOTE: statement
  // order matters — GetPosition() reads the fields updated below, so it must
  // be called before they change.
  int64_t position = mAudioStream->GetPositionInFramesInternal();
  if (position > mPlaybackRateChangeOffset) {
    // The previous rate change has fully played out: snapshot the current
    // state into the mOld* fields before switching rates.
    mOldBasePosition = mBasePosition;
    mBasePosition = GetPosition();
    mOldBaseOffset = mPlaybackRateChangeOffset;
    mBaseOffset = position;
    mPlaybackRateChangeOffset = mWritten;
    mOldOutRate = mOutRate;
    // Output rate scales inversely with the requested playback rate.
    mOutRate = static_cast<int>(mInRate / aPlaybackRate);
  } else {
    // The playbackRate has been changed before the end of the latency
    // compensation phase. We don't update the mOld* variable. That way, the
    // last playbackRate set is taken into account.
    mBasePosition = GetPosition();
    mBaseOffset = position;
    mPlaybackRateChangeOffset = mWritten;
    mOutRate = static_cast<int>(mInRate / aPlaybackRate);
  }
}
|
|
|
|
|
|
|
|
double AudioClock::GetPlaybackRate()
{
  // Effective playback rate; the inverse of how mOutRate was derived in
  // SetPlaybackRate().
  return static_cast<double>(mInRate) / mOutRate;
}
|
|
|
|
|
|
|
|
void AudioClock::SetPreservesPitch(bool aPreservesPitch)
{
  // Whether pitch should be preserved when the playback rate changes.
  mPreservesPitch = aPreservesPitch;
}
|
|
|
|
|
|
|
|
bool AudioClock::GetPreservesPitch()
{
  // See SetPreservesPitch().
  return mPreservesPitch;
}
|
2012-11-14 23:45:33 +04:00
|
|
|
} // namespace mozilla
|