Bug 679269 - Rename SoundData{,Value} to AudioData{,Value} and fix inconsistent use of "sound" vs "audio". r=doublec
This commit is contained in:
Parent: efc645d526
Commit: 24e6652958
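At its core the patch is a mechanical rename of the decoded-sample types; condensed from the nsBuiltinDecoderReader.h hunks below, the change amounts to:

    // Condensed sketch of the rename (see the full hunks below).
    #ifdef MOZ_TREMOR
    typedef short AudioDataValue;   // was: typedef short SoundDataValue
    #else
    typedef float AudioDataValue;   // was: typedef float SoundDataValue
    #endif
    class AudioData { /* ... */ };  // was: class SoundData

The "sound" vs "audio" comment fixes follow the same pattern throughout.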
@@ -116,7 +116,7 @@ void nsAudioAvailableEventManager::DispatchPendingEvents(PRUint64 aCurrentTime)
   }
 }
 
-void nsAudioAvailableEventManager::QueueWrittenAudioData(SoundDataValue* aAudioData,
+void nsAudioAvailableEventManager::QueueWrittenAudioData(AudioDataValue* aAudioData,
                                                          PRUint32 aAudioDataLength,
                                                          PRUint64 aEndTimeSampleOffset)
 {

@@ -136,7 +136,7 @@ void nsAudioAvailableEventManager::QueueWrittenAudioData(SoundDataValue* aAudioD
     }
     mSignalBufferLength = currentBufferSize;
   }
-  SoundDataValue* audioData = aAudioData;
+  AudioDataValue* audioData = aAudioData;
   PRUint32 audioDataLength = aAudioDataLength;
   PRUint32 signalBufferTail = mSignalBufferLength - mSignalBufferPosition;
 

@@ -153,7 +153,7 @@ void nsAudioAvailableEventManager::QueueWrittenAudioData(SoundDataValue* aAudioD
     PRUint32 i;
     float *signalBuffer = mSignalBuffer.get() + mSignalBufferPosition;
     for (i = 0; i < signalBufferTail; ++i) {
-      signalBuffer[i] = MOZ_CONVERT_SOUND_SAMPLE(audioData[i]);
+      signalBuffer[i] = MOZ_CONVERT_AUDIO_SAMPLE(audioData[i]);
     }
     audioData += signalBufferTail;
     audioDataLength -= signalBufferTail;

@@ -172,7 +172,7 @@ void nsAudioAvailableEventManager::QueueWrittenAudioData(SoundDataValue* aAudioD
     }
   }
 
-  // Inform the element that we've written sound data.
+  // Inform the element that we've written audio data.
   nsCOMPtr<nsIRunnable> event =
     new nsAudioAvailableEventRunner(mDecoder, mSignalBuffer.forget(),
                                     mSignalBufferLength, time);

@@ -194,7 +194,7 @@ void nsAudioAvailableEventManager::QueueWrittenAudioData(SoundDataValue* aAudioD
     PRUint32 i;
     float *signalBuffer = mSignalBuffer.get() + mSignalBufferPosition;
     for (i = 0; i < audioDataLength; ++i) {
-      signalBuffer[i] = MOZ_CONVERT_SOUND_SAMPLE(audioData[i]);
+      signalBuffer[i] = MOZ_CONVERT_AUDIO_SAMPLE(audioData[i]);
     }
     mSignalBufferPosition += audioDataLength;
   }
@@ -63,7 +63,7 @@ public:
 
   // Queues audio sample data and re-packages it into equal sized
   // framebuffers. Called from the audio thread.
-  void QueueWrittenAudioData(SoundDataValue* aAudioData,
+  void QueueWrittenAudioData(AudioDataValue* aAudioData,
                              PRUint32 aAudioDataLength,
                              PRUint64 aEndTimeSampleOffset);
 
@@ -74,7 +74,7 @@ public:
   static nsAudioStream* AllocateStream();
 
   // Initialize the audio stream. aNumChannels is the number of audio channels
-  // (1 for mono, 2 for stereo, etc) and aRate is the frequency of the sound
+  // (1 for mono, 2 for stereo, etc) and aRate is the frequency of the audio
   // samples (22050, 44100, etc).
   // Unsafe to call with the decoder monitor held.
   virtual nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat) = 0;
@@ -83,14 +83,14 @@ public:
   // Unsafe to call with the decoder monitor held.
   virtual void Shutdown() = 0;
 
-  // Write sound data to the audio hardware. aBuf is an array of samples in
+  // Write audio data to the audio hardware. aBuf is an array of samples in
   // the format specified by mFormat of length aCount. aCount should be
   // evenly divisible by the number of channels in this audio stream. If
   // aCount is larger than the result of Available(), the write will block
   // until sufficient buffer space is available.
   virtual nsresult Write(const void* aBuf, PRUint32 aCount) = 0;
 
-  // Return the number of sound samples that can be written to the audio device
+  // Return the number of audio samples that can be written to the audio device
   // without blocking.
   virtual PRUint32 Available() = 0;
 
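For context, the contract spelled out in those comments suggests a caller shaped roughly like this (a hedged sketch, not code from the tree; the buffer contents and sizes are made up):

    // Hypothetical nsAudioStream caller; error handling elided.
    nsRefPtr<nsAudioStream> stream = nsAudioStream::AllocateStream();
    stream->Init(2, 44100, MOZ_AUDIO_DATA_FORMAT);  // stereo at 44.1 kHz

    AudioDataValue buf[1024] = { 0 };               // 512 stereo pairs of silence
    // Writing more than Available() samples blocks until space frees up,
    // and the count must divide evenly by the channel count.
    PRUint32 count = NS_MIN(static_cast<PRUint32>(1024), stream->Available());
    count -= count % 2;                             // keep channel alignment
    stream->Write(buf, count);
    stream->Shutdown();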
@@ -228,10 +228,10 @@ VideoData* nsBuiltinDecoderReader::FindStartTime(PRInt64& aOutStartTime)
     }
   }
   if (HasAudio()) {
-    SoundData* soundData = DecodeToFirstData(&nsBuiltinDecoderReader::DecodeAudioData,
+    AudioData* audioData = DecodeToFirstData(&nsBuiltinDecoderReader::DecodeAudioData,
                                              mAudioQueue);
-    if (soundData) {
-      audioStartTime = soundData->mTime;
+    if (audioData) {
+      audioStartTime = audioData->mTime;
     }
   }
 

@@ -321,7 +321,7 @@ nsresult nsBuiltinDecoderReader::DecodeToTarget(PRInt64 aTarget)
       }
     }
-    const SoundData* audio = mAudioQueue.PeekFront();
+    const AudioData* audio = mAudioQueue.PeekFront();
     if (!audio)
       break;
     PRInt64 startSample = 0;

@@ -329,7 +329,7 @@ nsresult nsBuiltinDecoderReader::DecodeToTarget(PRInt64 aTarget)
       return NS_ERROR_FAILURE;
     }
     if (startSample + audio->mSamples <= targetSample) {
-      // Our seek target lies after the samples in this SoundData. Pop it
+      // Our seek target lies after the samples in this AudioData. Pop it
       // off the queue, and keep decoding forwards.
       delete mAudioQueue.PopFront();
       audio = nsnull;

@@ -347,7 +347,7 @@ nsresult nsBuiltinDecoderReader::DecodeToTarget(PRInt64 aTarget)
       break;
     }
 
-    // The seek target lies somewhere in this SoundData's samples, strip off
+    // The seek target lies somewhere in this AudioData's samples, strip off
     // any samples which lie before the seek target, so we'll begin playback
     // exactly at the seek target.
     NS_ASSERTION(targetSample >= startSample, "Target must at or be after data start.");

@@ -362,15 +362,15 @@ nsresult nsBuiltinDecoderReader::DecodeToTarget(PRInt64 aTarget)
     }
     PRUint32 samples = audio->mSamples - static_cast<PRUint32>(samplesToPrune);
     PRUint32 channels = audio->mChannels;
-    nsAutoArrayPtr<SoundDataValue> audioData(new SoundDataValue[samples * channels]);
+    nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[samples * channels]);
     memcpy(audioData.get(),
            audio->mAudioData.get() + (samplesToPrune * channels),
-           samples * channels * sizeof(SoundDataValue));
+           samples * channels * sizeof(AudioDataValue));
     PRInt64 duration;
     if (!SamplesToUsecs(samples, mInfo.mAudioRate, duration)) {
       return NS_ERROR_FAILURE;
     }
-    nsAutoPtr<SoundData> data(new SoundData(audio->mOffset,
+    nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
                                             aTarget,
                                             duration,
                                             samples,
@@ -93,37 +93,37 @@ public:
 #ifdef MOZ_TREMOR
 #include <ogg/os_types.h>
 typedef ogg_int32_t VorbisPCMValue;
-typedef short SoundDataValue;
+typedef short AudioDataValue;
 
-#define MOZ_SOUND_DATA_FORMAT (nsAudioStream::FORMAT_S16_LE)
+#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_S16_LE)
 #define MOZ_CLIP_TO_15(x) ((x)<-32768?-32768:(x)<=32767?(x):32767)
-// Convert the output of vorbis_synthesis_pcmout to a SoundDataValue
+// Convert the output of vorbis_synthesis_pcmout to a AudioDataValue
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) \
-  (static_cast<SoundDataValue>(MOZ_CLIP_TO_15((x)>>9)))
-// Convert a SoundDataValue to a float for the Audio API
-#define MOZ_CONVERT_SOUND_SAMPLE(x) ((x)*(1.F/32768))
+  (static_cast<AudioDataValue>(MOZ_CLIP_TO_15((x)>>9)))
+// Convert a AudioDataValue to a float for the Audio API
+#define MOZ_CONVERT_AUDIO_SAMPLE(x) ((x)*(1.F/32768))
 #define MOZ_SAMPLE_TYPE_S16LE 1
 
 #else /*MOZ_VORBIS*/
 
 typedef float VorbisPCMValue;
-typedef float SoundDataValue;
+typedef float AudioDataValue;
 
-#define MOZ_SOUND_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
+#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) (x)
-#define MOZ_CONVERT_SOUND_SAMPLE(x) (x)
+#define MOZ_CONVERT_AUDIO_SAMPLE(x) (x)
 #define MOZ_SAMPLE_TYPE_FLOAT32 1
 
 #endif
 
-// Holds chunk a decoded sound samples.
-class SoundData {
+// Holds chunk a decoded audio samples.
+class AudioData {
 public:
-  SoundData(PRInt64 aOffset,
+  AudioData(PRInt64 aOffset,
             PRInt64 aTime,
             PRInt64 aDuration,
             PRUint32 aSamples,
-            SoundDataValue* aData,
+            AudioDataValue* aData,
             PRUint32 aChannels)
     : mOffset(aOffset),
       mTime(aTime),
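Concretely, in MOZ_TREMOR builds samples are 16-bit fixed point and the macros above rescale them for the float-based Audio API; in float builds both conversions are the identity. A worked example of the S16 path (input value chosen for illustration):

    // Tremor emits 32-bit fixed-point PCM; >>9 brings it into 16-bit
    // range, MOZ_CLIP_TO_15 clamps, and the float conversion rescales
    // the result into [-1, 1).
    ogg_int32_t pcm = 1 << 23;                          // example input sample
    AudioDataValue s = MOZ_CONVERT_VORBIS_SAMPLE(pcm);  // (1<<23)>>9 == 16384
    float f = MOZ_CONVERT_AUDIO_SAMPLE(s);              // 16384 * (1.F/32768) == 0.5f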
@@ -132,13 +132,13 @@ public:
       mChannels(aChannels),
       mAudioData(aData)
   {
-    MOZ_COUNT_CTOR(SoundData);
+    MOZ_COUNT_CTOR(AudioData);
   }
 
-  SoundData(PRInt64 aOffset,
+  AudioData(PRInt64 aOffset,
             PRInt64 aDuration,
             PRUint32 aSamples,
-            SoundDataValue* aData,
+            AudioDataValue* aData,
             PRUint32 aChannels)
     : mOffset(aOffset),
       mTime(-1),

@@ -147,12 +147,12 @@ public:
       mChannels(aChannels),
       mAudioData(aData)
   {
-    MOZ_COUNT_CTOR(SoundData);
+    MOZ_COUNT_CTOR(AudioData);
   }
 
-  ~SoundData()
+  ~AudioData()
   {
-    MOZ_COUNT_DTOR(SoundData);
+    MOZ_COUNT_DTOR(AudioData);
   }
 
   PRUint32 AudioDataLength() {

@@ -167,7 +167,7 @@ public:
   const PRInt64 mDuration; // In usecs.
   const PRUint32 mSamples;
   const PRUint32 mChannels;
-  nsAutoArrayPtr<SoundDataValue> mAudioData;
+  nsAutoArrayPtr<AudioDataValue> mAudioData;
 };
 
 // Holds a decoded video frame, in YCbCr format. These are queued in the reader.

@@ -457,7 +457,7 @@ public:
 
   // Queue of audio samples. This queue is threadsafe, and is accessed from
   // the audio, decoder, state machine, and main threads.
-  MediaQueue<SoundData> mAudioQueue;
+  MediaQueue<AudioData> mAudioQueue;
 
   // Queue of video samples. This queue is threadsafe, and is accessed from
   // the decoder, state machine, and main threads.

@@ -501,8 +501,8 @@ public:
   AudioQueueMemoryFunctor() : mResult(0) {}
 
   virtual void* operator()(void* anObject) {
-    const SoundData* soundData = static_cast<const SoundData*>(anObject);
-    mResult += soundData->mSamples * soundData->mChannels * sizeof(SoundDataValue);
+    const AudioData* audioData = static_cast<const AudioData*>(anObject);
+    mResult += audioData->mSamples * audioData->mChannels * sizeof(AudioDataValue);
     return nsnull;
   }
 

@@ -348,7 +348,7 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
   PRInt64 ampleAudioThreshold = AMPLE_AUDIO_USECS;
 
   MediaQueue<VideoData>& videoQueue = mReader->mVideoQueue;
-  MediaQueue<SoundData>& audioQueue = mReader->mAudioQueue;
+  MediaQueue<AudioData>& audioQueue = mReader->mAudioQueue;
 
   // Main decode loop.
   PRBool videoPlaying = HasVideo();

@@ -449,8 +449,8 @@ void nsBuiltinDecoderStateMachine::DecodeLoop()
       // All active bitstreams' decode is well ahead of the playback
       // position, we may as well wait for the playback to catch up. Note the
       // audio push thread acquires and notifies the decoder monitor every time
-      // it pops SoundData off the audio queue. So if the audio push thread pops
-      // the last SoundData off the audio queue right after that queue reported
+      // it pops AudioData off the audio queue. So if the audio push thread pops
+      // the last AudioData off the audio queue right after that queue reported
      // it was non-empty here, we'll receive a notification on the decoder
      // monitor which will wake us up shortly after we sleep, thus preventing
      // both the decode and audio push threads waiting at the same time.

@@ -516,7 +516,7 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
   // are unsafe to call with the decoder monitor held are documented as such
   // in nsAudioStream.h.
   nsRefPtr<nsAudioStream> audioStream = nsAudioStream::AllocateStream();
-  audioStream->Init(channels, rate, MOZ_SOUND_DATA_FORMAT);
+  audioStream->Init(channels, rate, MOZ_AUDIO_DATA_FORMAT);
 
   {
     // We must hold the monitor while setting mAudioStream or whenever we query

@@ -581,7 +581,7 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
                  "Should have data to play");
     // See if there's missing samples in the audio stream. If there is, push
     // silence into the audio hardware, so we can play across the gap.
-    const SoundData* s = mReader->mAudioQueue.PeekFront();
+    const AudioData* s = mReader->mAudioQueue.PeekFront();
 
     // Calculate the number of samples that have been pushed onto the audio
     // hardware.

@@ -609,9 +609,9 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
     }
 
     if (missingSamples > 0) {
-      // The next sound chunk begins some time after the end of the last chunk
-      // we pushed to the sound hardware. We must push silence into the audio
-      // hardware so that the next sound chunk begins playback at the correct
+      // The next audio chunk begins some time after the end of the last chunk
+      // we pushed to the audio hardware. We must push silence into the audio
+      // hardware so that the next audio chunk begins playback at the correct
       // time.
       missingSamples = NS_MIN(static_cast<PRInt64>(PR_UINT32_MAX), missingSamples);
       samplesWritten = PlaySilence(static_cast<PRUint32>(missingSamples),
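To make that comment concrete: if the last chunk written ended at sample 44100 and the next queued chunk's start offset is 44541, then

    missingSamples = 44541 - 44100 = 441   // 10 ms of silence at 44.1 kHz

and 441 samples of silence are written before the chunk so playback stays on schedule; the NS_MIN clamp matters only because missingSamples is a PRInt64 while PlaySilence() takes a PRUint32 count.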
@@ -669,8 +669,8 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
         // Write silence manually rather than using PlaySilence(), so that
         // the AudioAPI doesn't get a copy of the samples.
         PRUint32 numValues = samples * channels;
-        nsAutoArrayPtr<SoundDataValue> buf(new SoundDataValue[numValues]);
-        memset(buf.get(), 0, sizeof(SoundDataValue) * numValues);
+        nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[numValues]);
+        memset(buf.get(), 0, sizeof(AudioDataValue) * numValues);
         mAudioStream->Write(buf, numValues);
       }
     }

@@ -727,8 +727,8 @@ PRUint32 nsBuiltinDecoderStateMachine::PlaySilence(PRUint32 aSamples,
   PRUint32 maxSamples = SILENCE_BYTES_CHUNK / aChannels;
   PRUint32 samples = NS_MIN(aSamples, maxSamples);
   PRUint32 numValues = samples * aChannels;
-  nsAutoArrayPtr<SoundDataValue> buf(new SoundDataValue[numValues]);
-  memset(buf.get(), 0, sizeof(SoundDataValue) * numValues);
+  nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[numValues]);
+  memset(buf.get(), 0, sizeof(AudioDataValue) * numValues);
   mAudioStream->Write(buf, numValues);
   // Dispatch events to the DOM for the audio just written.
   mEventManager.QueueWrittenAudioData(buf.get(), numValues,

@@ -741,7 +741,7 @@ PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue(PRUint64 aSampleOffset
 {
   NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
-  nsAutoPtr<SoundData> sound(mReader->mAudioQueue.PopFront());
+  nsAutoPtr<AudioData> audioData(mReader->mAudioQueue.PopFront());
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     NS_WARN_IF_FALSE(IsPlaying(), "Should be playing");

@@ -759,19 +759,19 @@ PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue(PRUint64 aSampleOffset
   // able to acquire the audio monitor in order to resume or destroy the
   // audio stream.
   if (!mAudioStream->IsPaused()) {
-    mAudioStream->Write(sound->mAudioData,
-                        sound->AudioDataLength());
+    mAudioStream->Write(audioData->mAudioData,
+                        audioData->AudioDataLength());
 
-    offset = sound->mOffset;
-    samples = sound->mSamples;
+    offset = audioData->mOffset;
+    samples = audioData->mSamples;
 
     // Dispatch events to the DOM for the audio just written.
-    mEventManager.QueueWrittenAudioData(sound->mAudioData.get(),
-                                        sound->AudioDataLength(),
+    mEventManager.QueueWrittenAudioData(audioData->mAudioData.get(),
+                                        audioData->AudioDataLength(),
                                         (aSampleOffset + samples) * aChannels);
   } else {
-    mReader->mAudioQueue.PushFront(sound);
-    sound.forget();
+    mReader->mAudioQueue.PushFront(audioData);
+    audioData.forget();
   }
   if (offset != -1) {
     mDecoder->UpdatePlaybackOffset(offset);

@@ -1312,7 +1312,7 @@ void nsBuiltinDecoderStateMachine::DecodeSeek()
                                    mediaTime);
     }
     if (NS_SUCCEEDED(res)) {
-      SoundData* audio = HasAudio() ? mReader->mAudioQueue.PeekFront() : nsnull;
+      AudioData* audio = HasAudio() ? mReader->mAudioQueue.PeekFront() : nsnull;
       NS_ASSERTION(!audio || (audio->mTime <= seekTime &&
                               seekTime <= audio->mTime + audio->mDuration),
                    "Seek target should lie inside the first audio block after seek");

@@ -1649,7 +1649,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
       mPlayDuration = clock_time - mStartTime;
       mPlayStartTime = TimeStamp::Now();
     } else {
-      // Sound is disabled on this system. Sync to the system clock.
+      // Audio hardware is disabled on this system. Sync to the system clock.
       clock_time = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration;
       // Ensure the clock can never go backwards.
       NS_ASSERTION(mCurrentFrameTime <= clock_time, "Clock should go forwards");

@@ -86,7 +86,7 @@ is done on the decode thread when video frames are decoded.
 
 The decode thread pushes decoded audio and videos frames into two
 separate queues - one for audio and one for video. These are kept
-separate to make it easy to constantly feed audio data to the sound
+separate to make it easy to constantly feed audio data to the audio
 hardware while allowing frame skipping of video data. These queues are
 threadsafe, and neither the decode, audio, or state machine should
 be able to monopolize them, and cause starvation of the other threads.

@@ -102,7 +102,7 @@ to shut down the decode thread in order to conserve resources.
 
 During playback the audio thread will be idle (via a Wait() on the
 monitor) if the audio queue is empty. Otherwise it constantly pops
-sound data off the queue and plays it with a blocking write to the audio
+audio data off the queue and plays it with a blocking write to the audio
 hardware (via nsAudioStream and libsydneyaudio).
 
 */
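The loop that comment describes would look roughly like this (shape inferred from the hunks in this patch, not copied from the tree):

    // Assumed skeleton of the audio thread's playback loop.
    while (playing) {
      {
        ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
        while (playing && !mReader->mAudioQueue.PeekFront()) {
          mon.Wait();                 // idle until the decoder queues audio
        }
      }
      // Pop a chunk and push it to the hardware; nsAudioStream::Write()
      // blocks until libsydneyaudio has buffer space, pacing the loop.
      sampleOffset += PlayFromAudioQueue(sampleOffset, channels);
    }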
@@ -360,7 +360,7 @@ protected:
                           PRUint64 aSampleOffset);
 
   // Pops an audio chunk from the front of the audio queue, and pushes its
-  // sound data to the audio hardware. MozAudioAvailable sample data is also
+  // audio data to the audio hardware. MozAudioAvailable sample data is also
   // queued here. Called on the audio thread.
   PRUint32 PlayFromAudioQueue(PRUint64 aSampleOffset, PRUint32 aChannels);
 

@@ -367,7 +367,7 @@ nsresult nsOggReader::DecodeVorbis(ogg_packet* aPacket) {
   ogg_int64_t endSample = aPacket->granulepos;
   while ((samples = vorbis_synthesis_pcmout(&mVorbisState->mDsp, &pcm)) > 0) {
     mVorbisState->ValidateVorbisPacketSamples(aPacket, samples);
-    nsAutoArrayPtr<SoundDataValue> buffer(new SoundDataValue[samples * channels]);
+    nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[samples * channels]);
     for (PRUint32 j = 0; j < channels; ++j) {
       VorbisPCMValue* channel = pcm[j];
       for (PRUint32 i = 0; i < PRUint32(samples); ++i) {
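The loop body the hunk cuts off interleaves Vorbis's planar (per-channel) PCM into the single AudioDataValue buffer; presumably something like:

    // Assumed inner-loop body (the hunk shows only the loop heads):
    // sample i of channel j lands at buffer[i * channels + j], converted
    // from VorbisPCMValue by the macro defined in nsBuiltinDecoderReader.h.
    buffer[i * channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);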
@@ -377,7 +377,7 @@ nsresult nsOggReader::DecodeVorbis(ogg_packet* aPacket) {
 
     PRInt64 duration = mVorbisState->Time((PRInt64)samples);
     PRInt64 startTime = mVorbisState->Time(endSample - samples);
-    mAudioQueue.Push(new SoundData(mPageOffset,
+    mAudioQueue.Push(new AudioData(mPageOffset,
                                    startTime,
                                    duration,
                                    samples,

@@ -187,9 +187,9 @@ PRBool nsWaveReader::DecodeAudioData()
   PRInt64 readSize = NS_MIN(BLOCK_SIZE, remaining);
   PRInt64 samples = readSize / mSampleSize;
 
-  PR_STATIC_ASSERT(PRUint64(BLOCK_SIZE) < UINT_MAX / sizeof(SoundDataValue) / MAX_CHANNELS);
+  PR_STATIC_ASSERT(PRUint64(BLOCK_SIZE) < UINT_MAX / sizeof(AudioDataValue) / MAX_CHANNELS);
   const size_t bufferSize = static_cast<size_t>(samples * mChannels);
-  nsAutoArrayPtr<SoundDataValue> sampleBuffer(new SoundDataValue[bufferSize]);
+  nsAutoArrayPtr<AudioDataValue> sampleBuffer(new AudioDataValue[bufferSize]);
 
   PR_STATIC_ASSERT(PRUint64(BLOCK_SIZE) < UINT_MAX / sizeof(char));
   nsAutoArrayPtr<char> dataBuffer(new char[static_cast<size_t>(readSize)]);

@@ -201,7 +201,7 @@ PRBool nsWaveReader::DecodeAudioData()
 
   // convert data to samples
   const char* d = dataBuffer.get();
-  SoundDataValue* s = sampleBuffer.get();
+  AudioDataValue* s = sampleBuffer.get();
   for (int i = 0; i < samples; ++i) {
     for (unsigned int j = 0; j < mChannels; ++j) {
       if (mSampleFormat == nsAudioStream::FORMAT_U8) {

@@ -229,7 +229,7 @@ PRBool nsWaveReader::DecodeAudioData()
   NS_ASSERTION(readSizeTime <= PR_INT64_MAX / USECS_PER_S, "readSizeTime overflow");
   NS_ASSERTION(samples < PR_INT32_MAX, "samples overflow");
 
-  mAudioQueue.Push(new SoundData(pos,
+  mAudioQueue.Push(new AudioData(pos,
                                  static_cast<PRInt64>(posTime * USECS_PER_S),
                                  static_cast<PRInt64>(readSizeTime * USECS_PER_S),
                                  static_cast<PRInt32>(samples),

@@ -431,8 +431,8 @@ PRBool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset)
     // is the start of this chunk.
     mAudioStartUsec = tstamp_usecs;
   }
-  // If there's a gap between the start of this sound chunk and the end of
-  // the previous sound chunk, we need to increment the packet count so that
+  // If there's a gap between the start of this audio chunk and the end of
+  // the previous audio chunk, we need to increment the packet count so that
   // the vorbis decode doesn't use data from before the gap to help decode
   // from after the gap.
   PRInt64 tstamp_samples = 0;

@@ -484,7 +484,7 @@ PRBool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset)
   VorbisPCMValue** pcm = 0;
   PRInt32 samples = 0;
   while ((samples = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm)) > 0) {
-    nsAutoArrayPtr<SoundDataValue> buffer(new SoundDataValue[samples * mChannels]);
+    nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[samples * mChannels]);
     for (PRUint32 j = 0; j < mChannels; ++j) {
       VorbisPCMValue* channel = pcm[j];
       for (PRUint32 i = 0; i < PRUint32(samples); ++i) {

@@ -505,7 +505,7 @@ PRBool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset)
 
     PRInt64 time = tstamp_usecs + total_duration;
     total_samples += samples;
-    mAudioQueue.Push(new SoundData(aOffset,
+    mAudioQueue.Push(new AudioData(aOffset,
                                    time,
                                    duration,
                                    samples,