зеркало из https://github.com/mozilla/gecko-dev.git
Bug 1712598 - part1 : fill silence if detecting a gap in audio. r=padenot
If there is a gap between the audio that is going to be appended next and the amount of frames that we've appended, then we need to fill silence data in order to keep A/V sync correct. We did that for the DecodedStream [1] before, but that got removed during the refactoring. [1] https://searchfox.org/mozilla-central/rev/36181d2c169bafd5e13c534851c8b25d1567cfc3/dom/media/mediasink/DecodedStream.cpp#660-667 Differential Revision: https://phabricator.services.mozilla.com/D115857
This commit is contained in:
Родитель
07724ed21f
Коммит
88ff5a6b27
|
@ -729,6 +729,16 @@ void DecodedStream::SendAudio(const PrincipalHandle& aPrincipalHandle) {
|
|||
AutoTArray<RefPtr<AudioData>, 10> audio;
|
||||
mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
|
||||
|
||||
// This will happen every time the media sink switches from `AudioSink` to
|
||||
// `DecodedStream`. If we don't insert the silence then the A/V will be out of
|
||||
// sync.
|
||||
RefPtr<AudioData> nextAudio = audio.IsEmpty() ? nullptr : audio[0];
|
||||
if (RefPtr<AudioData> silence = CreateSilenceDataIfGapExists(nextAudio)) {
|
||||
LOG_DS(LogLevel::Verbose, "Detect a gap in audio, insert silence=%u",
|
||||
silence->Frames());
|
||||
audio.InsertElementAt(0, silence);
|
||||
}
|
||||
|
||||
// Append data which hasn't been sent to audio track before.
|
||||
mData->mAudioTrack->AppendData(audio, aPrincipalHandle);
|
||||
for (uint32_t i = 0; i < audio.Length(); ++i) {
|
||||
|
@ -743,6 +753,41 @@ void DecodedStream::SendAudio(const PrincipalHandle& aPrincipalHandle) {
|
|||
}
|
||||
}
|
||||
|
||||
// If the audio that is going to be appended next (aNextAudio) starts after
// the position up to which we have already written frames, there is a gap in
// the audio stream. Return a chunk of silence with exactly the missing number
// of frames so the caller can keep A/V in sync; return nullptr when no gap
// exists, when aNextAudio is null, or on allocation/overflow failure.
already_AddRefed<AudioData> DecodedStream::CreateSilenceDataIfGapExists(
    RefPtr<AudioData>& aNextAudio) {
  AssertOwnerThread();
  if (!aNextAudio) {
    return nullptr;
  }
  // Frame position (in aNextAudio's rate) up to which audio has been written.
  CheckedInt64 audioWrittenOffset =
      mData->mAudioFramesWritten +
      TimeUnitToFrames(*mStartTime, aNextAudio->mRate);
  // Frame position at which the next audio chunk starts.
  CheckedInt64 frameOffset =
      TimeUnitToFrames(aNextAudio->mTime, aNextAudio->mRate);
  // Calling value() on an invalid CheckedInt64 asserts, so validate the
  // checked arithmetic above before dereferencing either offset.
  if (!audioWrittenOffset.isValid() || !frameOffset.isValid()) {
    NS_WARNING("Int overflow in DecodedStream::CreateSilenceDataIfGapExists");
    return nullptr;
  }
  if (audioWrittenOffset.value() >= frameOffset.value()) {
    // No gap: we've already written at least up to where the next chunk starts.
    return nullptr;
  }
  // We've written less audio than our frame offset, return a silence data so
  // we have enough audio to be at the correct offset for our current frames.
  CheckedInt64 missingFrames = frameOffset - audioWrittenOffset;
  AlignedAudioBuffer silenceBuffer(missingFrames.value() *
                                   aNextAudio->mChannels);
  if (!silenceBuffer) {
    NS_WARNING("OOM in DecodedStream::CreateSilenceDataIfGapExists");
    return nullptr;
  }
  auto duration = FramesToTimeUnit(missingFrames.value(), aNextAudio->mRate);
  if (!duration.IsValid()) {
    NS_WARNING("Int overflow in DecodedStream::CreateSilenceDataIfGapExists");
    return nullptr;
  }
  // The silence takes the next chunk's offset/time so it slots in directly
  // before it in the track.
  RefPtr<AudioData> silenceData = new AudioData(
      aNextAudio->mOffset, aNextAudio->mTime, std::move(silenceBuffer),
      aNextAudio->mChannels, aNextAudio->mRate);
  MOZ_DIAGNOSTIC_ASSERT(duration == silenceData->mDuration, "must be equal");
  return silenceData.forget();
}
|
||||
|
||||
void DecodedStream::CheckIsDataAudible(const AudioData* aData) {
|
||||
MOZ_ASSERT(aData);
|
||||
|
||||
|
|
|
@ -89,6 +89,13 @@ class DecodedStream : public MediaSink {
|
|||
void ConnectListener();
|
||||
void DisconnectListener();
|
||||
|
||||
// Give the audio that is going to be appended next as an input, if there is
|
||||
// a gap between audio's time and the frames that we've written, then return
|
||||
// silence data that has the same number of frames and can be used to fill the
|
||||
// gap. If no gap exists, return nullptr.
|
||||
already_AddRefed<AudioData> CreateSilenceDataIfGapExists(
|
||||
RefPtr<AudioData>& aNextAudio);
|
||||
|
||||
const RefPtr<AbstractThread> mOwnerThread;
|
||||
|
||||
// Used to access the graph.
|
||||
|
|
Загрузка…
Ссылка в новой задаче