diff --git a/dom/media/MediaInfo.h b/dom/media/MediaInfo.h
index ee6915d0ff5b..cff69c744470 100644
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -70,17 +70,6 @@ struct AacCodecSpecificData {
            *mDecoderConfigDescriptorBinaryBlob ==
                *rhs.mDecoderConfigDescriptorBinaryBlob;
   }
 
-  // An explanation for the necessity of handling the encoder delay and the
-  // padding is available here:
-  // https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFAppenG/QTFFAppenG.html
-
-  // The number of frames that should be skipped from the beginning of the
-  // decoded stream.
-  uint32_t mEncoderDelayFrames{0};
-
-  // The total number of frames of the media, that is, excluding the encoder
-  // delay and the padding of the last packet, that must be discarded.
-  uint64_t mMediaFrameCount{0};
   // The bytes of the ES_Descriptor field parsed out of esds box. We store
   // this as a blob as some decoders want this.
diff --git a/dom/media/ipc/MediaIPCUtils.h b/dom/media/ipc/MediaIPCUtils.h
index 40801184436e..274bf46a5b36 100644
--- a/dom/media/ipc/MediaIPCUtils.h
+++ b/dom/media/ipc/MediaIPCUtils.h
@@ -110,15 +110,11 @@ struct ParamTraits {
   static void Write(MessageWriter* aWriter, const paramType& aParam) {
     WriteParam(aWriter, *aParam.mEsDescriptorBinaryBlob);
     WriteParam(aWriter, *aParam.mDecoderConfigDescriptorBinaryBlob);
-    WriteParam(aWriter, aParam.mEncoderDelayFrames);
-    WriteParam(aWriter, aParam.mMediaFrameCount);
   }
 
   static bool Read(MessageReader* aReader, paramType* aResult) {
     return ReadParam(aReader, aResult->mEsDescriptorBinaryBlob.get()) &&
            ReadParam(aReader,
-                     aResult->mDecoderConfigDescriptorBinaryBlob.get()) &&
-           ReadParam(aReader, &aResult->mEncoderDelayFrames) &&
-           ReadParam(aReader, &aResult->mMediaFrameCount);
+                     aResult->mDecoderConfigDescriptorBinaryBlob.get());
   }
 };
diff --git a/dom/media/mp4/DecoderData.cpp b/dom/media/mp4/DecoderData.cpp
index ffac665de9a6..b1b503cb9013 100644
--- a/dom/media/mp4/DecoderData.cpp
+++ b/dom/media/mp4/DecoderData.cpp
@@ -10,15 +10,11 @@
 #include "mozilla/EndianUtils.h"
 #include "mozilla/Telemetry.h"
 #include "VideoUtils.h"
-#include "MP4Metadata.h"
-#include "mozilla/Logging.h"
 // OpusDecoder header is really needed only by MP4 in rust
 #include "OpusDecoder.h"
 #include "mp4parse.h"
-#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-
 using mozilla::media::TimeUnit;
 
 namespace mozilla {
@@ -136,17 +132,16 @@ static MediaResult VerifyAudioOrVideoInfoAndRecordTelemetry(
   return NS_OK;
 }
 
-MediaResult MP4AudioInfo::Update(const Mp4parseTrackInfo* aTrack,
-                                 const Mp4parseTrackAudioInfo* aAudio,
-                                 const IndiceWrapper* aIndices) {
-  auto rv = VerifyAudioOrVideoInfoAndRecordTelemetry(aAudio);
+MediaResult MP4AudioInfo::Update(const Mp4parseTrackInfo* track,
+                                 const Mp4parseTrackAudioInfo* audio) {
+  auto rv = VerifyAudioOrVideoInfoAndRecordTelemetry(audio);
   NS_ENSURE_SUCCESS(rv, rv);
 
-  Mp4parseCodec codecType = aAudio->sample_info[0].codec_type;
-  for (uint32_t i = 0; i < aAudio->sample_info_count; i++) {
-    if (aAudio->sample_info[i].protected_data.is_encrypted) {
-      auto rv = UpdateTrackProtectedInfo(*this,
-                                         aAudio->sample_info[i].protected_data);
+  Mp4parseCodec codecType = audio->sample_info[0].codec_type;
+  for (uint32_t i = 0; i < audio->sample_info_count; i++) {
+    if (audio->sample_info[i].protected_data.is_encrypted) {
+      auto rv =
+          UpdateTrackProtectedInfo(*this, audio->sample_info[i].protected_data);
       NS_ENSURE_SUCCESS(rv, rv);
       break;
     }
@@ -157,8 +152,8 @@ MediaResult MP4AudioInfo::Update(const Mp4parseTrackInfo* aTrack,
   // ever not hold. E.g. if we need to handle different codecs in a single
   // track, or if we have different numbers or channels in a single track.
   Mp4parseByteData mp4ParseSampleCodecSpecific =
-      aAudio->sample_info[0].codec_specific_config;
-  Mp4parseByteData extraData = aAudio->sample_info[0].extra_data;
+      audio->sample_info[0].codec_specific_config;
+  Mp4parseByteData extraData = audio->sample_info[0].extra_data;
   MOZ_ASSERT(mCodecSpecificConfig.is(),
              "Should have no codec specific data yet");
   if (codecType == MP4PARSE_CODEC_OPUS) {
@@ -183,42 +178,7 @@ MediaResult MP4AudioInfo::Update(const Mp4parseTrackInfo* aTrack,
         AudioCodecSpecificVariant{std::move(opusCodecSpecificData)};
   } else if (codecType == MP4PARSE_CODEC_AAC) {
     mMimeType = "audio/mp4a-latm"_ns;
-    int64_t codecDelayUS = aTrack->media_time;
-    double USECS_PER_S = 1e6;
-    // We can't use mozilla::UsecsToFrames here because we need to round, and it
-    // floors.
-    uint32_t encoderDelayFrameCount = 0;
-    if (codecDelayUS > 0) {
-      encoderDelayFrameCount = static_cast(
-          std::lround(static_cast(codecDelayUS) *
-                      aAudio->sample_info->sample_rate / USECS_PER_S));
-      LOG("AAC stream in MP4 container, %" PRIu32 " frames of encoder delay.",
-          encoderDelayFrameCount);
-    }
-
-    // Pass the padding number, in frames, to the AAC decoder as well.
-    MP4SampleIndex::Indice indice = {0};
-    bool rv = aIndices->GetIndice(aIndices->Length() - 1, indice);
-    uint64_t mediaFrameCount = 0;
-    if (rv) {
-      // The `end_composition` member of the very last index member is the
-      // duration of the media in microseconds, excluding decoder delay and
-      // padding. Convert to frames and give to the decoder so that trimming can
-      // be done properly.
-      mediaFrameCount = static_cast(indice.end_composition) *
-                        aAudio->sample_info->sample_rate / USECS_PER_S;
-      LOG("AAC stream in MP4 container, total media duration is %" PRIu64
-          " frames",
-          mediaFrameCount);
-    } else {
-      LOG("AAC stream in MP4 container, couldn't determine total media time");
-    }
-
     AacCodecSpecificData aacCodecSpecificData{};
-
-    aacCodecSpecificData.mEncoderDelayFrames = encoderDelayFrameCount;
-    aacCodecSpecificData.mMediaFrameCount = mediaFrameCount;
-
     // codec specific data is used to store the DecoderConfigDescriptor.
     aacCodecSpecificData.mDecoderConfigDescriptorBinaryBlob->AppendElements(
         mp4ParseSampleCodecSpecific.data, mp4ParseSampleCodecSpecific.length);
@@ -245,17 +205,17 @@ MediaResult MP4AudioInfo::Update(const Mp4parseTrackInfo* aTrack,
     mCodecSpecificConfig = AudioCodecSpecificVariant{Mp3CodecSpecificData{}};
   }
 
-  mRate = aAudio->sample_info[0].sample_rate;
-  mChannels = aAudio->sample_info[0].channels;
-  mBitDepth = aAudio->sample_info[0].bit_depth;
-  mExtendedProfile = aAudio->sample_info[0].extended_profile;
-  mDuration = TimeUnit::FromMicroseconds(aTrack->duration);
-  mMediaTime = TimeUnit::FromMicroseconds(aTrack->media_time);
-  mTrackId = aTrack->track_id;
+  mRate = audio->sample_info[0].sample_rate;
+  mChannels = audio->sample_info[0].channels;
+  mBitDepth = audio->sample_info[0].bit_depth;
+  mExtendedProfile = audio->sample_info[0].extended_profile;
+  mDuration = TimeUnit::FromMicroseconds(track->duration);
+  mMediaTime = TimeUnit::FromMicroseconds(track->media_time);
+  mTrackId = track->track_id;
 
   // In stagefright, mProfile is kKeyAACProfile, mExtendedProfile is kKeyAACAOT.
-  if (aAudio->sample_info[0].profile <= 4) {
-    mProfile = aAudio->sample_info[0].profile;
+  if (audio->sample_info[0].profile <= 4) {
+    mProfile = audio->sample_info[0].profile;
   }
 
   if (mCodecSpecificConfig.is()) {
@@ -329,5 +289,3 @@ bool MP4VideoInfo::IsValid() const {
 }
 
 }  // namespace mozilla
-
-#undef LOG
diff --git a/dom/media/mp4/DecoderData.h b/dom/media/mp4/DecoderData.h
index a8d38d0abc84..5509ba9a29ac 100644
--- a/dom/media/mp4/DecoderData.h
+++ b/dom/media/mp4/DecoderData.h
@@ -17,7 +17,6 @@
 namespace mozilla {
 
-class IndiceWrapper;
 class MP4Demuxer;
 
 struct PsshInfo {
@@ -54,9 +53,8 @@ class MP4AudioInfo : public mozilla::AudioInfo {
  public:
   MP4AudioInfo() = default;
 
-  MediaResult Update(const Mp4parseTrackInfo* aTrack,
-                     const Mp4parseTrackAudioInfo* aAudio,
-                     const IndiceWrapper* aIndices);
+  MediaResult Update(const Mp4parseTrackInfo* track,
+                     const Mp4parseTrackAudioInfo* audio);
 
   virtual bool IsValid() const override;
 };
diff --git a/dom/media/mp4/MP4Metadata.cpp b/dom/media/mp4/MP4Metadata.cpp
index 6e6e6b3c176d..74767321669b 100644
--- a/dom/media/mp4/MP4Metadata.cpp
+++ b/dom/media/mp4/MP4Metadata.cpp
@@ -355,17 +355,8 @@ MP4Metadata::ResultAndTrackInfo MP4Metadata::GetTrackInfo(
                             TrackTypeToStr(aType), aTrackNumber)),
             nullptr};
   }
-
-  auto indices = GetTrackIndice(info.track_id);
-  if (!indices.Ref()) {
-    // non fatal
-    MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
-            ("Can't get index table for audio track, duration might be "
-             "slightly incorrect"));
-  }
   auto track = mozilla::MakeUnique();
-  MediaResult updateStatus =
-      track->Update(&info, &audio, indices.Ref().get());
+  MediaResult updateStatus = track->Update(&info, &audio);
   if (NS_FAILED(updateStatus)) {
     MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
             ("Updating audio track failed with %s",
@@ -441,8 +432,7 @@ MP4Metadata::ResultAndCryptoFile MP4Metadata::Crypto() const {
   return {NS_OK, &mCrypto};
 }
 
-MP4Metadata::ResultAndIndice MP4Metadata::GetTrackIndice(
-    uint32_t aTrackId) const {
+MP4Metadata::ResultAndIndice MP4Metadata::GetTrackIndice(uint32_t aTrackId) {
   Mp4parseByteData indiceRawData = {};
 
   uint8_t fragmented = false;
diff --git a/dom/media/mp4/MP4Metadata.h b/dom/media/mp4/MP4Metadata.h
index e900fbedc34f..824983a57930 100644
--- a/dom/media/mp4/MP4Metadata.h
+++ b/dom/media/mp4/MP4Metadata.h
@@ -96,7 +96,7 @@ class MP4Metadata : public DecoderDoctorLifeLogger {
   ResultAndCryptoFile Crypto() const;
 
   using ResultAndIndice = ResultAndType>;
-  ResultAndIndice GetTrackIndice(uint32_t aTrackId) const;
+  ResultAndIndice GetTrackIndice(uint32_t aTrackId);
 
   nsresult Parse();
 
diff --git a/dom/media/platforms/android/AndroidDecoderModule.cpp b/dom/media/platforms/android/AndroidDecoderModule.cpp
index e9fbf87adcba..3169f68a1941 100644
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -29,17 +29,17 @@
           ("%s: " arg, __func__, ##__VA_ARGS__))
 
 using namespace mozilla;
+using media::TimeUnit;
 
 namespace mozilla {
 
 mozilla::LazyLogModule sAndroidDecoderModuleLog("AndroidDecoderModule");
 
-nsCString TranslateMimeType(const nsACString& aMimeType) {
+const nsCString TranslateMimeType(const nsACString& aMimeType) {
   if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8)) {
     static constexpr auto vp8 = "video/x-vnd.on2.vp8"_ns;
     return vp8;
-  }
-  if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9)) {
+  } else if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9)) {
     static constexpr auto vp9 = "video/x-vnd.on2.vp9"_ns;
     return vp9;
   }
diff --git a/dom/media/platforms/android/AndroidDecoderModule.h b/dom/media/platforms/android/AndroidDecoderModule.h
index 602c3a779786..d2a97fc39956 100644
--- a/dom/media/platforms/android/AndroidDecoderModule.h
+++ b/dom/media/platforms/android/AndroidDecoderModule.h
@@ -44,7 +44,7 @@ class AndroidDecoderModule : public PlatformDecoderModule {
 
 extern LazyLogModule sAndroidDecoderModuleLog;
 
-nsCString TranslateMimeType(const nsACString& aMimeType);
+const nsCString TranslateMimeType(const nsACString& aMimeType);
 
 }  // namespace mozilla
 
diff --git a/dom/media/platforms/android/RemoteDataDecoder.cpp b/dom/media/platforms/android/RemoteDataDecoder.cpp
index ef4023d3fa79..43008e41505b 100644
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -27,7 +27,6 @@
 #include "mozilla/java/SampleWrappers.h"
 #include "mozilla/java/SurfaceAllocatorWrappers.h"
 #include "mozilla/Maybe.h"
-#include "mozilla/Casting.h"
 #include "nsPromiseFlatString.h"
 #include "nsThreadUtils.h"
 #include "prlog.h"
@@ -84,7 +83,7 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
   class InputInfo {
    public:
-    InputInfo() = default;
+    InputInfo() {}
 
     InputInfo(const int64_t aDurationUs, const gfx::IntSize& aImageSize,
               const gfx::IntSize& aDisplaySize)
        : mDurationUs(aDurationUs),
          mImageSize(aImageSize),
          mDisplaySize(aDisplaySize) {}
 
-    int64_t mDurationUs = {};
-    gfx::IntSize mImageSize = {};
-    gfx::IntSize mDisplaySize = {};
+    int64_t mDurationUs;
+    gfx::IntSize mImageSize;
+    gfx::IntSize mDisplaySize;
   };
 
   class CallbacksSupport final : public JavaCallbacksSupport {
@@ -110,7 +109,7 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
                             java::SampleBuffer::Param aBuffer) override {
       MOZ_ASSERT(!aBuffer, "Video sample should be bufferless");
       // aSample will be implicitly converted into a GlobalRef.
-      mDecoder->ProcessOutput(aSample);
+      mDecoder->ProcessOutput(std::move(aSample));
     }
 
     void HandleOutputFormatChanged(
@@ -523,7 +522,7 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
 
   const VideoInfo mConfig;
   java::GeckoSurface::GlobalRef mSurface;
-  AndroidSurfaceTextureHandle mSurfaceHandle{};
+  AndroidSurfaceTextureHandle mSurfaceHandle;
   // Used to override the SurfaceTexture transform on some devices where the
   // decoder provides a buggy value.
   Maybe mTransformOverride;
@@ -545,7 +544,7 @@ class RemoteVideoDecoder : public RemoteDataDecoder {
   const Maybe mTrackingId;
   // Can be accessed on any thread, but only written during init.
   // Pre-filled decode info used by the performance recorder.
-  MediaInfoFlag mMediaInfoFlag = {};
+  MediaInfoFlag mMediaInfoFlag;
   // Only accessed on mThread.
   // Records decode performance to the profiler.
   PerformanceRecorderMulti mPerformanceRecorder;
@@ -565,35 +564,15 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
     bool formatHasCSD = false;
     NS_ENSURE_SUCCESS_VOID(aFormat->ContainsKey(u"csd-0"_ns, &formatHasCSD));
 
-    uint8_t* audioSpecConfig;
-    uint32_t configLength;
-    if (aConfig.mCodecSpecificConfig.is()) {
-      const AacCodecSpecificData& aacCodecSpecificData =
-          aConfig.mCodecSpecificConfig.as();
-
-      mRemainingEncoderDelay = mEncoderDelay =
-          aacCodecSpecificData.mEncoderDelayFrames;
-      mTotalMediaFrames = aacCodecSpecificData.mMediaFrameCount;
-      audioSpecConfig =
-          aacCodecSpecificData.mDecoderConfigDescriptorBinaryBlob->Elements();
-      configLength =
-          aacCodecSpecificData.mDecoderConfigDescriptorBinaryBlob->Length();
-      LOG("Android RemoteDataDecoder: Found AAC decoder delay (%" PRIu32
-          " frames) and total media frames (%" PRIu64 " frames)",
-          mEncoderDelay, mTotalMediaFrames);
-    } else {
-      // Generally not used, this class is used only for decoding AAC, but can
-      // decode other codecs.
-      RefPtr audioCodecSpecificBinaryBlob =
-          ForceGetAudioCodecSpecificBlob(aConfig.mCodecSpecificConfig);
-      audioSpecConfig = audioCodecSpecificBinaryBlob->Elements();
-      configLength = audioCodecSpecificBinaryBlob->Length();
-      LOG("Android RemoteDataDecoder: extracting generic codec-specific data.");
-    }
-
-    if (!formatHasCSD && configLength >= 2) {
+    // It would be nice to instead use more specific information here, but
+    // we force a byte buffer for now since this handles arbitrary codecs.
+    // TODO(bug 1768564): implement further type checking for codec data.
+    RefPtr audioCodecSpecificBinaryBlob =
+        ForceGetAudioCodecSpecificBlob(aConfig.mCodecSpecificConfig);
+    if (!formatHasCSD && audioCodecSpecificBinaryBlob->Length() >= 2) {
       jni::ByteBuffer::LocalRef buffer(env);
-      buffer = jni::ByteBuffer::New(audioSpecConfig, configLength);
+      buffer = jni::ByteBuffer::New(audioCodecSpecificBinaryBlob->Elements(),
+                                    audioCodecSpecificBinaryBlob->Length());
       NS_ENSURE_SUCCESS_VOID(aFormat->SetByteBuffer(u"csd-0"_ns, buffer));
     }
   }
@@ -657,7 +636,7 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
                             java::SampleBuffer::Param aBuffer) override {
       MOZ_ASSERT(aBuffer, "Audio sample should have buffer");
       // aSample will be implicitly converted into a GlobalRef.
-      mDecoder->ProcessOutput(aSample, aBuffer);
+      mDecoder->ProcessOutput(std::move(aSample), std::move(aBuffer));
     }
 
     void HandleOutputFormatChanged(
@@ -755,63 +734,25 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
 
     if (size > 0) {
 #ifdef MOZ_SAMPLE_TYPE_S16
-      uint32_t numSamples = size / sizeof(int16_t);
-      uint32_t numFrames = numSamples / mOutputChannels;
+      const int32_t numSamples = size / 2;
 #else
 # error We only support 16-bit integer PCM
 #endif
-      uint32_t bufferOffset = AssertedCast(offset);
-      if (mRemainingEncoderDelay) {
-        uint32_t toPop = std::min(numFrames, mRemainingEncoderDelay);
-        bufferOffset += toPop * mOutputChannels * sizeof(int16_t);
-        numFrames -= toPop;
-        numSamples -= toPop * mOutputChannels;
-        mRemainingEncoderDelay -= toPop;
-        LOG("Dropping %" PRId32
-            " audio frames, corresponding the the encoder"
-            " delay. Remaining "
-            "%" PRIu32 ".",
-            toPop, mRemainingEncoderDelay);
-      }
-
-      mDecodedFrames += numFrames;
-
-      if (mTotalMediaFrames && mDecodedFrames > mTotalMediaFrames) {
-        uint32_t paddingFrames = std::min(mDecodedFrames - mTotalMediaFrames,
-                                          AssertedCast(numFrames));
-        // This needs to trim the buffer, removing elements at the end: simply
-        // updating the frame count is enough.
-        numFrames -= AssertedCast(paddingFrames);
-        numSamples -= paddingFrames * mOutputChannels;
-        // Reset the decoded frame count, so that the encoder delay and padding
-        // are trimmed correctly when looping.
-        mDecodedFrames = 0;
-        mRemainingEncoderDelay = mEncoderDelay;
-
-        LOG("Dropped: %u frames, corresponding to the padding", paddingFrames);
-      }
-
-      if (numSamples == 0) {
-        LOG("Trimmed a whole packet, returning.");
-        return;
-      }
 
       AlignedAudioBuffer audio(numSamples);
       if (!audio) {
         Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
         return;
       }
-      jni::ByteBuffer::LocalRef dest =
-          jni::ByteBuffer::New(audio.get(), numSamples * sizeof(int16_t));
-      aBuffer->WriteToByteBuffer(
-          dest, AssertedCast(bufferOffset),
-          AssertedCast(numSamples * sizeof(int16_t)));
-      RefPtr processed_data =
+      jni::ByteBuffer::LocalRef dest = jni::ByteBuffer::New(audio.get(), size);
+      aBuffer->WriteToByteBuffer(dest, offset, size);
+
+      RefPtr data =
           new AudioData(0, TimeUnit::FromMicroseconds(presentationTimeUs),
                         std::move(audio), mOutputChannels, mOutputSampleRate);
-      UpdateOutputStatus(processed_data);
+      UpdateOutputStatus(std::move(data));
     }
 
     if (isEOS) {
@@ -836,13 +777,9 @@ class RemoteAudioDecoder : public RemoteDataDecoder {
     mOutputSampleRate = aSampleRate;
   }
 
-  int32_t mOutputChannels{};
-  int32_t mOutputSampleRate{};
+  int32_t mOutputChannels;
+  int32_t mOutputSampleRate;
   Maybe mFirstDemuxedSampleTime;
-  uint64_t mDecodedFrames = 0;
-  uint64_t mTotalMediaFrames = 0;
-  uint32_t mEncoderDelay = 0;
-  uint32_t mRemainingEncoderDelay = 0;
 };
 
 already_AddRefed RemoteDataDecoder::CreateAudioDecoder(
@@ -955,7 +892,7 @@ using CryptoInfoResult = Result;
 
 static CryptoInfoResult GetCryptoInfoFromSample(const MediaRawData* aSample) {
-  const auto& cryptoObj = aSample->mCrypto;
+  auto& cryptoObj = aSample->mCrypto;
   java::sdk::MediaCodec::CryptoInfo::LocalRef cryptoInfo;
 
   if (!cryptoObj.IsEncrypted()) {
@@ -974,10 +911,10 @@ static CryptoInfoResult GetCryptoInfoFromSample(const MediaRawData* aSample) {
       cryptoObj.mPlainSizes.Length(), cryptoObj.mEncryptedSizes.Length());
 
   uint32_t totalSubSamplesSize = 0;
-  for (const auto& size : cryptoObj.mPlainSizes) {
+  for (auto& size : cryptoObj.mPlainSizes) {
    totalSubSamplesSize += size;
   }
-  for (const auto& size : cryptoObj.mEncryptedSizes) {
+  for (auto& size : cryptoObj.mEncryptedSizes) {
    totalSubSamplesSize += size;
   }
 
@@ -1021,9 +958,7 @@ static CryptoInfoResult GetCryptoInfoFromSample(const MediaRawData* aSample) {
     tempIV.AppendElement(0);
   }
 
-  MOZ_ASSERT(numSubSamples <= INT32_MAX);
-  cryptoInfo->Set(static_cast(numSubSamples),
-                  mozilla::jni::IntArray::From(plainSizes),
+  cryptoInfo->Set(numSubSamples, mozilla::jni::IntArray::From(plainSizes),
                   mozilla::jni::IntArray::From(cryptoObj.mEncryptedSizes),
                   mozilla::jni::ByteArray::From(cryptoObj.mKeyId),
                   mozilla::jni::ByteArray::From(tempIV), mode);
@@ -1044,9 +979,7 @@ RefPtr RemoteDataDecoder::Decode(
       const_cast(aSample->Data()), aSample->Size());
 
   SetState(State::DRAINABLE);
-  MOZ_ASSERT(aSample->Size() <= INT32_MAX);
-  mInputBufferInfo->Set(0, static_cast(aSample->Size()),
-                        aSample->mTime.ToMicroseconds(), 0);
+  mInputBufferInfo->Set(0, aSample->Size(), aSample->mTime.ToMicroseconds(), 0);
   CryptoInfoResult crypto = GetCryptoInfoFromSample(aSample);
   if (crypto.isErr()) {
     return DecodePromise::CreateAndReject(
diff --git a/dom/media/platforms/android/RemoteDataDecoder.h b/dom/media/platforms/android/RemoteDataDecoder.h
index d55aa728b244..fd7dea8b1f0e 100644
--- a/dom/media/platforms/android/RemoteDataDecoder.h
+++ b/dom/media/platforms/android/RemoteDataDecoder.h
@@ -35,7 +35,7 @@ class RemoteDataDecoder : public MediaDataDecoder,
   }
 
  protected:
-  virtual ~RemoteDataDecoder() = default;
+  virtual ~RemoteDataDecoder() {}
   RemoteDataDecoder(MediaData::Type aType, const nsACString& aMimeType,
                     java::sdk::MediaFormat::Param aFormat,
                     const nsString& aDrmStubId);
diff --git a/dom/media/platforms/apple/AppleATDecoder.cpp b/dom/media/platforms/apple/AppleATDecoder.cpp
index 232b4a51d295..3ec8b0d17fed 100644
--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -30,7 +30,6 @@ AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig)
       mOutputFormat(),
       mStream(nullptr),
       mParsedFramesForAACMagicCookie(0),
-      mRemainingEncoderDelay(0),
       mErrored(false) {
   MOZ_COUNT_CTOR(AppleATDecoder);
   LOG("Creating Apple AudioToolbox decoder");
@@ -42,16 +41,6 @@ AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig)
     mFormatID = kAudioFormatMPEGLayer3;
   } else if (mConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
     mFormatID = kAudioFormatMPEG4AAC;
-    if (aConfig.mCodecSpecificConfig.is()) {
-      const AacCodecSpecificData& aacCodecSpecificData =
-          aConfig.mCodecSpecificConfig.as();
-      mRemainingEncoderDelay = mEncoderDelay =
-          aacCodecSpecificData.mEncoderDelayFrames;
-      mTotalMediaFrames = aacCodecSpecificData.mMediaFrameCount;
-      LOG("AppleATDecoder (aac), found encoder delay (%" PRIu32
-          ") and total frame count (%" PRIu64 ") in codec-specific side data",
-          mEncoderDelay, mTotalMediaFrames);
-    }
   } else {
     mFormatID = 0;
   }
@@ -172,9 +161,9 @@ static OSStatus _PassthroughInputDataCallback(
 RefPtr AppleATDecoder::Decode(
     MediaRawData* aSample) {
   MOZ_ASSERT(mThread->IsOnCurrentThread());
-  LOG("mp4 input sample %p duration=%lldus pts=%lld %s %llu bytes audio",
-      aSample, aSample->mDuration.ToMicroseconds(),
-      aSample->mTime.ToMicroseconds(), aSample->mKeyframe ? " keyframe" : "",
+  LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio", aSample,
+      aSample->mDuration.ToMicroseconds(), aSample->mTime.ToMicroseconds(),
+      aSample->mKeyframe ? " keyframe" : "",
      (unsigned long long)aSample->Size());
 
   MediaResult rv = NS_OK;
@@ -210,9 +199,8 @@ MediaResult AppleATDecoder::DecodeSample(MediaRawData* aSample) {
   nsTArray outputData;
   UInt32 channels = mOutputFormat.mChannelsPerFrame;
   // Pick a multiple of the frame size close to a power of two
-  // for efficient allocation. We're mainly using this decoder to decode AAC,
-  // that has packets of 1024 audio frames.
-  const uint32_t MAX_AUDIO_FRAMES = 1024;
+  // for efficient allocation.
+  const uint32_t MAX_AUDIO_FRAMES = 128;
   const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * channels;
 
   // Descriptions for _decompressed_ audio packets. ignored.
@@ -254,40 +242,7 @@ MediaResult AppleATDecoder::DecodeSample(MediaRawData* aSample) {
     }
 
     if (numFrames) {
-      AudioDataValue* outputFrames = decoded.get();
-
-      if (mFormatID == kAudioFormatMPEG4AAC) {
-        // Remove decoder delay and padding when decoding AAC
-        if (mRemainingEncoderDelay) {
-          uint64_t toPop = std::min(mRemainingEncoderDelay, numFrames);
-          mRemainingEncoderDelay -= toPop;
-          numFrames -= toPop;
-          LOG("Removing %" PRIu64
-              " frames of audio, corresponding to the decoder delay, remaining "
-              "%" PRIu32 " (remaining in buffer: %u).",
-              toPop, mRemainingEncoderDelay, numFrames);
-          outputFrames += toPop * channels;
-        }
-
-        mDecodedFrames += numFrames;
-
-        if (mTotalMediaFrames && mDecodedFrames > mTotalMediaFrames) {
-          uint64_t toPop = std::min(mDecodedFrames - mTotalMediaFrames,
-                                    static_cast(numFrames));
-          MOZ_ASSERT(mRemainingEncoderDelay == 0);
-          numFrames -= toPop;
-          LOG("Removing %" PRIu64
-              " frames of audio, corresponding to the decoder padding",
-              toPop);
-          // When this decoder is past the "real" end time of the media, reset
-          // the number of decoded frames. If more frames come, it's because
-          // this decoder is being used for looping, in which case encoder delay
-          // and padding need to be trimmed again.
-          mDecodedFrames = 0;
-          mRemainingEncoderDelay = mEncoderDelay;
-        }
-      }
-      outputData.AppendElements(outputFrames, numFrames * channels);
+      outputData.AppendElements(decoded.get(), numFrames * channels);
     }
 
     if (rv == kNoMoreDataErr) {
@@ -331,10 +286,6 @@ MediaResult AppleATDecoder::DecodeSample(MediaRawData* aSample) {
     data = mAudioConverter->Process(std::move(data));
   }
 
-  // Offset the pts by the encoder delay
-  aSample->mTime -=
-      media::TimeUnit::FromSeconds(static_cast(mEncoderDelay) / rate);
-
   RefPtr audio = new AudioData(
       aSample->mOffset, aSample->mTime, data.Forget(), channels, rate,
       mChannelLayout && mChannelLayout->IsValid()
diff --git a/dom/media/platforms/apple/AppleATDecoder.h b/dom/media/platforms/apple/AppleATDecoder.h
index 02c36c6c2c79..1720a14730fc 100644
--- a/dom/media/platforms/apple/AppleATDecoder.h
+++ b/dom/media/platforms/apple/AppleATDecoder.h
@@ -65,10 +65,6 @@ class AppleATDecoder : public MediaDataDecoder,
   nsresult GetImplicitAACMagicCookie(const MediaRawData* aSample);
   nsresult SetupChannelLayout();
   uint32_t mParsedFramesForAACMagicCookie;
-  uint32_t mDecodedFrames = 0;
-  uint32_t mEncoderDelay;
-  uint32_t mRemainingEncoderDelay;
-  uint64_t mTotalMediaFrames = 0;
   bool mErrored;
 };
 
diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
index 67a723a48bf7..146efc32b10e 100644
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -26,13 +26,6 @@ FFmpegAudioDecoder::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
     // Ffmpeg expects the DecoderConfigDescriptor blob.
     mExtraData->AppendElements(
         *aacCodecSpecificData.mDecoderConfigDescriptorBinaryBlob);
-    mRemainingEncoderDelay = mEncoderDelay =
-        aacCodecSpecificData.mEncoderDelayFrames;
-    mEncoderPaddingOrTotalFrames = aacCodecSpecificData.mMediaFrameCount;
-    FFMPEG_LOG("FFmpegAudioDecoder (aac), found encoder delay (%" PRIu32
-               ") and total frame count (%" PRIu64
-               ") in codec-specific side data",
-               mEncoderDelay, TotalFrames());
     return;
   }
 
@@ -45,13 +38,11 @@ FFmpegAudioDecoder::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
   if (aConfig.mCodecSpecificConfig.is()) {
     const Mp3CodecSpecificData& mp3CodecSpecificData =
         aConfig.mCodecSpecificConfig.as();
-    mEncoderDelay = mRemainingEncoderDelay =
-        mp3CodecSpecificData.mEncoderDelayFrames;
-    mEncoderPaddingOrTotalFrames = mp3CodecSpecificData.mEncoderPaddingFrames;
-    FFMPEG_LOG("FFmpegAudioDecoder (mp3), found encoder delay (%" PRIu32
-               ")"
-               "and padding values (%" PRIu64 ") in codec-specific side-data",
-               mEncoderDelay, Padding());
+    mEncoderDelay = mp3CodecSpecificData.mEncoderDelayFrames;
+    mEncoderPadding = mp3CodecSpecificData.mEncoderPaddingFrames;
+    FFMPEG_LOG("FFmpegAudioDecoder, found encoder delay (%" PRIu32
+               ") and padding values (%" PRIu32 ") in extra data",
+               mEncoderDelay, mEncoderPadding);
     return;
   }
 }
@@ -238,16 +229,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
   return audio;
 }
 
-using ChannelLayout = AudioConfig::ChannelLayout;
-
-uint64_t FFmpegAudioDecoder::Padding() const {
-  MOZ_ASSERT(mCodecID == AV_CODEC_ID_MP3);
-  return mEncoderPaddingOrTotalFrames;
-}
-uint64_t FFmpegAudioDecoder::TotalFrames() const {
-  MOZ_ASSERT(mCodecID == AV_CODEC_ID_AAC);
-  return mEncoderPaddingOrTotalFrames;
-}
+typedef AudioConfig::ChannelLayout ChannelLayout;
 
 MediaResult FFmpegAudioDecoder::DoDecode(MediaRawData* aSample,
                                          uint8_t* aData, int aSize,
                                          bool* aGotFrame,
                                          DecodedData& aResults) {
@@ -266,7 +248,6 @@ MediaResult FFmpegAudioDecoder::DoDecode(MediaRawData* aSample,
   }
 
   if (!PrepareFrame()) {
-    FFMPEG_LOG("FFmpegAudioDecoder: OOM in PrepareFrame");
     return MediaResult(
         NS_ERROR_OUT_OF_MEMORY,
         RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
@@ -338,69 +319,36 @@ MediaResult FFmpegAudioDecoder::DoDecode(MediaRawData* aSample,
   AlignedAudioBuffer audio =
       CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
   if (!audio) {
-    FFMPEG_LOG("FFmpegAudioDecoder: OOM");
     return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
 
   DebugOnly trimmed = false;
   if (mEncoderDelay) {
     trimmed = true;
-    int toPop = std::min(mFrame->nb_samples,
-                         static_cast(mRemainingEncoderDelay));
+    uint32_t toPop = std::min((uint32_t)mFrame->nb_samples, mEncoderDelay);
     audio.PopFront(toPop * numChannels);
     mFrame->nb_samples -= toPop;
-    mRemainingEncoderDelay -= toPop;
-    FFMPEG_LOG("FFmpegAudioDecoder, dropped %" PRIu32
-               " audio frames, corresponding to the encoder delay "
-               "(remaining: %" PRIu32 ")",
-               toPop, mRemainingEncoderDelay);
+    mEncoderDelay -= toPop;
   }
 
-  mDecodedFrames += mFrame->nb_samples;
-
-  if (mCodecID == AV_CODEC_ID_MP3 && aSample->mEOS && Padding()) {
+  if (aSample->mEOS && mEncoderPadding) {
     trimmed = true;
     uint32_t toTrim =
-        std::min(static_cast(mFrame->nb_samples), Padding());
+        std::min((uint32_t)mFrame->nb_samples, mEncoderPadding);
+    mEncoderPadding -= toTrim;
     audio.PopBack(toTrim * numChannels);
-    MOZ_ASSERT(audio.Length() / numChannels <= INT32_MAX);
-    mFrame->nb_samples = static_cast(audio.Length() / numChannels);
-    FFMPEG_LOG("FFmpegAudioDecoder (mp3), dropped %" PRIu32
-               " audio frames, corresponding to the padding.",
-               toTrim);
-  }
-
-  if (mCodecID == AV_CODEC_ID_AAC && TotalFrames() &&
-      mDecodedFrames > TotalFrames()) {
-    trimmed = true;
-    uint32_t paddingFrames =
-        std::min(mDecodedFrames - TotalFrames(),
-                 static_cast(mFrame->nb_samples));
-    audio.PopBack(paddingFrames * numChannels);
-    MOZ_ASSERT(audio.Length() / numChannels <= INT32_MAX);
-    mFrame->nb_samples = static_cast(audio.Length() / numChannels);
-    FFMPEG_LOG("FFmpegAudioDecoder (aac), dropped %" PRIu32
-               " audio frames, corresponding to the padding.",
-               paddingFrames);
-    // When this decoder is past the "real" end time of the media, reset the
-    // number of decoded frames. If more frames come, it's because this
-    // decoder is being used for looping, in which case encoder delay and
-    // padding need to be trimmed again.
-    mDecodedFrames = 0;
-    mRemainingEncoderDelay = mEncoderDelay;
+    mFrame->nb_samples = audio.Length() / numChannels;
   }
 
   media::TimeUnit duration = FramesToTimeUnit(mFrame->nb_samples, samplingRate);
   if (!duration.IsValid()) {
-    FFMPEG_LOG("FFmpegAudioDecoder: invalid duration");
     return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                        RESULT_DETAIL("Invalid sample duration"));
   }
 
   media::TimeUnit newpts = pts + duration;
   if (!newpts.IsValid()) {
-    FFMPEG_LOG("FFmpegAudioDecoder: invalid PTS.");
     return MediaResult(
         NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
         RESULT_DETAIL("Invalid count of accumulated audio samples"));
diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
index c31297035627..6a7de10e6501 100644
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
@@ -42,26 +42,8 @@ class FFmpegAudioDecoder
 private:
   MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize,
                        bool* aGotFrame, DecodedData& aResults) override;
-  // This method is to be called only when decoding mp3, in order to correctly
-  // discard padding frames.
-  uint64_t Padding() const;
-  // This method is to be called only when decoding AAC, in order to correctly
-  // discard padding frames, based on the number of frames decoded and the total
-  // frame count of the media.
-  uint64_t TotalFrames() const;
-  // This is the total number of media frames that have been decoded, and does
-  // not include the frames discarded because part of the encoder delay.
-  uint64_t mDecodedFrames = 0;
-  // The number of frames of encoder delay, that need to be discarded at the
-  // beginning of the stream.
   uint32_t mEncoderDelay = 0;
-  // The remaining encoder delay for this loop iteration.
-  uint32_t mRemainingEncoderDelay = 0;
-  // This holds either the encoder padding (when this decoder decodes mp3), or
-  // the total frame count of the media (when this decoder decodes AAC).
-  // It is best accessed via the `Padding` and `TotalFrames` methods, for
-  // clarity.
-  uint64_t mEncoderPaddingOrTotalFrames = 0;
+  uint32_t mEncoderPadding = 0;
 };
 
 }  // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
index 8846a7eb7be2..39185318cb0c 100644
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
@@ -76,7 +76,7 @@ MediaResult FFmpegDataDecoder::InitDecoder() {
 
   AVCodec* codec = FindAVCodec(mLib, mCodecID);
   if (!codec) {
-    FFMPEG_LOG(" couldn't find ffmpeg decoder for codec id %d", mCodecID);
+    FFMPEG_LOG(" unable to find codec");
     return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                        RESULT_DETAIL("unable to find codec"));
   }
@@ -85,7 +85,7 @@ MediaResult FFmpegDataDecoder::InitDecoder() {
 
   StaticMutexAutoLock mon(sMutex);
   if (!(mCodecContext = mLib->avcodec_alloc_context3(codec))) {
-    FFMPEG_LOG(" couldn't allocate ffmpeg context for codec %s", codec->name);
+    FFMPEG_LOG(" couldn't init ffmpeg context");
     return MediaResult(NS_ERROR_OUT_OF_MEMORY,
                        RESULT_DETAIL("Couldn't init ffmpeg context"));
   }
@@ -102,8 +102,7 @@ MediaResult FFmpegDataDecoder::InitDecoder() {
   InitCodecContext();
   MediaResult ret = AllocateExtraData();
   if (NS_FAILED(ret)) {
-    FFMPEG_LOG(" couldn't allocate ffmpeg extra data for codec %s",
-               codec->name);
+    FFMPEG_LOG(" failed to allocate extra data");
     mLib->av_freep(&mCodecContext);
     return ret;
   }
diff --git a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
index 6bc6315fad49..1ace9808ae0e 100644
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -13,8 +13,6 @@
 #include "mozilla/Logging.h"
 #include "mozilla/Telemetry.h"
 #include "nsTArray.h"
-#include "BufferReader.h"
-#include "mozilla/ScopeExit.h"
 
 #define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
 
@@ -39,13 +37,6 @@ WMFAudioMFTManager::WMFAudioMFTManager(const AudioInfo& aConfig)
         aacCodecSpecificData.mDecoderConfigDescriptorBinaryBlob->Elements();
     configLength =
         aacCodecSpecificData.mDecoderConfigDescriptorBinaryBlob->Length();
-
-    mRemainingEncoderDelay = mEncoderDelay =
-        aacCodecSpecificData.mEncoderDelayFrames;
-    mTotalMediaFrames = aacCodecSpecificData.mMediaFrameCount;
-    LOG("AudioMFT decoder: Found AAC decoder delay (%" PRIu32
-        "frames) and total media frames (%" PRIu64 " frames)\n",
-        mEncoderDelay, mTotalMediaFrames);
   } else {
     // Gracefully handle failure to cover all codec specific cases above. Once
     // we're confident there is no fall through from these cases above, we
@@ -224,7 +215,6 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset, RefPtr& aOutData) {
   // don't need to free it.
   DWORD maxLength = 0, currentLength = 0;
   hr = buffer->Lock(&data, &maxLength, &currentLength);
-  ScopeExit exit([buffer] { buffer->Unlock(); });
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   // Output is made of floats.
@@ -238,43 +228,6 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset, RefPtr& aOutData) {
     return S_OK;
   }
 
-  bool trimmed = false;
-  float* floatData = reinterpret_cast(data);
-  if (mRemainingEncoderDelay) {
-    trimmed = true;
-    int32_t toPop = std::min(numFrames, (int32_t)mRemainingEncoderDelay);
-    floatData += toPop * mAudioChannels;
-    numFrames -= toPop;
-    numSamples -= toPop * mAudioChannels;
-    mRemainingEncoderDelay -= toPop;
-    LOG("AudioMFTManager: Dropped %" PRId32
-        " audio frames, corresponding the the encoder delay. Remaining %" PRIu32
-        ".\n",
-        toPop, mRemainingEncoderDelay);
-  }
-
-  mDecodedFrames += numFrames;
-
-  if (mTotalMediaFrames && mDecodedFrames > mTotalMediaFrames) {
-    trimmed = true;
-    uint64_t paddingFrames =
-        std::min(mDecodedFrames - mTotalMediaFrames, (uint64_t)numFrames);
-    // This needs to trim the buffer, removing elements at the end: simply
-    // updating the frame count is enough.
-    numFrames -= paddingFrames;
-    numSamples -= paddingFrames * mAudioChannels;
-    // Reset the decoded frame count, so that the encoder delay and padding are
-    // trimmed correctly when looping.
-    mDecodedFrames = 0;
-    mRemainingEncoderDelay = mEncoderDelay;
-
-    LOG("Dropped: %llu frames, corresponding to the padding", paddingFrames);
-  }
-
-  if (!numSamples) {
-    return S_OK;
-  }
-
   if (oldAudioRate != mAudioRate) {
     LOG("Audio rate changed from %" PRIu32 " to %" PRIu32, oldAudioRate,
         mAudioRate);
@@ -282,19 +235,18 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset, RefPtr& aOutData) {
 
   AlignedAudioBuffer audioData(numSamples);
   if (!audioData) {
-    if (numSamples == 0) {
-      return S_OK;
-    }
     return E_OUTOFMEMORY;
   }
 
-  PodCopy(audioData.Data(), floatData, numSamples);
+  PodCopy(audioData.Data(), reinterpret_cast(data), numSamples);
+
+  buffer->Unlock();
 
   TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
   NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
 
   const bool isAudioRateChangedToHigher = oldAudioRate < mAudioRate;
-  if (!trimmed && IsPartialOutput(duration, isAudioRateChangedToHigher)) {
+  if (IsPartialOutput(duration, isAudioRateChangedToHigher)) {
     LOG("Encounter a partial frame?! duration shrinks from %" PRId64
         " to %" PRId64,
         mLastOutputDuration.ToMicroseconds(), duration.ToMicroseconds());
diff --git a/dom/media/platforms/wmf/WMFAudioMFTManager.h b/dom/media/platforms/wmf/WMFAudioMFTManager.h
index a48c095d6d33..28531d72ca9c 100644
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.h
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.h
@@ -56,11 +56,6 @@ class WMFAudioMFTManager : public MFTManager {
   media::TimeUnit mLastOutputDuration = media::TimeUnit::Zero();
 
   bool mFirstFrame = true;
-
-  uint64_t mDecodedFrames = 0;
-  uint64_t mTotalMediaFrames = 0;
-  uint32_t mEncoderDelay = 0;
-  uint32_t mRemainingEncoderDelay = 0;
 };
 
 }  // namespace mozilla
diff --git a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
index e03820711229..73589d02c24d 100644
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
@@ -150,8 +150,7 @@ WMFMediaDataDecoder::ProcessOutput(DecodedData& aResults) {
   RefPtr output;
   HRESULT hr = S_OK;
   while (SUCCEEDED(hr = mMFTManager->Output(mLastStreamOffset, output))) {
-    MOZ_ASSERT_IF(output && output->mType == MediaData::Type::VIDEO_DATA,
-                  output.get());
+    MOZ_ASSERT(output.get(), "Upon success, we must receive an output");
     if (ShouldGuardAgaintIncorrectFirstSample(output)) {
       LOG("Discarding sample with time %" PRId64
          " because of ShouldGuardAgaintIncorrectFirstSample check",
          output->mTime.ToMicroseconds());
@@ -162,12 +161,7 @@ WMFMediaDataDecoder::ProcessOutput(DecodedData& aResults) {
      // Got first valid sample, don't need to guard following sample anymore.
      mInputTimesSet.clear();
    }
-    // When handling encoder delay or padding, it is possible to strip entier
-    // audio packet, and to not have something to return here despite the
-    // decoding having succeeded.
-    if (output) {
-      aResults.AppendElement(std::move(output));
-    }
+    aResults.AppendElement(std::move(output));
     if (mDrainStatus == DrainStatus::DRAINING) {
       break;
     }
diff --git a/dom/media/platforms/wrappers/AudioTrimmer.cpp b/dom/media/platforms/wrappers/AudioTrimmer.cpp
index 1210a56f3a00..fa37132314aa 100644
--- a/dom/media/platforms/wrappers/AudioTrimmer.cpp
+++ b/dom/media/platforms/wrappers/AudioTrimmer.cpp
@@ -100,8 +100,8 @@ RefPtr AudioTrimmer::HandleDecodedResult(
     // No samples returned, we assume this is due to the latency of the
     // decoder and that the related decoded sample will be returned during
     // the next call to Decode().
-    LOGV("No sample returned for sample[%" PRId64 ",%" PRId64 "]", rawStart,
-         rawEnd);
+    LOG("No sample returned for sample[%" PRId64 ",%" PRId64 "]", rawStart,
+        rawEnd);
   }
   for (uint32_t i = 0; i < results.Length();) {
     const RefPtr& data = results[i];
@@ -110,10 +110,10 @@ RefPtr AudioTrimmer::HandleDecodedResult(
     if (mTrimmers.IsEmpty()) {
       // mTrimmers being empty can only occurs if the decoder returned more
       // frames than we pushed in. We can't handle this case, abort trimming.
-      LOGV("sample[%" PRId64 ",%" PRId64 "] (decoded[%" PRId64 ",%" PRId64
-           "] no trimming information",
-           rawStart, rawEnd, sampleInterval.mStart.ToMicroseconds(),
-           sampleInterval.mEnd.ToMicroseconds());
+      LOG("sample[%" PRId64 ",%" PRId64 "] (decoded[%" PRId64 ",%" PRId64
+          "] no trimming information",
+          rawStart, rawEnd, sampleInterval.mStart.ToMicroseconds(),
+          sampleInterval.mEnd.ToMicroseconds());
       i++;
       continue;
     }
@@ -130,19 +130,19 @@ RefPtr AudioTrimmer::HandleDecodedResult(
       continue;
     }
     if (!trimmer->Intersects(sampleInterval)) {
-      LOGV("sample[%" PRId64 ",%" PRId64 "] (decoded[%" PRId64 ",%" PRId64
-           "] would be empty after trimming, dropping it",
-           rawStart, rawEnd, sampleInterval.mStart.ToMicroseconds(),
-           sampleInterval.mEnd.ToMicroseconds());
+      LOG("sample[%" PRId64 ",%" PRId64 "] (decoded[%" PRId64 ",%" PRId64
+          "] would be empty after trimming, dropping it",
+          rawStart, rawEnd, sampleInterval.mStart.ToMicroseconds(),
+          sampleInterval.mEnd.ToMicroseconds());
       results.RemoveElementAt(i);
       continue;
     }
-    LOGV("Trimming sample[%" PRId64 ",%" PRId64 "] to [%" PRId64 ",%" PRId64
-         "] (raw "
-         "was:[%" PRId64 ",%" PRId64 "])",
-         sampleInterval.mStart.ToMicroseconds(),
-         sampleInterval.mEnd.ToMicroseconds(), trimmer->mStart.ToMicroseconds(),
-         trimmer->mEnd.ToMicroseconds(), rawStart, rawEnd);
+    LOG("Trimming sample[%" PRId64 ",%" PRId64 "] to [%" PRId64 ",%" PRId64
+        "] (raw "
+        "was:[%" PRId64 ",%" PRId64 "])",
+        sampleInterval.mStart.ToMicroseconds(),
+        sampleInterval.mEnd.ToMicroseconds(), trimmer->mStart.ToMicroseconds(),
+        trimmer->mEnd.ToMicroseconds(), rawStart, rawEnd);
     TimeInterval trim({std::max(trimmer->mStart, sampleInterval.mStart),
                        std::min(trimmer->mEnd, sampleInterval.mEnd)});
@@ -151,9 +151,9 @@ RefPtr AudioTrimmer::HandleDecodedResult(
     NS_ASSERTION(ok, "Trimming of audio sample failed");
     Unused << ok;
     if (sample->Frames() == 0) {
-      LOGV("sample[%" PRId64 ",%" PRId64
-           "] is empty after trimming, dropping it",
-           rawStart, rawEnd);
+      LOG("sample[%" PRId64 ",%" PRId64
+          "] is empty after trimming, dropping it",
+          rawStart, rawEnd);
       results.RemoveElementAt(i);
       continue;
     }
@@ -166,7 +166,7 @@ RefPtr AudioTrimmer::DecodeBatch(
     nsTArray>&& aSamples) {
   MOZ_ASSERT(mThread->IsOnCurrentThread(),
              "We're not on the thread we were first initialized on");
-  LOGV("DecodeBatch");
+  LOG("DecodeBatch");
 
   for (auto&& sample : aSamples) {
     PrepareTrimmers(sample);
@@ -193,11 +193,11 @@ void AudioTrimmer::PrepareTrimmers(MediaRawData* aRaw) {
   // the frame set by the demuxer and mTime and mDuration set to what it
   // should be after trimming.
   if (aRaw->mOriginalPresentationWindow) {
-    LOGV("sample[%" PRId64 ",%" PRId64 "] has trimming info ([%" PRId64
-         ",%" PRId64 "]",
-         aRaw->mOriginalPresentationWindow->mStart.ToMicroseconds(),
-         aRaw->mOriginalPresentationWindow->mEnd.ToMicroseconds(),
-         aRaw->mTime.ToMicroseconds(), aRaw->GetEndTime().ToMicroseconds());
+    LOG("sample[%" PRId64 ",%" PRId64 "] has trimming info ([%" PRId64
+        ",%" PRId64 "]",
+        aRaw->mOriginalPresentationWindow->mStart.ToMicroseconds(),
+        aRaw->mOriginalPresentationWindow->mEnd.ToMicroseconds(),
+        aRaw->mTime.ToMicroseconds(), aRaw->GetEndTime().ToMicroseconds());
     mTrimmers.AppendElement(
         Some(TimeInterval(aRaw->mTime, aRaw->GetEndTime())));
     aRaw->mTime = aRaw->mOriginalPresentationWindow->mStart;
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100.m4a b/dom/media/webaudio/test/half-a-second-1ch-44100.m4a
deleted file mode 100644
index 8255ce14f9db..000000000000
Binary files a/dom/media/webaudio/test/half-a-second-1ch-44100.m4a and /dev/null differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000.m4a b/dom/media/webaudio/test/half-a-second-1ch-48000.m4a
deleted file mode 100644
index 805e76610bd2..000000000000
Binary files a/dom/media/webaudio/test/half-a-second-1ch-48000.m4a and /dev/null differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100.m4a b/dom/media/webaudio/test/half-a-second-2ch-44100.m4a
deleted file mode 100644
index 98622be537df..000000000000
Binary files a/dom/media/webaudio/test/half-a-second-2ch-44100.m4a and /dev/null differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000.m4a b/dom/media/webaudio/test/half-a-second-2ch-48000.m4a
deleted file mode 100644
index c01de2627048..000000000000
Binary files a/dom/media/webaudio/test/half-a-second-2ch-48000.m4a and /dev/null differ
diff --git a/dom/media/webaudio/test/mochitest.ini b/dom/media/webaudio/test/mochitest.ini
index 3c9721ca6295..fc013b470137 100644
--- a/dom/media/webaudio/test/mochitest.ini
+++ b/dom/media/webaudio/test/mochitest.ini
@@ -34,10 +34,6 @@ support-files =
   sine-440-10s.opus
   half-a-second-8000.mp3
   half-a-second-48000.mp3
-  half-a-second-2ch-44100.m4a
-  half-a-second-2ch-48000.m4a
-  half-a-second-1ch-44100.m4a
-  half-a-second-1ch-48000.m4a
   webaudio.js
   ../../webrtc/tests/mochitests/mediaStreamPlayback.js
   ../../webrtc/tests/mochitests/head.js
diff --git a/dom/media/webaudio/test/test_decoderDelay.html b/dom/media/webaudio/test/test_decoderDelay.html
index 35f82742a9e2..c6486e4c4ed3 100644
--- a/dom/media/webaudio/test/test_decoderDelay.html
+++ b/dom/media/webaudio/test/test_decoderDelay.html
@@ -14,10 +14,6 @@ SimpleTest.waitForExplicitFinish();
 var tests = [
   "half-a-second-8000.mp3",
   "half-a-second-48000.mp3",
-  "half-a-second-1ch-44100.m4a",
-  "half-a-second-1ch-48000.m4a",
-  "half-a-second-2ch-44100.m4a",
-  "half-a-second-2ch-48000.m4a",
 ];
 
 async function doit(t) {
@@ -27,7 +23,7 @@ async function doit(t) {
   var response = await fetch(testfile);
   var buffer = await response.arrayBuffer();
   var decoded = await context.decodeAudioData(buffer);
-  is(decoded.duration, 0.5, "The file " + testfile + " is half a second.");
+  is(decoded.duration, 0.5, "The file is half a second.");
  if (++count == tests.length) {
    SimpleTest.finish();
  }
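
Note, not part of the patch above: the backed-out hunks removed decoder-side trimming of AAC encoder delay and padding in the Android, Apple, FFmpeg and WMF audio decoders. As a reference for the technique those hunks implemented, here is a minimal, self-contained sketch in plain C++. It only assumes what the removed code shows: the container's codec delay arrives in microseconds and is rounded to frames, delay frames are dropped from the front of the decoded PCM, padding frames are dropped once the stream's total frame count is exceeded, and the counters are re-armed so a looping stream is trimmed again. All names below are hypothetical and no Gecko types are used.

// Illustrative sketch only; hypothetical names, standard library only.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Rounded conversion of a codec delay expressed in microseconds to frames,
// mirroring the lround()-based conversion the patch removed from
// DecoderData.cpp (a plain UsecsToFrames-style helper would floor instead).
uint32_t DelayUsToFrames(int64_t delayUs, uint32_t sampleRate) {
  if (delayUs <= 0) {
    return 0;
  }
  return static_cast<uint32_t>(
      std::lround(static_cast<double>(delayUs) * sampleRate / 1e6));
}

struct TrimState {
  uint32_t encoderDelayFrames = 0;  // frames to drop at the start of the stream
  uint64_t totalMediaFrames = 0;    // valid frames in the stream (0 = unknown)
  uint64_t decodedFrames = 0;       // frames kept so far
  uint32_t remainingDelay = 0;      // delay still to drop in this pass
};

// Trims one decoded packet of interleaved PCM in place: drop encoder delay
// from the front, then drop padding once the media's total frame count has
// been passed, and reset the state so a looping stream gets trimmed again.
void TrimPacket(std::vector<int16_t>& pcm, uint32_t channels,
                TrimState& state) {
  uint64_t frames = pcm.size() / channels;

  // 1. Encoder delay at the front of the stream.
  uint64_t dropFront = std::min<uint64_t>(state.remainingDelay, frames);
  state.remainingDelay -= static_cast<uint32_t>(dropFront);
  pcm.erase(pcm.begin(),
            pcm.begin() + static_cast<std::ptrdiff_t>(dropFront * channels));
  frames -= dropFront;

  // 2. Padding once we are past the stream's real frame count.
  state.decodedFrames += frames;
  if (state.totalMediaFrames && state.decodedFrames > state.totalMediaFrames) {
    uint64_t dropBack =
        std::min(state.decodedFrames - state.totalMediaFrames, frames);
    pcm.resize(static_cast<size_t>((frames - dropBack) * channels));
    // Re-arm so delay and padding are trimmed again when the stream loops.
    state.decodedFrames = 0;
    state.remainingDelay = state.encoderDelayFrames;
  }
}

Usage would be one call per decoded packet, with state.encoderDelayFrames / state.remainingDelay seeded from DelayUsToFrames() and state.totalMediaFrames from the container's index, which is the same division of labour the removed Gecko code used.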