2013-11-21 01:04:33 +04:00
|
|
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
|
|
|
/* vim:set ts=2 sw=2 sts=2 et cindent: */
|
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
|
|
|
#include "MediaDecoderReader.h"
|
|
|
|
#include "PlatformDecoderModule.h"
|
|
|
|
#include "nsRect.h"
|
|
|
|
#include "mozilla/RefPtr.h"
|
|
|
|
#include "mozilla/CheckedInt.h"
|
|
|
|
#include "VideoUtils.h"
|
|
|
|
#include "ImageContainer.h"
|
Bug 959440 - Various cleanups in MP4Reader. r=kinetik
Change PlatformDecoderModule::Create*Decoder() to take an
mp4_demuxer::{Audio,Video}DecoderConfig parameter, instead of enumerating all
parameters. This means the platform decoders can have more data if need be,
like the AACAudioConfig.
Change MediaDataDecoder::Input() to take an nsAutoPtr<MP4Sample>&. The sample
will be deleted by the caller (MP4Reader) if Input() returns DECODE_STATUS_OK,
but if the MediaDataDecoder wants to assume responsibility of the
lifecycle of the sample (say to enqueue it), it can forget() on the
nsAutoPtr& passed in and assume responsibility.
Call PlatformDecoderModule::Create() on the decode thread. This is a step
towards making these classes decode-thread only.
Add PlatformDecoderModule::Init(), which caches the pref's we need, since
PlatformDecoderModule::Create() is no longer called on the main thread, we can
no longer access them in there.
Add Init() method to MediaDataDecoder interface. This is so that we can call
MediaDataDecoder::Shutdown() to unblock the initialization of a decoder, if
that init needs to block.
Pass LayersBackend type to WMFVideoDecoder, so it knows whether to init DXVA.
2014-01-15 07:13:54 +04:00
|
|
|
#include "mp4_demuxer/mp4_demuxer.h"
|
2014-02-05 05:29:28 +04:00
|
|
|
#include "MediaTaskQueue.h"
|
2013-11-21 01:04:33 +04:00
|
|
|
|
|
|
|
namespace mozilla {
|
|
|
|
|
|
|
|
// Decoder that uses a passed in object's Create function to create blank
|
|
|
|
// MediaData objects.
|
|
|
|
template<class BlankMediaDataCreator>
|
|
|
|
class BlankMediaDataDecoder : public MediaDataDecoder {
|
|
|
|
public:
|
|
|
|
|
2014-02-05 05:29:28 +04:00
|
|
|
BlankMediaDataDecoder(BlankMediaDataCreator* aCreator,
|
2015-02-15 06:08:15 +03:00
|
|
|
FlushableMediaTaskQueue* aTaskQueue,
|
2014-02-05 05:29:28 +04:00
|
|
|
MediaDataDecoderCallback* aCallback)
|
|
|
|
: mCreator(aCreator)
|
|
|
|
, mTaskQueue(aTaskQueue)
|
|
|
|
, mCallback(aCallback)
|
2013-11-21 01:04:33 +04:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual nsresult Init() override {
|
Bug 959440 - Various cleanups in MP4Reader. r=kinetik
Change PlatformDecoderModule::Create*Decoder() to take an
mp4_demuxer::{Audio,Video}DecoderConfig parameter, instead of enumerating all
parameters. This means the platform decoders can have more data if need be,
like the AACAudioConfig.
Change MediaDataDecoder::Input() to take an nsAutoPtr<MP4Sample>&. The sample
will be deleted by the caller (MP4Reader) if Input() returns DECODE_STATUS_OK,
but if the MediaDataDecoder wants to assume responsibility of the
lifecycle of the sample (say to enqueue it), it can forget() on the
nsAutoPtr& passed in and assume responsibility.
Call PlatformDecoderModule::Create() on the decode thread. This is a step
towards making these classes decode-thread only.
Add PlatformDecoderModule::Init(), which caches the pref's we need, since
PlatformDecoderModule::Create() is no longer called on the main thread, we can
no longer access them in there.
Add Init() method to MediaDataDecoder interface. This is so that we can call
MediaDataDecoder::Shutdown() to unblock the initialization of a decoder, if
that init needs to block.
Pass LayersBackend type to WMFVideoDecoder, so it knows whether to init DXVA.
2014-01-15 07:13:54 +04:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual nsresult Shutdown() override {
|
2013-11-21 01:04:33 +04:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2014-02-05 05:29:28 +04:00
|
|
|
class OutputEvent : public nsRunnable {
|
|
|
|
public:
|
|
|
|
OutputEvent(mp4_demuxer::MP4Sample* aSample,
|
|
|
|
MediaDataDecoderCallback* aCallback,
|
|
|
|
BlankMediaDataCreator* aCreator)
|
|
|
|
: mSample(aSample)
|
|
|
|
, mCreator(aCreator)
|
2014-03-21 10:35:14 +04:00
|
|
|
, mCallback(aCallback)
|
2014-02-05 05:29:28 +04:00
|
|
|
{
|
2013-11-21 01:04:33 +04:00
|
|
|
}
|
2015-03-21 19:28:04 +03:00
|
|
|
NS_IMETHOD Run() override
|
2014-02-05 05:29:28 +04:00
|
|
|
{
|
2014-11-20 00:01:10 +03:00
|
|
|
nsRefPtr<MediaData> data = mCreator->Create(mSample->composition_timestamp,
|
|
|
|
mSample->duration,
|
|
|
|
mSample->byte_offset);
|
|
|
|
mCallback->Output(data);
|
2014-02-05 05:29:28 +04:00
|
|
|
return NS_OK;
|
2013-11-21 01:04:33 +04:00
|
|
|
}
|
2014-02-05 05:29:28 +04:00
|
|
|
private:
|
|
|
|
nsAutoPtr<mp4_demuxer::MP4Sample> mSample;
|
|
|
|
BlankMediaDataCreator* mCreator;
|
|
|
|
MediaDataDecoderCallback* mCallback;
|
|
|
|
};
|
2013-11-21 01:04:33 +04:00
|
|
|
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual nsresult Input(mp4_demuxer::MP4Sample* aSample) override
|
2014-02-05 05:29:28 +04:00
|
|
|
{
|
|
|
|
// The MediaDataDecoder must delete the sample when we're finished
|
|
|
|
// with it, so the OutputEvent stores it in an nsAutoPtr and deletes
|
|
|
|
// it once it's run.
|
|
|
|
RefPtr<nsIRunnable> r(new OutputEvent(aSample, mCallback, mCreator));
|
|
|
|
mTaskQueue->Dispatch(r);
|
|
|
|
return NS_OK;
|
2013-11-21 01:04:33 +04:00
|
|
|
}
|
|
|
|
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual nsresult Flush() override {
|
2014-06-02 19:38:04 +04:00
|
|
|
mTaskQueue->Flush();
|
2014-02-05 05:29:28 +04:00
|
|
|
return NS_OK;
|
2013-11-21 01:04:33 +04:00
|
|
|
}
|
|
|
|
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual nsresult Drain() override {
|
2014-07-25 06:57:25 +04:00
|
|
|
mCallback->DrainComplete();
|
2014-02-05 05:29:28 +04:00
|
|
|
return NS_OK;
|
2013-11-21 01:04:33 +04:00
|
|
|
}
|
2014-02-05 05:29:28 +04:00
|
|
|
|
2013-11-21 01:04:33 +04:00
|
|
|
private:
|
|
|
|
nsAutoPtr<BlankMediaDataCreator> mCreator;
|
2015-02-15 06:08:15 +03:00
|
|
|
RefPtr<FlushableMediaTaskQueue> mTaskQueue;
|
2014-02-05 05:29:28 +04:00
|
|
|
MediaDataDecoderCallback* mCallback;
|
2013-11-21 01:04:33 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
class BlankVideoDataCreator {
|
|
|
|
public:
|
2014-02-05 05:29:28 +04:00
|
|
|
BlankVideoDataCreator(uint32_t aFrameWidth,
|
|
|
|
uint32_t aFrameHeight,
|
|
|
|
layers::ImageContainer* aImageContainer)
|
|
|
|
: mFrameWidth(aFrameWidth)
|
|
|
|
, mFrameHeight(aFrameHeight)
|
|
|
|
, mImageContainer(aImageContainer)
|
2013-11-21 01:04:33 +04:00
|
|
|
{
|
2014-02-05 05:29:28 +04:00
|
|
|
mInfo.mDisplay = nsIntSize(mFrameWidth, mFrameHeight);
|
2014-02-09 12:04:38 +04:00
|
|
|
mPicture = gfx::IntRect(0, 0, mFrameWidth, mFrameHeight);
|
2013-11-21 01:04:33 +04:00
|
|
|
}
|
|
|
|
|
2014-11-20 00:01:10 +03:00
|
|
|
already_AddRefed<MediaData>
|
|
|
|
Create(Microseconds aDTS, Microseconds aDuration, int64_t aOffsetInStream)
|
2013-11-21 01:04:33 +04:00
|
|
|
{
|
|
|
|
// Create a fake YUV buffer in a 420 format. That is, an 8bpp Y plane,
|
|
|
|
// with a U and V plane that are half the size of the Y plane, i.e 8 bit,
|
|
|
|
// 2x2 subsampled. Have the data pointers of each frame point to the
|
|
|
|
// first plane, they'll always be zero'd memory anyway.
|
2014-10-14 02:05:00 +04:00
|
|
|
nsAutoArrayPtr<uint8_t> frame(new uint8_t[mFrameWidth * mFrameHeight]);
|
2014-02-05 05:29:28 +04:00
|
|
|
memset(frame, 0, mFrameWidth * mFrameHeight);
|
2013-11-21 01:04:33 +04:00
|
|
|
VideoData::YCbCrBuffer buffer;
|
|
|
|
|
|
|
|
// Y plane.
|
|
|
|
buffer.mPlanes[0].mData = frame;
|
2014-02-05 05:29:28 +04:00
|
|
|
buffer.mPlanes[0].mStride = mFrameWidth;
|
|
|
|
buffer.mPlanes[0].mHeight = mFrameHeight;
|
|
|
|
buffer.mPlanes[0].mWidth = mFrameWidth;
|
2013-11-21 01:04:33 +04:00
|
|
|
buffer.mPlanes[0].mOffset = 0;
|
|
|
|
buffer.mPlanes[0].mSkip = 0;
|
|
|
|
|
|
|
|
// Cb plane.
|
|
|
|
buffer.mPlanes[1].mData = frame;
|
2014-02-05 05:29:28 +04:00
|
|
|
buffer.mPlanes[1].mStride = mFrameWidth / 2;
|
|
|
|
buffer.mPlanes[1].mHeight = mFrameHeight / 2;
|
|
|
|
buffer.mPlanes[1].mWidth = mFrameWidth / 2;
|
2013-11-21 01:04:33 +04:00
|
|
|
buffer.mPlanes[1].mOffset = 0;
|
|
|
|
buffer.mPlanes[1].mSkip = 0;
|
|
|
|
|
|
|
|
// Cr plane.
|
|
|
|
buffer.mPlanes[2].mData = frame;
|
2014-02-05 05:29:28 +04:00
|
|
|
buffer.mPlanes[2].mStride = mFrameWidth / 2;
|
|
|
|
buffer.mPlanes[2].mHeight = mFrameHeight / 2;
|
|
|
|
buffer.mPlanes[2].mWidth = mFrameWidth / 2;
|
2013-11-21 01:04:33 +04:00
|
|
|
buffer.mPlanes[2].mOffset = 0;
|
|
|
|
buffer.mPlanes[2].mSkip = 0;
|
|
|
|
|
|
|
|
return VideoData::Create(mInfo,
|
|
|
|
mImageContainer,
|
|
|
|
nullptr,
|
|
|
|
aOffsetInStream,
|
|
|
|
aDTS,
|
|
|
|
aDuration,
|
|
|
|
buffer,
|
|
|
|
true,
|
|
|
|
aDTS,
|
|
|
|
mPicture);
|
|
|
|
}
|
|
|
|
private:
|
|
|
|
VideoInfo mInfo;
|
2014-02-09 12:04:38 +04:00
|
|
|
gfx::IntRect mPicture;
|
2014-02-05 05:29:28 +04:00
|
|
|
uint32_t mFrameWidth;
|
|
|
|
uint32_t mFrameHeight;
|
2013-11-21 01:04:33 +04:00
|
|
|
RefPtr<layers::ImageContainer> mImageContainer;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
class BlankAudioDataCreator {
|
|
|
|
public:
|
2014-05-22 06:42:39 +04:00
|
|
|
BlankAudioDataCreator(uint32_t aChannelCount, uint32_t aSampleRate)
|
|
|
|
: mFrameSum(0), mChannelCount(aChannelCount), mSampleRate(aSampleRate)
|
2013-11-21 01:04:33 +04:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
MediaData* Create(Microseconds aDTS,
|
|
|
|
Microseconds aDuration,
|
|
|
|
int64_t aOffsetInStream)
|
|
|
|
{
|
|
|
|
// Convert duration to frames. We add 1 to duration to account for
|
|
|
|
// rounding errors, so we get a consistent tone.
|
|
|
|
CheckedInt64 frames = UsecsToFrames(aDuration+1, mSampleRate);
|
|
|
|
if (!frames.isValid() ||
|
|
|
|
!mChannelCount ||
|
|
|
|
!mSampleRate ||
|
|
|
|
frames.value() > (UINT32_MAX / mChannelCount)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
AudioDataValue* samples = new AudioDataValue[frames.value() * mChannelCount];
|
|
|
|
// Fill the sound buffer with an A4 tone.
|
|
|
|
static const float pi = 3.14159265f;
|
|
|
|
static const float noteHz = 440.0f;
|
|
|
|
for (int i = 0; i < frames.value(); i++) {
|
|
|
|
float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
|
|
|
|
for (unsigned c = 0; c < mChannelCount; c++) {
|
|
|
|
samples[i * mChannelCount + c] = AudioDataValue(f);
|
|
|
|
}
|
|
|
|
mFrameSum++;
|
|
|
|
}
|
|
|
|
return new AudioData(aOffsetInStream,
|
|
|
|
aDTS,
|
|
|
|
aDuration,
|
|
|
|
uint32_t(frames.value()),
|
|
|
|
samples,
|
2014-08-11 09:27:00 +04:00
|
|
|
mChannelCount,
|
|
|
|
mSampleRate);
|
2013-11-21 01:04:33 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
int64_t mFrameSum;
|
|
|
|
uint32_t mChannelCount;
|
|
|
|
uint32_t mSampleRate;
|
|
|
|
};
|
|
|
|
|
|
|
|
class BlankDecoderModule : public PlatformDecoderModule {
|
|
|
|
public:
|
|
|
|
|
|
|
|
// Decode thread.
|
2014-07-31 05:40:21 +04:00
|
|
|
virtual already_AddRefed<MediaDataDecoder>
|
2014-11-11 06:30:52 +03:00
|
|
|
CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
|
|
|
|
layers::LayersBackend aLayersBackend,
|
|
|
|
layers::ImageContainer* aImageContainer,
|
2015-02-15 06:08:15 +03:00
|
|
|
FlushableMediaTaskQueue* aVideoTaskQueue,
|
2015-03-21 19:28:04 +03:00
|
|
|
MediaDataDecoderCallback* aCallback) override {
|
2014-07-31 05:40:21 +04:00
|
|
|
BlankVideoDataCreator* creator = new BlankVideoDataCreator(
|
2014-05-22 06:42:39 +04:00
|
|
|
aConfig.display_width, aConfig.display_height, aImageContainer);
|
2014-07-31 05:40:21 +04:00
|
|
|
nsRefPtr<MediaDataDecoder> decoder =
|
|
|
|
new BlankMediaDataDecoder<BlankVideoDataCreator>(creator,
|
|
|
|
aVideoTaskQueue,
|
|
|
|
aCallback);
|
|
|
|
return decoder.forget();
|
2013-11-21 01:04:33 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Decode thread.
|
2014-07-31 05:40:21 +04:00
|
|
|
virtual already_AddRefed<MediaDataDecoder>
|
2014-08-15 10:25:06 +04:00
|
|
|
CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
|
2015-02-15 06:08:15 +03:00
|
|
|
FlushableMediaTaskQueue* aAudioTaskQueue,
|
2015-03-21 19:28:04 +03:00
|
|
|
MediaDataDecoderCallback* aCallback) override {
|
2014-07-31 05:40:21 +04:00
|
|
|
BlankAudioDataCreator* creator = new BlankAudioDataCreator(
|
2014-05-22 06:42:39 +04:00
|
|
|
aConfig.channel_count, aConfig.samples_per_second);
|
2014-07-31 05:40:21 +04:00
|
|
|
|
|
|
|
nsRefPtr<MediaDataDecoder> decoder =
|
|
|
|
new BlankMediaDataDecoder<BlankAudioDataCreator>(creator,
|
|
|
|
aAudioTaskQueue,
|
|
|
|
aCallback);
|
|
|
|
return decoder.forget();
|
2013-11-21 01:04:33 +04:00
|
|
|
}
|
2014-08-15 10:25:06 +04:00
|
|
|
|
|
|
|
virtual bool
|
2015-03-24 06:45:17 +03:00
|
|
|
SupportsAudioMimeType(const nsACString& aMimeType) override
|
2014-08-15 10:25:06 +04:00
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-11-21 01:04:33 +04:00
|
|
|
};
|
|
|
|
|
2014-12-23 06:36:10 +03:00
|
|
|
// Factory: returns a new PlatformDecoderModule that produces blank output
// (zeroed video frames, sine-tone audio).
already_AddRefed<PlatformDecoderModule> CreateBlankDecoderModule()
{
  nsRefPtr<PlatformDecoderModule> pdm = new BlankDecoderModule();
  return pdm.forget();
}
|
|
|
|
|
|
|
|
} // namespace mozilla
|