Merge mozilla-central to mozilla-inbound

Carsten "Tomcat" Book 2016-12-12 17:14:34 +01:00
Parent df263f0393 594937fec2
Commit d3df6f204d
10 changed files with 13852 additions and 13975 deletions

View file

@ -28,6 +28,31 @@ LazyLogModule gVP8TrackEncoderLog("VP8TrackEncoder");
using namespace mozilla::gfx;
using namespace mozilla::layers;
using namespace mozilla::media;
static already_AddRefed<SourceSurface>
GetSourceSurface(already_AddRefed<Image> aImg)
{
RefPtr<Image> img = aImg;
if (!img) {
return nullptr;
}
if (!img->AsGLImage() || NS_IsMainThread()) {
RefPtr<SourceSurface> surf = img->GetAsSourceSurface();
return surf.forget();
}
// GLImage::GetAsSourceSurface() only supports the main thread
RefPtr<SourceSurface> surf;
RefPtr<Runnable> runnable = NewRunnableFrom([img, &surf]() -> nsresult {
surf = img->GetAsSourceSurface();
return NS_OK;
});
NS_DispatchToMainThread(runnable, NS_DISPATCH_SYNC);
return surf.forget();
}
VP8TrackEncoder::VP8TrackEncoder(TrackRate aTrackRate)
: VideoTrackEncoder(aTrackRate)
@ -433,37 +458,6 @@ nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
return NS_OK;
}
void
VP8TrackEncoder::ReplyGetSourceSurface(already_AddRefed<gfx::SourceSurface> aSurf)
{
mSourceSurface = aSurf;
}
already_AddRefed<gfx::SourceSurface>
VP8TrackEncoder::GetSourceSurface(already_AddRefed<Image> aImg)
{
RefPtr<Image> img = aImg;
mSourceSurface = nullptr;
if (img) {
if (img->AsGLImage() && !NS_IsMainThread()) {
// GLImage::GetAsSourceSurface() only support main thread
RefPtr<Runnable> getsourcesurface_runnable =
media::NewRunnableFrom([this, img]() -> nsresult {
// Due to the parameter DISPATCH_SYNC, encoder thread will stock at
// MediaRecorder::Session::Extract(bool). There is no chance
// that TrackEncoder will be destroyed during this period. So
// there is no need to use RefPtr to hold TrackEncoder.
ReplyGetSourceSurface(img->GetAsSourceSurface());
return NS_OK;
});
NS_DispatchToMainThread(getsourcesurface_runnable, NS_DISPATCH_SYNC);
} else {
mSourceSurface = img->GetAsSourceSurface();
}
}
return mSourceSurface.forget();
}
// These two define value used in GetNextEncodeOperation to determine the
// EncodeOperation for next target frame.
#define I_FRAME_RATIO (0.5)
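
The refactored static GetSourceSurface helper in this file relies on NS_DISPATCH_SYNC blocking the encoder thread until the runnable has finished on the main thread; that is what makes the by-reference capture of surf safe. A minimal standalone sketch of the same pattern, using plain C++ threading primitives (DispatchSync and GetSurfaceDescription are illustrative stand-ins, not Gecko APIs):

// Sketch: synchronously run a task on another thread and capture its result
// through a reference, mirroring the NS_DISPATCH_SYNC pattern above.
#include <functional>
#include <future>
#include <iostream>
#include <string>
#include <thread>

// Pretend "main thread" executor: runs the task, then signals completion.
static void DispatchSync(const std::function<void()>& aTask)
{
  std::promise<void> done;
  std::thread mainThreadStandIn([&] {
    aTask();            // runs "on the main thread"
    done.set_value();   // wake the dispatching thread
  });
  done.get_future().wait();  // block, like NS_DISPATCH_SYNC
  mainThreadStandIn.join();
}

static std::string GetSurfaceDescription()
{
  // Stand-in for img->GetAsSourceSurface(), which must run on the main thread.
  return "source surface";
}

int main()
{
  std::string surf;  // captured by reference, like RefPtr<SourceSurface> surf
  DispatchSync([&surf]() { surf = GetSurfaceDescription(); });
  // The synchronous dispatch guarantees the lambda has finished before we
  // read surf here, which is why the by-reference capture is safe.
  std::cout << surf << std::endl;
  return 0;
}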

View file

@ -36,7 +36,6 @@ public:
nsresult GetEncodedTrack(EncodedFrameContainer& aData) final override;
void ReplyGetSourceSurface(already_AddRefed<gfx::SourceSurface> aSurf);
protected:
nsresult Init(int32_t aWidth, int32_t aHeight,
int32_t aDisplayWidth, int32_t aDisplayHeight) final override;
@ -61,8 +60,6 @@ private:
// Prepare the input data to the mVPXImageWrapper for encoding.
nsresult PrepareRawFrame(VideoChunk &aChunk);
already_AddRefed<gfx::SourceSurface> GetSourceSurface(already_AddRefed<layers::Image> aImg);
// Output frame rate.
uint32_t mEncodedFrameRate;
// Duration for the output frame, reciprocal to mEncodedFrameRate.
@ -91,7 +88,6 @@ private:
nsAutoPtr<vpx_codec_ctx_t> mVPXContext;
// Image Descriptor.
nsAutoPtr<vpx_image_t> mVPXImageWrapper;
RefPtr<gfx::SourceSurface> mSourceSurface;
};
} // namespace mozilla

View file

@ -11,12 +11,14 @@
#include "nsDebug.h"
#include "OggCodecState.h"
#include "OpusDecoder.h"
#include "OpusParser.h"
#include "VideoUtils.h"
#include <algorithm>
#include <opus/opus.h>
#include "opus/opus_multistream.h"
#include "XiphExtradata.h"
// On Android JellyBean, the hardware.h header redefines version_major and
// version_minor, which breaks our build. See:
@ -145,6 +147,24 @@ OggCodecState::AddVorbisComment(MetadataTags* aTags,
return true;
}
bool
OggCodecState::SetCodecSpecificConfig(MediaByteBuffer* aBuffer,
OggPacketQueue& aHeaders)
{
nsTArray<const unsigned char*> headers;
nsTArray<size_t> headerLens;
for (size_t i = 0; i < aHeaders.Length(); i++) {
headers.AppendElement(aHeaders[i]->packet);
headerLens.AppendElement(aHeaders[i]->bytes);
}
// Save header packets for the decoder
if (!XiphHeadersToExtradata(aBuffer, headers, headerLens)) {
return false;
}
aHeaders.Erase();
return true;
}
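
SetCodecSpecificConfig above defers the actual byte packing to XiphHeadersToExtradata. Assuming the conventional Xiph lacing layout (a count byte, 255-run lengths for all but the last header, then the concatenated headers), a simplified packer could look like the sketch below; PackXiphHeaders is a hypothetical helper, not the Gecko implementation in XiphExtradata.cpp.

// Illustrative packer for Xiph-style codec-specific data, assuming the
// conventional lacing layout: a count byte (n - 1), then the lengths of all
// but the last header written as runs of 255 plus a remainder byte, then the
// concatenated header payloads.
#include <cstddef>
#include <cstdint>
#include <vector>

static bool PackXiphHeaders(std::vector<uint8_t>& aOut,
                            const std::vector<std::vector<uint8_t>>& aHeaders)
{
  if (aHeaders.empty() || aHeaders.size() > 256) {
    return false;
  }
  aOut.push_back(static_cast<uint8_t>(aHeaders.size() - 1));
  // Lengths of all headers except the last; the last length is implicit.
  for (size_t i = 0; i + 1 < aHeaders.size(); i++) {
    size_t len = aHeaders[i].size();
    while (len >= 255) {
      aOut.push_back(255);
      len -= 255;
    }
    aOut.push_back(static_cast<uint8_t>(len));
  }
  for (const auto& header : aHeaders) {
    aOut.insert(aOut.end(), header.begin(), header.end());
  }
  return true;
}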
void
VorbisState::RecordVorbisPacketSamples(ogg_packet* aPacket, long aSamples)
{
@ -317,10 +337,9 @@ TheoraState::TheoraState(ogg_page* aBosPage)
: OggCodecState(aBosPage, true)
, mSetup(0)
, mCtx(0)
, mPixelAspectRatio(0)
{
MOZ_COUNT_CTOR(TheoraState);
th_info_init(&mInfo);
th_info_init(&mTheoraInfo);
th_comment_init(&mComment);
}
@ -330,7 +349,8 @@ TheoraState::~TheoraState()
th_setup_free(mSetup);
th_decode_free(mCtx);
th_comment_clear(&mComment);
th_info_clear(&mInfo);
th_info_clear(&mTheoraInfo);
Reset();
}
bool
@ -340,34 +360,49 @@ TheoraState::Init()
return false;
}
int64_t n = mInfo.aspect_numerator;
int64_t d = mInfo.aspect_denominator;
int64_t n = mTheoraInfo.aspect_numerator;
int64_t d = mTheoraInfo.aspect_denominator;
mPixelAspectRatio = (n == 0 || d == 0)
float aspectRatio = (n == 0 || d == 0)
? 1.0f : static_cast<float>(n) / static_cast<float>(d);
// Ensure the frame and picture regions aren't larger than our prescribed
// maximum, or zero sized.
nsIntSize frame(mInfo.frame_width, mInfo.frame_height);
nsIntRect picture(mInfo.pic_x, mInfo.pic_y, mInfo.pic_width, mInfo.pic_height);
if (!IsValidVideoRegion(frame, picture, frame)) {
nsIntSize frame(mTheoraInfo.frame_width, mTheoraInfo.frame_height);
nsIntRect picture(mTheoraInfo.pic_x, mTheoraInfo.pic_y, mTheoraInfo.pic_width, mTheoraInfo.pic_height);
nsIntSize display(mTheoraInfo.pic_width, mTheoraInfo.pic_height);
ScaleDisplayByAspectRatio(display, aspectRatio);
if (!IsValidVideoRegion(frame, picture, display)) {
return mActive = false;
}
mCtx = th_decode_alloc(&mInfo, mSetup);
mCtx = th_decode_alloc(&mTheoraInfo, mSetup);
if (!mCtx) {
return mActive = false;
}
return true;
// Video track's frame sizes will not overflow. Activate the video track.
mInfo.mMimeType = NS_LITERAL_CSTRING("video/theora");
mInfo.mDisplay = display;
mInfo.mImage = frame;
mInfo.SetImageRect(picture);
return mActive = SetCodecSpecificConfig(mInfo.mCodecSpecificConfig, mHeaders);
}
nsresult
TheoraState::Reset()
{
mHeaders.Erase();
return OggCodecState::Reset();
}
bool
TheoraState::DecodeHeader(ogg_packet* aPacket)
{
nsAutoRef<ogg_packet> autoRelease(aPacket);
mHeaders.Append(aPacket);
mPacketCount++;
int ret = th_decode_headerin(&mInfo,
int ret = th_decode_headerin(&mTheoraInfo,
&mComment,
&mSetup,
aPacket);
@ -405,7 +440,7 @@ TheoraState::Time(int64_t granulepos)
if (!mActive) {
return -1;
}
return TheoraState::Time(&mInfo, granulepos);
return TheoraState::Time(&mTheoraInfo, granulepos);
}
bool
@ -442,26 +477,26 @@ TheoraState::Time(th_info* aInfo, int64_t aGranulepos)
int64_t TheoraState::StartTime(int64_t granulepos)
{
if (granulepos < 0 || !mActive || mInfo.fps_numerator == 0) {
if (granulepos < 0 || !mActive || mTheoraInfo.fps_numerator == 0) {
return -1;
}
CheckedInt64 t =
(CheckedInt64(th_granule_frame(mCtx, granulepos)) * USECS_PER_S)
* mInfo.fps_denominator;
* mTheoraInfo.fps_denominator;
if (!t.isValid()) {
return -1;
}
return t.value() / mInfo.fps_numerator;
return t.value() / mTheoraInfo.fps_numerator;
}
int64_t
TheoraState::PacketDuration(ogg_packet* aPacket)
{
if (!mActive || mInfo.fps_numerator == 0) {
if (!mActive || mTheoraInfo.fps_numerator == 0) {
return -1;
}
CheckedInt64 t =
SaferMultDiv(mInfo.fps_denominator, USECS_PER_S, mInfo.fps_numerator);
CheckedInt64 t = SaferMultDiv(mTheoraInfo.fps_denominator, USECS_PER_S,
mTheoraInfo.fps_numerator);
return t.isValid() ? t.value() : -1;
}
@ -476,10 +511,11 @@ TheoraState::MaxKeyframeOffset()
int64_t frameDuration;
// Max number of frames keyframe could possibly be offset.
int64_t keyframeDiff = (1 << mInfo.keyframe_granule_shift) - 1;
int64_t keyframeDiff = (1 << mTheoraInfo.keyframe_granule_shift) - 1;
// Length of frame in usecs.
frameDuration = (mInfo.fps_denominator * USECS_PER_S) / mInfo.fps_numerator;
frameDuration =
(mTheoraInfo.fps_denominator * USECS_PER_S) / mTheoraInfo.fps_numerator;
// Total time in usecs keyframe can be offset from any given frame.
return frameDuration * keyframeDiff;
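
As a worked example of the formula above (illustrative values): for a 30000/1001 fps Theora stream with keyframe_granule_shift = 6, frameDuration = (1001 * 1000000) / 30000 = 33366 usecs and keyframeDiff = (1 << 6) - 1 = 63, so MaxKeyframeOffset() returns 33366 * 63 = 2102058 usecs, i.e. a keyframe can be offset by roughly 2.1 seconds from a given interframe.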
@ -551,8 +587,8 @@ TheoraState::ReconstructTheoraGranulepos()
// frames. Granulepos are stored as ((keyframe<<shift)+offset). We
// know the granulepos of the last frame in the list, so we can infer
// the granulepos of the intermediate frames using their frame numbers.
ogg_int64_t shift = mInfo.keyframe_granule_shift;
ogg_int64_t version_3_2_1 = TheoraVersion(&mInfo,3,2,1);
ogg_int64_t shift = mTheoraInfo.keyframe_granule_shift;
ogg_int64_t version_3_2_1 = TheoraVersion(&mTheoraInfo,3,2,1);
ogg_int64_t lastFrame = th_granule_frame(mCtx,
lastGranulepos) + version_3_2_1;
ogg_int64_t firstFrame = lastFrame - mUnstamped.Length() + 1;
@ -625,6 +661,7 @@ VorbisState::Reset()
if (mActive && vorbis_synthesis_restart(&mDsp) != 0) {
res = NS_ERROR_FAILURE;
}
mHeaders.Erase();
if (NS_FAILED(OggCodecState::Reset())) {
return NS_ERROR_FAILURE;
}
@ -641,7 +678,7 @@ VorbisState::VorbisState(ogg_page* aBosPage)
, mGranulepos(0)
{
MOZ_COUNT_CTOR(VorbisState);
vorbis_info_init(&mInfo);
vorbis_info_init(&mVorbisInfo);
vorbis_comment_init(&mComment);
memset(&mDsp, 0, sizeof(vorbis_dsp_state));
memset(&mBlock, 0, sizeof(vorbis_block));
@ -653,16 +690,16 @@ VorbisState::~VorbisState()
Reset();
vorbis_block_clear(&mBlock);
vorbis_dsp_clear(&mDsp);
vorbis_info_clear(&mInfo);
vorbis_info_clear(&mVorbisInfo);
vorbis_comment_clear(&mComment);
}
bool
VorbisState::DecodeHeader(ogg_packet* aPacket)
{
nsAutoRef<ogg_packet> autoRelease(aPacket);
mHeaders.Append(aPacket);
mPacketCount++;
int ret = vorbis_synthesis_headerin(&mInfo,
int ret = vorbis_synthesis_headerin(&mVorbisInfo,
&mComment,
aPacket);
// We must determine when we've read the last header packet.
@ -688,11 +725,12 @@ VorbisState::DecodeHeader(ogg_packet* aPacket)
// header packets. Assume bad input. Our caller will deactivate the
// bitstream.
return false;
} else if (ret == 0 && isSetupHeader && mPacketCount == 3) {
} else if (!ret && isSetupHeader && mPacketCount == 3) {
// Successfully read the three header packets.
// The bitstream remains active.
mDoneReadingHeaders = true;
}
return true;
}
@ -703,7 +741,7 @@ VorbisState::Init()
return false;
}
int ret = vorbis_synthesis_init(&mDsp, &mInfo);
int ret = vorbis_synthesis_init(&mDsp, &mVorbisInfo);
if (ret != 0) {
NS_WARNING("vorbis_synthesis_init() failed initializing vorbis bitstream");
return mActive = false;
@ -716,6 +754,24 @@ VorbisState::Init()
}
return mActive = false;
}
nsTArray<const unsigned char*> headers;
nsTArray<size_t> headerLens;
for (size_t i = 0; i < mHeaders.Length(); i++) {
headers.AppendElement(mHeaders[i]->packet);
headerLens.AppendElement(mHeaders[i]->bytes);
}
// Save header packets for the decoder
if (!XiphHeadersToExtradata(mInfo.mCodecSpecificConfig,
headers, headerLens)) {
return mActive = false;
}
mHeaders.Erase();
mInfo.mMimeType = NS_LITERAL_CSTRING("audio/vorbis");
mInfo.mRate = mVorbisInfo.rate;
mInfo.mChannels = mVorbisInfo.channels;
mInfo.mBitDepth = 16;
return true;
}
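
The extradata that VorbisState::Init (and TheoraState::Init above) hands to the decoder has to be split back into individual header packets on the other side. Under the same lacing assumption as the packer sketched earlier, a simplified unpacker could look like this; UnpackXiphHeaders is illustrative only, not Gecko's XiphExtradata helper.

// Illustrative unpacker matching the packer sketched above: recovers the
// individual header packets from the assumed Xiph-style lacing.
#include <cstddef>
#include <cstdint>
#include <vector>

static bool UnpackXiphHeaders(const std::vector<uint8_t>& aData,
                              std::vector<std::vector<uint8_t>>& aHeaders)
{
  if (aData.empty()) {
    return false;
  }
  size_t pos = 0;
  size_t count = aData[pos++] + 1;
  std::vector<size_t> lengths;
  size_t used = 0;
  // All but the last length are laced; the last one is whatever remains.
  for (size_t i = 0; i + 1 < count; i++) {
    size_t len = 0;
    uint8_t b = 255;
    while (b == 255) {
      if (pos >= aData.size()) {
        return false;
      }
      b = aData[pos++];
      len += b;
    }
    lengths.push_back(len);
    used += len;
  }
  if (pos + used > aData.size()) {
    return false;
  }
  lengths.push_back(aData.size() - pos - used);
  for (size_t len : lengths) {
    aHeaders.emplace_back(aData.begin() + pos, aData.begin() + pos + len);
    pos += len;
  }
  return true;
}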
@ -726,7 +782,7 @@ VorbisState::Time(int64_t granulepos)
return -1;
}
return VorbisState::Time(&mInfo, granulepos);
return VorbisState::Time(&mVorbisInfo, granulepos);
}
int64_t
@ -837,7 +893,7 @@ VorbisState::ReconstructVorbisGranulepos()
"Must know last granulepos!");
if (mUnstamped.Length() == 1) {
ogg_packet* packet = mUnstamped[0];
long blockSize = vorbis_packet_blocksize(&mInfo, packet);
long blockSize = vorbis_packet_blocksize(&mVorbisInfo, packet);
if (blockSize < 0) {
// On failure vorbis_packet_blocksize returns < 0. If we've got
// a bad packet, we just assume that decode will have to skip this
@ -868,8 +924,8 @@ VorbisState::ReconstructVorbisGranulepos()
ogg_packet* prev = mUnstamped[i-1];
ogg_int64_t granulepos = packet->granulepos;
NS_ASSERTION(granulepos != -1, "Must know granulepos!");
long prevBlockSize = vorbis_packet_blocksize(&mInfo, prev);
long blockSize = vorbis_packet_blocksize(&mInfo, packet);
long prevBlockSize = vorbis_packet_blocksize(&mVorbisInfo, prev);
long blockSize = vorbis_packet_blocksize(&mVorbisInfo, packet);
if (blockSize < 0 || prevBlockSize < 0) {
// On failure vorbis_packet_blocksize returns < 0. If we've got
@ -893,7 +949,7 @@ VorbisState::ReconstructVorbisGranulepos()
}
ogg_packet* first = mUnstamped[0];
long blockSize = vorbis_packet_blocksize(&mInfo, first);
long blockSize = vorbis_packet_blocksize(&mVorbisInfo, first);
if (blockSize < 0) {
mPrevVorbisBlockSize = 0;
blockSize = 0;
@ -919,7 +975,7 @@ VorbisState::ReconstructVorbisGranulepos()
#endif
}
mPrevVorbisBlockSize = vorbis_packet_blocksize(&mInfo, last);
mPrevVorbisBlockSize = vorbis_packet_blocksize(&mVorbisInfo, last);
mPrevVorbisBlockSize = std::max(static_cast<long>(0), mPrevVorbisBlockSize);
mGranulepos = last->granulepos;
@ -930,7 +986,6 @@ OpusState::OpusState(ogg_page* aBosPage)
: OggCodecState(aBosPage, true)
, mParser(nullptr)
, mDecoder(nullptr)
, mSkip(0)
, mPrevPacketGranulepos(0)
, mPrevPageGranulepos(0)
{
@ -962,8 +1017,6 @@ OpusState::Reset(bool aStart)
if (mActive && mDecoder) {
// Reset the decoder.
opus_multistream_decoder_ctl(mDecoder, OPUS_RESET_STATE);
// Let the seek logic handle pre-roll if we're not seeking to the start.
mSkip = aStart ? mParser->mPreSkip : 0;
// This lets us distinguish the first page being the last page vs. just
// not having processed the previous page when we encounter the last page.
mPrevPageGranulepos = aStart ? 0 : -1;
@ -975,7 +1028,7 @@ OpusState::Reset(bool aStart)
return NS_ERROR_FAILURE;
}
LOG(LogLevel::Debug, ("Opus decoder reset, to skip %d", mSkip));
LOG(LogLevel::Debug, ("Opus decoder reset"));
return res;
}
@ -998,9 +1051,20 @@ OpusState::Init(void)
mParser->mMappingTable,
&error);
mSkip = mParser->mPreSkip;
LOG(LogLevel::Debug, ("Opus decoder init, to skip %d", mSkip));
mInfo.mMimeType = NS_LITERAL_CSTRING("audio/opus");
mInfo.mRate = mParser->mRate;
mInfo.mChannels = mParser->mChannels;
mInfo.mBitDepth = 16;
// Save preskip & the first header packet for the Opus decoder
OpusDataDecoder::AppendCodecDelay(mInfo.mCodecSpecificConfig,
Time(0, mParser->mPreSkip));
if (!mHeaders.PeekFront()) {
return false;
}
mInfo.mCodecSpecificConfig->AppendElements(mHeaders.PeekFront()->packet,
mHeaders.PeekFront()->bytes);
mHeaders.Erase();
LOG(LogLevel::Debug, ("Opus decoder init"));
return error == OPUS_OK;
}
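
OpusState::Init now assembles the codec-specific config itself; AppendCodecDelay is presumably the equivalent of what the removed OggDemuxer::SetupTargetOpus did later in this commit, i.e. prepending the pre-skip (converted to a time value) as a big-endian 64-bit integer, followed by the raw OpusHead packet. A standalone sketch of that assumed layout (AppendBigEndianU64 and BuildOpusExtradata are hypothetical helpers):

// Sketch of the Opus codec-specific config layout assumed above:
// 8 bytes of big-endian codec delay, then the raw OpusHead header packet.
#include <cstddef>
#include <cstdint>
#include <vector>

static void AppendBigEndianU64(std::vector<uint8_t>& aOut, uint64_t aValue)
{
  for (int shift = 56; shift >= 0; shift -= 8) {
    aOut.push_back(static_cast<uint8_t>((aValue >> shift) & 0xff));
  }
}

static void BuildOpusExtradata(std::vector<uint8_t>& aOut,
                               uint64_t aCodecDelay,
                               const uint8_t* aOpusHead, size_t aOpusHeadLen)
{
  AppendBigEndianU64(aOut, aCodecDelay);                         // pre-skip
  aOut.insert(aOut.end(), aOpusHead, aOpusHead + aOpusHeadLen);  // OpusHead
}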
@ -1016,14 +1080,7 @@ OpusState::DecodeHeader(ogg_packet* aPacket)
if (!mParser->DecodeHeader(aPacket->packet, aPacket->bytes)) {
return false;
}
mRate = mParser->mRate;
mChannels = mParser->mChannels;
mPreSkip = mParser->mPreSkip;
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
mGain = mParser->mGain;
#else
mGain_Q16 = mParser->mGain_Q16;
#endif
mHeaders.Append(autoRelease.disown());
break;
// Parse the metadata header.
@ -1156,7 +1213,7 @@ OpusState::ReconstructOpusGranulepos(void)
if (mPrevPageGranulepos != -1) {
// If this file only has one page and the final granule position is
// smaller than the pre-skip amount, we MUST reject the stream.
if (!mDoneReadingHeaders && last->granulepos < mPreSkip)
if (!mDoneReadingHeaders && last->granulepos < mParser->mPreSkip)
return false;
int64_t last_gp = last->granulepos;
gp = mPrevPageGranulepos;
@ -1236,12 +1293,13 @@ already_AddRefed<MediaRawData>
OpusState::PacketOutAsMediaRawData()
{
ogg_packet* packet = PacketPeek();
uint32_t frames = 0;
const int64_t endFrame = packet->granulepos;
if (!packet) {
return nullptr;
}
uint32_t frames = 0;
const int64_t endFrame = packet->granulepos;
if (packet->e_o_s) {
frames = GetOpusDeltaGP(packet);
}
@ -1345,10 +1403,10 @@ FlacState::GetTags()
return mParser.GetTags();
}
const AudioInfo&
FlacState::Info()
const TrackInfo*
FlacState::GetInfo() const
{
return mParser.mInfo;
return &mParser.mInfo;
}
bool

View file

@ -68,6 +68,11 @@ public:
ogg_packet* PopFront() { return static_cast<ogg_packet*>(nsDeque::PopFront()); }
ogg_packet* PeekFront() { return static_cast<ogg_packet*>(nsDeque::PeekFront()); }
ogg_packet* Pop() { return static_cast<ogg_packet*>(nsDeque::Pop()); }
ogg_packet* operator[](size_t aIndex) const
{
return static_cast<ogg_packet*>(nsDeque::ObjectAt(aIndex));
}
size_t Length() const { return nsDeque::GetSize(); }
void PushFront(ogg_packet* aPacket) { nsDeque::PushFront(aPacket); }
void Erase() { nsDeque::Erase(); }
};
@ -205,6 +210,12 @@ public:
// captured.
virtual nsresult PageIn(ogg_page* aPage);
// Returns the maximum number of microseconds which a keyframe can be offset
// from any given interframe.
virtual int64_t MaxKeyframeOffset() { return 0; }
// Public access for mTheoraInfo.keyframe_granule_shift
virtual int32_t KeyFrameGranuleJobs() { return 0; }
// Number of packets read.
uint64_t mPacketCount;
@ -224,6 +235,12 @@ public:
// True when all headers packets have been read.
bool mDoneReadingHeaders;
virtual const TrackInfo* GetInfo() const
{
MOZ_RELEASE_ASSERT(false, "Can't be called directly");
return nullptr;
}
// Validation utility for vorbis-style tag names.
static bool IsValidVorbisTagName(nsCString& aName);
@ -256,6 +273,8 @@ protected:
// in order to capture granulepos.
nsTArray<ogg_packet*> mUnstamped;
bool SetCodecSpecificConfig(MediaByteBuffer* aBuffer, OggPacketQueue& aHeaders);
private:
bool InternalInit();
};
@ -274,19 +293,21 @@ public:
nsresult Reset() override;
bool IsHeader(ogg_packet* aPacket) override;
nsresult PageIn(ogg_page* aPage) override;
const TrackInfo* GetInfo() const override { return &mInfo; }
// Return a hash table with tag metadata.
MetadataTags* GetTags() override;
// Returns the end time that a granulepos represents.
static int64_t Time(vorbis_info* aInfo, int64_t aGranulePos);
vorbis_info mInfo;
private:
AudioInfo mInfo;
vorbis_info mVorbisInfo;
vorbis_comment mComment;
vorbis_dsp_state mDsp;
vorbis_block mBlock;
OggPacketQueue mHeaders;
private:
// Returns the end time that a granulepos represents.
static int64_t Time(vorbis_info* aInfo, int64_t aGranulePos);
// Reconstructs the granulepos of Vorbis packets stored in the mUnstamped
// array.
@ -347,25 +368,28 @@ public:
int64_t StartTime(int64_t granulepos) override;
int64_t PacketDuration(ogg_packet* aPacket) override;
bool Init() override;
nsresult Reset() override;
bool IsHeader(ogg_packet* aPacket) override;
bool IsKeyframe(ogg_packet* aPacket) override;
nsresult PageIn(ogg_page* aPage) override;
const TrackInfo* GetInfo() const override { return &mInfo; }
int64_t MaxKeyframeOffset() override;
int32_t KeyFrameGranuleJobs() override
{
return mTheoraInfo.keyframe_granule_shift;
}
// Returns the maximum number of microseconds which a keyframe can be offset
// from any given interframe.
int64_t MaxKeyframeOffset();
private:
// Returns the end time that a granulepos represents.
static int64_t Time(th_info* aInfo, int64_t aGranulePos);
th_info mInfo;
th_info mTheoraInfo;
th_comment mComment;
th_setup_info* mSetup;
th_dec_ctx* mCtx;
float mPixelAspectRatio;
private:
VideoInfo mInfo;
OggPacketQueue mHeaders;
// Reconstructs the granulepos of Theora packets stored in the
// mUnstamped array. mUnstamped must be filled with consecutive packets from
@ -392,31 +416,21 @@ public:
bool IsHeader(ogg_packet* aPacket) override;
nsresult PageIn(ogg_page* aPage) override;
already_AddRefed<MediaRawData> PacketOutAsMediaRawData() override;
const TrackInfo* GetInfo() const override { return &mInfo; }
// Returns the end time that a granulepos represents.
static int64_t Time(int aPreSkip, int64_t aGranulepos);
// Various fields from the Ogg Opus header.
int mRate; // Sample rate the decoder uses (always 48 kHz).
int mChannels; // Number of channels the stream encodes.
uint16_t mPreSkip; // Number of samples to strip after decoder reset.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
float mGain; // Gain to apply to decoder output.
#else
int32_t mGain_Q16; // Gain to apply to the decoder output.
#endif
nsAutoPtr<OpusParser> mParser;
OpusMSDecoder* mDecoder;
int mSkip; // Number of samples left to trim before playback.
// Granule position (end sample) of the last decoded Opus packet. This is
// used to calculate the amount we should trim from the last packet.
int64_t mPrevPacketGranulepos;
// Construct and return a table of tags from the metadata header.
MetadataTags* GetTags() override;
private:
nsAutoPtr<OpusParser> mParser;
OpusMSDecoder* mDecoder;
// Granule position (end sample) of the last decoded Opus packet. This is
// used to calculate the amount we should trim from the last packet.
int64_t mPrevPacketGranulepos;
// Reconstructs the granulepos of Opus packets stored in the
// mUnstamped array. mUnstamped must be filled with consecutive packets from
@ -429,7 +443,8 @@ private:
// used to calculate the Opus per-packet granule positions on the last page,
// where we may need to trim some samples from the end.
int64_t mPrevPageGranulepos;
AudioInfo mInfo;
OggPacketQueue mHeaders;
};
// Constructs a 32bit version number out of two 16 bit major,minor
@ -618,7 +633,7 @@ public:
// Return a hash table with tag metadata.
MetadataTags* GetTags() override;
const AudioInfo& Info();
const TrackInfo* GetInfo() const override;
private:
bool ReconstructFlacGranulepos(void);

View file

@ -53,30 +53,6 @@ static const int64_t OGG_SEEK_OPUS_PREROLL = 80 * USECS_PER_MS;
static Atomic<uint32_t> sStreamSourceID(0u);
class OggHeaders
{
public:
OggHeaders() {}
~OggHeaders()
{
for (size_t i = 0; i < mHeaders.Length(); i++) {
delete[] mHeaders[i];
}
}
void AppendPacket(const ogg_packet* aPacket)
{
size_t packetSize = aPacket->bytes;
unsigned char* packetData = new unsigned char[packetSize];
memcpy(packetData, aPacket->packet, packetSize);
mHeaders.AppendElement(packetData);
mHeaderLens.AppendElement(packetSize);
}
nsTArray<const unsigned char*> mHeaders;
nsTArray<size_t> mHeaderLens;
};
// Return the corresponding category in aKind based on the following specs.
// (https://www.whatwg.org/specs/web-apps/current-
// work/multipage/embedded-content.html#dom-audiotrack-kind) &
@ -135,17 +111,11 @@ OggDemuxer::OggDemuxer(MediaResource* aResource)
, mSkeletonState(nullptr)
, mAudioOggState(aResource)
, mVideoOggState(aResource)
, mVorbisSerial(0)
, mOpusSerial(0)
, mTheoraSerial(0)
, mFlacSerial(0)
, mOpusPreSkip(0)
, mIsChained(false)
, mTimedMetadataEvent(nullptr)
, mOnSeekableEvent(nullptr)
{
MOZ_COUNT_CTOR(OggDemuxer);
PodZero(&mTheoraInfo);
}
OggDemuxer::~OggDemuxer()
@ -329,8 +299,7 @@ OggDemuxer::Reset(TrackInfo::TrackType aType)
bool
OggDemuxer::ReadHeaders(TrackInfo::TrackType aType,
OggCodecState* aState,
OggHeaders& aHeaders)
OggCodecState* aState)
{
while (!aState->DoneReadingHeaders()) {
DemuxUntilPacketAvailable(aType, aState);
@ -341,10 +310,6 @@ OggDemuxer::ReadHeaders(TrackInfo::TrackType aType,
return false;
}
// Save a copy of the header packet for the decoder to use later;
// OggCodecState::DecodeHeader will free it when processing locally.
aHeaders.AppendPacket(packet);
// Local OggCodecState needs to decode headers in order to process
// packet granulepos -> time mappings, etc.
if (!aState->DecodeHeader(packet)) {
@ -374,106 +339,18 @@ OggDemuxer::BuildSerialList(nsTArray<uint32_t>& aTracks)
}
void
OggDemuxer::SetupTargetTheora(TheoraState* aTheoraState, OggHeaders& aHeaders)
OggDemuxer::SetupTarget(OggCodecState** aSavedState, OggCodecState* aNewState)
{
if (mTheoraState) {
mTheoraState->Reset();
if (*aSavedState) {
(*aSavedState)->Reset();
}
nsIntRect picture = nsIntRect(aTheoraState->mInfo.pic_x,
aTheoraState->mInfo.pic_y,
aTheoraState->mInfo.pic_width,
aTheoraState->mInfo.pic_height);
nsIntSize displaySize = nsIntSize(aTheoraState->mInfo.pic_width,
aTheoraState->mInfo.pic_height);
// Apply the aspect ratio to produce the intrinsic display size we report
// to the element.
ScaleDisplayByAspectRatio(displaySize, aTheoraState->mPixelAspectRatio);
nsIntSize frameSize(aTheoraState->mInfo.frame_width,
aTheoraState->mInfo.frame_height);
if (IsValidVideoRegion(frameSize, picture, displaySize)) {
// Video track's frame sizes will not overflow. Activate the video track.
mInfo.mVideo.mMimeType = "video/theora";
mInfo.mVideo.mDisplay = displaySize;
mInfo.mVideo.mImage = frameSize;
mInfo.mVideo.SetImageRect(picture);
// Copy Theora info data for time computations on other threads.
memcpy(&mTheoraInfo, &aTheoraState->mInfo, sizeof(mTheoraInfo));
// Save header packets for the decoder
if (!XiphHeadersToExtradata(mInfo.mVideo.mCodecSpecificConfig,
aHeaders.mHeaders, aHeaders.mHeaderLens)) {
return;
}
mTheoraState = aTheoraState;
mTheoraSerial = aTheoraState->mSerial;
if (aNewState->GetInfo()->GetAsAudioInfo()) {
mInfo.mAudio = *aNewState->GetInfo()->GetAsAudioInfo();
} else {
mInfo.mVideo = *aNewState->GetInfo()->GetAsVideoInfo();
}
}
void
OggDemuxer::SetupTargetVorbis(VorbisState* aVorbisState, OggHeaders& aHeaders)
{
if (mVorbisState) {
mVorbisState->Reset();
}
// Copy Vorbis info data for time computations on other threads.
memcpy(&mVorbisInfo, &aVorbisState->mInfo, sizeof(mVorbisInfo));
mVorbisInfo.codec_setup = nullptr;
mInfo.mAudio.mMimeType = "audio/vorbis";
mInfo.mAudio.mRate = aVorbisState->mInfo.rate;
mInfo.mAudio.mChannels = aVorbisState->mInfo.channels;
// Save header packets for the decoder
if (!XiphHeadersToExtradata(mInfo.mAudio.mCodecSpecificConfig,
aHeaders.mHeaders, aHeaders.mHeaderLens)) {
return;
}
mVorbisState = aVorbisState;
mVorbisSerial = aVorbisState->mSerial;
}
void
OggDemuxer::SetupTargetOpus(OpusState* aOpusState, OggHeaders& aHeaders)
{
if (mOpusState) {
mOpusState->Reset();
}
mInfo.mAudio.mMimeType = "audio/opus";
mInfo.mAudio.mRate = aOpusState->mRate;
mInfo.mAudio.mChannels = aOpusState->mChannels;
// Save preskip & the first header packet for the Opus decoder
uint64_t preSkip = aOpusState->Time(0, aOpusState->mPreSkip);
uint8_t c[sizeof(preSkip)];
BigEndian::writeUint64(&c[0], preSkip);
mInfo.mAudio.mCodecSpecificConfig->AppendElements(&c[0], sizeof(preSkip));
mInfo.mAudio.mCodecSpecificConfig->AppendElements(aHeaders.mHeaders[0],
aHeaders.mHeaderLens[0]);
mOpusState = aOpusState;
mOpusSerial = aOpusState->mSerial;
mOpusPreSkip = aOpusState->mPreSkip;
}
void
OggDemuxer::SetupTargetFlac(FlacState* aFlacState, OggHeaders& aHeaders)
{
if (mFlacState) {
mFlacState->Reset();
}
mInfo.mAudio = aFlacState->Info();
mFlacState = aFlacState;
mFlacSerial = aFlacState->mSerial;
*aSavedState = aNewState;
}
void
@ -482,13 +359,12 @@ OggDemuxer::SetupTargetSkeleton()
// Setup skeleton related information after mVorbisState & mTheoraState
// being set (if they exist).
if (mSkeletonState) {
OggHeaders headers;
if (!HasAudio() && !HasVideo()) {
// We have a skeleton track, but no audio or video, may as well disable
// the skeleton, we can't do anything useful with this media.
OGG_DEBUG("Deactivating skeleton stream %ld", mSkeletonState->mSerial);
mSkeletonState->Deactivate();
} else if (ReadHeaders(TrackInfo::kAudioTrack, mSkeletonState, headers) &&
} else if (ReadHeaders(TrackInfo::kAudioTrack, mSkeletonState) &&
mSkeletonState->HasIndex()) {
// We don't particularly care about which track we are currently using
// as both MediaResource points to the same content.
@ -522,68 +398,32 @@ OggDemuxer::SetupMediaTracksInfo(const nsTArray<uint32_t>& aSerials)
mSkeletonState->mMsgFieldStore.Get(serial, &msgInfo);
}
if (codecState->GetType() == OggCodecState::TYPE_THEORA) {
TheoraState* theoraState = static_cast<TheoraState*>(codecState);
if (!(mTheoraState && mTheoraState->mSerial == theoraState->mSerial)) {
continue;
}
OggCodecState* primeState = nullptr;
switch (codecState->GetType()) {
case OggCodecState::TYPE_THEORA:
primeState = mTheoraState;
break;
case OggCodecState::TYPE_VORBIS:
primeState = mVorbisState;
break;
case OggCodecState::TYPE_OPUS:
primeState = mOpusState;
break;
case OggCodecState::TYPE_FLAC:
primeState = mFlacState;
break;
default:
break;
}
if (primeState && primeState == codecState) {
bool isAudio = primeState->GetInfo()->GetAsAudioInfo();
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mVideo, mTheoraState == theoraState);
InitTrack(msgInfo, isAudio ? static_cast<TrackInfo*>(&mInfo.mAudio)
: &mInfo.mVideo,
true);
}
nsIntRect picture = nsIntRect(theoraState->mInfo.pic_x,
theoraState->mInfo.pic_y,
theoraState->mInfo.pic_width,
theoraState->mInfo.pic_height);
nsIntSize displaySize = nsIntSize(theoraState->mInfo.pic_width,
theoraState->mInfo.pic_height);
nsIntSize frameSize(theoraState->mInfo.frame_width,
theoraState->mInfo.frame_height);
ScaleDisplayByAspectRatio(displaySize, theoraState->mPixelAspectRatio);
if (IsValidVideoRegion(frameSize, picture, displaySize)) {
mInfo.mVideo.mDisplay = displaySize;
}
} else if (codecState->GetType() == OggCodecState::TYPE_VORBIS) {
VorbisState* vorbisState = static_cast<VorbisState*>(codecState);
if (!(mVorbisState && mVorbisState->mSerial == vorbisState->mSerial)) {
continue;
}
if (msgInfo) {
InitTrack(msgInfo,
&mInfo.mAudio,
mVorbisState == vorbisState);
}
mInfo.mAudio.mRate = vorbisState->mInfo.rate;
mInfo.mAudio.mChannels = vorbisState->mInfo.channels;
FillTags(&mInfo.mAudio, vorbisState->GetTags());
} else if (codecState->GetType() == OggCodecState::TYPE_OPUS) {
OpusState* opusState = static_cast<OpusState*>(codecState);
if (!(mOpusState && mOpusState->mSerial == opusState->mSerial)) {
continue;
}
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio, mOpusState == opusState);
}
mInfo.mAudio.mRate = opusState->mRate;
mInfo.mAudio.mChannels = opusState->mChannels;
FillTags(&mInfo.mAudio, opusState->GetTags());
} else if (codecState->GetType() == OggCodecState::TYPE_FLAC) {
FlacState* flacState = static_cast<FlacState*>(codecState);
if (!(mFlacState && mFlacState->mSerial == flacState->mSerial)) {
continue;
}
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio, mFlacState == flacState);
}
mInfo.mAudio = flacState->Info();
FillTags(&mInfo.mAudio, flacState->GetTags());
FillTags(isAudio ? static_cast<TrackInfo*>(&mInfo.mAudio) : &mInfo.mVideo,
primeState->GetTags());
}
}
}
@ -659,29 +499,25 @@ OggDemuxer::ReadMetadata()
for (uint32_t i = 0; i < bitstreams.Length(); ++i) {
OggCodecState* s = bitstreams[i];
if (s) {
OggHeaders headers;
if (s->GetType() == OggCodecState::TYPE_THEORA &&
ReadHeaders(TrackInfo::kVideoTrack, s, headers)) {
ReadHeaders(TrackInfo::kVideoTrack, s)) {
if (!mTheoraState) {
TheoraState* theoraState = static_cast<TheoraState*>(s);
SetupTargetTheora(theoraState, headers);
SetupTarget(&mTheoraState, s);
} else {
s->Deactivate();
}
} else if (s->GetType() == OggCodecState::TYPE_VORBIS &&
ReadHeaders(TrackInfo::kAudioTrack, s, headers)) {
ReadHeaders(TrackInfo::kAudioTrack, s)) {
if (!mVorbisState) {
VorbisState* vorbisState = static_cast<VorbisState*>(s);
SetupTargetVorbis(vorbisState, headers);
SetupTarget(&mVorbisState, s);
} else {
s->Deactivate();
}
} else if (s->GetType() == OggCodecState::TYPE_OPUS &&
ReadHeaders(TrackInfo::kAudioTrack, s, headers)) {
ReadHeaders(TrackInfo::kAudioTrack, s)) {
if (mOpusEnabled) {
if (!mOpusState) {
OpusState* opusState = static_cast<OpusState*>(s);
SetupTargetOpus(opusState, headers);
SetupTarget(&mOpusState, s);
} else {
s->Deactivate();
}
@ -691,10 +527,9 @@ OggDemuxer::ReadMetadata()
}
} else if (MediaPrefs::FlacInOgg() &&
s->GetType() == OggCodecState::TYPE_FLAC &&
ReadHeaders(TrackInfo::kAudioTrack, s, headers)) {
ReadHeaders(TrackInfo::kAudioTrack, s)) {
if (!mFlacState) {
FlacState* flacState = static_cast<FlacState*>(s);
SetupTargetFlac(flacState, headers);
SetupTarget(&mFlacState, s);
} else {
s->Deactivate();
}
@ -821,59 +656,57 @@ OggDemuxer::ReadOggChain(const media::TimeUnit& aLastEndTime)
mSkeletonState->mMsgFieldStore.Get(serial, &msgInfo);
}
OggHeaders vorbisHeaders;
if ((newVorbisState &&
ReadHeaders(TrackInfo::kAudioTrack, newVorbisState, vorbisHeaders)) &&
(mVorbisState->mInfo.rate == newVorbisState->mInfo.rate) &&
(mVorbisState->mInfo.channels == newVorbisState->mInfo.channels)) {
ReadHeaders(TrackInfo::kAudioTrack, newVorbisState)) &&
(mVorbisState->GetInfo()->GetAsAudioInfo()->mRate ==
newVorbisState->GetInfo()->GetAsAudioInfo()->mRate) &&
(mVorbisState->GetInfo()->GetAsAudioInfo()->mChannels ==
newVorbisState->GetInfo()->GetAsAudioInfo()->mChannels)) {
SetupTargetVorbis(newVorbisState, vorbisHeaders);
LOG(LogLevel::Debug, ("New vorbis ogg link, serial=%d\n", mVorbisSerial));
SetupTarget(&mVorbisState, newVorbisState);
LOG(LogLevel::Debug,
("New vorbis ogg link, serial=%d\n", mVorbisState->mSerial));
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio, true);
}
mInfo.mAudio.mMimeType = NS_LITERAL_CSTRING("audio/vorbis");
mInfo.mAudio.mRate = newVorbisState->mInfo.rate;
mInfo.mAudio.mChannels = newVorbisState->mInfo.channels;
chained = true;
tags = newVorbisState->GetTags();
}
OggHeaders opusHeaders;
if ((newOpusState &&
ReadHeaders(TrackInfo::kAudioTrack, newOpusState, opusHeaders)) &&
(mOpusState->mRate == newOpusState->mRate) &&
(mOpusState->mChannels == newOpusState->mChannels)) {
ReadHeaders(TrackInfo::kAudioTrack, newOpusState)) &&
(mOpusState->GetInfo()->GetAsAudioInfo()->mRate ==
newOpusState->GetInfo()->GetAsAudioInfo()->mRate) &&
(mOpusState->GetInfo()->GetAsAudioInfo()->mChannels ==
newOpusState->GetInfo()->GetAsAudioInfo()->mChannels)) {
SetupTargetOpus(newOpusState, opusHeaders);
SetupTarget(&mOpusState, newOpusState);
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio, true);
}
mInfo.mAudio.mMimeType = NS_LITERAL_CSTRING("audio/opus");
mInfo.mAudio.mRate = newOpusState->mRate;
mInfo.mAudio.mChannels = newOpusState->mChannels;
chained = true;
tags = newOpusState->GetTags();
}
OggHeaders flacHeaders;
if ((newFlacState &&
ReadHeaders(TrackInfo::kAudioTrack, newFlacState, flacHeaders)) &&
(mFlacState->Info().mRate == newFlacState->Info().mRate) &&
(mFlacState->Info().mChannels == newFlacState->Info().mChannels)) {
ReadHeaders(TrackInfo::kAudioTrack, newFlacState)) &&
(mFlacState->GetInfo()->GetAsAudioInfo()->mRate ==
newFlacState->GetInfo()->GetAsAudioInfo()->mRate) &&
(mFlacState->GetInfo()->GetAsAudioInfo()->mChannels ==
newFlacState->GetInfo()->GetAsAudioInfo()->mChannels)) {
SetupTargetFlac(newFlacState, flacHeaders);
LOG(LogLevel::Debug, ("New flac ogg link, serial=%d\n", mFlacSerial));
SetupTarget(&mFlacState, newFlacState);
LOG(LogLevel::Debug,
("New flac ogg link, serial=%d\n", mFlacState->mSerial));
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio, true);
}
mInfo.mAudio = newFlacState->Info();
chained = true;
tags = newFlacState->GetTags();
}
@ -1110,20 +943,20 @@ OggDemuxer::GetBuffered(TrackInfo::TrackType aType)
uint32_t serial = ogg_page_serialno(&page);
if (aType == TrackInfo::kAudioTrack && mVorbisState &&
serial == mVorbisSerial) {
startTime = VorbisState::Time(&mVorbisInfo, granulepos);
serial == mVorbisState->mSerial) {
startTime = mVorbisState->Time(granulepos);
NS_ASSERTION(startTime > 0, "Must have positive start time");
} else if (aType == TrackInfo::kAudioTrack && mOpusState &&
serial == mOpusSerial) {
startTime = OpusState::Time(mOpusPreSkip, granulepos);
serial == mOpusState->mSerial) {
startTime = mOpusState->Time(granulepos);
NS_ASSERTION(startTime > 0, "Must have positive start time");
} else if (aType == TrackInfo::kAudioTrack && mFlacState &&
serial == mFlacSerial) {
serial == mFlacState->mSerial) {
startTime = mFlacState->Time(granulepos);
NS_ASSERTION(startTime > 0, "Must have positive start time");
} else if (aType == TrackInfo::kVideoTrack && mTheoraState &&
serial == mTheoraSerial) {
startTime = TheoraState::Time(&mTheoraInfo, granulepos);
serial == mTheoraState->mSerial) {
startTime = mTheoraState->Time(granulepos);
NS_ASSERTION(startTime > 0, "Must have positive start time");
} else if (mCodecStore.Contains(serial)) {
// Stream is not the theora or vorbis stream we're playing,
@ -1861,11 +1694,12 @@ OggDemuxer::SeekInBufferedRange(TrackInfo::TrackType aType,
// First post-seek frame isn't a keyframe, seek back to previous keyframe,
// otherwise we'll get visual artifacts.
NS_ASSERTION(packet->granulepos != -1, "Must have a granulepos");
int shift = mTheoraState->mInfo.keyframe_granule_shift;
int shift = mTheoraState->KeyFrameGranuleJobs();
int64_t keyframeGranulepos = (packet->granulepos >> shift) << shift;
int64_t keyframeTime = mTheoraState->StartTime(keyframeGranulepos);
SEEK_LOG(LogLevel::Debug, ("Keyframe for %lld is at %lld, seeking back to it",
frameTime, keyframeTime));
SEEK_LOG(LogLevel::Debug,
("Keyframe for %lld is at %lld, seeking back to it", frameTime,
keyframeTime));
aAdjustedTarget = std::min(aAdjustedTarget, keyframeTime);
}
}
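
The keyframe seek-back above works because a Theora granule position packs (keyframe_frame << shift) + frames_since_keyframe into one 64-bit value, with shift now obtained through the KeyFrameGranuleJobs() accessor instead of reading mInfo directly. A small standalone example of that arithmetic, with illustrative values:

// Worked example of Theora granulepos arithmetic: a granulepos encodes
// (keyframe_frame << shift) + frames_since_keyframe.
#include <cstdint>
#include <iostream>

int main()
{
  const int shift = 6;           // keyframe_granule_shift (typical value)
  const int64_t keyframe = 120;  // frame number of the last keyframe
  const int64_t offset = 17;     // frames since that keyframe
  const int64_t granulepos = (keyframe << shift) + offset;

  // Seeking back to the keyframe, as in OggDemuxer::SeekInBufferedRange:
  const int64_t keyframeGranulepos = (granulepos >> shift) << shift;

  std::cout << "granulepos=" << granulepos                       // 7697
            << " keyframe granulepos=" << keyframeGranulepos     // 7680
            << " keyframe frame=" << (keyframeGranulepos >> shift)  // 120
            << std::endl;
  return 0;
}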

View file

@ -15,7 +15,6 @@
namespace mozilla {
class OggTrackDemuxer;
class OggHeaders;
class OggDemuxer : public MediaDataDemuxer
{
@ -203,7 +202,7 @@ private:
// fails, or is complete. Initializes the codec state before returning.
// Returns true if reading headers and initialization of the stream
// succeeds.
bool ReadHeaders(TrackInfo::TrackType aType, OggCodecState* aState, OggHeaders& aHeaders);
bool ReadHeaders(TrackInfo::TrackType aType, OggCodecState* aState);
// Reads the next link in the chain.
bool ReadOggChain(const media::TimeUnit& aLastEndTime);
@ -217,10 +216,7 @@ private:
void BuildSerialList(nsTArray<uint32_t>& aTracks);
// Setup target bitstreams for decoding.
void SetupTargetTheora(TheoraState* aTheoraState, OggHeaders& aHeaders);
void SetupTargetVorbis(VorbisState* aVorbisState, OggHeaders& aHeaders);
void SetupTargetOpus(OpusState* aOpusState, OggHeaders& aHeaders);
void SetupTargetFlac(FlacState* aFlacState, OggHeaders& aHeaders);
void SetupTarget(OggCodecState** aSavedState, OggCodecState* aNewState);
void SetupTargetSkeleton();
void SetupMediaTracksInfo(const nsTArray<uint32_t>& aSerials);
void FillTags(TrackInfo* aInfo, MetadataTags* aTags);
@ -256,17 +252,17 @@ private:
OggCodecStore mCodecStore;
// Decode state of the Theora bitstream we're decoding, if we have video.
TheoraState* mTheoraState;
OggCodecState* mTheoraState;
// Decode state of the Vorbis bitstream we're decoding, if we have audio.
VorbisState* mVorbisState;
OggCodecState* mVorbisState;
// Decode state of the Opus bitstream we're decoding, if we have one.
OpusState* mOpusState;
OggCodecState* mOpusState;
// Get the bitstream decode state for the given track type
// Decode state of the Flac bitstream we're decoding, if we have one.
FlacState* mFlacState;
OggCodecState* mFlacState;
OggCodecState* GetTrackCodecState(TrackInfo::TrackType aType) const;
TrackInfo::TrackType GetCodecStateType(OggCodecState* aState) const;
@ -297,21 +293,6 @@ private:
OggStateContext mAudioOggState;
OggStateContext mVideoOggState;
// Vorbis/Opus/Theora data used to compute timestamps. This is written on the
// decoder thread and read on the main thread. All reading on the main
// thread must be done after metadataloaded. We can't use the existing
// data in the codec states due to threading issues. You must check the
// associated mTheoraState or mVorbisState pointer is non-null before
// using this codec data.
uint32_t mVorbisSerial;
uint32_t mOpusSerial;
uint32_t mTheoraSerial;
uint32_t mFlacSerial;
vorbis_info mVorbisInfo;
int mOpusPreSkip;
th_info mTheoraInfo;
Maybe<int64_t> mStartTime;
// Booleans to indicate if we have audio and/or video data

View file

@ -208,7 +208,10 @@ already_AddRefed<MediaDataDecoder>
AndroidDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
{
const AudioInfo& config = aParams.AudioConfig();
MOZ_ASSERT(config.mBitDepth == 16, "We only handle 16-bit audio!");
if (config.mBitDepth != 16) {
// We only handle 16-bit audio.
return nullptr;
}
MediaFormat::LocalRef format;
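
Replacing the MOZ_ASSERT with a run-time check means an unsupported bit depth now makes CreateAudioDecoder fail cleanly instead of tripping an assertion in debug builds, leaving the caller free to fall back to another decoder. A minimal sketch of that calling pattern; the types and the TryCreate caller below are hypothetical, not the actual PDM code.

// Hypothetical caller-side handling of a decoder factory that returns null
// for configurations it cannot handle, instead of asserting.
#include <memory>

struct MediaDataDecoder {};
struct AudioConfig { int mBitDepth = 16; };

static std::unique_ptr<MediaDataDecoder>
CreateAudioDecoder(const AudioConfig& aConfig)
{
  if (aConfig.mBitDepth != 16) {
    // We only handle 16-bit audio: refuse rather than assert.
    return nullptr;
  }
  return std::make_unique<MediaDataDecoder>();
}

static bool TryCreate(const AudioConfig& aConfig)
{
  if (auto decoder = CreateAudioDecoder(aConfig)) {
    return true;   // use this decoder
  }
  return false;    // let the caller fall back to another decoder module
}

int main()
{
  AudioConfig supported;       // 16-bit: accepted
  AudioConfig unsupported;
  unsupported.mBitDepth = 24;  // 24-bit: rejected without crashing
  return (TryCreate(supported) && !TryCreate(unsupported)) ? 0 : 1;
}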

View file

@ -1149,4 +1149,4 @@ static const TransportSecurityPreload kPublicKeyPinningPreloadList[] = {
static const int32_t kUnknownId = -1;
static const PRTime kPreloadPKPinsExpirationTime = INT64_C(1489847113400000);
static const PRTime kPreloadPKPinsExpirationTime = INT64_C(1490020570826000);

File diff suppressed because it is too large

File diff suppressed because it is too large