Bug 1022524 - Set up audio track and video track information for Ogg files. r=rillian

Kilik Kuo 2014-09-03 11:01:37 +08:00
Parent f9a8f31cb4
Commit 6b7ffc4559
4 changed files with 412 additions and 139 deletions

View file

@ -1104,6 +1104,9 @@ static const long SKELETON_4_0_MIN_HEADER_LEN = 80;
// Minimum length in bytes of a Skeleton 4.0 index packet.
static const long SKELETON_4_0_MIN_INDEX_LEN = 42;
// Minimum length in bytes of a Skeleton 3.0/4.0 Fisbone packet.
static const long SKELETON_MIN_FISBONE_LEN = 52;
// Minimum possible size of a compressed index keypoint.
static const size_t MIN_KEY_POINT_SIZE = 2;
@ -1127,18 +1130,34 @@ static const size_t INDEX_FIRST_NUMER_OFFSET = 26;
static const size_t INDEX_LAST_NUMER_OFFSET = 34;
static const size_t INDEX_KEYPOINT_OFFSET = 42;
// Byte-offsets of the fields in the Skeleton Fisbone packet.
static const size_t FISBONE_MSG_FIELDS_OFFSET = 8;
static const size_t FISBONE_SERIALNO_OFFSET = 12;
static bool IsSkeletonBOS(ogg_packet* aPacket)
{
static_assert(SKELETON_MIN_HEADER_LEN >= 8,
"Minimum length of skeleton BOS header incorrect");
return aPacket->bytes >= SKELETON_MIN_HEADER_LEN &&
memcmp(reinterpret_cast<char*>(aPacket->packet), "fishead", 8) == 0;
}
static bool IsSkeletonIndex(ogg_packet* aPacket)
{
static_assert(SKELETON_4_0_MIN_INDEX_LEN >= 5,
"Minimum length of skeleton index header incorrect");
return aPacket->bytes >= SKELETON_4_0_MIN_INDEX_LEN &&
memcmp(reinterpret_cast<char*>(aPacket->packet), "index", 5) == 0;
}
static bool IsSkeletonFisbone(ogg_packet* aPacket)
{
static_assert(SKELETON_MIN_FISBONE_LEN >= 8,
"Minimum length of skeleton fisbone header incorrect");
return aPacket->bytes >= SKELETON_MIN_FISBONE_LEN &&
memcmp(reinterpret_cast<char*>(aPacket->packet), "fisbone", 8) == 0;
}
// Reads a variable length encoded integer at p. Will not read
// past aLimit. Returns pointer to character after end of integer.
static const unsigned char* ReadVariableLengthInt(const unsigned char* p,
@ -1367,6 +1386,85 @@ nsresult SkeletonState::GetDuration(const nsTArray<uint32_t>& aTracks,
return duration.isValid() ? NS_OK : NS_ERROR_FAILURE;
}
bool SkeletonState::DecodeFisbone(ogg_packet* aPacket)
{
if (aPacket->bytes < static_cast<long>(FISBONE_MSG_FIELDS_OFFSET + 4)) {
return false;
}
uint32_t offsetMsgField = LittleEndian::readUint32(aPacket->packet + FISBONE_MSG_FIELDS_OFFSET);
if (aPacket->bytes < static_cast<long>(FISBONE_SERIALNO_OFFSET + 4)) {
return false;
}
uint32_t serialno = LittleEndian::readUint32(aPacket->packet + FISBONE_SERIALNO_OFFSET);
CheckedUint32 checked_fields_pos = CheckedUint32(FISBONE_MSG_FIELDS_OFFSET) + offsetMsgField;
if (!checked_fields_pos.isValid() ||
aPacket->bytes < static_cast<int64_t>(checked_fields_pos.value())) {
return false;
}
int64_t msgLength = aPacket->bytes - checked_fields_pos.value();
char* msgProbe = (char*)aPacket->packet + checked_fields_pos.value();
char* msgHead = msgProbe;
nsAutoPtr<MessageField> field(new MessageField());
const static FieldPatternType kFieldTypeMaps[] = {
{"Content-Type:", eContentType},
{"Role:", eRole},
{"Name:", eName},
{"Language:", eLanguage},
{"Title:", eTitle},
{"Display-hint:", eDisplayHint},
{"Altitude:", eAltitude},
{"TrackOrder:", eTrackOrder},
{"Track dependencies:", eTrackDependencies}
};
bool isContentTypeParsed = false;
while (msgLength > 1) {
if (*msgProbe == '\r' && *(msgProbe+1) == '\n') {
nsAutoCString strMsg(msgHead, msgProbe-msgHead);
for (size_t i = 0; i < ArrayLength(kFieldTypeMaps); i++) {
if (strMsg.Find(kFieldTypeMaps[i].mPatternToRecognize) != -1) {
// The content of message header fields follows [RFC2822], and the
// mandatory message field must be encoded in US-ASCII, while the others
// must be encoded in UTF-8. "Content-Type" must come first
// among all of the message header fields.
// See http://svn.annodex.net/standards/draft-pfeiffer-oggskeleton-current.txt.
if (i != 0 && !isContentTypeParsed) {
return false;
}
if ((i == 0 && IsASCII(strMsg)) || (i != 0 && IsUTF8(strMsg))) {
EMsgHeaderType eHeaderType = kFieldTypeMaps[i].mMsgHeaderType;
if (!field->mValuesStore.Contains(eHeaderType)) {
uint32_t nameLen = strlen(kFieldTypeMaps[i].mPatternToRecognize);
field->mValuesStore.Put(eHeaderType, new nsCString(msgHead+nameLen,
msgProbe-msgHead-nameLen));
}
if (i == 0) { isContentTypeParsed = true; }
}
break;
}
}
msgProbe += 2;
msgLength -= 2;
msgHead = msgProbe;
continue;
}
msgLength--;
msgProbe++;
}
if (!mMsgFieldStore.Contains(serialno)) {
mMsgFieldStore.Put(serialno, field.forget());
} else {
return false;
}
return true;
}
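For context, the fisbone message-header block consumed by DecodeFisbone() above is a run of CRLF-terminated "Field: value" lines in RFC 2822 style, with "Content-Type" required to come first. The following is a minimal standalone sketch of that split-and-match idea: ParseFisboneMessageSketch() is a hypothetical helper, std::map stands in for the Mozilla hashtable and string classes, and prefix matching is used where the patch uses a substring Find(). It illustrates the format only; it is not the code added by this commit.
#include <cstdio>
#include <cstring>
#include <map>
#include <string>
// Hypothetical sketch: split a fisbone message-header block on CRLF
// boundaries and record the first occurrence of each known field.
// The field names mirror kFieldTypeMaps in the patch above.
static bool ParseFisboneMessageSketch(const std::string& aBlock,
                                      std::map<std::string, std::string>& aOut)
{
  static const char* const kKnownFields[] = {
    "Content-Type:", "Role:", "Name:", "Language:", "Title:",
    "Display-hint:", "Altitude:", "TrackOrder:", "Track dependencies:"
  };
  bool contentTypeSeen = false;
  size_t lineStart = 0;
  while (lineStart < aBlock.size()) {
    size_t lineEnd = aBlock.find("\r\n", lineStart);
    if (lineEnd == std::string::npos) {
      break;  // No trailing CRLF; ignore the incomplete remainder.
    }
    std::string line = aBlock.substr(lineStart, lineEnd - lineStart);
    for (size_t i = 0; i < sizeof(kKnownFields) / sizeof(kKnownFields[0]); ++i) {
      size_t nameLen = std::strlen(kKnownFields[i]);
      if (line.compare(0, nameLen, kKnownFields[i]) == 0) {
        if (i != 0 && !contentTypeSeen) {
          return false;  // "Content-Type" must be the first field.
        }
        if (i == 0) {
          contentTypeSeen = true;
        }
        // Keep only the first value seen for each field name.
        aOut.insert(std::make_pair(kKnownFields[i], line.substr(nameLen)));
        break;
      }
    }
    lineStart = lineEnd + 2;
  }
  return contentTypeSeen;
}
int main()
{
  // Example block; the values are illustrative only.
  const std::string block =
      "Content-Type: audio/vorbis\r\nRole: audio/main\r\nName: Commentary\r\n";
  std::map<std::string, std::string> fields;
  if (ParseFisboneMessageSketch(block, fields)) {
    for (std::map<std::string, std::string>::const_iterator it = fields.begin();
         it != fields.end(); ++it) {
      std::printf("%s%s\n", it->first.c_str(), it->second.c_str());
    }
  }
  return 0;
}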
bool SkeletonState::DecodeHeader(ogg_packet* aPacket)
{
nsAutoRef<ogg_packet> autoRelease(aPacket);
@ -1396,6 +1494,8 @@ bool SkeletonState::DecodeHeader(ogg_packet* aPacket)
return true;
} else if (IsSkeletonIndex(aPacket) && mVersion >= SKELETON_VERSION(4,0)) {
return DecodeIndex(aPacket);
} else if (IsSkeletonFisbone(aPacket)) {
return DecodeFisbone(aPacket);
} else if (aPacket->e_o_s) {
mDoneReadingHeaders = true;
return true;

View file

@ -382,10 +382,35 @@ private:
// version numbers.
#define SKELETON_VERSION(major, minor) (((major)<<16)|(minor))
enum EMsgHeaderType {
eContentType,
eRole,
eName,
eLanguage,
eTitle,
eDisplayHint,
eAltitude,
eTrackOrder,
eTrackDependencies
};
typedef struct {
const char* mPatternToRecognize;
EMsgHeaderType mMsgHeaderType;
} FieldPatternType;
// Stores the message information for different logical bitstreams.
typedef struct {
nsClassHashtable<nsUint32HashKey, nsCString> mValuesStore;
} MessageField;
class SkeletonState : public OggCodecState {
public:
explicit SkeletonState(ogg_page* aBosPage);
~SkeletonState();
nsClassHashtable<nsUint32HashKey, MessageField> mMsgFieldStore;
CodecType GetType() { return TYPE_SKELETON; }
bool DecodeHeader(ogg_packet* aPacket);
int64_t Time(int64_t granulepos) { return -1; }
@ -454,6 +479,8 @@ private:
// Decodes an index packet. Returns false on failure.
bool DecodeIndex(ogg_packet* aPacket);
// Decodes a fisbone packet. Returns false on failure.
bool DecodeFisbone(ogg_packet* aPacket);
// Gets the keypoint you must seek to in order to get the keyframe required
// to render the stream at time aTarget on stream with serial aSerialno.

View file

@ -86,6 +86,51 @@ PageSync(MediaResource* aResource,
// is about 4300 bytes, so we read the file in chunks larger than that.
static const int PAGE_STEP = 8192;
// Returns the track kind that corresponds to aRole, based on the following specs.
// (https://www.whatwg.org/specs/web-apps/current-
// work/multipage/embedded-content.html#dom-audiotrack-kind) &
// (http://wiki.xiph.org/SkeletonHeaders)
static const nsString GetKind(const nsCString& aRole)
{
if (aRole.Find("audio/main") != -1 || aRole.Find("video/main") != -1) {
return NS_LITERAL_STRING("main");
} else if (aRole.Find("audio/alternate") != -1 ||
aRole.Find("video/alternate") != -1) {
return NS_LITERAL_STRING("alternative");
} else if (aRole.Find("audio/audiodesc") != -1) {
return NS_LITERAL_STRING("descriptions");
} else if (aRole.Find("audio/described") != -1) {
return NS_LITERAL_STRING("main-desc");
} else if (aRole.Find("audio/dub") != -1) {
return NS_LITERAL_STRING("translation");
} else if (aRole.Find("audio/commentary") != -1) {
return NS_LITERAL_STRING("commentary");
} else if (aRole.Find("video/sign") != -1) {
return NS_LITERAL_STRING("sign");
} else if (aRole.Find("video/captioned") != -1) {
return NS_LITERAL_STRING("captions");
} else if (aRole.Find("video/subtitled") != -1) {
return NS_LITERAL_STRING("subtitles");
}
return EmptyString();
}
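As a quick reference for the mapping above, here is a hedged standalone sketch. GetKindSketch() is a hypothetical helper using std::string in place of nsCString/nsString; the role substrings and kind names are taken directly from GetKind() in this patch.
#include <cstdio>
#include <cstring>
#include <string>
// Hypothetical mirror of GetKind(): map a Skeleton role string to a
// WHATWG track kind, checking the roles in the same order as the patch.
static std::string GetKindSketch(const std::string& aRole)
{
  struct RoleToKind { const char* mRole; const char* mKind; };
  static const RoleToKind kMap[] = {
    { "audio/main",       "main" },
    { "video/main",       "main" },
    { "audio/alternate",  "alternative" },
    { "video/alternate",  "alternative" },
    { "audio/audiodesc",  "descriptions" },
    { "audio/described",  "main-desc" },
    { "audio/dub",        "translation" },
    { "audio/commentary", "commentary" },
    { "video/sign",       "sign" },
    { "video/captioned",  "captions" },
    { "video/subtitled",  "subtitles" },
  };
  for (size_t i = 0; i < sizeof(kMap) / sizeof(kMap[0]); ++i) {
    if (aRole.find(kMap[i].mRole) != std::string::npos) {
      return kMap[i].mKind;
    }
  }
  return "";  // Unknown roles map to the empty kind, as in GetKind().
}
int main()
{
  std::printf("%s\n", GetKindSketch("audio/main").c_str());       // main
  std::printf("%s\n", GetKindSketch("video/subtitled").c_str());  // subtitles
  std::printf("%s\n", GetKindSketch("audio/karaoke").c_str());    // (empty)
  return 0;
}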
static void InitTrack(MessageField* aMsgInfo, TrackInfo* aInfo, bool aEnable)
{
MOZ_ASSERT(aMsgInfo);
MOZ_ASSERT(aInfo);
nsCString* sName = aMsgInfo->mValuesStore.Get(eName);
nsCString* sRole = aMsgInfo->mValuesStore.Get(eRole);
nsCString* sTitle = aMsgInfo->mValuesStore.Get(eTitle);
nsCString* sLanguage = aMsgInfo->mValuesStore.Get(eLanguage);
aInfo->Init(sName ? NS_ConvertUTF8toUTF16(*sName) : EmptyString(),
sRole ? GetKind(*sRole) : EmptyString(),
sTitle ? NS_ConvertUTF8toUTF16(*sTitle) : EmptyString(),
sLanguage ? NS_ConvertUTF8toUTF16(*sLanguage) : EmptyString(),
aEnable);
}
OggReader::OggReader(AbstractMediaDecoder* aDecoder)
: MediaDecoderReader(aDecoder),
mMonitor("OggReader"),
@ -165,6 +210,7 @@ bool OggReader::ReadHeaders(OggCodecState* aState)
void OggReader::BuildSerialList(nsTArray<uint32_t>& aTracks)
{
// Obtaining seek index information for currently active bitstreams.
if (HasVideo()) {
aTracks.AppendElement(mTheoraState->mSerial);
}
@ -179,6 +225,159 @@ void OggReader::BuildSerialList(nsTArray<uint32_t>& aTracks)
}
}
void OggReader::SetupTargetTheora(TheoraState* aTheoraState)
{
if (mTheoraState) {
mTheoraState->Reset();
}
nsIntRect picture = nsIntRect(aTheoraState->mInfo.pic_x,
aTheoraState->mInfo.pic_y,
aTheoraState->mInfo.pic_width,
aTheoraState->mInfo.pic_height);
nsIntSize displaySize = nsIntSize(aTheoraState->mInfo.pic_width,
aTheoraState->mInfo.pic_height);
// Apply the aspect ratio to produce the intrinsic display size we report
// to the element.
ScaleDisplayByAspectRatio(displaySize, aTheoraState->mPixelAspectRatio);
nsIntSize frameSize(aTheoraState->mInfo.frame_width,
aTheoraState->mInfo.frame_height);
if (IsValidVideoRegion(frameSize, picture, displaySize)) {
// Video track's frame sizes will not overflow. Activate the video track.
mPicture = picture;
VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
if (container) {
container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
nullptr,
TimeStamp::Now());
}
// Copy Theora info data for time computations on other threads.
memcpy(&mTheoraInfo, &aTheoraState->mInfo, sizeof(mTheoraInfo));
mTheoraState = aTheoraState;
mTheoraSerial = aTheoraState->mSerial;
}
}
void OggReader::SetupTargetVorbis(VorbisState* aVorbisState)
{
if (mVorbisState) {
mVorbisState->Reset();
}
// Copy Vorbis info data for time computations on other threads.
memcpy(&mVorbisInfo, &aVorbisState->mInfo, sizeof(mVorbisInfo));
mVorbisInfo.codec_setup = nullptr;
mVorbisState = aVorbisState;
mVorbisSerial = aVorbisState->mSerial;
}
void OggReader::SetupTargetOpus(OpusState* aOpusState)
{
if (mOpusState) {
mOpusState->Reset();
}
mOpusState = aOpusState;
mOpusSerial = aOpusState->mSerial;
mOpusPreSkip = aOpusState->mPreSkip;
}
void OggReader::SetupTargetSkeleton(SkeletonState* aSkeletonState)
{
// Set up Skeleton-related information after mVorbisState & mTheoraState
// have been set (if they exist).
if (aSkeletonState) {
if (!HasAudio() && !HasVideo()) {
// We have a skeleton track, but no audio or video; we may as well disable
// the skeleton, since we can't do anything useful with this media.
aSkeletonState->Deactivate();
} else if (ReadHeaders(aSkeletonState) && aSkeletonState->HasIndex()) {
// Extract the duration info out of the index, so we don't need to seek to
// the end of the resource to get it.
nsAutoTArray<uint32_t, 2> tracks;
BuildSerialList(tracks);
int64_t duration = 0;
if (NS_SUCCEEDED(aSkeletonState->GetDuration(tracks, duration))) {
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
mDecoder->SetMediaDuration(duration);
LOG(PR_LOG_DEBUG, ("Got duration from Skeleton index %lld", duration));
}
}
}
}
void OggReader::SetupMediaTracksInfo(const nsTArray<uint32_t>& aSerials)
{
// For each serial number:
// 1. Retrieve the codec state from mCodecStore by this serial number.
// 2. Retrieve the message field from mMsgFieldStore by this serial number.
// 3. For now, skip if the serial number refers to a non-primary bitstream.
// 4. Set up the track and other audio/video information for each codec type.
for (size_t i = 0; i < aSerials.Length(); i++) {
uint32_t serial = aSerials[i];
OggCodecState* codecState = mCodecStore.Get(serial);
MessageField* msgInfo = nullptr;
if (mSkeletonState && mSkeletonState->mMsgFieldStore.Contains(serial)) {
mSkeletonState->mMsgFieldStore.Get(serial, &msgInfo);
}
if (codecState->GetType() == OggCodecState::TYPE_THEORA) {
TheoraState* theoraState = static_cast<TheoraState*>(codecState);
if (!(mTheoraState && mTheoraState->mSerial == theoraState->mSerial)) {
continue;
}
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mVideo.mTrackInfo, mTheoraState == theoraState);
}
nsIntRect picture = nsIntRect(theoraState->mInfo.pic_x,
theoraState->mInfo.pic_y,
theoraState->mInfo.pic_width,
theoraState->mInfo.pic_height);
nsIntSize displaySize = nsIntSize(theoraState->mInfo.pic_width,
theoraState->mInfo.pic_height);
nsIntSize frameSize(theoraState->mInfo.frame_width,
theoraState->mInfo.frame_height);
ScaleDisplayByAspectRatio(displaySize, theoraState->mPixelAspectRatio);
mInfo.mVideo.mDisplay = displaySize;
mInfo.mVideo.mHasVideo = IsValidVideoRegion(frameSize, picture, displaySize);
} else if (codecState->GetType() == OggCodecState::TYPE_VORBIS) {
VorbisState* vorbisState = static_cast<VorbisState*>(codecState);
if (!(mVorbisState && mVorbisState->mSerial == vorbisState->mSerial)) {
continue;
}
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio.mTrackInfo, mVorbisState == vorbisState);
}
mInfo.mAudio.mHasAudio = true;
mInfo.mAudio.mRate = vorbisState->mInfo.rate;
mInfo.mAudio.mChannels = vorbisState->mInfo.channels;
#ifdef MOZ_OPUS
} else if (codecState->GetType() == OggCodecState::TYPE_OPUS) {
OpusState* opusState = static_cast<OpusState*>(codecState);
if (!(mOpusState && mOpusState->mSerial == opusState->mSerial)) {
continue;
}
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio.mTrackInfo, mOpusState == opusState);
}
mInfo.mAudio.mHasAudio = true;
mInfo.mAudio.mRate = opusState->mRate;
mInfo.mAudio.mChannels = opusState->mChannels;
#endif
}
}
}
nsresult OggReader::ReadMetadata(MediaInfo* aInfo,
MetadataTags** aTags)
{
@ -193,6 +392,7 @@ nsresult OggReader::ReadMetadata(MediaInfo* aInfo,
ogg_page page;
nsAutoTArray<OggCodecState*,4> bitstreams;
nsTArray<uint32_t> serials;
bool readAllBOS = false;
while (!readAllBOS) {
if (!ReadOggPage(&page)) {
@ -215,41 +415,7 @@ nsresult OggReader::ReadMetadata(MediaInfo* aInfo,
codecState = OggCodecState::Create(&page);
mCodecStore.Add(serial, codecState);
bitstreams.AppendElement(codecState);
if (codecState &&
codecState->GetType() == OggCodecState::TYPE_VORBIS &&
!mVorbisState)
{
// First Vorbis bitstream, we'll play this one. Subsequent Vorbis
// bitstreams will be ignored.
mVorbisState = static_cast<VorbisState*>(codecState);
}
if (codecState &&
codecState->GetType() == OggCodecState::TYPE_THEORA &&
!mTheoraState)
{
// First Theora bitstream, we'll play this one. Subsequent Theora
// bitstreams will be ignored.
mTheoraState = static_cast<TheoraState*>(codecState);
}
#ifdef MOZ_OPUS
if (codecState &&
codecState->GetType() == OggCodecState::TYPE_OPUS &&
!mOpusState)
{
if (mOpusEnabled) {
mOpusState = static_cast<OpusState*>(codecState);
} else {
NS_WARNING("Opus decoding disabled."
" See media.opus.enabled in about:config");
}
}
#endif /* MOZ_OPUS */
if (codecState &&
codecState->GetType() == OggCodecState::TYPE_SKELETON &&
!mSkeletonState)
{
mSkeletonState = static_cast<SkeletonState*>(codecState);
}
serials.AppendElement(serial);
}
codecState = mCodecStore.Get(serial);
@ -261,96 +427,56 @@ nsresult OggReader::ReadMetadata(MediaInfo* aInfo,
}
// We've read all BOS pages, so we know the streams contained in the media.
// Now process all available header packets in the active Theora, Vorbis and
// Skeleton streams.
// Deactivate any non-primary bitstreams.
for (uint32_t i = 0; i < bitstreams.Length(); i++) {
// 1. Process all available header packets in the Theora, Vorbis/Opus bitstreams.
// 2. Find the first encountered Theora/Vorbis/Opus bitstream, and configure
// it as the target A/V bitstream.
// 3. Deactivate the rest of the bitstreams for now, until MediaInfo
// supports multiple track infos.
for (uint32_t i = 0; i < bitstreams.Length(); ++i) {
OggCodecState* s = bitstreams[i];
if (s != mVorbisState &&
if (s) {
if (s->GetType() == OggCodecState::TYPE_THEORA && ReadHeaders(s)) {
if (!mTheoraState) {
TheoraState* theoraState = static_cast<TheoraState*>(s);
SetupTargetTheora(theoraState);
} else {
s->Deactivate();
}
} else if (s->GetType() == OggCodecState::TYPE_VORBIS && ReadHeaders(s)) {
if (!mVorbisState) {
VorbisState* vorbisState = static_cast<VorbisState*>(s);
SetupTargetVorbis(vorbisState);
*aTags = vorbisState->GetTags();
} else {
s->Deactivate();
}
#ifdef MOZ_OPUS
s != mOpusState &&
#endif /* MOZ_OPUS */
s != mTheoraState && s != mSkeletonState) {
s->Deactivate();
}
}
if (mTheoraState && ReadHeaders(mTheoraState)) {
nsIntRect picture = nsIntRect(mTheoraState->mInfo.pic_x,
mTheoraState->mInfo.pic_y,
mTheoraState->mInfo.pic_width,
mTheoraState->mInfo.pic_height);
nsIntSize displaySize = nsIntSize(mTheoraState->mInfo.pic_width,
mTheoraState->mInfo.pic_height);
// Apply the aspect ratio to produce the intrinsic display size we report
// to the element.
ScaleDisplayByAspectRatio(displaySize, mTheoraState->mPixelAspectRatio);
nsIntSize frameSize(mTheoraState->mInfo.frame_width,
mTheoraState->mInfo.frame_height);
if (IsValidVideoRegion(frameSize, picture, displaySize)) {
// Video track's frame sizes will not overflow. Activate the video track.
mInfo.mVideo.mHasVideo = true;
mInfo.mVideo.mDisplay = displaySize;
mPicture = picture;
VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
if (container) {
container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
nullptr,
TimeStamp::Now());
} else if (s->GetType() == OggCodecState::TYPE_OPUS && ReadHeaders(s)) {
if (mOpusEnabled) {
if (!mOpusState) {
OpusState* opusState = static_cast<OpusState*>(s);
SetupTargetOpus(opusState);
*aTags = opusState->GetTags();
} else {
s->Deactivate();
}
} else {
NS_WARNING("Opus decoding disabled."
" See media.opus.enabled in about:config");
}
#endif // MOZ_OPUS
} else if (s->GetType() == OggCodecState::TYPE_SKELETON && !mSkeletonState) {
mSkeletonState = static_cast<SkeletonState*>(s);
} else {
// Deactivate any non-primary bitstreams.
s->Deactivate();
}
// Copy Theora info data for time computations on other threads.
memcpy(&mTheoraInfo, &mTheoraState->mInfo, sizeof(mTheoraInfo));
mTheoraSerial = mTheoraState->mSerial;
}
}
if (mVorbisState && ReadHeaders(mVorbisState)) {
mInfo.mAudio.mHasAudio = true;
mInfo.mAudio.mRate = mVorbisState->mInfo.rate;
mInfo.mAudio.mChannels = mVorbisState->mInfo.channels;
// Copy Vorbis info data for time computations on other threads.
memcpy(&mVorbisInfo, &mVorbisState->mInfo, sizeof(mVorbisInfo));
mVorbisInfo.codec_setup = nullptr;
mVorbisSerial = mVorbisState->mSerial;
*aTags = mVorbisState->GetTags();
} else {
memset(&mVorbisInfo, 0, sizeof(mVorbisInfo));
}
#ifdef MOZ_OPUS
if (mOpusState && ReadHeaders(mOpusState)) {
mInfo.mAudio.mHasAudio = true;
mInfo.mAudio.mRate = mOpusState->mRate;
mInfo.mAudio.mChannels = mOpusState->mChannels;
mOpusSerial = mOpusState->mSerial;
mOpusPreSkip = mOpusState->mPreSkip;
*aTags = mOpusState->GetTags();
}
#endif
if (mSkeletonState) {
if (!HasAudio() && !HasVideo()) {
// We have a skeleton track, but no audio or video, may as well disable
// the skeleton, we can't do anything useful with this media.
mSkeletonState->Deactivate();
} else if (ReadHeaders(mSkeletonState) && mSkeletonState->HasIndex()) {
// Extract the duration info out of the index, so we don't need to seek to
// the end of resource to get it.
nsAutoTArray<uint32_t, 2> tracks;
BuildSerialList(tracks);
int64_t duration = 0;
if (NS_SUCCEEDED(mSkeletonState->GetDuration(tracks, duration))) {
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
mDecoder->SetMediaDuration(duration);
LOG(PR_LOG_DEBUG, ("Got duration from Skeleton index %lld", duration));
}
}
}
SetupTargetSkeleton(mSkeletonState);
SetupMediaTracksInfo(serials);
if (HasAudio() || HasVideo()) {
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
@ -673,42 +799,55 @@ bool OggReader::ReadOggChain()
return false;
}
nsAutoPtr<MediaInfo> info(new MediaInfo());
MessageField* msgInfo = nullptr;
if (mSkeletonState && mSkeletonState->mMsgFieldStore.Contains(serial)) {
mSkeletonState->mMsgFieldStore.Get(serial, &msgInfo);
}
if ((newVorbisState && ReadHeaders(newVorbisState)) &&
(mVorbisState->mInfo.rate == newVorbisState->mInfo.rate) &&
(mVorbisState->mInfo.channels == newVorbisState->mInfo.channels)) {
mVorbisState->Reset();
mVorbisState = newVorbisState;
mVorbisSerial = mVorbisState->mSerial;
SetupTargetVorbis(newVorbisState);
LOG(PR_LOG_DEBUG, ("New vorbis ogg link, serial=%d\n", mVorbisSerial));
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio.mTrackInfo, true);
}
mInfo.mAudio.mRate = newVorbisState->mInfo.rate;
mInfo.mAudio.mChannels = newVorbisState->mInfo.channels;
chained = true;
info->mAudio.mRate = mVorbisState->mInfo.rate;
info->mAudio.mChannels = mVorbisState->mInfo.channels;
tags = mVorbisState->GetTags();
tags = newVorbisState->GetTags();
}
#ifdef MOZ_OPUS
if ((newOpusState && ReadHeaders(newOpusState)) &&
(mOpusState->mRate == newOpusState->mRate) &&
(mOpusState->mChannels == newOpusState->mChannels)) {
mOpusState->Reset();
mOpusState = newOpusState;
mOpusSerial = mOpusState->mSerial;
SetupTargetOpus(newOpusState);
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio.mTrackInfo, true);
}
mInfo.mAudio.mRate = newOpusState->mRate;
mInfo.mAudio.mChannels = newOpusState->mChannels;
chained = true;
info->mAudio.mRate = mOpusState->mRate;
info->mAudio.mChannels = mOpusState->mChannels;
tags = mOpusState->GetTags();
tags = newOpusState->GetTags();
}
#endif
if (chained) {
SetChained(true);
{
info->mAudio.mHasAudio = HasAudio();
info->mVideo.mHasVideo = HasVideo();
int rate = info->mAudio.mRate;
mInfo.mAudio.mHasAudio = HasAudio();
mInfo.mVideo.mHasVideo = HasVideo();
nsAutoPtr<MediaInfo> info(new MediaInfo());
*info = mInfo;
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
mDecoder->QueueMetadata((mDecodedAudioFrames * USECS_PER_S) / rate,
mDecoder->QueueMetadata((mDecodedAudioFrames * USECS_PER_S) / mInfo.mAudio.mRate,
info.forget(), tags);
}
return true;
@ -1024,7 +1163,6 @@ int64_t OggReader::RangeEndTime(int64_t aStartOffset,
endTime = -1;
break;
}
continue;
}
@ -1054,7 +1192,6 @@ int64_t OggReader::RangeEndTime(int64_t aStartOffset,
OggCodecState* codecState = nullptr;
codecState = mCodecStore.Get(serial);
if (!codecState) {
// This page is from a bitstream which we haven't encountered yet.
// It's probably from a new "link" in a "chained" ogg. Don't
@ -1143,7 +1280,9 @@ OggReader::SelectSeekRange(const nsTArray<SeekRange>& ranges,
OggReader::IndexedSeekResult OggReader::RollbackIndexedSeek(int64_t aOffset)
{
if (mSkeletonState) {
mSkeletonState->Deactivate();
}
MediaResource* resource = mDecoder->GetResource();
NS_ENSURE_TRUE(resource != nullptr, SEEK_FATAL_ERROR);
nsresult res = resource->Seek(nsISeekableStream::NS_SEEK_SET, aOffset);

View file

@ -268,6 +268,13 @@ private:
// various SkeletonState functions.
void BuildSerialList(nsTArray<uint32_t>& aTracks);
// Set up target bitstreams for decoding.
void SetupTargetTheora(TheoraState* aTheoraState);
void SetupTargetVorbis(VorbisState* aVorbisState);
void SetupTargetOpus(OpusState* aOpusState);
void SetupTargetSkeleton(SkeletonState* aSkeletonState);
void SetupMediaTracksInfo(const nsTArray<uint32_t>& aSerials);
OggCodecStore mCodecStore;
// Decode state of the Theora bitstream we're decoding, if we have video.