Bug 1901076 - Part 2. Expose frame count variant of metadata decoders. r=tnikkel

It is useful/necessary for WebCodecs image decoding support to be able
to calculate a frame count for the encoded image without actually
decoding every frame. It needs to be able to provide results without the
complete buffer as well.

Differential Revision: https://phabricator.services.mozilla.com/D212831
This commit is contained in:
Andrew Osmond 2024-06-24 17:59:49 +00:00
Parent ae67966c25
Commit 6fa0ded86a
22 changed files: 354 additions and 49 deletions

Просмотреть файл

@ -80,6 +80,29 @@ SampleIterator::~SampleIterator() { mIndex->UnregisterIterator(this); }
// True when the index still has a sample for this iterator to yield.
bool SampleIterator::HasNext() { return Get() != nullptr; }
// Produces a MediaRawData carrying only the next sample's metadata
// (timecode, composition time/duration, byte offset, keyframe flag)
// without reading any sample payload. Returns nullptr when there is no
// further sample, or when the sample's byte range extends past the data
// currently available from the source. Advances the iterator on success.
already_AddRefed<MediaRawData> SampleIterator::GetNextHeader() {
  Sample* current = Get();
  if (!current) {
    return nullptr;
  }

  // Check that the source already holds the sample's complete byte range.
  int64_t sourceLength = std::numeric_limits<int64_t>::max();
  mIndex->mSource->Length(&sourceLength);
  if (current->mByteRange.mEnd > sourceLength) {
    // The sample is not fully buffered yet.
    return nullptr;
  }

  RefPtr<MediaRawData> header = new MediaRawData();
  header->mTimecode = current->mDecodeTime;
  header->mTime = current->mCompositionRange.start;
  header->mDuration = current->mCompositionRange.Length();
  header->mOffset = current->mByteRange.mStart;
  header->mKeyframe = current->mSync;

  Next();
  return header.forget();
}
already_AddRefed<MediaRawData> SampleIterator::GetNext() {
Sample* s(Get());
if (!s) {

Просмотреть файл

@ -26,6 +26,7 @@ class SampleIterator {
explicit SampleIterator(MP4SampleIndex* aIndex);
~SampleIterator();
bool HasNext();
already_AddRefed<mozilla::MediaRawData> GetNextHeader();
already_AddRefed<mozilla::MediaRawData> GetNext();
void Seek(const media::TimeUnit& aTime);
media::TimeUnit GetNextKeyframeTime();

Просмотреть файл

@ -151,6 +151,9 @@ nsresult Decoder::Init() {
// XXX(seth): Soon that exception will be removed.
MOZ_ASSERT_IF(mImage, IsMetadataDecode());
// We can only request the frame count for metadata decoders.
MOZ_ASSERT_IF(WantsFrameCount(), IsMetadataDecode());
// Implementation-specific initialization.
nsresult rv = InitInternal();
@ -467,6 +470,10 @@ void Decoder::PostIsAnimated(FrameTimeout aFirstFrameTimeout) {
mImageMetadata.SetFirstFrameTimeout(aFirstFrameTimeout);
}
// Called by decoder implementations once they have determined how many
// frames the image is expected to contain; records the count in the
// image metadata so metadata-decode consumers can read it back.
void Decoder::PostFrameCount(uint32_t aFrameCount) {
mImageMetadata.SetFrameCount(aFrameCount);
}
void Decoder::PostFrameStop(Opacity aFrameOpacity) {
// We should be mid-frame
MOZ_ASSERT(!IsMetadataDecode(), "Stopping frame during metadata decode");

Просмотреть файл

@ -197,6 +197,13 @@ class Decoder {
}
bool IsMetadataDecode() const { return mMetadataDecode; }
/**
* Returns true if the caller requested (via DecoderFlags::COUNT_FRAMES)
* that this decoder report how many frames the animation is expected to
* contain.
*/
bool WantsFrameCount() const {
return bool(mDecoderFlags & DecoderFlags::COUNT_FRAMES);
}
/**
* Sets the output size of this decoder. If this is smaller than the intrinsic
* size of the image, we'll downscale it while decoding. For memory usage
@ -300,7 +307,7 @@ class Decoder {
/// useless?
bool GetDecodeDone() const {
  // A metadata decode is complete once the size is known, unless the
  // caller also asked for a frame count (COUNT_FRAMES), in which case we
  // must keep lexing until the decoder has reported one.
  return mReachedTerminalState || mDecodeDone ||
         (mMetadataDecode && HasSize() && !WantsFrameCount()) || HasError();
}
/// Are we in the middle of a frame right now? Used for assertions only.
@ -505,6 +512,10 @@ class Decoder {
// we advance to the next frame.
void PostIsAnimated(FrameTimeout aFirstFrameTimeout);
// Called by decoders if they determine the expected frame count.
// @param aFrameCount The expected frame count.
void PostFrameCount(uint32_t aFrameCount);
// Called by decoders when they end a frame. Informs the image, sends
// notifications, and does internal book-keeping.
// Specify whether this frame is opaque as an optimization.

Просмотреть файл

@ -178,6 +178,11 @@ nsresult DecoderFactory::CreateDecoder(
return NS_ERROR_INVALID_ARG;
}
// Only can use COUNT_FRAMES with metadata decoders.
if (NS_WARN_IF(bool(aDecoderFlags & DecoderFlags::COUNT_FRAMES))) {
return NS_ERROR_INVALID_ARG;
}
// Create an anonymous decoder. Interaction with the SurfaceCache and the
// owning RasterImage will be mediated by DecodedSurfaceProvider.
RefPtr<Decoder> decoder = GetDecoder(
@ -233,6 +238,11 @@ nsresult DecoderFactory::CreateAnimationDecoder(
return NS_ERROR_INVALID_ARG;
}
// Only can use COUNT_FRAMES with metadata decoders.
if (NS_WARN_IF(bool(aDecoderFlags & DecoderFlags::COUNT_FRAMES))) {
return NS_ERROR_INVALID_ARG;
}
MOZ_ASSERT(aType == DecoderType::GIF || aType == DecoderType::PNG ||
aType == DecoderType::WEBP || aType == DecoderType::AVIF,
"Calling CreateAnimationDecoder for non-animating DecoderType");
@ -391,6 +401,11 @@ already_AddRefed<Decoder> DecoderFactory::CreateAnonymousDecoder(
return nullptr;
}
// Only can use COUNT_FRAMES with metadata decoders.
if (NS_WARN_IF(bool(aDecoderFlags & DecoderFlags::COUNT_FRAMES))) {
return nullptr;
}
RefPtr<Decoder> decoder =
GetDecoder(aType, /* aImage = */ nullptr, /* aIsRedecode = */ false);
MOZ_ASSERT(decoder, "Should have a decoder now");
@ -420,7 +435,8 @@ already_AddRefed<Decoder> DecoderFactory::CreateAnonymousDecoder(
/* static */
already_AddRefed<Decoder> DecoderFactory::CreateAnonymousMetadataDecoder(
DecoderType aType, NotNull<SourceBuffer*> aSourceBuffer) {
DecoderType aType, NotNull<SourceBuffer*> aSourceBuffer,
DecoderFlags aDecoderFlags) {
if (aType == DecoderType::UNKNOWN) {
return nullptr;
}
@ -432,7 +448,7 @@ already_AddRefed<Decoder> DecoderFactory::CreateAnonymousMetadataDecoder(
// Initialize the decoder.
decoder->SetMetadataDecode(true);
decoder->SetIterator(aSourceBuffer->Iterator());
decoder->SetDecoderFlags(DecoderFlags::FIRST_FRAME_ONLY);
decoder->SetDecoderFlags(aDecoderFlags);
if (NS_FAILED(decoder->Init())) {
return nullptr;

Просмотреть файл

@ -190,9 +190,11 @@ class DecoderFactory {
* @param aType Which type of decoder to create - JPEG, PNG, etc.
* @param aSourceBuffer The SourceBuffer which the decoder will read its data
* from.
* @param aDecoderFlags Flags specifying the behavior of this decoder.
*/
static already_AddRefed<Decoder> CreateAnonymousMetadataDecoder(
DecoderType aType, NotNull<SourceBuffer*> aSourceBuffer);
DecoderType aType, NotNull<SourceBuffer*> aSourceBuffer,
DecoderFlags aDecoderFlags);
private:
virtual ~DecoderFactory() = 0;

Просмотреть файл

@ -42,6 +42,14 @@ enum class DecoderFlags : uint8_t {
// "image.avif.sequence.animate_avif_major_branded_images" preference.
AVIF_ANIMATE_AVIF_MAJOR = 1 << 6,
#endif
/**
* By default, we don't count how many animated frames there are in an image,
* as that would require us to iterate over the entire buffer for some image
* formats. Set this flag if the caller requires a full accounting of how
* many frames there are; it may only be used with metadata decoders.
*/
COUNT_FRAMES = 1 << 7,
};
MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(DecoderFlags)

Просмотреть файл

@ -38,6 +38,10 @@ class ImageMetadata {
FrameTimeout GetLoopLength() const { return *mLoopLength; }
bool HasLoopLength() const { return mLoopLength.isSome(); }
// Records how many frames the decoder expects the (possibly animated)
// image to contain.
void SetFrameCount(uint32_t aCount) { mFrameCount = Some(aCount); }
// Returns the expected frame count; only valid when HasFrameCount().
uint32_t GetFrameCount() const { return *mFrameCount; }
bool HasFrameCount() const { return mFrameCount.isSome(); }
void SetFirstFrameTimeout(FrameTimeout aTimeout) {
mFirstFrameTimeout = aTimeout;
}
@ -94,6 +98,9 @@ class ImageMetadata {
// The total length of a single loop through an animated image.
Maybe<FrameTimeout> mLoopLength;
// The total number of frames we expect from the animated image.
Maybe<uint32_t> mFrameCount;
/// The timeout of an animated image's first frame.
FrameTimeout mFirstFrameTimeout = FrameTimeout::Forever();

Просмотреть файл

@ -171,8 +171,11 @@ nsresult ImageOps::DecodeMetadata(ImageBuffer* aBuffer,
// Create a decoder.
DecoderType decoderType =
DecoderFactory::GetDecoderType(PromiseFlatCString(aMimeType).get());
DecoderFlags decoderFlags =
DecoderFactory::GetDefaultDecoderFlagsForType(decoderType);
decoderFlags |= DecoderFlags::FIRST_FRAME_ONLY;
RefPtr<Decoder> decoder = DecoderFactory::CreateAnonymousMetadataDecoder(
decoderType, WrapNotNull(sourceBuffer));
decoderType, WrapNotNull(sourceBuffer), decoderFlags);
if (!decoder) {
return NS_ERROR_FAILURE;
}

Просмотреть файл

@ -219,6 +219,38 @@ Mp4parseStatus AVIFParser::Create(const Mp4parseIo* aIo, ByteStream* aBuffer,
return status;
}
// Counts how many complete frames remain available from the sample
// iterators. Because this consumes the iterators, it may only be used
// for metadata decodes. Per the callers' convention, the first frame was
// already partially decoded while determining the size, so callers add
// one to this result — i.e. 0 here means a 1-frame image, 1 means 2
// frames, and so on. Non-animated images always return 0.
uint32_t AVIFParser::GetFrameCount() {
MOZ_ASSERT(mParser);
if (!IsAnimated()) {
return 0;
}
uint32_t frameCount = 0;
while (true) {
// A frame only counts if its color sample header is available...
RefPtr<MediaRawData> header = mColorSampleIter->GetNextHeader();
if (!header) {
break;
}
// ...and, when an alpha track exists, its alpha sample header too.
if (mAlphaSampleIter) {
header = mAlphaSampleIter->GetNextHeader();
if (!header) {
break;
}
}
++frameCount;
}
return frameCount;
}
nsAVIFDecoder::DecodeResult AVIFParser::GetImage(AVIFImage& aImage) {
MOZ_ASSERT(mParser);
@ -1612,6 +1644,12 @@ nsAVIFDecoder::DecodeResult nsAVIFDecoder::DoDecodeInternal(
mIsAnimated ? parsedInfo.alpha_track_bit_depth
: parsedInfo.alpha_item_bit_depth));
PostSize(ispeImageSize->width, ispeImageSize->height, orientation);
if (WantsFrameCount()) {
// Note that this consumes the frame iterators, so this can only be
// requested for metadata decodes. Since we had to partially decode the
// first frame to determine the size, we need to add one to the result.
PostFrameCount(mParser->GetFrameCount() + 1);
}
if (IsMetadataDecode()) {
MOZ_LOG(
sAVIFLog, LogLevel::Debug,
@ -1672,6 +1710,12 @@ nsAVIFDecoder::DecodeResult nsAVIFDecoder::DoDecodeInternal(
decodedData->mPictureRect.width, decodedData->mPictureRect.height));
PostSize(decodedData->mPictureRect.width, decodedData->mPictureRect.height,
orientation);
if (WantsFrameCount()) {
// Note that this consumes the frame iterators, so this can only be
// requested for metadata decodes. Since we had to partially decode the
// first frame to determine the size, we need to add one to the result.
PostFrameCount(mParser->GetFrameCount() + 1);
}
AccumulateCategorical(LABELS_AVIF_ISPE::absent);
mozilla::glean::avif::ispe.EnumGet(mozilla::glean::avif::IspeLabel::eAbsent)
.Add();

Просмотреть файл

@ -126,6 +126,8 @@ class AVIFParser {
const Mp4parseAvifInfo& GetInfo() const { return mInfo; }
uint32_t GetFrameCount();
nsAVIFDecoder::DecodeResult GetImage(AVIFImage& aImage);
bool IsAnimated() const;

Просмотреть файл

@ -698,6 +698,9 @@ LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadBitfields(
// Post our size to the superclass.
PostSize(mH.mWidth, AbsoluteHeight());
if (WantsFrameCount()) {
PostFrameCount(/* aFrameCount */ 1);
}
if (HasError()) {
return Transition::TerminateFailure();
}

Просмотреть файл

@ -113,14 +113,19 @@ nsresult nsGIFDecoder2::FinishInternal() {
PostLoopCount(mGIFStruct.loop_count);
// If the GIF got cut off, handle it anyway
if (!IsMetadataDecode()) {
if (mCurrentFrameIndex == mGIFStruct.images_decoded) {
EndImageFrame();
}
PostDecodeDone();
mGIFOpen = false;
if (WantsFrameCount()) {
PostFrameCount(mGIFStruct.images_decoded);
}
if (!IsMetadataDecode()) {
PostDecodeDone();
}
mGIFOpen = false;
return NS_OK;
}
@ -194,6 +199,14 @@ nsresult nsGIFDecoder2::BeginImageFrame(const OrientedIntRect& aFrameRect,
uint16_t aDepth, bool aIsInterlaced) {
MOZ_ASSERT(HasSize());
// If we are just counting frames for a metadata decode, there is no actual
// decoding done. We are just iterating over the blocks to find when a frame
// begins and ends.
if (WantsFrameCount()) {
mCurrentFrameIndex = mGIFStruct.images_decoded;
return NS_OK;
}
bool hasTransparency = CheckForTransparency(aFrameRect);
// Make sure there's no animation if we're downscaling.
@ -241,6 +254,16 @@ nsresult nsGIFDecoder2::BeginImageFrame(const OrientedIntRect& aFrameRect,
//******************************************************************************
void nsGIFDecoder2::EndImageFrame() {
if (WantsFrameCount()) {
mGIFStruct.pixels_remaining = 0;
mGIFStruct.images_decoded++;
mCurrentFrameIndex = -1;
// Keep updating the count every time we find a frame.
PostFrameCount(mGIFStruct.images_decoded);
return;
}
Opacity opacity = Opacity::SOME_TRANSPARENCY;
if (mGIFStruct.images_decoded == 0) {
@ -430,7 +453,9 @@ std::tuple<int32_t, Maybe<WriteState>> nsGIFDecoder2::YieldPixels(
/// Expand the colormap from RGB to Packed ARGB as needed by Cairo.
/// And apply any LCMS transformation.
void nsGIFDecoder2::ConvertColormap(uint32_t* aColormap, uint32_t aColors) {
if (!aColors) {
// If we are just counting frames for a metadata decode, there is no need to
// prep the colormap.
if (!aColors || WantsFrameCount()) {
return;
}
@ -771,6 +796,10 @@ LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadImageDescriptor(
MOZ_ASSERT(Size() == OutputSize(), "Downscaling an animated image?");
if (WantsFrameCount()) {
return FinishImageDescriptor(aData);
}
// Yield to allow access to the previous frame before we start a new one.
return Transition::ToAfterYield(State::FINISH_IMAGE_DESCRIPTOR);
}
@ -805,8 +834,8 @@ LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::FinishImageDescriptor(
return Transition::TerminateFailure();
}
// If we're doing a metadata decode, we're done.
if (IsMetadataDecode()) {
// If we're doing a metadata decode without the frame count, we're done.
if (IsMetadataDecode() && !WantsFrameCount()) {
CheckForTransparency(frameRect);
FinishInternal();
return Transition::TerminateSuccess();
@ -921,9 +950,13 @@ LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::FinishImageDescriptor(
LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadLocalColorTable(
const char* aData, size_t aLength) {
// If we are just counting frames for a metadata decode, there is no need to
// prep the colormap.
if (!WantsFrameCount()) {
uint8_t* dest = reinterpret_cast<uint8_t*>(mColormap) + mColorTablePos;
memcpy(dest, aData, aLength);
mColorTablePos += aLength;
}
return Transition::ContinueUnbuffered(State::LOCAL_COLOR_TABLE);
}
@ -1005,6 +1038,12 @@ LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadImageDataSubBlock(
LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadLZWData(
const char* aData, size_t aLength) {
// If we are just counting frames for a metadata decode, there is no need to
// do the actual decode.
if (WantsFrameCount()) {
return Transition::ContinueUnbuffered(State::LZW_DATA);
}
const uint8_t* data = reinterpret_cast<const uint8_t*>(aData);
size_t length = aLength;

Просмотреть файл

@ -291,6 +291,9 @@ LexerTransition<ICOState> nsICODecoder::FinishDirEntry() {
// is necessary for downscale-during-decode to work since we won't even
// attempt to *upscale* while decoding.
PostSize(biggestEntry->mSize.width, biggestEntry->mSize.height);
if (WantsFrameCount()) {
PostFrameCount(/* aFrameCount */ 1);
}
if (HasError()) {
return Transition::TerminateFailure();
}

Просмотреть файл

@ -79,6 +79,10 @@ LexerTransition<nsIconDecoder::State> nsIconDecoder::ReadHeader(
// Post our size to the superclass.
PostSize(width, height);
if (WantsFrameCount()) {
PostFrameCount(/* aFrameCount */ 1);
}
// Icons have alpha.
PostHasTransparency();

Просмотреть файл

@ -275,6 +275,9 @@ LexerTransition<nsJPEGDecoder::State> nsJPEGDecoder::ReadJPEGData(
EXIFData exif = ReadExifData();
PostSize(mInfo.image_width, mInfo.image_height, exif.orientation,
exif.resolution);
if (WantsFrameCount()) {
PostFrameCount(/* aFrameCount */ 1);
}
if (HasError()) {
// Setting the size led to an error.
mState = JPEG_ERROR;

Просмотреть файл

@ -112,6 +112,9 @@ LexerTransition<nsJXLDecoder::State> nsJXLDecoder::ReadJXLData(
case JXL_DEC_BASIC_INFO: {
JXL_TRY(JxlDecoderGetBasicInfo(mDecoder.get(), &mInfo));
PostSize(mInfo.xsize, mInfo.ysize);
if (WantsFrameCount()) {
PostFrameCount(/* aFrameCount */ 1);
}
if (mInfo.alpha_bits > 0) {
PostHasTransparency();
}

Просмотреть файл

@ -890,14 +890,22 @@ nsresult nsPNGDecoder::FinishInternal() {
MOZ_ASSERT(!HasError(), "Can't call FinishInternal on error!");
int32_t loop_count = 0;
uint32_t frame_count = 1;
#ifdef PNG_APNG_SUPPORTED
if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
int32_t num_plays = png_get_num_plays(mPNG, mInfo);
loop_count = num_plays - 1;
uint32_t num_plays = 0;
if (png_get_acTL(mPNG, mInfo, &frame_count, &num_plays)) {
loop_count = int32_t(num_plays) - 1;
} else {
frame_count = 1;
}
#endif
PostLoopCount(loop_count);
if (WantsFrameCount()) {
PostFrameCount(frame_count);
}
if (IsMetadataDecode()) {
return NS_OK;
}

Просмотреть файл

@ -390,6 +390,12 @@ LexerResult nsWebPDecoder::ReadHeader(WebPDemuxer* aDemuxer, bool aIsComplete) {
}
if (flags & WebPFeatureFlags::ANIMATION_FLAG) {
// The demuxer only knows how many frames it will have once it has the
// complete buffer.
if (WantsFrameCount() && !aIsComplete) {
return LexerResult(Yield::NEED_MORE_DATA);
}
// A metadata decode expects to get the correct first frame timeout which
// sadly is not provided by the normal WebP header parsing.
WebPIterator iter;
@ -423,6 +429,11 @@ LexerResult nsWebPDecoder::ReadHeader(WebPDemuxer* aDemuxer, bool aIsComplete) {
PostSize(width, height);
if (WantsFrameCount()) {
uint32_t frameCount = WebPDemuxGetI(aDemuxer, WEBP_FF_FRAME_COUNT);
PostFrameCount(frameCount);
}
bool alpha = flags & WebPFeatureFlags::ALPHA_FLAG;
if (alpha) {
mFormat = SurfaceFormat::OS_RGBA;

Просмотреть файл

@ -739,42 +739,48 @@ ImageTestCase GreenWebPIccSrgbTestCase() {
// Animated GIF whose first frame is green; a COUNT_FRAMES metadata
// decode is expected to report 2 frames.
ImageTestCase GreenFirstFrameAnimatedGIFTestCase() {
  return ImageTestCase("first-frame-green.gif", "image/gif", IntSize(100, 100),
                       TEST_CASE_IS_ANIMATED, /* aFrameCount */ 2);
}
// Animated (transparent) PNG whose first frame is green; expected frame
// count for a COUNT_FRAMES metadata decode is 2.
ImageTestCase GreenFirstFrameAnimatedPNGTestCase() {
  return ImageTestCase("first-frame-green.png", "image/png", IntSize(100, 100),
                       TEST_CASE_IS_TRANSPARENT | TEST_CASE_IS_ANIMATED,
                       /* aFrameCount */ 2);
}
// Animated WebP whose first frame is green; expected frame count for a
// COUNT_FRAMES metadata decode is 2.
ImageTestCase GreenFirstFrameAnimatedWebPTestCase() {
  return ImageTestCase("first-frame-green.webp", "image/webp",
                       IntSize(100, 100), TEST_CASE_IS_ANIMATED,
                       /* aFrameCount */ 2);
}
// Animated AVIF whose first frame is green; expected frame count for a
// COUNT_FRAMES metadata decode is 2.
ImageTestCase GreenFirstFrameAnimatedAVIFTestCase() {
  return ImageTestCase("first-frame-green.avif", "image/avif",
                       IntSize(100, 100), TEST_CASE_IS_ANIMATED,
                       /* aFrameCount */ 2);
}
// Animated GIF exercising frame blending; expected frame count is 2.
ImageTestCase BlendAnimatedGIFTestCase() {
  return ImageTestCase("blend.gif", "image/gif", IntSize(100, 100),
                       TEST_CASE_IS_ANIMATED, /* aFrameCount */ 2);
}
// Animated (transparent) PNG exercising frame blending; expected frame
// count is 2.
ImageTestCase BlendAnimatedPNGTestCase() {
  return ImageTestCase("blend.png", "image/png", IntSize(100, 100),
                       TEST_CASE_IS_TRANSPARENT | TEST_CASE_IS_ANIMATED,
                       /* aFrameCount */ 2);
}
// Animated (transparent) WebP exercising frame blending; expected frame
// count is 2.
ImageTestCase BlendAnimatedWebPTestCase() {
  return ImageTestCase("blend.webp", "image/webp", IntSize(100, 100),
                       TEST_CASE_IS_TRANSPARENT | TEST_CASE_IS_ANIMATED,
                       /* aFrameCount */ 2);
}
// Animated (transparent) AVIF exercising frame blending; expected frame
// count is 2.
ImageTestCase BlendAnimatedAVIFTestCase() {
  return ImageTestCase("blend.avif", "image/avif", IntSize(100, 100),
                       TEST_CASE_IS_TRANSPARENT | TEST_CASE_IS_ANIMATED,
                       /* aFrameCount */ 2);
}
ImageTestCase CorruptTestCase() {

Просмотреть файл

@ -113,11 +113,13 @@ enum TestCaseFlags {
struct ImageTestCase {
ImageTestCase(const char* aPath, const char* aMimeType, gfx::IntSize aSize,
uint32_t aFlags = TEST_CASE_DEFAULT_FLAGS)
uint32_t aFlags = TEST_CASE_DEFAULT_FLAGS,
uint32_t aFrameCount = 1)
: mPath(aPath),
mMimeType(aMimeType),
mSize(aSize),
mOutputSize(aSize),
mFrameCount(aFrameCount),
mFlags(aFlags),
mSurfaceFlags(DefaultSurfaceFlags()),
mColor(BGRAColor::Green()) {}
@ -173,6 +175,7 @@ struct ImageTestCase {
const char* mMimeType;
gfx::IntSize mSize;
gfx::IntSize mOutputSize;
uint32_t mFrameCount = 0;
uint32_t mFlags;
SurfaceFlags mSurfaceFlags;
BGRAColor mColor;

Просмотреть файл

@ -28,27 +28,82 @@ using namespace mozilla::image;
enum class BMPWithinICO { NO, YES };
static void CheckMetadata(const ImageTestCase& aTestCase,
BMPWithinICO aBMPWithinICO = BMPWithinICO::NO) {
nsCOMPtr<nsIInputStream> inputStream = LoadFile(aTestCase.mPath);
ASSERT_TRUE(inputStream != nullptr);
// Figure out how much data we have.
uint64_t length;
nsresult rv = inputStream->Available(&length);
ASSERT_NS_SUCCEEDED(rv);
// Write the data into a SourceBuffer.
auto sourceBuffer = MakeNotNull<RefPtr<SourceBuffer>>();
sourceBuffer->ExpectLength(length);
rv = sourceBuffer->AppendFromInputStream(inputStream, length);
ASSERT_NS_SUCCEEDED(rv);
sourceBuffer->Complete(NS_OK);
static void CheckMetadataFrameCount(
const ImageTestCase& aTestCase,
NotNull<RefPtr<SourceBuffer>>& aSourceBuffer, BMPWithinICO aBMPWithinICO) {
// Create a metadata decoder.
DecoderType decoderType = DecoderFactory::GetDecoderType(aTestCase.mMimeType);
DecoderFlags decoderFlags =
DecoderFactory::GetDefaultDecoderFlagsForType(decoderType);
decoderFlags |= DecoderFlags::COUNT_FRAMES;
RefPtr<image::Decoder> decoder =
DecoderFactory::CreateAnonymousMetadataDecoder(decoderType, sourceBuffer);
DecoderFactory::CreateAnonymousMetadataDecoder(decoderType, aSourceBuffer,
decoderFlags);
ASSERT_TRUE(decoder != nullptr);
RefPtr<IDecodingTask> task =
new AnonymousDecodingTask(WrapNotNull(decoder), /* aResumable */ false);
if (aBMPWithinICO == BMPWithinICO::YES) {
static_cast<nsBMPDecoder*>(decoder.get())->SetIsWithinICO();
}
// Run the metadata decoder synchronously.
task->Run();
// Ensure that the metadata decoder didn't make progress it shouldn't have
// (which would indicate that it decoded past the header of the image).
Progress metadataProgress = decoder->TakeProgress();
EXPECT_TRUE(
0 == (metadataProgress &
~(FLAG_SIZE_AVAILABLE | FLAG_HAS_TRANSPARENCY | FLAG_IS_ANIMATED)));
// If the test case is corrupt, assert what we can and return early.
if (aTestCase.mFlags & TEST_CASE_HAS_ERROR) {
EXPECT_TRUE(decoder->GetDecodeDone());
EXPECT_TRUE(decoder->HasError());
return;
}
EXPECT_TRUE(decoder->GetDecodeDone() && !decoder->HasError());
// Check that we got the expected metadata.
EXPECT_TRUE(metadataProgress & FLAG_SIZE_AVAILABLE);
OrientedIntSize metadataSize = decoder->Size();
EXPECT_EQ(aTestCase.mSize.width, metadataSize.width);
if (aBMPWithinICO == BMPWithinICO::YES) {
// Half the data is considered to be part of the AND mask if embedded
EXPECT_EQ(aTestCase.mSize.height / 2, metadataSize.height);
} else {
EXPECT_EQ(aTestCase.mSize.height, metadataSize.height);
}
bool expectTransparency =
aBMPWithinICO == BMPWithinICO::YES
? true
: bool(aTestCase.mFlags & TEST_CASE_IS_TRANSPARENT);
EXPECT_EQ(expectTransparency, bool(metadataProgress & FLAG_HAS_TRANSPARENCY));
EXPECT_EQ(bool(aTestCase.mFlags & TEST_CASE_IS_ANIMATED),
bool(metadataProgress & FLAG_IS_ANIMATED));
EXPECT_TRUE(decoder->WantsFrameCount());
const auto metadata = decoder->GetImageMetadata();
ASSERT_TRUE(metadata.HasFrameCount());
EXPECT_EQ(aTestCase.mFrameCount, metadata.GetFrameCount());
}
static void CheckMetadataCommon(const ImageTestCase& aTestCase,
NotNull<RefPtr<SourceBuffer>>& aSourceBuffer,
BMPWithinICO aBMPWithinICO) {
// Create a metadata decoder.
DecoderType decoderType = DecoderFactory::GetDecoderType(aTestCase.mMimeType);
DecoderFlags decoderFlags =
DecoderFactory::GetDefaultDecoderFlagsForType(decoderType);
decoderFlags |= DecoderFlags::FIRST_FRAME_ONLY;
RefPtr<image::Decoder> decoder =
DecoderFactory::CreateAnonymousMetadataDecoder(decoderType, aSourceBuffer,
decoderFlags);
ASSERT_TRUE(decoder != nullptr);
RefPtr<IDecodingTask> task =
new AnonymousDecodingTask(WrapNotNull(decoder), /* aResumable */ false);
@ -99,7 +154,7 @@ static void CheckMetadata(const ImageTestCase& aTestCase,
// Create a full decoder, so we can compare the result.
decoder = DecoderFactory::CreateAnonymousDecoder(
decoderType, sourceBuffer, Nothing(), DecoderFlags::FIRST_FRAME_ONLY,
decoderType, aSourceBuffer, Nothing(), DecoderFlags::FIRST_FRAME_ONLY,
aTestCase.mSurfaceFlags);
ASSERT_TRUE(decoder != nullptr);
task =
@ -131,6 +186,34 @@ static void CheckMetadata(const ImageTestCase& aTestCase,
(fullProgress & FLAG_IS_ANIMATED));
}
static void CheckMetadata(const ImageTestCase& aTestCase,
BMPWithinICO aBMPWithinICO = BMPWithinICO::NO,
bool aSkipCommon = false,
bool aSkipFrameCount = false) {
nsCOMPtr<nsIInputStream> inputStream = LoadFile(aTestCase.mPath);
ASSERT_TRUE(inputStream != nullptr);
// Figure out how much data we have.
uint64_t length;
nsresult rv = inputStream->Available(&length);
ASSERT_NS_SUCCEEDED(rv);
// Write the data into a SourceBuffer.
auto sourceBuffer = MakeNotNull<RefPtr<SourceBuffer>>();
sourceBuffer->ExpectLength(length);
rv = sourceBuffer->AppendFromInputStream(inputStream, length);
ASSERT_NS_SUCCEEDED(rv);
sourceBuffer->Complete(NS_OK);
if (!aSkipCommon) {
CheckMetadataCommon(aTestCase, sourceBuffer, aBMPWithinICO);
}
if (!aSkipFrameCount) {
CheckMetadataFrameCount(aTestCase, sourceBuffer, aBMPWithinICO);
}
}
class ImageDecoderMetadata : public ::testing::Test {
protected:
AutoInitializeImageLib mInit;
@ -153,6 +236,7 @@ TEST_F(ImageDecoderMetadata, BMP) { CheckMetadata(GreenBMPTestCase()); }
TEST_F(ImageDecoderMetadata, ICO) { CheckMetadata(GreenICOTestCase()); }
TEST_F(ImageDecoderMetadata, Icon) { CheckMetadata(GreenIconTestCase()); }
TEST_F(ImageDecoderMetadata, WebP) { CheckMetadata(GreenWebPTestCase()); }
TEST_F(ImageDecoderMetadata, AVIF) { CheckMetadata(GreenAVIFTestCase()); }
#ifdef MOZ_JXL
TEST_F(ImageDecoderMetadata, JXL) { CheckMetadata(GreenJXLTestCase()); }
@ -169,6 +253,17 @@ TEST_F(ImageDecoderMetadata, AnimatedPNG) {
CheckMetadata(GreenFirstFrameAnimatedPNGTestCase());
}
TEST_F(ImageDecoderMetadata, AnimatedWebP) {
CheckMetadata(GreenFirstFrameAnimatedWebPTestCase());
}
TEST_F(ImageDecoderMetadata, AnimatedAVIF) {
// TODO: If we request first frame only decoding, the AVIF decoder says the
// animated image is not animated. This should be fixed at some point.
CheckMetadata(GreenFirstFrameAnimatedAVIFTestCase(), BMPWithinICO::NO,
/* aSkipCommon */ true, /* aSkipFrameCount */ false);
}
TEST_F(ImageDecoderMetadata, FirstFramePaddingGIF) {
CheckMetadata(FirstFramePaddingGIFTestCase());
}
@ -189,7 +284,10 @@ TEST_F(ImageDecoderMetadata, RLE8BMP) { CheckMetadata(RLE8BMPTestCase()); }
TEST_F(ImageDecoderMetadata, Corrupt) { CheckMetadata(CorruptTestCase()); }
TEST_F(ImageDecoderMetadata, NoFrameDelayGIF) {
CheckMetadata(NoFrameDelayGIFTestCase());
// We skip the frame count version because we realize it is animated with a
// full decode, so the test isn't consistent.
CheckMetadata(NoFrameDelayGIFTestCase(), BMPWithinICO::NO,
/* aSkipCommon */ false, /* aSkipFrameCount */ true);
}
TEST_F(ImageDecoderMetadata, NoFrameDelayGIFFullDecode) {