Bug 1729051 - Simplify imgFrame to only use shared surfaces for raster images. r=jrmuizel

In practice we already only use SourceSurfaceSharedData as our
rasterized image backing. This means we no longer need to lock the data
to keep it in memory (when we used volatile memory), nor to try to
optimize the surface for the DrawTarget.

Differential Revision: https://phabricator.services.mozilla.com/D124476
This commit is contained in:
Andrew Osmond 2021-09-17 15:50:04 +00:00
Родитель 5b7aa9b3a9
Коммит 66fd73b08c
12 изменённых файлов: 58 добавлений и 556 удалений

Просмотреть файл

@ -612,27 +612,6 @@
}
# Conditional jump or move depends on uninitialised value(s)
# at 0xE626A5C: mozilla::image::imgFrame::Optimize() (in /builds/worker/work
# by 0xE626C68: mozilla::image::imgFrame::UnlockImageData() (in /home/work
# by 0xE608E8F: mozilla::image::RawAccessFrameRef::~RawAccessFrameRef() (i
# by 0xE61F5E4: mozilla::image::Decoder::~Decoder() (in /builds/worker/works
# by 0xE630E32: mozilla::image::nsIconDecoder::~nsIconDecoder() (in /home/
# by 0xE61A5B2: mozilla::image::Decoder::Release() (in /builds/worker/worksp
# by 0xE61DD73: mozilla::image::NotifyDecodeCompleteWorker::~NotifyDecodeC
# by 0xE61DD8F: mozilla::image::NotifyDecodeCompleteWorker::~NotifyDecodeC
# Uninitialised value was created by a stack allocation
# at 0xB8E46B0: ??? (in /usr/lib/x86_64-linux-gnu/libpixman-1.so.0.30.2)
{
Bug 1248365: mochitest-libpixman-4
Memcheck:Cond
fun:_ZN7mozilla5image8imgFrame8OptimizeEv
fun:_ZN7mozilla5image8imgFrame15UnlockImageDataEv
fun:_ZN7mozilla5image17RawAccessFrameRefD1Ev
fun:_ZN7mozilla5image7DecoderD1Ev
}
# Not sure what this is. I can't reproduce it locally despite much trying.
# Syscall param sendmsg(msg.msg_iov[0]) points to uninitialised byte(s)
# at 0x4E4533D: ??? (syscall-template.S:82)

Просмотреть файл

@ -1,48 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "SourceSurfaceVolatileData.h"
#include "gfxAlphaRecovery.h"
#include "mozilla/Likely.h"
#include "mozilla/Types.h" // for decltype
namespace mozilla {
namespace gfx {
bool SourceSurfaceVolatileData::Init(const IntSize& aSize, int32_t aStride,
SurfaceFormat aFormat) {
mSize = aSize;
mStride = aStride;
mFormat = aFormat;
size_t alignment = size_t(1) << gfxAlphaRecovery::GoodAlignmentLog2();
mVBuf = new VolatileBuffer();
if (MOZ_UNLIKELY(!mVBuf->Init(aStride * aSize.height, alignment))) {
mVBuf = nullptr;
return false;
}
return true;
}
/**
 * Reports this surface's memory usage for the memory reporter.
 *
 * Classifies the surface as plain data, then attributes the volatile
 * buffer's heap and non-heap footprint (if the buffer was allocated).
 */
void SourceSurfaceVolatileData::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                                                    SizeOfInfo& aInfo) const {
  aInfo.AddType(SurfaceType::DATA);
  if (!mVBuf) {
    // Init() failed or was never called; nothing to account for.
    return;
  }
  aInfo.mHeapBytes = mVBuf->HeapSizeOfExcludingThis(aMallocSizeOf);
  aInfo.mNonHeapBytes = mVBuf->NonHeapSizeOfExcludingThis();
#ifdef ANDROID
  // Volatile buffers keep a file handle open on Android.
  if (!mVBuf->OnHeap()) {
    aInfo.mExternalHandles = 1;
  }
#endif
}
} // namespace gfx
} // namespace mozilla

Просмотреть файл

@ -1,98 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_GFX_SOURCESURFACEVOLATILEDATA_H_
#define MOZILLA_GFX_SOURCESURFACEVOLATILEDATA_H_
#include "mozilla/gfx/2D.h"
#include "mozilla/Mutex.h"
#include "mozilla/VolatileBuffer.h"
namespace mozilla {
namespace gfx {
/**
* This class is used to wrap volatile data buffers used for source surfaces.
* The Map and Unmap semantics are used to guarantee that the volatile data
* buffer is not freed by the operating system while the surface is in active
* use. If GetData is expected to return a non-null value without a
* corresponding Map call (and verification of the result), the surface data
* should be wrapped in a temporary SourceSurfaceRawData with a ScopedMap
* closure.
*/
class SourceSurfaceVolatileData : public DataSourceSurface {
 public:
  MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(SourceSurfaceVolatileData, override)

  SourceSurfaceVolatileData()
      : mMutex("SourceSurfaceVolatileData"),
        mStride(0),
        mFormat(SurfaceFormat::UNKNOWN),
        mWasPurged(false) {}

  // Allocates the volatile backing buffer; see the .cpp for details.
  // Returns false on allocation failure, leaving the surface unusable.
  bool Init(const IntSize& aSize, int32_t aStride, SurfaceFormat aFormat);

  // NOTE(review): returns the raw pointer via the VolatileBufferPtr
  // conversion; presumably null unless the buffer is currently mapped
  // (i.e. between Map/Unmap) — confirm against VolatileBufferPtr semantics.
  uint8_t* GetData() override { return mVBufPtr; }
  int32_t Stride() override { return mStride; }

  SurfaceType GetType() const override { return SurfaceType::DATA; }
  IntSize GetSize() const override { return mSize; }
  SurfaceFormat GetFormat() const override { return mFormat; }

  // Memory-reporter hook; attributes the volatile buffer's footprint.
  void SizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                           SizeOfInfo& aInfo) const override;

  bool OnHeap() const override { return mVBuf->OnHeap(); }

  // Although Map (and Moz2D in general) isn't normally threadsafe,
  // we want to allow it for SourceSurfaceVolatileData since it should
  // always be fine (for reading at least).
  //
  // This is the same as the base class implementation except using
  // mMapCount instead of mIsMapped since that breaks for multithread.
  bool Map(MapType, MappedSurface* aMappedSurface) override {
    MutexAutoLock lock(mMutex);
    // Once the OS has purged the buffer the surface is permanently dead;
    // mWasPurged latches so later callers fail fast.
    if (mWasPurged) {
      return false;
    }
    // First mapper locks the volatile buffer in memory; subsequent
    // concurrent mappers just bump the count and reuse the pointer.
    if (mMapCount == 0) {
      mVBufPtr = mVBuf;
    }
    if (mVBufPtr.WasBufferPurged()) {
      mWasPurged = true;
      return false;
    }
    aMappedSurface->mData = mVBufPtr;
    aMappedSurface->mStride = mStride;
    ++mMapCount;
    return true;
  }

  void Unmap() override {
    MutexAutoLock lock(mMutex);
    MOZ_ASSERT(mMapCount > 0);
    MOZ_ASSERT(!mWasPurged);
    // Last unmapper releases the lock on the volatile buffer, allowing the
    // OS to purge it again under memory pressure.
    if (--mMapCount == 0) {
      mVBufPtr = nullptr;
    }
  }

 private:
  virtual ~SourceSurfaceVolatileData() = default;

  Mutex mMutex;                        // Guards mMapCount / mVBufPtr / mWasPurged.
  int32_t mStride;                     // Row stride in bytes.
  IntSize mSize;                       // Surface dimensions in pixels.
  RefPtr<VolatileBuffer> mVBuf;        // The purgeable backing allocation.
  VolatileBufferPtr<uint8_t> mVBufPtr; // Non-null only while mapped.
  SurfaceFormat mFormat;               // Pixel format.
  bool mWasPurged;                     // Latched true once the OS purged mVBuf.
};
} // namespace gfx
} // namespace mozilla
#endif /* MOZILLA_GFX_SOURCESURFACEVOLATILEDATA_H_ */

Просмотреть файл

@ -198,7 +198,6 @@ EXPORTS.mozilla.layers += [
"ScrollableLayerGuid.h",
"ShareableCanvasRenderer.h",
"SourceSurfaceSharedData.h",
"SourceSurfaceVolatileData.h",
"SurfacePool.h",
"SyncObject.h",
"TextureSourceProvider.h",
@ -412,7 +411,6 @@ UNIFIED_SOURCES += [
"ScrollableLayerGuid.cpp",
"ShareableCanvasRenderer.cpp",
"SourceSurfaceSharedData.cpp",
"SourceSurfaceVolatileData.cpp",
"SyncObject.cpp",
"TextureSourceProvider.cpp",
"TextureWrapperImage.cpp",

Просмотреть файл

@ -248,7 +248,9 @@ void Decoder::CompleteDecode() {
// If PostDecodeDone() has not been called, we may need to send teardown
// notifications if it is unrecoverable.
if (!mDecodeDone) {
if (mDecodeDone) {
MOZ_ASSERT(HasError() || mCurrentFrame, "Should have an error or a frame");
} else {
// We should always report an error to the console in this case.
mShouldReportError = true;
@ -262,18 +264,6 @@ void Decoder::CompleteDecode() {
mProgress |= FLAG_DECODE_COMPLETE | FLAG_HAS_ERROR;
}
}
if (mDecodeDone) {
MOZ_ASSERT(HasError() || mCurrentFrame, "Should have an error or a frame");
// If this image wasn't animated and isn't a transient image, mark its frame
// as optimizable. We don't support optimizing animated images and
// optimizing transient images isn't worth it.
if (!HasAnimation() &&
!(mDecoderFlags & DecoderFlags::IMAGE_IS_TRANSIENT) && mCurrentFrame) {
mCurrentFrame->SetOptimizable();
}
}
}
void Decoder::SetOutputSize(const gfx::IntSize& aSize) {
@ -350,11 +340,6 @@ RawAccessFrameRef Decoder::AllocateFrameInternal(
return RawAccessFrameRef();
}
if (frameNum == 1) {
MOZ_ASSERT(aPreviousFrame, "Must provide a previous frame when animated");
aPreviousFrame->SetRawAccessOnly();
}
if (frameNum > 0) {
if (aPreviousFrame->GetDisposalMethod() !=
DisposalMethod::RESTORE_PREVIOUS) {
@ -420,10 +405,6 @@ RawAccessFrameRef Decoder::AllocateFrameInternal(
frame->Abort();
return RawAccessFrameRef();
}
if (frameNum > 0) {
frame->SetRawAccessOnly();
}
}
mFrameCount++;

Просмотреть файл

@ -9,3 +9,5 @@ SOURCES += [
]
FINAL_LIBRARY = "xul"
include("/ipc/chromium/chromium-config.mozbuild")

Просмотреть файл

@ -13,3 +13,5 @@ LOCAL_INCLUDES += [
]
FINAL_LIBRARY = "xul"
include("/ipc/chromium/chromium-config.mozbuild")

Просмотреть файл

@ -15,4 +15,6 @@ LOCAL_INCLUDES += [
"/image/encoders/png",
]
include("/ipc/chromium/chromium-config.mozbuild")
FINAL_LIBRARY = "xul"

Просмотреть файл

@ -12,4 +12,6 @@ LOCAL_INCLUDES += [
"/image",
]
include("/ipc/chromium/chromium-config.mozbuild")
FINAL_LIBRARY = "xul"

Просмотреть файл

@ -6,7 +6,6 @@
#include "imgFrame.h"
#include "ImageRegion.h"
#include "ShutdownTracker.h"
#include "SurfaceCache.h"
#include "prenv.h"
@ -19,16 +18,11 @@
#include "MainThreadUtils.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/gfx/gfxVars.h"
#include "mozilla/gfx/Tools.h"
#include "mozilla/gfx/SourceSurfaceRawData.h"
#include "mozilla/layers/SourceSurfaceSharedData.h"
#include "mozilla/layers/SourceSurfaceVolatileData.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/ProfilerLabels.h"
#include "mozilla/StaticPrefs_browser.h"
#include "mozilla/StaticPrefs_image.h"
#include "nsMargin.h"
#include "nsRefreshDriver.h"
#include "nsThreadUtils.h"
@ -56,90 +50,25 @@ class RecyclingSourceSurfaceSharedData final : public SourceSurfaceSharedData {
}
};
static int32_t VolatileSurfaceStride(const IntSize& size,
SurfaceFormat format) {
static already_AddRefed<SourceSurfaceSharedData> AllocateBufferForImage(
const IntSize& size, SurfaceFormat format, bool aShouldRecycle = false) {
// Stride must be a multiple of four or cairo will complain.
return (size.width * BytesPerPixel(format) + 0x3) & ~0x3;
}
int32_t stride = (size.width * BytesPerPixel(format) + 0x3) & ~0x3;
static already_AddRefed<DataSourceSurface> CreateLockedSurface(
DataSourceSurface* aSurface, const IntSize& size, SurfaceFormat format) {
switch (aSurface->GetType()) {
case SurfaceType::DATA_SHARED:
case SurfaceType::DATA_RECYCLING_SHARED:
case SurfaceType::DATA_ALIGNED: {
// Shared memory is never released until the surface itself is released.
// Similar for aligned/heap surfaces.
RefPtr<DataSourceSurface> surf(aSurface);
return surf.forget();
}
default: {
// Volatile memory requires us to map it first, and it is fallible.
DataSourceSurface::ScopedMap smap(aSurface,
DataSourceSurface::READ_WRITE);
if (smap.IsMapped()) {
return MakeAndAddRef<SourceSurfaceMappedData>(std::move(smap), size,
format);
}
break;
}
}
return nullptr;
}
static bool ShouldUseHeap(const IntSize& aSize, int32_t aStride,
bool aIsAnimated) {
// On some platforms (i.e. Android), a volatile buffer actually keeps a file
// handle active. We would like to avoid too many since we could easily
// exhaust the pool. However, other platforms we do not have the file handle
// problem, and additionally we may avoid a superfluous memset since the
// volatile memory starts out as zero-filled. Hence the knobs below.
// For as long as an animated image is retained, its frames will never be
// released to let the OS purge volatile buffers.
if (aIsAnimated && StaticPrefs::image_mem_animated_use_heap()) {
return true;
}
// Lets us avoid too many small images consuming all of the handles. The
// actual allocation checks for overflow.
int32_t bufferSize = (aStride * aSize.height) / 1024;
return bufferSize < StaticPrefs::image_mem_volatile_min_threshold_kb();
}
static already_AddRefed<DataSourceSurface> AllocateBufferForImage(
const IntSize& size, SurfaceFormat format, bool aShouldRecycle = false,
bool aIsAnimated = false) {
int32_t stride = VolatileSurfaceStride(size, format);
if (gfxVars::GetUseWebRenderOrDefault() && StaticPrefs::image_mem_shared()) {
RefPtr<SourceSurfaceSharedData> newSurf;
if (aShouldRecycle) {
newSurf = new RecyclingSourceSurfaceSharedData();
} else {
newSurf = new SourceSurfaceSharedData();
}
if (newSurf->Init(size, stride, format)) {
return newSurf.forget();
}
} else if (ShouldUseHeap(size, stride, aIsAnimated)) {
RefPtr<SourceSurfaceAlignedRawData> newSurf =
new SourceSurfaceAlignedRawData();
if (newSurf->Init(size, format, false, 0, stride)) {
return newSurf.forget();
}
RefPtr<SourceSurfaceSharedData> newSurf;
if (aShouldRecycle) {
newSurf = new RecyclingSourceSurfaceSharedData();
} else {
RefPtr<SourceSurfaceVolatileData> newSurf = new SourceSurfaceVolatileData();
if (newSurf->Init(size, stride, format)) {
return newSurf.forget();
}
newSurf = new SourceSurfaceSharedData();
}
return nullptr;
if (!newSurf->Init(size, stride, format)) {
return nullptr;
}
return newSurf.forget();
}
static bool GreenSurface(DataSourceSurface* aSurface, const IntSize& aSize,
SurfaceFormat aFormat) {
static bool GreenSurface(SourceSurfaceSharedData* aSurface,
const IntSize& aSize, SurfaceFormat aFormat) {
int32_t stride = aSurface->Stride();
uint32_t* surfaceData = reinterpret_cast<uint32_t*>(aSurface->GetData());
uint32_t surfaceDataLength = (stride * aSize.height) / sizeof(uint32_t);
@ -171,8 +100,8 @@ static bool GreenSurface(DataSourceSurface* aSurface, const IntSize& aSize,
return true;
}
static bool ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize,
SurfaceFormat aFormat) {
static bool ClearSurface(SourceSurfaceSharedData* aSurface,
const IntSize& aSize, SurfaceFormat aFormat) {
int32_t stride = aSurface->Stride();
uint8_t* data = aSurface->GetData();
MOZ_ASSERT(data);
@ -196,10 +125,8 @@ static bool ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize,
imgFrame::imgFrame()
: mMonitor("imgFrame"),
mDecoded(0, 0, 0, 0),
mLockCount(0),
mAborted(false),
mFinished(false),
mOptimizable(false),
mShouldRecycle(false),
mTimeout(FrameTimeout::FromRawMilliseconds(100)),
mDisposalMethod(DisposalMethod::NOT_SPECIFIED),
@ -256,11 +183,9 @@ nsresult imgFrame::InitForDecoder(const nsIntSize& aImageSize,
mNonPremult = aNonPremult;
mShouldRecycle = aShouldRecycle;
MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");
MOZ_ASSERT(!mRawSurface, "Called imgFrame::InitForDecoder() twice?");
bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
mRawSurface = AllocateBufferForImage(mImageSize, mFormat, mShouldRecycle,
postFirstFrame);
mRawSurface = AllocateBufferForImage(mImageSize, mFormat, mShouldRecycle);
if (!mRawSurface) {
mAborted = true;
return NS_ERROR_OUT_OF_MEMORY;
@ -275,23 +200,6 @@ nsresult imgFrame::InitForDecoder(const nsIntSize& aImageSize,
}
}
mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
if (!mLockedSurface) {
NS_WARNING("Failed to create LockedSurface");
mAborted = true;
return NS_ERROR_OUT_OF_MEMORY;
}
if (mBlankRawSurface) {
mBlankLockedSurface =
CreateLockedSurface(mBlankRawSurface, mImageSize, mFormat);
if (!mBlankLockedSurface) {
NS_WARNING("Failed to create BlankLockedSurface");
mAborted = true;
return NS_ERROR_OUT_OF_MEMORY;
}
}
if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
NS_WARNING("Could not clear allocated buffer");
mAborted = true;
@ -314,8 +222,7 @@ nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
// done with it in a timely manner. Let's ensure they are done with it first.
MonitorAutoLock lock(mMonitor);
MOZ_ASSERT(mLockCount > 0);
MOZ_ASSERT(mLockedSurface);
MOZ_ASSERT(mRawSurface);
if (!mShouldRecycle) {
// This frame either was never marked as recyclable, or the flag was cleared
@ -325,14 +232,11 @@ nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
// Ensure we account for all internal references to the surface.
MozRefCountType internalRefs = 1;
if (mRawSurface == mLockedSurface) {
++internalRefs;
}
if (mOptSurface == mLockedSurface) {
if (mOptSurface == mRawSurface) {
++internalRefs;
}
if (mLockedSurface->refCount() > internalRefs) {
if (mRawSurface->refCount() > internalRefs) {
if (NS_IsMainThread()) {
// We should never be both decoding and recycling on the main thread. Sync
// decoding can only be used to produce the first set of frames. Those
@ -362,7 +266,7 @@ nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
TimeStamp::Now() + TimeDuration::FromMilliseconds(refreshInterval);
while (true) {
mMonitor.Wait(waitInterval);
if (mLockedSurface->refCount() <= internalRefs) {
if (mRawSurface->refCount() <= internalRefs) {
break;
}
@ -406,7 +310,7 @@ nsresult imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
if (canUseDataSurface) {
// It's safe to use data surfaces for content on this platform, so we can
// get away with using volatile buffers.
MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");
MOZ_ASSERT(!mRawSurface, "Called imgFrame::InitWithDrawable() twice?");
mRawSurface = AllocateBufferForImage(mImageSize, mFormat);
if (!mRawSurface) {
@ -414,13 +318,6 @@ nsresult imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
return NS_ERROR_OUT_OF_MEMORY;
}
mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
if (!mLockedSurface) {
NS_WARNING("Failed to create LockedSurface");
mAborted = true;
return NS_ERROR_OUT_OF_MEMORY;
}
if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
NS_WARNING("Could not clear allocated buffer");
mAborted = true;
@ -428,8 +325,7 @@ nsresult imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
}
target = gfxPlatform::CreateDrawTargetForData(
mLockedSurface->GetData(), mImageSize, mLockedSurface->Stride(),
mFormat);
mRawSurface->GetData(), mImageSize, mRawSurface->Stride(), mFormat);
} else {
// We can't use data surfaces for content, so we'll create an offscreen
// surface instead. This means if someone later calls RawAccessRef(), we
@ -458,8 +354,8 @@ nsresult imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
ImageRegion::Create(ThebesRect(GetRect())),
mFormat, aSamplingFilter, aImageFlags);
if (canUseDataSurface && !mLockedSurface) {
NS_WARNING("Failed to create VolatileDataSourceSurface");
if (canUseDataSurface && !mRawSurface) {
NS_WARNING("Failed to create SourceSurfaceSharedData");
mAborted = true;
return NS_ERROR_OUT_OF_MEMORY;
}
@ -484,83 +380,9 @@ nsresult imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
return NS_OK;
}
nsresult imgFrame::Optimize(DrawTarget* aTarget) {
MOZ_ASSERT(NS_IsMainThread());
mMonitor.AssertCurrentThreadOwns();
if (mLockCount > 0 || !mOptimizable) {
// Don't optimize right now.
return NS_OK;
}
// Check whether image optimization is disabled -- not thread safe!
static bool gDisableOptimize = false;
static bool hasCheckedOptimize = false;
if (!hasCheckedOptimize) {
if (PR_GetEnv("MOZ_DISABLE_IMAGE_OPTIMIZE")) {
gDisableOptimize = true;
}
hasCheckedOptimize = true;
}
// Don't optimize during shutdown because gfxPlatform may not be available.
if (ShutdownTracker::ShutdownHasStarted()) {
return NS_OK;
}
if (gDisableOptimize) {
return NS_OK;
}
if (mOptSurface) {
return NS_OK;
}
// XXX(seth): It's currently unclear if there's any reason why we can't
// optimize non-premult surfaces. We should look into removing this.
if (mNonPremult) {
return NS_OK;
}
if (!gfxVars::UseWebRender()) {
mOptSurface = aTarget->OptimizeSourceSurface(mLockedSurface);
} else {
mOptSurface = gfxPlatform::GetPlatform()
->ScreenReferenceDrawTarget()
->OptimizeSourceSurface(mLockedSurface);
}
if (mOptSurface == mLockedSurface) {
mOptSurface = nullptr;
}
if (mOptSurface) {
// There's no reason to keep our original surface around if we have an
// optimized surface. Release our reference to it. This will leave
// |mLockedSurface| as the only thing keeping it alive, so it'll get freed
// below.
mRawSurface = nullptr;
}
// Release all strong references to the surface's memory. If the underlying
// surface is volatile, this will allow the operating system to free the
// memory if it needs to.
mLockedSurface = nullptr;
mOptimizable = false;
return NS_OK;
}
DrawableFrameRef imgFrame::DrawableRef() { return DrawableFrameRef(this); }
RawAccessFrameRef imgFrame::RawAccessRef(bool aOnlyFinished /*= false*/) {
return RawAccessFrameRef(this, aOnlyFinished);
}
void imgFrame::SetRawAccessOnly() {
AssertImageDataLocked();
// Lock our data and throw away the key.
LockImageData(false);
}
RawAccessFrameRef imgFrame::RawAccessRef() { return RawAccessFrameRef(this); }
imgFrame::SurfaceWithFormat imgFrame::SurfaceForDrawing(
bool aDoPartialDecode, bool aDoTile, ImageRegion& aRegion,
@ -627,11 +449,6 @@ bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
{
MonitorAutoLock lock(mMonitor);
// Possibly convert this image into a GPU texture, this may also cause our
// mLockedSurface to be released and the OS to release the underlying
// memory.
Optimize(aContext->GetDrawTarget());
bool doPartialDecode = !AreAllPixelsWritten();
// Most draw targets will just use the surface only during DrawPixelSnapped
@ -692,16 +509,12 @@ nsresult imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect) {
if (mRawSurface) {
mRawSurface->Invalidate(updateRect);
}
if (mLockedSurface && mRawSurface != mLockedSurface) {
mLockedSurface->Invalidate(updateRect);
}
return NS_OK;
}
void imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
bool aFinalize /* = true */) {
MonitorAutoLock lock(mMonitor);
MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
IntRect frameRect(GetRect());
if (!mDecoded.IsEqualEdges(frameRect)) {
@ -755,17 +568,15 @@ void imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const {
void imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const {
mMonitor.AssertCurrentThreadOwns();
MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
MOZ_ASSERT(mLockedSurface);
MOZ_ASSERT(mRawSurface);
if (mLockedSurface) {
if (mRawSurface) {
// TODO: This is okay for now because we only realloc shared surfaces on
// the main thread after decoding has finished, but if animations want to
// read frame data off the main thread, we will need to reconsider this.
*aData = mLockedSurface->GetData();
MOZ_ASSERT(
*aData,
"mLockedSurface is non-null, but GetData is null in GetImageData");
*aData = mRawSurface->GetData();
MOZ_ASSERT(*aData,
"mRawSurface is non-null, but GetData is null in GetImageData");
} else {
*aData = nullptr;
}
@ -780,60 +591,6 @@ uint8_t* imgFrame::GetImageData() const {
return data;
}
uint8_t* imgFrame::LockImageData(bool aOnlyFinished) {
MonitorAutoLock lock(mMonitor);
MOZ_ASSERT(mLockCount >= 0, "Unbalanced locks and unlocks");
if (mLockCount < 0 || (aOnlyFinished && !mFinished)) {
return nullptr;
}
uint8_t* data;
if (mLockedSurface) {
data = mLockedSurface->GetData();
} else {
data = nullptr;
}
// If the raw data is still available, we should get a valid pointer for it.
if (!data) {
MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
return nullptr;
}
++mLockCount;
return data;
}
void imgFrame::AssertImageDataLocked() const {
#ifdef DEBUG
MonitorAutoLock lock(mMonitor);
MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
#endif
}
nsresult imgFrame::UnlockImageData() {
MonitorAutoLock lock(mMonitor);
MOZ_ASSERT(mLockCount > 0, "Unlocking an unlocked image!");
if (mLockCount <= 0) {
return NS_ERROR_FAILURE;
}
MOZ_ASSERT(mLockCount > 1 || mFinished || mAborted,
"Should have Finish()'d or aborted before unlocking");
mLockCount--;
return NS_OK;
}
void imgFrame::SetOptimizable() {
AssertImageDataLocked();
MonitorAutoLock lock(mMonitor);
mOptimizable = true;
}
void imgFrame::FinalizeSurface() {
MonitorAutoLock lock(mMonitor);
FinalizeSurfaceInternal();
@ -868,26 +625,16 @@ already_AddRefed<SourceSurface> imgFrame::GetSourceSurfaceInternal() {
mOptSurface = nullptr;
}
if (mBlankLockedSurface) {
if (mBlankRawSurface) {
// We are going to return the blank surface because of the flags.
// We are including comments here that are copied from below
// just so that we are on the same page!
RefPtr<SourceSurface> surf(mBlankLockedSurface);
RefPtr<SourceSurface> surf(mBlankRawSurface);
return surf.forget();
}
if (mLockedSurface) {
RefPtr<SourceSurface> surf(mLockedSurface);
return surf.forget();
}
MOZ_ASSERT(!mShouldRecycle, "Should recycle but no locked surface!");
if (!mRawSurface) {
return nullptr;
}
return CreateLockedSurface(mRawSurface, mImageSize, mFormat);
RefPtr<SourceSurface> surf(mRawSurface);
return surf.forget();
}
void imgFrame::Abort() {
@ -936,12 +683,6 @@ void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
metadata.mSurface = mOptSurface ? mOptSurface.get() : mRawSurface.get();
metadata.mFinished = mFinished;
if (mLockedSurface) {
// The locked surface should only be present if we have mRawSurface. Hence
// we only need to get its allocation size to avoid double counting.
metadata.mHeapBytes += aMallocSizeOf(mLockedSurface);
metadata.AddType(mLockedSurface->GetType());
}
if (mOptSurface) {
metadata.mHeapBytes += aMallocSizeOf(mOptSurface);

Просмотреть файл

@ -13,6 +13,7 @@
#include "AnimationParams.h"
#include "MainThreadUtils.h"
#include "gfxDrawable.h"
#include "mozilla/layers/SourceSurfaceSharedData.h"
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Monitor.h"
@ -28,7 +29,7 @@ class RawAccessFrameRef;
enum class Opacity : uint8_t { FULLY_OPAQUE, SOME_TRANSPARENCY };
class imgFrame {
typedef gfx::DataSourceSurface DataSourceSurface;
typedef gfx::SourceSurfaceSharedData SourceSurfaceSharedData;
typedef gfx::DrawTarget DrawTarget;
typedef gfx::SamplingFilter SamplingFilter;
typedef gfx::IntPoint IntPoint;
@ -88,23 +89,8 @@ class imgFrame {
/**
* Create a RawAccessFrameRef for the frame.
*
* @param aOnlyFinished If true, only return a valid RawAccessFrameRef if
* imgFrame::Finish has been called.
*/
RawAccessFrameRef RawAccessRef(bool aOnlyFinished = false);
/**
* Make this imgFrame permanently available for raw access.
*
* This is irrevocable, and should be avoided whenever possible, since it
* prevents this imgFrame from being optimized and makes it impossible for its
* volatile buffer to be freed.
*
* It is an error to call this without already holding a RawAccessFrameRef to
* this imgFrame.
*/
void SetRawAccessOnly();
RawAccessFrameRef RawAccessRef();
bool Draw(gfxContext* aContext, const ImageRegion& aRegion,
SamplingFilter aSamplingFilter, uint32_t aImageFlags,
@ -155,10 +141,7 @@ class imgFrame {
void WaitUntilFinished() const;
/**
* Returns the number of bytes per pixel this imgFrame requires. This is a
* worst-case value that does not take into account the effects of format
* changes caused by Optimize(), since an imgFrame is not optimized throughout
* its lifetime.
* Returns the number of bytes per pixel this imgFrame requires.
*/
uint32_t GetBytesPerPixel() const { return 4; }
@ -178,8 +161,6 @@ class imgFrame {
const IntRect& GetDirtyRect() const { return mDirtyRect; }
void SetDirtyRect(const IntRect& aDirtyRect) { mDirtyRect = aDirtyRect; }
void SetOptimizable();
void FinalizeSurface();
already_AddRefed<SourceSurface> GetSourceSurface();
@ -200,20 +181,6 @@ class imgFrame {
private: // methods
~imgFrame();
/**
* Used when the caller desires raw access to the underlying frame buffer.
* If the locking succeeds, the data pointer to the start of the buffer is
* returned, else it returns nullptr.
*
* @param aOnlyFinished If true, only attempt to lock if imgFrame::Finish has
* been called.
*/
uint8_t* LockImageData(bool aOnlyFinished);
nsresult UnlockImageData();
nsresult Optimize(gfx::DrawTarget* aTarget);
void AssertImageDataLocked() const;
bool AreAllPixelsWritten() const;
nsresult ImageUpdatedInternal(const nsIntRect& aUpdateRect);
void GetImageDataInternal(uint8_t** aData, uint32_t* length) const;
@ -256,35 +223,21 @@ class imgFrame {
mutable Monitor mMonitor;
/**
* Surface which contains either a weak or a strong reference to its
* underlying data buffer. If it is a weak reference, and there are no strong
* references, the buffer may be released due to events such as low memory.
* Used for rasterized images, this contains the raw pixel data.
*/
RefPtr<DataSourceSurface> mRawSurface;
RefPtr<DataSourceSurface> mBlankRawSurface;
RefPtr<SourceSurfaceSharedData> mRawSurface;
RefPtr<SourceSurfaceSharedData> mBlankRawSurface;
/**
* Refers to the same data as mRawSurface, but when set, it guarantees that
* we hold a strong reference to the underlying data buffer.
*/
RefPtr<DataSourceSurface> mLockedSurface;
RefPtr<DataSourceSurface> mBlankLockedSurface;
/**
* Optimized copy of mRawSurface for the DrawTarget that will render it. This
* is unused if the DrawTarget is able to render DataSourceSurface buffers
* directly.
* Used for vector images that were not rasterized directly. This might be a
* blob recording or native surface.
*/
RefPtr<SourceSurface> mOptSurface;
nsIntRect mDecoded;
//! Number of RawAccessFrameRefs currently alive for this imgFrame.
int16_t mLockCount;
bool mAborted;
bool mFinished;
bool mOptimizable;
bool mShouldRecycle;
//////////////////////////////////////////////////////////////////////////////
@ -402,11 +355,11 @@ class RawAccessFrameRef final {
public:
RawAccessFrameRef() : mData(nullptr) {}
explicit RawAccessFrameRef(imgFrame* aFrame, bool aOnlyFinished)
explicit RawAccessFrameRef(imgFrame* aFrame)
: mFrame(aFrame), mData(nullptr) {
MOZ_ASSERT(mFrame, "Need a frame");
mData = mFrame->LockImageData(aOnlyFinished);
mData = mFrame->GetImageData();
if (!mData) {
mFrame = nullptr;
}
@ -417,19 +370,11 @@ class RawAccessFrameRef final {
aOther.mData = nullptr;
}
~RawAccessFrameRef() {
if (mFrame) {
mFrame->UnlockImageData();
}
}
~RawAccessFrameRef() {}
RawAccessFrameRef& operator=(RawAccessFrameRef&& aOther) {
MOZ_ASSERT(this != &aOther, "Self-moves are prohibited");
if (mFrame) {
mFrame->UnlockImageData();
}
mFrame = std::move(aOther.mFrame);
mData = aOther.mData;
aOther.mData = nullptr;
@ -453,9 +398,6 @@ class RawAccessFrameRef final {
const imgFrame* get() const { return mFrame; }
void reset() {
if (mFrame) {
mFrame->UnlockImageData();
}
mFrame = nullptr;
mData = nullptr;
}

Просмотреть файл

@ -25,7 +25,6 @@ static already_AddRefed<imgFrame> CreateEmptyFrame(
Some(animParams), aCanRecycle);
EXPECT_TRUE(NS_SUCCEEDED(rv));
RawAccessFrameRef frameRef = frame->RawAccessRef();
frame->SetRawAccessOnly();
// Normally the blend animation filter would set the dirty rect, but since
// we aren't producing an actual animation here, we need to fake it.
frame->SetDirtyRect(aFrameRect);