Refactor GrLayerCache for new API

The only substantive change in this CL is skipping atlasing
for any layer that is involved in nesting (i.e., a layer that
is nested or that contains nested layers). Prior versions
allowed such layers to be atlased.
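
As a caller-side sketch (condensed from the SkGpuDevice diff
below; the surrounding setup is elided and the local name
'dontAtlas' is only for this sketch), the nesting information
is now routed into GrLayerCache::lock():

    const GPUAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i);

    // Layers that contain nested layers, or are themselves nested,
    // are kept out of the atlas.
    bool dontAtlas = info.fHasNestedLayers || info.fIsNested;

    // lock() now returns true when the layer must be (re)rendered into
    // the texture it just acquired, and false when the cached copy can
    // be reused as-is.
    bool needsRendering = fContext->getLayerCache()->lock(layer, desc, dontAtlas);
    if (needsRendering) {
        // (re)draw the layer's content into layer->texture()
    }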

--------------------------------------------------------

All times are on Windows with a repeat count of 200.
Format is: <time in ms> (<# of glBindFramebuffer calls>)

How to interpret this:

For the boogie page:

 - Both columns should be the same (since boogie has no
   nested layers).
 - Without the new API the tiled case doesn't show any real
   benefit from hoisting.

For the carsvg page:

 - The nesting change does increase the number of FBO
   switches but doesn't kill performance.
 - Because of the location & size of the layers, the
   tiled case does show some improvement (even without
   the new API).

boogie
------

                      w/o nested change   w/ nested change

simple                  5.62 (811)             N/A
tiled                   7.72 (811)             N/A
simple w/ hoisting      5.23 (409)          5.77 (409)
  (but no caching)
tiled w/ hoisting       7.57 (809)          7.49 (809)
  (but no caching)

carsvg
------

                      w/o nested change   w/ nested change

simple                 60.37 (141990)          N/A
tiled                 115.13 (256929)          N/A
simple w/ hoisting     41.57 (64570)       42.82 (72279)
  (but no caching)
tiled w/ hoisting      84.24 (154352)      84.71 (165630)
  (but no caching)

R=bsalomon@google.com

Author: robertphillips@google.com

Review URL: https://codereview.chromium.org/476833004
robertphillips 2014-08-18 08:50:03 -07:00, committed by Commit bot
Parent 479601b9a7
Commit 6f294af43b
4 changed files: 107 additions and 72 deletions


@@ -117,37 +117,36 @@ void GrLayerCache::freeAll() {
this->initAtlas();
}
GrCachedLayer* GrLayerCache::createLayer(const SkPicture* picture,
GrCachedLayer* GrLayerCache::createLayer(uint32_t pictureID,
int start, int stop,
const SkMatrix& ctm) {
SkASSERT(picture->uniqueID() != SK_InvalidGenID && start > 0 && stop > 0);
SkASSERT(pictureID != SK_InvalidGenID && start > 0 && stop > 0);
GrCachedLayer* layer = SkNEW_ARGS(GrCachedLayer, (picture->uniqueID(), start, stop, ctm));
GrCachedLayer* layer = SkNEW_ARGS(GrCachedLayer, (pictureID, start, stop, ctm));
fLayerHash.add(layer);
return layer;
}
GrCachedLayer* GrLayerCache::findLayer(const SkPicture* picture,
GrCachedLayer* GrLayerCache::findLayer(uint32_t pictureID,
int start, int stop,
const SkMatrix& ctm) {
SkASSERT(picture->uniqueID() != SK_InvalidGenID && start > 0 && stop > 0);
return fLayerHash.find(GrCachedLayer::Key(picture->uniqueID(), start, stop, ctm));
SkASSERT(pictureID != SK_InvalidGenID && start > 0 && stop > 0);
return fLayerHash.find(GrCachedLayer::Key(pictureID, start, stop, ctm));
}
GrCachedLayer* GrLayerCache::findLayerOrCreate(const SkPicture* picture,
GrCachedLayer* GrLayerCache::findLayerOrCreate(uint32_t pictureID,
int start, int stop,
const SkMatrix& ctm) {
SkASSERT(picture->uniqueID() != SK_InvalidGenID && start > 0 && stop > 0);
GrCachedLayer* layer = fLayerHash.find(GrCachedLayer::Key(picture->uniqueID(),
start, stop, ctm));
SkASSERT(pictureID != SK_InvalidGenID && start > 0 && stop > 0);
GrCachedLayer* layer = fLayerHash.find(GrCachedLayer::Key(pictureID, start, stop, ctm));
if (NULL == layer) {
layer = this->createLayer(picture, start, stop, ctm);
layer = this->createLayer(pictureID, start, stop, ctm);
}
return layer;
}
bool GrLayerCache::lock(GrCachedLayer* layer, const GrTextureDesc& desc) {
bool GrLayerCache::lock(GrCachedLayer* layer, const GrTextureDesc& desc, bool dontAtlas) {
SkDEBUGCODE(GrAutoValidateLayer avl(fAtlas->getTexture(), layer);)
if (layer->locked()) {
@@ -155,19 +154,21 @@ bool GrLayerCache::lock(GrCachedLayer* layer, const GrTextureDesc& desc) {
#ifdef SK_DEBUG
if (layer->isAtlased()) {
// It claims to be atlased
SkASSERT(!dontAtlas);
SkASSERT(layer->rect().width() == desc.fWidth);
SkASSERT(layer->rect().height() == desc.fHeight);
}
#endif
return true;
return false;
}
if (layer->isAtlased()) {
// Hooray it is still in the atlas - make sure it stays there
SkASSERT(!dontAtlas);
layer->setLocked(true);
fPlotLocks[layer->plot()->id()]++;
return true;
} else if (PlausiblyAtlasable(desc.fWidth, desc.fHeight)) {
return false;
} else if (!dontAtlas && PlausiblyAtlasable(desc.fWidth, desc.fHeight)) {
// Not in the atlas - will it fit?
GrPictureInfo* pictInfo = fPictureHash.find(layer->pictureID());
if (NULL == pictInfo) {
@@ -185,13 +186,13 @@ bool GrLayerCache::lock(GrCachedLayer* layer, const GrTextureDesc& desc) {
if (NULL != plot) {
// The layer was successfully added to the atlas
GrIRect16 bounds = GrIRect16::MakeXYWH(loc.fX, loc.fY,
SkToS16(desc.fWidth),
SkToS16(desc.fWidth),
SkToS16(desc.fHeight));
layer->setTexture(fAtlas->getTexture(), bounds);
layer->setPlot(plot);
layer->setLocked(true);
fPlotLocks[layer->plot()->id()]++;
return false;
return true;
}
// The layer was rejected by the atlas (even though we know it is
@@ -210,7 +211,7 @@ bool GrLayerCache::lock(GrCachedLayer* layer, const GrTextureDesc& desc) {
layer->setTexture(tex, GrIRect16::MakeWH(SkToS16(desc.fWidth), SkToS16(desc.fHeight)));
layer->setLocked(true);
return false;
return true;
}
void GrLayerCache::unlock(GrCachedLayer* layer) {
@@ -228,7 +229,7 @@ void GrLayerCache::unlock(GrCachedLayer* layer) {
fPlotLocks[plotID]--;
// At this point we could aggressively clear out un-locked plots but
// by delaying we may be able to reuse some of the atlased layers later.
#if 0
#if DISABLE_CACHING
// This testing code aggressively removes the atlased layers. This
// can be used to separate the performance contribution of less
// render target pingponging from that due to the re-use of cached layers
@@ -264,7 +265,9 @@ void GrLayerCache::validate() const {
if (NULL != pictInfo) {
// In aggressive cleanup mode a picture info should only exist if
// it has some atlased layers
#if !DISABLE_CACHING
SkASSERT(!pictInfo->fPlotUsage.isEmpty());
#endif
} else {
// If there is no picture info for this layer then all of its
// layers should be non-atlased.
@@ -341,41 +344,59 @@ bool GrLayerCache::purgePlot() {
continue;
}
// We need to find all the layers in 'plot' and remove them.
SkTDArray<GrCachedLayer*> toBeRemoved;
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::Iter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
if (plot == (*iter).plot()) {
*toBeRemoved.append() = &(*iter);
}
}
for (int i = 0; i < toBeRemoved.count(); ++i) {
SkASSERT(!toBeRemoved[i]->locked());
GrPictureInfo* pictInfo = fPictureHash.find(toBeRemoved[i]->pictureID());
SkASSERT(NULL != pictInfo);
GrAtlas::RemovePlot(&pictInfo->fPlotUsage, plot);
// Aggressively remove layers and, if now totally uncached, picture info
fLayerHash.remove(GrCachedLayer::GetKey(*toBeRemoved[i]));
SkDELETE(toBeRemoved[i]);
if (pictInfo->fPlotUsage.isEmpty()) {
fPictureHash.remove(pictInfo->fPictureID);
SkDELETE(pictInfo);
}
}
plot->resetRects();
this->purgePlot(plot);
return true;
}
return false;
}
void GrLayerCache::purgePlot(GrPlot* plot) {
SkASSERT(0 == fPlotLocks[plot->id()]);
// We need to find all the layers in 'plot' and remove them.
SkTDArray<GrCachedLayer*> toBeRemoved;
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::Iter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
if (plot == (*iter).plot()) {
*toBeRemoved.append() = &(*iter);
}
}
for (int i = 0; i < toBeRemoved.count(); ++i) {
SkASSERT(!toBeRemoved[i]->locked());
GrPictureInfo* pictInfo = fPictureHash.find(toBeRemoved[i]->pictureID());
SkASSERT(NULL != pictInfo);
GrAtlas::RemovePlot(&pictInfo->fPlotUsage, plot);
// Aggressively remove layers and, if now totally uncached, picture info
fLayerHash.remove(GrCachedLayer::GetKey(*toBeRemoved[i]));
SkDELETE(toBeRemoved[i]);
if (pictInfo->fPlotUsage.isEmpty()) {
fPictureHash.remove(pictInfo->fPictureID);
SkDELETE(pictInfo);
}
}
plot->resetRects();
}
void GrLayerCache::purgeAll() {
GrAtlas::PlotIter iter;
GrPlot* plot;
for (plot = fAtlas->iterInit(&iter, GrAtlas::kLRUFirst_IterOrder);
NULL != plot;
plot = iter.prev()) {
SkASSERT(0 == fPlotLocks[plot->id()]);
this->purgePlot(plot);
}
}
class GrPictureDeletionListener : public SkPicture::DeletionListener {
virtual void onDeletion(uint32_t pictureID) SK_OVERRIDE{
const GrPictureDeletedMessage message = { pictureID };


@@ -117,7 +117,7 @@ public:
const GrIRect16& rect() const { return fRect; }
void setPlot(GrPlot* plot) {
SkASSERT(NULL == fPlot);
SkASSERT(NULL == plot || NULL == fPlot);
fPlot = plot;
}
GrPlot* plot() { return fPlot; }
@@ -171,16 +171,15 @@ public:
// elements by the GrContext
void freeAll();
GrCachedLayer* findLayer(const SkPicture* picture, int start, int stop, const SkMatrix& ctm);
GrCachedLayer* findLayerOrCreate(const SkPicture* picture,
GrCachedLayer* findLayer(uint32_t pictureID, int start, int stop, const SkMatrix& ctm);
GrCachedLayer* findLayerOrCreate(uint32_t pictureID,
int start, int stop,
const SkMatrix& ctm);
// Inform the cache that layer's cached image is now required. Return true
// if it was found in the ResourceCache and doesn't need to be regenerated.
// If false is returned the caller should (re)render the layer into the
// newly acquired texture.
bool lock(GrCachedLayer* layer, const GrTextureDesc& desc);
// Inform the cache that layer's cached image is now required.
// Return true if the layer must be re-rendered. Return false if the
// layer was found in the cache and can be reused.
bool lock(GrCachedLayer* layer, const GrTextureDesc& desc, bool dontAtlas);
// Inform the cache that layer's cached image is not currently required
void unlock(GrCachedLayer* layer);
@@ -228,7 +227,10 @@ private:
int fPlotLocks[kNumPlotsX * kNumPlotsY];
void initAtlas();
GrCachedLayer* createLayer(const SkPicture* picture, int start, int stop, const SkMatrix& ctm);
GrCachedLayer* createLayer(uint32_t pictureID, int start, int stop, const SkMatrix& ctm);
public:
void purgeAll();
// Remove all the layers (and unlock any resources) associated with 'pictureID'
void purge(uint32_t pictureID);
@@ -237,6 +239,8 @@ private:
return width <= kPlotWidth && height <= kPlotHeight;
}
void purgePlot(GrPlot* plot);
// Try to find a purgeable plot and clear it out. Return true if a plot
// was purged; false otherwise.
bool purgePlot();
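
To make the flipped return value of lock() concrete, here is a
minimal usage sketch of the revised contract (the GrTextureDesc
values and the 'layerCache' variable are placeholders, loosely
mirroring the unit test changes further below):

    GrTextureDesc desc;
    desc.fWidth  = 512;   // placeholder layer bounds
    desc.fHeight = 512;
    desc.fConfig = kSkia8888_GrPixelConfig;

    // Third argument: pass true to keep the layer out of the atlas.
    bool needsRerendering = layerCache->lock(layer, desc, false);
    if (needsRerendering) {
        // draw the layer's content into layer->texture()
    }
    // ... draw the main scene using the cached layer ...
    layerCache->unlock(layer);   // the cached image is no longer required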


@@ -1989,7 +1989,7 @@ bool SkGpuDevice::EXPERIMENTAL_drawPicture(SkCanvas* mainCanvas, const SkPicture
if (pullForward[i]) {
const GPUAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i);
GrCachedLayer* layer = fContext->getLayerCache()->findLayerOrCreate(picture,
GrCachedLayer* layer = fContext->getLayerCache()->findLayerOrCreate(picture->uniqueID(),
info.fSaveLayerOpID,
info.fRestoreOpID,
info.fCTM);
@@ -2007,7 +2007,8 @@ bool SkGpuDevice::EXPERIMENTAL_drawPicture(SkCanvas* mainCanvas, const SkPicture
desc.fConfig = kSkia8888_GrPixelConfig;
// TODO: need to deal with sample count
bool needsRendering = !fContext->getLayerCache()->lock(layer, desc);
bool needsRendering = fContext->getLayerCache()->lock(layer, desc,
info.fHasNestedLayers || info.fIsNested);
if (NULL == layer->texture()) {
continue;
}
@@ -2126,13 +2127,22 @@ bool SkGpuDevice::EXPERIMENTAL_drawPicture(SkCanvas* mainCanvas, const SkPicture
for (int i = 0; i < gpuData->numSaveLayers(); ++i) {
const GPUAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i);
GrCachedLayer* layer = fContext->getLayerCache()->findLayer(picture,
GrCachedLayer* layer = fContext->getLayerCache()->findLayer(picture->uniqueID(),
info.fSaveLayerOpID,
info.fRestoreOpID,
info.fCTM);
fContext->getLayerCache()->unlock(layer);
}
#if DISABLE_CACHING
// This code completely clears out the atlas. It is required when
// caching is disabled so the atlas doesn't fill up and force more
// free floating layers
fContext->getLayerCache()->purge(picture->uniqueID());
fContext->getLayerCache()->purgeAll();
#endif
return true;
}


@@ -31,11 +31,11 @@ static void create_layers(skiatest::Reporter* reporter,
int idOffset) {
for (int i = 0; i < numToAdd; ++i) {
GrCachedLayer* layer = cache->findLayerOrCreate(&picture,
GrCachedLayer* layer = cache->findLayerOrCreate(picture.uniqueID(),
idOffset+i+1, idOffset+i+2,
SkMatrix::I());
REPORTER_ASSERT(reporter, NULL != layer);
GrCachedLayer* temp = cache->findLayer(&picture, idOffset+i+1, idOffset+i+2, SkMatrix::I());
GrCachedLayer* temp = cache->findLayer(picture.uniqueID(), idOffset+i+1, idOffset+i+2, SkMatrix::I());
REPORTER_ASSERT(reporter, temp == layer);
REPORTER_ASSERT(reporter, TestingAccess::NumLayers(cache) == idOffset + i + 1);
@@ -60,11 +60,11 @@ static void lock_layer(skiatest::Reporter* reporter,
desc.fHeight = 512;
desc.fConfig = kSkia8888_GrPixelConfig;
bool foundInCache = cache->lock(layer, desc);
REPORTER_ASSERT(reporter, !foundInCache);
bool needsRerendering = cache->lock(layer, desc, false);
REPORTER_ASSERT(reporter, needsRerendering);
foundInCache = cache->lock(layer, desc);
REPORTER_ASSERT(reporter, foundInCache);
needsRerendering = cache->lock(layer, desc, false);
REPORTER_ASSERT(reporter, !needsRerendering);
REPORTER_ASSERT(reporter, NULL != layer->texture());
REPORTER_ASSERT(reporter, layer->locked());
@@ -99,7 +99,7 @@ DEF_GPUTEST(GpuLayerCache, reporter, factory) {
create_layers(reporter, &cache, *picture, kInitialNumLayers, 0);
for (int i = 0; i < kInitialNumLayers; ++i) {
GrCachedLayer* layer = cache.findLayer(picture, i+1, i+2, SkMatrix::I());
GrCachedLayer* layer = cache.findLayer(picture->uniqueID(), i+1, i+2, SkMatrix::I());
REPORTER_ASSERT(reporter, NULL != layer);
lock_layer(reporter, &cache, layer);
@@ -116,14 +116,14 @@ DEF_GPUTEST(GpuLayerCache, reporter, factory) {
// Unlock the textures
for (int i = 0; i < kInitialNumLayers; ++i) {
GrCachedLayer* layer = cache.findLayer(picture, i+1, i+2, SkMatrix::I());
GrCachedLayer* layer = cache.findLayer(picture->uniqueID(), i+1, i+2, SkMatrix::I());
REPORTER_ASSERT(reporter, NULL != layer);
cache.unlock(layer);
}
for (int i = 0; i < kInitialNumLayers; ++i) {
GrCachedLayer* layer = cache.findLayer(picture, i+1, i+2, SkMatrix::I());
GrCachedLayer* layer = cache.findLayer(picture->uniqueID(), i+1, i+2, SkMatrix::I());
REPORTER_ASSERT(reporter, NULL != layer);
REPORTER_ASSERT(reporter, !layer->locked());
@@ -142,7 +142,7 @@ DEF_GPUTEST(GpuLayerCache, reporter, factory) {
// Add an additional layer. Since all the layers are unlocked this
// will force out the first atlased layer
create_layers(reporter, &cache, *picture, 1, kInitialNumLayers);
GrCachedLayer* layer = cache.findLayer(picture,
GrCachedLayer* layer = cache.findLayer(picture->uniqueID(),
kInitialNumLayers+1, kInitialNumLayers+2,
SkMatrix::I());
REPORTER_ASSERT(reporter, NULL != layer);
@@ -152,7 +152,7 @@ DEF_GPUTEST(GpuLayerCache, reporter, factory) {
}
for (int i = 0; i < kInitialNumLayers+1; ++i) {
GrCachedLayer* layer = cache.findLayer(picture, i+1, i+2, SkMatrix::I());
GrCachedLayer* layer = cache.findLayer(picture->uniqueID(), i+1, i+2, SkMatrix::I());
// 3 old layers plus the new one should be in the atlas.
if (1 == i || 2 == i || 3 == i || 5 == i) {
REPORTER_ASSERT(reporter, NULL != layer);