Replaced TextureCacheEntry with GrTexture* and a back pointer to GrResourceEntry (in GrTexture)

http://codereview.appspot.com/6460089/



git-svn-id: http://skia.googlecode.com/svn/trunk@5122 2bbb7eff-a529-9590-31e7-b0007b416f81
This commit is contained in:
robertphillips@google.com 2012-08-16 14:49:16 +00:00
Родитель 71329d809a
Коммит 1f47f4f732
16 изменённых файлов: 215 добавлений и 219 удалений

Просмотреть файл

@ -94,31 +94,9 @@ public:
///////////////////////////////////////////////////////////////////////////
// Textures
/**
* Token that refers to an entry in the texture cache. Returned by
* functions that lock textures. Passed to unlockTexture.
*/
class SK_API TextureCacheEntry {
public:
TextureCacheEntry() : fEntry(NULL) {}
TextureCacheEntry(const TextureCacheEntry& e) : fEntry(e.fEntry) {}
TextureCacheEntry& operator= (const TextureCacheEntry& e) {
fEntry = e.fEntry;
return *this;
}
GrTexture* texture() const;
void reset() { fEntry = NULL; }
GrResourceEntry* cacheEntry() { return fEntry; }
private:
explicit TextureCacheEntry(GrResourceEntry* entry) { fEntry = entry; }
void set(GrResourceEntry* entry) { fEntry = entry; }
GrResourceEntry* fEntry;
friend class GrContext;
};
/**
* Create a new entry, based on the specified key and texture, and return
* its "locked" entry. Must be balanced with an unlockTexture() call.
* a "locked" texture. Must be balanced with an unlockTexture() call.
*
* @param params The tex params used to draw a texture may help determine
* the cache entry used. (e.g. different versions may exist
@ -130,14 +108,14 @@ public:
* @param rowBytes The number of bytes between rows of the texture. Zero
* implies tightly packed rows.
*/
TextureCacheEntry createAndLockTexture(const GrTextureParams* params,
const GrTextureDesc& desc,
const GrCacheData& cacheData,
void* srcData, size_t rowBytes);
GrTexture* createAndLockTexture(const GrTextureParams* params,
const GrTextureDesc& desc,
const GrCacheData& cacheData,
void* srcData, size_t rowBytes);
/**
* Search for an entry based on key and dimensions. If found, "lock" it and
* return it. The entry's texture() function will return NULL if not found.
* return it. The return value will be NULL if not found.
* Must be balanced with an unlockTexture() call.
*
* @param desc Description of the texture properties.
@ -147,9 +125,9 @@ public:
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
*/
TextureCacheEntry findAndLockTexture(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrTextureParams* params);
GrTexture* findAndLockTexture(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrTextureParams* params);
/**
* Determines whether a texture is in the cache. If the texture is found it
* will not be locked or returned. This call does not affect the priority of
@ -191,19 +169,25 @@ public:
* such an API will create gaps in the tiling pattern. This includes clamp
* mode. (This may be addressed in a future update.)
*/
TextureCacheEntry lockScratchTexture(const GrTextureDesc& desc,
ScratchTexMatch match);
GrTexture* lockScratchTexture(const GrTextureDesc& desc,
ScratchTexMatch match);
/**
* When done with an entry, call unlockTexture(entry) on it, which returns
* it to the cache, where it may be purged.
*/
void unlockTexture(TextureCacheEntry entry);
void unlockTexture(GrTexture* texture);
/**
* Free any data associated with the provided entry in the texture cache
* Free any data associated with the provided entry in the texture cache.
* Currently this entry point is only used when a scratch texture is
* detached from the cache. In this case the GrResourceEntry* associated
* with the texture needs to be freed since it will be re-allocated when
* the texture is re-added. This entry point will be removed soon since the
* texture can now carry around a pointer to its GrResourceEntry* (and
* will eventually take over its functionality).
*/
void freeEntry(TextureCacheEntry entry);
void freeEntry(GrTexture* texture);
/**
* Creates a texture that is outside the cache. Does not count against
@ -753,8 +737,8 @@ public:
* a SB that matching an RT's criteria. If a match is found that has been
* unlocked (its attachment count has reached 0) then it will be relocked.
*/
GrResourceEntry* addAndLockStencilBuffer(GrStencilBuffer* sb);
void unlockStencilBuffer(GrResourceEntry* sbEntry);
void addAndLockStencilBuffer(GrStencilBuffer* sb);
void unlockStencilBuffer(GrStencilBuffer* sb);
GrStencilBuffer* findStencilBuffer(int width, int height, int sampleCnt);
GrPathRenderer* getPathRenderer(const SkPath& path,
@ -861,14 +845,16 @@ private:
class GrAutoScratchTexture : ::GrNoncopyable {
public:
GrAutoScratchTexture()
: fContext(NULL) {
: fContext(NULL)
, fTexture(NULL) {
}
GrAutoScratchTexture(GrContext* context,
const GrTextureDesc& desc,
GrContext::ScratchTexMatch match =
GrContext::kApprox_ScratchTexMatch)
: fContext(NULL) {
: fContext(NULL)
, fTexture(NULL) {
this->set(context, desc, match);
}
@ -877,9 +863,9 @@ public:
}
void reset() {
if (NULL != fContext && NULL != fEntry.cacheEntry()) {
fContext->unlockTexture(fEntry);
fEntry.reset();
if (NULL != fContext && NULL != fTexture) {
fContext->unlockTexture(fTexture);
fTexture = NULL;
}
}
@ -896,14 +882,14 @@ public:
* returned texture.
*/
GrTexture* detach() {
GrTexture* temp = this->texture();
GrTexture* temp = fTexture;
GrAssert(1 == temp->getRefCnt());
// freeEntry will remove the texture cache's ref
temp->ref();
fContext->freeEntry(fEntry);
fEntry.reset();
fContext->freeEntry(fTexture);
fTexture = NULL;
temp->setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
GrAssert(1 == temp->getRefCnt());
@ -918,21 +904,21 @@ public:
fContext = context;
if (NULL != fContext) {
fEntry = fContext->lockScratchTexture(desc, match);
GrTexture* ret = fEntry.texture();
if (NULL == ret) {
fTexture = fContext->lockScratchTexture(desc, match);
if (NULL == fTexture) {
fContext = NULL;
}
return ret;
return fTexture;
} else {
return NULL;
}
}
GrTexture* texture() { return fEntry.texture(); }
GrTexture* texture() { return fTexture; }
private:
GrContext* fContext;
GrContext::TextureCacheEntry fEntry;
GrTexture* fTexture;
};
#endif

Просмотреть файл

@ -14,6 +14,7 @@
class GrGpu;
class GrContext;
class GrResourceEntry;
/**
* Base class for the GPU resources created by a GrContext.
@ -52,7 +53,7 @@ public:
*
* @return the size of the buffer in bytes
*/
virtual size_t sizeInBytes() const = 0;
virtual size_t sizeInBytes() const = 0;
/**
* Retrieves the context that owns the resource. Note that it is possible
@ -60,8 +61,11 @@ public:
* abandon()ed they no longer have an owning context. Destroying a
* GrContext automatically releases all its resources.
*/
const GrContext* getContext() const;
GrContext* getContext();
const GrContext* getContext() const;
GrContext* getContext();
void setCacheEntry(GrResourceEntry* cacheEntry) { fCacheEntry = cacheEntry; }
GrResourceEntry* getCacheEntry() { return fCacheEntry; }
protected:
explicit GrResource(GrGpu* gpu);
@ -83,6 +87,8 @@ private:
GrResource* fNext; // dl-list of resources per-GrGpu
GrResource* fPrevious;
GrResourceEntry* fCacheEntry; // NULL if not in cache
typedef GrRefCnt INHERITED;
};

Просмотреть файл

@ -116,7 +116,6 @@ public:
class SkAutoCachedTexture; // used internally
protected:
typedef GrContext::TextureCacheEntry TexCache;
bool isBitmapInTextureCache(const SkBitmap& bitmap,
const GrTextureParams& params) const;
@ -133,7 +132,8 @@ private:
GrClipData fClipData;
// state for our offscreen render-target
TexCache fCache;
// TODO: remove 'fCached' and let fTexture automatically return to the cache
bool fCached; // is fTexture in the cache
GrTexture* fTexture;
GrRenderTarget* fRenderTarget;
bool fNeedClear;
@ -143,7 +143,7 @@ private:
void initFromRenderTarget(GrContext*, GrRenderTarget*);
// used by createCompatibleDevice
SkGpuDevice(GrContext*, GrTexture* texture, TexCache, bool needClear);
SkGpuDevice(GrContext*, GrTexture* texture, bool needClear);
// override from SkDevice
virtual SkDevice* onCreateCompatibleDevice(SkBitmap::Config config,

Просмотреть файл

@ -75,11 +75,11 @@ static inline GrColor SkColor2GrColor(SkColor c) {
////////////////////////////////////////////////////////////////////////////////
GrContext::TextureCacheEntry GrLockCachedBitmapTexture(GrContext*,
const SkBitmap&,
const GrTextureParams*);
GrTexture* GrLockCachedBitmapTexture(GrContext*,
const SkBitmap&,
const GrTextureParams*);
void GrUnlockCachedBitmapTexture(GrContext*, GrContext::TextureCacheEntry);
void GrUnlockCachedBitmapTexture(GrTexture*);
////////////////////////////////////////////////////////////////////////////////
// Classes

Просмотреть файл

@ -705,16 +705,15 @@ GrGradientEffect::GrGradientEffect(GrContext* ctx,
SkBitmap bitmap;
shader.getGradientTableBitmap(&bitmap);
GrContext::TextureCacheEntry entry = GrLockCachedBitmapTexture(ctx, bitmap,
sampler->textureParams());
fTexture = entry.texture();
fTexture = GrLockCachedBitmapTexture(ctx, bitmap,
sampler->textureParams());
SkSafeRef(fTexture);
fUseTexture = true;
// Unlock immediately, this is not great, but we don't have a way of
// knowing when else to unlock it currently, so it may get purged from
// the cache, but it'll still be ref'd until it's no longer being used.
GrUnlockCachedBitmapTexture(ctx, entry);
GrUnlockCachedBitmapTexture(fTexture);
}
GrGradientEffect::~GrGradientEffect() {

Просмотреть файл

@ -167,14 +167,6 @@ size_t GrContext::getGpuTextureCacheBytes() const {
////////////////////////////////////////////////////////////////////////////////
GrTexture* GrContext::TextureCacheEntry::texture() const {
if (NULL == fEntry) {
return NULL;
} else {
return (GrTexture*) fEntry->resource();
}
}
namespace {
void scale_rect(SkRect* rect, float xScale, float yScale) {
@ -237,12 +229,13 @@ void convolve_gaussian(GrGpu* gpu,
}
GrContext::TextureCacheEntry GrContext::findAndLockTexture(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrTextureParams* params) {
GrTexture* GrContext::findAndLockTexture(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrTextureParams* params) {
GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
return TextureCacheEntry(fTextureCache->findAndLock(resourceKey,
GrResourceCache::kNested_LockType));
GrResource* resource = fTextureCache->findAndLock(resourceKey,
GrResourceCache::kNested_LockType);
return static_cast<GrTexture*>(resource);
}
bool GrContext::isTextureInCache(const GrTextureDesc& desc,
@ -252,13 +245,13 @@ bool GrContext::isTextureInCache(const GrTextureDesc& desc,
return fTextureCache->hasKey(resourceKey);
}
GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
void GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
ASSERT_OWNED_RESOURCE(sb);
GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
sb->height(),
sb->numSamples());
return fTextureCache->createAndLock(resourceKey, sb);
fTextureCache->createAndLock(resourceKey, sb);
}
GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
@ -266,19 +259,16 @@ GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
height,
sampleCnt);
GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey,
GrResource* resource = fTextureCache->findAndLock(resourceKey,
GrResourceCache::kSingle_LockType);
if (NULL != entry) {
GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource();
return sb;
} else {
return NULL;
}
return static_cast<GrStencilBuffer*>(resource);
}
void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) {
ASSERT_OWNED_RESOURCE(sbEntry->resource());
fTextureCache->unlock(sbEntry);
void GrContext::unlockStencilBuffer(GrStencilBuffer* sb) {
ASSERT_OWNED_RESOURCE(sb);
GrAssert(NULL != sb->getCacheEntry());
fTextureCache->unlock(sb->getCacheEntry());
}
static void stretchImage(void* dst,
@ -315,12 +305,12 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
void* srcData,
size_t rowBytes,
bool needsFiltering) {
TextureCacheEntry clampEntry = this->findAndLockTexture(desc, cacheData, NULL);
GrTexture* clampedTexture = this->findAndLockTexture(desc, cacheData, NULL);
if (NULL == clampEntry.texture()) {
clampEntry = this->createAndLockTexture(NULL, desc, cacheData, srcData, rowBytes);
GrAssert(NULL != clampEntry.texture());
if (NULL == clampEntry.texture()) {
if (NULL == clampedTexture) {
clampedTexture = this->createAndLockTexture(NULL, desc, cacheData, srcData, rowBytes);
GrAssert(NULL != clampedTexture);
if (NULL == clampedTexture) {
return NULL;
}
}
@ -343,7 +333,7 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
// the original.
drawState->sampler(0)->reset(SkShader::kClamp_TileMode,
needsFiltering);
drawState->createTextureEffect(0, clampEntry.texture());
drawState->createTextureEffect(0, clampedTexture);
static const GrVertexLayout layout =
GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
@ -384,12 +374,12 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
stretchedRowBytes);
GrAssert(NULL != texture);
}
fTextureCache->unlock(clampEntry.cacheEntry());
this->unlockTexture(clampedTexture);
return texture;
}
GrContext::TextureCacheEntry GrContext::createAndLockTexture(
GrTexture* GrContext::createAndLockTexture(
const GrTextureParams* params,
const GrTextureDesc& desc,
const GrCacheData& cacheData,
@ -401,8 +391,6 @@ GrContext::TextureCacheEntry GrContext::createAndLockTexture(
GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif
TextureCacheEntry entry;
GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
GrTexture* texture = NULL;
@ -415,15 +403,14 @@ GrContext::TextureCacheEntry GrContext::createAndLockTexture(
}
if (NULL != texture) {
entry.set(fTextureCache->createAndLock(resourceKey, texture));
fTextureCache->createAndLock(resourceKey, texture);
}
return entry;
return texture;
}
GrContext::TextureCacheEntry GrContext::lockScratchTexture(
const GrTextureDesc& inDesc,
ScratchTexMatch match) {
GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc,
ScratchTexMatch match) {
GrTextureDesc desc = inDesc;
GrCacheData cacheData(GrCacheData::kScratch_CacheID);
@ -434,7 +421,7 @@ GrContext::TextureCacheEntry GrContext::lockScratchTexture(
desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
}
GrResourceEntry* entry;
GrResource* resource = NULL;
int origWidth = desc.fWidth;
int origHeight = desc.fHeight;
bool doubledW = false;
@ -442,11 +429,11 @@ GrContext::TextureCacheEntry GrContext::lockScratchTexture(
do {
GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, cacheData, true);
entry = fTextureCache->findAndLock(key,
GrResourceCache::kNested_LockType);
resource = fTextureCache->findAndLock(key,
GrResourceCache::kNested_LockType);
// if we miss, relax the fit of the flags...
// then try doubling width... then height.
if (NULL != entry || kExact_ScratchTexMatch == match) {
if (NULL != resource || kExact_ScratchTexMatch == match) {
break;
}
if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
@ -468,7 +455,7 @@ GrContext::TextureCacheEntry GrContext::lockScratchTexture(
} while (true);
if (NULL == entry) {
if (NULL == resource) {
desc.fFlags = inDesc.fFlags;
desc.fWidth = origWidth;
desc.fHeight = origHeight;
@ -478,17 +465,18 @@ GrContext::TextureCacheEntry GrContext::lockScratchTexture(
texture->desc(),
cacheData,
true);
entry = fTextureCache->createAndLock(key, texture);
fTextureCache->createAndLock(key, texture);
resource = texture;
}
}
// If the caller gives us the same desc/sampler twice we don't want
// to return the same texture the second time (unless it was previously
// released). So we detach the entry from the cache and reattach at release.
if (NULL != entry) {
fTextureCache->detach(entry);
if (NULL != resource) {
fTextureCache->detach(resource->getCacheEntry());
}
return TextureCacheEntry(entry);
return static_cast<GrTexture*>(resource);
}
void GrContext::addExistingTextureToCache(GrTexture* texture) {
@ -507,22 +495,26 @@ void GrContext::addExistingTextureToCache(GrTexture* texture) {
fTextureCache->attach(key, texture);
}
void GrContext::unlockTexture(TextureCacheEntry entry) {
ASSERT_OWNED_RESOURCE(entry.texture());
void GrContext::unlockTexture(GrTexture* texture) {
ASSERT_OWNED_RESOURCE(texture);
GrAssert(NULL != texture->getCacheEntry());
// If this is a scratch texture we detached it from the cache
// while it was locked (to avoid two callers simultaneously getting
// the same texture).
if (GrTexture::IsScratchTexture(entry.cacheEntry()->key())) {
fTextureCache->reattachAndUnlock(entry.cacheEntry());
if (GrTexture::IsScratchTexture(texture->getCacheEntry()->key())) {
fTextureCache->reattachAndUnlock(texture->getCacheEntry());
} else {
fTextureCache->unlock(entry.cacheEntry());
fTextureCache->unlock(texture->getCacheEntry());
}
}
void GrContext::freeEntry(TextureCacheEntry entry) {
ASSERT_OWNED_RESOURCE(entry.texture());
void GrContext::freeEntry(GrTexture* texture) {
ASSERT_OWNED_RESOURCE(texture);
GrAssert(NULL != texture->getCacheEntry());
fTextureCache->freeEntry(entry.cacheEntry());
fTextureCache->freeEntry(texture->getCacheEntry());
texture->setCacheEntry(NULL);
}
GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,

Просмотреть файл

@ -16,6 +16,7 @@ GrResource::GrResource(GrGpu* gpu) {
fGpu = gpu;
fNext = NULL;
fPrevious = NULL;
fCacheEntry = NULL;
fGpu->insertResource(this);
}

Просмотреть файл

@ -28,6 +28,7 @@ GrResourceEntry::~GrResourceEntry() {
void GrResourceEntry::validate() const {
GrAssert(fLockCount >= 0);
GrAssert(fResource);
GrAssert(fResource->getCacheEntry() == this);
fResource->validate();
}
#endif
@ -158,21 +159,24 @@ public:
#endif
};
GrResourceEntry* GrResourceCache::findAndLock(const GrResourceKey& key,
LockType type) {
GrResource* GrResourceCache::findAndLock(const GrResourceKey& key,
LockType type) {
GrAutoResourceCacheValidate atcv(this);
GrResourceEntry* entry = fCache.find(key);
if (entry) {
this->internalDetach(entry, false);
// mark the entry as "busy" so it doesn't get purged
// do this between detach and attach for locked count tracking
if (kNested_LockType == type || !entry->isLocked()) {
entry->lock();
}
this->attachToHead(entry, false);
if (NULL == entry) {
return NULL;
}
return entry;
this->internalDetach(entry, false);
// mark the entry as "busy" so it doesn't get purged
// do this between detach and attach for locked count tracking
if (kNested_LockType == type || !entry->isLocked()) {
entry->lock();
}
this->attachToHead(entry, false);
return entry->fResource;
}
bool GrResourceCache::hasKey(const GrResourceKey& key) const {
@ -192,6 +196,8 @@ GrResourceEntry* GrResourceCache::create(const GrResourceKey& key,
GrResourceEntry* entry = SkNEW_ARGS(GrResourceEntry, (key, resource));
resource->setCacheEntry(entry);
if (lock) {
// mark the entry as "busy" so it doesn't get purged
// do this before attach for locked count tracking
@ -210,13 +216,15 @@ GrResourceEntry* GrResourceCache::create(const GrResourceKey& key,
return entry;
}
GrResourceEntry* GrResourceCache::createAndLock(const GrResourceKey& key,
GrResource* resource) {
return this->create(key, resource, true, false);
void GrResourceCache::createAndLock(const GrResourceKey& key,
GrResource* resource) {
GrAssert(NULL == resource->getCacheEntry());
this->create(key, resource, true, false);
}
void GrResourceCache::attach(const GrResourceKey& key,
GrResource* resource) {
GrAssert(NULL == resource->getCacheEntry());
this->create(key, resource, false, true);
}

Просмотреть файл

@ -225,7 +225,7 @@ public:
* Search for an entry with the same Key. If found, "lock" it and return it.
* If not found, return null.
*/
GrResourceEntry* findAndLock(const GrResourceKey&, LockType style);
GrResource* findAndLock(const GrResourceKey&, LockType style);
/**
* Create a new cache entry, based on the provided key and resource, and
@ -234,7 +234,7 @@ public:
* Ownership of the resource is transferred to the resource cache,
* which will unref() it when it is purged or deleted.
*/
GrResourceEntry* createAndLock(const GrResourceKey&, GrResource*);
void createAndLock(const GrResourceKey&, GrResource*);
/**
* Create a new cache entry, based on the provided key and resource.

Просмотреть файл

@ -23,9 +23,11 @@ void GrStencilBuffer::wasDetachedFromRenderTarget(const GrRenderTarget* rt) {
}
void GrStencilBuffer::transferToCacheAndLock() {
GrAssert(NULL == fCacheEntry);
fCacheEntry =
this->getGpu()->getContext()->addAndLockStencilBuffer(this);
GrAssert(NULL == this->getCacheEntry());
GrAssert(!fHoldingLock);
this->getGpu()->getContext()->addAndLockStencilBuffer(this);
fHoldingLock = true;
}
void GrStencilBuffer::onRelease() {
@ -39,7 +41,7 @@ void GrStencilBuffer::onRelease() {
this->unlockInCache();
// we shouldn't be deleted here because some RT still has a ref on us.
}
fCacheEntry = NULL;
fHoldingLock = false;
}
void GrStencilBuffer::onAbandon() {
@ -48,11 +50,11 @@ void GrStencilBuffer::onAbandon() {
}
void GrStencilBuffer::unlockInCache() {
if (NULL != fCacheEntry) {
if (fHoldingLock) {
GrGpu* gpu = this->getGpu();
if (NULL != gpu) {
GrAssert(NULL != gpu->getContext());
gpu->getContext()->unlockStencilBuffer(fCacheEntry);
gpu->getContext()->unlockStencilBuffer(this);
}
}
}

Просмотреть файл

@ -84,7 +84,7 @@ protected:
, fLastClipData()
, fLastClipWidth(-1)
, fLastClipHeight(-1)
, fCacheEntry(NULL)
, fHoldingLock(false)
, fRTAttachmentCnt(0) {
}
@ -109,7 +109,7 @@ private:
int fLastClipWidth;
int fLastClipHeight;
GrResourceEntry* fCacheEntry;
bool fHoldingLock;
int fRTAttachmentCnt;
typedef GrResource INHERITED;

Просмотреть файл

@ -81,43 +81,47 @@ enum {
class SkGpuDevice::SkAutoCachedTexture : public ::SkNoncopyable {
public:
SkAutoCachedTexture() { }
SkAutoCachedTexture()
: fDevice(NULL)
, fTexture(NULL) {
}
SkAutoCachedTexture(SkGpuDevice* device,
const SkBitmap& bitmap,
const GrTextureParams* params,
GrTexture** texture) {
GrAssert(texture);
GrTexture** texture)
: fDevice(NULL)
, fTexture(NULL) {
GrAssert(NULL != texture);
*texture = this->set(device, bitmap, params);
}
~SkAutoCachedTexture() {
if (fTex.texture()) {
GrUnlockCachedBitmapTexture(fDevice->context(), fTex);
if (NULL != fTexture) {
GrUnlockCachedBitmapTexture(fTexture);
}
}
GrTexture* set(SkGpuDevice* device,
const SkBitmap& bitmap,
const GrTextureParams* params) {
if (fTex.texture()) {
GrUnlockCachedBitmapTexture(fDevice->context(), fTex);
if (NULL != fTexture) {
GrUnlockCachedBitmapTexture(fTexture);
fTexture = NULL;
}
fDevice = device;
GrTexture* texture = (GrTexture*)bitmap.getTexture();
if (texture) {
// return the native texture
fTex.reset();
} else {
// look it up in our cache
fTex = GrLockCachedBitmapTexture(device->context(), bitmap, params);
texture = fTex.texture();
GrTexture* result = (GrTexture*)bitmap.getTexture();
if (NULL == result) {
// Cannot return the native texture so look it up in our cache
fTexture = GrLockCachedBitmapTexture(device->context(), bitmap, params);
result = fTexture;
}
return texture;
return result;
}
private:
SkGpuDevice* fDevice;
GrContext::TextureCacheEntry fTex;
GrTexture* fTexture;
};
///////////////////////////////////////////////////////////////////////////////
@ -184,6 +188,7 @@ void SkGpuDevice::initFromRenderTarget(GrContext* context,
fContext = context;
fContext->ref();
fCached = false;
fTexture = NULL;
fRenderTarget = NULL;
fNeedClear = false;
@ -221,6 +226,7 @@ SkGpuDevice::SkGpuDevice(GrContext* context,
fContext = context;
fContext->ref();
fCached = false;
fTexture = NULL;
fRenderTarget = NULL;
fNeedClear = false;
@ -266,10 +272,9 @@ SkGpuDevice::~SkGpuDevice() {
SkSafeUnref(fTexture);
SkSafeUnref(fRenderTarget);
if (fCache.texture()) {
GrAssert(NULL != fTexture);
if (NULL != fTexture && fCached) {
GrAssert(fRenderTarget == fTexture->asRenderTarget());
fContext->unlockTexture(fCache);
fContext->unlockTexture(fTexture);
}
fContext->unref();
}
@ -1925,7 +1930,6 @@ SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,
desc.fHeight = height;
desc.fSampleCnt = fRenderTarget->numSamples();
GrContext::TextureCacheEntry cacheEntry;
GrTexture* texture;
SkAutoTUnref<GrTexture> tunref;
// Skia's convention is to only clear a device if it is non-opaque.
@ -1937,8 +1941,7 @@ SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,
GrContext::ScratchTexMatch matchType = (kSaveLayer_Usage == usage) ?
GrContext::kApprox_ScratchTexMatch :
GrContext::kExact_ScratchTexMatch;
cacheEntry = fContext->lockScratchTexture(desc, matchType);
texture = cacheEntry.texture();
texture = fContext->lockScratchTexture(desc, matchType);
#else
tunref.reset(fContext->createUncachedTexture(desc, NULL, 0));
texture = tunref.get();
@ -1946,7 +1949,6 @@ SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,
if (texture) {
return SkNEW_ARGS(SkGpuDevice,(fContext,
texture,
cacheEntry,
needClear));
} else {
GrPrintf("---- failed to create compatible device texture [%d %d]\n",
@ -1957,13 +1959,11 @@ SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,
SkGpuDevice::SkGpuDevice(GrContext* context,
GrTexture* texture,
TexCache cacheEntry,
bool needClear)
: SkDevice(make_bitmap(context, texture->asRenderTarget())) {
GrAssert(texture && texture->asRenderTarget());
GrAssert(NULL == cacheEntry.texture() || texture == cacheEntry.texture());
this->initFromRenderTarget(context, texture->asRenderTarget());
fCache = cacheEntry;
fCached = true;
fNeedClear = needClear;
}

Просмотреть файл

@ -56,15 +56,14 @@ static void build_compressed_data(void* buffer, const SkBitmap& bitmap) {
////////////////////////////////////////////////////////////////////////////////
static GrContext::TextureCacheEntry sk_gr_create_bitmap_texture(GrContext* ctx,
uint64_t key,
const GrTextureParams* params,
const SkBitmap& origBitmap) {
static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
uint64_t key,
const GrTextureParams* params,
const SkBitmap& origBitmap) {
SkAutoLockPixels alp(origBitmap);
GrContext::TextureCacheEntry entry;
if (!origBitmap.readyToDraw()) {
return entry;
return NULL;
}
SkBitmap tmpBitmap;
@ -97,12 +96,12 @@ static GrContext::TextureCacheEntry sk_gr_create_bitmap_texture(GrContext* ctx,
storage.get(),
bitmap->width());
} else {
entry = ctx->lockScratchTexture(desc,
GrContext::kExact_ScratchTexMatch);
entry.texture()->writePixels(0, 0, bitmap->width(),
bitmap->height(), desc.fConfig,
storage.get(), 0);
return entry;
GrTexture* result = ctx->lockScratchTexture(desc,
GrContext::kExact_ScratchTexMatch);
result->writePixels(0, 0, bitmap->width(),
bitmap->height(), desc.fConfig,
storage.get(), 0);
return result;
}
} else {
@ -125,23 +124,23 @@ static GrContext::TextureCacheEntry sk_gr_create_bitmap_texture(GrContext* ctx,
// cache so no one else can find it. Additionally, once unlocked, the
// scratch texture will go to the end of the list for purging so will
// likely be available for this volatile bitmap the next time around.
entry = ctx->lockScratchTexture(desc,
GrContext::kExact_ScratchTexMatch);
entry.texture()->writePixels(0, 0,
bitmap->width(), bitmap->height(),
desc.fConfig,
bitmap->getPixels(),
bitmap->rowBytes());
return entry;
GrTexture* result = ctx->lockScratchTexture(desc,
GrContext::kExact_ScratchTexMatch);
result->writePixels(0, 0,
bitmap->width(), bitmap->height(),
desc.fConfig,
bitmap->getPixels(),
bitmap->rowBytes());
return result;
}
}
///////////////////////////////////////////////////////////////////////////////
GrContext::TextureCacheEntry GrLockCachedBitmapTexture(GrContext* ctx,
const SkBitmap& bitmap,
const GrTextureParams* params) {
GrContext::TextureCacheEntry entry;
GrTexture* GrLockCachedBitmapTexture(GrContext* ctx,
const SkBitmap& bitmap,
const GrTextureParams* params) {
GrTexture* result = NULL;
if (!bitmap.isVolatile()) {
// If the bitmap isn't changing try to find a cached copy first
@ -155,22 +154,24 @@ GrContext::TextureCacheEntry GrLockCachedBitmapTexture(GrContext* ctx,
GrCacheData cacheData(key);
entry = ctx->findAndLockTexture(desc, cacheData, params);
if (NULL == entry.texture()) {
entry = sk_gr_create_bitmap_texture(ctx, key, params, bitmap);
result = ctx->findAndLockTexture(desc, cacheData, params);
if (NULL == result) {
result = sk_gr_create_bitmap_texture(ctx, key, params, bitmap);
}
} else {
entry = sk_gr_create_bitmap_texture(ctx, GrCacheData::kScratch_CacheID, params, bitmap);
result = sk_gr_create_bitmap_texture(ctx, GrCacheData::kScratch_CacheID, params, bitmap);
}
if (NULL == entry.texture()) {
if (NULL == result) {
GrPrintf("---- failed to create texture for cache [%d %d]\n",
bitmap.width(), bitmap.height());
}
return entry;
return result;
}
void GrUnlockCachedBitmapTexture(GrContext* ctx, GrContext::TextureCacheEntry cache) {
ctx->unlockTexture(cache);
void GrUnlockCachedBitmapTexture(GrTexture* texture) {
GrAssert(NULL != texture->getContext());
texture->getContext()->unlockTexture(texture);
}
///////////////////////////////////////////////////////////////////////////////

Просмотреть файл

@ -63,6 +63,7 @@ GrTextureStripAtlas::GrTextureStripAtlas(GrTextureStripAtlas::Desc desc)
, fLockedRows(0)
, fDesc(desc)
, fNumRows(desc.fHeight / desc.fRowHeight)
, fTexture(NULL)
, fRows(SkNEW_ARRAY(AtlasRow, fNumRows))
, fLRUFront(NULL)
, fLRUBack(NULL) {
@ -139,7 +140,7 @@ int GrTextureStripAtlas::lockRow(const SkBitmap& data) {
// Pass in the kDontFlush flag, since we know we're writing to a part of this texture
// that is not currently in use
fDesc.fContext->internalWriteTexturePixels(fEntry.texture(), 0,
fDesc.fContext->internalWriteTexturePixels(fTexture, 0,
rowNumber * fDesc.fRowHeight,
fDesc.fWidth,
fDesc.fRowHeight,
@ -182,20 +183,20 @@ void GrTextureStripAtlas::lockTexture() {
texDesc.fConfig = fDesc.fConfig;
GrCacheData cacheData(fCacheID);
cacheData.fResourceDomain = GetTextureStripAtlasDomain();
fEntry = fDesc.fContext->findAndLockTexture(texDesc, cacheData, &params);
if (NULL == fEntry.texture()) {
fEntry = fDesc.fContext->createAndLockTexture(&params, texDesc, cacheData, NULL, 0);
fTexture = fDesc.fContext->findAndLockTexture(texDesc, cacheData, &params);
if (NULL == fTexture) {
fTexture = fDesc.fContext->createAndLockTexture(&params, texDesc, cacheData, NULL, 0);
// This is a new texture, so all of our cache info is now invalid
this->initLRU();
fKeyTable.rewind();
}
GrAssert(NULL != fEntry.texture());
GrAssert(NULL != fTexture);
}
void GrTextureStripAtlas::unlockTexture() {
GrAssert(NULL != fEntry.texture() && 0 == fLockedRows);
fDesc.fContext->unlockTexture(fEntry);
fEntry.reset();
GrAssert(NULL != fTexture && 0 == fLockedRows);
fDesc.fContext->unlockTexture(fTexture);
fTexture = NULL;
}
void GrTextureStripAtlas::initLRU() {
@ -311,9 +312,9 @@ void GrTextureStripAtlas::validate() {
// If we have locked rows, we should have a locked texture, otherwise
// it should be unlocked
if (fLockedRows == 0) {
GrAssert(NULL == fEntry.texture());
GrAssert(NULL == fTexture);
} else {
GrAssert(NULL != fEntry.texture());
GrAssert(NULL != fTexture);
}
}
#endif

Просмотреть файл

@ -69,7 +69,7 @@ public:
GrScalar getVerticalScaleFactor() const { return SkIntToScalar(fDesc.fRowHeight) / fDesc.fHeight; }
GrContext* getContext() const { return fDesc.fContext; }
GrTexture* getTexture() const { return fEntry.texture(); }
GrTexture* getTexture() const { return fTexture; }
private:
@ -141,7 +141,7 @@ private:
const Desc fDesc;
const uint16_t fNumRows;
GrContext::TextureCacheEntry fEntry;
GrTexture* fTexture;
// Array of AtlasRows which store the state of all our rows. Stored in a contiguous array, in
// order that they appear in our texture, this means we can subtract this pointer from a row

Просмотреть файл

@ -1143,7 +1143,7 @@ bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt,
int width, int height) {
// All internally created RTs are also textures. We don't create
// SBs for a client's standalone RT (that is RT that isnt also a texture).
// SBs for a client's standalone RT (that is a RT that isn't also a texture).
GrAssert(rt->asTexture());
GrAssert(width >= rt->width());
GrAssert(height >= rt->height());