Bug 981315: Add ShmemSection and use it for gfxShmSharedReadLock. r=gal

This commit is contained in:
Bas Schouten 2014-03-13 06:17:05 +01:00
Родитель 750ae8d03a
Коммит 1a6bc3746e
6 изменённых файлов: 192 добавлений и 19 удалений

Просмотреть файл

@ -315,9 +315,7 @@ gfxShmSharedReadLock::gfxShmSharedReadLock(ISurfaceAllocator* aAllocator)
MOZ_COUNT_CTOR(gfxShmSharedReadLock);
MOZ_ASSERT(mAllocator);
if (mAllocator) {
#define MOZ_ALIGN_WORD(x) (((x) + 3) & ~3)
if (mAllocator->AllocUnsafeShmem(MOZ_ALIGN_WORD(sizeof(ShmReadLockInfo)),
mozilla::ipc::SharedMemory::TYPE_BASIC, &mShmem)) {
if (mAllocator->AllocShmemSection(MOZ_ALIGN_WORD(sizeof(ShmReadLockInfo)), &mShmemSection)) {
ShmReadLockInfo* info = GetShmReadLockInfoPtr();
info->readCount = 1;
mAllocSuccess = true;
@ -349,7 +347,7 @@ gfxShmSharedReadLock::ReadUnlock() {
int32_t readCount = PR_ATOMIC_DECREMENT(&info->readCount);
NS_ASSERTION(readCount >= 0, "ReadUnlock called without a ReadLock.");
if (readCount <= 0) {
mAllocator->DeallocShmem(mShmem);
mAllocator->FreeShmemSection(mShmemSection);
}
return readCount;
}
@ -548,10 +546,15 @@ TileClient::GetTileDescriptor()
// see TiledLayerBufferComposite::TiledLayerBufferComposite
mFrontLock->AddRef();
}
return TexturedTileDescriptor(nullptr, mFrontBuffer->GetIPDLActor(),
mFrontLock->GetType() == gfxSharedReadLock::TYPE_MEMORY
? TileLock(uintptr_t(mFrontLock.get()))
: TileLock(static_cast<gfxShmSharedReadLock*>(mFrontLock.get())->GetShmem()));
if (mFrontLock->GetType() == gfxSharedReadLock::TYPE_MEMORY) {
return TexturedTileDescriptor(nullptr, mFrontBuffer->GetIPDLActor(),
TileLock(uintptr_t(mFrontLock.get())));
} else {
gfxShmSharedReadLock *lock = static_cast<gfxShmSharedReadLock*>(mFrontLock.get());
return TexturedTileDescriptor(nullptr, mFrontBuffer->GetIPDLActor(),
TileLock(lock->GetShmemSection()));
}
}
void

Просмотреть файл

@ -108,19 +108,19 @@ public:
virtual gfxSharedReadLockType GetType() MOZ_OVERRIDE { return TYPE_SHMEM; }
mozilla::ipc::Shmem& GetShmem() { return mShmem; }
mozilla::layers::ShmemSection& GetShmemSection() { return mShmemSection; }
static already_AddRefed<gfxShmSharedReadLock>
Open(mozilla::layers::ISurfaceAllocator* aAllocator, const mozilla::ipc::Shmem& aShmem)
Open(mozilla::layers::ISurfaceAllocator* aAllocator, const mozilla::layers::ShmemSection& aShmemSection)
{
nsRefPtr<gfxShmSharedReadLock> readLock = new gfxShmSharedReadLock(aAllocator, aShmem);
nsRefPtr<gfxShmSharedReadLock> readLock = new gfxShmSharedReadLock(aAllocator, aShmemSection);
return readLock.forget();
}
private:
gfxShmSharedReadLock(ISurfaceAllocator* aAllocator, const mozilla::ipc::Shmem& aShmem)
gfxShmSharedReadLock(ISurfaceAllocator* aAllocator, const mozilla::layers::ShmemSection& aShmemSection)
: mAllocator(aAllocator)
, mShmem(aShmem)
, mShmemSection(aShmemSection)
, mAllocSuccess(true)
{
MOZ_COUNT_CTOR(gfxShmSharedReadLock);
@ -129,11 +129,11 @@ private:
ShmReadLockInfo* GetShmReadLockInfoPtr()
{
return reinterpret_cast<ShmReadLockInfo*>
(mShmem.get<char>() + mShmem.Size<char>() - sizeof(ShmReadLockInfo));
(mShmemSection.shmem().get<char>() + mShmemSection.offset());
}
RefPtr<ISurfaceAllocator> mAllocator;
mozilla::ipc::Shmem mShmem;
mozilla::layers::ShmemSection mShmemSection;
bool mAllocSuccess;
};

Просмотреть файл

@ -58,8 +58,8 @@ TiledLayerBufferComposite::TiledLayerBufferComposite(ISurfaceAllocator* aAllocat
texture = TextureHost::AsTextureHost(tileDesc.get_TexturedTileDescriptor().textureParent());
const TileLock& ipcLock = tileDesc.get_TexturedTileDescriptor().sharedLock();
nsRefPtr<gfxSharedReadLock> sharedLock;
if (ipcLock.type() == TileLock::TShmem) {
sharedLock = gfxShmSharedReadLock::Open(aAllocator, ipcLock.get_Shmem());
if (ipcLock.type() == TileLock::TShmemSection) {
sharedLock = gfxShmSharedReadLock::Open(aAllocator, ipcLock.get_ShmemSection());
} else {
sharedLock = reinterpret_cast<gfxMemorySharedReadLock*>(ipcLock.get_uintptr_t());
if (sharedLock) {

Просмотреть файл

@ -12,6 +12,7 @@
#include "gfxPlatform.h" // for gfxPlatform, gfxImageFormat
#include "gfxSharedImageSurface.h" // for gfxSharedImageSurface
#include "mozilla/Assertions.h" // for MOZ_ASSERT, etc
#include "mozilla/Atomics.h" // for PrimitiveIntrinsics
#include "mozilla/ipc/SharedMemory.h" // for SharedMemory, etc
#include "mozilla/layers/LayersSurfaces.h" // for SurfaceDescriptor, etc
#include "ShadowLayerUtils.h"
@ -44,6 +45,14 @@ IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface)
aSurface.type() != SurfaceDescriptor::Tnull_t;
}
// Destructor. Gives back any shmem heap pages that no longer contain live
// blocks; every ShmemSection must already have been released through
// FreeShmemSection by this point.
ISurfaceAllocator::~ISurfaceAllocator()
{
  ShrinkShmemSectionHeap();

  // Check if we're not leaking..
  MOZ_ASSERT(mUsedShmems.empty());
}
bool
ISurfaceAllocator::AllocSharedImageSurface(const gfx::IntSize& aSize,
gfxContentType aContent,
@ -177,5 +186,139 @@ ISurfaceAllocator::PlatformAllocSurfaceDescriptor(const gfx::IntSize&,
}
#endif
// XXX - We should actually figure out the minimum shmem allocation size on
// a certain platform and use that.
const uint32_t sShmemPageSize = 4096;

// The section heap currently hands out blocks of exactly this payload size
// (in bytes); AllocShmemSection asserts on it.
const uint32_t sSupportedBlockSize = 4;

// Lifecycle state of an individual block, stored in its allocation header.
enum AllocationStatus
{
  STATUS_ALLOCATED,
  STATUS_FREED
};

// Header placed at the start of every shmem page managed by the section
// heap. Atomic because the page lives in (unsafe) shared memory —
// presumably readable from the other process; TODO confirm who mutates it.
struct ShmemSectionHeapHeader
{
  Atomic<uint32_t> mTotalBlocks;      // Blocks ever carved out of this page.
  Atomic<uint32_t> mAllocatedBlocks;  // Blocks currently STATUS_ALLOCATED.
};

// Header that immediately precedes each block's payload within a page.
struct ShmemSectionHeapAllocation
{
  Atomic<uint32_t> mStatus;  // An AllocationStatus value.
  uint32_t mSize;            // Payload size in bytes (sSupportedBlockSize).
};
// Allocates a small fixed-size section out of a heap of page-sized unsafe
// shmems. Existing pages are reused when they have room, and blocks freed by
// FreeShmemSection are recycled before a page is grown.
//
// @param aSize          Requested payload size; only sSupportedBlockSize (4)
//                       is supported for now (asserted).
// @param aShmemSection  Out-parameter receiving the shmem, offset and size of
//                       the allocated section.
// @return true on success, false if a new shmem page could not be allocated.
bool
ISurfaceAllocator::AllocShmemSection(size_t aSize, mozilla::layers::ShmemSection* aShmemSection)
{
  // For now we only support sizes of 4. If we want to support different sizes
  // some more complicated bookkeeping should be added.
  MOZ_ASSERT(aSize == sSupportedBlockSize);
  MOZ_ASSERT(aShmemSection);

  // Every block carries its own ShmemSectionHeapAllocation header.
  uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation));

  // First pass: find an existing page with room for one more block.
  for (size_t i = 0; i < mUsedShmems.size(); i++) {
    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
    if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) {
      aShmemSection->shmem() = mUsedShmems[i];
      MOZ_ASSERT(mUsedShmems[i].IsWritable());
      break;
    }
  }

  // No page had room (shmem() is still the default, non-writable Shmem):
  // allocate and initialize a fresh page.
  if (!aShmemSection->shmem().IsWritable()) {
    ipc::Shmem tmp;
    if (!AllocUnsafeShmem(sShmemPageSize, ipc::SharedMemory::TYPE_BASIC, &tmp)) {
      return false;
    }

    ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>();
    header->mTotalBlocks = 0;
    header->mAllocatedBlocks = 0;

    mUsedShmems.push_back(tmp);
    aShmemSection->shmem() = tmp;
  }

  MOZ_ASSERT(aShmemSection->shmem().IsWritable());

  ShmemSectionHeapHeader* header = aShmemSection->shmem().get<ShmemSectionHeapHeader>();
  uint8_t* heap = aShmemSection->shmem().get<uint8_t>() + sizeof(ShmemSectionHeapHeader);

  ShmemSectionHeapAllocation* allocHeader = nullptr;

  if (header->mTotalBlocks > header->mAllocatedBlocks) {
    // At least one previously-freed block exists in this page; reuse it.
    // Search for the first available block.
    for (size_t i = 0; i < header->mTotalBlocks; i++) {
      allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);

      if (allocHeader->mStatus == STATUS_FREED) {
        break;
      }
      heap += allocationSize;
    }
    MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED);
    MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize);
  } else {
    // No freed blocks: carve a new one off the end of the used area.
    heap += header->mTotalBlocks * allocationSize;
    header->mTotalBlocks++;
    allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);

    allocHeader->mSize = aSize;
  }

  MOZ_ASSERT(allocHeader);
  header->mAllocatedBlocks++;
  allocHeader->mStatus = STATUS_ALLOCATED;

  // The section's offset points at the payload, just past the block header.
  aShmemSection->size() = aSize;
  aShmemSection->offset() = (heap + sizeof(ShmemSectionHeapAllocation)) - aShmemSection->shmem().get<uint8_t>();
  // NOTE(review): shrinking right after allocating looks redundant — the page
  // just used has mAllocatedBlocks > 0 so it cannot be freed here; only other,
  // fully-empty pages would be. Confirm this is intentional.
  ShrinkShmemSectionHeap();
  return true;
}
// Releases a section previously handed out by AllocShmemSection: marks the
// block as freed, updates the owning page's bookkeeping, and lets the heap
// give back pages that no longer contain live blocks.
void
ISurfaceAllocator::FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection)
{
  MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize);
  MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize);

  // The block's allocation header sits immediately before its payload.
  char* payload = aShmemSection.shmem().get<char>() + aShmemSection.offset();
  ShmemSectionHeapAllocation* blockHeader =
    reinterpret_cast<ShmemSectionHeapAllocation*>(payload -
                                                  sizeof(ShmemSectionHeapAllocation));

  MOZ_ASSERT(blockHeader->mSize == aShmemSection.size());

  // Atomically flip ALLOCATED -> FREED; a failed exchange would indicate a
  // double free or a corrupted header.
  DebugOnly<bool> wasAllocated =
    blockHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED);
  // If this fails something really weird is going on.
  MOZ_ASSERT(wasAllocated);

  ShmemSectionHeapHeader* pageHeader =
    aShmemSection.shmem().get<ShmemSectionHeapHeader>();
  pageHeader->mAllocatedBlocks--;

  ShrinkShmemSectionHeap();
}
// Walks the heap's shmem pages and deallocates every page whose blocks are
// all freed (mAllocatedBlocks == 0), returning the shared memory to the
// system.
//
// Fix: the previous version executed `i--; break;` after freeing a page —
// the `break` made the `i--` dead code and meant at most ONE empty page was
// reclaimed per call, even though the loop was written to scan them all.
// The destructor asserts mUsedShmems.empty() right after calling this, so
// leaving a second empty page behind could trip that assertion. (The dead
// `i--` would also underflow the size_t index at i == 0 if the break were
// ever removed.)
void
ISurfaceAllocator::ShrinkShmemSectionHeap()
{
  size_t i = 0;
  while (i < mUsedShmems.size()) {
    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
    if (header->mAllocatedBlocks == 0) {
      DeallocShmem(mUsedShmems[i]);
      // We don't particularly care about order, move the last one in the array
      // to this position.
      mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1];
      mUsedShmems.pop_back();
      // Re-examine index i: it now holds the element moved from the back.
    } else {
      i++;
    }
  }
}
} // namespace
} // namespace

Просмотреть файл

@ -14,7 +14,9 @@
#include "mozilla/RefPtr.h"
#include "nsIMemoryReporter.h" // for nsIMemoryReporter
#include "mozilla/Atomics.h" // for Atomic
#include "mozilla/layers/LayersMessages.h" // for ShmemSection
#include "LayersTypes.h"
#include <vector>
/*
* FIXME [bjacob] *** PURE CRAZYNESS WARNING ***
@ -106,6 +108,20 @@ public:
virtual bool AllocUnsafeShmem(size_t aSize,
mozilla::ipc::SharedMemory::SharedMemoryType aType,
mozilla::ipc::Shmem* aShmem) = 0;
/**
* Allocate memory in shared memory that can always be accessed by both
* processes at a time. Safety is left for the user of the memory to care
* about.
*/
bool AllocShmemSection(size_t aSize,
mozilla::layers::ShmemSection* aShmemSection);
/**
* Deallocates a shmem section.
*/
void FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection);
/**
* Deallocate memory allocated by either AllocShmem or AllocUnsafeShmem.
*/
@ -158,7 +174,12 @@ protected:
SurfaceDescriptor* aBuffer);
virtual ~ISurfaceAllocator() {}
virtual ~ISurfaceAllocator();
void ShrinkShmemSectionHeap();
// This is used to implement an extremely simple & naive heap allocator.
std::vector<mozilla::ipc::Shmem> mUsedShmems;
friend class detail::RefCounted<ISurfaceAllocator, detail::AtomicRefCount>;
};

Просмотреть файл

@ -255,8 +255,14 @@ struct OpRaiseToTopChild { PLayer container; PLayer childLayer; };
struct OpSetDiagnosticTypes { DiagnosticTypes diagnostics; };
struct ShmemSection {
Shmem shmem;
uint32_t offset;
size_t size;
};
union TileLock {
Shmem;
ShmemSection;
uintptr_t;
};