Revert "Vulkan:Adding custom pool allocator"

This reverts commit 9417174961.

Reason for revert: crashes on Debug bots

Original change's description:
> Vulkan:Adding custom pool allocator
> 
> Migrated pool allocator used by compiler to common.
> 
> Planning to use this for ANGLE custom command buffers, so this is some
> refactoring in preparation for that work.
> 
> Added a unit test to check PoolAllocator functionality.
> 
> Bug: angleproject:2951
> Change-Id: I29618cfdb065b8a5fefd40719a35d27b1f6e99ef
> Reviewed-on: https://chromium-review.googlesource.com/c/1476953
> Reviewed-by: Jamie Madill <jmadill@google.com>
> Reviewed-by: Shahbaz Youssefi <syoussefi@chromium.org>
> Commit-Queue: Tobin Ehlis <tobine@google.com>

TBR=ynovikov@chromium.org,jmadill@google.com,tobine@google.com,syoussefi@chromium.org

Change-Id: Id8c522bd1d94154e871211d975e801a55cc9c257
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: angleproject:2951, angleproject:3169
Reviewed-on: https://chromium-review.googlesource.com/c/1487977
Reviewed-by: Yuly Novikov <ynovikov@chromium.org>
Commit-Queue: Yuly Novikov <ynovikov@chromium.org>
This commit is contained in:
Yuly Novikov 2019-02-25 22:47:17 +00:00 committed by Commit Bot
Parent ce4dc5694f
Commit 0546b53807
17 changed files: 558 additions and 662 deletions
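For context, the reverted change moved the compiler's pool allocator into src/common as angle::PoolAllocator, keeping its push/allocate/pop interface. A minimal usage sketch based on the header removed below (the function and variable names here are illustrative, not part of the change):

    #include "common/PoolAlloc.h"

    void Example()
    {
        // Defaults: 8 KB growth increment, 16-byte allocation alignment.
        angle::PoolAllocator pool;
        pool.push();                    // mark a point to pop back to
        void *mem = pool.allocate(64);  // bump-allocate from the current page
        // ... use mem; there is no per-allocation free ...
        pool.pop();  // frees everything allocated since the matching push()
        // pool.popAll() would instead free every allocation made so far.
    }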

View file

@@ -193,7 +193,7 @@ angle_static_library("preprocessor") {
}
config("translator_disable_pool_alloc") {
defines = [ "ANGLE_DISABLE_POOL_ALLOC" ]
defines = [ "ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC" ]
}
config("debug_annotations_config") {

View file

@@ -1,320 +0,0 @@
//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.cpp:
// Implements the class methods for PoolAllocator and Allocation classes.
//
#include "common/PoolAlloc.h"
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include "common/angleutils.h"
#include "common/debug.h"
#include "common/platform.h"
#include "common/tls.h"
namespace angle
{
//
// Implement the functionality of the PoolAllocator class, which
// is documented in PoolAlloc.h.
//
PoolAllocator::PoolAllocator(int growthIncrement, int allocationAlignment)
: mAlignment(allocationAlignment),
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
mPageSize(growthIncrement),
mFreeList(0),
mInUseList(0),
mNumCalls(0),
mTotalBytes(0),
#endif
mLocked(false)
{
//
// Adjust mAlignment to be at least pointer aligned and
// power of 2.
//
size_t minAlign = sizeof(void *);
mAlignment &= ~(minAlign - 1);
if (mAlignment < minAlign)
mAlignment = minAlign;
size_t a = 1;
while (a < mAlignment)
a <<= 1;
mAlignment = a;
mAlignmentMask = a - 1;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
//
// Don't allow page sizes we know are smaller than all common
// OS page sizes.
//
if (mPageSize < 4 * 1024)
mPageSize = 4 * 1024;
//
// A large mCurrentPageOffset indicates a new page needs to
// be obtained to allocate memory.
//
mCurrentPageOffset = mPageSize;
//
// Align header skip
//
mHeaderSkip = minAlign;
if (mHeaderSkip < sizeof(Header))
{
mHeaderSkip = (sizeof(Header) + mAlignmentMask) & ~mAlignmentMask;
}
#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
mStack.push_back({});
#endif
}
PoolAllocator::~PoolAllocator()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
while (mInUseList)
{
Header *next = mInUseList->nextPage;
mInUseList->~Header();
delete[] reinterpret_cast<char *>(mInUseList);
mInUseList = next;
}
// We should not check the guard blocks
// here, because we did it already when the block was
// placed into the free list.
//
while (mFreeList)
{
Header *next = mFreeList->nextPage;
delete[] reinterpret_cast<char *>(mFreeList);
mFreeList = next;
}
#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
for (auto &allocs : mStack)
{
for (auto alloc : allocs)
{
free(alloc);
}
}
mStack.clear();
#endif
}
//
// Check a single guard block for damage
//
void Allocation::checkGuardBlock(unsigned char *blockMem,
unsigned char val,
const char *locText) const
{
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
for (size_t x = 0; x < kGuardBlockSize; x++)
{
if (blockMem[x] != val)
{
char assertMsg[80];
// We don't print the assert message. It's here just to be helpful.
snprintf(assertMsg, sizeof(assertMsg),
"PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, mSize, data());
assert(0 && "PoolAlloc: Damage in guard block");
}
}
#endif
}
void PoolAllocator::push()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
AllocState state = {mCurrentPageOffset, mInUseList};
mStack.push_back(state);
//
// Indicate there is no current page to allocate from.
//
mCurrentPageOffset = mPageSize;
#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
mStack.push_back({});
#endif
}
//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void PoolAllocator::pop()
{
if (mStack.size() < 1)
return;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
Header *page = mStack.back().page;
mCurrentPageOffset = mStack.back().offset;
while (mInUseList != page)
{
// invoke destructor to free allocation list
mInUseList->~Header();
Header *nextInUse = mInUseList->nextPage;
if (mInUseList->pageCount > 1)
delete[] reinterpret_cast<char *>(mInUseList);
else
{
mInUseList->nextPage = mFreeList;
mFreeList = mInUseList;
}
mInUseList = nextInUse;
}
mStack.pop_back();
#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
for (auto &alloc : mStack.back())
{
free(alloc);
}
mStack.pop_back();
#endif
}
//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void PoolAllocator::popAll()
{
while (mStack.size() > 0)
pop();
}
void *PoolAllocator::allocate(size_t numBytes)
{
ASSERT(!mLocked);
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
//
// Just keep some interesting statistics.
//
++mNumCalls;
mTotalBytes += numBytes;
// If we are using guard blocks, all allocations are bracketed by
// them: [guardblock][allocation][guardblock]. numBytes is how
// much memory the caller asked for. allocationSize is the total
// size including guard blocks. In release build,
// kGuardBlockSize=0 and this all gets optimized away.
size_t allocationSize = Allocation::AllocationSize(numBytes) + mAlignment;
// Detect integer overflow.
if (allocationSize < numBytes)
return 0;
//
// Do the allocation, most likely case first, for efficiency.
// This step could be moved to be inline sometime.
//
if (allocationSize <= mPageSize - mCurrentPageOffset)
{
//
// Safe to allocate from mCurrentPageOffset.
//
unsigned char *memory = reinterpret_cast<unsigned char *>(mInUseList) + mCurrentPageOffset;
mCurrentPageOffset += allocationSize;
mCurrentPageOffset = (mCurrentPageOffset + mAlignmentMask) & ~mAlignmentMask;
return initializeAllocation(mInUseList, memory, numBytes);
}
if (allocationSize > mPageSize - mHeaderSkip)
{
//
// Do a multi-page allocation. Don't mix these with the others.
// The OS is efficient in allocating and freeing multiple pages.
//
size_t numBytesToAlloc = allocationSize + mHeaderSkip;
// Detect integer overflow.
if (numBytesToAlloc < allocationSize)
return 0;
Header *memory = reinterpret_cast<Header *>(::new char[numBytesToAlloc]);
if (memory == 0)
return 0;
// Use placement-new to initialize header
new (memory) Header(mInUseList, (numBytesToAlloc + mPageSize - 1) / mPageSize);
mInUseList = memory;
mCurrentPageOffset = mPageSize; // make next allocation come from a new page
// No guard blocks for multi-page allocations (yet)
void *unalignedPtr =
reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memory) + mHeaderSkip);
return std::align(mAlignment, numBytes, unalignedPtr, allocationSize);
}
//
// Need a simple page to allocate from.
//
Header *memory;
if (mFreeList)
{
memory = mFreeList;
mFreeList = mFreeList->nextPage;
}
else
{
memory = reinterpret_cast<Header *>(::new char[mPageSize]);
if (memory == 0)
return 0;
}
// Use placement-new to initialize header
new (memory) Header(mInUseList, 1);
mInUseList = memory;
unsigned char *ret = reinterpret_cast<unsigned char *>(mInUseList) + mHeaderSkip;
mCurrentPageOffset = (mHeaderSkip + allocationSize + mAlignmentMask) & ~mAlignmentMask;
return initializeAllocation(mInUseList, ret, numBytes);
#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
void *alloc = malloc(numBytes + mAlignmentMask);
mStack.back().push_back(alloc);
intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
intAlloc = (intAlloc + mAlignmentMask) & ~mAlignmentMask;
return reinterpret_cast<void *>(intAlloc);
#endif
}
void PoolAllocator::lock()
{
ASSERT(!mLocked);
mLocked = true;
}
void PoolAllocator::unlock()
{
ASSERT(mLocked);
mLocked = false;
}
//
// Check all allocations in a list for damage by calling check on each.
//
void Allocation::checkAllocList() const
{
for (const Allocation *alloc = this; alloc != 0; alloc = alloc->mPrevAlloc)
alloc->check();
}
} // namespace angle

View file

@@ -1,242 +0,0 @@
//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.h:
// Defines the class interface for PoolAllocator and the Allocation
// class that it uses internally.
//
#ifndef COMMON_POOLALLOC_H_
#define COMMON_POOLALLOC_H_
#if !defined(NDEBUG)
# define ANGLE_POOL_ALLOC_GUARD_BLOCKS // define to enable guard block sanity checking
#endif
//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
// typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
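// A hypothetical illustration (not part of this header) of the class-level
// new/delete pattern mentioned above; the Node class and the placement-style
// operators are assumptions made only for this sketch:
//
//   class Node
//   {
//     public:
//       void *operator new(size_t size, angle::PoolAllocator &pool) { return pool.allocate(size); }
//       void operator delete(void *, angle::PoolAllocator &) {}  // storage is reclaimed by pop()
//   };
//
//   Node *node = new (pool) Node();  // freed in bulk when pool.pop()/popAll() runs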
#include <stddef.h>
#include <string.h>
#include <memory>
#include <vector>
#include "angleutils.h"
namespace angle
{
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//
class Allocation
{
public:
Allocation(size_t size, unsigned char *mem, Allocation *prev = 0)
: mSize(size), mMem(mem), mPrevAlloc(prev)
{
// Allocations are bracketed:
// [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// This would be cleaner with if (kGuardBlockSize)..., but that
// makes the compiler print warnings about 0 length memsets,
// even with the if() protecting them.
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
memset(data(), kUserDataFill, mSize);
memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
#endif
}
void check() const
{
checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
}
void checkAllocList() const;
// Return total size needed to accommodate user buffer of 'size',
// plus our tracking data.
static size_t AllocationSize(size_t size) { return size + 2 * kGuardBlockSize + HeaderSize(); }
// Offset from surrounding buffer to get to user data buffer.
static unsigned char *OffsetAllocation(unsigned char *m)
{
return m + kGuardBlockSize + HeaderSize();
}
private:
void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;
// Find offsets to pre and post guard blocks, and user data buffer
unsigned char *preGuard() const { return mMem + HeaderSize(); }
unsigned char *data() const { return preGuard() + kGuardBlockSize; }
unsigned char *postGuard() const { return data() + mSize; }
size_t mSize; // size of the user data area
unsigned char *mMem; // beginning of our allocation (pts to header)
Allocation *mPrevAlloc; // prior allocation in the chain
static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
static constexpr unsigned char kGuardBlockEndVal = 0xfe;
static constexpr unsigned char kUserDataFill = 0xcd;
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
static constexpr size_t kGuardBlockSize = 16;
static constexpr size_t HeaderSize() { return sizeof(Allocation); }
#else
static constexpr size_t kGuardBlockSize = 0;
static constexpr size_t HeaderSize() { return 0; }
#endif
};
//
// There are several stacks. One is to track the pushing and popping
// of the user, and not yet implemented. The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used is not, nor must it match, the underlying OS
// page size. But, having it be about that size or equal to a set of
// pages is likely most optimal.
//
class PoolAllocator : angle::NonCopyable
{
public:
static const int kDefaultAlignment = 16;
PoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = kDefaultAlignment);
//
// Don't call the destructor just to free up the memory, call pop()
//
~PoolAllocator();
//
// Call push() to establish a new place to pop memory to. Does not
// have to be called to get things started.
//
void push();
//
// Call pop() to free all memory allocated since the last call to push(),
// or if no last call to push, frees all memory since first allocation.
//
void pop();
//
// Call popAll() to free all memory allocated.
//
void popAll();
//
// Call allocate() to actually acquire memory. Returns 0 if no memory
// available, otherwise a properly aligned pointer to 'numBytes' of memory.
//
void *allocate(size_t numBytes);
//
// There is no deallocate. The point of this class is that
// deallocation can be skipped by the user of it, as the model
// of use is to simultaneously deallocate everything at once
// by calling pop(), and to not have to solve memory leak problems.
//
// Catch unwanted allocations.
// TODO(jmadill): Remove this when we remove the global allocator.
void lock();
void unlock();
private:
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
friend struct Header;
struct Header
{
Header(Header *nextPage, size_t pageCount)
: nextPage(nextPage),
pageCount(pageCount)
# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
,
lastAllocation(0)
# endif
{}
~Header()
{
# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
if (lastAllocation)
lastAllocation->checkAllocList();
# endif
}
Header *nextPage;
size_t pageCount;
# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
Allocation *lastAllocation;
# endif
};
struct AllocState
{
size_t offset;
Header *page;
};
using AllocStack = std::vector<AllocState>;
// Track allocations if and only if we're using guard blocks
void *initializeAllocation(Header *block, unsigned char *memory, size_t numBytes)
{
# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
new (memory) Allocation(numBytes, memory, block->lastAllocation);
block->lastAllocation = reinterpret_cast<Allocation *>(memory);
# endif
// The OffsetAllocation() call is optimized away if !defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
void *unalignedPtr = Allocation::OffsetAllocation(memory);
size_t alignedBytes = numBytes + mAlignment;
return std::align(mAlignment, numBytes, unalignedPtr, alignedBytes);
}
size_t mAlignment; // all returned allocations will be aligned at
// this granularity, which will be a power of 2
size_t mAlignmentMask;
size_t mPageSize; // granularity of allocation from the OS
size_t mHeaderSkip; // amount of memory to skip to make room for the
// header (basically, size of header, rounded
// up to make it aligned
size_t mCurrentPageOffset; // next offset in top of inUseList to allocate from
Header *mFreeList; // list of popped memory
Header *mInUseList; // list of all memory currently being used
AllocStack mStack; // stack of where to allocate from, to partition pool
int mNumCalls; // just an interesting statistic
size_t mTotalBytes; // just an interesting statistic
#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
std::vector<std::vector<void *>> mStack;
#endif
bool mLocked;
};
} // namespace angle
#endif // COMMON_POOLALLOC_H_

View file

@@ -1,74 +0,0 @@
//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc_unittest:
// Tests of the PoolAlloc class
//
#include <gtest/gtest.h>
#include "common/PoolAlloc.h"
namespace angle
{
// Verify the public interface of PoolAllocator class
TEST(PoolAllocatorTest, Interface)
{
size_t numBytes = 1024;
constexpr uint32_t kTestValue = 0xbaadbeef;
// Create a default pool allocator and allocate from it
PoolAllocator poolAllocator;
void *allocation = poolAllocator.allocate(numBytes);
// Verify non-zero ptr returned
EXPECT_NE(nullptr, allocation);
// Write to allocation to check later
uint32_t *writePtr = static_cast<uint32_t *>(allocation);
*writePtr = kTestValue;
// Test push and creating a new allocation
poolAllocator.push();
allocation = poolAllocator.allocate(numBytes);
EXPECT_NE(nullptr, allocation);
// Make an allocation that spans multiple pages
allocation = poolAllocator.allocate(10 * 1024);
// pop previous two allocations
poolAllocator.pop();
// Verify first allocation still has data
EXPECT_EQ(kTestValue, *writePtr);
// Make a bunch of allocations
for (uint32_t i = 0; i < 1000; ++i)
{
allocation = poolAllocator.allocate(rand() % 2000 + 1);
EXPECT_NE(nullptr, allocation);
}
// Free everything
poolAllocator.popAll();
}
#if !defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
// Verify allocations are correctly aligned for different alignments
class PoolAllocatorAlignmentTest : public testing::TestWithParam<int>
{};
TEST_P(PoolAllocatorAlignmentTest, Alignment)
{
int alignment = GetParam();
// Create a pool allocator to allocate from
PoolAllocator poolAllocator(4096, alignment);
// Test a number of allocation sizes for each alignment
for (uint32_t i = 0; i < 100; ++i)
{
// Vary the allocation size around 4k to hit some multi-page allocations
void *allocation = poolAllocator.allocate((rand() % (1024 * 4)) + 1);
// Verify alignment of allocation matches expected default
EXPECT_EQ(0u, (reinterpret_cast<std::uintptr_t>(allocation) % alignment));
}
}
INSTANTIATE_TEST_SUITE_P(,
PoolAllocatorAlignmentTest,
testing::Values(2, 4, 8, 16, 32, 64, 128),
testing::PrintToStringParamName());
#endif
} // namespace angle

View file

@@ -170,7 +170,7 @@ namespace
class TScopedPoolAllocator
{
public:
TScopedPoolAllocator(angle::PoolAllocator *allocator) : mAllocator(allocator)
TScopedPoolAllocator(TPoolAllocator *allocator) : mAllocator(allocator)
{
mAllocator->push();
SetGlobalPoolAllocator(mAllocator);
@@ -182,7 +182,7 @@ class TScopedPoolAllocator
}
private:
angle::PoolAllocator *mAllocator;
TPoolAllocator *mAllocator;
};
class TScopedSymbolTableLevel

View file

@@ -67,7 +67,7 @@ class TShHandleBase
protected:
// Memory allocator. Allocates and tracks memory required by the compiler.
// Deallocates all memory when compiler is destructed.
angle::PoolAllocator allocator;
TPoolAllocator allocator;
};
//

View file

@@ -7,7 +7,14 @@
#include "compiler/translator/PoolAlloc.h"
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include "common/angleutils.h"
#include "common/debug.h"
#include "common/platform.h"
#include "common/tls.h"
#include "compiler/translator/InitializeGlobals.h"
TLSIndex PoolIndex = TLS_INVALID_INDEX;
@@ -27,14 +34,327 @@ void FreePoolIndex()
PoolIndex = TLS_INVALID_INDEX;
}
angle::PoolAllocator *GetGlobalPoolAllocator()
TPoolAllocator *GetGlobalPoolAllocator()
{
assert(PoolIndex != TLS_INVALID_INDEX);
return static_cast<angle::PoolAllocator *>(GetTLSValue(PoolIndex));
return static_cast<TPoolAllocator *>(GetTLSValue(PoolIndex));
}
void SetGlobalPoolAllocator(angle::PoolAllocator *poolAllocator)
void SetGlobalPoolAllocator(TPoolAllocator *poolAllocator)
{
assert(PoolIndex != TLS_INVALID_INDEX);
SetTLSValue(PoolIndex, poolAllocator);
}
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment)
: alignment(allocationAlignment),
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
pageSize(growthIncrement),
freeList(0),
inUseList(0),
numCalls(0),
totalBytes(0),
#endif
mLocked(false)
{
//
// Adjust alignment to be at least pointer aligned and
// power of 2.
//
size_t minAlign = sizeof(void *);
alignment &= ~(minAlign - 1);
if (alignment < minAlign)
alignment = minAlign;
size_t a = 1;
while (a < alignment)
a <<= 1;
alignment = a;
alignmentMask = a - 1;
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
//
// Don't allow page sizes we know are smaller than all common
// OS page sizes.
//
if (pageSize < 4 * 1024)
pageSize = 4 * 1024;
//
// A large currentPageOffset indicates a new page needs to
// be obtained to allocate memory.
//
currentPageOffset = pageSize;
//
// Align header skip
//
headerSkip = minAlign;
if (headerSkip < sizeof(tHeader))
{
headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
}
#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
mStack.push_back({});
#endif
}
TPoolAllocator::~TPoolAllocator()
{
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
while (inUseList)
{
tHeader *next = inUseList->nextPage;
inUseList->~tHeader();
delete[] reinterpret_cast<char *>(inUseList);
inUseList = next;
}
// We should not check the guard blocks
// here, because we did it already when the block was
// placed into the free list.
//
while (freeList)
{
tHeader *next = freeList->nextPage;
delete[] reinterpret_cast<char *>(freeList);
freeList = next;
}
#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
for (auto &allocs : mStack)
{
for (auto alloc : allocs)
{
free(alloc);
}
}
mStack.clear();
#endif
}
// Support MSVC++ 6.0
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;
#ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
#else
const size_t TAllocation::guardBlockSize = 0;
#endif
//
// Check a single guard block for damage
//
void TAllocation::checkGuardBlock(unsigned char *blockMem,
unsigned char val,
const char *locText) const
{
#ifdef GUARD_BLOCKS
for (size_t x = 0; x < guardBlockSize; x++)
{
if (blockMem[x] != val)
{
char assertMsg[80];
// We don't print the assert message. It's here just to be helpful.
# if defined(_MSC_VER)
snprintf(assertMsg, sizeof(assertMsg),
"PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n", locText, size, data());
# else
snprintf(assertMsg, sizeof(assertMsg),
"PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, size, data());
# endif
assert(0 && "PoolAlloc: Damage in guard block");
}
}
#endif
}
void TPoolAllocator::push()
{
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
tAllocState state = {currentPageOffset, inUseList};
mStack.push_back(state);
//
// Indicate there is no current page to allocate from.
//
currentPageOffset = pageSize;
#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
mStack.push_back({});
#endif
}
//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
if (mStack.size() < 1)
return;
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
tHeader *page = mStack.back().page;
currentPageOffset = mStack.back().offset;
while (inUseList != page)
{
// invoke destructor to free allocation list
inUseList->~tHeader();
tHeader *nextInUse = inUseList->nextPage;
if (inUseList->pageCount > 1)
delete[] reinterpret_cast<char *>(inUseList);
else
{
inUseList->nextPage = freeList;
freeList = inUseList;
}
inUseList = nextInUse;
}
mStack.pop_back();
#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
for (auto &alloc : mStack.back())
{
free(alloc);
}
mStack.pop_back();
#endif
}
//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
while (mStack.size() > 0)
pop();
}
void *TPoolAllocator::allocate(size_t numBytes)
{
ASSERT(!mLocked);
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
//
// Just keep some interesting statistics.
//
++numCalls;
totalBytes += numBytes;
// If we are using guard blocks, all allocations are bracketed by
// them: [guardblock][allocation][guardblock]. numBytes is how
// much memory the caller asked for. allocationSize is the total
// size including guard blocks. In release build,
// guardBlockSize=0 and this all gets optimized away.
size_t allocationSize = TAllocation::allocationSize(numBytes);
// Detect integer overflow.
if (allocationSize < numBytes)
return 0;
//
// Do the allocation, most likely case first, for efficiency.
// This step could be moved to be inline sometime.
//
if (allocationSize <= pageSize - currentPageOffset)
{
//
// Safe to allocate from currentPageOffset.
//
unsigned char *memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
currentPageOffset += allocationSize;
currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
return initializeAllocation(inUseList, memory, numBytes);
}
if (allocationSize > pageSize - headerSkip)
{
//
// Do a multi-page allocation. Don't mix these with the others.
// The OS is efficient at allocating and freeing multiple pages.
//
size_t numBytesToAlloc = allocationSize + headerSkip;
// Detect integer overflow.
if (numBytesToAlloc < allocationSize)
return 0;
tHeader *memory = reinterpret_cast<tHeader *>(::new char[numBytesToAlloc]);
if (memory == 0)
return 0;
// Use placement-new to initialize header
new (memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
inUseList = memory;
currentPageOffset = pageSize; // make next allocation come from a new page
// No guard blocks for multi-page allocations (yet)
return reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
}
//
// Need a simple page to allocate from.
//
tHeader *memory;
if (freeList)
{
memory = freeList;
freeList = freeList->nextPage;
}
else
{
memory = reinterpret_cast<tHeader *>(::new char[pageSize]);
if (memory == 0)
return 0;
}
// Use placement-new to initialize header
new (memory) tHeader(inUseList, 1);
inUseList = memory;
unsigned char *ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
return initializeAllocation(inUseList, ret, numBytes);
#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
void *alloc = malloc(numBytes + alignmentMask);
mStack.back().push_back(alloc);
intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
intAlloc = (intAlloc + alignmentMask) & ~alignmentMask;
return reinterpret_cast<void *>(intAlloc);
#endif
}
void TPoolAllocator::lock()
{
ASSERT(!mLocked);
mLocked = true;
}
void TPoolAllocator::unlock()
{
ASSERT(mLocked);
mLocked = false;
}
//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
for (const TAllocation *alloc = this; alloc != 0; alloc = alloc->prevAlloc)
alloc->check();
}

View file

@@ -7,27 +7,242 @@
#ifndef COMPILER_TRANSLATOR_POOLALLOC_H_
#define COMPILER_TRANSLATOR_POOLALLOC_H_
#ifdef _DEBUG
# define GUARD_BLOCKS // define to enable guard block sanity checking
#endif
//
// This header defines the pool_allocator class that allows STL containers
// to use the angle::PoolAllocator class by using the pool_allocator
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
// typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//
// It also defines functions for managing the GlobalPoolAllocator used by the compiler.
//
#include <stddef.h>
#include <string.h>
#include <vector>
#include "common/PoolAlloc.h"
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//
class TAllocation
{
public:
TAllocation(size_t size, unsigned char *mem, TAllocation *prev = 0)
: size(size), mem(mem), prevAlloc(prev)
{
// Allocations are bracketed:
// [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// This would be cleaner with if (guardBlockSize)..., but that
// makes the compiler print warnings about 0 length memsets,
// even with the if() protecting them.
#ifdef GUARD_BLOCKS
memset(preGuard(), guardBlockBeginVal, guardBlockSize);
memset(data(), userDataFill, size);
memset(postGuard(), guardBlockEndVal, guardBlockSize);
#endif
}
void check() const
{
checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
checkGuardBlock(postGuard(), guardBlockEndVal, "after");
}
void checkAllocList() const;
// Return total size needed to accommodate user buffer of 'size',
// plus our tracking data.
inline static size_t allocationSize(size_t size)
{
return size + 2 * guardBlockSize + headerSize();
}
// Offset from surrounding buffer to get to user data buffer.
inline static unsigned char *offsetAllocation(unsigned char *m)
{
return m + guardBlockSize + headerSize();
}
private:
void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;
// Find offsets to pre and post guard blocks, and user data buffer
unsigned char *preGuard() const { return mem + headerSize(); }
unsigned char *data() const { return preGuard() + guardBlockSize; }
unsigned char *postGuard() const { return data() + size; }
size_t size; // size of the user data area
unsigned char *mem; // beginning of our allocation (pts to header)
TAllocation *prevAlloc; // prior allocation in the chain
// Support MSVC++ 6.0
const static unsigned char guardBlockBeginVal;
const static unsigned char guardBlockEndVal;
const static unsigned char userDataFill;
const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
inline static size_t headerSize() { return sizeof(TAllocation); }
#else
inline static size_t headerSize() { return 0; }
#endif
};
//
// There are several stacks. One is to track the pushing and popping
// of the user, and not yet implemented. The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used is not, nor must it match, the underlying OS
// page size. But, having it be about that size or equal to a set of
// pages is likely most optimal.
//
class TPoolAllocator
{
public:
TPoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = 16);
//
// Don't call the destructor just to free up the memory, call pop()
//
~TPoolAllocator();
//
// Call push() to establish a new place to pop memory to. Does not
// have to be called to get things started.
//
void push();
//
// Call pop() to free all memory allocated since the last call to push(),
// or if no last call to push, frees all memory since first allocation.
//
void pop();
//
// Call popAll() to free all memory allocated.
//
void popAll();
//
// Call allocate() to actually acquire memory. Returns 0 if no memory
// available, otherwise a properly aligned pointer to 'numBytes' of memory.
//
void *allocate(size_t numBytes);
//
// There is no deallocate. The point of this class is that
// deallocation can be skipped by the user of it, as the model
// of use is to simultaneously deallocate everything at once
// by calling pop(), and to not have to solve memory leak problems.
//
// Catch unwanted allocations.
// TODO(jmadill): Remove this when we remove the global allocator.
void lock();
void unlock();
private:
size_t alignment; // all returned allocations will be aligned at
// this granularity, which will be a power of 2
size_t alignmentMask;
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
friend struct tHeader;
struct tHeader
{
tHeader(tHeader *nextPage, size_t pageCount)
: nextPage(nextPage),
pageCount(pageCount)
# ifdef GUARD_BLOCKS
,
lastAllocation(0)
# endif
{}
~tHeader()
{
# ifdef GUARD_BLOCKS
if (lastAllocation)
lastAllocation->checkAllocList();
# endif
}
tHeader *nextPage;
size_t pageCount;
# ifdef GUARD_BLOCKS
TAllocation *lastAllocation;
# endif
};
struct tAllocState
{
size_t offset;
tHeader *page;
};
typedef std::vector<tAllocState> tAllocStack;
// Track allocations if and only if we're using guard blocks
void *initializeAllocation(tHeader *block, unsigned char *memory, size_t numBytes)
{
# ifdef GUARD_BLOCKS
new (memory) TAllocation(numBytes, memory, block->lastAllocation);
block->lastAllocation = reinterpret_cast<TAllocation *>(memory);
# endif
// This is optimized entirely away if GUARD_BLOCKS is not defined.
return TAllocation::offsetAllocation(memory);
}
size_t pageSize; // granularity of allocation from the OS
size_t headerSkip; // amount of memory to skip to make room for the
// header (basically, size of header, rounded
// up to make it aligned
size_t currentPageOffset; // next offset in top of inUseList to allocate from
tHeader *freeList; // list of popped memory
tHeader *inUseList; // list of all memory currently being used
tAllocStack mStack; // stack of where to allocate from, to partition pool
int numCalls; // just an interesting statistic
size_t totalBytes; // just an interesting statistic
#else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
std::vector<std::vector<void *>> mStack;
#endif
TPoolAllocator &operator=(const TPoolAllocator &); // don't allow assignment operator
TPoolAllocator(const TPoolAllocator &); // don't allow default copy constructor
bool mLocked;
};
//
// There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern angle::PoolAllocator *GetGlobalPoolAllocator();
extern void SetGlobalPoolAllocator(angle::PoolAllocator *poolAllocator);
extern TPoolAllocator *GetGlobalPoolAllocator();
extern void SetGlobalPoolAllocator(TPoolAllocator *poolAllocator);
//
// This STL compatible allocator is intended to be used as the allocator
@@ -96,7 +311,7 @@ class pool_allocator
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
angle::PoolAllocator &getAllocator() const { return *GetGlobalPoolAllocator(); }
TPoolAllocator &getAllocator() const { return *GetGlobalPoolAllocator(); }
};
#endif // COMPILER_TRANSLATOR_POOLALLOC_H_
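The pool_allocator restored by this revert is meant to be used as the second template argument of STL containers, routing their storage to the thread's global TPoolAllocator. A minimal sketch of that pattern (the pool setup mirrors the tests further below; the container, its contents, and the function name are illustrative):

    #include <vector>
    #include "compiler/translator/InitializeGlobals.h"
    #include "compiler/translator/PoolAlloc.h"

    void BuildWithPool()
    {
        TPoolAllocator pool;
        InitializePoolIndex();
        pool.push();
        SetGlobalPoolAllocator(&pool);
        {
            // Element storage comes from the global pool, not the heap.
            std::vector<int, pool_allocator<int>> values;
            values.push_back(42);
        }
        SetGlobalPoolAllocator(nullptr);
        pool.pop();  // releases the vector's storage in one operation
        FreePoolIndex();
    }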

View file

@@ -1816,7 +1816,7 @@ angle::Result RendererVk::synchronizeCpuGpuTime(vk::Context *context)
//
// Post-submission work Begin execution
//
// ???? Write timestamp Tgpu
// ???? Write timstamp Tgpu
//
// ???? End execution
//

View file

@@ -17,8 +17,6 @@ libangle_common_sources = [
"src/common/PackedEnums.h",
"src/common/PackedGLEnums_autogen.cpp",
"src/common/PackedGLEnums_autogen.h",
"src/common/PoolAlloc.cpp",
"src/common/PoolAlloc.h",
"src/common/aligned_memory.cpp",
"src/common/aligned_memory.h",
"src/common/angleutils.cpp",

View file

@@ -5,7 +5,6 @@
angle_unittests_sources = [
"../common/FastVector_unittest.cpp",
"../common/FixedVector_unittest.cpp",
"../common/PoolAlloc_unittest.cpp",
"../common/Optional_unittest.cpp",
"../common/aligned_memory_unittest.cpp",
"../common/angleutils_unittest.cpp",

View file

@@ -30,7 +30,7 @@ class ImmutableStringBuilderTest : public testing::Test
allocator.pop();
}
angle::PoolAllocator allocator;
TPoolAllocator allocator;
};
// Test writing a 32-bit signed int as hexadecimal using ImmutableStringBuilder.

View file

@@ -131,7 +131,7 @@ class IntermNodeTest : public testing::Test
}
private:
angle::PoolAllocator allocator;
TPoolAllocator allocator;
int mUniqueIndex;
};

View file

@@ -61,7 +61,7 @@ class RemovePowTest : public testing::Test
sh::TranslatorGLSL *mTranslatorGLSL;
TIntermNode *mASTRoot;
angle::PoolAllocator allocator;
TPoolAllocator allocator;
};
// Check if there's a pow() node anywhere in the tree.

View file

@@ -19,7 +19,7 @@ namespace sh
// Verify that mangled name matches between a vector/matrix TType and a corresponding StaticType.
TEST(Type, VectorAndMatrixMangledNameConsistent)
{
angle::PoolAllocator allocator;
TPoolAllocator allocator;
allocator.push();
SetGlobalPoolAllocator(&allocator);

View file

@@ -197,7 +197,7 @@ bool IsPlatformAvailable(const CompilerParameters &param)
case SH_HLSL_4_0_FL9_3_OUTPUT:
case SH_HLSL_3_0_OUTPUT:
{
angle::PoolAllocator allocator;
TPoolAllocator allocator;
InitializePoolIndex();
allocator.push();
SetGlobalPoolAllocator(&allocator);
@@ -259,7 +259,7 @@ class CompilerPerfTest : public ANGLEPerfTest,
const char *mTestShader;
ShBuiltInResources mResources;
angle::PoolAllocator mAllocator;
TPoolAllocator mAllocator;
sh::TCompiler *mTranslator;
};

View file

@@ -53,7 +53,7 @@ class ShaderCompileTreeTest : public testing::Test
private:
TranslatorESSL *mTranslator;
angle::PoolAllocator mAllocator;
TPoolAllocator mAllocator;
};
// Returns true if the node is some kind of a zero node - either constructor or a constant union