Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1862540 - Remove code dealing with non-zeroed chunks r=glandium
Differential Revision: https://phabricator.services.mozilla.com/D192521
Parent: e92aff056b
Commit: 99d873c7c2
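The diff below drops chunk_alloc()'s bool* aZeroed out-parameter, because every allocation path now hands back zeroed memory, and replaces chunk_ensure_zero() with a debug-only chunk_assert_zero(). A rough standalone sketch of the before/after calling pattern, with stand-in names rather than the real allocator:

// Stand-in names only (not mozjemalloc code): before, callers consulted an
// out-parameter and zeroed the memory themselves when needed; after, zeroed
// memory is an invariant that debug builds merely assert.
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>

static void* alloc_maybe_zeroed(size_t aSize, bool* aZeroed) {
  *aZeroed = false;  // e.g. recycled memory of unknown content
  return malloc(aSize);
}

static void* alloc_always_zeroed(size_t aSize) { return calloc(1, aSize); }

int main() {
  // Old pattern: check the out-parameter, memset when it reports non-zeroed.
  bool zeroed;
  char* a = static_cast<char*>(alloc_maybe_zeroed(64, &zeroed));
  if (!zeroed) {
    memset(a, 0, 64);
  }
  // New pattern: the allocator guarantees zeroes; only assert in debug.
  char* b = static_cast<char*>(alloc_always_zeroed(64));
  for (size_t i = 0; i < 64; i++) {
    assert(b[i] == 0);
  }
  free(a);
  free(b);
  return 0;
}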
@@ -1181,7 +1181,7 @@ struct arena_t {
   ~arena_t();
 
  private:
-  void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
+  void InitChunk(arena_chunk_t* aChunk);
 
   // This may return a chunk that should be destroyed with chunk_dealloc outside
   // of the arena lock. It is not the same chunk as was passed in (since that
@@ -1487,10 +1487,11 @@ static bool opt_randomize_small = true;
 // ***************************************************************************
 // Begin forward declarations.
 
-static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase,
-                         bool* aZeroed = nullptr);
+static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase);
 static void chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType);
-static void chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
+#ifdef MOZ_DEBUG
+static void chunk_assert_zero(void* aPtr, size_t aSize);
+#endif
 static void huge_dalloc(void* aPtr, arena_t* aArena);
 static bool malloc_init_hard();
 
@@ -2201,7 +2202,7 @@ static bool pages_purge(void* addr, size_t length, bool force_zero) {
   return true;
 }
 
-static void* chunk_recycle(size_t aSize, size_t aAlignment, bool* aZeroed) {
+static void* chunk_recycle(size_t aSize, size_t aAlignment) {
   extent_node_t key;
 
   size_t alloc_size = aSize + aAlignment - kChunkSize;
@@ -2223,9 +2224,11 @@ static void* chunk_recycle(size_t aSize, size_t aAlignment, bool* aZeroed) {
   size_t trailsize = node->mSize - leadsize - aSize;
   void* ret = (void*)((uintptr_t)node->mAddr + leadsize);
   ChunkType chunk_type = node->mChunkType;
-  if (aZeroed) {
-    *aZeroed = (chunk_type == ZEROED_CHUNK);
-  }
+
+  // All recycled chunks are zeroed (because they're purged) before being
+  // recycled.
+  MOZ_ASSERT(chunk_type == ZEROED_CHUNK);
+
   // Remove node from the tree.
   gChunksBySize.Remove(node);
   gChunksByAddress.Remove(node);
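The new assertion relies on purged chunks reading back as zeroes. Assuming pages_purge() maps to madvise(MADV_DONTNEED) on Linux, as it historically has for private anonymous memory, a small standalone illustration of that property (Linux-specific, not mozjemalloc code):

#include <cassert>
#include <cstddef>
#include <cstring>
#include <sys/mman.h>

int main() {
  const size_t len = 4096;
  void* mem = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* p = static_cast<char*>(mem);
  memset(p, 0xab, len);            // dirty the page
  madvise(p, len, MADV_DONTNEED);  // discard it, as a purge does
  assert(p[0] == 0);               // the next touch faults in a zero page
  munmap(mem, len);
  return 0;
}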
@@ -2270,10 +2273,6 @@ static void* chunk_recycle(size_t aSize, size_t aAlignment, bool* aZeroed) {
   if (!pages_commit(ret, aSize)) {
     return nullptr;
   }
-  // pages_commit is guaranteed to zero the chunk.
-  if (aZeroed) {
-    *aZeroed = true;
-  }
 
   return ret;
 }
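The deleted comment notes that pages_commit is guaranteed to zero the chunk, which is what makes the aZeroed plumbing unnecessary on the decommit path. Assuming pages_commit() re-commits on POSIX-like platforms by mapping fresh anonymous pages over the same range with MAP_FIXED, a standalone illustration (not mozjemalloc code):

#include <cassert>
#include <cstddef>
#include <cstring>
#include <sys/mman.h>

int main() {
  const size_t len = 4096;
  void* mem = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  memset(mem, 0xcd, len);  // dirty the page
  // "Decommit + commit": replace the range with a fresh anonymous mapping,
  // which the kernel zero-fills.
  void* again = mmap(mem, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  assert(again == mem);
  assert(static_cast<char*>(mem)[0] == 0);  // contents are zero again
  munmap(mem, len);
  return 0;
}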
@@ -2294,8 +2293,7 @@ static void* chunk_recycle(size_t aSize, size_t aAlignment, bool* aZeroed) {
 // `zeroed` is an outvalue that returns whether the allocated memory is
 // guaranteed to be full of zeroes. It can be omitted when the caller doesn't
 // care about the result.
-static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase,
-                         bool* aZeroed) {
+static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase) {
   void* ret = nullptr;
 
   MOZ_ASSERT(aSize != 0);
@@ -2306,13 +2304,10 @@ static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase,
   // Base allocations can't be fulfilled by recycling because of
   // possible deadlock or infinite recursion.
   if (CAN_RECYCLE(aSize) && !aBase) {
-    ret = chunk_recycle(aSize, aAlignment, aZeroed);
+    ret = chunk_recycle(aSize, aAlignment);
   }
   if (!ret) {
     ret = chunk_alloc_mmap(aSize, aAlignment);
-    if (aZeroed) {
-      *aZeroed = true;
-    }
   }
   if (ret && !aBase) {
     if (!gChunkRTree.Set(ret, ret)) {
@@ -2325,21 +2320,16 @@ static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase,
   return ret;
 }
 
-static void chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed) {
-  if (aZeroed == false) {
-    memset(aPtr, 0, aSize);
-  }
 #ifdef MOZ_DEBUG
-  else {
-    size_t i;
-    size_t* p = (size_t*)(uintptr_t)aPtr;
+static void chunk_assert_zero(void* aPtr, size_t aSize) {
+  size_t i;
+  size_t* p = (size_t*)(uintptr_t)aPtr;
 
-    for (i = 0; i < aSize / sizeof(size_t); i++) {
-      MOZ_ASSERT(p[i] == 0);
-    }
+  for (i = 0; i < aSize / sizeof(size_t); i++) {
+    MOZ_ASSERT(p[i] == 0);
   }
-#endif
 }
+#endif
 
 static void chunk_record(void* aChunk, size_t aSize, ChunkType aType) {
   extent_node_t key;
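For reference, the debug helper added above can be exercised on its own; a standalone sketch with assert() from <cassert> standing in for MOZ_ASSERT (not the mozjemalloc build):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <sys/mman.h>

// Same word-by-word walk as the new chunk_assert_zero().
static void chunk_assert_zero(void* aPtr, size_t aSize) {
  size_t* p = (size_t*)(uintptr_t)aPtr;
  for (size_t i = 0; i < aSize / sizeof(size_t); i++) {
    assert(p[i] == 0);
  }
}

int main() {
  const size_t kChunkSize = size_t(1) << 20;  // 1 MiB stand-in for a chunk
  void* chunk = mmap(nullptr, kChunkSize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(chunk != MAP_FAILED);
  // Fresh anonymous mappings are zero-filled, which is the property the
  // allocator now relies on unconditionally.
  chunk_assert_zero(chunk, kChunkSize);
  munmap(chunk, kChunkSize);
  return 0;
}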
@@ -2703,7 +2693,7 @@ bool arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge,
   return true;
 }
 
-void arena_t::InitChunk(arena_chunk_t* aChunk, bool aZeroed) {
+void arena_t::InitChunk(arena_chunk_t* aChunk) {
   size_t i;
   // WARNING: The following relies on !aZeroed meaning "used to be an arena
   // chunk".
@@ -2715,8 +2705,7 @@ void arena_t::InitChunk(arena_chunk_t* aChunk, bool aZeroed) {
   // have been emptied before being recycled). In that case, we can get
   // away with reusing the chunk as-is, marking all runs as madvised.
 
-  size_t flags =
-      aZeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED : CHUNK_MAP_MADVISED;
+  size_t flags = CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
 
   mStats.mapped += kChunkSize;
 
@@ -2826,14 +2815,13 @@ arena_run_t* arena_t::AllocRun(size_t aSize, bool aLarge, bool aZero) {
   } else {
     // No usable runs. Create a new chunk from which to allocate
     // the run.
-    bool zeroed;
     arena_chunk_t* chunk =
-        (arena_chunk_t*)chunk_alloc(kChunkSize, kChunkSize, false, &zeroed);
+        (arena_chunk_t*)chunk_alloc(kChunkSize, kChunkSize, false);
     if (!chunk) {
       return nullptr;
     }
 
-    InitChunk(chunk, zeroed);
+    InitChunk(chunk);
     run = (arena_run_t*)(uintptr_t(chunk) +
                          (gChunkHeaderNumPages << gPageSize2Pow));
   }
@@ -4148,7 +4136,6 @@ void* arena_t::PallocHuge(size_t aSize, size_t aAlignment, bool aZero) {
   size_t csize;
   size_t psize;
   extent_node_t* node;
-  bool zeroed;
 
   // We're going to configure guard pages in the region between the
   // page-aligned size and the chunk-aligned size, so if those are the same
@@ -4166,17 +4153,17 @@ void* arena_t::PallocHuge(size_t aSize, size_t aAlignment, bool aZero) {
   }
 
   // Allocate one or more contiguous chunks for this request.
-  ret = chunk_alloc(csize, aAlignment, false, &zeroed);
+  ret = chunk_alloc(csize, aAlignment, false);
   if (!ret) {
     ExtentAlloc::dealloc(node);
     return nullptr;
   }
   psize = PAGE_CEILING(aSize);
 #ifdef MOZ_DEBUG
   if (aZero) {
     // We will decommit anything past psize so there is no need to zero
     // further.
-    chunk_ensure_zero(ret, psize, zeroed);
+    chunk_assert_zero(ret, psize);
   }
 #endif
 
   // Insert node into huge.
   node->mAddr = ret;