Bug 1401099 - Move arena_run_alloc to a method of arena_t. r=njn
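
In short, a sketch of the shape of the change (both call lines are taken
from the diff below): arena_run_alloc was a free function taking the arena
as its first argument; it becomes the arena_t::AllocRun method, so the
arena is the implicit `this` and its members (mRunsAvail, mSpare) are
accessed directly. A caller changes from

    run = arena_run_alloc(arena, bin, bin->run_size, false, false);

to

    run = arena->AllocRun(bin, bin->run_size, false, false);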

--HG--
extra : rebase_source : 6a683f4d0cf5ad68c670dbe8ea9d3a34acf11549
Mike Hommey 2017-09-15 17:57:11 +09:00
Parent d2ba03c881
Commit 7cb9914a15
1 changed file with 57 additions and 58 deletions


@@ -462,7 +462,7 @@ struct arena_stats_t {
 enum ChunkType {
   UNKNOWN_CHUNK,
   ZEROED_CHUNK,   // chunk only contains zeroes
-  ARENA_CHUNK,    // used to back arena runs created by arena_run_alloc
+  ARENA_CHUNK,    // used to back arena runs created by arena_t::AllocRun
   HUGE_CHUNK,     // used to back huge allocations (e.g. huge_malloc)
   RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle
 };
@@ -708,13 +708,11 @@ struct arena_t {
   /* Tree of dirty-page-containing chunks this arena manages. */
   arena_chunk_tree_t mChunksDirty;
 
-#ifdef MALLOC_DOUBLE_PURGE
 private:
+#ifdef MALLOC_DOUBLE_PURGE
   /* Head of a linked list of MADV_FREE'd-page-containing chunks this
    * arena manages. */
   mozilla::DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
-
-public:
 #endif
 
   /*
@@ -729,6 +727,7 @@ public:
    */
   arena_chunk_t* mSpare;
 
+public:
   /*
    * Current count of pages within unused runs that are potentially
    * dirty, and for which madvise(... MADV_FREE) has not been called. By
@@ -774,10 +773,14 @@ public:
   bool Init();
 
+private:
   void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
 
+public:
   void DeallocChunk(arena_chunk_t* aChunk);
 
+  arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
+
   void Purge(bool aAll);
 
   void HardPurge();
@@ -2749,43 +2752,39 @@ arena_t::DeallocChunk(arena_chunk_t* aChunk)
   mSpare = aChunk;
 }
 
-static arena_run_t *
-arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
-    bool zero)
+arena_run_t*
+arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
 {
-  arena_run_t *run;
-  arena_chunk_map_t *mapelm, key;
+  arena_run_t* run;
+  arena_chunk_map_t* mapelm;
+  arena_chunk_map_t key;
 
-  MOZ_ASSERT(size <= arena_maxclass);
-  MOZ_ASSERT((size & pagesize_mask) == 0);
+  MOZ_ASSERT(aSize <= arena_maxclass);
+  MOZ_ASSERT((aSize & pagesize_mask) == 0);
 
   /* Search the arena's chunks for the lowest best fit. */
-  key.bits = size | CHUNK_MAP_KEY;
-  mapelm = arena_avail_tree_nsearch(&arena->mRunsAvail, &key);
+  key.bits = aSize | CHUNK_MAP_KEY;
+  mapelm = arena_avail_tree_nsearch(&mRunsAvail, &key);
   if (mapelm) {
-    arena_chunk_t *chunk =
+    arena_chunk_t* chunk =
         (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
-    size_t pageind = ((uintptr_t)mapelm -
-        (uintptr_t)chunk->map) /
+    size_t pageind = (uintptr_t(mapelm) - uintptr_t(chunk->map)) /
         sizeof(arena_chunk_map_t);
 
-    run = (arena_run_t *)((uintptr_t)chunk + (pageind
-        << pagesize_2pow));
-    arena_run_split(arena, run, size, large, zero);
-    return (run);
+    run = (arena_run_t*)(uintptr_t(chunk) + (pageind << pagesize_2pow));
+    arena_run_split(this, run, aSize, aLarge, aZero);
+    return run;
   }
 
-  if (arena->mSpare) {
+  if (mSpare) {
     /* Use the spare. */
-    arena_chunk_t *chunk = arena->mSpare;
-    arena->mSpare = nullptr;
-    run = (arena_run_t *)((uintptr_t)chunk +
-        (arena_chunk_header_npages << pagesize_2pow));
+    arena_chunk_t* chunk = mSpare;
+    mSpare = nullptr;
+    run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
     /* Insert the run into the tree of available runs. */
-    arena_avail_tree_insert(&arena->mRunsAvail,
-        &chunk->map[arena_chunk_header_npages]);
-    arena_run_split(arena, run, size, large, zero);
-    return (run);
+    arena_avail_tree_insert(&mRunsAvail, &chunk->map[arena_chunk_header_npages]);
+    arena_run_split(this, run, aSize, aLarge, aZero);
+    return run;
   }
 
   /*
@@ -2794,18 +2793,18 @@ arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
    */
   {
     bool zeroed;
-    arena_chunk_t *chunk = (arena_chunk_t *)
+    arena_chunk_t* chunk = (arena_chunk_t*)
         chunk_alloc(chunksize, chunksize, false, &zeroed);
-    if (!chunk)
+    if (!chunk) {
       return nullptr;
+    }
 
-    arena->InitChunk(chunk, zeroed);
-    run = (arena_run_t *)((uintptr_t)chunk +
-        (arena_chunk_header_npages << pagesize_2pow));
+    InitChunk(chunk, zeroed);
+    run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
   }
 
   /* Update page map. */
-  arena_run_split(arena, run, size, large, zero);
-  return (run);
+  arena_run_split(this, run, aSize, aLarge, aZero);
+  return run;
 }
 
 void
@@ -3068,11 +3067,11 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
   /* No existing runs have any space available. */
 
   /* Allocate a new run. */
-  run = arena_run_alloc(arena, bin, bin->run_size, false, false);
+  run = arena->AllocRun(bin, bin->run_size, false, false);
   if (!run)
     return nullptr;
   /*
-   * Don't initialize if a race in arena_run_alloc() allowed an existing
+   * Don't initialize if a race in arena_t::AllocRun() allowed an existing
    * run to become usable.
    */
   if (run == bin->runcur)
@@ -3280,7 +3279,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
   /* Large allocation. */
   size = PAGE_CEILING(size);
   malloc_spin_lock(&arena->mLock);
-  ret = (void *)arena_run_alloc(arena, nullptr, size, true, zero);
+  ret = arena->AllocRun(nullptr, size, true, zero);
   if (!ret) {
     malloc_spin_unlock(&arena->mLock);
     return nullptr;
@@ -3347,7 +3346,7 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
   MOZ_ASSERT((alignment & pagesize_mask) == 0);
 
   malloc_spin_lock(&arena->mLock);
-  ret = (void *)arena_run_alloc(arena, nullptr, alloc_size, true, false);
+  ret = arena->AllocRun(nullptr, alloc_size, true, false);
   if (!ret) {
     malloc_spin_unlock(&arena->mLock);
     return nullptr;