Bug 1414155 - Consolidate "constant" globals. r=njn

There is a set of "constants" that are actually globals: when compiling
without MALLOC_STATIC_PAGESIZE they depend on the page size obtained at
runtime, but when compiling with it they are genuine compile-time
constants. Their definitions were unfortunately duplicated between the
two configurations.

We set up a set of macros that allow each of these declarations to be
written only once.
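
A condensed sketch of the scheme, lifted from the diff below (only
pagesize_mask is shown here; the real patch declares all of the affected
globals this way, and the DEFINE_GLOBALS macro is written with line
continuations in the actual source):

#ifdef MALLOC_STATIC_PAGESIZE
// Static page size: everything is a compile-time constant.
#define DECLARE_GLOBAL(type, name)
#define DEFINE_GLOBALS
#define END_GLOBALS
#define DEFINE_GLOBAL(type) static const type
#else
// Runtime page size: the same names become plain globals, initialized by a
// DefineGlobals() function called during allocator initialization once the
// page size is known.
#define DECLARE_GLOBAL(type, name) static type name;
#define DEFINE_GLOBALS static void DefineGlobals() {
#define END_GLOBALS }
#define DEFINE_GLOBAL(type)
#endif

// A single list of declarations and definitions then serves both modes:
DECLARE_GLOBAL(size_t, pagesize_mask)

DEFINE_GLOBALS
DEFINE_GLOBAL(size_t) pagesize_mask = pagesize - 1;
END_GLOBALS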

--HG--
extra : rebase_source : 56557b7ba01ee60fe85f2cd3c2a0aa910c4c93c6
Mike Hommey 2017-11-01 18:33:24 +09:00
Parent 6b7f782847
Commit 26674ebcb7
1 changed file with 80 additions and 79 deletions

@@ -174,6 +174,18 @@ using namespace mozilla;
#define MALLOC_DECOMMIT
#endif
// When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
// compile-time for better performance, as opposed to determined at
// runtime. Some platforms can have different page sizes at runtime
// depending on kernel configuration, so they are opted out by default.
// Debug builds are opted out too, for test coverage.
#ifndef MOZ_DEBUG
#if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && \
!defined(__aarch64__)
#define MALLOC_STATIC_PAGESIZE 1
#endif
#endif
#ifdef XP_WIN
#define STDERR_FILENO 2
@@ -388,18 +400,6 @@ struct arena_chunk_t
#define SMALL_MAX_2POW_DEFAULT 9
#define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
// When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
// compile-time for better performance, as opposed to determined at
// runtime. Some platforms can have different page sizes at runtime
// depending on kernel configuration, so they are opted out by default.
// Debug builds are opted out too, for test coverage.
#ifndef MOZ_DEBUG
#if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && \
!defined(__aarch64__)
#define MALLOC_STATIC_PAGESIZE 1
#endif
#endif
// Various quantum-related settings.
#define QUANTUM_DEFAULT (size_t(1) << QUANTUM_2POW_MIN)
static const size_t quantum = QUANTUM_DEFAULT;
@@ -415,8 +415,11 @@ static const unsigned ntbins = unsigned(QUANTUM_2POW_MIN - TINY_MIN_2POW);
// Number of quantum-spaced bins.
static const unsigned nqbins = unsigned(SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN);
#ifdef MALLOC_STATIC_PAGESIZE
#define CHUNKSIZE_DEFAULT ((size_t)1 << CHUNK_2POW_DEFAULT)
static const size_t chunksize = CHUNKSIZE_DEFAULT;
static const size_t chunksize_mask = CHUNKSIZE_DEFAULT - 1;
#ifdef MALLOC_STATIC_PAGESIZE
// VM page size. It must divide the runtime CPU page size or the code
// will abort.
// Platform specific page size conditions copied from js/public/HeapAPI.h
@@ -428,59 +431,77 @@ static const size_t pagesize = 64_KiB;
#else
static const size_t pagesize = 4_KiB;
#endif
#define pagesize_2pow LOG2(pagesize)
#define pagesize_mask (pagesize - 1)
// Max size class for bins.
static const size_t bin_maxclass = pagesize >> 1;
// Number of (2^n)-spaced sub-page bins.
static const unsigned nsbins =
unsigned(pagesize_2pow - SMALL_MAX_2POW_DEFAULT - 1);
#else // !MALLOC_STATIC_PAGESIZE
// VM page size.
#else
static size_t pagesize;
static size_t pagesize_mask;
static size_t pagesize_2pow;
// Various bin-related settings.
static size_t bin_maxclass; // Max size class for bins.
static unsigned nsbins; // Number of (2^n)-spaced sub-page bins.
#endif
// Various chunk-related settings.
// Compute the header size such that it is large enough to contain the page map
// and enough nodes for the worst case: one node per non-header page plus one
// extra for situations where we briefly have one more node allocated than we
// will need.
#define calculate_arena_header_size() \
(sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1))
#define calculate_arena_header_pages() \
((calculate_arena_header_size() >> pagesize_2pow) + \
((calculate_arena_header_size() & pagesize_mask) ? 1 : 0))
// Max size class for arenas.
#define calculate_arena_maxclass() \
(chunksize - (arena_chunk_header_npages << pagesize_2pow))
#define CHUNKSIZE_DEFAULT ((size_t)1 << CHUNK_2POW_DEFAULT)
static const size_t chunksize = CHUNKSIZE_DEFAULT;
static const size_t chunksize_mask = CHUNKSIZE_DEFAULT - 1;
#ifdef MALLOC_STATIC_PAGESIZE
static const size_t chunk_npages = CHUNKSIZE_DEFAULT >> pagesize_2pow;
#define arena_chunk_header_npages calculate_arena_header_pages()
#define arena_maxclass calculate_arena_maxclass()
#define DECLARE_GLOBAL(type, name)
#define DEFINE_GLOBALS
#define END_GLOBALS
#define DEFINE_GLOBAL(type) static const type
#define GLOBAL_LOG2 LOG2
#define GLOBAL_ASSERT_HELPER1(x) static_assert(x, #x)
#define GLOBAL_ASSERT_HELPER2(x, y) static_assert(x, y)
#define GLOBAL_ASSERT(...) \
MACRO_CALL( \
MOZ_PASTE_PREFIX_AND_ARG_COUNT(GLOBAL_ASSERT_HELPER, __VA_ARGS__), \
(__VA_ARGS__))
#else
static size_t chunk_npages;
static size_t arena_chunk_header_npages;
static size_t arena_maxclass; // Max size class for arenas.
#define DECLARE_GLOBAL(type, name) static type name;
#define DEFINE_GLOBALS \
static void DefineGlobals() \
{
#define END_GLOBALS }
#define DEFINE_GLOBAL(type)
#define GLOBAL_LOG2 FloorLog2
#define GLOBAL_ASSERT MOZ_RELEASE_ASSERT
#endif
DECLARE_GLOBAL(size_t, pagesize_mask)
DECLARE_GLOBAL(uint8_t, pagesize_2pow)
DECLARE_GLOBAL(uint8_t, nsbins)
DECLARE_GLOBAL(size_t, bin_maxclass)
DECLARE_GLOBAL(size_t, chunk_npages)
DECLARE_GLOBAL(size_t, arena_chunk_header_npages)
DECLARE_GLOBAL(size_t, arena_maxclass)
DEFINE_GLOBALS
DEFINE_GLOBAL(size_t) pagesize_mask = pagesize - 1;
DEFINE_GLOBAL(uint8_t) pagesize_2pow = GLOBAL_LOG2(pagesize);
// Number of (2^n)-spaced sub-page bins.
DEFINE_GLOBAL(uint8_t)
nsbins = pagesize_2pow - SMALL_MAX_2POW_DEFAULT - 1;
// Max size class for bins.
DEFINE_GLOBAL(size_t) bin_maxclass = pagesize >> 1;
// Number of pages in a chunk.
DEFINE_GLOBAL(size_t) chunk_npages = chunksize >> pagesize_2pow;
// Number of pages necessary for a chunk header.
DEFINE_GLOBAL(size_t)
arena_chunk_header_npages =
((sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1) +
pagesize_mask) &
~pagesize_mask) >>
pagesize_2pow;
// Max size class for arenas.
DEFINE_GLOBAL(size_t)
arena_maxclass = chunksize - (arena_chunk_header_npages << pagesize_2pow);
// Various sanity checks that regard configuration.
GLOBAL_ASSERT(1ULL << pagesize_2pow == pagesize,
"Page size is not a power of two");
GLOBAL_ASSERT(quantum >= sizeof(void*));
GLOBAL_ASSERT(quantum <= pagesize);
GLOBAL_ASSERT(chunksize >= pagesize);
GLOBAL_ASSERT(quantum * 4 <= chunksize);
END_GLOBALS
// Recycle at most 128 chunks. With 1 MiB chunks, this means we retain at most
// 6.25% of the process address space on a 32-bit OS for later use.
#define CHUNK_RECYCLE_LIMIT 128
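
For intuition on the arena_chunk_header_npages expression above, here is a
small standalone computation with made-up struct sizes (kChunkHeaderSize and
kMapEntrySize are stand-ins for sizeof(arena_chunk_t) and
sizeof(arena_chunk_map_t), whose real values depend on the platform):

#include <cstddef>

// Assumed values for illustration only.
constexpr std::size_t kPageSize = 4096;
constexpr std::size_t kPageMask = kPageSize - 1;
constexpr std::size_t kChunkSize = std::size_t(1) << 20; // 1 MiB chunks
constexpr std::size_t kChunkNPages = kChunkSize / kPageSize; // 256
constexpr std::size_t kChunkHeaderSize = 112; // stand-in for sizeof(arena_chunk_t)
constexpr std::size_t kMapEntrySize = 16;     // stand-in for sizeof(arena_chunk_map_t)

// Header = chunk header struct + one map entry per non-header page.
constexpr std::size_t headerBytes =
    kChunkHeaderSize + kMapEntrySize * (kChunkNPages - 1); // 4192
// Round up to whole pages with the (x + mask) & ~mask idiom, then convert to pages.
constexpr std::size_t headerPages =
    ((headerBytes + kPageMask) & ~kPageMask) / kPageSize; // 2
// Largest allocation served directly from an arena chunk.
constexpr std::size_t arenaMaxClass =
    kChunkSize - headerPages * kPageSize; // 1 MiB - 8 KiB

static_assert(headerPages == 2, "worked example");
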
@@ -4117,10 +4138,7 @@ static
}
#else
pagesize = (size_t)result;
pagesize_mask = (size_t)result - 1;
pagesize_2pow = FloorLog2(result);
MOZ_RELEASE_ASSERT(1ULL << pagesize_2pow == pagesize,
"Page size is not a power of two");
DefineGlobals();
#endif
// Get runtime configuration.
@@ -4198,25 +4216,8 @@ static
}
}
#ifndef MALLOC_STATIC_PAGESIZE
// Set bin-related variables.
bin_maxclass = (pagesize >> 1);
nsbins = pagesize_2pow - SMALL_MAX_2POW_DEFAULT - 1;
chunk_npages = (chunksize >> pagesize_2pow);
arena_chunk_header_npages = calculate_arena_header_pages();
arena_maxclass = calculate_arena_maxclass();
#endif
gRecycledSize = 0;
// Various sanity checks that regard configuration.
MOZ_ASSERT(quantum >= sizeof(void*));
MOZ_ASSERT(quantum <= pagesize);
MOZ_ASSERT(chunksize >= pagesize);
MOZ_ASSERT(quantum * 4 <= chunksize);
// Initialize chunks data.
chunks_mtx.Init();
gChunksBySize.Init();
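
A side note on the GLOBAL_ASSERT machinery introduced above: in the
static-pagesize configuration it has to pick between the one-argument and
two-argument static_assert forms during preprocessing. The sketch below is a
self-contained approximation of that dispatch; COUNT_ARGS and PASTE are
simplified stand-ins for the real MOZ_PASTE_PREFIX_AND_ARG_COUNT and
MACRO_CALL helpers and only handle the one- and two-argument cases.

// Count 1 or 2 arguments by shifting a reversed sequence into position N.
#define COUNT_ARGS_IMPL(_1, _2, N, ...) N
#define COUNT_ARGS(...) COUNT_ARGS_IMPL(__VA_ARGS__, 2, 1, 0)
// Extra indirection so COUNT_ARGS is expanded before token pasting.
#define PASTE_IMPL(a, b) a##b
#define PASTE(a, b) PASTE_IMPL(a, b)

#define GLOBAL_ASSERT_HELPER1(x) static_assert(x, #x)
#define GLOBAL_ASSERT_HELPER2(x, y) static_assert(x, y)
#define GLOBAL_ASSERT(...) \
  PASTE(GLOBAL_ASSERT_HELPER, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)

// One argument: the condition itself is stringified into the message.
GLOBAL_ASSERT(1 + 1 == 2);
//   -> static_assert(1 + 1 == 2, "1 + 1 == 2");
// Two arguments: the second one is used as the message.
GLOBAL_ASSERT(2 * 2 == 4, "arithmetic is broken");
//   -> static_assert(2 * 2 == 4, "arithmetic is broken");

In the non-static configuration GLOBAL_ASSERT is simply MOZ_RELEASE_ASSERT,
so the same checks run inside DefineGlobals() at startup instead of at
compile time.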