Bug 1546442 - Add test for leading guard pages for normal allocations. r=glandium,pbone

Differential Revision: https://phabricator.services.mozilla.com/D132006
This commit is contained in:
Gian-Carlo Pascutto 2021-11-26 11:46:25 +00:00
Parent: b936d4f576
Commit: 1d45775e8c
1 changed file: 54 additions and 3 deletions

View file

@@ -383,7 +383,9 @@ class SizeClassesBetween {
};
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & (~(alignment - 1)))
(((s) + ((alignment)-1)) & (~((alignment)-1)))
#define ALIGNMENT_FLOOR(s, alignment) ((s) & (~((alignment)-1)))
static bool IsSameRoundedHugeClass(size_t aSize1, size_t aSize2,
jemalloc_stats_t& aStats) {
@@ -658,7 +660,7 @@ TEST(Jemalloc, JunkPoison)
}
#endif // !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)
TEST(Jemalloc, GuardRegion)
TEST(Jemalloc, TrailingGuard)
{
// Disable PHC allocations for this test, because even a single PHC
// allocation occurring can throw it off.
@@ -693,7 +695,6 @@ TEST(Jemalloc, GuardRegion)
jemalloc_ptr_info_t info;
jemalloc_ptr_info(guard_page, &info);
ASSERT_TRUE(jemalloc_ptr_is_freed_page(&info));
ASSERT_TRUE(info.tag == TagFreedPage);
ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");
@@ -709,6 +710,56 @@ TEST(Jemalloc, GuardRegion)
#endif
}
// Checks that the allocator places an inaccessible guard page immediately
// before a normal (large) allocation's data area, so that buffer
// *underflows* fault instead of silently corrupting chunk metadata.
// (Counterpart to the TrailingGuard test above, which checks overflows.)
TEST(Jemalloc, LeadingGuard)
{
// Disable PHC allocations for this test, because even a single PHC
// allocation occurring can throw it off.
AutoDisablePHCOnCurrentThread disable;
jemalloc_stats_t stats;
jemalloc_stats(&stats);
#ifdef HAS_GDB_SLEEP_DURATION
// Avoid death tests adding some unnecessary (long) delays.
unsigned int old_gdb_sleep_duration = _gdb_sleep_duration;
_gdb_sleep_duration = 0;
#endif
// Use a private arena so allocations elsewhere in the process cannot
// share (and perturb) the chunk under test.
arena_id_t arena = moz_create_arena();
ASSERT_TRUE(arena != 0);
// Do a simple normal allocation, but force all the allocation space
// in the chunk to be used up. This allows us to check that we get
// the safe area right in the logic that follows (all memory will be
// committed and initialized), and it forces this pointer to the start
// of the zone to sit at the very start of the usable chunk area.
void* ptr = moz_arena_malloc(arena, stats.large_max);
ASSERT_TRUE(ptr != nullptr);
// If ptr is chunk-aligned, the above allocation went wrong.
void* chunk_start = (void*)ALIGNMENT_FLOOR((uintptr_t)ptr, stats.chunksize);
ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start);
// If ptr is 1 page after the chunk start (so right after the header),
// we must have missed adding the guard page.
ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start + stats.page_size);
// The actual start depends on the amount of metadata versus the page
// size, so we can't check equality without pulling in too many
// implementation details.
// Guard page should be right before data area. Probing one pointer's
// width below ptr suffices: any address in the page preceding the data
// area falls inside the guard page.
void* guard_page = (void*)(((uintptr_t)ptr) - sizeof(void*));
jemalloc_ptr_info_t info;
jemalloc_ptr_info(guard_page, &info);
// The guard page is attributed to no allocation at all — neither live
// nor freed (contrast with TagFreedPage in the trailing-guard case).
ASSERT_TRUE(info.tag == TagUnknown);
// Writing into the guard page must crash the process.
ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");
moz_arena_free(arena, ptr);
moz_dispose_arena(arena);
#ifdef HAS_GDB_SLEEP_DURATION
// Restore the death-test delay suppressed above.
_gdb_sleep_duration = old_gdb_sleep_duration;
#endif
}
TEST(Jemalloc, DisposeArena)
{
jemalloc_stats_t stats;