Bug 1418104 - Add various realloc, junk and poisoning unit tests. r=njn

--HG--
extra : rebase_source : c79887f72197e5308a74fb5e68a94565ca6464a8
Mike Hommey 2017-11-16 16:41:41 +09:00
Parent 2c84ef6b8a
Commit 161d7afcbd
1 changed file with 320 additions and 3 deletions


@@ -260,6 +260,9 @@ TEST(Jemalloc, PtrInfo)
}
#ifdef NIGHTLY_BUILD
size_t sSizes[] = { 1, 42, 79, 918, 1.5_KiB,
73_KiB, 129_KiB, 1.1_MiB, 2.6_MiB, 5.1_MiB };
TEST(Jemalloc, Arenas)
{
arena_id_t arena = moz_create_arena();
@@ -291,9 +294,10 @@ TEST(Jemalloc, Arenas)
// For convenience, realloc can also be used to reallocate arena pointers.
// The result should be in the same arena. Test various size class transitions.
-size_t sizes[] = { 1, 42, 80, 1_KiB, 1.5_KiB, 72_KiB, 129_KiB, 2.5_MiB, 5.1_MiB };
-for (size_t from_size : sizes) {
-for (size_t to_size : sizes) {
+for (size_t from_size : sSizes) {
+SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
+for (size_t to_size : sSizes) {
+SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
ptr = moz_arena_malloc(arena, from_size);
ptr = realloc(ptr, to_size);
// Freeing with the wrong arena should crash.
@@ -312,4 +316,317 @@ TEST(Jemalloc, Arenas)
_gdb_sleep_duration = old_gdb_sleep_duration;
#endif
}
// Check that a buffer aPtr is entirely filled with a given character from
// aOffset to aSize. For faster comparison, the caller is required to fill a
// reference buffer with the wanted character, and give the size of that
// reference buffer.
static void
bulk_compare(char* aPtr,
size_t aOffset,
size_t aSize,
char* aReference,
size_t aReferenceSize)
{
for (size_t i = aOffset; i < aSize; i += aReferenceSize) {
size_t length = std::min(aSize - i, aReferenceSize);
if (memcmp(aPtr + i, aReference, length)) {
// We got a mismatch; we now want to report more precisely where.
for (size_t j = i; j < i + length; j++) {
ASSERT_EQ(aPtr[j], *aReference);
}
}
}
}
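// Illustrative sketch, not part of the original patch: a minimal use of
// bulk_compare. The caller fills a reference buffer with the expected byte
// once, so the comparison can proceed in memcmp-sized chunks rather than
// byte by byte. The helper below is hypothetical.
static void
bulk_compare_example()
{
  char reference[64];
  memset(reference, 0x42, sizeof(reference));
  char* buf = (char*)malloc(256);
  memset(buf, 0x42, 256);
  // Every byte in [0, 256) matches the reference, checked 64 bytes at a time.
  bulk_compare(buf, 0, 256, reference, sizeof(reference));
  free(buf);
}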
// A range iterator for size classes between two given values.
class SizeClassesBetween
{
public:
SizeClassesBetween(size_t aStart, size_t aEnd)
: mStart(aStart)
, mEnd(aEnd)
{
}
class Iterator
{
public:
explicit Iterator(size_t aValue)
: mValue(malloc_good_size(aValue))
{
}
operator size_t() const { return mValue; }
size_t operator*() const { return mValue; }
Iterator& operator++()
{
mValue = malloc_good_size(mValue + 1);
return *this;
}
private:
size_t mValue;
};
Iterator begin() { return Iterator(mStart); }
Iterator end() { return Iterator(mEnd); }
private:
size_t mStart, mEnd;
};
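// Illustrative sketch, not part of the original patch: SizeClassesBetween
// visits each distinct size class exactly once, because
// malloc_good_size(mValue + 1) jumps directly to the next class. The loop
// below is hypothetical.
static void
size_classes_example()
{
  for (size_t size : SizeClassesBetween(1, 4_KiB)) {
    // Each yielded value is itself a size class, so rounding it again is a
    // no-op.
    EXPECT_EQ(size, malloc_good_size(size));
  }
}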
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + ((alignment) - 1)) & ~((alignment) - 1))
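// Worked example, not part of the original patch: ALIGNMENT_CEILING rounds
// its first argument up to the next multiple of a power-of-two alignment.
// With a 1 MiB alignment, 0x110000 (1 MiB + 64 KiB) rounds up to 0x200000
// (2 MiB): 0x110000 + 0xFFFFF = 0x20FFFF, and 0x20FFFF & ~0xFFFFF = 0x200000.
static_assert(ALIGNMENT_CEILING(0x110000, 0x100000) == 0x200000,
              "ALIGNMENT_CEILING rounds up to the next alignment multiple");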
static bool
IsSameRoundedHugeClass(size_t aSize1, size_t aSize2, jemalloc_stats_t& aStats)
{
return (aSize1 > aStats.large_max && aSize2 > aStats.large_max &&
ALIGNMENT_CEILING(aSize1, aStats.chunksize) ==
ALIGNMENT_CEILING(aSize2, aStats.chunksize));
}
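// For instance, assuming a 1 MiB chunk size and a large_max just below 1 MiB
// (the mozjemalloc defaults): 1.1_MiB and 1.6_MiB are both huge and both
// round up to 2 MiB, so they share a rounded huge class, while 1.1_MiB and
// 2.1_MiB (which rounds up to 3 MiB) do not.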
static bool
CanReallocInPlace(size_t aFromSize, size_t aToSize, jemalloc_stats_t& aStats)
{
if (aFromSize == malloc_good_size(aToSize)) {
// Same size class: in-place.
return true;
}
if (aFromSize >= aStats.page_size && aFromSize <= aStats.large_max &&
aToSize >= aStats.page_size && aToSize <= aStats.large_max) {
// Any large class to any large class: in-place when there is enough room.
return true;
}
if (IsSameRoundedHugeClass(aFromSize, aToSize, aStats)) {
// Huge sizes that round up to the same multiple of the chunk size:
// in-place.
return true;
}
return false;
}
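// Illustrative sketch, not part of the original patch: a few concrete
// outcomes of CanReallocInPlace. The helper below is hypothetical.
static void
can_realloc_in_place_examples(jemalloc_stats_t& aStats)
{
  // Same size class: shrinking from malloc_good_size(42) back to 42 stays
  // in place.
  EXPECT_TRUE(CanReallocInPlace(malloc_good_size(42), 42, aStats));
  // Large class to large class: in-place, assuming two pages is still below
  // large_max.
  EXPECT_TRUE(
    CanReallocInPlace(aStats.page_size, 2 * aStats.page_size, aStats));
  // Two different small classes: the allocation has to move.
  EXPECT_FALSE(CanReallocInPlace(16, aStats.page_size / 2, aStats));
}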
TEST(Jemalloc, InPlace)
{
jemalloc_stats_t stats;
jemalloc_stats(&stats);
// Using a separate arena, which is emptied after each iteration, ensures
// that in-place reallocation happens in every case where it can. This test
// is meant to alert developers that they may need to adapt other tests if
// they change the conditions for in-place reallocation.
arena_id_t arena = moz_create_arena();
for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
for (size_t to_size : sSizes) {
SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
char* ptr = (char*)moz_arena_malloc(arena, from_size);
char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
if (CanReallocInPlace(from_size, to_size, stats)) {
EXPECT_EQ(ptr, ptr2);
} else {
EXPECT_NE(ptr, ptr2);
}
moz_arena_free(arena, ptr2);
}
}
moz_dispose_arena(arena);
}
TEST(Jemalloc, JunkPoison)
{
jemalloc_stats_t stats;
jemalloc_stats(&stats);
// Create buffers in a separate arena, for faster comparisons with
// bulk_compare.
arena_id_t buf_arena = moz_create_arena();
char* junk_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
// Depending on its configuration, the allocator will either fill the
// requested allocation with the junk byte (0xe4) or with zeroes, or do
// nothing, in which case, since we're allocating in a fresh arena,
// we'll be getting zeroes.
char junk = stats.opt_junk ? '\xe4' : '\0';
for (size_t i = 0; i < stats.page_size; i++) {
ASSERT_EQ(junk_buf[i], junk);
}
// There are a few cases where we currently *don't* junk memory when
// junk is enabled, but *do* zero it when zeroing is enabled.
// TODO: we may want to change that.
char* zero_buf = (char*)moz_arena_calloc(buf_arena, stats.page_size, 1);
char* poison_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
memset(poison_buf, 0xe5, stats.page_size);
static const char fill = 0x42;
char* fill_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
memset(fill_buf, fill, stats.page_size);
arena_params_t params;
// Allow as many dirty pages in the arena as possible, so that purge never
// happens in it. Purge breaks some of the tests below randomly depending on
// what other things happen on other threads.
params.mMaxDirty = size_t(-1);
arena_id_t arena = moz_create_arena_with_params(&params);
// Allocating should junk the buffer, and freeing should poison the buffer.
for (size_t size : sSizes) {
if (size <= stats.large_max) {
SCOPED_TRACE(testing::Message() << "size = " << size);
char* buf = (char*)moz_arena_malloc(arena, size);
size_t allocated = moz_malloc_usable_size(buf);
if (stats.opt_junk || stats.opt_zero) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(buf, 0, allocated, junk_buf, stats.page_size));
}
moz_arena_free(arena, buf);
// We purposefully do a use-after-free here, to check that the data was
// poisoned.
ASSERT_NO_FATAL_FAILURE(
bulk_compare(buf, 0, allocated, poison_buf, stats.page_size));
}
}
// Shrinking within the same size class should happen in place and poison
// the memory between the new allocation size and the old one.
size_t prev = 0;
for (size_t size : SizeClassesBetween(1, 2 * stats.chunksize)) {
SCOPED_TRACE(testing::Message() << "size = " << size);
SCOPED_TRACE(testing::Message() << "prev = " << prev);
char* ptr = (char*)moz_arena_malloc(arena, size);
memset(ptr, fill, moz_malloc_usable_size(ptr));
char* ptr2 = (char*)moz_arena_realloc(arena, ptr, prev + 1);
ASSERT_EQ(ptr, ptr2);
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, prev + 1, fill_buf, stats.page_size));
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, prev + 1, size, poison_buf, stats.page_size));
moz_arena_free(arena, ptr);
prev = size;
}
// In-place realloc should junk the new bytes when growing and poison the old
// bytes when shrinking.
for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
for (size_t to_size : sSizes) {
SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
if (CanReallocInPlace(from_size, to_size, stats)) {
char* ptr = (char*)moz_arena_malloc(arena, from_size);
memset(ptr, fill, moz_malloc_usable_size(ptr));
char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
ASSERT_EQ(ptr, ptr2);
if (from_size >= to_size) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, to_size, fill_buf, stats.page_size));
// On Windows (MALLOC_DECOMMIT), in-place realloc of huge allocations
// decommits extra pages, so writing to them becomes an error.
#ifdef XP_WIN
if (to_size > stats.large_max) {
size_t page_limit = ALIGNMENT_CEILING(to_size, stats.page_size);
ASSERT_NO_FATAL_FAILURE(bulk_compare(
ptr, to_size, page_limit, poison_buf, stats.page_size));
ASSERT_DEATH_WRAP(ptr[page_limit] = 0, "");
} else
#endif
{
ASSERT_NO_FATAL_FAILURE(bulk_compare(
ptr, to_size, from_size, poison_buf, stats.page_size));
}
} else {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, from_size, fill_buf, stats.page_size));
if (stats.opt_junk && to_size <= stats.page_size) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, from_size, to_size, junk_buf, stats.page_size));
} else if (stats.opt_zero) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, from_size, to_size, zero_buf, stats.page_size));
}
}
moz_arena_free(arena, ptr2);
}
}
}
// Growing to a different size class should poison the old allocation,
// preserve the original bytes, and junk the new bytes in the new allocation.
for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
for (size_t to_size : sSizes) {
if (from_size < to_size && malloc_good_size(to_size) != from_size &&
!IsSameRoundedHugeClass(from_size, to_size, stats)) {
SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
char* ptr = (char*)moz_arena_malloc(arena, from_size);
memset(ptr, fill, moz_malloc_usable_size(ptr));
// Avoid in-place realloc by allocating a buffer that we expect to land
// right after the buffer we just received. Buffers smaller than the page
// size, and buffers as large as or larger than the largest large size
// class, can't be reallocated in place.
char* avoid_inplace = nullptr;
if (from_size >= stats.page_size && from_size < stats.large_max) {
avoid_inplace = (char*)moz_arena_malloc(arena, stats.page_size);
ASSERT_EQ(ptr + from_size, avoid_inplace);
}
char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
ASSERT_NE(ptr, ptr2);
if (from_size <= stats.large_max) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, from_size, poison_buf, stats.page_size));
}
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr2, 0, from_size, fill_buf, stats.page_size));
if (stats.opt_junk || stats.opt_zero) {
size_t rounded_to_size = malloc_good_size(to_size);
ASSERT_NE(to_size, rounded_to_size);
ASSERT_NO_FATAL_FAILURE(bulk_compare(
ptr2, from_size, rounded_to_size, junk_buf, stats.page_size));
}
moz_arena_free(arena, ptr2);
moz_arena_free(arena, avoid_inplace);
}
}
}
// Shrinking to a different size class should poison the old allocation,
// preserve the original bytes, and junk the extra bytes in the new
// allocation.
for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
for (size_t to_size : sSizes) {
if (from_size > to_size &&
!CanReallocInPlace(from_size, to_size, stats)) {
SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
char* ptr = (char*)moz_arena_malloc(arena, from_size);
memset(ptr, fill, from_size);
char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
ASSERT_NE(ptr, ptr2);
if (from_size <= stats.large_max) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, from_size, poison_buf, stats.page_size));
}
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr2, 0, to_size, fill_buf, stats.page_size));
if (stats.opt_junk || stats.opt_zero) {
size_t rounded_to_size = malloc_good_size(to_size);
ASSERT_NE(to_size, rounded_to_size);
ASSERT_NO_FATAL_FAILURE(bulk_compare(
ptr2, from_size, rounded_to_size, junk_buf, stats.page_size));
}
moz_arena_free(arena, ptr2);
}
}
}
moz_dispose_arena(arena);
moz_arena_free(buf_arena, poison_buf);
moz_arena_free(buf_arena, zero_buf);
moz_arena_free(buf_arena, junk_buf);
moz_dispose_arena(buf_arena);
}
#endif