Backed out 4 changesets (bug 1489572) for valgrind failures at js::jit::LIRGenerator::visitBlock.

Backed out changeset f3aa68c506e0 (bug 1489572)
Backed out changeset cbfcb4b2b5c9 (bug 1489572)
Backed out changeset 683e3e0eaee1 (bug 1489572)
Backed out changeset f5973de3f6e8 (bug 1489572)
Cosmin Sabou 2018-10-23 21:48:50 +03:00
Parent b9e4308d69
Commit 1527095aea
4 changed files: 115 additions and 312 deletions

View file

@@ -27,6 +27,7 @@ namespace detail {
UniquePtr<BumpChunk>
BumpChunk::newWithCapacity(size_t size)
{
MOZ_DIAGNOSTIC_ASSERT(RoundUpPow2(size) == size);
MOZ_DIAGNOSTIC_ASSERT(size >= sizeof(BumpChunk));
void* mem = js_malloc(size);
if (!mem) {
@@ -113,17 +114,12 @@ LifoAlloc::reset(size_t defaultChunkSize)
while (!chunks_.empty()) {
chunks_.popFirst();
}
while (!oversize_.empty()) {
oversize_.popFirst();
}
while (!unused_.empty()) {
unused_.popFirst();
}
defaultChunkSize_ = defaultChunkSize;
oversizeThreshold_ = defaultChunkSize;
markCount = 0;
curSize_ = 0;
oversizeSize_ = 0;
}
void
@@ -133,11 +129,6 @@ LifoAlloc::freeAll()
UniqueBumpChunk bc = chunks_.popFirst();
decrementCurSize(bc->computedSizeOfIncludingThis());
}
while (!oversize_.empty()) {
UniqueBumpChunk bc = oversize_.popFirst();
decrementCurSize(bc->computedSizeOfIncludingThis());
oversizeSize_ -= bc->computedSizeOfIncludingThis();
}
while (!unused_.empty()) {
UniqueBumpChunk bc = unused_.popFirst();
decrementCurSize(bc->computedSizeOfIncludingThis());
@@ -146,40 +137,10 @@ LifoAlloc::freeAll()
// Nb: maintaining curSize_ correctly isn't easy. Fortunately, this is an
// excellent sanity check.
MOZ_ASSERT(curSize_ == 0);
MOZ_ASSERT(oversizeSize_ == 0);
}
// Round at the same page granularity used by malloc.
static size_t
MallocGoodSize(size_t aSize)
{
# if defined(MOZ_MEMORY)
return malloc_good_size(aSize);
# else
return aSize;
# endif
}
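// For instance, if the allocator's next size class above a 4100-byte request
// were 5120 bytes, malloc_good_size(4100) would return 5120, so requesting
// the rounded size up front turns the slop into usable chunk capacity
// (illustrative numbers; the classes depend on the malloc implementation).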
// Heuristic to choose the size of the next BumpChunk for small allocations.
// `start` is the size of the first chunk. `used` is the total size of all
// BumpChunks in this LifoAlloc so far.
static size_t
NextSize(size_t start, size_t used)
{
// Double the size, up to 1 MB.
const size_t mb = 1 * 1024 * 1024;
if (used < mb) {
return Max(start, used);
}
// After 1 MB, grow more gradually, to waste less memory.
// The sequence (in megabytes) begins:
// 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
return JS_ROUNDUP(used / 8, mb);
}
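// Worked example, assuming a 4 KB first chunk (numbers illustrative):
//   NextSize(4 KB, used = 0)      -> 4 KB    (first chunk)
//   NextSize(4 KB, used = 4 KB)   -> 4 KB    (total doubles to 8 KB)
//   NextSize(4 KB, used = 512 KB) -> 512 KB  (still doubling)
//   NextSize(4 KB, used = 9 MB)   -> JS_ROUNDUP(9 MB / 8, 1 MB) = 2 MB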
LifoAlloc::UniqueBumpChunk
LifoAlloc::newChunkWithCapacity(size_t n, bool oversize)
LifoAlloc::newChunkWithCapacity(size_t n)
{
MOZ_ASSERT(fallibleScope_, "[OOM] Cannot allocate a new chunk in an infallible scope.");
@@ -193,10 +154,9 @@ LifoAlloc::newChunkWithCapacity(size_t n, bool oversize)
return nullptr;
}
MOZ_ASSERT(curSize_ >= oversizeSize_);
const size_t chunkSize = (oversize || minSize > defaultChunkSize_)
? MallocGoodSize(minSize)
: NextSize(defaultChunkSize_, curSize_ - oversizeSize_);
const size_t chunkSize = minSize > defaultChunkSize_
? RoundUpPow2(minSize)
: defaultChunkSize_;
// Create a new BumpChunk, and allocate space for it.
UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
@@ -207,7 +167,7 @@ LifoAlloc::newChunkWithCapacity(size_t n, bool oversize)
return result;
}
LifoAlloc::UniqueBumpChunk
bool
LifoAlloc::getOrCreateChunk(size_t n)
{
// Look for existing unused BumpChunks to satisfy the request, and pick the
@@ -215,7 +175,8 @@ LifoAlloc::getOrCreateChunk(size_t n)
// chunks.
if (!unused_.empty()) {
if (unused_.begin()->canAlloc(n)) {
return unused_.popFirst();
chunks_.append(unused_.popFirst());
return true;
}
BumpChunkList::Iterator e(unused_.end());
@@ -224,170 +185,24 @@ LifoAlloc::getOrCreateChunk(size_t n)
MOZ_ASSERT(elem->empty());
if (elem->canAlloc(n)) {
BumpChunkList temp = unused_.splitAfter(i.get());
UniqueBumpChunk newChunk = temp.popFirst();
chunks_.append(temp.popFirst());
unused_.appendAll(std::move(temp));
return newChunk;
return true;
}
}
}
// Allocate a new BumpChunk with enough space for the next allocation.
UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
if (!newChunk) {
return newChunk;
}
size_t size = newChunk->computedSizeOfIncludingThis();
incrementCurSize(size);
return newChunk;
}
void*
LifoAlloc::allocImplColdPath(size_t n)
{
void* result;
UniqueBumpChunk newChunk = getOrCreateChunk(n);
if (!newChunk) {
return nullptr;
}
// Since we just created a large enough chunk, this can't fail.
chunks_.append(std::move(newChunk));
result = chunks_.last()->tryAlloc(n);
MOZ_ASSERT(result);
return result;
}
void*
LifoAlloc::allocImplOversize(size_t n)
{
void* result;
UniqueBumpChunk newChunk = newChunkWithCapacity(n, true);
if (!newChunk) {
return nullptr;
}
incrementCurSize(newChunk->computedSizeOfIncludingThis());
oversizeSize_ += newChunk->computedSizeOfIncludingThis();
// Since we just created a large enough chunk, this can't fail.
oversize_.append(std::move(newChunk));
result = oversize_.last()->tryAlloc(n);
MOZ_ASSERT(result);
return result;
}
bool
LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total)
{
for (detail::BumpChunk& bc : unused_) {
total += bc.unused();
if (total >= n) {
return true;
}
}
UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
UniqueBumpChunk newChunk = newChunkWithCapacity(n);
if (!newChunk) {
return false;
}
size_t size = newChunk->computedSizeOfIncludingThis();
unused_.pushFront(std::move(newChunk));
chunks_.append(std::move(newChunk));
incrementCurSize(size);
return true;
}
LifoAlloc::Mark
LifoAlloc::mark()
{
markCount++;
Mark res;
if (!chunks_.empty()) {
res.chunk = chunks_.last()->mark();
}
if (!oversize_.empty()) {
res.oversize = oversize_.last()->mark();
}
return res;
}
void
LifoAlloc::release(Mark mark)
{
markCount--;
#ifdef DEBUG
auto assertIsContained = [](const detail::BumpChunk::Mark& m, BumpChunkList& list) {
if (m.markedChunk()) {
bool contained = false;
for (const detail::BumpChunk& chunk : list) {
if (&chunk == m.markedChunk() && chunk.contains(m)) {
contained = true;
break;
}
}
MOZ_ASSERT(contained);
}
};
assertIsContained(mark.chunk, chunks_);
assertIsContained(mark.oversize, oversize_);
#endif
BumpChunkList released;
auto cutAtMark = [&released](const detail::BumpChunk::Mark& m, BumpChunkList& list) {
// Move the blocks which are after the mark to the set of released chunks.
if (!m.markedChunk()) {
released = std::move(list);
} else {
released = list.splitAfter(m.markedChunk());
}
// Release everything which follows the mark in the last chunk.
if (!list.empty()) {
list.last()->release(m);
}
};
// Release the content of all the blocks which are after the marks, and keep
// blocks as unused.
cutAtMark(mark.chunk, chunks_);
for (detail::BumpChunk& bc : released) {
bc.release();
}
unused_.appendAll(std::move(released));
// Free the content of all the blocks which are after the marks.
cutAtMark(mark.oversize, oversize_);
while (!released.empty()) {
UniqueBumpChunk bc = released.popFirst();
decrementCurSize(bc->computedSizeOfIncludingThis());
oversizeSize_ -= bc->computedSizeOfIncludingThis();
}
}
void
LifoAlloc::steal(LifoAlloc* other)
{
MOZ_ASSERT(!other->markCount);
MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
MOZ_DIAGNOSTIC_ASSERT(oversize_.empty());
// Copy everything from |other| to |this| except for |peakSize_|, which
// requires some care.
chunks_ = std::move(other->chunks_);
oversize_ = std::move(other->oversize_);
unused_ = std::move(other->unused_);
markCount = other->markCount;
defaultChunkSize_ = other->defaultChunkSize_;
oversizeThreshold_ = other->oversizeThreshold_;
curSize_ = other->curSize_;
peakSize_ = Max(peakSize_, other->peakSize_);
oversizeSize_ = other->oversizeSize_;
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
fallibleScope_ = other->fallibleScope_;
#endif
other->reset(defaultChunkSize_);
}
void
LifoAlloc::transferFrom(LifoAlloc* other)
{
@@ -395,12 +210,9 @@ LifoAlloc::transferFrom(LifoAlloc* other)
MOZ_ASSERT(!other->markCount);
incrementCurSize(other->curSize_);
oversizeSize_ += other->oversizeSize_;
appendUnused(std::move(other->unused_));
appendUsed(std::move(other->chunks_));
oversize_.appendAll(std::move(other->oversize_));
other->curSize_ = 0;
other->oversizeSize_ = 0;
}
void
@@ -417,33 +229,3 @@ LifoAlloc::transferUnusedFrom(LifoAlloc* other)
incrementCurSize(size);
other->decrementCurSize(size);
}
#ifdef LIFO_CHUNK_PROTECT
void
LifoAlloc::setReadOnly()
{
for (detail::BumpChunk& bc : chunks_) {
bc.setReadOnly();
}
for (detail::BumpChunk& bc : oversize_) {
bc.setReadOnly();
}
for (detail::BumpChunk& bc : unused_) {
bc.setReadOnly();
}
}
void
LifoAlloc::setReadWrite()
{
for (detail::BumpChunk& bc : chunks_) {
bc.setReadWrite();
}
for (detail::BumpChunk& bc : oversize_) {
bc.setReadWrite();
}
for (detail::BumpChunk& bc : unused_) {
bc.setReadWrite();
}
}
#endif
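Taken together, the restored implementation keeps a single chunks_ list for live data, an unused_ list for recycling, and mark/release to unwind allocations in LIFO order. A minimal usage sketch (names and sizes are illustrative, error handling abbreviated):

    LifoAlloc alloc(4 * 1024);           // default chunk size, a power of two
    LifoAlloc::Mark m = alloc.mark();    // remember the current allocation point
    void* p = alloc.alloc(64);           // bump allocation; returns nullptr on OOM
    if (!p) {
        // report OOM to the caller
    }
    alloc.release(m);                    // chunks past the mark return to unused_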

View file

@@ -379,7 +379,7 @@ class BumpChunk : public SingleLinkedListElement<BumpChunk>
uint8_t* bump_;
friend class BumpChunk;
Mark(BumpChunk* chunk, uint8_t* bump)
explicit Mark(BumpChunk* chunk, uint8_t* bump)
: chunk_(chunk), bump_(bump)
{}
@@ -527,29 +527,19 @@ class LifoAlloc
using UniqueBumpChunk = js::UniquePtr<detail::BumpChunk>;
using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;
// List of chunks containing allocated data of size smaller than the default
// chunk size. In the common case, the last chunk of this list is always
// used to perform the allocations. When the allocation cannot be performed,
// we move a Chunk from the unused set to the list of used chunks.
// List of chunks containing allocated data. In the common case, the last
// chunk of this list is always used to perform the allocations. When the
// allocation cannot be performed, we move a Chunk from the unused set to
// the list of used chunks.
BumpChunkList chunks_;
// List of chunks containing allocated data where each allocation is larger
// than the oversize threshold. Each chunk contains exactly one allocation.
// This reduces wasted space in the normal chunk list.
//
// Oversize chunks are allocated on demand and freed as soon as they are
// released, instead of being pushed to the unused list.
BumpChunkList oversize_;
// Set of unused chunks, which can be reused for future allocations.
BumpChunkList unused_;
size_t markCount;
size_t defaultChunkSize_;
size_t oversizeThreshold_;
size_t curSize_;
size_t peakSize_;
size_t oversizeSize_;
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
bool fallibleScope_;
#endif
@@ -558,11 +548,11 @@ class LifoAlloc
LifoAlloc(const LifoAlloc&) = delete;
// Return a BumpChunk that can perform an allocation of at least size |n|.
UniqueBumpChunk newChunkWithCapacity(size_t n, bool oversize);
UniqueBumpChunk newChunkWithCapacity(size_t n);
// Reuse or allocate a BumpChunk that can perform an allocation of at least
// size |n|; if successful, it is placed at the end of the list of |chunks_|.
UniqueBumpChunk getOrCreateChunk(size_t n);
MOZ_MUST_USE bool getOrCreateChunk(size_t n);
void reset(size_t defaultChunkSize);
@@ -594,25 +584,22 @@ class LifoAlloc
curSize_ -= size;
}
void* allocImplColdPath(size_t n);
void* allocImplOversize(size_t n);
MOZ_ALWAYS_INLINE
void* allocImpl(size_t n) {
void* result;
// Give oversized allocations their own chunk instead of wasting space
// due to fragmentation at the end of a normal chunk.
if (MOZ_UNLIKELY(n > oversizeThreshold_)) {
return allocImplOversize(n);
}
if (MOZ_LIKELY(!chunks_.empty() && (result = chunks_.last()->tryAlloc(n)))) {
if (!chunks_.empty() && (result = chunks_.last()->tryAlloc(n))) {
return result;
}
return allocImplColdPath(n);
}
// Check for space in unused chunks or allocate a new unused chunk.
MOZ_MUST_USE bool ensureUnusedApproximateColdPath(size_t n, size_t total);
if (!getOrCreateChunk(n)) {
return nullptr;
}
// Since we just created a large enough chunk, this can't fail.
result = chunks_.last()->tryAlloc(n);
MOZ_ASSERT(result);
return result;
}
public:
explicit LifoAlloc(size_t defaultChunkSize)
@@ -624,18 +611,26 @@ class LifoAlloc
reset(defaultChunkSize);
}
// Set the threshold to allocate data in its own chunk outside the space for
// small allocations.
void disableOversize() {
oversizeThreshold_ = SIZE_MAX;
}
void setOversizeThreshold(size_t oversizeThreshold) {
MOZ_ASSERT(oversizeThreshold <= defaultChunkSize_);
oversizeThreshold_ = oversizeThreshold;
}
// Steal allocated chunks from |other|.
void steal(LifoAlloc* other);
void steal(LifoAlloc* other) {
MOZ_ASSERT(!other->markCount);
MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
// Copy everything from |other| to |this| except for |peakSize_|, which
// requires some care.
chunks_ = std::move(other->chunks_);
unused_ = std::move(other->unused_);
markCount = other->markCount;
defaultChunkSize_ = other->defaultChunkSize_;
curSize_ = other->curSize_;
peakSize_ = Max(peakSize_, other->peakSize_);
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
fallibleScope_ = other->fallibleScope_;
#endif
other->reset(defaultChunkSize_);
}
// Append all chunks from |other|. They are removed from |other|.
void transferFrom(LifoAlloc* other);
@@ -671,7 +666,7 @@ class LifoAlloc
template<typename T, typename... Args>
MOZ_ALWAYS_INLINE T*
newWithSize(size_t n, Args&&... args)
allocInSize(size_t n, Args&&... args)
{
MOZ_ASSERT(n >= sizeof(T), "must request enough space to store a T");
static_assert(alignof(T) <= detail::LIFO_ALLOC_ALIGN,
@@ -708,7 +703,21 @@ class LifoAlloc
}
}
return ensureUnusedApproximateColdPath(n, total);
for (detail::BumpChunk& bc : unused_) {
total += bc.unused();
if (total >= n) {
return true;
}
}
UniqueBumpChunk newChunk = newChunkWithCapacity(n);
if (!newChunk) {
return false;
}
size_t size = newChunk->computedSizeOfIncludingThis();
unused_.pushFront(std::move(newChunk));
incrementCurSize(size);
return true;
}
MOZ_ALWAYS_INLINE
@@ -760,13 +769,38 @@ class LifoAlloc
return static_cast<T*>(alloc(bytes));
}
class Mark {
friend class LifoAlloc;
detail::BumpChunk::Mark chunk;
detail::BumpChunk::Mark oversize;
};
Mark mark();
void release(Mark mark);
using Mark = detail::BumpChunk::Mark;
Mark mark() {
markCount++;
if (chunks_.empty()) {
return Mark();
}
return chunks_.last()->mark();
}
void release(Mark mark) {
markCount--;
// Move the blocks which are after the mark to the set of unused chunks.
BumpChunkList released;
if (!mark.markedChunk()) {
released = std::move(chunks_);
} else {
released = chunks_.splitAfter(mark.markedChunk());
}
// Release the content of all the blocks which are after the mark.
for (detail::BumpChunk& bc : released) {
bc.release();
}
unused_.appendAll(std::move(released));
// Release everything which follows the mark in the last chunk.
if (!chunks_.empty()) {
chunks_.last()->release(mark);
}
}
void releaseAll() {
MOZ_ASSERT(!markCount);
@@ -774,19 +808,26 @@ class LifoAlloc
bc.release();
}
unused_.appendAll(std::move(chunks_));
// On release, we free any oversize allocations instead of keeping them
// in unused chunks.
while (!oversize_.empty()) {
UniqueBumpChunk bc = oversize_.popFirst();
decrementCurSize(bc->computedSizeOfIncludingThis());
oversizeSize_ -= bc->computedSizeOfIncludingThis();
}
}
// Protect the content of the LifoAlloc chunks.
#ifdef LIFO_CHUNK_PROTECT
void setReadOnly();
void setReadWrite();
void setReadOnly() {
for (detail::BumpChunk& bc : chunks_) {
bc.setReadOnly();
}
for (detail::BumpChunk& bc : unused_) {
bc.setReadOnly();
}
}
void setReadWrite() {
for (detail::BumpChunk& bc : chunks_) {
bc.setReadWrite();
}
for (detail::BumpChunk& bc : unused_) {
bc.setReadWrite();
}
}
#else
void setReadOnly() const {}
void setReadWrite() const {}
@@ -803,10 +844,8 @@ class LifoAlloc
// Return true if the LifoAlloc does not currently contain any allocations.
bool isEmpty() const {
bool empty = chunks_.empty() ||
(chunks_.begin() == chunks_.last() && chunks_.last()->empty());
MOZ_ASSERT_IF(!oversize_.empty(), !oversize_.last()->empty());
return empty && oversize_.empty();
return chunks_.empty() ||
(chunks_.begin() == chunks_.last() && chunks_.last()->empty());
}
// Return the number of bytes remaining to allocate in the current chunk.
@@ -824,9 +863,6 @@ class LifoAlloc
for (const detail::BumpChunk& chunk : chunks_) {
n += chunk.sizeOfIncludingThis(mallocSizeOf);
}
for (const detail::BumpChunk& chunk : oversize_) {
n += chunk.sizeOfIncludingThis(mallocSizeOf);
}
for (const detail::BumpChunk& chunk : unused_) {
n += chunk.sizeOfIncludingThis(mallocSizeOf);
}
@@ -839,9 +875,6 @@ class LifoAlloc
for (const detail::BumpChunk& chunk : chunks_) {
n += chunk.computedSizeOfIncludingThis();
}
for (const detail::BumpChunk& chunk : oversize_) {
n += chunk.computedSizeOfIncludingThis();
}
for (const detail::BumpChunk& chunk : unused_) {
n += chunk.computedSizeOfIncludingThis();
}
@@ -874,11 +907,6 @@ class LifoAlloc
return true;
}
}
for (const detail::BumpChunk& chunk : oversize_) {
if (chunk.contains(ptr)) {
return true;
}
}
return false;
}
#endif
@@ -919,7 +947,6 @@ class LifoAlloc
chunkEnd_(alloc.chunks_.end()),
head_(nullptr)
{
MOZ_RELEASE_ASSERT(alloc.oversize_.empty());
if (chunkIt_ != chunkEnd_) {
head_ = chunkIt_->begin();
}
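Earlier in this header, the cold path of ensureUnusedApproximate was inlined back in place of the removed ensureUnusedApproximateColdPath. A sketch of the caller-side reservation pattern it supports, assuming a fallible caller and an illustrative |bytes| count:

    // Reserve roughly |bytes| of free space before a burst of small
    // allocations, so the subsequent alloc() calls rarely hit the cold path.
    if (!alloc.ensureUnusedApproximate(bytes)) {
        return false;   // propagate OOM
    }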

View file

@@ -1938,7 +1938,7 @@ NewEmptyBindingData(JSContext* cx, LifoAlloc& alloc, uint32_t numBindings)
{
using Data = typename Scope::Data;
size_t allocSize = SizeOfData<typename Scope::Data>(numBindings);
auto* bindings = alloc.newWithSize<Data>(allocSize, numBindings);
auto* bindings = alloc.allocInSize<Data>(allocSize, numBindings);
if (!bindings) {
ReportOutOfMemory(cx);
}
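The renamed allocInSize helper packs a header object and its trailing array into a single LifoAlloc block. A minimal sketch with a hypothetical trailing-array type (NameList and its layout are illustrative, not the real Scope::Data):

    struct NameList {
        uint32_t length;
        explicit NameList(uint32_t length) : length(length) {}
        // Entries live in the trailing bytes of the same allocation.
    };

    size_t bytes = sizeof(NameList) + numBindings * sizeof(uintptr_t);
    auto* list = alloc.allocInSize<NameList>(bytes, numBindings);
    if (!list) {
        ReportOutOfMemory(cx);
    }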

View file

@@ -153,12 +153,6 @@ class StoreBuffer
MOZ_ASSERT(!head_);
if (!storage_) {
storage_ = js_new<LifoAlloc>(LifoAllocBlockSize);
// This prevents LifoAlloc::Enum from crashing with a release
// assertion if we ever allocate one entry larger than
// LifoAllocBlockSize.
if (storage_) {
storage_->disableOversize();
}
}
clear();
return bool(storage_);
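With the oversize list gone, an entry larger than LifoAllocBlockSize simply gets a dedicated power-of-two chunk appended to chunks_ (see newChunkWithCapacity above), where LifoAlloc::Enum can still reach it, so the disableOversize guard has nothing left to protect against. Roughly (sizes illustrative):

    // With an 8 KB default chunk size, a 12 KB entry takes the minSize branch:
    //   chunkSize = RoundUpPow2(12 KB + chunk header) = 16 KB
    // and the chunk lands on chunks_, where Enum iteration will find it.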