Bug 1582992 - Removed BlocksRingBuffer's unneeded entry destructor, and ProfileBuffer's JS::AutoSuppressGCAnalysis - r=gregtatum

`BlocksRingBuffer` had an "entry destructor" to make it a more generic
container, and it was useful during early prototyping of the new profiler
storage (so that we could store owning pointers).
But this entry destructor is stored in an `std::function`, which gets marked
as a potential GC caller by the JS rooting-hazard analysis; and as this bug
showed, it's not obvious where to place `JS::AutoSuppressGCAnalysis`, because
profiler entries (including stacks) may be added on one thread while GC
happens on another, which trips the embedded `AutoAssertNoGC` check.

Since we don't actually use the entry destructor facility in the profiler, it's
easier to just get rid of it. As a bonus, it's a small optimization.
Tests that were using an entry destructor now use the `State` instead, to verify
that entries are pushed and cleared as expected.
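
As an illustration, here is a minimal sketch of that `State`-based check,
using only API that appears in the diff below (`GetState()` and the
`mPushedBlockCount`/`mClearedBlockCount` counters); it is a simplified
stand-in for the tests' `VERIFY_START_END_PUSHED_CLEARED` macro, not the
actual test code, and assumes the surrounding test file's includes:

  // Sketch only: verify pushes/clears through State, no entry destructor.
  BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex,
                      MakePowerOfTwo32<4096>());
  rb.PutObject(uint32_t(1));
  rb.PutObject(uint32_t(2));
  BlocksRingBuffer::State state = rb.GetState();
  MOZ_RELEASE_ASSERT(state.mPushedBlockCount == 2);
  MOZ_RELEASE_ASSERT(state.mClearedBlockCount == 0);
  rb.Clear();
  state = rb.GetState();
  MOZ_RELEASE_ASSERT(state.mClearedBlockCount == 2);  // all pushed entries cleared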

If needed in the future outside of the profiler, `BlocksRingBuffer` could again
include an entry destructor, but it would have to be through templating, so that
the class used in the profiler wouldn't contain an `std::function`.
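
For concreteness, a hypothetical sketch of that templated shape (the names
below are illustrative only and do not exist in the tree): the destructor is
a compile-time policy, so the default no-op policy stores no `std::function`
and the hazard analysis has nothing to flag:

  // Hypothetical sketch, not Mozilla code: entry destructor as a template
  // policy instead of a stored std::function.
  #include <cstdint>
  #include <cstdio>

  struct NoEntryDestructor {
    void operator()(uint32_t) const {}  // no-op, optimized away
  };

  template <typename EntryDestructor = NoEntryDestructor>
  class TinyRing {
   public:
    explicit TinyRing(EntryDestructor aDestructor = EntryDestructor{})
        : mDestructor(aDestructor) {}

    void Push(uint32_t aValue) {
      if (mFull) {
        // About to overwrite the oldest entry: invoke the policy. With the
        // default NoEntryDestructor this call compiles to nothing.
        mDestructor(mEntry);
      }
      mEntry = aValue;
      mFull = true;
    }

   private:
    EntryDestructor mDestructor;  // empty object for the no-op policy
    uint32_t mEntry = 0;
    bool mFull = false;
  };

  int main() {
    TinyRing<> profilerStyle;  // no destructor machinery at all
    profilerStyle.Push(1);
    profilerStyle.Push(2);  // silently overwrites 1

    auto log = [](uint32_t aOld) {
      std::printf("overwriting %u\n", unsigned(aOld));
    };
    TinyRing<decltype(log)> logged(log);  // opt-in callback, no std::function
    logged.Push(3);
    logged.Push(4);  // prints "overwriting 3"
    return 0;
  }

With this shape, the profiler's instantiation would simply use the no-op
default, as described above.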

Differential Revision: https://phabricator.services.mozilla.com/D46738

--HG--
extra : moz-landing-system : lando
Gerald Squelart 2019-09-23 20:50:20 +00:00
Parent 6aca87baed
Commit 16dbd4f365
3 changed files: 62 additions and 247 deletions

View file

@ -70,16 +70,6 @@ namespace mozilla {
// (`BlockIndex` is an opaque type preventing the user from modifying it). That
// index may later be used to get back to that particular entry if it still
// exists.
//
// The caller may register an "entry destructor" function on creation, which
// will be invoked on entries that are about to be removed, which may be:
// - Entry being overwritten by new data.
// - When the caller is explicitly `Clear()`ing parts of the buffer.
// - When the buffer is destroyed.
// Note that this means the caller's provided entry destructor may be invoked
// from inside of another of the caller's functions, so be ready for this
// re-entrancy; e.g., the entry destructor should not lock a non-recursive mutex
// that buffer-writing/clearing functions may also lock!
class BlocksRingBuffer {
// Near-infinite index type, not expecting overflow.
using Index = uint64_t;
@ -183,9 +173,6 @@ class BlocksRingBuffer {
explicit BlocksRingBuffer(ThreadSafety aThreadSafety)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex) {}
// Constructors with no entry destructor, the oldest entries will be silently
// overwritten/destroyed.
// Create a buffer of the given length.
explicit BlocksRingBuffer(ThreadSafety aThreadSafety,
PowerOfTwo<Length> aLength)
@ -207,52 +194,10 @@ class BlocksRingBuffer {
mMaybeUnderlyingBuffer(
Some(UnderlyingBuffer(aExternalBuffer, aLength))) {}
// Constructors with an entry destructor, which will be called with an
// `EntryReader` before the oldest entries get overwritten/destroyed.
// Note that this entry destructor may be invoked from another caller's
// function that writes/clears data, be aware of this re-entrancy! (Details
// above class.)
// Create a buffer of the given length.
template <typename EntryDestructor>
explicit BlocksRingBuffer(ThreadSafety aThreadSafety,
PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex),
mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
aLength, std::forward<EntryDestructor>(aEntryDestructor)))) {}
// Take ownership of an existing buffer.
template <typename EntryDestructor>
explicit BlocksRingBuffer(ThreadSafety aThreadSafety,
UniquePtr<Buffer::Byte[]> aExistingBuffer,
PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex),
mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
std::move(aExistingBuffer), aLength,
std::forward<EntryDestructor>(aEntryDestructor)))) {}
// Use an externally-owned buffer.
template <typename EntryDestructor>
explicit BlocksRingBuffer(ThreadSafety aThreadSafety,
Buffer::Byte* aExternalBuffer,
PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex),
mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
aExternalBuffer, aLength,
std::forward<EntryDestructor>(aEntryDestructor)))) {}
// Destructor explictly destroys all remaining entries, this may invoke the
// caller-provided entry destructor.
~BlocksRingBuffer() {
#ifdef DEBUG
// Needed because of lock DEBUG-check in `DestroyAllEntries()`.
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
#endif // DEBUG
DestroyAllEntries();
}
// Destructor doesn't need to do anything special. (Clearing entries would
// only update indices and stats, which won't be accessible after the object
// is destroyed anyway.)
~BlocksRingBuffer() = default;
// Remove underlying buffer, if any.
void Reset() {
@ -282,37 +227,6 @@ class BlocksRingBuffer {
mMaybeUnderlyingBuffer.emplace(aExternalBuffer, aLength);
}
// Create a buffer of the given length, with entry destructor.
template <typename EntryDestructor>
void Set(PowerOfTwo<Length> aLength, EntryDestructor&& aEntryDestructor) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
ResetUnderlyingBuffer();
mMaybeUnderlyingBuffer.emplace(
aLength, std::forward<EntryDestructor>(aEntryDestructor));
}
// Take ownership of an existing buffer, with entry destructor.
template <typename EntryDestructor>
void Set(UniquePtr<Buffer::Byte[]> aExistingBuffer,
PowerOfTwo<Length> aLength, EntryDestructor&& aEntryDestructor) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
ResetUnderlyingBuffer();
mMaybeUnderlyingBuffer.emplace(
std::move(aExistingBuffer), aLength,
std::forward<EntryDestructor>(aEntryDestructor));
}
// Use an externally-owned buffer, with entry destructor.
template <typename EntryDestructor>
void Set(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
ResetUnderlyingBuffer();
mMaybeUnderlyingBuffer.emplace(
aExternalBuffer, aLength,
std::forward<EntryDestructor>(aEntryDestructor));
}
bool IsThreadSafe() const { return mMutex.IsActivated(); }
// Lock the buffer mutex and run the provided callback.
@ -334,8 +248,6 @@ class BlocksRingBuffer {
}
// Size of external resources.
// Note: `mEntryDestructor`'s potential external data (for its captures) is
// not included, as it's hidden in the `std::function` implementation.
size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
if (!mMaybeUnderlyingBuffer) {
return 0;
@ -472,7 +384,7 @@ class BlocksRingBuffer {
BlockIndex BufferRangeEnd() const { return mRing.mNextWriteIndex; }
// Get another entry based on a {Current,Next}BlockIndex(). This may fail if
// the buffer has already looped around and destroyed that block, or for the
// the buffer has already looped around and cleared that block, or for the
// one-past-the-end index.
Maybe<EntryReader> GetEntryAt(BlockIndex aBlockIndex) {
// Don't accept a not-yet-written index.
@ -781,7 +693,7 @@ class BlocksRingBuffer {
BlockIndex BufferRangeEnd() const { return mRing.mNextWriteIndex; }
// Get another entry based on a {Current,Next}BlockIndex(). This may fail if
// the buffer has already looped around and destroyed that block.
// the buffer has already looped around and cleared that block.
Maybe<EntryReader> GetEntryAt(BlockIndex aBlockIndex) {
// Don't accept a not-yet-written index.
MOZ_RELEASE_ASSERT(aBlockIndex < BufferRangeEnd());
@ -859,10 +771,6 @@ class BlocksRingBuffer {
while (blockEnd > Index(mFirstReadIndex) + bufferBytes) {
// About to trample on an old block.
EntryReader reader = ReaderInBlockAt(mFirstReadIndex);
// Call provided entry destructor for that entry.
if (mMaybeUnderlyingBuffer->mEntryDestructor) {
mMaybeUnderlyingBuffer->mEntryDestructor(reader);
}
mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
MOZ_ASSERT(reader.CurrentIndex() <= Index(reader.NextBlockIndex()));
// Move the buffer reading start past this cleared block.
@ -966,10 +874,6 @@ class BlocksRingBuffer {
while (dstEndIndex > Index(mFirstReadIndex) + bufferBytes) {
// About to trample on an old block.
EntryReader reader = ReaderInBlockAt(mFirstReadIndex);
// Call provided entry destructor for that entry.
if (mMaybeUnderlyingBuffer->mEntryDestructor) {
mMaybeUnderlyingBuffer->mEntryDestructor(reader);
}
mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
MOZ_ASSERT(reader.CurrentIndex() <= Index(reader.NextBlockIndex()));
// Move the buffer reading start past this cleared block.
@ -996,16 +900,15 @@ class BlocksRingBuffer {
return BlockIndex(dstStartIndex);
}
// Clear all entries, calling entry destructor (if any), and move read index
// to the end so that these entries cannot be read anymore.
// Clear all entries: Move read index to the end so that these entries cannot
// be read anymore.
void Clear() {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
ClearAllEntries();
}
// Clear all entries strictly before aBlockIndex, calling calling entry
// destructor (if any), and move read index to the end so that these entries
// cannot be read anymore.
// Clear all entries strictly before aBlockIndex, and move read index to the
// end so that these entries cannot be read anymore.
void ClearBefore(BlockIndex aBlockIndex) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
if (!mMaybeUnderlyingBuffer) {
@ -1024,27 +927,14 @@ class BlocksRingBuffer {
}
// Otherwise we need to clear a subset of entries.
AssertBlockIndexIsValid(aBlockIndex);
if (mMaybeUnderlyingBuffer->mEntryDestructor) {
// We have an entry destructor, destroy entries before aBlockIndex.
Reader reader(*this);
BlockIterator it = reader.begin();
for (; it.CurrentBlockIndex() < aBlockIndex; ++it) {
MOZ_ASSERT(it.CurrentBlockIndex() < reader.end().CurrentBlockIndex());
EntryReader reader = *it;
mMaybeUnderlyingBuffer->mEntryDestructor(reader);
mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
}
MOZ_ASSERT(it.CurrentBlockIndex() == aBlockIndex);
} else {
// No entry destructor, just count skipped entries.
Reader reader(*this);
BlockIterator it = reader.begin();
for (; it.CurrentBlockIndex() < aBlockIndex; ++it) {
MOZ_ASSERT(it.CurrentBlockIndex() < reader.end().CurrentBlockIndex());
mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
}
MOZ_ASSERT(it.CurrentBlockIndex() == aBlockIndex);
// Just count skipped entries.
Reader reader(*this);
BlockIterator it = reader.begin();
for (; it.CurrentBlockIndex() < aBlockIndex; ++it) {
MOZ_ASSERT(it.CurrentBlockIndex() < reader.end().CurrentBlockIndex());
mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
}
MOZ_ASSERT(it.CurrentBlockIndex() == aBlockIndex);
// Move read index to given index, so there's effectively no more entries
// before.
mFirstReadIndex = aBlockIndex;
@ -1130,45 +1020,27 @@ class BlocksRingBuffer {
return EntryReader(*this, aBlockIndex);
}
// Call entry destructor (if any) on all entries.
// Note: The read index is not moved; this should only be called from the
// destructor or ClearAllEntries.
void DestroyAllEntries() {
mMutex.AssertCurrentThreadOwns();
if (!mMaybeUnderlyingBuffer) {
return;
}
if (mMaybeUnderlyingBuffer->mEntryDestructor) {
// We have an entry destructor, destroy all the things!
Reader reader(*this);
reader.ForEach([this](EntryReader& aReader) {
mMaybeUnderlyingBuffer->mEntryDestructor(aReader);
});
}
mMaybeUnderlyingBuffer->mClearedBlockCount =
mMaybeUnderlyingBuffer->mPushedBlockCount;
}
// Clear all entries, calling entry destructor (if any), and move read index
// to the end so that these entries cannot be read anymore.
// Clear all entries: Move read index to the end so that these entries cannot
// be read anymore.
void ClearAllEntries() {
mMutex.AssertCurrentThreadOwns();
if (!mMaybeUnderlyingBuffer) {
return;
}
DestroyAllEntries();
// Mark all entries pushed so far as cleared.
mMaybeUnderlyingBuffer->mClearedBlockCount =
mMaybeUnderlyingBuffer->mPushedBlockCount;
// Move read index to write index, so there's effectively no more entries
// that can be read. (Not setting both to 0, in case user is keeping
// `BlockIndex`'es to old entries.)
mFirstReadIndex = mNextWriteIndex;
}
// If there is an underlying buffer (with optional entry destructor), destroy
// all entries, move read index to the end, and discard the buffer and entry
// destructor. This BlocksRingBuffer will now gracefully reject all API calls,
// and is in a state where a new underlying buffer&entry deleter may be
// installed.
// If there is an underlying buffer, clear all entries, and discard the
// buffer. This BlocksRingBuffer will now gracefully reject all API calls, and
// is in a state where a new underlying buffer may be set.
void ResetUnderlyingBuffer() {
mMutex.AssertCurrentThreadOwns();
if (!mMaybeUnderlyingBuffer) {
return;
}
@ -1207,38 +1079,6 @@ class BlocksRingBuffer {
"Buffer should be able to contain more than a block size");
}
// Create a buffer of the given length.
template <typename EntryDestructor>
explicit UnderlyingBuffer(PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor)
: mBuffer(aLength),
mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {
MOZ_ASSERT(aLength.Value() > ULEB128MaxSize<Length>(),
"Buffer should be able to contain more than a block size");
}
// Take ownership of an existing buffer.
template <typename EntryDestructor>
explicit UnderlyingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor)
: mBuffer(std::move(aExistingBuffer), aLength),
mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {
MOZ_ASSERT(aLength.Value() > ULEB128MaxSize<Length>(),
"Buffer should be able to contain more than a block size");
}
// Use an externally-owned buffer.
template <typename EntryDestructor>
explicit UnderlyingBuffer(Buffer::Byte* aExternalBuffer,
PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor)
: mBuffer(aExternalBuffer, aLength),
mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {
MOZ_ASSERT(aLength.Value() > ULEB128MaxSize<Length>(),
"Buffer should be able to contain more than a block size");
}
// Only allow move-construction.
UnderlyingBuffer(UnderlyingBuffer&&) = default;
@ -1249,15 +1089,13 @@ class BlocksRingBuffer {
// Underlying circular byte buffer.
Buffer mBuffer;
// If set, function to call for each entry that is about to be destroyed.
std::function<void(EntryReader&)> mEntryDestructor;
// Statistics.
uint64_t mPushedBlockCount = 0;
uint64_t mClearedBlockCount = 0;
};
// Underlying buffer, with entry destructor and stats.
// Underlying buffer, with stats.
// Only valid during in-session period.
Maybe<UnderlyingBuffer> mMaybeUnderlyingBuffer;

View file

@ -570,9 +570,6 @@ static uint64_t ExtractBlockIndex(const BlocksRingBuffer::BlockIndex bi) {
void TestBlocksRingBufferAPI() {
printf("TestBlocksRingBufferAPI...\n");
// Entry destructor will store about-to-be-cleared value in `lastDestroyed`.
uint32_t lastDestroyed = 0;
// Create a 16-byte buffer, enough to store up to 3 entries (1 byte size + 4
// bytes uint64_t).
constexpr uint32_t MBSize = 16;
@ -584,17 +581,15 @@ void TestBlocksRingBufferAPI() {
// Start a temporary block to constrain buffer lifetime.
{
BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex,
&buffer[MBSize], MakePowerOfTwo32<MBSize>(),
[&](BlocksRingBuffer::EntryReader& aReader) {
lastDestroyed = aReader.ReadObject<uint32_t>();
});
&buffer[MBSize], MakePowerOfTwo32<MBSize>());
# define VERIFY_START_END_DESTROYED(aStart, aEnd, aLastDestroyed) \
# define VERIFY_START_END_PUSHED_CLEARED(aStart, aEnd, aPushed, aCleared) \
{ \
BlocksRingBuffer::State state = rb.GetState(); \
MOZ_RELEASE_ASSERT(ExtractBlockIndex(state.mRangeStart) == (aStart)); \
MOZ_RELEASE_ASSERT(ExtractBlockIndex(state.mRangeEnd) == (aEnd)); \
MOZ_RELEASE_ASSERT(lastDestroyed == (aLastDestroyed)); \
MOZ_RELEASE_ASSERT(state.mPushedBlockCount == (aPushed)); \
MOZ_RELEASE_ASSERT(state.mClearedBlockCount == (aCleared)); \
}
// All entries will contain one 32-bit number. The resulting blocks will
@ -617,8 +612,8 @@ void TestBlocksRingBufferAPI() {
// Empty buffer to start with.
// Start&end indices still at 1 (0 is reserved for the default BlockIndex{}
// that cannot point at a valid entry), nothing destroyed.
VERIFY_START_END_DESTROYED(1, 1, 0);
// that cannot point at a valid entry), nothing cleared.
VERIFY_START_END_PUSHED_CLEARED(1, 1, 0, 0);
// Default BlockIndex.
BlocksRingBuffer::BlockIndex bi0;
@ -646,7 +641,7 @@ void TestBlocksRingBufferAPI() {
MOZ_RELEASE_ASSERT(ExtractBlockIndex(rb.PutObject(uint32_t(1))) == 1);
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// - S[4 | int(1) ]E
VERIFY_START_END_DESTROYED(1, 6, 0);
VERIFY_START_END_PUSHED_CLEARED(1, 6, 1, 0);
// Push `2` through ReserveAndPut, check output BlockIndex.
auto bi2 = rb.ReserveAndPut([]() { return sizeof(uint32_t); },
@ -662,7 +657,7 @@ void TestBlocksRingBufferAPI() {
MOZ_RELEASE_ASSERT(ExtractBlockIndex(bi2) == 6);
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// - S[4 | int(1) ] [4 | int(2) ]E
VERIFY_START_END_DESTROYED(1, 11, 0);
VERIFY_START_END_PUSHED_CLEARED(1, 11, 2, 0);
// Check single entry at bi2, store next block index.
auto bi2Next =
@ -739,7 +734,7 @@ void TestBlocksRingBufferAPI() {
MOZ_RELEASE_ASSERT(put3 == 11.0);
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 (16)
// - S[4 | int(1) ] [4 | int(2) ] [4 | int(3) ]E
VERIFY_START_END_DESTROYED(1, 16, 0);
VERIFY_START_END_PUSHED_CLEARED(1, 16, 3, 0);
// Re-Read single entry at bi2, should now have a next entry.
rb.ReadAt(bi2, [&](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeReader) {
@ -766,12 +761,12 @@ void TestBlocksRingBufferAPI() {
MOZ_RELEASE_ASSERT(count == 3);
// Push `4`, store its BlockIndex for later.
// This will wrap around, and destroy the first entry.
// This will wrap around, and clear the first entry.
BlocksRingBuffer::BlockIndex bi4 = rb.PutObject(uint32_t(4));
// Before:
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 (16)
// - S[4 | int(1) ] [4 | int(2) ] [4 | int(3) ]E
// 1. First entry destroyed:
// 1. First entry cleared:
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 (16)
// - ? ? ? ? ? S[4 | int(2) ] [4 | int(3) ]E
// 2. New entry starts at 15 and wraps around: (shown on separate line)
@ -782,7 +777,7 @@ void TestBlocksRingBufferAPI() {
// (collapsed)
// 16 17 18 19 20 21 6 7 8 9 10 11 12 13 14 15 (16)
// [4 | int(4) ]E ? S[4 | int(2) ] [4 | int(3) ]
VERIFY_START_END_DESTROYED(6, 21, 1);
VERIFY_START_END_PUSHED_CLEARED(6, 21, 4, 1);
// Check that we have `2` to `4`.
count = 1;
@ -792,7 +787,7 @@ void TestBlocksRingBufferAPI() {
MOZ_RELEASE_ASSERT(count == 4);
// Push 5 through Put, no returns.
// This will destroy the second entry.
// This will clear the second entry.
// Check that the EntryWriter can access bi4 but not bi2.
auto bi5_6 =
rb.Put(sizeof(uint32_t), [&](BlocksRingBuffer::EntryWriter* aEW) {
@ -808,7 +803,7 @@ void TestBlocksRingBufferAPI() {
auto& bi6 = bi5_6.second();
// 16 17 18 19 20 21 22 23 24 25 26 11 12 13 14 15 (16)
// [4 | int(4) ] [4 | int(5) ]E ? S[4 | int(3) ]
VERIFY_START_END_DESTROYED(11, 26, 2);
VERIFY_START_END_PUSHED_CLEARED(11, 26, 5, 2);
// Read single entry at bi2, should now gracefully fail.
rb.ReadAt(bi2, [](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeReader) {
@ -854,11 +849,11 @@ void TestBlocksRingBufferAPI() {
});
MOZ_RELEASE_ASSERT(count == 5);
// Clear everything before `4`, this should destroy `3`.
// Clear everything before `4`, this should clear `3`.
rb.ClearBefore(bi4);
// 16 17 18 19 20 21 22 23 24 25 26 11 12 13 14 15
// S[4 | int(4) ] [4 | int(5) ]E ? ? ? ? ? ?
VERIFY_START_END_DESTROYED(16, 26, 3);
VERIFY_START_END_PUSHED_CLEARED(16, 26, 5, 3);
// Check that we have `4` to `5`.
count = 3;
@ -867,17 +862,16 @@ void TestBlocksRingBufferAPI() {
});
MOZ_RELEASE_ASSERT(count == 5);
// Clear everything before `4` again, nothing to destroy.
lastDestroyed = 0;
// Clear everything before `4` again, nothing to clear.
rb.ClearBefore(bi4);
VERIFY_START_END_DESTROYED(16, 26, 0);
VERIFY_START_END_PUSHED_CLEARED(16, 26, 5, 3);
// Clear everything, this should destroy `4` and `5`, and bring the start
// Clear everything, this should clear `4` and `5`, and bring the start
// index where the end index currently is.
rb.ClearBefore(bi6);
// 16 17 18 19 20 21 22 23 24 25 26 11 12 13 14 15
// ? ? ? ? ? ? ? ? ? ? SE? ? ? ? ? ?
VERIFY_START_END_DESTROYED(26, 26, 5);
VERIFY_START_END_PUSHED_CLEARED(26, 26, 5, 5);
// Check that we have nothing to read.
rb.ReadEach([&](auto&&) { MOZ_RELEASE_ASSERT(false); });
@ -887,16 +881,15 @@ void TestBlocksRingBufferAPI() {
MOZ_RELEASE_ASSERT(aMaybeReader.isNothing());
});
// Clear everything before now-cleared `4`, nothing to destroy.
lastDestroyed = 0;
// Clear everything before now-cleared `4`, nothing to clear.
rb.ClearBefore(bi4);
VERIFY_START_END_DESTROYED(26, 26, 0);
VERIFY_START_END_PUSHED_CLEARED(26, 26, 5, 5);
// Push `6` directly.
MOZ_RELEASE_ASSERT(rb.PutObject(uint32_t(6)) == bi6);
// 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
// ? ? ? ? ? ? ? ? ? ? S[4 | int(6) ]E ?
VERIFY_START_END_DESTROYED(26, 31, 0);
VERIFY_START_END_PUSHED_CLEARED(26, 31, 6, 5);
{
// Create a 2nd buffer and fill it with `7` and `8`.
@ -906,26 +899,26 @@ void TestBlocksRingBufferAPI() {
rb2.PutObject(uint32_t(7));
rb2.PutObject(uint32_t(8));
// Main buffer shouldn't have changed.
VERIFY_START_END_DESTROYED(26, 31, 0);
VERIFY_START_END_PUSHED_CLEARED(26, 31, 6, 5);
// Append contents of rb2 to rb, this should end up being the same as
// pushing the two numbers.
rb.AppendContents(rb2);
// 32 33 34 35 36 37 38 39 40 41 26 27 28 29 30 31
// int(7) ] [4 | int(8) ]E ? S[4 | int(6) ] [4 |
VERIFY_START_END_DESTROYED(26, 41, 0);
VERIFY_START_END_PUSHED_CLEARED(26, 41, 8, 5);
// Append contents of rb2 to rb again, to verify that rb2 was not modified
// above. This should destroy `6` and the first `7`.
// above. This should clear `6` and the first `7`.
rb.AppendContents(rb2);
// 48 49 50 51 36 37 38 39 40 41 42 43 44 45 46 47
// int(8) ]E ? S[4 | int(8) ] [4 | int(7) ] [4 |
VERIFY_START_END_DESTROYED(36, 51, 7);
VERIFY_START_END_PUSHED_CLEARED(36, 51, 10, 7);
// End of block where rb2 lives, to verify that it is not needed anymore
// for its copied values to survive in rb.
}
VERIFY_START_END_DESTROYED(36, 51, 7);
VERIFY_START_END_PUSHED_CLEARED(36, 51, 10, 7);
// bi6 should now have been cleared.
rb.ReadAt(bi6, [](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeReader) {
@ -944,7 +937,6 @@ void TestBlocksRingBufferAPI() {
// End of block where rb lives, BlocksRingBuffer destructor should call
// entry destructor for remaining entries.
}
MOZ_RELEASE_ASSERT(lastDestroyed == 8);
// Check that only the provided stack-based sub-buffer was modified.
uint32_t changed = 0;
@ -1128,9 +1120,7 @@ void TestBlocksRingBufferUnderlyingBufferChanges() {
testOutOfSession();
testOutOfSession();
int cleared = 0;
rb.Set(&buffer[MBSize], MakePowerOfTwo<BlocksRingBuffer::Length, MBSize>(),
[&](auto&&) { ++cleared; });
rb.Set(&buffer[MBSize], MakePowerOfTwo<BlocksRingBuffer::Length, MBSize>());
MOZ_RELEASE_ASSERT(rb.BufferLength().isSome());
rb.ReadEach([](auto&&) { MOZ_RELEASE_ASSERT(false); });
@ -1139,8 +1129,6 @@ void TestBlocksRingBufferUnderlyingBufferChanges() {
// Remove the current underlying buffer, this should clear all entries.
rb.Reset();
// The above should clear all entries (2 tests, three entries each).
MOZ_RELEASE_ASSERT(cleared == 2 * 3);
// Check that only the provided stack-based sub-buffer was modified.
uint32_t changed = 0;
@ -1167,19 +1155,13 @@ void TestBlocksRingBufferUnderlyingBufferChanges() {
void TestBlocksRingBufferThreading() {
printf("TestBlocksRingBufferThreading...\n");
// Entry destructor will store about-to-be-cleared value in `lastDestroyed`.
std::atomic<int> lastDestroyed{0};
constexpr uint32_t MBSize = 8192;
uint8_t buffer[MBSize * 3];
for (size_t i = 0; i < MBSize * 3; ++i) {
buffer[i] = uint8_t('A' + i);
}
BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex,
&buffer[MBSize], MakePowerOfTwo32<MBSize>(),
[&](BlocksRingBuffer::EntryReader& aReader) {
lastDestroyed = aReader.ReadObject<int>();
});
&buffer[MBSize], MakePowerOfTwo32<MBSize>());
// Start reader thread.
std::atomic<bool> stopReader{false};
@ -1188,7 +1170,7 @@ void TestBlocksRingBufferThreading() {
BlocksRingBuffer::State state = rb.GetState();
printf(
"Reader: range=%llu..%llu (%llu bytes) pushed=%llu cleared=%llu "
"(alive=%llu) lastDestroyed=%d\n",
"(alive=%llu)\n",
static_cast<unsigned long long>(ExtractBlockIndex(state.mRangeStart)),
static_cast<unsigned long long>(ExtractBlockIndex(state.mRangeEnd)),
static_cast<unsigned long long>(ExtractBlockIndex(state.mRangeEnd)) -
@ -1197,8 +1179,7 @@ void TestBlocksRingBufferThreading() {
static_cast<unsigned long long>(state.mPushedBlockCount),
static_cast<unsigned long long>(state.mClearedBlockCount),
static_cast<unsigned long long>(state.mPushedBlockCount -
state.mClearedBlockCount),
int(lastDestroyed));
state.mClearedBlockCount));
if (stopReader) {
break;
}

View file

@ -45,13 +45,9 @@ ProfileBuffer::~ProfileBuffer() {
BlocksRingBuffer::BlockIndex ProfileBuffer::AddEntry(
BlocksRingBuffer& aBlocksRingBuffer, const ProfileBufferEntry& aEntry) {
switch (aEntry.GetKind()) {
#define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
/* Rooting analysis cannot get through `BlocksRingBuffer`'s heavy use of \
* lambdas and `std::function`s, which then trips it when used from \
* `MergeStacks()` where unrooted js objects are manipulated. */ \
JS::AutoSuppressGCAnalysis nogc; \
return aBlocksRingBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
#define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
return aBlocksRingBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(SWITCH_KIND)