Bug 1576551 - Use BlocksRingBuffer in ProfileBuffer - r=gregtatum

This just replaces `ProfileBuffer`'s self-managed circular buffer with a
`BlocksRingBuffer`.

That `BlocksRingBuffer` does not need its own mutex (yet), because all uses go
through gPSMutex-guarded code.

`ProfileBuffer` now also pre-allocates a small buffer for use in
`DuplicateLastSample()`; this avoids multiple mallocs at each sleeping-thread
stack duplication.
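
The duplication buffer is simply a scratch allocation that the `ProfileBuffer`
owns and reuses on every call. A minimal sketch of the pattern, with
hypothetical names (the real member is `mDuplicationBuffer`, sized by
`DuplicationBufferBytes` in the hunks below):

#include <cstddef>
#include <cstdint>
#include <memory>

// Minimal sketch (hypothetical names): the scratch buffer is allocated once,
// up front, so the per-sample duplication path performs no heap allocation.
class SleepingStackDuplicator {
  static constexpr size_t kScratchBytes = 65536;
  std::unique_ptr<uint8_t[]> mScratch =
      std::make_unique<uint8_t[]>(kScratchBytes);

 public:
  // Callers serialize the duplicated stack into Scratch() instead of
  // allocating a temporary buffer for each sleeping thread.
  uint8_t* Scratch() { return mScratch.get(); }
  static constexpr size_t ScratchSize() { return kScratchBytes; }
};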

Note: Internal "magic" sizes have been multiplied by 8 (and tweaked upwards, to
handle bigger stacks), because they originally were the number of 9-byte
entries, but now it's the buffer size in bytes. (And entries can now be smaller
than 9 bytes, so overall the capacity in entries should be similar or better.)
However, external calls still think they are giving a number of "entries", this
will be handled in the next patch.
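
As a concrete example of the conversion (the numbers appear in the hunks
below): the minimum-capacity check moves from 1024 entries to 8192 bytes, and
the single-backtrace buffer from 1024 entries to 65536 bytes. The sketch below
spells out the arithmetic with illustrative constants only:

#include <cstdint>

// Illustrative arithmetic only, using values from this patch.
constexpr uint32_t kOldEntryBytes = 9;     // 1 kind byte + 8 payload bytes
constexpr uint32_t kOldMinEntries = 1024;  // old minimum capacity, in entries
constexpr uint32_t kOldMinBytes = kOldMinEntries * kOldEntryBytes;  // 9216
constexpr uint32_t kNewMinBytes = 8192;    // new minimum capacity, in bytes
// New entries need only 1 kind byte + their payload (e.g. 1 + sizeof(int) == 5
// bytes for an int entry), so 8192 bytes hold at least as many small entries
// as the 1024 old 9-byte slots did.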

Differential Revision: https://phabricator.services.mozilla.com/D43421

--HG--
extra : moz-landing-system : lando
Gerald Squelart 2019-09-17 01:49:59 +00:00
Parent e8ea7f331e
Commit 71e4f99d21
13 changed files with 1757 additions and 1483 deletions

View file

@ -17,11 +17,14 @@
namespace mozilla {
namespace baseprofiler {
// 65536 bytes should be plenty for a single backtrace.
static constexpr auto DuplicationBufferBytes = MakePowerOfTwo32<65536>();
// mEntries doesn't need its own mutex, because it is guarded by gPSMutex.
ProfileBuffer::ProfileBuffer(PowerOfTwo32 aCapacity)
: mEntries(MakeUnique<ProfileBufferEntry[]>(aCapacity.Value())),
mEntryIndexMask(aCapacity.Mask()),
mRangeStart(0),
mRangeEnd(0) {}
: mEntries(BlocksRingBuffer::ThreadSafety::WithoutMutex, aCapacity),
mDuplicationBuffer(MakeUnique<BlocksRingBuffer::Byte[]>(
DuplicationBufferBytes.Value())) {}
ProfileBuffer::~ProfileBuffer() {
while (mStoredMarkers.peek()) {
@ -29,26 +32,43 @@ ProfileBuffer::~ProfileBuffer() {
}
}
// Called from signal, call only reentrant functions
void ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
GetEntry(mRangeEnd++) = aEntry;
/* static */
BlocksRingBuffer::BlockIndex ProfileBuffer::AddEntry(
BlocksRingBuffer& aBlocksRingBuffer, const ProfileBufferEntry& aEntry) {
switch (aEntry.GetKind()) {
# define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
return aBlocksRingBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
break; \
}
// The distance between mRangeStart and mRangeEnd must never exceed
// capacity, so advance mRangeStart if necessary.
if (mRangeEnd - mRangeStart > mEntryIndexMask.MaskValue() + 1) {
mRangeStart++;
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(SWITCH_KIND)
# undef SWITCH_KIND
default:
MOZ_ASSERT(false, "Unhandled baseprofiler::ProfilerBuffer entry KIND");
return BlockIndex{};
}
}
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
uint64_t pos = mRangeEnd;
AddEntry(ProfileBufferEntry::ThreadId(aThreadId));
return pos;
// Called from signal, call only reentrant functions
uint64_t ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
return AddEntry(mEntries, aEntry).ConvertToU64();
}
void ProfileBuffer::AddStoredMarker(ProfilerMarker* aStoredMarker) {
aStoredMarker->SetPositionInBuffer(mRangeEnd);
mStoredMarkers.insert(aStoredMarker);
/* static */
BlocksRingBuffer::BlockIndex ProfileBuffer::AddThreadIdEntry(
BlocksRingBuffer& aBlocksRingBuffer, int aThreadId) {
return AddEntry(aBlocksRingBuffer, ProfileBufferEntry::ThreadId(aThreadId));
}
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
return AddThreadIdEntry(mEntries, aThreadId).ConvertToU64();
}
void ProfileBuffer::AddMarker(ProfilerMarker* aMarker) {
aMarker->SetPositionInBuffer(AddEntry(ProfileBufferEntry::Marker(aMarker)));
mStoredMarkers.insert(aMarker);
}
void ProfileBuffer::CollectCodeLocation(
@ -94,21 +114,21 @@ void ProfileBuffer::DeleteExpiredStoredMarkers() {
// Delete markers of samples that have been overwritten due to circular
// buffer wraparound.
while (mStoredMarkers.peek() &&
mStoredMarkers.peek()->HasExpired(mRangeStart)) {
mStoredMarkers.peek()->HasExpired(BufferRangeStart())) {
delete mStoredMarkers.popHead();
}
}
size_t ProfileBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
size_t n = aMallocSizeOf(this);
n += aMallocSizeOf(mEntries.get());
size_t ProfileBuffer::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
// Measurement of the following members may be added later if DMD finds it
// is worthwhile:
// - memory pointed to by the elements within mEntries
// - mStoredMarkers
return mEntries.SizeOfExcludingThis(aMallocSizeOf);
}
return n;
size_t ProfileBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}
void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
@ -152,9 +172,10 @@ void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
}
ProfilerBufferInfo ProfileBuffer::GetProfilerBufferInfo() const {
return {mRangeStart, mRangeEnd, mEntryIndexMask.MaskValue() + 1,
mIntervalsNs, mOverheadsNs, mLockingsNs,
mCleaningsNs, mCountersNs, mThreadsNs};
return {
BufferRangeStart(), BufferRangeEnd(), mEntries.BufferLength()->Value(),
mIntervalsNs, mOverheadsNs, mLockingsNs,
mCleaningsNs, mCountersNs, mThreadsNs};
}
/* ProfileBufferCollector */

View file

@ -9,35 +9,37 @@
#include "ProfileBufferEntry.h"
#include "ProfilerMarker.h"
#include "mozilla/BlocksRingBuffer.h"
#include "mozilla/Maybe.h"
#include "mozilla/PowerOfTwo.h"
namespace mozilla {
namespace baseprofiler {
// A fixed-capacity circular buffer.
// Class storing most profiling data in a BlocksRingBuffer.
//
// This class is used as a queue of entries which, after construction, never
// allocates. This makes it safe to use in the profiler's "critical section".
// Entries are appended at the end. Once the queue capacity has been reached,
// adding a new entry will evict an old entry from the start of the queue.
// Positions in the queue are represented as 64-bit unsigned integers which
// only increase and never wrap around.
// mRangeStart and mRangeEnd describe the range in that uint64_t space which is
// covered by the queue contents.
// Internally, the buffer uses a fixed-size storage and applies a modulo
// operation when accessing entries in that storage buffer. "Evicting" an entry
// really just means that an existing entry in the storage buffer gets
// overwritten and that mRangeStart gets incremented.
class ProfileBuffer final {
public:
// Opaque type containing a block index, which should not be modified outside
// of BlocksRingBuffer.
// TODO: Eventually, all uint64_t values should be replaced with BlockIndex,
// because external users should only store and compare them, but not do other
// arithmetic operations (that uint64_t supports).
using BlockIndex = BlocksRingBuffer::BlockIndex;
// ProfileBuffer constructor
// @param aCapacity The capacity of the buffer.
explicit ProfileBuffer(PowerOfTwo32 aCapacity);
~ProfileBuffer();
bool IsThreadSafe() const { return mEntries.IsThreadSafe(); }
// Add |aEntry| to the buffer, ignoring what kind of entry it is.
void AddEntry(const ProfileBufferEntry& aEntry);
// Returns the position of the entry.
uint64_t AddEntry(const ProfileBufferEntry& aEntry);
// Add to the buffer a sample start (ThreadId) entry for aThreadId.
// Returns the position of the entry.
@ -82,16 +84,12 @@ class ProfileBuffer final {
void DiscardSamplesBeforeTime(double aTime);
void AddStoredMarker(ProfilerMarker* aStoredMarker);
void AddMarker(ProfilerMarker* aMarker);
// The following method is not signal safe!
void DeleteExpiredStoredMarkers();
// Access an entry in the buffer.
ProfileBufferEntry& GetEntry(uint64_t aPosition) const {
return mEntries[aPosition & mEntryIndexMask];
}
size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
void CollectOverheadStats(TimeDuration aSamplingTime, TimeDuration aLocking,
@ -101,38 +99,50 @@ class ProfileBuffer final {
ProfilerBufferInfo GetProfilerBufferInfo() const;
private:
// The storage that backs our buffer. Holds capacity entries.
// All accesses to entries in mEntries need to go through GetEntry(), which
// translates the given buffer position from the near-infinite uint64_t space
// into the entry storage space.
UniquePtr<ProfileBufferEntry[]> mEntries;
// Add |aEntry| to the provided BlocksRingBuffer.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// that is not attached to a `ProfileBuffer`.
static BlockIndex AddEntry(BlocksRingBuffer& aBlocksRingBuffer,
const ProfileBufferEntry& aEntry);
// A mask such that pos & mEntryIndexMask == pos % capacity.
PowerOfTwoMask32 mEntryIndexMask;
// Add a sample start (ThreadId) entry for aThreadId to the provided
// BlocksRingBuffer. Returns the position of the entry.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// that is not attached to a `ProfileBuffer`.
static BlockIndex AddThreadIdEntry(BlocksRingBuffer& aBlocksRingBuffer,
int aThreadId);
// The circular-ring storage in which this ProfileBuffer stores its data.
BlocksRingBuffer mEntries;
public:
// mRangeStart and mRangeEnd are uint64_t values that strictly advance and
// never wrap around. mRangeEnd is always greater than or equal to
// mRangeStart, but never gets more than capacity steps ahead of
// mRangeStart, because we can only store a fixed number of entries in the
// buffer. Once the entire buffer is in use, adding a new entry will evict an
// entry from the front of the buffer (and increase mRangeStart).
// In other words, the following conditions hold true at all times:
// (1) mRangeStart <= mRangeEnd
// (2) mRangeEnd - mRangeStart <= capacity
// `BufferRangeStart()` and `BufferRangeEnd()` return `uint64_t` values
// corresponding to the first entry and past the last entry stored in
// `mEntries`.
//
// If there are no live entries, then mRangeStart == mRangeEnd.
// Otherwise, mRangeStart is the first live entry and mRangeEnd is one past
// the last live entry, and also the position at which the next entry will be
// added.
// (mRangeEnd - mRangeStart) always gives the number of live entries.
uint64_t mRangeStart;
uint64_t mRangeEnd;
// The returned values are not guaranteed to be stable, because other threads
// may also be accessing the buffer concurrently. But they will always
// increase, and can therefore give an indication of how far these values have
// *at least* reached. In particular:
// - Entries whose index is strictly less than `BufferRangeStart()` have been
// discarded by now, so any related data may also be safely discarded.
// - It is safe to try and read entries at any index strictly less than
// `BufferRangeEnd()` -- but note that these reads may fail by the time you
// request them, as old entries get overwritten by new ones.
uint64_t BufferRangeStart() const {
return mEntries.GetState().mRangeStart.ConvertToU64();
}
uint64_t BufferRangeEnd() const {
return mEntries.GetState().mRangeEnd.ConvertToU64();
}
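// Illustrative use (hypothetical caller, not part of this class): anything
// tagged with a buffer position can be discarded once that position falls
// behind BufferRangeStart(), because the corresponding entries have already
// been evicted from mEntries. For example:
//
//   uint64_t rangeStart = profileBuffer.BufferRangeStart();
//   if (storedPosition < rangeStart) {
//     // The referenced entry no longer exists; drop the associated data.
//   }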
// Markers that marker entries in the buffer might refer to.
ProfilerMarkerLinkedList mStoredMarkers;
private:
// Used when duplicating sleeping stacks (to avoid spurious mallocs).
const UniquePtr<BlocksRingBuffer::Byte[]> mDuplicationBuffer;
// Time from launch (ns) when first sampling was recorded.
double mFirstSamplingTimeNs = 0.0;
// Time from launch (ns) when last sampling was recorded.
@ -167,7 +177,9 @@ class ProfileBufferCollector final : public ProfilerStackCollector {
return Some(mSamplePositionInBuffer);
}
Maybe<uint64_t> BufferRangeStart() override { return Some(mBuf.mRangeStart); }
Maybe<uint64_t> BufferRangeStart() override {
return Some(mBuf.BufferRangeStart());
}
virtual void CollectNativeLeafAddr(void* aAddr) override;
virtual void CollectProfilingStackFrame(

The diff for this file is not shown because of its large size.

View file

@ -27,35 +27,36 @@ class ProfilerMarker;
// NOTE! If you add entries, you need to verify if they need to be added to the
// switch statement in DuplicateLastSample!
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
MACRO(CategoryPair, int) \
MACRO(CollectionStart, double) \
MACRO(CollectionEnd, double) \
MACRO(Label, const char*) \
MACRO(FrameFlags, uint64_t) \
MACRO(DynamicStringFragment, char*) /* char[kNumChars], really */ \
MACRO(JitReturnAddr, void*) \
MACRO(LineNumber, int) \
MACRO(ColumnNumber, int) \
MACRO(NativeLeafAddr, void*) \
MACRO(Marker, ProfilerMarker*) \
MACRO(Pause, double) \
MACRO(Responsiveness, double) \
MACRO(Resume, double) \
MACRO(ThreadId, int) \
MACRO(Time, double) \
MACRO(CounterId, void*) \
MACRO(CounterKey, uint64_t) \
MACRO(Number, uint64_t) \
MACRO(Count, int64_t) \
MACRO(ProfilerOverheadTime, double) \
MACRO(ProfilerOverheadDuration, double)
// This will evaluate the MACRO with (KIND, TYPE, SIZE)
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
MACRO(CategoryPair, int, sizeof(int)) \
MACRO(CollectionStart, double, sizeof(double)) \
MACRO(CollectionEnd, double, sizeof(double)) \
MACRO(Label, const char*, sizeof(const char*)) \
MACRO(FrameFlags, uint64_t, sizeof(uint64_t)) \
MACRO(DynamicStringFragment, char*, ProfileBufferEntry::kNumChars) \
MACRO(JitReturnAddr, void*, sizeof(void*)) \
MACRO(LineNumber, int, sizeof(int)) \
MACRO(ColumnNumber, int, sizeof(int)) \
MACRO(NativeLeafAddr, void*, sizeof(void*)) \
MACRO(Marker, ProfilerMarker*, sizeof(ProfilerMarker*)) \
MACRO(Pause, double, sizeof(double)) \
MACRO(Responsiveness, double, sizeof(double)) \
MACRO(Resume, double, sizeof(double)) \
MACRO(ThreadId, int, sizeof(int)) \
MACRO(Time, double, sizeof(double)) \
MACRO(CounterId, void*, sizeof(void*)) \
MACRO(CounterKey, uint64_t, sizeof(uint64_t)) \
MACRO(Number, uint64_t, sizeof(uint64_t)) \
MACRO(Count, int64_t, sizeof(int64_t)) \
MACRO(ProfilerOverheadTime, double, sizeof(double)) \
MACRO(ProfilerOverheadDuration, double, sizeof(double))
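For illustration, a self-contained sketch (hypothetical names, not part of this
patch) of how a consumer can use the new third argument to compute per-kind
serialized sizes, in the same way `ProfileBuffer::AddEntry` passes `1 + (SIZE)`
to `PutFrom`:

#include <cstddef>
#include <cstdint>

// Hypothetical list and consumer, mirroring the (KIND, TYPE, SIZE) pattern
// above: the third argument carries the payload size of each kind.
#define FOR_EACH_DEMO_KIND(MACRO)     \
  MACRO(DemoInt, int, sizeof(int))    \
  MACRO(DemoPtr, void*, sizeof(void*))

enum class DemoKind : uint8_t {
#define DEMO_ENUM(KIND, TYPE, SIZE) KIND,
  FOR_EACH_DEMO_KIND(DEMO_ENUM)
#undef DEMO_ENUM
};

// Serialized size of a kind: 1 kind byte + SIZE payload bytes.
constexpr size_t DemoSerializedBytes(DemoKind aKind) {
  switch (aKind) {
#define DEMO_BYTES(KIND, TYPE, SIZE) \
  case DemoKind::KIND:               \
    return 1 + (SIZE);
    FOR_EACH_DEMO_KIND(DEMO_BYTES)
#undef DEMO_BYTES
  }
  return 0;
}

static_assert(DemoSerializedBytes(DemoKind::DemoInt) == 1 + sizeof(int), "");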
class ProfileBufferEntry {
public:
enum class Kind : uint8_t {
INVALID = 0,
#define KIND(k, t) k,
#define KIND(KIND, TYPE, SIZE) KIND,
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(KIND)
#undef KIND
LIMIT
@ -79,17 +80,17 @@ class ProfileBufferEntry {
ProfileBufferEntry(Kind aKind, int aInt);
public:
#define CTOR(k, t) \
static ProfileBufferEntry k(t aVal) { \
return ProfileBufferEntry(Kind::k, aVal); \
#define CTOR(KIND, TYPE, SIZE) \
static ProfileBufferEntry KIND(TYPE aVal) { \
return ProfileBufferEntry(Kind::KIND, aVal); \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(CTOR)
#undef CTOR
Kind GetKind() const { return mKind; }
#define IS_KIND(k, t) \
bool Is##k() const { return mKind == Kind::k; }
#define IS_KIND(KIND, TYPE, SIZE) \
bool Is##KIND() const { return mKind == Kind::KIND; }
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(IS_KIND)
#undef IS_KIND

View file

@ -690,7 +690,7 @@ class ActivePS {
LiveProfiledThreadData& thread = sInstance->mLiveProfiledThreads[i];
if (thread.mRegisteredThread == aRegisteredThread) {
thread.mProfiledThreadData->NotifyUnregistered(
sInstance->mBuffer->mRangeEnd);
sInstance->mBuffer->BufferRangeEnd());
MOZ_RELEASE_ASSERT(sInstance->mDeadProfiledThreads.append(
std::move(thread.mProfiledThreadData)));
sInstance->mLiveProfiledThreads.erase(
@ -707,7 +707,7 @@ class ActivePS {
# endif
static void DiscardExpiredDeadProfiledThreads(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
uint64_t bufferRangeStart = sInstance->mBuffer->BufferRangeStart();
// Discard any dead threads that were unregistered before bufferRangeStart.
sInstance->mDeadProfiledThreads.eraseIf(
[bufferRangeStart](
@ -726,7 +726,7 @@ class ActivePS {
for (size_t i = 0; i < registeredPages.length(); i++) {
RefPtr<PageInformation>& page = registeredPages[i];
if (page->DocShellId() == aRegisteredDocShellId) {
page->NotifyUnregistered(sInstance->mBuffer->mRangeEnd);
page->NotifyUnregistered(sInstance->mBuffer->BufferRangeEnd());
MOZ_RELEASE_ASSERT(
sInstance->mDeadProfiledPages.append(std::move(page)));
registeredPages.erase(&registeredPages[i--]);
@ -735,7 +735,7 @@ class ActivePS {
}
static void DiscardExpiredPages(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
uint64_t bufferRangeStart = sInstance->mBuffer->BufferRangeStart();
// Discard any dead pages that were unregistered before
// bufferRangeStart.
sInstance->mDeadProfiledPages.eraseIf(
@ -753,7 +753,7 @@ class ActivePS {
}
static void ClearExpiredExitProfiles(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
uint64_t bufferRangeStart = sInstance->mBuffer->BufferRangeStart();
// Discard exit profiles that were gathered before our buffer RangeStart.
sInstance->mExitProfiles.eraseIf(
[bufferRangeStart](const ExitProfile& aExitProfile) {
@ -765,7 +765,7 @@ class ActivePS {
ClearExpiredExitProfiles(aLock);
MOZ_RELEASE_ASSERT(sInstance->mExitProfiles.append(
ExitProfile{aExitProfile, sInstance->mBuffer->mRangeEnd}));
ExitProfile{aExitProfile, sInstance->mBuffer->BufferRangeEnd()}));
}
static Vector<std::string> MoveExitProfiles(PSLockRef aLock) {
@ -1457,8 +1457,7 @@ static void DoPeriodicSample(PSLockRef aLock,
aRegisteredThread.RacyRegisteredThread().GetPendingMarkers();
while (pendingMarkersList && pendingMarkersList->peek()) {
ProfilerMarker* marker = pendingMarkersList->popHead();
buffer.AddStoredMarker(marker);
buffer.AddEntry(ProfileBufferEntry::Marker(marker));
buffer.AddMarker(marker);
}
}
@ -2671,10 +2670,10 @@ static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity,
# endif
// Fall back to the default values if the passed-in values are unreasonable.
// Less than 1024 would not be enough for the most complex stack, so we should
// Less than 8192 would not be enough for the most complex stack, so we should
// be able to store at least one full stack. TODO: Review magic numbers.
PowerOfTwo32 capacity =
(aCapacity.Value() >= 1024u) ? aCapacity : BASE_PROFILER_DEFAULT_ENTRIES;
(aCapacity.Value() >= 8192u) ? aCapacity : BASE_PROFILER_DEFAULT_ENTRIES;
Maybe<double> duration = aDuration;
if (aDuration && *aDuration <= 0) {
@ -3118,8 +3117,8 @@ UniqueProfilerBacktrace profiler_get_backtrace() {
regs.Clear();
# endif
// 1024 should be plenty for a single backtrace.
auto buffer = MakeUnique<ProfileBuffer>(MakePowerOfTwo32<1024>());
// 65536 bytes should be plenty for a single backtrace.
auto buffer = MakeUnique<ProfileBuffer>(MakePowerOfTwo32<65536>());
DoSyncSample(lock, *registeredThread, now, regs, *buffer.get());
@ -3221,8 +3220,7 @@ void profiler_add_marker_for_thread(int aThreadId,
// Insert the marker into the buffer
ProfileBuffer& buffer = ActivePS::Buffer(lock);
buffer.AddStoredMarker(marker);
buffer.AddEntry(ProfileBufferEntry::Marker(marker));
buffer.AddMarker(marker);
}
void profiler_tracing(const char* aCategoryString, const char* aMarkerName,

View file

@ -84,12 +84,13 @@ class BlocksRingBuffer {
// Near-infinite index type, not expecting overflow.
using Index = uint64_t;
public:
// Using ModuloBuffer as underlying circular byte buffer.
using Buffer = ModuloBuffer<uint32_t, Index>;
using Byte = Buffer::Byte;
using BufferWriter = Buffer::Writer;
using BufferReader = Buffer::Reader;
public:
// Length type for total buffer (as PowerOfTwo<Length>) and each entry.
using Length = uint32_t;
@ -159,6 +160,13 @@ class BlocksRingBuffer {
return mBlockIndex >= aRhs.mBlockIndex;
}
// Temporary escape hatches to let legacy code access block indices.
// TODO: Remove this when legacy code has been modernized.
uint64_t ConvertToU64() const { return uint64_t(mBlockIndex); }
static BlockIndex ConvertFromU64(uint64_t aIndex) {
return BlockIndex(Index(aIndex));
}
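// Illustrative round-trip for this legacy escape hatch (hypothetical caller):
//
//   uint64_t raw = blockIndex.ConvertToU64();
//   BlocksRingBuffer::BlockIndex same =
//       BlocksRingBuffer::BlockIndex::ConvertFromU64(raw);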
private:
// Only BlocksRingBuffer internal functions and serializers can convert
// between `BlockIndex` and `Index`.

View file

@ -16,11 +16,14 @@
using namespace mozilla;
// 65536 bytes should be plenty for a single backtrace.
static constexpr auto DuplicationBufferBytes = MakePowerOfTwo32<65536>();
// mEntries doesn't need its own mutex, because it is guarded by gPSMutex.
ProfileBuffer::ProfileBuffer(PowerOfTwo32 aCapacity)
: mEntries(MakeUnique<ProfileBufferEntry[]>(aCapacity.Value())),
mEntryIndexMask(aCapacity.Mask()),
mRangeStart(0),
mRangeEnd(0) {}
: mEntries(BlocksRingBuffer::ThreadSafety::WithoutMutex, aCapacity),
mDuplicationBuffer(MakeUnique<BlocksRingBuffer::Byte[]>(
DuplicationBufferBytes.Value())) {}
ProfileBuffer::~ProfileBuffer() {
while (mStoredMarkers.peek()) {
@ -28,26 +31,43 @@ ProfileBuffer::~ProfileBuffer() {
}
}
// Called from signal, call only reentrant functions
void ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
GetEntry(mRangeEnd++) = aEntry;
/* static */
BlocksRingBuffer::BlockIndex ProfileBuffer::AddEntry(
BlocksRingBuffer& aBlocksRingBuffer, const ProfileBufferEntry& aEntry) {
switch (aEntry.GetKind()) {
#define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
return aBlocksRingBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
break; \
}
// The distance between mRangeStart and mRangeEnd must never exceed
// capacity, so advance mRangeStart if necessary.
if (mRangeEnd - mRangeStart > mEntryIndexMask.MaskValue() + 1) {
mRangeStart++;
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(SWITCH_KIND)
#undef SWITCH_KIND
default:
MOZ_ASSERT(false, "Unhandled ProfilerBuffer entry KIND");
return BlockIndex{};
}
}
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
uint64_t pos = mRangeEnd;
AddEntry(ProfileBufferEntry::ThreadId(aThreadId));
return pos;
// Called from signal, call only reentrant functions
uint64_t ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
return AddEntry(mEntries, aEntry).ConvertToU64();
}
void ProfileBuffer::AddStoredMarker(ProfilerMarker* aStoredMarker) {
aStoredMarker->SetPositionInBuffer(mRangeEnd);
mStoredMarkers.insert(aStoredMarker);
/* static */
BlocksRingBuffer::BlockIndex ProfileBuffer::AddThreadIdEntry(
BlocksRingBuffer& aBlocksRingBuffer, int aThreadId) {
return AddEntry(aBlocksRingBuffer, ProfileBufferEntry::ThreadId(aThreadId));
}
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
return AddThreadIdEntry(mEntries, aThreadId).ConvertToU64();
}
void ProfileBuffer::AddMarker(ProfilerMarker* aMarker) {
aMarker->SetPositionInBuffer(AddEntry(ProfileBufferEntry::Marker(aMarker)));
mStoredMarkers.insert(aMarker);
}
void ProfileBuffer::CollectCodeLocation(
@ -93,22 +113,21 @@ void ProfileBuffer::DeleteExpiredStoredMarkers() {
// Delete markers of samples that have been overwritten due to circular
// buffer wraparound.
while (mStoredMarkers.peek() &&
mStoredMarkers.peek()->HasExpired(mRangeStart)) {
mStoredMarkers.peek()->HasExpired(BufferRangeStart())) {
delete mStoredMarkers.popHead();
}
}
size_t ProfileBuffer::SizeOfIncludingThis(
mozilla::MallocSizeOf aMallocSizeOf) const {
size_t n = aMallocSizeOf(this);
n += aMallocSizeOf(mEntries.get());
size_t ProfileBuffer::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
// Measurement of the following members may be added later if DMD finds it
// is worthwhile:
// - memory pointed to by the elements within mEntries
// - mStoredMarkers
return mEntries.SizeOfExcludingThis(aMallocSizeOf);
}
return n;
size_t ProfileBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}
void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
@ -146,9 +165,10 @@ void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
}
ProfilerBufferInfo ProfileBuffer::GetProfilerBufferInfo() const {
return {mRangeStart, mRangeEnd, mEntryIndexMask.MaskValue() + 1,
mIntervalsNs, mOverheadsNs, mLockingsNs,
mCleaningsNs, mCountersNs, mThreadsNs};
return {
BufferRangeStart(), BufferRangeEnd(), mEntries.BufferLength()->Value(),
mIntervalsNs, mOverheadsNs, mLockingsNs,
mCleaningsNs, mCountersNs, mThreadsNs};
}
/* ProfileBufferCollector */

View file

@ -9,32 +9,33 @@
#include "ProfileBufferEntry.h"
#include "ProfilerMarker.h"
#include "mozilla/BlocksRingBuffer.h"
#include "mozilla/Maybe.h"
#include "mozilla/PowerOfTwo.h"
// A fixed-capacity circular buffer.
// Class storing most profiling data in a BlocksRingBuffer.
//
// This class is used as a queue of entries which, after construction, never
// allocates. This makes it safe to use in the profiler's "critical section".
// Entries are appended at the end. Once the queue capacity has been reached,
// adding a new entry will evict an old entry from the start of the queue.
// Positions in the queue are represented as 64-bit unsigned integers which
// only increase and never wrap around.
// mRangeStart and mRangeEnd describe the range in that uint64_t space which is
// covered by the queue contents.
// Internally, the buffer uses a fixed-size storage and applies a modulo
// operation when accessing entries in that storage buffer. "Evicting" an entry
// really just means that an existing entry in the storage buffer gets
// overwritten and that mRangeStart gets incremented.
class ProfileBuffer final {
public:
// Opaque type containing a block index, which should not be modified outside
// of BlocksRingBuffer.
// TODO: Eventually, all uint64_t values should be replaced with BlockIndex,
// because external users should only store and compare them, but not do other
// arithmetic operations (that uint64_t supports).
using BlockIndex = mozilla::BlocksRingBuffer::BlockIndex;
// ProfileBuffer constructor
// @param aCapacity The capacity of the buffer.
explicit ProfileBuffer(mozilla::PowerOfTwo32 aCapacity);
~ProfileBuffer();
bool IsThreadSafe() const { return mEntries.IsThreadSafe(); }
// Add |aEntry| to the buffer, ignoring what kind of entry it is.
void AddEntry(const ProfileBufferEntry& aEntry);
uint64_t AddEntry(const ProfileBufferEntry& aEntry);
// Add to the buffer a sample start (ThreadId) entry for aThreadId.
// Returns the position of the entry.
@ -91,16 +92,33 @@ class ProfileBuffer final {
void DiscardSamplesBeforeTime(double aTime);
void AddStoredMarker(ProfilerMarker* aStoredMarker);
void AddMarker(ProfilerMarker* aMarker);
// The following method is not signal safe!
void DeleteExpiredStoredMarkers();
// Access an entry in the buffer.
ProfileBufferEntry& GetEntry(uint64_t aPosition) const {
return mEntries[aPosition & mEntryIndexMask];
// Read an entry in the buffer. Slow!
ProfileBufferEntry GetEntry(uint64_t aPosition) const {
ProfileBufferEntry entry;
mEntries.Read([&](mozilla::BlocksRingBuffer::Reader* aReader) {
// BlocksRingBuffer cannot be out-of-session when sampler is running.
MOZ_ASSERT(aReader);
for (mozilla::BlocksRingBuffer::EntryReader er : *aReader) {
if (er.CurrentBlockIndex().ConvertToU64() > aPosition) {
// Passed the block. (We need a precise position.)
return;
}
if (er.CurrentBlockIndex().ConvertToU64() == aPosition) {
MOZ_RELEASE_ASSERT(er.RemainingBytes() <= sizeof(entry));
er.Read(&entry, er.RemainingBytes());
return;
}
}
});
return entry;
}
size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
void CollectOverheadStats(mozilla::TimeDuration aSamplingTime,
@ -112,38 +130,50 @@ class ProfileBuffer final {
ProfilerBufferInfo GetProfilerBufferInfo() const;
private:
// The storage that backs our buffer. Holds capacity entries.
// All accesses to entries in mEntries need to go through GetEntry(), which
// translates the given buffer position from the near-infinite uint64_t space
// into the entry storage space.
mozilla::UniquePtr<ProfileBufferEntry[]> mEntries;
// Add |aEntry| to the provided BlocksRingBuffer.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// that is not attached to a `ProfileBuffer`.
static BlockIndex AddEntry(mozilla::BlocksRingBuffer& aBlocksRingBuffer,
const ProfileBufferEntry& aEntry);
// A mask such that pos & mEntryIndexMask == pos % capacity.
mozilla::PowerOfTwoMask32 mEntryIndexMask;
// Add a sample start (ThreadId) entry for aThreadId to the provided
// BlocksRingBuffer. Returns the position of the entry.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// that is not attached to a `ProfileBuffer`.
static BlockIndex AddThreadIdEntry(
mozilla::BlocksRingBuffer& aBlocksRingBuffer, int aThreadId);
// The circular-ring storage in which this ProfileBuffer stores its data.
mozilla::BlocksRingBuffer mEntries;
public:
// mRangeStart and mRangeEnd are uint64_t values that strictly advance and
// never wrap around. mRangeEnd is always greater than or equal to
// mRangeStart, but never gets more than capacity steps ahead of
// mRangeStart, because we can only store a fixed number of entries in the
// buffer. Once the entire buffer is in use, adding a new entry will evict an
// entry from the front of the buffer (and increase mRangeStart).
// In other words, the following conditions hold true at all times:
// (1) mRangeStart <= mRangeEnd
// (2) mRangeEnd - mRangeStart <= capacity
// `BufferRangeStart()` and `BufferRangeEnd()` return `uint64_t` values
// corresponding to the first entry and past the last entry stored in
// `mEntries`.
//
// If there are no live entries, then mRangeStart == mRangeEnd.
// Otherwise, mRangeStart is the first live entry and mRangeEnd is one past
// the last live entry, and also the position at which the next entry will be
// added.
// (mRangeEnd - mRangeStart) always gives the number of live entries.
uint64_t mRangeStart;
uint64_t mRangeEnd;
// The returned values are not guaranteed to be stable, because other threads
// may also be accessing the buffer concurrently. But they will always
// increase, and can therefore give an indication of how far these values have
// *at least* reached. In particular:
// - Entries whose index is strictly less than `BufferRangeStart()` have been
// discarded by now, so any related data may also be safely discarded.
// - It is safe to try and read entries at any index strictly less than
// `BufferRangeEnd()` -- but note that these reads may fail by the time you
// request them, as old entries get overwritten by new ones.
uint64_t BufferRangeStart() const {
return mEntries.GetState().mRangeStart.ConvertToU64();
}
uint64_t BufferRangeEnd() const {
return mEntries.GetState().mRangeEnd.ConvertToU64();
}
// Markers that marker entries in the buffer might refer to.
ProfilerMarkerLinkedList mStoredMarkers;
private:
// Used when duplicating sleeping stacks (to avoid spurious mallocs).
mozilla::UniquePtr<mozilla::BlocksRingBuffer::Byte[]> mDuplicationBuffer;
double mFirstSamplingTimeNs = 0.0;
double mLastSamplingTimeNs = 0.0;
ProfilerStats mIntervalsNs;
@ -171,7 +201,7 @@ class ProfileBufferCollector final : public ProfilerStackCollector {
}
mozilla::Maybe<uint64_t> BufferRangeStart() override {
return mozilla::Some(mBuf.mRangeStart);
return mozilla::Some(mBuf.BufferRangeStart());
}
virtual void CollectNativeLeafAddr(void* aAddr) override;

The diff for this file is not shown because of its large size.

View file

@ -26,35 +26,36 @@ class ProfilerMarker;
// NOTE! If you add entries, you need to verify if they need to be added to the
// switch statement in DuplicateLastSample!
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
MACRO(CategoryPair, int) \
MACRO(CollectionStart, double) \
MACRO(CollectionEnd, double) \
MACRO(Label, const char*) \
MACRO(FrameFlags, uint64_t) \
MACRO(DynamicStringFragment, char*) /* char[kNumChars], really */ \
MACRO(JitReturnAddr, void*) \
MACRO(LineNumber, int) \
MACRO(ColumnNumber, int) \
MACRO(NativeLeafAddr, void*) \
MACRO(Marker, ProfilerMarker*) \
MACRO(Pause, double) \
MACRO(Responsiveness, double) \
MACRO(Resume, double) \
MACRO(ThreadId, int) \
MACRO(Time, double) \
MACRO(CounterId, void*) \
MACRO(CounterKey, uint64_t) \
MACRO(Number, uint64_t) \
MACRO(Count, int64_t) \
MACRO(ProfilerOverheadTime, double) \
MACRO(ProfilerOverheadDuration, double)
// This will evaluate the MACRO with (KIND, TYPE, SIZE)
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
MACRO(CategoryPair, int, sizeof(int)) \
MACRO(CollectionStart, double, sizeof(double)) \
MACRO(CollectionEnd, double, sizeof(double)) \
MACRO(Label, const char*, sizeof(const char*)) \
MACRO(FrameFlags, uint64_t, sizeof(uint64_t)) \
MACRO(DynamicStringFragment, char*, ProfileBufferEntry::kNumChars) \
MACRO(JitReturnAddr, void*, sizeof(void*)) \
MACRO(LineNumber, int, sizeof(int)) \
MACRO(ColumnNumber, int, sizeof(int)) \
MACRO(NativeLeafAddr, void*, sizeof(void*)) \
MACRO(Marker, ProfilerMarker*, sizeof(ProfilerMarker*)) \
MACRO(Pause, double, sizeof(double)) \
MACRO(Responsiveness, double, sizeof(double)) \
MACRO(Resume, double, sizeof(double)) \
MACRO(ThreadId, int, sizeof(int)) \
MACRO(Time, double, sizeof(double)) \
MACRO(CounterId, void*, sizeof(void*)) \
MACRO(CounterKey, uint64_t, sizeof(uint64_t)) \
MACRO(Number, uint64_t, sizeof(uint64_t)) \
MACRO(Count, int64_t, sizeof(int64_t)) \
MACRO(ProfilerOverheadTime, double, sizeof(double)) \
MACRO(ProfilerOverheadDuration, double, sizeof(double))
class ProfileBufferEntry {
public:
enum class Kind : uint8_t {
INVALID = 0,
#define KIND(k, t) k,
#define KIND(KIND, TYPE, SIZE) KIND,
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(KIND)
#undef KIND
LIMIT
@ -78,17 +79,17 @@ class ProfileBufferEntry {
ProfileBufferEntry(Kind aKind, int aInt);
public:
#define CTOR(k, t) \
static ProfileBufferEntry k(t aVal) { \
return ProfileBufferEntry(Kind::k, aVal); \
#define CTOR(KIND, TYPE, SIZE) \
static ProfileBufferEntry KIND(TYPE aVal) { \
return ProfileBufferEntry(Kind::KIND, aVal); \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(CTOR)
#undef CTOR
Kind GetKind() const { return mKind; }
#define IS_KIND(k, t) \
bool Is##k() const { return mKind == Kind::k; }
#define IS_KIND(KIND, TYPE, SIZE) \
bool Is##KIND() const { return mKind == Kind::KIND; }
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(IS_KIND)
#undef IS_KIND

View file

@ -38,7 +38,8 @@ void ProfiledThreadData::StreamJSON(const ProfileBuffer& aBuffer,
double aSinceTime, bool JSTracerEnabled,
ProfilerCodeAddressService* aService) {
if (mJITFrameInfoForPreviousJSContexts &&
mJITFrameInfoForPreviousJSContexts->HasExpired(aBuffer.mRangeStart)) {
mJITFrameInfoForPreviousJSContexts->HasExpired(
aBuffer.BufferRangeStart())) {
mJITFrameInfoForPreviousJSContexts = nullptr;
}
@ -287,7 +288,8 @@ void ProfiledThreadData::NotifyAboutToLoseJSContext(
MOZ_RELEASE_ASSERT(aContext);
if (mJITFrameInfoForPreviousJSContexts &&
mJITFrameInfoForPreviousJSContexts->HasExpired(aBuffer.mRangeStart)) {
mJITFrameInfoForPreviousJSContexts->HasExpired(
aBuffer.BufferRangeStart())) {
mJITFrameInfoForPreviousJSContexts = nullptr;
}

View file

@ -756,7 +756,7 @@ class ActivePS {
LiveProfiledThreadData& thread = sInstance->mLiveProfiledThreads[i];
if (thread.mRegisteredThread == aRegisteredThread) {
thread.mProfiledThreadData->NotifyUnregistered(
sInstance->mBuffer->mRangeEnd);
sInstance->mBuffer->BufferRangeEnd());
MOZ_RELEASE_ASSERT(sInstance->mDeadProfiledThreads.append(
std::move(thread.mProfiledThreadData)));
sInstance->mLiveProfiledThreads.erase(
@ -773,7 +773,7 @@ class ActivePS {
#endif
static void DiscardExpiredDeadProfiledThreads(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
uint64_t bufferRangeStart = sInstance->mBuffer->BufferRangeStart();
// Discard any dead threads that were unregistered before bufferRangeStart.
sInstance->mDeadProfiledThreads.eraseIf(
[bufferRangeStart](
@ -792,7 +792,7 @@ class ActivePS {
for (size_t i = 0; i < registeredPages.length(); i++) {
RefPtr<PageInformation>& page = registeredPages[i];
if (page->DocShellId().Equals(aRegisteredDocShellId)) {
page->NotifyUnregistered(sInstance->mBuffer->mRangeEnd);
page->NotifyUnregistered(sInstance->mBuffer->BufferRangeEnd());
MOZ_RELEASE_ASSERT(
sInstance->mDeadProfiledPages.append(std::move(page)));
registeredPages.erase(&registeredPages[i--]);
@ -801,7 +801,7 @@ class ActivePS {
}
static void DiscardExpiredPages(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
uint64_t bufferRangeStart = sInstance->mBuffer->BufferRangeStart();
// Discard any dead pages that were unregistered before
// bufferRangeStart.
sInstance->mDeadProfiledPages.eraseIf(
@ -850,7 +850,7 @@ class ActivePS {
#endif
static void ClearExpiredExitProfiles(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
uint64_t bufferRangeStart = sInstance->mBuffer->BufferRangeStart();
// Discard exit profiles that were gathered before our buffer RangeStart.
#ifdef MOZ_BASE_PROFILER
if (bufferRangeStart != 0 && sInstance->mBaseProfileThreads) {
@ -880,7 +880,7 @@ class ActivePS {
ClearExpiredExitProfiles(aLock);
MOZ_RELEASE_ASSERT(sInstance->mExitProfiles.append(
ExitProfile{aExitProfile, sInstance->mBuffer->mRangeEnd}));
ExitProfile{aExitProfile, sInstance->mBuffer->BufferRangeEnd()}));
}
static Vector<nsCString> MoveExitProfiles(PSLockRef aLock) {
@ -1739,8 +1739,7 @@ static void DoPeriodicSample(PSLockRef aLock,
aRegisteredThread.RacyRegisteredThread().GetPendingMarkers();
while (pendingMarkersList && pendingMarkersList->peek()) {
ProfilerMarker* marker = pendingMarkersList->popHead();
buffer.AddStoredMarker(marker);
buffer.AddEntry(ProfileBufferEntry::Marker(marker));
buffer.AddMarker(marker);
}
ThreadResponsiveness* resp = aProfiledThreadData.GetThreadResponsiveness();
@ -2049,7 +2048,7 @@ static UniquePtr<ProfileBuffer> CollectJavaThreadProfileData() {
// locked_profiler_start uses sample count is 1000 for Java thread.
// This entry size is enough now, but we might have to estimate it
// if we can customize it
auto buffer = MakeUnique<ProfileBuffer>(MakePowerOfTwo32<1024 * 1024>());
auto buffer = MakeUnique<ProfileBuffer>(MakePowerOfTwo32<8 * 1024 * 1024>());
int sampleId = 0;
while (true) {
@ -2769,7 +2768,7 @@ static ProfilingStack* locked_register_thread(PSLockRef aLock,
registeredThread->PollJSSampling();
if (registeredThread->GetJSContext()) {
profiledThreadData->NotifyReceivedJSContext(
ActivePS::Buffer(aLock).mRangeEnd);
ActivePS::Buffer(aLock).BufferRangeEnd());
}
}
}
@ -3391,10 +3390,10 @@ static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity,
#endif
// Fall back to the default values if the passed-in values are unreasonable.
// Less than 1024 would not be enough for the most complex stack, so we should
// Less than 8192 would not be enough for the most complex stack, so we should
// be able to store at least one full stack. TODO: Review magic numbers.
PowerOfTwo32 capacity =
(aCapacity.Value() >= 1024) ? aCapacity : PROFILER_DEFAULT_ENTRIES;
(aCapacity.Value() >= 8192u) ? aCapacity : PROFILER_DEFAULT_ENTRIES;
Maybe<double> duration = aDuration;
if (aDuration && *aDuration <= 0) {
@ -3979,8 +3978,8 @@ UniqueProfilerBacktrace profiler_get_backtrace() {
regs.Clear();
#endif
// 1024 should be plenty for a single backtrace.
auto buffer = MakeUnique<ProfileBuffer>(MakePowerOfTwo32<1024>());
// 65536 bytes should be plenty for a single backtrace.
auto buffer = MakeUnique<ProfileBuffer>(MakePowerOfTwo32<65536>());
DoSyncSample(lock, *registeredThread, now, regs, *buffer.get());
@ -4121,8 +4120,7 @@ void profiler_add_marker_for_thread(int aThreadId,
// Insert the marker into the buffer
ProfileBuffer& buffer = ActivePS::Buffer(lock);
buffer.AddStoredMarker(marker);
buffer.AddEntry(ProfileBufferEntry::Marker(marker));
buffer.AddMarker(marker);
}
void profiler_tracing(const char* aCategoryString, const char* aMarkerName,
@ -4201,7 +4199,7 @@ void profiler_set_js_context(JSContext* aCx) {
ActivePS::GetProfiledThreadData(lock, registeredThread);
if (profiledThreadData) {
profiledThreadData->NotifyReceivedJSContext(
ActivePS::Buffer(lock).mRangeEnd);
ActivePS::Buffer(lock).BufferRangeEnd());
}
}
}

View file

@ -14,46 +14,34 @@
// Make sure we can record one entry and read it
TEST(ThreadProfile, InsertOneEntry)
{
auto pb = MakeUnique<ProfileBuffer>(mozilla::PowerOfTwo32(10));
auto pb = MakeUnique<ProfileBuffer>(
mozilla::PowerOfTwo32(2 * (1 + uint32_t(sizeof(ProfileBufferEntry)))));
pb->AddEntry(ProfileBufferEntry::Time(123.1));
ASSERT_TRUE(pb->GetEntry(pb->mRangeStart).IsTime());
ASSERT_TRUE(pb->GetEntry(pb->mRangeStart).GetDouble() == 123.1);
ProfileBufferEntry entry = pb->GetEntry(pb->BufferRangeStart());
ASSERT_TRUE(entry.IsTime());
ASSERT_EQ(123.1, entry.GetDouble());
}
// See if we can insert some entries
TEST(ThreadProfile, InsertEntriesNoWrap)
{
auto pb = MakeUnique<ProfileBuffer>(mozilla::PowerOfTwo32(100));
int test_size = 50;
auto pb = MakeUnique<ProfileBuffer>(
mozilla::PowerOfTwo32(100 * (1 + uint32_t(sizeof(ProfileBufferEntry)))));
const int test_size = 50;
for (int i = 0; i < test_size; i++) {
pb->AddEntry(ProfileBufferEntry::Time(i));
}
uint64_t readPos = pb->mRangeStart;
while (readPos != pb->mRangeEnd) {
ASSERT_TRUE(pb->GetEntry(readPos).IsTime());
ASSERT_TRUE(pb->GetEntry(readPos).GetDouble() == readPos);
readPos++;
}
}
// See if evicting works as it should in the basic case
TEST(ThreadProfile, InsertEntriesWrap)
{
int entries = 32;
auto pb = MakeUnique<ProfileBuffer>(mozilla::PowerOfTwo32(entries));
ASSERT_TRUE(pb->mRangeStart == 0);
ASSERT_TRUE(pb->mRangeEnd == 0);
int test_size = 43;
for (int i = 0; i < test_size; i++) {
pb->AddEntry(ProfileBufferEntry::Time(i));
}
// We inserted 11 more entries than fit in the buffer, so the first 11 entries
// should have been evicted, and the range start should have increased to 11.
ASSERT_TRUE(pb->mRangeStart == 11);
uint64_t readPos = pb->mRangeStart;
while (readPos != pb->mRangeEnd) {
ASSERT_TRUE(pb->GetEntry(readPos).IsTime());
ASSERT_TRUE(pb->GetEntry(readPos).GetDouble() == readPos);
int times = 0;
uint64_t readPos = pb->BufferRangeStart();
while (readPos != pb->BufferRangeEnd()) {
ProfileBufferEntry entry = pb->GetEntry(readPos);
readPos++;
if (entry.GetKind() == ProfileBufferEntry::Kind::INVALID) {
continue;
}
ASSERT_TRUE(entry.IsTime());
ASSERT_EQ(times, entry.GetDouble());
times++;
}
ASSERT_EQ(test_size, times);
}