Bug 1753192 - Move core ProfileChunkedBuffers to static singletons in profiler_get_core_buffer() - r=canaltinova

Note that they are still separate buffers for now.

Differential Revision: https://phabricator.services.mozilla.com/D137802
Gerald Squelart 2022-02-11 03:13:51 +00:00
Parent aa44b10600
Commit f3c4f65d02
7 changed files with 49 additions and 94 deletions
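In short, the core buffer moves out of the heap-allocated `CorePS` singleton (where it was reached through `CorePS::CoreBuffer()`) and becomes a function-local static returned by `profiler_get_core_buffer()`. Below is a minimal sketch of that pattern, using hypothetical `Buffer`/`GetBuffer` names rather than the real profiler types:

```cpp
#include <mutex>

// Hypothetical stand-in for ProfileChunkedBuffer: a buffer that carries its
// own mutex so it can be used without holding the global profiler lock.
struct Buffer {
  std::mutex mMutex;
  // ... chunked storage would live here ...
};

// Before: the buffer was a member of a heap-allocated singleton, reached
// through a getter that required that singleton to exist:
//
//   class CoreState {
//     Buffer mBuffer;
//     static CoreState* sInstance;
//   };
//   Buffer& GetBuffer() { return CoreState::sInstance->mBuffer; }
//
// After: the buffer is a function-local static. C++11 guarantees thread-safe
// initialization, and callers no longer depend on the singleton having been
// created (or still being alive).
Buffer& GetBuffer() {
  static Buffer sBuffer;
  return sBuffer;
}
```

As the `ActivePS` hunks below show, the buffer itself now outlives any profiling session, while `ActivePS` still controls the actual storage by attaching and detaching the chunk manager via `SetChunkManager()`/`ResetChunkManager()`.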

View file

@@ -183,6 +183,16 @@ void PrintToConsole(const char* aFmt, ...) {
va_end(args);
}
ProfileChunkedBuffer& profiler_get_core_buffer() {
// This needs its own mutex, because it is used concurrently from functions
// guarded by gPSMutex as well as others without safety (e.g.,
// profiler_add_marker). It is *not* used inside the critical section of the
// sampler, because mutexes cannot be used there.
static ProfileChunkedBuffer sProfileChunkedBuffer{
ProfileChunkedBuffer::ThreadSafety::WithMutex};
return sProfileChunkedBuffer;
}
Atomic<int, MemoryOrdering::Relaxed> gSkipSampling;
constexpr static bool ValidateFeatures() {
@@ -316,12 +326,7 @@ typedef const PSAutoLock& PSLockRef;
class CorePS {
private:
CorePS()
: mProcessStartTime(TimeStamp::ProcessCreation()),
// This needs its own mutex, because it is used concurrently from
// functions guarded by gPSMutex as well as others without safety (e.g.,
// profiler_add_marker). It is *not* used inside the critical section of
// the sampler, because mutexes cannot be used there.
mCoreBuffer(ProfileChunkedBuffer::ThreadSafety::WithMutex)
: mProcessStartTime(TimeStamp::ProcessCreation())
#ifdef USE_LUL_STACKWALK
,
mLul(nullptr)
@@ -380,9 +385,6 @@ class CorePS {
// No PSLockRef is needed for this field because it's immutable.
PS_GET_LOCKLESS(const TimeStamp&, ProcessStartTime)
// No PSLockRef is needed for this field because it's thread-safe.
PS_GET_LOCKLESS(ProfileChunkedBuffer&, CoreBuffer)
PS_GET(const Vector<UniquePtr<RegisteredThread>>&, RegisteredThreads)
static void AppendRegisteredThread(
@@ -488,17 +490,6 @@ class CorePS {
// The time that the process started.
const TimeStamp mProcessStartTime;
// The thread-safe blocks-oriented buffer into which all profiling data is
// recorded.
// ActivePS controls the lifetime of the underlying contents buffer: When
// ActivePS does not exist, mCoreBuffer is empty and rejects all reads&writes;
// see ActivePS for further details.
// Note: This needs to live here outside of ActivePS, because some producers
// are indirectly controlled (e.g., by atomic flags) and therefore may still
// attempt to write some data shortly after ActivePS has shutdown and deleted
// the underlying buffer in memory.
ProfileChunkedBuffer mCoreBuffer;
// Info on all the registered threads.
// ThreadIds in mRegisteredThreads are unique.
Vector<UniquePtr<RegisteredThread>> mRegisteredThreads;
@@ -524,11 +515,6 @@ class CorePS {
CorePS* CorePS::sInstance = nullptr;
ProfileChunkedBuffer& profiler_get_core_buffer() {
MOZ_ASSERT(CorePS::Exists());
return CorePS::CoreBuffer();
}
class SamplerThread;
static SamplerThread* NewSamplerThread(PSLockRef aLock, uint32_t aGeneration,
@@ -629,8 +615,9 @@ class ActivePS {
size_t(ClampToAllowedEntries(aCapacity.Value())) * scBytesPerEntry,
ChunkSizeForEntries(aCapacity.Value())),
mProfileBuffer([this]() -> ProfileChunkedBuffer& {
CorePS::CoreBuffer().SetChunkManager(mProfileBufferChunkManager);
return CorePS::CoreBuffer();
ProfileChunkedBuffer& buffer = profiler_get_core_buffer();
buffer.SetChunkManager(mProfileBufferChunkManager);
return buffer;
}()),
// The new sampler thread doesn't start sampling immediately because the
// main loop within Run() is blocked until this function's caller
@@ -650,7 +637,7 @@ class ActivePS {
}
}
~ActivePS() { CorePS::CoreBuffer().ResetChunkManager(); }
~ActivePS() { profiler_get_core_buffer().ResetChunkManager(); }
bool ThreadSelected(const char* aThreadName) {
if (mFiltersLowered.empty()) {
@@ -2338,11 +2325,11 @@ void SamplerThread::Run() {
LOG("Stack sample too big for local storage, needed %u bytes",
unsigned(state.mRangeEnd - previousState.mRangeEnd));
} else if (state.mRangeEnd - previousState.mRangeEnd >=
*CorePS::CoreBuffer().BufferLength()) {
*profiler_get_core_buffer().BufferLength()) {
LOG("Stack sample too big for profiler storage, needed %u bytes",
unsigned(state.mRangeEnd - previousState.mRangeEnd));
} else {
CorePS::CoreBuffer().AppendContents(localBuffer);
profiler_get_core_buffer().AppendContents(localBuffer);
}
// Clean up for the next run.
@@ -3651,7 +3638,7 @@ bool profiler_is_locked_on_current_thread() {
// - The buffer mutex, used directly in some functions without locking the
// main mutex, e.g., marker-related functions.
return PSAutoLock::IsLockedOnCurrentThread() ||
CorePS::CoreBuffer().IsThreadSafeAndLockedOnCurrentThread();
profiler_get_core_buffer().IsThreadSafeAndLockedOnCurrentThread();
}
// This is a simplified version of profiler_add_marker that can be easily passed

View file

@@ -98,7 +98,7 @@ ProfileBufferBlockIndex AddMarker(
return {};
}
return ::mozilla::baseprofiler::AddMarkerToBuffer(
base_profiler_markers_detail::CachedBaseCoreBuffer(), aName, aCategory,
::mozilla::baseprofiler::profiler_get_core_buffer(), aName, aCategory,
std::move(aOptions), aMarkerType, aPayloadArguments...);
#endif
}

View file

@@ -31,14 +31,6 @@ MFBT_API ProfileChunkedBuffer& profiler_get_core_buffer();
namespace mozilla::base_profiler_markers_detail {
// Get the core buffer from the profiler, and cache it in a
// non-templated-function static reference.
inline ProfileChunkedBuffer& CachedBaseCoreBuffer() {
static ProfileChunkedBuffer& coreBuffer =
baseprofiler::profiler_get_core_buffer();
return coreBuffer;
}
struct Streaming {
// A `MarkerDataDeserializer` is a free function that can read a serialized
// payload from an `EntryReader` and streams it as JSON object properties.
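The deleted `CachedBaseCoreBuffer()` helper only fetched the core buffer once and parked the reference in a non-templated function's static, so that templated marker code shared one cached reference instead of each instantiation doing its own lookup. Now that `profiler_get_core_buffer()` itself returns a reference to a function-local static, that extra layer is redundant. A simplified sketch of the removed idiom, with hypothetical names:

```cpp
// Hypothetical stand-ins, not the real profiler API.
struct Buffer { /* stand-in for ProfileChunkedBuffer */ };

// Analogous to profiler_get_core_buffer() after this commit.
Buffer& GetBuffer() {
  static Buffer sBuffer;
  return sBuffer;
}

// The removed idiom: keep the reference in a non-templated inline function's
// static so heavily templated callers pay for the lookup only once.
inline Buffer& CachedBuffer() {
  static Buffer& sCached = GetBuffer();
  return sCached;
}

// With GetBuffer() returning a stable function-local static directly, the
// caching wrapper no longer buys anything, so callers use GetBuffer().
```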

View file

@@ -341,7 +341,7 @@ void gecko_profiler_add_marker(
markerOptions.Set(mozilla::MarkerThreadId::CurrentThread());
}
auto& buffer = profiler_markers_detail::CachedCoreBuffer();
auto& buffer = profiler_get_core_buffer();
mozilla::Span payload(aPayload, aPayloadSize);
mozilla::StackCaptureOptions captureOptions =

View file

@@ -196,6 +196,16 @@ using ThreadRegistry = mozilla::profiler::ThreadRegistry;
LazyLogModule gProfilerLog("prof");
ProfileChunkedBuffer& profiler_get_core_buffer() {
// This needs its own mutex, because it is used concurrently from functions
// guarded by gPSMutex as well as others without safety (e.g.,
// profiler_add_marker). It is *not* used inside the critical section of the
// sampler, because mutexes cannot be used there.
static ProfileChunkedBuffer sProfileChunkedBuffer{
ProfileChunkedBuffer::ThreadSafety::WithMutex};
return sProfileChunkedBuffer;
}
mozilla::Atomic<int, mozilla::MemoryOrdering::Relaxed> gSkipSampling;
#if defined(GP_OS_android)
@@ -370,12 +380,7 @@ using JsFrameBuffer = mozilla::profiler::ThreadRegistrationData::JsFrameBuffer;
class CorePS {
private:
CorePS()
: mProcessStartTime(TimeStamp::ProcessCreation()),
// This needs its own mutex, because it is used concurrently from
// functions guarded by gPSMutex as well as others without safety (e.g.,
// profiler_add_marker). It is *not* used inside the critical section of
// the sampler, because mutexes cannot be used there.
mCoreBuffer(ProfileChunkedBuffer::ThreadSafety::WithMutex)
: mProcessStartTime(TimeStamp::ProcessCreation())
#ifdef USE_LUL_STACKWALK
,
mLul(nullptr)
@@ -436,9 +441,6 @@ class CorePS {
// No PSLockRef is needed for this field because it's immutable.
PS_GET_LOCKLESS(TimeStamp, ProcessStartTime)
// No PSLockRef is needed for this field because it's thread-safe.
PS_GET_LOCKLESS(ProfileChunkedBuffer&, CoreBuffer)
PS_GET(JsFrameBuffer&, JsFrames)
PS_GET(Vector<RefPtr<PageInformation>>&, RegisteredPages)
@@ -527,17 +529,6 @@ class CorePS {
// The time that the process started.
const TimeStamp mProcessStartTime;
// The thread-safe blocks-oriented buffer into which all profiling data is
// recorded.
// ActivePS controls the lifetime of the underlying contents buffer: When
// ActivePS does not exist, mCoreBuffer is empty and rejects all reads&writes;
// see ActivePS for further details.
// Note: This needs to live here outside of ActivePS, because some producers
// are indirectly controlled (e.g., by atomic flags) and therefore may still
// attempt to write some data shortly after ActivePS has shutdown and deleted
// the underlying buffer in memory.
ProfileChunkedBuffer mCoreBuffer;
// Info on all the registered pages.
// InnerWindowIDs in mRegisteredPages are unique.
Vector<RefPtr<PageInformation>> mRegisteredPages;
@@ -568,11 +559,6 @@ class CorePS {
CorePS* CorePS::sInstance = nullptr;
ProfileChunkedBuffer& profiler_get_core_buffer() {
MOZ_ASSERT(CorePS::Exists());
return CorePS::CoreBuffer();
}
void locked_profiler_add_sampled_counter(PSLockRef aLock,
BaseProfilerCount* aCounter) {
CorePS::AppendCounter(aLock, aCounter);
@@ -691,8 +677,9 @@ class ActivePS {
size_t(ClampToAllowedEntries(aCapacity.Value())) * scBytesPerEntry,
ChunkSizeForEntries(aCapacity.Value())),
mProfileBuffer([this]() -> ProfileChunkedBuffer& {
CorePS::CoreBuffer().SetChunkManager(mProfileBufferChunkManager);
return CorePS::CoreBuffer();
ProfileChunkedBuffer& coreBuffer = profiler_get_core_buffer();
coreBuffer.SetChunkManager(mProfileBufferChunkManager);
return coreBuffer;
}()),
mMaybeProcessCPUCounter(ProfilerFeature::HasProcessCPU(aFeatures)
? new ProcessCPUCounter(aLock)
@@ -760,7 +747,7 @@ class ActivePS {
}
}
#endif
CorePS::CoreBuffer().ResetChunkManager();
profiler_get_core_buffer().ResetChunkManager();
}
bool ThreadSelected(const char* aThreadName) {
@@ -1198,7 +1185,7 @@ class ActivePS {
if (sInstance->mBaseProfileThreads &&
sInstance->mGeckoIndexWhenBaseProfileAdded
.ConvertToProfileBufferIndex() <
CorePS::CoreBuffer().GetState().mRangeStart) {
profiler_get_core_buffer().GetState().mRangeStart) {
DEBUG_LOG("ClearExpiredExitProfiles() - Discarding base profile %p",
sInstance->mBaseProfileThreads.get());
sInstance->mBaseProfileThreads.reset();
@@ -1216,7 +1203,7 @@ class ActivePS {
sInstance->mBaseProfileThreads = std::move(aBaseProfileThreads);
sInstance->mGeckoIndexWhenBaseProfileAdded =
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
CorePS::CoreBuffer().GetState().mRangeEnd);
profiler_get_core_buffer().GetState().mRangeEnd);
}
static UniquePtr<char[]> MoveBaseProfileThreads(PSLockRef aLock) {
@@ -3745,9 +3732,9 @@ void SamplerThread::Run() {
const bool cpuUtilization = ProfilerFeature::HasCPUUtilization(features);
// Use local ProfileBuffer and underlying buffer to capture the stack.
// (This is to avoid touching the CorePS::CoreBuffer lock while a thread is
// suspended, because that thread could be working with the CorePS::CoreBuffer
// as well.)
// (This is to avoid touching the core buffer lock while a thread is
// suspended, because that thread could be working with the core buffer as
// well.)
mozilla::ProfileBufferChunkManagerSingle localChunkManager(
ProfileBufferChunkManager::scExpectedMaximumStackSize);
ProfileChunkedBuffer localBuffer(
@@ -3955,7 +3942,7 @@ void SamplerThread::Run() {
// Note: It is not stored inside the CompactStack so that it doesn't
// get incorrectly duplicated when the thread is sleeping.
if (!runningTimesDiff.IsEmpty()) {
CorePS::CoreBuffer().PutObjects(
profiler_get_core_buffer().PutObjects(
ProfileBufferEntry::Kind::RunningTimes, runningTimesDiff);
}
@@ -4171,7 +4158,7 @@ void SamplerThread::Run() {
// Note: It is not stored inside the CompactStack so that it
// doesn't get incorrectly duplicated when the thread is sleeping.
if (unresponsiveDuration_ms.isSome()) {
CorePS::CoreBuffer().PutObjects(
profiler_get_core_buffer().PutObjects(
ProfileBufferEntry::Kind::UnresponsiveDurationMs,
*unresponsiveDuration_ms);
}
@@ -4192,20 +4179,20 @@ void SamplerThread::Run() {
previousState.mFailedPutBytes));
// There *must* be a CompactStack after a TimeBeforeCompactStack,
// even an empty one.
CorePS::CoreBuffer().PutObjects(
profiler_get_core_buffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack,
UniquePtr<ProfileChunkedBuffer>(nullptr));
} else if (state.mRangeEnd - previousState.mRangeEnd >=
*CorePS::CoreBuffer().BufferLength()) {
*profiler_get_core_buffer().BufferLength()) {
LOG("Stack sample too big for profiler storage, needed %u bytes",
unsigned(state.mRangeEnd - previousState.mRangeEnd));
// There *must* be a CompactStack after a TimeBeforeCompactStack,
// even an empty one.
CorePS::CoreBuffer().PutObjects(
profiler_get_core_buffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack,
UniquePtr<ProfileChunkedBuffer>(nullptr));
} else {
CorePS::CoreBuffer().PutObjects(
profiler_get_core_buffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack, localBuffer);
}
@@ -6303,7 +6290,7 @@ bool profiler_is_locked_on_current_thread() {
return PSAutoLock::IsLockedOnCurrentThread() ||
ThreadRegistry::IsRegistryMutexLockedOnCurrentThread() ||
ThreadRegistration::IsDataMutexLockedOnCurrentThread() ||
CorePS::CoreBuffer().IsThreadSafeAndLockedOnCurrentThread() ||
profiler_get_core_buffer().IsThreadSafeAndLockedOnCurrentThread() ||
ProfilerParent::IsLockedOnCurrentThread() ||
ProfilerChild::IsLockedOnCurrentThread();
}

View file

@@ -150,8 +150,8 @@ mozilla::ProfileBufferBlockIndex profiler_add_marker(
aOptions.ThreadId().ThreadId())) {
return {};
}
return ::AddMarkerToBuffer(profiler_markers_detail::CachedCoreBuffer(), aName,
aCategory, std::move(aOptions), aMarkerType,
return ::AddMarkerToBuffer(profiler_get_core_buffer(), aName, aCategory,
std::move(aOptions), aMarkerType,
aPayloadArguments...);
#endif
}
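Call sites outside the profiler are unaffected: markers are still recorded through the public entry points, which now reach the storage via `profiler_get_core_buffer()` internally. A hedged usage sketch, assuming the standard Gecko marker macro (which this commit does not touch):

```cpp
// Caller-side view (hypothetical example). Only the internal path to the
// storage buffer changes in this commit; the macro and its arguments do not.
#include "mozilla/ProfilerMarkers.h"

void DoSomeWork() {
  // Records a text marker on the current thread; internally this ends up in
  // the buffer returned by profiler_get_core_buffer().
  PROFILER_MARKER_TEXT("DoSomeWork", OTHER, {}, "some detail");
}
```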

View file

@@ -26,17 +26,6 @@
// Implemented in platform.cpp
mozilla::ProfileChunkedBuffer& profiler_get_core_buffer();
namespace profiler_markers_detail {
// Get the core buffer from the profiler, and cache it in a
// non-templated-function static reference.
inline mozilla::ProfileChunkedBuffer& CachedCoreBuffer() {
static mozilla::ProfileChunkedBuffer& coreBuffer = profiler_get_core_buffer();
return coreBuffer;
}
} // namespace profiler_markers_detail
#endif // MOZ_GECKO_PROFILER
#endif // ProfilerMarkersDetail_h