Bug 1646266 - Rework backtrace-capture functions - r=gregtatum

`profiler_capture_backtrace(ProfileChunkedBuffer&)` is renamed to `profiler_capture_backtrace_into(ProfileChunkedBuffer&)` (note the "_into" suffix), which makes its purpose clearer.

New function `profiler_capture_backtrace()` creates a buffer, uses `profiler_capture_backtrace_into()`, and returns a `UniquePtr<ProfileChunkedBuffer>`, which can later be given to `MarkerStack::TakeBacktrace`.

`profiler_get_backtrace()` (returning a `UniqueProfilerBacktrace`) now uses `profiler_capture_backtrace()`.

This patch removes most of the code previously duplicated between these functions.
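
To make the reworked surface concrete, here is a minimal usage sketch. It is not part of the patch: the header paths, the 64 KiB capacity, and the example function are assumptions for illustration, mirroring the single-chunk setup the patch uses internally.

```cpp
// A sketch only; header paths and buffer capacity are assumed.
#include "GeckoProfiler.h"
#include "mozilla/ProfileChunkedBuffer.h"
#include "mozilla/ProfileBufferChunkManagerSingle.h"

void ExampleCaptures() {
  // 1) Capture into a caller-provided buffer (the renamed "_into" form),
  //    e.g. to control where the allocation happens.
  mozilla::ProfileChunkedBuffer chunkedBuffer(
      mozilla::ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
      mozilla::MakeUnique<mozilla::ProfileBufferChunkManagerSingle>(
          64 * 1024));  // illustrative capacity
  if (profiler_capture_backtrace_into(chunkedBuffer)) {
    // chunkedBuffer now holds the current thread's call stack.
  }

  // 2) Let the profiler allocate and fill the buffer; the result is null
  //    if the profiler is inactive or the capture failed, and can later
  //    be given to MarkerStack::TakeBacktrace().
  mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> backtrace =
      profiler_capture_backtrace();

  // 3) The pre-existing entry point, now layered on top of
  //    profiler_capture_backtrace().
  UniqueProfilerBacktrace ubt = profiler_get_backtrace();
}
```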

Differential Revision: https://phabricator.services.mozilla.com/D88280
Gerald Squelart 2020-09-02 03:58:50 +00:00
Parent c3ffba3e5c
Commit 6daee06496
4 changed files with 156 additions and 136 deletions

View file

@@ -3535,59 +3535,7 @@ double profiler_time() {
return delta.ToMilliseconds();
}
static void locked_profiler_fill_backtrace(PSLockRef aLock,
RegisteredThread& aRegisteredThread,
ProfileBuffer& aProfileBuffer) {
Registers regs;
#if defined(HAVE_NATIVE_UNWIND)
regs.SyncPopulate();
#else
regs.Clear();
#endif
DoSyncSample(aLock, aRegisteredThread, TimeStamp::NowUnfuzzed(), regs,
aProfileBuffer);
}
static UniqueProfilerBacktrace locked_profiler_get_backtrace(PSLockRef aLock) {
MOZ_RELEASE_ASSERT(CorePS::Exists());
if (!ActivePS::Exists(aLock)) {
return nullptr;
}
RegisteredThread* registeredThread =
TLSRegisteredThread::RegisteredThread(aLock);
if (!registeredThread) {
MOZ_ASSERT(registeredThread);
return nullptr;
}
auto bufferManager = MakeUnique<ProfileChunkedBuffer>(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
MakeUnique<ProfileBufferChunkManagerSingle>(scExpectedMaximumStackSize));
ProfileBuffer buffer(*bufferManager);
locked_profiler_fill_backtrace(aLock, *registeredThread, buffer);
return UniqueProfilerBacktrace(
new ProfilerBacktrace("SyncProfile", registeredThread->Info()->ThreadId(),
std::move(bufferManager)));
}
UniqueProfilerBacktrace profiler_get_backtrace() {
MOZ_RELEASE_ASSERT(CorePS::Exists());
PSAutoLock lock;
return locked_profiler_get_backtrace(lock);
}
void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
delete aBacktrace;
}
bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer) {
bool profiler_capture_backtrace_into(ProfileChunkedBuffer& aChunkedBuffer) {
MOZ_RELEASE_ASSERT(CorePS::Exists());
PSAutoLock lock;
@@ -3605,11 +3553,53 @@ bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer) {
ProfileBuffer profileBuffer(aChunkedBuffer);
locked_profiler_fill_backtrace(lock, *registeredThread, profileBuffer);
Registers regs;
#if defined(HAVE_NATIVE_UNWIND)
regs.SyncPopulate();
#else
regs.Clear();
#endif
DoSyncSample(lock, *registeredThread, TimeStamp::NowUnfuzzed(), regs,
profileBuffer);
return true;
}
UniquePtr<ProfileChunkedBuffer> profiler_capture_backtrace() {
MOZ_RELEASE_ASSERT(CorePS::Exists());
// Quick is-active check before allocating a buffer.
if (!profiler_is_active()) {
return nullptr;
}
auto buffer = MakeUnique<ProfileChunkedBuffer>(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
MakeUnique<ProfileBufferChunkManagerSingle>(scExpectedMaximumStackSize));
if (!profiler_capture_backtrace_into(*buffer)) {
return nullptr;
}
return buffer;
}
UniqueProfilerBacktrace profiler_get_backtrace() {
UniquePtr<ProfileChunkedBuffer> buffer = profiler_capture_backtrace();
if (!buffer) {
return nullptr;
}
return UniqueProfilerBacktrace(new ProfilerBacktrace(
"SyncProfile", profiler_current_thread_id(), std::move(buffer)));
}
void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
delete aBacktrace;
}
bool profiler_is_locked_on_current_thread() {
// This function is used to help users avoid calling `profiler_...` functions
// when the profiler may already have a lock in place, which would prevent a

View file

@@ -33,8 +33,8 @@
// This file can be #included unconditionally. However, everything within this
// file must be guarded by a #ifdef MOZ_GECKO_PROFILER, *except* for the
// following macros, which encapsulate the most common operations and thus
// avoid the need for many #ifdefs.
// following macros and functions, which encapsulate the most common operations
// and thus avoid the need for many #ifdefs.
# define AUTO_BASE_PROFILER_INIT
@@ -66,20 +66,30 @@
# define AUTO_PROFILER_STATS(name)
// Function stubs for when MOZ_GECKO_PROFILER is not defined.
namespace mozilla {
class ProfileChunkedBuffer;
namespace baseprofiler {
struct ProfilerBacktrace {};
using UniqueProfilerBacktrace = UniquePtr<ProfilerBacktrace>;
// Get/Capture-backtrace functions can return nullptr or false; the result
// should be fed to another empty macro or stub anyway.
static inline UniqueProfilerBacktrace profiler_get_backtrace() {
return nullptr;
}
static inline bool profiler_capture_backtrace(
static inline bool profiler_capture_backtrace_into(
ProfileChunkedBuffer& aChunkedBuffer) {
return false;
}
static inline UniquePtr<ProfileChunkedBuffer> profiler_capture_backtrace() {
return nullptr;
}
} // namespace baseprofiler
} // namespace mozilla
@@ -539,10 +549,22 @@ struct ProfilerBacktraceDestructor {
using UniqueProfilerBacktrace =
UniquePtr<ProfilerBacktrace, ProfilerBacktraceDestructor>;
// Immediately capture the current thread's call stack and return it. A no-op
// if the profiler is inactive.
// Immediately capture the current thread's call stack, store it in the provided
// buffer (usually to avoid allocations if you can construct the buffer on the
// stack). Returns false if unsuccessful, or if the profiler is inactive.
MFBT_API bool profiler_capture_backtrace_into(
ProfileChunkedBuffer& aChunkedBuffer);
// Immediately capture the current thread's call stack, and return it in a
// ProfileChunkedBuffer (usually for later use in MarkerStack::TakeBacktrace()).
// May be null if unsuccessful, or if the profiler is inactive.
MFBT_API UniquePtr<ProfileChunkedBuffer> profiler_capture_backtrace();
// Immediately capture the current thread's call stack, and return it in a
// ProfilerBacktrace (usually for later use in marker functions that take a
// ProfilerBacktrace). May be null if unsuccessful, or if the profiler is
// inactive.
MFBT_API UniqueProfilerBacktrace profiler_get_backtrace();
MFBT_API bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer);
struct ProfilerStats {
unsigned n = 0;

View file

@@ -5279,65 +5279,7 @@ double profiler_time() {
return delta.ToMilliseconds();
}
static void locked_profiler_fill_backtrace(PSLockRef aLock,
RegisteredThread& aRegisteredThread,
ProfileBuffer& aProfileBuffer) {
Registers regs;
#if defined(HAVE_NATIVE_UNWIND)
regs.SyncPopulate();
#else
regs.Clear();
#endif
DoSyncSample(aLock, aRegisteredThread, TimeStamp::NowUnfuzzed(), regs,
aProfileBuffer);
}
static UniqueProfilerBacktrace locked_profiler_get_backtrace(PSLockRef aLock) {
if (!ActivePS::Exists(aLock)) {
return nullptr;
}
RegisteredThread* registeredThread =
TLSRegisteredThread::RegisteredThread(aLock);
if (!registeredThread) {
// If this was called from a non-registered thread, return a nullptr
// and do no more work. This can happen from a memory hook. Before
// the allocation tracking there was a MOZ_ASSERT() here checking
// for the existence of a registeredThread.
return nullptr;
}
auto bufferManager = MakeUnique<ProfileChunkedBuffer>(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
MakeUnique<ProfileBufferChunkManagerSingle>(scExpectedMaximumStackSize));
ProfileBuffer buffer(*bufferManager);
locked_profiler_fill_backtrace(aLock, *registeredThread, buffer);
return UniqueProfilerBacktrace(
new ProfilerBacktrace("SyncProfile", registeredThread->Info()->ThreadId(),
std::move(bufferManager)));
}
UniqueProfilerBacktrace profiler_get_backtrace() {
MOZ_RELEASE_ASSERT(CorePS::Exists());
// Fast racy early return.
if (!profiler_is_active()) {
return nullptr;
}
PSAutoLock lock(gPSMutex);
return locked_profiler_get_backtrace(lock);
}
void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
delete aBacktrace;
}
bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer) {
bool profiler_capture_backtrace_into(ProfileChunkedBuffer& aChunkedBuffer) {
MOZ_RELEASE_ASSERT(CorePS::Exists());
PSAutoLock lock(gPSMutex);
@@ -5349,17 +5291,62 @@ bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer) {
RegisteredThread* registeredThread =
TLSRegisteredThread::RegisteredThread(lock);
if (!registeredThread) {
MOZ_ASSERT(registeredThread);
// If this was called from a non-registered thread, return false and do no
// more work. This can happen from a memory hook. Before the allocation
// tracking there was a MOZ_ASSERT() here checking for the existence of a
// registeredThread.
return false;
}
ProfileBuffer profileBuffer(aChunkedBuffer);
locked_profiler_fill_backtrace(lock, *registeredThread, profileBuffer);
Registers regs;
#if defined(HAVE_NATIVE_UNWIND)
regs.SyncPopulate();
#else
regs.Clear();
#endif
DoSyncSample(lock, *registeredThread, TimeStamp::NowUnfuzzed(), regs,
profileBuffer);
return true;
}
UniquePtr<ProfileChunkedBuffer> profiler_capture_backtrace() {
MOZ_RELEASE_ASSERT(CorePS::Exists());
// Quick is-active check before allocating a buffer.
if (!profiler_is_active()) {
return nullptr;
}
auto buffer = MakeUnique<ProfileChunkedBuffer>(
ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
MakeUnique<ProfileBufferChunkManagerSingle>(scExpectedMaximumStackSize));
if (!profiler_capture_backtrace_into(*buffer)) {
return nullptr;
}
return buffer;
}
UniqueProfilerBacktrace profiler_get_backtrace() {
UniquePtr<ProfileChunkedBuffer> buffer = profiler_capture_backtrace();
if (!buffer) {
return nullptr;
}
return UniqueProfilerBacktrace(new ProfilerBacktrace(
"SyncProfile", profiler_current_thread_id(), std::move(buffer)));
}
void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
delete aBacktrace;
}
static void racy_profiler_add_marker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair,
const ProfilerMarkerPayload* aPayload) {
@@ -5532,21 +5519,20 @@ bool profiler_add_native_allocation_marker(int aMainThreadId, int64_t aSize,
// locking the profiler mutex here could end up causing a deadlock if another
// mutex is taken, which the profiler may indirectly need elsewhere.
// See bug 1642726 for such a scenario.
// So instead we only try to lock, and bail out if the mutex is already
// locked. Native allocations are statistically sampled anyway, so missing a
// few because of this is acceptable.
PSAutoTryLock tryLock(gPSMutex);
if (!tryLock.IsLocked()) {
// So instead we bail out if the mutex is already locked. Native allocations
// are statistically sampled anyway, so missing a few because of this is
// acceptable.
if (gPSMutex.IsLockedOnCurrentThread()) {
return false;
}
AUTO_PROFILER_STATS(add_marker_with_NativeAllocationMarkerPayload);
maybelocked_profiler_add_marker_for_thread(
aMainThreadId, JS::ProfilingCategoryPair::OTHER, "Native allocation",
NativeAllocationMarkerPayload(
TimeStamp::Now(), aSize, aMemoryAddress, profiler_current_thread_id(),
locked_profiler_get_backtrace(tryLock.LockRef())),
&tryLock.LockRef());
NativeAllocationMarkerPayload(TimeStamp::Now(), aSize, aMemoryAddress,
profiler_current_thread_id(),
profiler_get_backtrace()),
nullptr);
return true;
}

View file

@@ -31,8 +31,8 @@
// This file can be #included unconditionally. However, everything within this
// file must be guarded by a #ifdef MOZ_GECKO_PROFILER, *except* for the
// following macros, which encapsulate the most common operations and thus
// avoid the need for many #ifdefs.
// following macros and functions, which encapsulate the most common operations
// and thus avoid the need for many #ifdefs.
# define AUTO_PROFILER_INIT
# define AUTO_PROFILER_INIT2
@@ -82,8 +82,14 @@
# define AUTO_PROFILER_TEXT_MARKER_DOCSHELL_CAUSE( \
markerName, text, categoryPair, docShell, cause)
// Function stubs for when MOZ_GECKO_PROFILER is not defined.
struct ProfilerBacktrace {};
using UniqueProfilerBacktrace = mozilla::UniquePtr<int>;
// Get/Capture-backtrace functions can return nullptr or false; the result
// should be fed to another empty macro or stub anyway.
static inline UniqueProfilerBacktrace profiler_get_backtrace() {
return nullptr;
}
@@ -91,10 +97,14 @@ static inline UniqueProfilerBacktrace profiler_get_backtrace() {
namespace mozilla {
class ProfileChunkedBuffer;
} // namespace mozilla
static inline bool profiler_capture_backtrace(
static inline bool profiler_capture_backtrace_into(
mozilla::ProfileChunkedBuffer& aChunkedBuffer) {
return false;
}
static inline mozilla::UniquePtr<mozilla::ProfileChunkedBuffer>
profiler_capture_backtrace() {
return nullptr;
}
#else // !MOZ_GECKO_PROFILER
@@ -688,10 +698,22 @@ struct ProfilerBacktraceDestructor {
using UniqueProfilerBacktrace =
mozilla::UniquePtr<ProfilerBacktrace, ProfilerBacktraceDestructor>;
// Immediately capture the current thread's call stack and return it. A no-op
// if the profiler is inactive.
// Immediately capture the current thread's call stack, store it in the provided
// buffer (usually to avoid allocations if you can construct the buffer on the
// stack). Returns false if unsuccessful, or if the profiler is inactive.
bool profiler_capture_backtrace_into(
mozilla::ProfileChunkedBuffer& aChunkedBuffer);
// Immediately capture the current thread's call stack, and return it in a
// ProfileChunkedBuffer (usually for later use in MarkerStack::TakeBacktrace()).
// May be null if unsuccessful, or if the profiler is inactive.
mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> profiler_capture_backtrace();
// Immediately capture the current thread's call stack, and return it in a
// ProfilerBacktrace (usually for later use in marker functions that take a
// ProfilerBacktrace). May be null if unsuccessful, or if the profiler is
// inactive.
UniqueProfilerBacktrace profiler_get_backtrace();
bool profiler_capture_backtrace(mozilla::ProfileChunkedBuffer& aChunkedBuffer);
struct ProfilerStats {
unsigned n = 0;