Backed out changeset aac6aee303fb (bug 1365309) for almost perma-timeout in many mochitest chunks on Windows 7 opt and pgo. r=backout

This commit is contained in:
Sebastian Hengst 2017-08-11 09:30:52 +02:00
Родитель b8464d1de1
Коммит 77e5e63914
2 изменённых файлов: 43 добавлений и 124 удалений

Просмотреть файл

@@ -257,13 +257,18 @@ Sampler::Sampler(PSLockRef aLock)
{
#if defined(USE_EHABI_STACKWALK)
mozilla::EHABIStackWalkInit();
#elif defined(USE_LUL_STACKWALK)
bool createdLUL = false;
lul::LUL* lul = CorePS::Lul(aLock);
if (!lul) {
CorePS::SetLul(aLock, MakeUnique<lul::LUL>(logging_sink_for_LUL));
// Read all the unwind info currently available.
lul = CorePS::Lul(aLock);
read_procmaps(lul);
createdLUL = true;
}
#endif
// NOTE: We don't initialize LUL here, instead initializing it in
// SamplerThread's constructor. This is because with the
// profiler_suspend_and_sample_thread entry point, we want to be able to
// sample without waiting for LUL to be initialized.
// Request profiling signals.
struct sigaction sa;
sa.sa_sigaction = MOZ_SIGNAL_TRAMPOLINE(SigprofHandler);
@@ -272,6 +277,21 @@ Sampler::Sampler(PSLockRef aLock)
if (sigaction(SIGPROF, &sa, &mOldSigprofHandler) != 0) {
MOZ_CRASH("Error installing SIGPROF handler in the profiler");
}
#if defined(USE_LUL_STACKWALK)
if (createdLUL) {
// Switch into unwind mode. After this point, we can't add or remove any
// unwind info to/from this LUL instance. The only thing we can do with
// it is Unwind() calls.
lul->EnableUnwinding();
// Has a test been requested?
if (PR_GetEnv("MOZ_PROFILER_LUL_TEST")) {
int nTests = 0, nTestsPassed = 0;
RunLulUnitTests(&nTests, &nTestsPassed, lul);
}
}
#endif
}
void
@@ -393,27 +413,6 @@ SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration,
, mIntervalMicroseconds(
std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5))))
{
#if defined(USE_LUL_STACKWALK)
lul::LUL* lul = CorePS::Lul(aLock);
if (!lul) {
CorePS::SetLul(aLock, MakeUnique<lul::LUL>(logging_sink_for_LUL));
// Read all the unwind info currently available.
lul = CorePS::Lul(aLock);
read_procmaps(lul);
// Switch into unwind mode. After this point, we can't add or remove any
// unwind info to/from this LUL instance. The only thing we can do with
// it is Unwind() calls.
lul->EnableUnwinding();
// Has a test been requested?
if (PR_GetEnv("MOZ_PROFILER_LUL_TEST")) {
int nTests = 0, nTestsPassed = 0;
RunLulUnitTests(&nTests, &nTestsPassed, lul);
}
}
#endif
// Start the sampling thread. It repeatedly sends a SIGPROF signal. Sending
// the signal ourselves instead of relying on itimer provides much better
// accuracy.

Просмотреть файл

@@ -111,27 +111,6 @@
# define USE_LUL_STACKWALK
# include "lul/LulMain.h"
# include "lul/platform-linux-lul.h"
// On linux we use LUL for periodic samples and synchronous samples, but we use
// FramePointerStackWalk for backtrace samples when MOZ_PROFILING is enabled.
// (See the comment at the top of the file for a definition of
// periodic/synchronous/backtrace.).
//
// FramePointerStackWalk can produce incomplete stacks when the current entry is
// in a shared library without framepointers, however LUL can take a long time
// to initialize, which is undesirable for consumers of
// profiler_suspend_and_sample_thread like the Background Hang Reporter.
# if defined(MOZ_PROFILING)
# define USE_FRAME_POINTER_STACK_WALK
# endif
#endif
// We can only stackwalk without expensive initialization on platforms which
// support FramePointerStackWalk or MozStackWalk. LUL Stackwalking requires
// initializing LUL, and EHABIStackWalk requires initializing EHABI, both of
// which can be expensive.
#if defined(USE_FRAME_POINTER_STACK_WALK) || defined(USE_MOZ_STACK_WALK)
# define HAVE_FASTINIT_NATIVE_UNWIND
#endif
#ifdef MOZ_VALGRIND
@@ -1041,62 +1020,45 @@ StackWalkCallback(uint32_t aFrameNumber, void* aPC, void* aSP, void* aClosure)
nativeStack->mPCs[nativeStack->mCount] = aPC;
nativeStack->mCount++;
}
#endif
#if defined(USE_FRAME_POINTER_STACK_WALK)
static void
DoFramePointerBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
const Registers& aRegs, NativeStack& aNativeStack)
DoNativeBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
const Registers& aRegs, NativeStack& aNativeStack)
{
// WARNING: this function runs within the profiler's "critical section".
// WARNING: this function might be called while the profiler is inactive, and
// cannot rely on ActivePS.
// Start with the current function. We use 0 as the frame number here because
// the FramePointerStackWalk() call below will use 1..N. This is a bit weird
// but it doesn't matter because StackWalkCallback() doesn't use the frame
// number argument.
// the FramePointerStackWalk() and MozStackWalkThread() calls below will use
// 1..N. This is a bit weird but it doesn't matter because
// StackWalkCallback() doesn't use the frame number argument.
StackWalkCallback(/* frameNum */ 0, aRegs.mPC, aRegs.mSP, &aNativeStack);
uint32_t maxFrames = uint32_t(MAX_NATIVE_FRAMES - aNativeStack.mCount);
#if defined(USE_FRAME_POINTER_STACK_WALK)
void* stackEnd = aThreadInfo.StackTop();
if (aRegs.mFP >= aRegs.mSP && aRegs.mFP <= stackEnd) {
FramePointerStackWalk(StackWalkCallback, /* skipFrames */ 0, maxFrames,
&aNativeStack, reinterpret_cast<void**>(aRegs.mFP),
stackEnd);
}
}
#endif
#if defined(USE_MOZ_STACK_WALK)
static void
DoMozStackWalkBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
const Registers& aRegs, NativeStack& aNativeStack)
{
// WARNING: this function runs within the profiler's "critical section".
// WARNING: this function might be called while the profiler is inactive, and
// cannot rely on ActivePS.
// Start with the current function. We use 0 as the frame number here because
// the MozStackWalkThread() call below will use 1..N. This is a bit weird but
// it doesn't matter because StackWalkCallback() doesn't use the frame number
// argument.
StackWalkCallback(/* frameNum */ 0, aRegs.mPC, aRegs.mSP, &aNativeStack);
uint32_t maxFrames = uint32_t(MAX_NATIVE_FRAMES - aNativeStack.mCount);
#elif defined(USE_MOZ_STACK_WALK)
HANDLE thread = GetThreadHandle(aThreadInfo.GetPlatformData());
MOZ_ASSERT(thread);
MozStackWalkThread(StackWalkCallback, /* skipFrames */ 0, maxFrames,
&aNativeStack, thread, /* context */ nullptr);
#else
# error "bad configuration"
#endif
}
#endif
#ifdef USE_EHABI_STACKWALK
static void
DoEHABIBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
const Registers& aRegs, NativeStack& aNativeStack)
DoNativeBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
const Registers& aRegs, NativeStack& aNativeStack)
{
// WARNING: this function runs within the profiler's "critical section".
// WARNING: this function might be called while the profiler is inactive, and
@@ -1176,8 +1138,8 @@ ASAN_memcpy(void* aDst, const void* aSrc, size_t aLen)
#endif
static void
DoLULBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
const Registers& aRegs, NativeStack& aNativeStack)
DoNativeBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
const Registers& aRegs, NativeStack& aNativeStack)
{
// WARNING: this function runs within the profiler's "critical section".
// WARNING: this function might be called while the profiler is inactive, and
@@ -1301,30 +1263,6 @@ DoLULBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
#endif
#ifdef HAVE_NATIVE_UNWIND
static void
DoNativeBacktrace(PSLockRef aLock, const ThreadInfo& aThreadInfo,
const Registers& aRegs, NativeStack& aNativeStack)
{
// This method determines which stackwalker is used for periodic and
// synchronous samples. (Backtrace samples are treated differently, see
// profiler_suspend_and_sample_thread() for details). The only part of the
// ordering that matters is that LUL must precede FRAME_POINTER, because on
// Linux they can both be present.
#if defined(USE_LUL_STACKWALK)
DoLULBacktrace(aLock, aThreadInfo, aRegs, aNativeStack);
#elif defined(USE_EHABI_STACKWALK)
DoEHABIBacktrace(aLock, aThreadInfo, aRegs, aNativeStack);
#elif defined(USE_FRAME_POINTER_STACK_WALK)
DoFramePointerBacktrace(aLock, aThreadInfo, aRegs, aNativeStack);
#elif defined(USE_MOZ_STACK_WALK)
DoMozStackWalkBacktrace(aLock, aThreadInfo, aRegs, aNativeStack);
#else
#error "Invalid configuration"
#endif
}
#endif
// Writes some components shared by periodic and synchronous profiles to
// ActivePS's ProfileBuffer. (This should only be called from DoSyncSample()
// and DoPeriodicSample().)
@@ -3429,18 +3367,9 @@ profiler_suspend_and_sample_thread(
[&](const Registers& aRegs) {
// The target thread is now suspended. Collect a native backtrace, and
// call the callback.
#if defined(HAVE_FASTINIT_NATIVE_UNWIND)
#if defined(HAVE_NATIVE_UNWIND)
if (aSampleNative) {
// We can only use FramePointerStackWalk or MozStackWalk from
// suspend_and_sample_thread as other stackwalking methods may not be
// initialized.
# if defined(USE_FRAME_POINTER_STACK_WALK)
DoFramePointerBacktrace(lock, *info, aRegs, nativeStack);
# elif defined(USE_MOZ_STACK_WALK)
DoMozStackWalkBacktrace(lock, *info, aRegs, nativeStack);
# else
# error "Invalid configuration"
# endif
DoNativeBacktrace(lock, *info, aRegs, nativeStack);
}
#endif
aCallback(nativeStack.mPCs, nativeStack.mCount, info->IsMainThread());
@@ -3485,18 +3414,9 @@ profiler_suspend_and_sample_thread(int aThreadId,
// The target thread is now suspended. Collect a native backtrace, and
// call the callback.
bool isSynchronous = false;
#if defined(HAVE_FASTINIT_NATIVE_UNWIND)
#if defined(HAVE_NATIVE_UNWIND)
if (aSampleNative) {
// We can only use FramePointerStackWalk or MozStackWalk from
// suspend_and_sample_thread as other stackwalking methods may not be
// initialized.
# if defined(USE_FRAME_POINTER_STACK_WALK)
DoFramePointerBacktrace(lock, *info, aRegs, nativeStack);
# elif defined(USE_MOZ_STACK_WALK)
DoMozStackWalkBacktrace(lock, *info, aRegs, nativeStack);
# else
# error "Invalid configuration"
# endif
DoNativeBacktrace(lock, *info, aRegs, nativeStack);
MergeStacks(aFeatures, isSynchronous, *info, aRegs, nativeStack,
aCollector);