Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1338957 (part 1) - Reformat PseudoStack.h. r=mstange.
This patch:

- Reformats PseudoStack.h to more closely follow Mozilla style.
- Rewrites some comments to make them more readable, e.g. by properly delimiting sentences with upper-case letters and full stops.
- Replaces sMin() with std::min(), because <algorithm> no longer causes problems.
- Reorders PseudoStack so that all the data members are at the end, which makes them easier to see.
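For illustration, the sMin()-to-std::min() substitution mentioned above amounts to the following kind of change; this is a minimal standalone sketch with made-up values (stackPointer, capacity), not code taken from the patched file:

#include <algorithm>
#include <stdint.h>
#include <stdio.h>

// A hand-rolled minimum of the kind the old header carried, dating from when
// <algorithm> could not be included safely.
static inline uint32_t sMin(uint32_t l, uint32_t r) {
  return l < r ? l : r;
}

int main() {
  uint32_t stackPointer = 1500;  // hypothetical values, for illustration only
  uint32_t capacity = 1024;

  // Old style: clamp with the custom helper.
  uint32_t oldSize = sMin(stackPointer, capacity);

  // New style: clamp with std::min; both arguments must have the same type.
  uint32_t newSize = std::min(stackPointer, capacity);

  printf("old=%u new=%u\n", (unsigned)oldSize, (unsigned)newSize);
  return 0;
}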
Parent: 7690af08a2
Commit: b623b482a6
@@ -1,4 +1,5 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
@@ -7,14 +8,18 @@
#define PROFILER_PSEUDO_STACK_H_

#include "mozilla/ArrayUtils.h"
#include <stdint.h>
#include "js/ProfilingStack.h"
#include <stdlib.h>
#include "mozilla/Atomics.h"
#include "nsISupportsImpl.h"

/* we duplicate this code here to avoid header dependencies
 * which make it more difficult to include in other places */
#include <stdlib.h>
#include <stdint.h>

#include <algorithm>

// We duplicate this code here to avoid header dependencies which make it more
// difficult to include in other places.
#if defined(_M_X64) || defined(__x86_64__)
#define V8_HOST_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
@@ -30,8 +35,8 @@
// or hardware to make sure the profile is consistent at
// every point the signal can fire.
#ifdef V8_HOST_ARCH_ARM
// TODO Is there something cheaper that will prevent
// memory stores from being reordered
// TODO Is there something cheaper that will prevent memory stores from being
// reordered?

typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
@@ -53,12 +58,6 @@ LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
# error "Memory clobber not supported for your platform."
#endif

// We can't include <algorithm> because it causes issues on OS X, so we use
// our own min function.
static inline uint32_t sMin(uint32_t l, uint32_t r) {
return l < r ? l : r;
}

// A stack entry exists to allow the JS engine to inform the Gecko Profiler of
// the current backtrace, but also to instrument particular points in C++ in
// case stack walking is not available on the platform we are running on.
@@ -77,26 +76,24 @@ class ProfilerLinkedList;
class SpliceableJSONWriter;
class UniqueStacks;

class ProfilerMarker {
class ProfilerMarker
{
friend class ProfilerLinkedList<ProfilerMarker>;

public:
explicit ProfilerMarker(const char* aMarkerName,
ProfilerMarkerPayload* aPayload = nullptr,
double aTime = 0);

~ProfilerMarker();

const char* GetMarkerName() const {
return mMarkerName;
}
const char* GetMarkerName() const { return mMarkerName; }

void StreamJSON(SpliceableJSONWriter& aWriter, UniqueStacks& aUniqueStacks) const;
void StreamJSON(SpliceableJSONWriter& aWriter,
UniqueStacks& aUniqueStacks) const;

void SetGeneration(uint32_t aGenID);

bool HasExpired(uint32_t aGenID) const {
return mGenID + 2 <= aGenID;
}
bool HasExpired(uint32_t aGenID) const { return mGenID + 2 <= aGenID; }

double GetTime() const;
@@ -109,23 +106,24 @@ private:
};

template<typename T>
class ProfilerLinkedList {
class ProfilerLinkedList
{
public:
ProfilerLinkedList()
: mHead(nullptr)
, mTail(nullptr)
{}

void insert(T* elem)
void insert(T* aElem)
{
if (!mTail) {
mHead = elem;
mTail = elem;
mHead = aElem;
mTail = aElem;
} else {
mTail->mNext = elem;
mTail = elem;
mTail->mNext = aElem;
mTail = aElem;
}
elem->mNext = nullptr;
aElem->mNext = nullptr;
}

T* popHead()
@@ -176,13 +174,13 @@ public:
}
}

// Insert an item into the list.
// Must only be called from the owning thread.
// Insert an item into the list. Must only be called from the owning thread.
// Must not be called while the list from accessList() is being accessed.
// In the profiler, we ensure that by interrupting the profiled thread
// (which is the one that owns this list and calls insert() on it) until
// we're done reading the list from the signal handler.
void insert(T* aElement) {
void insert(T* aElement)
{
MOZ_ASSERT(aElement);

mSignalLock = true;
@@ -200,10 +198,7 @@ public:
// Function must be reentrant.
ProfilerLinkedList<T>* accessList()
{
if (mSignalLock) {
return nullptr;
}
return &mList;
return mSignalLock ? nullptr : &mList;
}

private:
@@ -215,10 +210,14 @@ private:
};

// Stub eventMarker function for js-engine event generation.
void ProfilerJSEventMarker(const char *event);
void ProfilerJSEventMarker(const char* aEvent);

// the PseudoStack members are read by signal
// handlers, so the mutation of them needs to be signal-safe.
// Note that some of these fields (e.g. mSleepId, mPrivacyMode) aren't really
// part of the PseudoStack, but they are part of this class so they can be
// stored in TLS.
//
// The PseudoStack members are read by signal handlers, so the mutation of them
// needs to be signal-safe.
struct PseudoStack
{
public:
@@ -234,7 +233,8 @@ public:
MOZ_COUNT_CTOR(PseudoStack);
}

~PseudoStack() {
~PseudoStack()
{
MOZ_COUNT_DTOR(PseudoStack);

// The label macros keep a reference to the PseudoStack to avoid a TLS
@@ -243,20 +243,24 @@ public:
MOZ_RELEASE_ASSERT(mStackPointer == 0);
}

// This is called on every profiler restart. Put things that should happen at that time here.
void reinitializeOnResume() {
// This is needed to cause an initial sample to be taken from sleeping threads. Otherwise sleeping
// threads would not have any samples to copy forward while sleeping.
// This is called on every profiler restart. Put things that should happen at
// that time here.
void reinitializeOnResume()
{
// This is needed to cause an initial sample to be taken from sleeping
// threads. Otherwise sleeping threads would not have any samples to copy
// forward while sleeping.
mSleepId++;
}

void addMarker(const char* aMarkerStr, ProfilerMarkerPayload* aPayload, double aTime)
void addMarker(const char* aMarkerStr, ProfilerMarkerPayload* aPayload,
double aTime)
{
ProfilerMarker* marker = new ProfilerMarker(aMarkerStr, aPayload, aTime);
mPendingMarkers.insert(marker);
}

// called within signal. Function must be reentrant
// Called within signal. Function must be reentrant.
ProfilerMarkerLinkedList* getPendingMarkers()
{
// The profiled thread is interrupted, so we can access the list safely.
@@ -265,33 +269,35 @@ public:
return mPendingMarkers.accessList();
}

void push(const char *aName, js::ProfileEntry::Category aCategory, uint32_t line)
void push(const char* aName, js::ProfileEntry::Category aCategory,
uint32_t line)
{
push(aName, aCategory, nullptr, false, line);
}

void push(const char *aName, js::ProfileEntry::Category aCategory,
void *aStackAddress, bool aCopy, uint32_t line)
void push(const char* aName, js::ProfileEntry::Category aCategory,
void* aStackAddress, bool aCopy, uint32_t line)
{
if (size_t(mStackPointer) >= mozilla::ArrayLength(mStack)) {
mStackPointer++;
return;
}

volatile StackEntry &entry = mStack[mStackPointer];
volatile StackEntry& entry = mStack[mStackPointer];

// Make sure we increment the pointer after the name has
// been written such that mStack is always consistent.
// Make sure we increment the pointer after the name has been written such
// that mStack is always consistent.
entry.initCppFrame(aStackAddress, line);
entry.setLabel(aName);
MOZ_ASSERT(entry.flags() == js::ProfileEntry::IS_CPP_ENTRY);
entry.setCategory(aCategory);

// Track if mLabel needs a copy.
if (aCopy)
if (aCopy) {
entry.setFlag(js::ProfileEntry::FRAME_LABEL_COPY);
else
} else {
entry.unsetFlag(js::ProfileEntry::FRAME_LABEL_COPY);
}

// Prevent the optimizer from re-ordering these instructions
STORE_SEQUENCER();
@@ -303,10 +309,12 @@ public:

uint32_t stackSize() const
{
return sMin(mStackPointer, mozilla::sig_safe_t(mozilla::ArrayLength(mStack)));
return std::min(mStackPointer,
mozilla::sig_safe_t(mozilla::ArrayLength(mStack)));
}

void sampleContext(JSContext* context) {
void sampleContext(JSContext* context)
{
if (mContext && !context) {
// On JS shut down, flush the current buffer as stringifying JIT samples
// requires a live JSContext.
@@ -325,11 +333,13 @@ public:
(js::ProfileEntry*) mStack,
(uint32_t*) &mStackPointer,
(uint32_t) mozilla::ArrayLength(mStack));
if (mStartJSSampling)
if (mStartJSSampling) {
enableJSSampling();
}
}

void enableJSSampling() {
void enableJSSampling()
{
if (mContext) {
js::EnableContextProfilingStack(mContext, true);
js::RegisterContextProfilingEventMarker(mContext, &ProfilerJSEventMarker);
@@ -339,18 +349,23 @@ public:
}
}

void jsOperationCallback() {
if (mStartJSSampling)
void jsOperationCallback()
{
if (mStartJSSampling) {
enableJSSampling();
}
}

void disableJSSampling() {
void disableJSSampling()
{
mStartJSSampling = false;
if (mContext)
if (mContext) {
js::EnableContextProfilingStack(mContext, false);
}
}

size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
{
size_t n = aMallocSizeOf(this);

// Measurement of the following members may be added later if DMD finds it
@@ -366,41 +381,13 @@ public:
return n;
}

// Keep a list of active checkpoints
StackEntry volatile mStack[1024];
enum SleepState { NOT_SLEEPING, SLEEPING_FIRST, SLEEPING_AGAIN };

private:
// No copying.
PseudoStack(const PseudoStack&) = delete;
void operator=(const PseudoStack&) = delete;

void flushSamplerOnJSShutdown();

// Keep a list of pending markers that must be moved
// to the circular buffer
ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
// This may exceed the length of mStack, so instead use the stackSize() method
// to determine the number of valid samples in mStack
mozilla::sig_safe_t mStackPointer;
// Incremented at every sleep/wake up of the thread
int mSleepId;
// Previous id observed. If this is not the same as mSleepId, this thread is not sleeping in the same place any more
mozilla::Atomic<int> mSleepIdObserved;
// Keeps tack of whether the thread is sleeping or not (1 when sleeping 0 when awake)
mozilla::Atomic<int> mSleeping;

public:
// The context which is being sampled
JSContext *mContext;
// Start JS Profiling when possible
bool mStartJSSampling;
bool mPrivacyMode;

enum SleepState {NOT_SLEEPING, SLEEPING_FIRST, SLEEPING_AGAIN};

// The first time this is called per sleep cycle we return SLEEPING_FIRST
// and any other subsequent call within the same sleep cycle we return SLEEPING_AGAIN
SleepState observeSleeping() {
// The first time this is called per sleep cycle we return SLEEPING_FIRST.
// And any other subsequent call within the same sleep cycle we return
// SLEEPING_AGAIN.
SleepState observeSleeping()
{
if (mSleeping != 0) {
if (mSleepIdObserved == mSleepId) {
return SLEEPING_AGAIN;
@@ -413,18 +400,58 @@ private:
}
}

// Call this whenever the current thread sleeps or wakes up
// Calling setSleeping with the same value twice in a row is an error
void setSleeping(int sleeping) {
// Call this whenever the current thread sleeps or wakes up. Calling
// setSleeping with the same value twice in a row is an error.
void setSleeping(int sleeping)
{
MOZ_ASSERT(mSleeping != sleeping);
mSleepId++;
mSleeping = sleeping;
}

bool isSleeping() {
return !!mSleeping;
}
bool isSleeping() { return !!mSleeping; }

private:
// No copying.
PseudoStack(const PseudoStack&) = delete;
void operator=(const PseudoStack&) = delete;

void flushSamplerOnJSShutdown();

public:
// The list of active checkpoints.
StackEntry volatile mStack[1024];

private:
// A list of pending markers that must be moved to the circular buffer.
ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;

// This may exceed the length of mStack, so instead use the stackSize() method
// to determine the number of valid samples in mStack.
mozilla::sig_safe_t mStackPointer;

// Incremented at every sleep/wake up of the thread.
int mSleepId;

// Previous id observed. If this is not the same as mSleepId, this thread is
// not sleeping in the same place any more.
mozilla::Atomic<int> mSleepIdObserved;

// Keeps track of whether the thread is sleeping or not (1 when sleeping; 0
// when awake).
mozilla::Atomic<int> mSleeping;

public:
// The context being sampled.
JSContext* mContext;

private:
// Start JS Profiling when possible.
bool mStartJSSampling;

public:
// Is private browsing on?
bool mPrivacyMode;
};

#endif