Bug 1492121 - All necessary changes to make baseprofiler build - r=njn

Almost-mechanical changes include:
- Removed unneeded/incompatible #includes and functions (anything JS- or
  XPCOM-dependent).
- Use std::string for strings and nsIDs.
- Use thin wrappers around mozilla::detail::MutexImpl for mutexes (see the
  sketch below).
- Use hand-rolled AddRef/Release for ref-counted classes -- could not use
  mfbt/RefCounted.h because of bug 1536656.
- Added some platform-specific polyfills, e.g.: MicrosecondsSince1970().
- Only record the main thread by default.
- Logging controlled by env-vars MOZ_BASE_PROFILER_{,DEBUG_,VERBOSE_}LOGGING
  (sketched just below).
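
A minimal sketch of the env-var gating meant by the last bullet; the helper
and macro names below are illustrative, not the actual baseprofiler logging
code:

  #include <cstdio>
  #include <cstdlib>

  // Hypothetical helper (illustration only): checks one of the
  // MOZ_BASE_PROFILER_*LOGGING env-vars once and caches the result.
  static bool BaseProfilerVerboseLoggingEnabled() {
    static const bool enabled =
        !!std::getenv("MOZ_BASE_PROFILER_VERBOSE_LOGGING");
    return enabled;
  }

  // Hypothetical macro wrapping the check; the real logging macros differ.
  #define BASE_PROFILER_VERBOSE_LOG(...)         \
    do {                                         \
      if (BaseProfilerVerboseLoggingEnabled()) { \
        fprintf(stderr, __VA_ARGS__);            \
      }                                          \
    } while (0)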

This now builds (with --enable-base-profiler), but is not usable yet.
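
For the mozilla::detail::MutexImpl wrapper mentioned in the list above, the
intent is roughly the following. This is only a sketch, assuming MutexImpl
exposes protected lock()/unlock() to derived classes; the class names and the
RAII guard are illustrative, not the actual baseprofiler types:

  #include "mozilla/PlatformMutex.h"

  // Illustrative thin wrapper (not the actual baseprofiler class): it only
  // forwards to the protected lock()/unlock() of mozilla::detail::MutexImpl.
  // The real MutexImpl constructor may take extra arguments (e.g. a
  // recordreplay::Behavior), omitted here for brevity.
  class BaseProfilerMutex : private ::mozilla::detail::MutexImpl {
   public:
    BaseProfilerMutex() = default;
    void Lock() { ::mozilla::detail::MutexImpl::lock(); }
    void Unlock() { ::mozilla::detail::MutexImpl::unlock(); }
  };

  // Simple RAII guard over the wrapper.
  class BaseProfilerAutoLock {
   public:
    explicit BaseProfilerAutoLock(BaseProfilerMutex& aMutex) : mMutex(aMutex) {
      mMutex.Lock();
    }
    ~BaseProfilerAutoLock() { mMutex.Unlock(); }

   private:
    BaseProfilerMutex& mMutex;
  };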

Differential Revision: https://phabricator.services.mozilla.com/D31924

--HG--
extra : moz-landing-system : lando
Gerald Squelart 2019-06-04 06:53:01 +00:00
Parent 494369648d
Commit 10e7d3bf93
36 changed files: 1403 additions and 3624 deletions

View file

@ -12,25 +12,26 @@
# include "BaseProfileJSONWriter.h"
PageInformation::PageInformation(const nsID& aDocShellId,
PageInformation::PageInformation(const std::string& aDocShellId,
uint32_t aDocShellHistoryId,
const nsCString& aUrl, bool aIsSubFrame)
const std::string& aUrl, bool aIsSubFrame)
: mDocShellId(aDocShellId),
mDocShellHistoryId(aDocShellHistoryId),
mUrl(aUrl),
mIsSubFrame(aIsSubFrame) {}
mIsSubFrame(aIsSubFrame),
mRefCnt(0) {}
bool PageInformation::Equals(PageInformation* aOtherPageInfo) {
return DocShellHistoryId() == aOtherPageInfo->DocShellHistoryId() &&
DocShellId().Equals(aOtherPageInfo->DocShellId()) &&
DocShellId() == aOtherPageInfo->DocShellId() &&
IsSubFrame() == aOtherPageInfo->IsSubFrame();
}
void PageInformation::StreamJSON(SpliceableJSONWriter& aWriter) {
aWriter.StartObjectElement();
aWriter.StringProperty("docshellId", nsIDToCString(DocShellId()).get());
aWriter.StringProperty("docshellId", DocShellId().c_str());
aWriter.DoubleProperty("historyId", DocShellHistoryId());
aWriter.StringProperty("url", Url().get());
aWriter.StringProperty("url", Url().c_str());
aWriter.BoolProperty("isSubFrame", IsSubFrame());
aWriter.EndObject();
}

View file

@ -7,11 +7,11 @@
#ifndef PageInformation_h
#define PageInformation_h
#include "mozilla/Atomics.h"
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "nsID.h"
#include "nsISupportsImpl.h"
#include "nsString.h"
#include <string>
class SpliceableJSONWriter;
@ -24,17 +24,26 @@ class SpliceableJSONWriter;
// it in the next page registration.
class PageInformation final {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PageInformation)
PageInformation(const nsID& aDocShellId, uint32_t aDocShellHistoryId,
const nsCString& aUrl, bool aIsSubFrame);
PageInformation(const std::string& aDocShellId, uint32_t aDocShellHistoryId,
const std::string& aUrl, bool aIsSubFrame);
// Using hand-rolled ref-counting, because RefCounted.h macros don't produce
// the same code between mozglue and libxul, see bug 1536656.
MFBT_API void AddRef() const { ++mRefCnt; }
MFBT_API void Release() const {
MOZ_ASSERT(int32_t(mRefCnt) > 0);
if (--mRefCnt == 0) {
delete this;
}
}
size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
bool Equals(PageInformation* aOtherDocShellInfo);
void StreamJSON(SpliceableJSONWriter& aWriter);
uint32_t DocShellHistoryId() { return mDocShellHistoryId; }
const nsID& DocShellId() { return mDocShellId; }
const nsCString& Url() { return mUrl; }
const std::string& DocShellId() { return mDocShellId; }
const std::string& Url() { return mUrl; }
bool IsSubFrame() { return mIsSubFrame; }
mozilla::Maybe<uint64_t> BufferPositionWhenUnregistered() {
@ -46,9 +55,9 @@ class PageInformation final {
}
private:
const nsID mDocShellId;
const std::string mDocShellId;
const uint32_t mDocShellHistoryId;
const nsCString mUrl;
const std::string mUrl;
const bool mIsSubFrame;
// Holds the buffer position when DocShell is unregistered.
@ -56,7 +65,9 @@ class PageInformation final {
// not.
mozilla::Maybe<uint64_t> mBufferPositionWhenUnregistered;
virtual ~PageInformation() = default;
mutable mozilla::Atomic<int32_t, mozilla::MemoryOrdering::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>
mRefCnt;
};
#endif // PageInformation_h
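
Since PageInformation now hand-rolls AddRef()/Release(), it can still be held
in a RefPtr, which only requires those two methods on the pointee. A
hypothetical usage sketch (argument values made up for illustration):

  #include "PageInformation.h"
  #include "mozilla/RefPtr.h"

  // Hypothetical example, not part of the patch.
  void ExamplePageRegistration() {
    RefPtr<PageInformation> page = new PageInformation(
        "{00000000-0000-0000-0000-000000000000}",  // aDocShellId, now a string
        1,                                         // aDocShellHistoryId
        "about:blank",                             // aUrl
        false);                                    // aIsSubFrame
    // RefPtr calls AddRef() when it takes ownership and Release() when it
    // goes out of scope, so the hand-rolled mRefCnt is all that is needed to
    // manage the object's lifetime.
  }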

View file

@ -12,10 +12,7 @@
# include "ProfilerMarker.h"
# include "jsfriendapi.h"
# include "mozilla/MathAlgorithms.h"
# include "nsJSPrincipals.h"
# include "nsScriptSecurityManager.h"
using namespace mozilla;
@ -120,24 +117,10 @@ size_t ProfileBuffer::SizeOfIncludingThis(
/* ProfileBufferCollector */
static bool IsChromeJSScript(JSScript* aScript) {
// WARNING: this function runs within the profiler's "critical section".
auto realm = js::GetScriptRealm(aScript);
return js::IsSystemRealm(realm);
}
void ProfileBufferCollector::CollectNativeLeafAddr(void* aAddr) {
mBuf.AddEntry(ProfileBufferEntry::NativeLeafAddr(aAddr));
}
void ProfileBufferCollector::CollectJitReturnAddr(void* aAddr) {
mBuf.AddEntry(ProfileBufferEntry::JitReturnAddr(aAddr));
}
void ProfileBufferCollector::CollectWasmFrame(const char* aLabel) {
mBuf.CollectCodeLocation("", aLabel, 0, Nothing(), Nothing(), Nothing());
}
void ProfileBufferCollector::CollectProfilingStackFrame(
const js::ProfilingStackFrame& aFrame) {
// WARNING: this function runs within the profiler's "critical section".
@ -151,34 +134,7 @@ void ProfileBufferCollector::CollectProfilingStackFrame(
Maybe<uint32_t> line;
Maybe<uint32_t> column;
if (aFrame.isJsFrame()) {
// There are two kinds of JS frames that get pushed onto the ProfilingStack.
//
// - label = "", dynamic string = <something>
// - label = "js::RunScript", dynamic string = nullptr
//
// The line number is only interesting in the first case.
if (label[0] == '\0') {
MOZ_ASSERT(dynamicString);
// We call aFrame.script() repeatedly -- rather than storing the result in
// a local variable in order -- to avoid rooting hazards.
if (aFrame.script()) {
isChromeJSEntry = IsChromeJSScript(aFrame.script());
if (aFrame.pc()) {
unsigned col = 0;
line = Some(JS_PCToLineNumber(aFrame.script(), aFrame.pc(), &col));
column = Some(col);
}
}
} else {
MOZ_ASSERT(strcmp(label, "js::RunScript") == 0 && !dynamicString);
}
} else {
MOZ_ASSERT(aFrame.isLabelFrame());
}
MOZ_ASSERT(aFrame.isLabelFrame());
if (dynamicString) {
// Adjust the dynamic string as necessary.

View file

@ -49,21 +49,10 @@ class ProfileBuffer final {
// Maximum size of a frameKey string that we'll handle.
static const size_t kMaxFrameKeyLength = 512;
// Add JIT frame information to aJITFrameInfo for any JitReturnAddr entries
// that are currently in the buffer at or after aRangeStart, in samples
// for the given thread.
void AddJITInfoForRange(uint64_t aRangeStart, int aThreadId,
JSContext* aContext,
JITFrameInfo& aJITFrameInfo) const;
// Stream JSON for samples in the buffer to aWriter, using the supplied
// UniqueStacks object.
// Only streams samples for the given thread ID and which were taken at or
// after aSinceTime.
// aUniqueStacks needs to contain information about any JIT frames that we
// might encounter in the buffer, before this method is called. In other
// words, you need to have called AddJITInfoForRange for every range that
// might contain JIT frame information before calling this method.
void StreamSamplesToJSON(SpliceableJSONWriter& aWriter, int aThreadId,
double aSinceTime,
UniqueStacks& aUniqueStacks) const;
@ -163,8 +152,6 @@ class ProfileBufferCollector final : public ProfilerStackCollector {
}
virtual void CollectNativeLeafAddr(void* aAddr) override;
virtual void CollectJitReturnAddr(void* aAddr) override;
virtual void CollectWasmFrame(const char* aLabel) override;
virtual void CollectProfilingStackFrame(
const js::ProfilingStackFrame& aFrame) override;

View file

@ -13,14 +13,9 @@
# include "platform.h"
# include "ProfileBuffer.h"
# include "js/TrackedOptimizationInfo.h"
# include "jsapi.h"
# include "jsfriendapi.h"
# include "mozilla/Logging.h"
# include "mozilla/Sprintf.h"
# include "mozilla/StackWalk.h"
# include "nsThreadUtils.h"
# include "nsXULAppAPI.h"
# include <ostream>
@ -120,51 +115,6 @@ void ProfileBufferEntry::CopyCharsInto(char (&aOutArray)[kNumChars]) const {
// END ProfileBufferEntry
////////////////////////////////////////////////////////////////////////
struct TypeInfo {
Maybe<nsCString> mKeyedBy;
Maybe<nsCString> mName;
Maybe<nsCString> mLocation;
Maybe<unsigned> mLineNumber;
};
template <typename LambdaT>
class ForEachTrackedOptimizationTypeInfoLambdaOp
: public JS::ForEachTrackedOptimizationTypeInfoOp {
public:
// aLambda needs to be a function with the following signature:
// void lambda(JS::TrackedTypeSite site, const char* mirType,
// const nsTArray<TypeInfo>& typeset)
// aLambda will be called once per entry.
explicit ForEachTrackedOptimizationTypeInfoLambdaOp(LambdaT&& aLambda)
: mLambda(aLambda) {}
// This is called 0 or more times per entry, *before* operator() is called
// for that entry.
void readType(const char* keyedBy, const char* name, const char* location,
const Maybe<unsigned>& lineno) override {
TypeInfo info = {keyedBy ? Some(nsCString(keyedBy)) : Nothing(),
name ? Some(nsCString(name)) : Nothing(),
location ? Some(nsCString(location)) : Nothing(), lineno};
mTypesetForUpcomingEntry.AppendElement(std::move(info));
}
void operator()(JS::TrackedTypeSite site, const char* mirType) override {
nsTArray<TypeInfo> typeset(std::move(mTypesetForUpcomingEntry));
mLambda(site, mirType, typeset);
}
private:
nsTArray<TypeInfo> mTypesetForUpcomingEntry;
LambdaT mLambda;
};
template <typename LambdaT>
ForEachTrackedOptimizationTypeInfoLambdaOp<LambdaT>
MakeForEachTrackedOptimizationTypeInfoLambdaOp(LambdaT&& aLambda) {
return ForEachTrackedOptimizationTypeInfoLambdaOp<LambdaT>(
std::forward<LambdaT>(aLambda));
}
// As mentioned in ProfileBufferEntry.h, the JSON format contains many
// arrays whose elements are laid out according to various schemas to help
// de-duplication. This RAII class helps write these arrays by keeping track of
@ -254,28 +204,6 @@ class MOZ_RAII AutoArraySchemaWriter {
uint32_t mNextFreeIndex;
};
template <typename LambdaT>
class ForEachTrackedOptimizationAttemptsLambdaOp
: public JS::ForEachTrackedOptimizationAttemptOp {
public:
explicit ForEachTrackedOptimizationAttemptsLambdaOp(LambdaT&& aLambda)
: mLambda(std::move(aLambda)) {}
void operator()(JS::TrackedStrategy aStrategy,
JS::TrackedOutcome aOutcome) override {
mLambda(aStrategy, aOutcome);
}
private:
LambdaT mLambda;
};
template <typename LambdaT>
ForEachTrackedOptimizationAttemptsLambdaOp<LambdaT>
MakeForEachTrackedOptimizationAttemptsLambdaOp(LambdaT&& aLambda) {
return ForEachTrackedOptimizationAttemptsLambdaOp<LambdaT>(
std::move(aLambda));
}
UniqueJSONStrings::UniqueJSONStrings() { mStringTableWriter.StartBareList(); }
UniqueJSONStrings::UniqueJSONStrings(const UniqueJSONStrings& aOther) {
@ -318,39 +246,6 @@ UniqueStacks::StackKey UniqueStacks::AppendFrame(const StackKey& aStack,
GetOrAddFrameIndex(aFrame));
}
JITFrameInfoForBufferRange JITFrameInfoForBufferRange::Clone() const {
JITFrameInfoForBufferRange::JITAddressToJITFramesMap jitAddressToJITFramesMap;
MOZ_RELEASE_ASSERT(
jitAddressToJITFramesMap.reserve(mJITAddressToJITFramesMap.count()));
for (auto iter = mJITAddressToJITFramesMap.iter(); !iter.done();
iter.next()) {
const mozilla::Vector<JITFrameKey>& srcKeys = iter.get().value();
mozilla::Vector<JITFrameKey> destKeys;
MOZ_RELEASE_ASSERT(destKeys.appendAll(srcKeys));
jitAddressToJITFramesMap.putNewInfallible(iter.get().key(),
std::move(destKeys));
}
JITFrameInfoForBufferRange::JITFrameToFrameJSONMap jitFrameToFrameJSONMap;
MOZ_RELEASE_ASSERT(
jitFrameToFrameJSONMap.reserve(mJITFrameToFrameJSONMap.count()));
for (auto iter = mJITFrameToFrameJSONMap.iter(); !iter.done(); iter.next()) {
jitFrameToFrameJSONMap.putNewInfallible(iter.get().key(),
iter.get().value());
}
return JITFrameInfoForBufferRange{mRangeStart, mRangeEnd,
std::move(jitAddressToJITFramesMap),
std::move(jitFrameToFrameJSONMap)};
}
JITFrameInfo::JITFrameInfo(const JITFrameInfo& aOther)
: mUniqueStrings(MakeUnique<UniqueJSONStrings>(*aOther.mUniqueStrings)) {
for (const JITFrameInfoForBufferRange& range : aOther.mRanges) {
MOZ_RELEASE_ASSERT(mRanges.append(range.Clone()));
}
}
bool UniqueStacks::FrameKey::NormalFrameData::operator==(
const NormalFrameData& aOther) const {
return mLocation == aOther.mLocation &&
@ -358,19 +253,7 @@ bool UniqueStacks::FrameKey::NormalFrameData::operator==(
mColumn == aOther.mColumn && mCategoryPair == aOther.mCategoryPair;
}
bool UniqueStacks::FrameKey::JITFrameData::operator==(
const JITFrameData& aOther) const {
return mCanonicalAddress == aOther.mCanonicalAddress &&
mDepth == aOther.mDepth && mRangeIndex == aOther.mRangeIndex;
}
// Consume aJITFrameInfo by stealing its string table and its JIT frame info
// ranges. The JIT frame info contains JSON which refers to strings from the
// JIT frame info's string table, so our string table needs to have the same
// strings at the same indices.
UniqueStacks::UniqueStacks(JITFrameInfo&& aJITFrameInfo)
: mUniqueStrings(std::move(aJITFrameInfo.mUniqueStrings)),
mJITInfoRanges(std::move(aJITFrameInfo.mRanges)) {
UniqueStacks::UniqueStacks() : mUniqueStrings(MakeUnique<UniqueJSONStrings>()) {
mFrameTableWriter.StartBareList();
mStackTableWriter.StartBareList();
}
@ -399,52 +282,6 @@ struct PositionInRangeComparator final {
}
};
Maybe<Vector<UniqueStacks::FrameKey>>
UniqueStacks::LookupFramesForJITAddressFromBufferPos(void* aJITAddress,
uint64_t aBufferPos) {
JITFrameInfoForBufferRange* rangeIter =
std::lower_bound(mJITInfoRanges.begin(), mJITInfoRanges.end(), aBufferPos,
[](const JITFrameInfoForBufferRange& aRange,
uint64_t aPos) { return aRange.mRangeEnd < aPos; });
MOZ_RELEASE_ASSERT(
rangeIter != mJITInfoRanges.end() &&
rangeIter->mRangeStart <= aBufferPos &&
aBufferPos < rangeIter->mRangeEnd,
"Buffer position of jit address needs to be in one of the ranges");
using JITFrameKey = JITFrameInfoForBufferRange::JITFrameKey;
const JITFrameInfoForBufferRange& jitFrameInfoRange = *rangeIter;
auto jitFrameKeys =
jitFrameInfoRange.mJITAddressToJITFramesMap.lookup(aJITAddress);
if (!jitFrameKeys) {
return Nothing();
}
// Map the array of JITFrameKeys to an array of FrameKeys, and ensure that
// each of the FrameKeys exists in mFrameToIndexMap.
Vector<FrameKey> frameKeys;
MOZ_RELEASE_ASSERT(frameKeys.initCapacity(jitFrameKeys->value().length()));
for (const JITFrameKey& jitFrameKey : jitFrameKeys->value()) {
FrameKey frameKey(jitFrameKey.mCanonicalAddress, jitFrameKey.mDepth,
rangeIter - mJITInfoRanges.begin());
uint32_t index = mFrameToIndexMap.count();
auto entry = mFrameToIndexMap.lookupForAdd(frameKey);
if (!entry) {
// We need to add this frame to our frame table. The JSON for this frame
// already exists in jitFrameInfoRange, we just need to splice it into
// the frame table and give it an index.
auto frameJSON =
jitFrameInfoRange.mJITFrameToFrameJSONMap.lookup(jitFrameKey);
MOZ_RELEASE_ASSERT(frameJSON, "Should have cached JSON for this frame");
mFrameTableWriter.Splice(frameJSON->value().get());
MOZ_RELEASE_ASSERT(mFrameToIndexMap.add(entry, frameKey, index));
}
MOZ_RELEASE_ASSERT(frameKeys.append(std::move(frameKey)));
}
return Some(std::move(frameKeys));
}
uint32_t UniqueStacks::GetOrAddFrameIndex(const FrameKey& aFrame) {
uint32_t count = mFrameToIndexMap.count();
auto entry = mFrameToIndexMap.lookupForAdd(aFrame);
@ -494,7 +331,7 @@ void UniqueStacks::StreamNonJITFrame(const FrameKey& aFrame) {
AutoArraySchemaWriter writer(mFrameTableWriter, *mUniqueStrings);
const NormalFrameData& data = aFrame.mData.as<NormalFrameData>();
writer.StringElement(LOCATION, data.mLocation.get());
writer.StringElement(LOCATION, data.mLocation.c_str());
writer.BoolElement(RELEVANT_FOR_JS, data.mRelevantForJS);
if (data.mLine.isSome()) {
writer.IntElement(LINE, *data.mLine);
@ -504,198 +341,18 @@ void UniqueStacks::StreamNonJITFrame(const FrameKey& aFrame) {
}
if (data.mCategoryPair.isSome()) {
const JS::ProfilingCategoryPairInfo& info =
JS::GetProfilingCategoryPairInfo(*data.mCategoryPair);
JS::GetBaseProfilingCategoryPairInfo(*data.mCategoryPair);
writer.IntElement(CATEGORY, uint32_t(info.mCategory));
}
}
static void StreamJITFrameOptimizations(
SpliceableJSONWriter& aWriter, UniqueJSONStrings& aUniqueStrings,
JSContext* aContext, const JS::ProfiledFrameHandle& aJITFrame) {
aWriter.StartObjectElement();
{
aWriter.StartArrayProperty("types");
{
auto op = MakeForEachTrackedOptimizationTypeInfoLambdaOp(
[&](JS::TrackedTypeSite site, const char* mirType,
const nsTArray<TypeInfo>& typeset) {
aWriter.StartObjectElement();
{
aUniqueStrings.WriteProperty(aWriter, "site",
JS::TrackedTypeSiteString(site));
aUniqueStrings.WriteProperty(aWriter, "mirType", mirType);
if (!typeset.IsEmpty()) {
aWriter.StartArrayProperty("typeset");
for (const TypeInfo& typeInfo : typeset) {
aWriter.StartObjectElement();
{
aUniqueStrings.WriteProperty(aWriter, "keyedBy",
typeInfo.mKeyedBy->get());
if (typeInfo.mName) {
aUniqueStrings.WriteProperty(aWriter, "name",
typeInfo.mName->get());
}
if (typeInfo.mLocation) {
aUniqueStrings.WriteProperty(aWriter, "location",
typeInfo.mLocation->get());
}
if (typeInfo.mLineNumber.isSome()) {
aWriter.IntProperty("line", *typeInfo.mLineNumber);
}
}
aWriter.EndObject();
}
aWriter.EndArray();
}
}
aWriter.EndObject();
});
aJITFrame.forEachOptimizationTypeInfo(op);
}
aWriter.EndArray();
JS::Rooted<JSScript*> script(aContext);
jsbytecode* pc;
aWriter.StartObjectProperty("attempts");
{
{
JSONSchemaWriter schema(aWriter);
schema.WriteField("strategy");
schema.WriteField("outcome");
}
aWriter.StartArrayProperty("data");
{
auto op = MakeForEachTrackedOptimizationAttemptsLambdaOp(
[&](JS::TrackedStrategy strategy, JS::TrackedOutcome outcome) {
enum Schema : uint32_t { STRATEGY = 0, OUTCOME = 1 };
AutoArraySchemaWriter writer(aWriter, aUniqueStrings);
writer.StringElement(STRATEGY,
JS::TrackedStrategyString(strategy));
writer.StringElement(OUTCOME, JS::TrackedOutcomeString(outcome));
});
aJITFrame.forEachOptimizationAttempt(op, script.address(), &pc);
}
aWriter.EndArray();
}
aWriter.EndObject();
if (JSAtom* name = js::GetPropertyNameFromPC(script, pc)) {
char buf[512];
JS_PutEscapedFlatString(buf, ArrayLength(buf), js::AtomToFlatString(name),
0);
aUniqueStrings.WriteProperty(aWriter, "propertyName", buf);
}
unsigned line, column;
line = JS_PCToLineNumber(script, pc, &column);
aWriter.IntProperty("line", line);
aWriter.IntProperty("column", column);
}
aWriter.EndObject();
}
static void StreamJITFrame(JSContext* aContext, SpliceableJSONWriter& aWriter,
UniqueJSONStrings& aUniqueStrings,
const JS::ProfiledFrameHandle& aJITFrame) {
enum Schema : uint32_t {
LOCATION = 0,
RELEVANT_FOR_JS = 1,
IMPLEMENTATION = 2,
OPTIMIZATIONS = 3,
LINE = 4,
COLUMN = 5,
CATEGORY = 6
};
AutoArraySchemaWriter writer(aWriter, aUniqueStrings);
writer.StringElement(LOCATION, aJITFrame.label());
writer.BoolElement(RELEVANT_FOR_JS, false);
JS::ProfilingFrameIterator::FrameKind frameKind = aJITFrame.frameKind();
MOZ_ASSERT(frameKind == JS::ProfilingFrameIterator::Frame_Ion ||
frameKind == JS::ProfilingFrameIterator::Frame_Baseline);
writer.StringElement(
IMPLEMENTATION,
frameKind == JS::ProfilingFrameIterator::Frame_Ion ? "ion" : "baseline");
if (aJITFrame.hasTrackedOptimizations()) {
writer.FreeFormElement(
OPTIMIZATIONS,
[&](SpliceableJSONWriter& aWriter, UniqueJSONStrings& aUniqueStrings) {
StreamJITFrameOptimizations(aWriter, aUniqueStrings, aContext,
aJITFrame);
});
}
}
struct CStringWriteFunc : public JSONWriteFunc {
nsACString& mBuffer; // The struct must not outlive this buffer
explicit CStringWriteFunc(nsACString& aBuffer) : mBuffer(aBuffer) {}
std::string& mBuffer; // The struct must not outlive this buffer
explicit CStringWriteFunc(std::string& aBuffer) : mBuffer(aBuffer) {}
void Write(const char* aStr) override { mBuffer.Append(aStr); }
void Write(const char* aStr) override { mBuffer += aStr; }
};
static nsCString JSONForJITFrame(JSContext* aContext,
const JS::ProfiledFrameHandle& aJITFrame,
UniqueJSONStrings& aUniqueStrings) {
nsCString json;
SpliceableJSONWriter writer(MakeUnique<CStringWriteFunc>(json));
StreamJITFrame(aContext, writer, aUniqueStrings, aJITFrame);
return json;
}
void JITFrameInfo::AddInfoForRange(
uint64_t aRangeStart, uint64_t aRangeEnd, JSContext* aCx,
const std::function<void(const std::function<void(void*)>&)>&
aJITAddressProvider) {
if (aRangeStart == aRangeEnd) {
return;
}
MOZ_RELEASE_ASSERT(aRangeStart < aRangeEnd);
if (!mRanges.empty()) {
const JITFrameInfoForBufferRange& prevRange = mRanges.back();
MOZ_RELEASE_ASSERT(prevRange.mRangeEnd <= aRangeStart,
"Ranges must be non-overlapping and added in-order.");
}
using JITFrameKey = JITFrameInfoForBufferRange::JITFrameKey;
JITFrameInfoForBufferRange::JITAddressToJITFramesMap jitAddressToJITFrameMap;
JITFrameInfoForBufferRange::JITFrameToFrameJSONMap jitFrameToFrameJSONMap;
aJITAddressProvider([&](void* aJITAddress) {
// Make sure that we have cached data for aJITAddress.
auto addressEntry = jitAddressToJITFrameMap.lookupForAdd(aJITAddress);
if (!addressEntry) {
Vector<JITFrameKey> jitFrameKeys;
for (JS::ProfiledFrameHandle handle :
JS::GetProfiledFrames(aCx, aJITAddress)) {
uint32_t depth = jitFrameKeys.length();
JITFrameKey jitFrameKey{handle.canonicalAddress(), depth};
auto frameEntry = jitFrameToFrameJSONMap.lookupForAdd(jitFrameKey);
if (!frameEntry) {
MOZ_RELEASE_ASSERT(jitFrameToFrameJSONMap.add(
frameEntry, jitFrameKey,
JSONForJITFrame(aCx, handle, *mUniqueStrings)));
}
MOZ_RELEASE_ASSERT(jitFrameKeys.append(jitFrameKey));
}
MOZ_RELEASE_ASSERT(jitAddressToJITFrameMap.add(addressEntry, aJITAddress,
std::move(jitFrameKeys)));
}
});
MOZ_RELEASE_ASSERT(mRanges.append(JITFrameInfoForBufferRange{
aRangeStart, aRangeEnd, std::move(jitAddressToJITFrameMap),
std::move(jitFrameToFrameJSONMap)}));
}
struct ProfileSample {
uint32_t mStack;
double mTime;
@ -1037,23 +694,33 @@ void ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter,
dynStrBuf[kMaxFrameKeyLength - 1] = '\0';
bool hasDynamicString = (i != 0);
nsCString frameLabel;
std::string frameLabel;
if (label[0] != '\0' && hasDynamicString) {
if (frameFlags & uint32_t(FrameFlags::STRING_TEMPLATE_METHOD)) {
frameLabel.AppendPrintf("%s.%s", label, dynStrBuf.get());
frameLabel += label;
frameLabel += '.';
frameLabel += dynStrBuf.get();
} else if (frameFlags &
uint32_t(FrameFlags::STRING_TEMPLATE_GETTER)) {
frameLabel.AppendPrintf("get %s.%s", label, dynStrBuf.get());
frameLabel += "get ";
frameLabel += label;
frameLabel += '.';
frameLabel += dynStrBuf.get();
} else if (frameFlags &
uint32_t(FrameFlags::STRING_TEMPLATE_SETTER)) {
frameLabel.AppendPrintf("set %s.%s", label, dynStrBuf.get());
frameLabel += "set ";
frameLabel += label;
frameLabel += '.';
frameLabel += dynStrBuf.get();
} else {
frameLabel.AppendPrintf("%s %s", label, dynStrBuf.get());
frameLabel += label;
frameLabel += ' ';
frameLabel += dynStrBuf.get();
}
} else if (hasDynamicString) {
frameLabel.Append(dynStrBuf.get());
frameLabel += dynStrBuf.get();
} else {
frameLabel.Append(label);
frameLabel += label;
}
Maybe<unsigned> line;
@ -1079,23 +746,6 @@ void ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter,
stack, UniqueStacks::FrameKey(std::move(frameLabel), relevantForJS,
line, column, categoryPair));
} else if (e.Get().IsJitReturnAddr()) {
numFrames++;
// A JIT frame may expand to multiple frames due to inlining.
void* pc = e.Get().GetPtr();
const Maybe<Vector<UniqueStacks::FrameKey>>& frameKeys =
aUniqueStacks.LookupFramesForJITAddressFromBufferPos(pc,
e.CurPos());
MOZ_RELEASE_ASSERT(frameKeys,
"Attempting to stream samples for a buffer range "
"for which we don't have JITFrameInfo?");
for (const UniqueStacks::FrameKey& frameKey : *frameKeys) {
stack = aUniqueStacks.AppendFrame(stack, frameKey);
}
e.Next();
} else {
break;
}
@ -1138,48 +788,6 @@ void ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter,
}
}
void ProfileBuffer::AddJITInfoForRange(uint64_t aRangeStart, int aThreadId,
JSContext* aContext,
JITFrameInfo& aJITFrameInfo) const {
// We can only process JitReturnAddr entries if we have a JSContext.
MOZ_RELEASE_ASSERT(aContext);
aRangeStart = std::max(aRangeStart, mRangeStart);
aJITFrameInfo.AddInfoForRange(
aRangeStart, mRangeEnd, aContext,
[&](const std::function<void(void*)>& aJITAddressConsumer) {
// Find all JitReturnAddr entries in the given range for the given
// thread, and call aJITAddressConsumer with those addresses.
EntryGetter e(*this, aRangeStart);
while (true) {
// Advance to the next ThreadId entry.
while (e.Has() && !e.Get().IsThreadId()) {
e.Next();
}
if (!e.Has()) {
break;
}
MOZ_ASSERT(e.Get().IsThreadId());
int threadId = e.Get().GetInt();
e.Next();
// Ignore samples that are for a different thread.
if (threadId != aThreadId) {
continue;
}
while (e.Has() && !e.Get().IsThreadId()) {
if (e.Get().IsJitReturnAddr()) {
aJITAddressConsumer(e.Get().GetPtr());
}
e.Next();
}
}
});
}
void ProfileBuffer::StreamMarkersToJSON(SpliceableJSONWriter& aWriter,
int aThreadId,
const TimeStamp& aProcessStartTime,
@ -1345,8 +953,6 @@ struct CounterKeyedSample {
using CounterKeyedSamples = Vector<CounterKeyedSample>;
static LazyLogModule sFuzzyfoxLog("Fuzzyfox");
using CounterMap = HashMap<uint64_t, CounterKeyedSamples>;
// HashMap lookup, if not found, a default value is inserted.
@ -1488,11 +1094,6 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
// Encode as deltas, and only encode if different than the last sample
if (i == 0 || samples[i].mNumber != previousNumber ||
samples[i].mCount != previousCount) {
if (i != 0 && samples[i].mTime >= samples[i - 1].mTime) {
MOZ_LOG(sFuzzyfoxLog, mozilla::LogLevel::Error,
("Fuzzyfox Profiler Assertion: %f >= %f", samples[i].mTime,
samples[i - 1].mTime));
}
MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
MOZ_ASSERT(samples[i].mNumber >= previousNumber);
MOZ_ASSERT(samples[i].mNumber - previousNumber <=

View file

@ -10,16 +10,15 @@
#include "BaseProfileJSONWriter.h"
#include "gtest/MozGtestFriend.h"
#include "js/ProfilingCategory.h"
#include "js/ProfilingFrameIterator.h"
#include "js/TrackedOptimizationInfo.h"
#include "BaseProfilingCategory.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/HashTable.h"
#include "mozilla/Maybe.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Variant.h"
#include "mozilla/Vector.h"
#include "nsString.h"
#include <string>
class ProfilerMarker;
@ -142,117 +141,20 @@ class UniqueJSONStrings {
mozilla::HashMap<mozilla::HashNumber, uint32_t> mStringHashToIndexMap;
};
// Contains all the information about JIT frames that is needed to stream stack
// frames for JitReturnAddr entries in the profiler buffer.
// Every return address (void*) is mapped to one or more JITFrameKeys, and
// every JITFrameKey is mapped to a JSON string for that frame.
// mRangeStart and mRangeEnd describe the range in the buffer for which this
// mapping is valid. Only JitReturnAddr entries within that buffer range can be
// processed using this JITFrameInfoForBufferRange object.
struct JITFrameInfoForBufferRange final {
JITFrameInfoForBufferRange Clone() const;
uint64_t mRangeStart;
uint64_t mRangeEnd; // mRangeEnd marks the first invalid index.
struct JITFrameKey {
bool operator==(const JITFrameKey& aOther) const {
return mCanonicalAddress == aOther.mCanonicalAddress &&
mDepth == aOther.mDepth;
}
bool operator!=(const JITFrameKey& aOther) const {
return !(*this == aOther);
}
void* mCanonicalAddress;
uint32_t mDepth;
};
struct JITFrameKeyHasher {
using Lookup = JITFrameKey;
static mozilla::HashNumber hash(const JITFrameKey& aLookup) {
mozilla::HashNumber hash = 0;
hash = mozilla::AddToHash(hash, aLookup.mCanonicalAddress);
hash = mozilla::AddToHash(hash, aLookup.mDepth);
return hash;
}
static bool match(const JITFrameKey& aKey, const JITFrameKey& aLookup) {
return aKey == aLookup;
}
static void rekey(JITFrameKey& aKey, const JITFrameKey& aNewKey) {
aKey = aNewKey;
}
};
using JITAddressToJITFramesMap =
mozilla::HashMap<void*, mozilla::Vector<JITFrameKey>>;
JITAddressToJITFramesMap mJITAddressToJITFramesMap;
using JITFrameToFrameJSONMap =
mozilla::HashMap<JITFrameKey, nsCString, JITFrameKeyHasher>;
JITFrameToFrameJSONMap mJITFrameToFrameJSONMap;
};
// Contains JITFrameInfoForBufferRange objects for multiple profiler buffer
// ranges.
struct JITFrameInfo final {
JITFrameInfo() : mUniqueStrings(mozilla::MakeUnique<UniqueJSONStrings>()) {}
MOZ_IMPLICIT JITFrameInfo(const JITFrameInfo& aOther);
// Creates a new JITFrameInfoForBufferRange object in mRanges by looking up
// information about the provided JIT return addresses using aCx.
// Addresses are provided like this:
// The caller of AddInfoForRange supplies a function in aJITAddressProvider.
// This function will be called once, synchronously, with an
// aJITAddressConsumer argument, which is a function that needs to be called
// for every address. That function can be called multiple times for the same
// address.
void AddInfoForRange(
uint64_t aRangeStart, uint64_t aRangeEnd, JSContext* aCx,
const std::function<void(const std::function<void(void*)>&)>&
aJITAddressProvider);
// Returns whether the information stored in this object is still relevant
// for any entries in the buffer.
bool HasExpired(uint64_t aCurrentBufferRangeStart) const {
if (mRanges.empty()) {
// No information means no relevant information. Allow this object to be
// discarded.
return true;
}
return mRanges.back().mRangeEnd <= aCurrentBufferRangeStart;
}
// The array of ranges of JIT frame information, sorted by buffer position.
// Ranges are non-overlapping.
// The JSON of the cached frames can contain string indexes, which refer
// to strings in mUniqueStrings.
mozilla::Vector<JITFrameInfoForBufferRange> mRanges;
// The string table which contains strings used in the frame JSON that's
// cached in mRanges.
mozilla::UniquePtr<UniqueJSONStrings> mUniqueStrings;
};
class UniqueStacks {
public:
struct FrameKey {
explicit FrameKey(const char* aLocation)
: mData(NormalFrameData{nsCString(aLocation), false, mozilla::Nothing(),
mozilla::Nothing()}) {}
: mData(NormalFrameData{std::string(aLocation), false,
mozilla::Nothing(), mozilla::Nothing()}) {}
FrameKey(nsCString&& aLocation, bool aRelevantForJS,
FrameKey(std::string&& aLocation, bool aRelevantForJS,
const mozilla::Maybe<unsigned>& aLine,
const mozilla::Maybe<unsigned>& aColumn,
const mozilla::Maybe<JS::ProfilingCategoryPair>& aCategoryPair)
: mData(NormalFrameData{aLocation, aRelevantForJS, aLine, aColumn,
aCategoryPair}) {}
FrameKey(void* aJITAddress, uint32_t aJITDepth, uint32_t aRangeIndex)
: mData(JITFrameData{aJITAddress, aJITDepth, aRangeIndex}) {}
FrameKey(const FrameKey& aToCopy) = default;
uint32_t Hash() const;
@ -263,20 +165,13 @@ class UniqueStacks {
struct NormalFrameData {
bool operator==(const NormalFrameData& aOther) const;
nsCString mLocation;
std::string mLocation;
bool mRelevantForJS;
mozilla::Maybe<unsigned> mLine;
mozilla::Maybe<unsigned> mColumn;
mozilla::Maybe<JS::ProfilingCategoryPair> mCategoryPair;
};
struct JITFrameData {
bool operator==(const JITFrameData& aOther) const;
void* mCanonicalAddress;
uint32_t mDepth;
uint32_t mRangeIndex;
};
mozilla::Variant<NormalFrameData, JITFrameData> mData;
mozilla::Variant<NormalFrameData> mData;
};
struct FrameKeyHasher {
@ -287,9 +182,9 @@ class UniqueStacks {
if (aLookup.mData.is<FrameKey::NormalFrameData>()) {
const FrameKey::NormalFrameData& data =
aLookup.mData.as<FrameKey::NormalFrameData>();
if (!data.mLocation.IsEmpty()) {
hash = mozilla::AddToHash(hash,
mozilla::HashString(data.mLocation.get()));
if (!data.mLocation.empty()) {
hash = mozilla::AddToHash(
hash, mozilla::HashString(data.mLocation.c_str()));
}
hash = mozilla::AddToHash(hash, data.mRelevantForJS);
if (data.mLine.isSome()) {
@ -302,12 +197,6 @@ class UniqueStacks {
hash = mozilla::AddToHash(hash,
static_cast<uint32_t>(*data.mCategoryPair));
}
} else {
const FrameKey::JITFrameData& data =
aLookup.mData.as<FrameKey::JITFrameData>();
hash = mozilla::AddToHash(hash, data.mCanonicalAddress);
hash = mozilla::AddToHash(hash, data.mDepth);
hash = mozilla::AddToHash(hash, data.mRangeIndex);
}
return hash;
}
@ -361,7 +250,7 @@ class UniqueStacks {
}
};
explicit UniqueStacks(JITFrameInfo&& aJITFrameInfo);
UniqueStacks();
// Return a StackKey for aFrame as the stack's root frame (no prefix).
MOZ_MUST_USE StackKey BeginStack(const FrameKey& aFrame);
@ -370,15 +259,6 @@ class UniqueStacks {
MOZ_MUST_USE StackKey AppendFrame(const StackKey& aStack,
const FrameKey& aFrame);
// Look up frame keys for the given JIT address, and ensure that our frame
// table has entries for the returned frame keys. The JSON for these frames
// is taken from mJITInfoRanges.
// aBufferPosition is needed in order to look up the correct JIT frame info
// object in mJITInfoRanges.
MOZ_MUST_USE mozilla::Maybe<mozilla::Vector<UniqueStacks::FrameKey>>
LookupFramesForJITAddressFromBufferPos(void* aJITAddress,
uint64_t aBufferPosition);
MOZ_MUST_USE uint32_t GetOrAddFrameIndex(const FrameKey& aFrame);
MOZ_MUST_USE uint32_t GetOrAddStackIndex(const StackKey& aStack);
@ -398,8 +278,6 @@ class UniqueStacks {
SpliceableChunkedJSONWriter mStackTableWriter;
mozilla::HashMap<StackKey, uint32_t, StackKeyHasher> mStackToIndexMap;
mozilla::Vector<JITFrameInfoForBufferRange> mJITInfoRanges;
};
//

View file

@ -13,51 +13,21 @@
# include "ProfileBuffer.h"
# include "BaseProfileJSONWriter.h"
# include "js/TraceLoggerAPI.h"
# include "mozilla/dom/ContentChild.h"
# if defined(GP_OS_darwin)
# include <pthread.h>
# endif
ProfiledThreadData::ProfiledThreadData(ThreadInfo* aThreadInfo,
nsIEventTarget* aEventTarget,
bool aIncludeResponsiveness)
: mThreadInfo(aThreadInfo) {
MOZ_COUNT_CTOR(ProfiledThreadData);
if (aIncludeResponsiveness) {
mResponsiveness.emplace(aEventTarget, aThreadInfo->IsMainThread());
}
}
ProfiledThreadData::ProfiledThreadData(ThreadInfo* aThreadInfo)
: mThreadInfo(aThreadInfo) {}
ProfiledThreadData::~ProfiledThreadData() {
MOZ_COUNT_DTOR(ProfiledThreadData);
}
ProfiledThreadData::~ProfiledThreadData() {}
void ProfiledThreadData::StreamJSON(const ProfileBuffer& aBuffer,
JSContext* aCx,
SpliceableJSONWriter& aWriter,
const nsACString& aProcessName,
const std::string& aProcessName,
const mozilla::TimeStamp& aProcessStartTime,
double aSinceTime, bool JSTracerEnabled) {
if (mJITFrameInfoForPreviousJSContexts &&
mJITFrameInfoForPreviousJSContexts->HasExpired(aBuffer.mRangeStart)) {
mJITFrameInfoForPreviousJSContexts = nullptr;
}
// If we have an existing JITFrameInfo in mJITFrameInfoForPreviousJSContexts,
// copy the data from it.
JITFrameInfo jitFrameInfo =
mJITFrameInfoForPreviousJSContexts
? JITFrameInfo(*mJITFrameInfoForPreviousJSContexts)
: JITFrameInfo();
if (aCx && mBufferPositionWhenReceivedJSContext) {
aBuffer.AddJITInfoForRange(*mBufferPositionWhenReceivedJSContext,
mThreadInfo->ThreadId(), aCx, jitFrameInfo);
}
UniqueStacks uniqueStacks(std::move(jitFrameInfo));
double aSinceTime) {
UniqueStacks uniqueStacks;
aWriter.Start();
{
@ -104,121 +74,26 @@ void ProfiledThreadData::StreamJSON(const ProfileBuffer& aBuffer,
aWriter.EndArray();
}
if (aCx && JSTracerEnabled) {
StreamTraceLoggerJSON(aCx, aWriter, aProcessStartTime);
}
aWriter.End();
}
void ProfiledThreadData::StreamTraceLoggerJSON(
JSContext* aCx, SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime) {
aWriter.StartObjectProperty("jsTracerEvents");
{
JS::AutoTraceLoggerLockGuard lockGuard;
JS::SpewTraceLoggerThread(aCx);
uint32_t length = 0;
// Collect Event Ids
aWriter.StartArrayProperty("events", mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerIdBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (uint32_t val : collectionBuffer) {
aWriter.IntElement(val);
length++;
}
}
}
aWriter.EndArray();
// Collect Event Timestamps
aWriter.StartArrayProperty("timestamps",
mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerTimeStampBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (mozilla::TimeStamp val : collectionBuffer) {
aWriter.DoubleElement((val - aProcessStartTime).ToMicroseconds());
}
}
}
aWriter.EndArray();
// Collect Event Durations
aWriter.StartArrayProperty("durations",
mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerDurationBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (double val : collectionBuffer) {
if (val == -1) {
aWriter.NullElement();
} else {
aWriter.DoubleElement(val);
}
}
}
}
aWriter.EndArray();
// Collect Event LineNo
aWriter.StartArrayProperty("line", mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerLineNoBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (int32_t val : collectionBuffer) {
if (val == -1) {
aWriter.NullElement();
} else {
aWriter.IntElement(val);
}
}
}
}
aWriter.EndArray();
// Collect Event ColNo
aWriter.StartArrayProperty("column", mozilla::JSONWriter::SingleLineStyle);
{
JS::TraceLoggerColNoBuffer collectionBuffer(lockGuard, aCx);
while (collectionBuffer.NextChunk()) {
for (int32_t val : collectionBuffer) {
if (val == -1) {
aWriter.NullElement();
} else {
aWriter.IntElement(val);
}
}
}
}
aWriter.EndArray();
aWriter.IntProperty("length", length);
}
aWriter.EndObject();
}
void StreamSamplesAndMarkers(const char* aName, int aThreadId,
const ProfileBuffer& aBuffer,
SpliceableJSONWriter& aWriter,
const nsACString& aProcessName,
const std::string& aProcessName,
const mozilla::TimeStamp& aProcessStartTime,
const mozilla::TimeStamp& aRegisterTime,
const mozilla::TimeStamp& aUnregisterTime,
double aSinceTime, UniqueStacks& aUniqueStacks) {
aWriter.StringProperty("processType",
XRE_ChildProcessTypeToString(XRE_GetProcessType()));
aWriter.StringProperty(
"processType",
"(unknown)" /* XRE_ChildProcessTypeToString(XRE_GetProcessType()) */);
aWriter.StringProperty("name", aName);
// Use given process name (if any), unless we're the parent process.
if (XRE_IsParentProcess()) {
aWriter.StringProperty("processName", "Parent Process");
} else if (!aProcessName.IsEmpty()) {
aWriter.StringProperty("processName", aProcessName.Data());
// Use given process name (if any).
if (!aProcessName.empty()) {
aWriter.StringProperty("processName", aProcessName.c_str());
}
aWriter.IntProperty("tid", static_cast<int64_t>(aThreadId));
@ -280,30 +155,4 @@ void StreamSamplesAndMarkers(const char* aName, int aThreadId,
aWriter.EndObject();
}
void ProfiledThreadData::NotifyAboutToLoseJSContext(
JSContext* aContext, const mozilla::TimeStamp& aProcessStartTime,
ProfileBuffer& aBuffer) {
if (!mBufferPositionWhenReceivedJSContext) {
return;
}
MOZ_RELEASE_ASSERT(aContext);
if (mJITFrameInfoForPreviousJSContexts &&
mJITFrameInfoForPreviousJSContexts->HasExpired(aBuffer.mRangeStart)) {
mJITFrameInfoForPreviousJSContexts = nullptr;
}
mozilla::UniquePtr<JITFrameInfo> jitFrameInfo =
mJITFrameInfoForPreviousJSContexts
? std::move(mJITFrameInfoForPreviousJSContexts)
: mozilla::MakeUnique<JITFrameInfo>();
aBuffer.AddJITInfoForRange(*mBufferPositionWhenReceivedJSContext,
mThreadInfo->ThreadId(), aContext, *jitFrameInfo);
mJITFrameInfoForPreviousJSContexts = std::move(jitFrameInfo);
mBufferPositionWhenReceivedJSContext = mozilla::Nothing();
}
#endif // MOZ_BASE_PROFILER

View file

@ -7,15 +7,17 @@
#ifndef ProfiledThreadData_h
#define ProfiledThreadData_h
#include "BaseProfilingStack.h"
#include "platform.h"
#include "ProfileBufferEntry.h"
#include "ThreadInfo.h"
#include "ThreadResponsiveness.h"
#include "js/ProfilingStack.h"
#include "mozilla/RefPtr.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtr.h"
#include <string>
class ProfileBuffer;
// This class contains information about a thread that is only relevant while
@ -43,12 +45,10 @@ class ProfileBuffer;
// when the profiler is stopped.
class ProfiledThreadData final {
public:
ProfiledThreadData(ThreadInfo* aThreadInfo, nsIEventTarget* aEventTarget,
bool aIncludeResponsiveness);
explicit ProfiledThreadData(ThreadInfo* aThreadInfo);
~ProfiledThreadData();
void NotifyUnregistered(uint64_t aBufferPosition) {
mResponsiveness.reset();
mLastSample = mozilla::Nothing();
MOZ_ASSERT(!mBufferPositionWhenReceivedJSContext,
"JSContext should have been cleared before the thread was "
@ -62,20 +62,10 @@ class ProfiledThreadData final {
mozilla::Maybe<uint64_t>& LastSample() { return mLastSample; }
void StreamJSON(const ProfileBuffer& aBuffer, JSContext* aCx,
SpliceableJSONWriter& aWriter, const nsACString& aProcessName,
void StreamJSON(const ProfileBuffer& aBuffer, SpliceableJSONWriter& aWriter,
const std::string& aProcessName,
const mozilla::TimeStamp& aProcessStartTime,
double aSinceTime, bool aJSTracerEnabled);
void StreamTraceLoggerJSON(JSContext* aCx, SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime);
// Returns nullptr if this is not the main thread, the responsiveness
// feature is not turned on, or if this thread is not being profiled.
ThreadResponsiveness* GetThreadResponsiveness() {
ThreadResponsiveness* responsiveness = mResponsiveness.ptrOr(nullptr);
return responsiveness;
}
double aSinceTime);
const RefPtr<ThreadInfo> Info() const { return mThreadInfo; }
@ -84,12 +74,6 @@ class ProfiledThreadData final {
mozilla::Some(aCurrentBufferPosition);
}
// Call this method when the JS entries inside the buffer are about to
// become invalid, i.e., just before JS shutdown.
void NotifyAboutToLoseJSContext(JSContext* aCx,
const mozilla::TimeStamp& aProcessStartTime,
ProfileBuffer& aBuffer);
private:
// Group A:
// The following fields are interesting for the entire lifetime of a
@ -98,20 +82,10 @@ class ProfiledThreadData final {
// This thread's thread info.
const RefPtr<ThreadInfo> mThreadInfo;
// Contains JSON for JIT frames from any JSContexts that were used for this
// thread in the past.
// Null if this thread has never lost a JSContext or if all samples from
// previous JSContexts have been evicted from the profiler buffer.
mozilla::UniquePtr<JITFrameInfo> mJITFrameInfoForPreviousJSContexts;
// Group B:
// The following fields are only used while this thread is alive and
// registered. They become Nothing() once the thread is unregistered.
// A helper object that instruments nsIThreads to obtain responsiveness
// information about their event loop.
mozilla::Maybe<ThreadResponsiveness> mResponsiveness;
// When sampling, this holds the position in ActivePS::mBuffer of the most
// recent sample for this thread, or Nothing() if there is no sample for this
// thread in the buffer.
@ -130,7 +104,7 @@ class ProfiledThreadData final {
void StreamSamplesAndMarkers(const char* aName, int aThreadId,
const ProfileBuffer& aBuffer,
SpliceableJSONWriter& aWriter,
const nsACString& aProcessName,
const std::string& aProcessName,
const mozilla::TimeStamp& aProcessStartTime,
const mozilla::TimeStamp& aRegisterTime,
const mozilla::TimeStamp& aUnregisterTime,

View file

@ -17,11 +17,9 @@
ProfilerBacktrace::ProfilerBacktrace(const char* aName, int aThreadId,
mozilla::UniquePtr<ProfileBuffer> aBuffer)
: mName(strdup(aName)), mThreadId(aThreadId), mBuffer(std::move(aBuffer)) {
MOZ_COUNT_CTOR(ProfilerBacktrace);
}
: mName(strdup(aName)), mThreadId(aThreadId), mBuffer(std::move(aBuffer)) {}
ProfilerBacktrace::~ProfilerBacktrace() { MOZ_COUNT_DTOR(ProfilerBacktrace); }
ProfilerBacktrace::~ProfilerBacktrace() {}
void ProfilerBacktrace::StreamJSON(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
@ -30,8 +28,8 @@ void ProfilerBacktrace::StreamJSON(SpliceableJSONWriter& aWriter,
// ProfileBuffer::AddJITInfoForRange because mBuffer does not contain any
// JitReturnAddr entries. For synchronous samples, JIT frames get expanded
// at sample time.
StreamSamplesAndMarkers(mName.get(), mThreadId, *mBuffer.get(), aWriter,
NS_LITERAL_CSTRING(""), aProcessStartTime,
StreamSamplesAndMarkers(mName.get(), mThreadId, *mBuffer.get(), aWriter, "",
aProcessStartTime,
/* aRegisterTime */ mozilla::TimeStamp(),
/* aUnregisterTime */ mozilla::TimeStamp(),
/* aSinceTime */ 0, aUniqueStacks);

View file

@ -56,7 +56,7 @@ class ProfilerMarker {
aUniqueStacks.mUniqueStrings->WriteElement(aWriter, mMarkerName.get());
aWriter.DoubleElement(mTime);
const JS::ProfilingCategoryPairInfo& info =
JS::GetProfilingCategoryPairInfo(mCategoryPair);
JS::GetBaseProfilingCategoryPairInfo(mCategoryPair);
aWriter.IntElement(unsigned(info.mCategory));
// TODO: Store the callsite for this marker if available:
// if have location data

View file

@ -13,10 +13,7 @@
# include "BaseProfileJSONWriter.h"
# include "ProfilerBacktrace.h"
# include "gfxASurface.h"
# include "Layers.h"
# include "mozilla/Maybe.h"
# include "mozilla/net/HttpBaseChannel.h"
# include "mozilla/Sprintf.h"
# include <inttypes.h>
@ -45,7 +42,7 @@ void ProfilerMarkerPayload::StreamCommonProps(
WriteTime(aWriter, aProcessStartTime, mStartTime, "startTime");
WriteTime(aWriter, aProcessStartTime, mEndTime, "endTime");
if (mDocShellId) {
aWriter.StringProperty("docShellId", nsIDToCString(*mDocShellId).get());
aWriter.StringProperty("docShellId", mDocShellId->c_str());
}
if (mDocShellHistoryId) {
aWriter.DoubleProperty("docshellHistoryId", mDocShellHistoryId.ref());
@ -88,18 +85,16 @@ void UserTimingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("UserTiming", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", NS_ConvertUTF16toUTF8(mName).get());
aWriter.StringProperty("name", mName.c_str());
aWriter.StringProperty("entryType", mEntryType);
if (mStartMark.isSome()) {
aWriter.StringProperty("startMark",
NS_ConvertUTF16toUTF8(mStartMark.value()).get());
aWriter.StringProperty("startMark", mStartMark.value().c_str());
} else {
aWriter.NullProperty("startMark");
}
if (mEndMark.isSome()) {
aWriter.StringProperty("endMark",
NS_ConvertUTF16toUTF8(mEndMark.value()).get());
aWriter.StringProperty("endMark", mEndMark.value().c_str());
} else {
aWriter.NullProperty("endMark");
}
@ -109,168 +104,15 @@ void TextMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Text", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", mText.get());
aWriter.StringProperty("name", mText.c_str());
}
void LogMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Log", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", mText.get());
aWriter.StringProperty("module", mModule.get());
}
void DOMEventMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
TracingMarkerPayload::StreamPayload(aWriter, aProcessStartTime,
aUniqueStacks);
WriteTime(aWriter, aProcessStartTime, mTimeStamp, "timeStamp");
aWriter.StringProperty("eventType", NS_ConvertUTF16toUTF8(mEventType).get());
}
void LayerTranslationMarkerPayload::StreamPayload(
SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamType("LayerTranslation", aWriter);
const size_t bufferSize = 32;
char buffer[bufferSize];
SprintfLiteral(buffer, "%p", mLayer);
aWriter.StringProperty("layer", buffer);
aWriter.IntProperty("x", mPoint.x);
aWriter.IntProperty("y", mPoint.y);
}
void VsyncMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamType("VsyncTimestamp", aWriter);
}
static const char* GetNetworkState(NetworkLoadType aType) {
switch (aType) {
case NetworkLoadType::LOAD_START:
return "STATUS_START";
case NetworkLoadType::LOAD_STOP:
return "STATUS_STOP";
case NetworkLoadType::LOAD_REDIRECT:
return "STATUS_REDIRECT";
}
return "";
}
static const char* GetCacheState(
mozilla::net::CacheDisposition aCacheDisposition) {
switch (aCacheDisposition) {
case mozilla::net::kCacheUnresolved:
return "Unresolved";
case mozilla::net::kCacheHit:
return "Hit";
case mozilla::net::kCacheHitViaReval:
return "HitViaReval";
case mozilla::net::kCacheMissedViaReval:
return "MissedViaReval";
case mozilla::net::kCacheMissed:
return "Missed";
case mozilla::net::kCacheUnknown:
default:
return nullptr;
}
}
void NetworkMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Network", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.IntProperty("id", mID);
const char* typeString = GetNetworkState(mType);
const char* cacheString = GetCacheState(mCacheDisposition);
// want to use aUniqueStacks.mUniqueStrings->WriteElement(aWriter,
// typeString);
aWriter.StringProperty("status", typeString);
if (cacheString) {
aWriter.StringProperty("cache", cacheString);
}
aWriter.IntProperty("pri", mPri);
if (mCount > 0) {
aWriter.IntProperty("count", mCount);
}
if (mURI) {
aWriter.StringProperty("URI", mURI.get());
}
if (mRedirectURI) {
aWriter.StringProperty("RedirectURI", mRedirectURI.get());
}
if (mType != NetworkLoadType::LOAD_START) {
WriteTime(aWriter, aProcessStartTime, mTimings.domainLookupStart,
"domainLookupStart");
WriteTime(aWriter, aProcessStartTime, mTimings.domainLookupEnd,
"domainLookupEnd");
WriteTime(aWriter, aProcessStartTime, mTimings.connectStart,
"connectStart");
WriteTime(aWriter, aProcessStartTime, mTimings.tcpConnectEnd,
"tcpConnectEnd");
WriteTime(aWriter, aProcessStartTime, mTimings.secureConnectionStart,
"secureConnectionStart");
WriteTime(aWriter, aProcessStartTime, mTimings.connectEnd, "connectEnd");
WriteTime(aWriter, aProcessStartTime, mTimings.requestStart,
"requestStart");
WriteTime(aWriter, aProcessStartTime, mTimings.responseStart,
"responseStart");
WriteTime(aWriter, aProcessStartTime, mTimings.responseEnd, "responseEnd");
}
}
void ScreenshotPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamType("CompositorScreenshot", aWriter);
aUniqueStacks.mUniqueStrings->WriteProperty(aWriter, "url",
mScreenshotDataURL.get());
char hexWindowID[32];
SprintfLiteral(hexWindowID, "0x%" PRIXPTR, mWindowIdentifier);
aWriter.StringProperty("windowID", hexWindowID);
aWriter.DoubleProperty("windowWidth", mWindowSize.width);
aWriter.DoubleProperty("windowHeight", mWindowSize.height);
}
void GCSliceMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
MOZ_ASSERT(mTimingJSON);
StreamCommonProps("GCSlice", aWriter, aProcessStartTime, aUniqueStacks);
if (mTimingJSON) {
aWriter.SplicedJSONProperty("timings", mTimingJSON.get());
} else {
aWriter.NullProperty("timings");
}
}
void GCMajorMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
MOZ_ASSERT(mTimingJSON);
StreamCommonProps("GCMajor", aWriter, aProcessStartTime, aUniqueStacks);
if (mTimingJSON) {
aWriter.SplicedJSONProperty("timings", mTimingJSON.get());
} else {
aWriter.NullProperty("timings");
}
}
void GCMinorMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
MOZ_ASSERT(mTimingData);
StreamCommonProps("GCMinor", aWriter, aProcessStartTime, aUniqueStacks);
if (mTimingData) {
aWriter.SplicedJSONProperty("nursery", mTimingData.get());
} else {
aWriter.NullProperty("nursery");
}
aWriter.StringProperty("name", mText.c_str());
aWriter.StringProperty("module", mModule.c_str());
}
void HangMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
@ -280,18 +122,6 @@ void HangMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
aUniqueStacks);
}
void StyleMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Styles", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("category", "Paint");
aWriter.IntProperty("elementsTraversed", mStats.mElementsTraversed);
aWriter.IntProperty("elementsStyled", mStats.mElementsStyled);
aWriter.IntProperty("elementsMatched", mStats.mElementsMatched);
aWriter.IntProperty("stylesShared", mStats.mStylesShared);
aWriter.IntProperty("stylesReused", mStats.mStylesReused);
}
void LongTaskMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {

View file

@ -8,516 +8,10 @@
#ifdef MOZ_BASE_PROFILER
# include "vm/GeckoProfiler-inl.h"
# include "BaseProfilingCategory.h"
# include "mozilla/ArrayUtils.h"
# include "mozilla/DebugOnly.h"
# include "mozilla/Sprintf.h"
# include "jsnum.h"
# include "gc/GC.h"
# include "gc/PublicIterators.h"
# include "jit/BaselineFrame.h"
# include "jit/BaselineJIT.h"
# include "jit/JitcodeMap.h"
# include "jit/JitFrames.h"
# include "jit/JitRealm.h"
# include "jit/JSJitFrameIter.h"
# include "js/TraceLoggerAPI.h"
# include "util/StringBuffer.h"
# include "vm/JSScript.h"
# include "gc/Marking-inl.h"
# include "vm/JSScript-inl.h"
using namespace js;
using mozilla::DebugOnly;
GeckoProfilerThread::GeckoProfilerThread()
: profilingStack_(nullptr), profilingStackIfEnabled_(nullptr) {}
GeckoProfilerRuntime::GeckoProfilerRuntime(JSRuntime* rt)
: rt(rt),
strings_(),
slowAssertions(false),
enabled_(false),
eventMarker_(nullptr) {
MOZ_ASSERT(rt != nullptr);
}
void GeckoProfilerThread::setProfilingStack(ProfilingStack* profilingStack,
bool enabled) {
profilingStack_ = profilingStack;
profilingStackIfEnabled_ = enabled ? profilingStack : nullptr;
}
void GeckoProfilerRuntime::setEventMarker(void (*fn)(const char*)) {
eventMarker_ = fn;
}
// Get a pointer to the top-most profiling frame, given the exit frame pointer.
static void* GetTopProfilingJitFrame(Activation* act) {
if (!act || !act->isJit()) {
return nullptr;
}
jit::JitActivation* jitActivation = act->asJit();
// If there is no exit frame set, just return.
if (!jitActivation->hasExitFP()) {
return nullptr;
}
// Skip wasm frames that might be in the way.
OnlyJSJitFrameIter iter(jitActivation);
if (iter.done()) {
return nullptr;
}
jit::JSJitProfilingFrameIterator jitIter(
(jit::CommonFrameLayout*)iter.frame().fp());
MOZ_ASSERT(!jitIter.done());
return jitIter.fp();
}
void GeckoProfilerRuntime::enable(bool enabled) {
JSContext* cx = rt->mainContextFromAnyThread();
MOZ_ASSERT(cx->geckoProfiler().infraInstalled());
if (enabled_ == enabled) {
return;
}
/*
* Ensure all future generated code will be instrumented, or that all
* currently instrumented code is discarded
*/
ReleaseAllJITCode(rt->defaultFreeOp());
// This function is called when the Gecko profiler makes a new Sampler
// (and thus, a new circular buffer). Set all current entries in the
// JitcodeGlobalTable as expired and reset the buffer range start.
if (rt->hasJitRuntime() && rt->jitRuntime()->hasJitcodeGlobalTable()) {
rt->jitRuntime()->getJitcodeGlobalTable()->setAllEntriesAsExpired();
}
rt->setProfilerSampleBufferRangeStart(0);
// Ensure that lastProfilingFrame is null for the main thread.
if (cx->jitActivation) {
cx->jitActivation->setLastProfilingFrame(nullptr);
cx->jitActivation->setLastProfilingCallSite(nullptr);
}
// Reset the tracelogger, if toggled on
JS::ResetTraceLogger();
enabled_ = enabled;
/* Toggle Gecko Profiler-related jumps on baseline jitcode.
* The call to |ReleaseAllJITCode| above will release most baseline jitcode,
* but not jitcode for scripts with active frames on the stack. These scripts
* need to have their profiler state toggled so they behave properly.
*/
jit::ToggleBaselineProfiling(rt, enabled);
// Update lastProfilingFrame to point to the top-most JS jit-frame currently
// on stack.
if (cx->jitActivation) {
// Walk through all activations, and set their lastProfilingFrame
// appropriately.
if (enabled) {
Activation* act = cx->activation();
void* lastProfilingFrame = GetTopProfilingJitFrame(act);
jit::JitActivation* jitActivation = cx->jitActivation;
while (jitActivation) {
jitActivation->setLastProfilingFrame(lastProfilingFrame);
jitActivation->setLastProfilingCallSite(nullptr);
jitActivation = jitActivation->prevJitActivation();
lastProfilingFrame = GetTopProfilingJitFrame(jitActivation);
}
} else {
jit::JitActivation* jitActivation = cx->jitActivation;
while (jitActivation) {
jitActivation->setLastProfilingFrame(nullptr);
jitActivation->setLastProfilingCallSite(nullptr);
jitActivation = jitActivation->prevJitActivation();
}
}
}
// WebAssembly code does not need to be released, but profiling string
// labels have to be generated so that they are available during async
// profiling stack iteration.
for (RealmsIter r(rt); !r.done(); r.next()) {
r->wasm.ensureProfilingLabels(enabled);
}
# ifdef JS_STRUCTURED_SPEW
// Enable the structured spewer if the environment variable is set.
if (enabled) {
cx->spewer().enableSpewing();
} else {
cx->spewer().disableSpewing();
}
# endif
}
/* Lookup the string for the function/script, creating one if necessary */
const char* GeckoProfilerRuntime::profileString(JSContext* cx,
JSScript* script) {
ProfileStringMap::AddPtr s = strings().lookupForAdd(script);
if (!s) {
UniqueChars str = allocProfileString(cx, script);
if (!str) {
return nullptr;
}
if (!strings().add(s, script, std::move(str))) {
ReportOutOfMemory(cx);
return nullptr;
}
}
return s->value().get();
}
void GeckoProfilerRuntime::onScriptFinalized(JSScript* script) {
/*
* This function is called whenever a script is destroyed, regardless of
* whether profiling has been turned on, so don't invoke a function on an
* invalid hash set. Also, even if profiling was enabled but then turned
* off, we still want to remove the string, so no check of enabled() is
* done.
*/
if (ProfileStringMap::Ptr entry = strings().lookup(script)) {
strings().remove(entry);
}
}
void GeckoProfilerRuntime::markEvent(const char* event) {
MOZ_ASSERT(enabled());
if (eventMarker_) {
JS::AutoSuppressGCAnalysis nogc;
eventMarker_(event);
}
}
bool GeckoProfilerThread::enter(JSContext* cx, JSScript* script) {
const char* dynamicString =
cx->runtime()->geckoProfiler().profileString(cx, script);
if (dynamicString == nullptr) {
return false;
}
# ifdef DEBUG
// In debug builds, assert the JS profiling stack frames already on the
// stack have a non-null pc. Only look at the top frames to avoid quadratic
// behavior.
uint32_t sp = profilingStack_->stackPointer;
if (sp > 0 && sp - 1 < profilingStack_->stackCapacity()) {
size_t start = (sp > 4) ? sp - 4 : 0;
for (size_t i = start; i < sp - 1; i++) {
MOZ_ASSERT_IF(profilingStack_->frames[i].isJsFrame(),
profilingStack_->frames[i].pc());
}
}
# endif
profilingStack_->pushJsFrame("", dynamicString, script, script->code());
return true;
}
void GeckoProfilerThread::exit(JSContext* cx, JSScript* script) {
profilingStack_->pop();
# ifdef DEBUG
/* Sanity check to make sure push/pop balanced */
uint32_t sp = profilingStack_->stackPointer;
if (sp < profilingStack_->stackCapacity()) {
JSRuntime* rt = script->runtimeFromMainThread();
const char* dynamicString = rt->geckoProfiler().profileString(cx, script);
/* Can't fail lookup because we should already be in the set */
MOZ_ASSERT(dynamicString);
// Bug 822041
if (!profilingStack_->frames[sp].isJsFrame()) {
fprintf(stderr, "--- ABOUT TO FAIL ASSERTION ---\n");
fprintf(stderr, " frames=%p size=%u/%u\n", (void*)profilingStack_->frames,
uint32_t(profilingStack_->stackPointer),
profilingStack_->stackCapacity());
for (int32_t i = sp; i >= 0; i--) {
ProfilingStackFrame& frame = profilingStack_->frames[i];
if (frame.isJsFrame()) {
fprintf(stderr, " [%d] JS %s\n", i, frame.dynamicString());
} else {
fprintf(stderr, " [%d] Label %s\n", i, frame.dynamicString());
}
}
}
ProfilingStackFrame& frame = profilingStack_->frames[sp];
MOZ_ASSERT(frame.isJsFrame());
MOZ_ASSERT(frame.script() == script);
MOZ_ASSERT(strcmp((const char*)frame.dynamicString(), dynamicString) == 0);
}
# endif
}
/*
* Serializes the script/function pair into a "descriptive string" which is
* allowed to fail. This function must not trigger a GC, because a GC could
* finalize some scripts, resize the hash table of profile strings, and
* invalidate the AddPtr held while invoking allocProfileString.
*/
/* static */
UniqueChars GeckoProfilerRuntime::allocProfileString(JSContext* cx,
JSScript* script) {
// Note: this profiler string is regexp-matched by
// devtools/client/profiler/cleopatra/js/parserWorker.js.
// If the script has a function, try calculating its name.
bool hasName = false;
size_t nameLength = 0;
UniqueChars nameStr;
JSFunction* func = script->functionDelazifying();
if (func && func->displayAtom()) {
nameStr = StringToNewUTF8CharsZ(cx, *func->displayAtom());
if (!nameStr) {
return nullptr;
}
nameLength = strlen(nameStr.get());
hasName = true;
}
// Calculate filename length.
const char* filenameStr = script->filename() ? script->filename() : "(null)";
size_t filenameLength = strlen(filenameStr);
// Calculate line + column length.
bool hasLineAndColumn = false;
size_t lineAndColumnLength = 0;
char lineAndColumnStr[30];
if (hasName || script->functionNonDelazifying() || script->isForEval()) {
lineAndColumnLength = SprintfLiteral(lineAndColumnStr, "%u:%u",
script->lineno(), script->column());
hasLineAndColumn = true;
}
// Full profile string for scripts with functions is:
// FuncName (FileName:Lineno:Column)
// Full profile string for scripts without functions is:
// FileName:Lineno:Column
// Full profile string for scripts without functions and without lines is:
// FileName
// Calculate full string length.
size_t fullLength = 0;
if (hasName) {
MOZ_ASSERT(hasLineAndColumn);
fullLength = nameLength + 2 + filenameLength + 1 + lineAndColumnLength + 1;
} else if (hasLineAndColumn) {
fullLength = filenameLength + 1 + lineAndColumnLength;
} else {
fullLength = filenameLength;
}
// Allocate string.
UniqueChars str(cx->pod_malloc<char>(fullLength + 1));
if (!str) {
return nullptr;
}
size_t cur = 0;
// Fill string with function name if needed.
if (hasName) {
memcpy(str.get() + cur, nameStr.get(), nameLength);
cur += nameLength;
str[cur++] = ' ';
str[cur++] = '(';
}
// Fill string with filename chars.
memcpy(str.get() + cur, filenameStr, filenameLength);
cur += filenameLength;
// Fill line + column chars.
if (hasLineAndColumn) {
str[cur++] = ':';
memcpy(str.get() + cur, lineAndColumnStr, lineAndColumnLength);
cur += lineAndColumnLength;
}
// Terminal ')' if necessary.
if (hasName) {
str[cur++] = ')';
}
MOZ_ASSERT(cur == fullLength);
str[cur] = 0;
return str;
}
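// Illustrative examples of the strings built above (not part of this change;
// the function name and URL are made up):
//   with a function:        "onClick (https://example.com/app.js:12:3)"
//   eval / function-less:   "https://example.com/app.js:12:3"
//   no line information:    "https://example.com/app.js"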
void GeckoProfilerThread::trace(JSTracer* trc) {
if (profilingStack_) {
size_t size = profilingStack_->stackSize();
for (size_t i = 0; i < size; i++) {
profilingStack_->frames[i].trace(trc);
}
}
}
void GeckoProfilerRuntime::fixupStringsMapAfterMovingGC() {
for (ProfileStringMap::Enum e(strings()); !e.empty(); e.popFront()) {
JSScript* script = e.front().key();
if (IsForwarded(script)) {
script = Forwarded(script);
e.rekeyFront(script);
}
}
}
# ifdef JSGC_HASH_TABLE_CHECKS
void GeckoProfilerRuntime::checkStringsMapAfterMovingGC() {
for (auto r = strings().all(); !r.empty(); r.popFront()) {
JSScript* script = r.front().key();
CheckGCThingAfterMovingGC(script);
auto ptr = strings().lookup(script);
MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
}
}
# endif
void ProfilingStackFrame::trace(JSTracer* trc) {
if (isJsFrame()) {
JSScript* s = rawScript();
TraceNullableRoot(trc, &s, "ProfilingStackFrame script");
spOrScript = s;
}
}
GeckoProfilerBaselineOSRMarker::GeckoProfilerBaselineOSRMarker(
JSContext* cx,
bool hasProfilerFrame MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
: profiler(&cx->geckoProfiler()) {
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
if (!hasProfilerFrame || !cx->runtime()->geckoProfiler().enabled()) {
profiler = nullptr;
return;
}
uint32_t sp = profiler->profilingStack_->stackPointer;
if (sp >= profiler->profilingStack_->stackCapacity()) {
profiler = nullptr;
return;
}
spBefore_ = sp;
if (sp == 0) {
return;
}
ProfilingStackFrame& frame = profiler->profilingStack_->frames[sp - 1];
MOZ_ASSERT(!frame.isOSRFrame());
frame.setIsOSRFrame(true);
}
GeckoProfilerBaselineOSRMarker::~GeckoProfilerBaselineOSRMarker() {
if (profiler == nullptr) {
return;
}
uint32_t sp = profiler->stackPointer();
MOZ_ASSERT(spBefore_ == sp);
if (sp == 0) {
return;
}
ProfilingStackFrame& frame = profiler->stack()[sp - 1];
MOZ_ASSERT(frame.isOSRFrame());
frame.setIsOSRFrame(false);
}
JS_PUBLIC_API JSScript* ProfilingStackFrame::script() const {
MOZ_ASSERT(isJsFrame());
auto script = reinterpret_cast<JSScript*>(spOrScript.operator void*());
if (!script) {
return nullptr;
}
// If profiling is suppressed then we can't trust the script pointers to be
// valid as they could be in the process of being moved by a compacting GC
// (although it's still OK to get the runtime from them).
JSContext* cx = script->runtimeFromAnyThread()->mainContextFromAnyThread();
if (!cx->isProfilerSamplingEnabled()) {
return nullptr;
}
MOZ_ASSERT(!IsForwarded(script));
return script;
}
JS_FRIEND_API jsbytecode* ProfilingStackFrame::pc() const {
MOZ_ASSERT(isJsFrame());
if (pcOffsetIfJS_ == NullPCOffset) {
return nullptr;
}
JSScript* script = this->script();
return script ? script->offsetToPC(pcOffsetIfJS_) : nullptr;
}
/* static */
int32_t ProfilingStackFrame::pcToOffset(JSScript* aScript, jsbytecode* aPc) {
return aPc ? aScript->pcToOffset(aPc) : NullPCOffset;
}
void ProfilingStackFrame::setPC(jsbytecode* pc) {
MOZ_ASSERT(isJsFrame());
JSScript* script = this->script();
MOZ_ASSERT(
script); // This should not be called while profiling is suppressed.
pcOffsetIfJS_ = pcToOffset(script, pc);
}
JS_FRIEND_API void js::SetContextProfilingStack(
JSContext* cx, ProfilingStack* profilingStack) {
cx->geckoProfiler().setProfilingStack(
profilingStack, cx->runtime()->geckoProfiler().enabled());
}
JS_FRIEND_API void js::EnableContextProfilingStack(JSContext* cx,
bool enabled) {
cx->geckoProfiler().enable(enabled);
cx->runtime()->geckoProfiler().enable(enabled);
}
JS_FRIEND_API void js::RegisterContextProfilingEventMarker(
JSContext* cx, void (*fn)(const char*)) {
MOZ_ASSERT(cx->runtime()->geckoProfiler().enabled());
cx->runtime()->geckoProfiler().setEventMarker(fn);
}
AutoSuppressProfilerSampling::AutoSuppressProfilerSampling(
JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
: cx_(cx), previouslyEnabled_(cx->isProfilerSamplingEnabled()) {
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
if (previouslyEnabled_) {
cx_->disableProfilerSampling();
}
}
AutoSuppressProfilerSampling::~AutoSuppressProfilerSampling() {
if (previouslyEnabled_) {
cx_->enableProfilerSampling();
}
}
# include "mozilla/Assertions.h"
namespace JS {
@ -560,7 +54,7 @@ const ProfilingCategoryPairInfo sProfilingCategoryPairInfo[] = {
// clang-format on
JS_FRIEND_API const ProfilingCategoryPairInfo& GetProfilingCategoryPairInfo(
const ProfilingCategoryPairInfo& GetBaseProfilingCategoryPairInfo(
ProfilingCategoryPair aCategoryPair) {
static_assert(
MOZ_ARRAY_LENGTH(sProfilingCategoryPairInfo) ==


@ -8,7 +8,7 @@
#ifdef MOZ_BASE_PROFILER
# include "js/ProfilingStack.h"
# include "BaseProfilingStack.h"
# include "mozilla/IntegerRange.h"
# include "mozilla/UniquePtr.h"


@ -10,18 +10,11 @@
# include "RegisteredThread.h"
RegisteredThread::RegisteredThread(ThreadInfo* aInfo, nsIEventTarget* aThread,
void* aStackTop)
RegisteredThread::RegisteredThread(ThreadInfo* aInfo, void* aStackTop)
: mRacyRegisteredThread(aInfo->ThreadId()),
mPlatformData(AllocPlatformData(aInfo->ThreadId())),
mStackTop(aStackTop),
mThreadInfo(aInfo),
mThread(aThread),
mContext(nullptr),
mJSSampling(INACTIVE),
mJSFlags(0) {
MOZ_COUNT_CTOR(RegisteredThread);
mThreadInfo(aInfo) {
// We don't have to guess on mac
# if defined(GP_OS_darwin)
pthread_t self = pthread_self();
@ -29,7 +22,7 @@ RegisteredThread::RegisteredThread(ThreadInfo* aInfo, nsIEventTarget* aThread,
# endif
}
RegisteredThread::~RegisteredThread() { MOZ_COUNT_DTOR(RegisteredThread); }
RegisteredThread::~RegisteredThread() {}
size_t RegisteredThread::SizeOfIncludingThis(
mozilla::MallocSizeOf aMallocSizeOf) const {


@ -12,10 +12,7 @@
#include "BaseProfilerMarkerPayload.h"
#include "ThreadInfo.h"
#include "js/TraceLoggerAPI.h"
#include "jsapi.h"
#include "mozilla/UniquePtr.h"
#include "nsIEventTarget.h"
// This class contains the state for a single thread that is accessible without
// protection from gPSMutex in platform.cpp. Because there is no external
@ -25,11 +22,9 @@
class RacyRegisteredThread final {
public:
explicit RacyRegisteredThread(int aThreadId)
: mThreadId(aThreadId), mSleep(AWAKE), mIsBeingProfiled(false) {
MOZ_COUNT_CTOR(RacyRegisteredThread);
}
: mThreadId(aThreadId), mSleep(AWAKE), mIsBeingProfiled(false) {}
~RacyRegisteredThread() { MOZ_COUNT_DTOR(RacyRegisteredThread); }
~RacyRegisteredThread() {}
void SetIsBeingProfiled(bool aIsBeingProfiled) {
mIsBeingProfiled = aIsBeingProfiled;
@ -164,7 +159,7 @@ class RacyRegisteredThread final {
// protected by the profiler state lock.
class RegisteredThread final {
public:
RegisteredThread(ThreadInfo* aInfo, nsIEventTarget* aThread, void* aStackTop);
RegisteredThread(ThreadInfo* aInfo, void* aStackTop);
~RegisteredThread();
class RacyRegisteredThread& RacyRegisteredThread() {
@ -179,89 +174,7 @@ class RegisteredThread final {
size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
// Set the JSContext of the thread to be sampled. Sampling cannot begin until
// this has been set.
void SetJSContext(JSContext* aContext) {
// This function runs on-thread.
MOZ_ASSERT(aContext && !mContext);
mContext = aContext;
// We give the JS engine a non-owning reference to the ProfilingStack. It's
// important that the JS engine doesn't touch this once the thread dies.
js::SetContextProfilingStack(aContext,
&RacyRegisteredThread().ProfilingStack());
}
void ClearJSContext() {
// This function runs on-thread.
mContext = nullptr;
}
JSContext* GetJSContext() const { return mContext; }
const RefPtr<ThreadInfo> Info() const { return mThreadInfo; }
const nsCOMPtr<nsIEventTarget> GetEventTarget() const { return mThread; }
// Request that this thread start JS sampling. JS sampling won't actually
// start until a subsequent PollJSSampling() call occurs *and* mContext has
// been set.
void StartJSSampling(uint32_t aJSFlags) {
// This function runs on-thread or off-thread.
MOZ_RELEASE_ASSERT(mJSSampling == INACTIVE ||
mJSSampling == INACTIVE_REQUESTED);
mJSSampling = ACTIVE_REQUESTED;
mJSFlags = aJSFlags;
}
// Request that this thread stop JS sampling. JS sampling won't actually stop
// until a subsequent PollJSSampling() call occurs.
void StopJSSampling() {
// This function runs on-thread or off-thread.
MOZ_RELEASE_ASSERT(mJSSampling == ACTIVE ||
mJSSampling == ACTIVE_REQUESTED);
mJSSampling = INACTIVE_REQUESTED;
}
// Poll to see if JS sampling should be started/stopped.
void PollJSSampling() {
// This function runs on-thread.
// We can't start/stop profiling until we have the thread's JSContext.
if (mContext) {
// It is possible for mJSSampling to go through the following sequences.
//
// - INACTIVE, ACTIVE_REQUESTED, INACTIVE_REQUESTED, INACTIVE
//
// - ACTIVE, INACTIVE_REQUESTED, ACTIVE_REQUESTED, ACTIVE
//
// Therefore, the if and else branches here aren't always interleaved.
// This is ok because the JS engine can handle that.
//
if (mJSSampling == ACTIVE_REQUESTED) {
mJSSampling = ACTIVE;
js::EnableContextProfilingStack(mContext, true);
JS_SetGlobalJitCompilerOption(mContext,
JSJITCOMPILER_TRACK_OPTIMIZATIONS,
TrackOptimizationsEnabled());
if (JSTracerEnabled()) {
JS::StartTraceLogger(mContext);
}
js::RegisterContextProfilingEventMarker(mContext,
profiler_add_js_marker);
} else if (mJSSampling == INACTIVE_REQUESTED) {
mJSSampling = INACTIVE;
js::EnableContextProfilingStack(mContext, false);
if (JSTracerEnabled()) {
JS::StopTraceLogger(mContext);
}
}
}
}
private:
class RacyRegisteredThread mRacyRegisteredThread;
@ -270,68 +183,6 @@ class RegisteredThread final {
const void* mStackTop;
const RefPtr<ThreadInfo> mThreadInfo;
const nsCOMPtr<nsIEventTarget> mThread;
// If this is a JS thread, this is its JSContext, which is required for any
// JS sampling.
JSContext* mContext;
// The profiler needs to start and stop JS sampling of JS threads at various
// times. However, the JS engine can only do the required actions on the
// JS thread itself ("on-thread"), not from another thread ("off-thread").
// Therefore, we have the following two-step process.
//
// - The profiler requests (on-thread or off-thread) that the JS sampling be
// started/stopped, by changing mJSSampling to the appropriate REQUESTED
// state.
//
// - The relevant JS thread polls (on-thread) for changes to mJSSampling.
// When it sees a REQUESTED state, it performs the appropriate actions to
// actually start/stop JS sampling, and changes mJSSampling out of the
// REQUESTED state.
//
// The state machine is as follows.
//
// INACTIVE --> ACTIVE_REQUESTED
// ^ ^ |
// | _/ |
// | _/ |
// | / |
// | v v
// INACTIVE_REQUESTED <-- ACTIVE
//
// The polling is done in the following two ways.
//
// - Via the interrupt callback mechanism; the JS thread must call
// profiler_js_interrupt_callback() from its own interrupt callback.
// This is how sampling must be started/stopped for threads where the
// request was made off-thread.
//
// - When {Start,Stop}JSSampling() is called on-thread, we can immediately
// follow it with a PollJSSampling() call to avoid the delay between the
// two steps. Likewise, setJSContext() calls PollJSSampling().
//
// One non-obvious thing about all this: these JS sampling requests are made
// on all threads, even non-JS threads. mContext needs to also be set (via
// setJSContext(), which can only happen for JS threads) for any JS sampling
// to actually happen.
//
enum {
INACTIVE = 0,
ACTIVE_REQUESTED = 1,
ACTIVE = 2,
INACTIVE_REQUESTED = 3,
} mJSSampling;
uint32_t mJSFlags;
bool TrackOptimizationsEnabled() {
return mJSFlags & uint32_t(JSSamplingFlags::TrackOptimizations);
}
bool JSTracerEnabled() {
return mJSFlags & uint32_t(JSSamplingFlags::TraceLogging);
}
};
#endif // RegisteredThread_h


@ -7,9 +7,8 @@
#ifndef ThreadInfo_h
#define ThreadInfo_h
#include "mozilla/Atomics.h"
#include "mozilla/TimeStamp.h"
#include "nsISupportsImpl.h"
#include "nsString.h"
// This class contains information about a thread which needs to be stored
// across restarts of the profiler and which can be useful even after the
@ -17,32 +16,43 @@
// It uses threadsafe refcounting and only contains immutable data.
class ThreadInfo final {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ThreadInfo)
ThreadInfo(
const char* aName, int aThreadId, bool aIsMainThread,
const mozilla::TimeStamp& aRegisterTime = mozilla::TimeStamp::Now())
: mName(aName),
mRegisterTime(aRegisterTime),
mThreadId(aThreadId),
mIsMainThread(aIsMainThread) {
mIsMainThread(aIsMainThread),
mRefCnt(0) {
// I don't know if we can assert this. But we should warn.
MOZ_ASSERT(aThreadId >= 0, "native thread ID is < 0");
MOZ_ASSERT(aThreadId <= INT32_MAX, "native thread ID is > INT32_MAX");
}
const char* Name() const { return mName.get(); }
// Using hand-rolled ref-counting, because RefCounted.h macros don't produce
// the same code between mozglue and libxul, see bug 1536656.
MFBT_API void AddRef() const { ++mRefCnt; }
MFBT_API void Release() const {
MOZ_ASSERT(int32_t(mRefCnt) > 0);
if (--mRefCnt == 0) {
delete this;
}
}
const char* Name() const { return mName.c_str(); }
mozilla::TimeStamp RegisterTime() const { return mRegisterTime; }
int ThreadId() const { return mThreadId; }
bool IsMainThread() const { return mIsMainThread; }
private:
~ThreadInfo() {}
const nsCString mName;
const std::string mName;
const mozilla::TimeStamp mRegisterTime;
const int mThreadId;
const bool mIsMainThread;
mutable mozilla::Atomic<int32_t, mozilla::MemoryOrdering::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>
mRefCnt;
};
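// Illustrative usage sketch, not part of this change: despite the hand-rolled
// AddRef()/Release(), ThreadInfo is still intended to be held through RefPtr,
// which only needs those two methods (assumes "mozilla/RefPtr.h" is available
// to the includer; the arguments are made up):
//
//   RefPtr<ThreadInfo> info =
//       new ThreadInfo("GeckoMain", profiler_current_thread_id(),
//                      /* aIsMainThread */ true);
//   // The object is deleted when the last RefPtr lets go of it.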
#endif // ThreadInfo_h


@ -14,7 +14,7 @@
# endif
# include "VTuneProfiler.h"
# include "mozilla/Bootstrap.h"
# include <memory>
using namespace std;
@ -26,63 +26,67 @@ void VTuneProfiler::Initialize() {
// If it wasn't, this function always returns 0; otherwise it returns
// incrementing numbers. If the library was found, this wastes 2 events, but
// that should be okay.
__itt_event testEvent =
__itt_event_create("Test event", strlen("Test event"));
testEvent = __itt_event_create("Test event 2", strlen("Test event 2"));
// TODO re-implement here if vtune is needed
// __itt_event testEvent =
// __itt_event_create("Test event", strlen("Test event"));
// testEvent = __itt_event_create("Test event 2", strlen("Test event 2"));
if (testEvent) {
mInstance = new VTuneProfiler();
}
// if (testEvent) {
// mInstance = new VTuneProfiler();
// }
}
void VTuneProfiler::Shutdown() {}
void VTuneProfiler::TraceInternal(const char* aName, TracingKind aKind) {
string str(aName);
// TODO re-implement here if vtune is needed
// string str(aName);
auto iter = mStrings.find(str);
// auto iter = mStrings.find(str);
__itt_event event;
if (iter != mStrings.end()) {
event = iter->second;
} else {
event = __itt_event_create(aName, str.length());
mStrings.insert({str, event});
}
// __itt_event event;
// if (iter != mStrings.end()) {
// event = iter->second;
// } else {
// event = __itt_event_create(aName, str.length());
// mStrings.insert({str, event});
// }
if (aKind == TRACING_INTERVAL_START || aKind == TRACING_EVENT) {
// VTune will consider starts not matched with an end to be single point in
// time events.
__itt_event_start(event);
} else {
__itt_event_end(event);
}
// if (aKind == TRACING_INTERVAL_START || aKind == TRACING_EVENT) {
// // VTune will consider starts not matched with an end to be single point
// // in time events.
// __itt_event_start(event);
// } else {
// __itt_event_end(event);
// }
}
void VTuneProfiler::RegisterThreadInternal(const char* aName) {
string str(aName);
// TODO re-implement here if vtune is needed
// string str(aName);
if (!str.compare("GeckoMain")) {
// Process main thread.
switch (XRE_GetProcessType()) {
case GeckoProcessType::GeckoProcessType_Default:
__itt_thread_set_name("Main Process");
break;
case GeckoProcessType::GeckoProcessType_Content:
__itt_thread_set_name("Content Process");
break;
case GeckoProcessType::GeckoProcessType_GMPlugin:
__itt_thread_set_name("Plugin Process");
break;
case GeckoProcessType::GeckoProcessType_GPU:
__itt_thread_set_name("GPU Process");
break;
default:
__itt_thread_set_name("Unknown Process");
}
return;
}
__itt_thread_set_name(aName);
// if (!str.compare("GeckoMain")) {
// // Process main thread.
// switch (XRE_GetProcessType()) {
// case GeckoProcessType::GeckoProcessType_Default:
// __itt_thread_set_name("Main Process");
// break;
// case GeckoProcessType::GeckoProcessType_Content:
// __itt_thread_set_name("Content Process");
// break;
// case GeckoProcessType::GeckoProcessType_GMPlugin:
// __itt_thread_set_name("Plugin Process");
// break;
// case GeckoProcessType::GeckoProcessType_GPU:
// __itt_thread_set_name("GPU Process");
// break;
// default:
// __itt_thread_set_name("Unknown Process");
// }
// return;
// }
// __itt_thread_set_name(aName);
}
#endif // MOZ_BASE_PROFILER


@ -80,6 +80,12 @@ int profiler_current_thread_id() {
#endif
}
static int64_t MicrosecondsSince1970() {
struct timeval tv;
gettimeofday(&tv, NULL);
return int64_t(tv.tv_sec) * 1000000 + int64_t(tv.tv_usec);
}
void* GetStackTop(void* aGuess) { return aGuess; }
static void PopulateRegsFromContext(Registers& aRegs, ucontext_t* aContext) {
@ -127,9 +133,9 @@ int tgkill(pid_t tgid, pid_t tid, int signalno) {
class PlatformData {
public:
explicit PlatformData(int aThreadId) { MOZ_COUNT_CTOR(PlatformData); }
explicit PlatformData(int aThreadId) {}
~PlatformData() { MOZ_COUNT_DTOR(PlatformData); }
~PlatformData() {}
};
////////////////////////////////////////////////////////////////////////
@ -396,7 +402,7 @@ SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration,
lul->EnableUnwinding();
// Has a test been requested?
if (PR_GetEnv("MOZ_PROFILER_LUL_TEST")) {
if (getenv("MOZ_PROFILER_LUL_TEST")) {
int nTests = 0, nTestsPassed = 0;
RunLulUnitTests(&nTests, &nTestsPassed, lul);
}
@ -468,7 +474,7 @@ void SamplerThread::Stop(PSLockRef aLock) {
static void paf_prepare() {
MOZ_RELEASE_ASSERT(CorePS::Exists());
PSAutoLock lock(gPSMutex);
PSAutoLock lock;
if (ActivePS::Exists(lock)) {
ActivePS::SetWasPaused(lock, ActivePS::IsPaused(lock));
@ -480,7 +486,7 @@ static void paf_prepare() {
static void paf_parent() {
MOZ_RELEASE_ASSERT(CorePS::Exists());
PSAutoLock lock(gPSMutex);
PSAutoLock lock;
if (ActivePS::Exists(lock)) {
ActivePS::SetIsPaused(lock, ActivePS::WasPaused(lock));
@ -503,11 +509,13 @@ static void PlatformInit(PSLockRef aLock) {}
// Context used by synchronous samples. It's safe to have a single one because
// only one synchronous sample can be taken at a time (due to
// profiler_get_backtrace()'s PSAutoLock).
ucontext_t sSyncUContext;
// ucontext_t sSyncUContext;
void Registers::SyncPopulate() {
if (!getcontext(&sSyncUContext)) {
PopulateRegsFromContext(*this, &sSyncUContext);
}
// TODO port getcontext from breakpad, if profiler_get_backtrace is needed.
MOZ_CRASH("profiler_get_backtrace() unsupported");
// if (!getcontext(&sSyncUContext)) {
// PopulateRegsFromContext(*this, &sSyncUContext);
// }
}
#endif


@ -39,6 +39,12 @@ int profiler_current_thread_id() {
return static_cast<int>(static_cast<pid_t>(syscall(SYS_thread_selfid)));
}
static int64_t MicrosecondsSince1970() {
struct timeval tv;
gettimeofday(&tv, NULL);
return int64_t(tv.tv_sec) * 1000000 + int64_t(tv.tv_usec);
}
void* GetStackTop(void* aGuess) {
pthread_t thread = pthread_self();
return pthread_get_stackaddr_np(thread);
@ -46,15 +52,11 @@ void* GetStackTop(void* aGuess) {
class PlatformData {
public:
explicit PlatformData(int aThreadId) : mProfiledThread(mach_thread_self()) {
MOZ_COUNT_CTOR(PlatformData);
}
explicit PlatformData(int aThreadId) : mProfiledThread(mach_thread_self()) {}
~PlatformData() {
// Deallocate Mach port for thread.
mach_port_deallocate(mach_task_self(), mProfiledThread);
MOZ_COUNT_DTOR(PlatformData);
}
thread_act_t ProfiledThread() { return mProfiledThread; }


@ -44,6 +44,21 @@ int profiler_current_thread_id() {
return int(threadId);
}
static int64_t MicrosecondsSince1970() {
int64_t prt;
FILETIME ft;
SYSTEMTIME st;
GetSystemTime(&st);
SystemTimeToFileTime(&st, &ft);
static_assert(sizeof(ft) == sizeof(prt), "Expect FILETIME to be 64 bits");
memcpy(&prt, &ft, sizeof(prt));
const int64_t epochBias = 116444736000000000LL;
prt = (prt - epochBias) / 10;
return prt;
}
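// Arithmetic note (added for clarity, not part of this change): FILETIME
// counts 100-nanosecond intervals since 1601-01-01. The gap between that
// epoch and the Unix epoch is 11644473600 seconds, i.e.
// 11644473600 * 10000000 = 116444736000000000 intervals -- the epochBias
// above. Dividing the re-based count by 10 converts 100ns units to
// microseconds.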
void* GetStackTop(void* aGuess) {
PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
return reinterpret_cast<void*>(pTib->StackBase);
@ -78,16 +93,13 @@ class PlatformData {
explicit PlatformData(int aThreadId)
: mProfiledThread(OpenThread(THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME |
THREAD_QUERY_INFORMATION,
false, aThreadId)) {
MOZ_COUNT_CTOR(PlatformData);
}
false, aThreadId)) {}
~PlatformData() {
if (mProfiledThread != nullptr) {
CloseHandle(mProfiledThread);
mProfiledThread = nullptr;
}
MOZ_COUNT_DTOR(PlatformData);
}
HANDLE ProfiledThread() { return mProfiledThread; }

Diff not shown because of its large size.


@ -34,34 +34,62 @@
#include "BaseProfiler.h"
#include "mozilla/Logging.h"
#include "mozilla/PlatformMutex.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Vector.h"
#include "nsString.h"
#include <functional>
#include <stdint.h>
#include <string>
extern mozilla::LazyLogModule gProfilerLog;
bool BaseProfilerLogTest(int aLevelToTest);
// These are for MOZ_LOG="prof:3" or higher. It's the default logging level for
// the profiler, and should be used sparingly.
#define LOG_TEST MOZ_LOG_TEST(gProfilerLog, mozilla::LogLevel::Info)
#define LOG(arg, ...) \
MOZ_LOG(gProfilerLog, mozilla::LogLevel::Info, \
("[%d] " arg, profiler_current_process_id(), ##__VA_ARGS__))
// These are for MOZ_BASE_PROFILER_LOGGING and above. It's the default logging
// level for the profiler, and should be used sparingly.
#define LOG_TEST BaseProfilerLogTest(3)
#define LOG(arg, ...) \
do { \
if (LOG_TEST) { \
printf("[I %d/%d] " arg "\n", profiler_current_process_id(), \
profiler_current_thread_id(), ##__VA_ARGS__); \
} \
} while (0)
// These are for MOZ_LOG="prof:4" or higher. It should be used for logging that
// is somewhat more verbose than LOG.
#define DEBUG_LOG_TEST MOZ_LOG_TEST(gProfilerLog, mozilla::LogLevel::Debug)
#define DEBUG_LOG(arg, ...) \
MOZ_LOG(gProfilerLog, mozilla::LogLevel::Debug, \
("[%d] " arg, profiler_current_process_id(), ##__VA_ARGS__))
// These are for MOZ_BASE_PROFILER_DEBUG_LOGGING. It should be used for logging
// that is somewhat more verbose than LOG.
#define DEBUG_LOG_TEST BaseProfilerLogTest(4)
#define DEBUG_LOG(arg, ...) \
do { \
if (DEBUG_LOG_TEST) { \
printf("[D %d/%d] " arg "\n", profiler_current_process_id(), \
profiler_current_thread_id(), ##__VA_ARGS__); \
} \
} while (0)
// These are for MOZ_BASE_PROFILER_VERBOSE_LOGGING. It should be used for
// logging that is somewhat more verbose than DEBUG_LOG.
#define VERBOSE_LOG_TEST BaseProfilerLogTest(5)
#define VERBOSE_LOG(arg, ...) \
do { \
if (VERBOSE_LOG_TEST) { \
printf("[V %d/%d] " arg "\n", profiler_current_process_id(), \
profiler_current_thread_id(), ##__VA_ARGS__); \
} \
} while (0)
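// Illustrative usage sketch, not part of this change (the messages and
// variables are made up). Assuming the numeric levels nest as usual, setting
// MOZ_BASE_PROFILER_DEBUG_LOGGING enables LOG and DEBUG_LOG, and
// MOZ_BASE_PROFILER_VERBOSE_LOGGING enables all three:
//
//   LOG("Profiler started with %u entries", aEntries);
//   DEBUG_LOG("Registering thread %d", profiler_current_thread_id());
//   VERBOSE_LOG("Sample took %.2f ms", sampleDurationMs);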
// Thin shell around mozglue PlatformMutex, for Base Profiler internal use.
// Does not preserve behavior in JS record/replay.
class PSMutex : private mozilla::detail::MutexImpl {
public:
PSMutex()
: mozilla::detail::MutexImpl(
mozilla::recordreplay::Behavior::DontPreserve) {}
void Lock() { mozilla::detail::MutexImpl::lock(); }
void Unlock() { mozilla::detail::MutexImpl::unlock(); }
};
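// Illustrative sketch, not part of this change: PSMutex is meant to be used
// through a RAII guard. The real PSAutoLock in platform.cpp is
// default-constructed and locks the global profiler mutex; the hypothetical
// variant below takes the mutex explicitly, just to show the pattern.
class PSMutexAutoLockSketch {
 public:
  explicit PSMutexAutoLockSketch(PSMutex& aMutex) : mMutex(aMutex) {
    mMutex.Lock();
  }
  ~PSMutexAutoLockSketch() { mMutex.Unlock(); }
  PSMutexAutoLockSketch(const PSMutexAutoLockSketch&) = delete;
  PSMutexAutoLockSketch& operator=(const PSMutexAutoLockSketch&) = delete;

 private:
  PSMutex& mMutex;  // Held for the lifetime of this guard.
};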
typedef uint8_t* Address;
// ----------------------------------------------------------------------------
// Miscellaneous
class PlatformData;
// We can't new/delete the type safely without defining it
@ -74,11 +102,6 @@ typedef mozilla::UniquePtr<PlatformData, PlatformDataDestructor>
UniquePlatformData;
UniquePlatformData AllocPlatformData(int aThreadId);
namespace mozilla {
class JSONWriter;
}
void AppendSharedLibraries(mozilla::JSONWriter& aWriter);
// Convert the array of strings to a bitfield.
uint32_t ParseFeaturesFromStringArray(const char** aFeatures,
uint32_t aFeatureCount,
@ -96,10 +119,10 @@ enum class JSSamplingFlags {
};
// Record an exit profile from a child process.
void profiler_received_exit_profile(const nsCString& aExitProfile);
void profiler_received_exit_profile(const std::string& aExitProfile);
// Extract all received exit profiles that have not yet expired (i.e., they
// still intersect with this process' buffer range).
mozilla::Vector<nsCString> profiler_move_exit_profiles();
mozilla::Vector<std::string> profiler_move_exit_profiles();
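// Illustrative call pattern for the two declarations above, not part of this
// change (the JSON string and the helper are placeholders):
//
//   // When a child process is about to shut down:
//   profiler_received_exit_profile(childExitProfileJSON);
//
//   // Later, while streaming the parent profile:
//   for (const std::string& exitProfile : profiler_move_exit_profiles()) {
//     AppendExitProfile(aWriter, exitProfile);  // hypothetical helper
//   }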
#endif /* ndef TOOLS_PLATFORM_H_ */


@ -21,15 +21,17 @@
# include "platform.h"
# include "mozilla/Sprintf.h"
# include "mozilla/Unused.h"
# include "nsDebug.h"
# include "nsNativeCharsetUtils.h"
# include <nsTArray.h>
# include "common/linux/file_id.h"
# include <algorithm>
# include <arpa/inet.h>
# include <dlfcn.h>
# include <elf.h>
# include <fcntl.h>
# include <features.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <sys/types.h>
# include <vector>
# if defined(GP_OS_linux)
# include <link.h> // dl_phdr_info
@ -43,6 +45,594 @@ extern "C" MOZ_EXPORT __attribute__((weak)) int dl_iterate_phdr(
# error "Unexpected configuration"
# endif
// ----------------------------------------------------------------------------
// Starting imports from toolkit/crashreporter/google-breakpad/, as needed by
// this file when moved to mozglue.
// Imported from
// toolkit/crashreporter/google-breakpad/src/common/memory_range.h.
// A lightweight wrapper with a pointer and a length to encapsulate a contiguous
// range of memory. It provides helper methods for checked access of a subrange
// of the memory. Its implementation does not allocate memory or call into
// libc functions, and is thus safer to use in a crashed environment.
class MemoryRange {
public:
MemoryRange() : data_(NULL), length_(0) {}
MemoryRange(const void* data, size_t length) { Set(data, length); }
// Returns true if this memory range contains no data.
bool IsEmpty() const {
// Set() guarantees that |length_| is zero if |data_| is NULL.
return length_ == 0;
}
// Resets to an empty range.
void Reset() {
data_ = NULL;
length_ = 0;
}
// Sets this memory range to point to |data| and its length to |length|.
void Set(const void* data, size_t length) {
data_ = reinterpret_cast<const uint8_t*>(data);
// Always set |length_| to zero if |data_| is NULL.
length_ = data ? length : 0;
}
// Returns true if this range covers a subrange of |sub_length| bytes
// at |sub_offset| bytes of this memory range, or false otherwise.
bool Covers(size_t sub_offset, size_t sub_length) const {
// The following checks verify that:
// 1. sub_offset is within [ 0 .. length_ - 1 ]
// 2. sub_offset + sub_length is within
// [ sub_offset .. length_ ]
return sub_offset < length_ && sub_offset + sub_length >= sub_offset &&
sub_offset + sub_length <= length_;
}
// Returns a raw data pointer to a subrange of |sub_length| bytes at
// |sub_offset| bytes of this memory range, or NULL if the subrange
// is out of bounds.
const void* GetData(size_t sub_offset, size_t sub_length) const {
return Covers(sub_offset, sub_length) ? (data_ + sub_offset) : NULL;
}
// Same as the two-argument version of GetData() but uses sizeof(DataType)
// as the subrange length and returns an |DataType| pointer for convenience.
template <typename DataType>
const DataType* GetData(size_t sub_offset) const {
return reinterpret_cast<const DataType*>(
GetData(sub_offset, sizeof(DataType)));
}
// Returns a raw pointer to the |element_index|-th element of an array
// of elements of length |element_size| starting at |sub_offset| bytes
// of this memory range, or NULL if the element is out of bounds.
const void* GetArrayElement(size_t element_offset, size_t element_size,
unsigned element_index) const {
size_t sub_offset = element_offset + element_index * element_size;
return GetData(sub_offset, element_size);
}
// Same as the three-argument version of GetArrayElement() but deduces
// the element size using sizeof(ElementType) and returns an |ElementType|
// pointer for convenience.
template <typename ElementType>
const ElementType* GetArrayElement(size_t element_offset,
unsigned element_index) const {
return reinterpret_cast<const ElementType*>(
GetArrayElement(element_offset, sizeof(ElementType), element_index));
}
// Returns a subrange of |sub_length| bytes at |sub_offset| bytes of
// this memory range, or an empty range if the subrange is out of bounds.
MemoryRange Subrange(size_t sub_offset, size_t sub_length) const {
return Covers(sub_offset, sub_length)
? MemoryRange(data_ + sub_offset, sub_length)
: MemoryRange();
}
// Returns a pointer to the beginning of this memory range.
const uint8_t* data() const { return data_; }
// Returns the length, in bytes, of this memory range.
size_t length() const { return length_; }
private:
// Pointer to the beginning of this memory range.
const uint8_t* data_;
// Length, in bytes, of this memory range.
size_t length_;
};
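// Illustrative sketch, not part of this change: typical checked access through
// MemoryRange (the byte values are made up).
static bool MemoryRangeUsageSketch() {
  const uint8_t bytes[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  MemoryRange range(bytes, sizeof(bytes));
  // In bounds: points at bytes[4..7], reinterpreted as a uint32_t.
  const uint32_t* word = range.GetData<uint32_t>(4);
  // Out of bounds: Covers(4, 8) fails, so GetData() returns NULL.
  const uint64_t* none = range.GetData<uint64_t>(4);
  return word != NULL && none == NULL;
}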
// Imported from
// toolkit/crashreporter/google-breakpad/src/common/linux/memory_mapped_file.h
// and inlined .cc.
// A utility class for mapping a file into memory for read-only access of the
// file content. Its implementation avoids calling into libc functions by
// directly making system calls for open, close, mmap, and munmap.
class MemoryMappedFile {
public:
MemoryMappedFile() {}
// Constructor that calls Map() to map a file at |path| into memory.
// If Map() fails, the object behaves as if it is default constructed.
MemoryMappedFile(const char* path, size_t offset) { Map(path, offset); }
MemoryMappedFile(const MemoryMappedFile&) = delete;
MemoryMappedFile& operator=(const MemoryMappedFile&) = delete;
~MemoryMappedFile() { Unmap(); }
// Maps a file at |path| into memory, which can then be accessed via
// content() as a MemoryRange object or via data(), and returns true on
// success. Mapping an empty file will succeed but with data() and size()
// returning NULL and 0, respectively. An existing mapping is unmapped
// before a new mapping is created.
bool Map(const char* path, size_t offset) {
Unmap();
int fd = open(path, O_RDONLY, 0);
if (fd == -1) {
return false;
}
# if defined(__x86_64__) || defined(__aarch64__) || \
(defined(__mips__) && _MIPS_SIM == _ABI64)
struct stat st;
if (fstat(fd, &st) == -1 || st.st_size < 0) {
# else
struct stat64 st;
if (fstat64(fd, &st) == -1 || st.st_size < 0) {
# endif
close(fd);
return false;
}
// Strangely file size can be negative, but we check above that it is not.
size_t file_len = static_cast<size_t>(st.st_size);
// If the file does not extend beyond the offset, simply use an empty
// MemoryRange and return true. Don't bother to call mmap()
// even though mmap() can handle an empty file on some platforms.
if (offset >= file_len) {
close(fd);
return true;
}
void* data = mmap(NULL, file_len, PROT_READ, MAP_PRIVATE, fd, offset);
close(fd);
if (data == MAP_FAILED) {
return false;
}
content_.Set(data, file_len - offset);
return true;
}
// Unmaps the memory for the mapped file. It's a no-op if no file is
// mapped.
void Unmap() {
if (content_.data()) {
munmap(const_cast<uint8_t*>(content_.data()), content_.length());
content_.Set(NULL, 0);
}
}
// Returns a MemoryRange object that covers the memory for the mapped
// file. The MemoryRange object is empty if no file is mapped.
const MemoryRange& content() const { return content_; }
// Returns a pointer to the beginning of the memory for the mapped file.
// or NULL if no file is mapped or the mapped file is empty.
const void* data() const { return content_.data(); }
// Returns the size in bytes of the mapped file, or zero if no file
// is mapped.
size_t size() const { return content_.length(); }
private:
// Mapped file content as a MemoryRange object.
MemoryRange content_;
};
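// Illustrative sketch, not part of this change: mapping a file and slicing its
// contents without copying ("/usr/lib/libexample.so" is a made-up path).
static bool MemoryMappedFileUsageSketch() {
  MemoryMappedFile mapped("/usr/lib/libexample.so", /* offset */ 0);
  if (!mapped.data()) {
    return false;  // open()/mmap() failed, or the file was empty.
  }
  // Look at the first 64 bytes, e.g. enough for an ELF header check.
  MemoryRange header = mapped.content().Subrange(0, 64);
  return !header.IsEmpty();
}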
// Imported from
// toolkit/crashreporter/google-breakpad/src/common/linux/file_id.h and inlined
// .cc.
// GNU binutils' ld defaults to 'sha1', which is 160 bits == 20 bytes,
// so this is enough to fit that, which most binaries will use.
// This is just a sensible default for vectors so most callers can get away with
// stack allocation.
static const size_t kDefaultBuildIdSize = 20;
// Used in a few places for backwards-compatibility.
typedef struct {
uint32_t data1;
uint16_t data2;
uint16_t data3;
uint8_t data4[8];
} MDGUID; /* GUID */
const size_t kMDGUIDSize = sizeof(MDGUID);
class FileID {
public:
explicit FileID(const char* path) : path_(path) {}
~FileID() {}
// Load the identifier for the elf file path specified in the constructor into
// |identifier|.
//
// The current implementation will look for a .note.gnu.build-id
// section and use that as the file id, otherwise it falls back to
// XORing the first 4096 bytes of the .text section to generate an identifier.
bool ElfFileIdentifier(std::vector<uint8_t>& identifier) {
MemoryMappedFile mapped_file(path_.c_str(), 0);
if (!mapped_file.data()) // Should probably check if size >= ElfW(Ehdr)?
return false;
return ElfFileIdentifierFromMappedFile(mapped_file.data(), identifier);
}
// Traits classes so consumers can write templatized code to deal
// with specific ELF bits.
struct ElfClass32 {
typedef Elf32_Addr Addr;
typedef Elf32_Ehdr Ehdr;
typedef Elf32_Nhdr Nhdr;
typedef Elf32_Phdr Phdr;
typedef Elf32_Shdr Shdr;
typedef Elf32_Half Half;
typedef Elf32_Off Off;
typedef Elf32_Sym Sym;
typedef Elf32_Word Word;
static const int kClass = ELFCLASS32;
static const uint16_t kMachine = EM_386;
static const size_t kAddrSize = sizeof(Elf32_Addr);
static constexpr const char* kMachineName = "x86";
};
struct ElfClass64 {
typedef Elf64_Addr Addr;
typedef Elf64_Ehdr Ehdr;
typedef Elf64_Nhdr Nhdr;
typedef Elf64_Phdr Phdr;
typedef Elf64_Shdr Shdr;
typedef Elf64_Half Half;
typedef Elf64_Off Off;
typedef Elf64_Sym Sym;
typedef Elf64_Word Word;
static const int kClass = ELFCLASS64;
static const uint16_t kMachine = EM_X86_64;
static const size_t kAddrSize = sizeof(Elf64_Addr);
static constexpr const char* kMachineName = "x86_64";
};
// Internal helper method, exposed for convenience for callers
// that already have more info.
template <typename ElfClass>
static const typename ElfClass::Shdr* FindElfSectionByName(
const char* name, typename ElfClass::Word section_type,
const typename ElfClass::Shdr* sections, const char* section_names,
const char* names_end, int nsection) {
if (!name || !sections || nsection == 0) {
return NULL;
}
int name_len = strlen(name);
if (name_len == 0) return NULL;
for (int i = 0; i < nsection; ++i) {
const char* section_name = section_names + sections[i].sh_name;
if (sections[i].sh_type == section_type &&
names_end - section_name >= name_len + 1 &&
strcmp(name, section_name) == 0) {
return sections + i;
}
}
return NULL;
}
struct ElfSegment {
const void* start;
size_t size;
};
// Convert an offset from an Elf header into a pointer to the mapped
// address in the current process. Takes an extra template parameter
// to specify the return type to avoid having to dynamic_cast the
// result.
template <typename ElfClass, typename T>
static const T* GetOffset(const typename ElfClass::Ehdr* elf_header,
typename ElfClass::Off offset) {
return reinterpret_cast<const T*>(reinterpret_cast<uintptr_t>(elf_header) +
offset);
}
// ELF note name and desc are 32-bits word padded.
# define NOTE_PADDING(a) ((a + 3) & ~3)
static bool ElfClassBuildIDNoteIdentifier(const void* section, size_t length,
std::vector<uint8_t>& identifier) {
static_assert(sizeof(ElfClass32::Nhdr) == sizeof(ElfClass64::Nhdr),
"Elf32_Nhdr and Elf64_Nhdr should be the same");
typedef typename ElfClass32::Nhdr Nhdr;
const void* section_end = reinterpret_cast<const char*>(section) + length;
const Nhdr* note_header = reinterpret_cast<const Nhdr*>(section);
while (reinterpret_cast<const void*>(note_header) < section_end) {
if (note_header->n_type == NT_GNU_BUILD_ID) break;
note_header = reinterpret_cast<const Nhdr*>(
reinterpret_cast<const char*>(note_header) + sizeof(Nhdr) +
NOTE_PADDING(note_header->n_namesz) +
NOTE_PADDING(note_header->n_descsz));
}
if (reinterpret_cast<const void*>(note_header) >= section_end ||
note_header->n_descsz == 0) {
return false;
}
const uint8_t* build_id = reinterpret_cast<const uint8_t*>(note_header) +
sizeof(Nhdr) +
NOTE_PADDING(note_header->n_namesz);
identifier.insert(identifier.end(), build_id,
build_id + note_header->n_descsz);
return true;
}
template <typename ElfClass>
static bool FindElfClassSection(const char* elf_base,
const char* section_name,
typename ElfClass::Word section_type,
const void** section_start,
size_t* section_size) {
typedef typename ElfClass::Ehdr Ehdr;
typedef typename ElfClass::Shdr Shdr;
if (!elf_base || !section_start || !section_size) {
return false;
}
if (strncmp(elf_base, ELFMAG, SELFMAG) != 0) {
return false;
}
const Ehdr* elf_header = reinterpret_cast<const Ehdr*>(elf_base);
if (elf_header->e_ident[EI_CLASS] != ElfClass::kClass) {
return false;
}
const Shdr* sections =
GetOffset<ElfClass, Shdr>(elf_header, elf_header->e_shoff);
const Shdr* section_names = sections + elf_header->e_shstrndx;
const char* names =
GetOffset<ElfClass, char>(elf_header, section_names->sh_offset);
const char* names_end = names + section_names->sh_size;
const Shdr* section =
FindElfSectionByName<ElfClass>(section_name, section_type, sections,
names, names_end, elf_header->e_shnum);
if (section != NULL && section->sh_size > 0) {
*section_start = elf_base + section->sh_offset;
*section_size = section->sh_size;
}
return true;
}
template <typename ElfClass>
static bool FindElfClassSegment(const char* elf_base,
typename ElfClass::Word segment_type,
std::vector<ElfSegment>* segments) {
typedef typename ElfClass::Ehdr Ehdr;
typedef typename ElfClass::Phdr Phdr;
if (!elf_base || !segments) {
return false;
}
if (strncmp(elf_base, ELFMAG, SELFMAG) != 0) {
return false;
}
const Ehdr* elf_header = reinterpret_cast<const Ehdr*>(elf_base);
if (elf_header->e_ident[EI_CLASS] != ElfClass::kClass) {
return false;
}
const Phdr* phdrs =
GetOffset<ElfClass, Phdr>(elf_header, elf_header->e_phoff);
for (int i = 0; i < elf_header->e_phnum; ++i) {
if (phdrs[i].p_type == segment_type) {
ElfSegment seg = {};
seg.start = elf_base + phdrs[i].p_offset;
seg.size = phdrs[i].p_filesz;
segments->push_back(seg);
}
}
return true;
}
static bool IsValidElf(const void* elf_base) {
return strncmp(reinterpret_cast<const char*>(elf_base), ELFMAG, SELFMAG) ==
0;
}
static int ElfClass(const void* elf_base) {
const ElfW(Ehdr)* elf_header =
reinterpret_cast<const ElfW(Ehdr)*>(elf_base);
return elf_header->e_ident[EI_CLASS];
}
static bool FindElfSection(const void* elf_mapped_base,
const char* section_name, uint32_t section_type,
const void** section_start, size_t* section_size) {
if (!elf_mapped_base || !section_start || !section_size) {
return false;
}
*section_start = NULL;
*section_size = 0;
if (!IsValidElf(elf_mapped_base)) return false;
int cls = ElfClass(elf_mapped_base);
const char* elf_base = static_cast<const char*>(elf_mapped_base);
if (cls == ELFCLASS32) {
return FindElfClassSection<ElfClass32>(elf_base, section_name,
section_type, section_start,
section_size) &&
*section_start != NULL;
} else if (cls == ELFCLASS64) {
return FindElfClassSection<ElfClass64>(elf_base, section_name,
section_type, section_start,
section_size) &&
*section_start != NULL;
}
return false;
}
static bool FindElfSegments(const void* elf_mapped_base,
uint32_t segment_type,
std::vector<ElfSegment>* segments) {
if (!elf_mapped_base || !segments) {
return false;
}
if (!IsValidElf(elf_mapped_base)) return false;
int cls = ElfClass(elf_mapped_base);
const char* elf_base = static_cast<const char*>(elf_mapped_base);
if (cls == ELFCLASS32) {
return FindElfClassSegment<ElfClass32>(elf_base, segment_type, segments);
} else if (cls == ELFCLASS64) {
return FindElfClassSegment<ElfClass64>(elf_base, segment_type, segments);
}
return false;
}
// Attempt to locate a .note.gnu.build-id section in an ELF binary
// and copy it into |identifier|.
static bool FindElfBuildIDNote(const void* elf_mapped_base,
std::vector<uint8_t>& identifier) {
// lld normally creates 2 PT_NOTEs, gold normally creates 1.
std::vector<ElfSegment> segs;
if (FindElfSegments(elf_mapped_base, PT_NOTE, &segs)) {
for (ElfSegment& seg : segs) {
if (ElfClassBuildIDNoteIdentifier(seg.start, seg.size, identifier)) {
return true;
}
}
}
void* note_section;
size_t note_size;
if (FindElfSection(elf_mapped_base, ".note.gnu.build-id", SHT_NOTE,
(const void**)&note_section, &note_size)) {
return ElfClassBuildIDNoteIdentifier(note_section, note_size, identifier);
}
return false;
}
// Attempt to locate the .text section of an ELF binary and generate
// a simple hash by XORing the first page worth of bytes into |identifier|.
static bool HashElfTextSection(const void* elf_mapped_base,
std::vector<uint8_t>& identifier) {
identifier.resize(kMDGUIDSize);
void* text_section;
size_t text_size;
if (!FindElfSection(elf_mapped_base, ".text", SHT_PROGBITS,
(const void**)&text_section, &text_size) ||
text_size == 0) {
return false;
}
// Only provide |kMDGUIDSize| bytes to keep identifiers produced by this
// function backwards-compatible.
memset(&identifier[0], 0, kMDGUIDSize);
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(text_section);
const uint8_t* ptr_end =
ptr + std::min(text_size, static_cast<size_t>(4096));
while (ptr < ptr_end) {
for (unsigned i = 0; i < kMDGUIDSize; i++) identifier[i] ^= ptr[i];
ptr += kMDGUIDSize;
}
return true;
}
// Load the identifier for the elf file mapped into memory at |base| into
// |identifier|. Return false if the identifier could not be created for this
// file.
static bool ElfFileIdentifierFromMappedFile(
const void* base, std::vector<uint8_t>& identifier) {
// Look for a build id note first.
if (FindElfBuildIDNote(base, identifier)) return true;
// Fall back on hashing the first page of the text section.
return HashElfTextSection(base, identifier);
}
// These three functions are not ever called in an unsafe context, so it's OK
// to allocate memory and use libc.
static std::string bytes_to_hex_string(const uint8_t* bytes, size_t count) {
std::string result;
for (unsigned int idx = 0; idx < count; ++idx) {
char buf[3];
SprintfLiteral(buf, "%02X", bytes[idx]);
result.append(buf);
}
return result;
}
// Convert the |identifier| data to a string. The string will
// be formatted as a UUID in all uppercase without dashes.
// (e.g., 22F065BBFC9C49F780FE26A7CEBD7BCE).
static std::string ConvertIdentifierToUUIDString(
const std::vector<uint8_t>& identifier) {
uint8_t identifier_swapped[kMDGUIDSize] = {0};
// Endian-ness swap to match dump processor expectation.
memcpy(identifier_swapped, &identifier[0],
std::min(kMDGUIDSize, identifier.size()));
uint32_t* data1 = reinterpret_cast<uint32_t*>(identifier_swapped);
*data1 = htonl(*data1);
uint16_t* data2 = reinterpret_cast<uint16_t*>(identifier_swapped + 4);
*data2 = htons(*data2);
uint16_t* data3 = reinterpret_cast<uint16_t*>(identifier_swapped + 6);
*data3 = htons(*data3);
return bytes_to_hex_string(identifier_swapped, kMDGUIDSize);
}
// Convert the entire |identifier| data to a hex string.
static std::string ConvertIdentifierToString(
const std::vector<uint8_t>& identifier) {
return bytes_to_hex_string(&identifier[0], identifier.size());
}
private:
// Storage for the path specified
std::string path_;
};
// End of imports from toolkit/crashreporter/google-breakpad/.
// ----------------------------------------------------------------------------
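// Illustrative sketch, not part of this change: how the imported FileID class
// is meant to be driven; getId() further below does essentially this. The
// library path is made up.
static std::string BreakpadIdForBinarySketch() {
  std::vector<uint8_t> identifier;
  identifier.reserve(kDefaultBuildIdSize);
  FileID fileId("/usr/lib/libexample.so");
  if (!fileId.ElfFileIdentifier(identifier)) {
    return {};  // Neither a build-id note nor a hashable .text section.
  }
  // 33 characters: 32 hex digits from the GUID-sized id, plus a trailing
  // breakpad "age" of 0.
  return FileID::ConvertIdentifierToUUIDString(identifier) + "0";
}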
struct LoadedLibraryInfo {
LoadedLibraryInfo(const char* aName, unsigned long aBaseAddress,
unsigned long aFirstMappingStart,
@ -52,34 +642,28 @@ struct LoadedLibraryInfo {
mFirstMappingStart(aFirstMappingStart),
mLastMappingEnd(aLastMappingEnd) {}
nsCString mName;
std::string mName;
unsigned long mBaseAddress;
unsigned long mFirstMappingStart;
unsigned long mLastMappingEnd;
};
# if defined(GP_OS_android)
static void outputMapperLog(const char* aBuf) { LOG("%s", aBuf); }
static void outputMapperLog(const char* aBuf) { /* LOG("%s", aBuf); */
}
# endif
static nsCString IDtoUUIDString(
const google_breakpad::wasteful_vector<uint8_t>& aIdentifier) {
using namespace google_breakpad;
nsCString uuid;
const std::string str = FileID::ConvertIdentifierToUUIDString(aIdentifier);
uuid.Append(str.c_str(), str.size());
static std::string IDtoUUIDString(const std::vector<uint8_t>& aIdentifier) {
std::string uuid = FileID::ConvertIdentifierToUUIDString(aIdentifier);
// This is '0', not '\0', since it represents the breakpad id age.
uuid.Append('0');
uuid += '0';
return uuid;
}
// Get the breakpad Id for the binary file pointed by bin_name
static nsCString getId(const char* bin_name) {
using namespace google_breakpad;
PageAllocator allocator;
auto_wasteful_vector<uint8_t, kDefaultBuildIdSize> identifier(&allocator);
static std::string getId(const char* bin_name) {
std::vector<uint8_t> identifier;
identifier.reserve(kDefaultBuildIdSize);
# if defined(GP_OS_android)
if (nsDependentCString(bin_name).Find("!/") != kNotFound) {
@ -99,30 +683,26 @@ static nsCString getId(const char* bin_name) {
return IDtoUUIDString(identifier);
}
return EmptyCString();
return {};
}
static SharedLibrary SharedLibraryAtPath(const char* path,
unsigned long libStart,
unsigned long libEnd,
unsigned long offset = 0) {
nsAutoString pathStr;
mozilla::Unused << NS_WARN_IF(
NS_FAILED(NS_CopyNativeToUnicode(nsDependentCString(path), pathStr)));
std::string pathStr = path;
nsAutoString nameStr = pathStr;
int32_t pos = nameStr.RFindChar('/');
if (pos != kNotFound) {
nameStr.Cut(0, pos + 1);
}
size_t pos = pathStr.rfind('/');
std::string nameStr =
(pos != std::string::npos) ? pathStr.substr(pos + 1) : pathStr;
return SharedLibrary(libStart, libEnd, offset, getId(path), nameStr, pathStr,
nameStr, pathStr, EmptyCString(), "");
nameStr, pathStr, std::string{}, "");
}
static int dl_iterate_callback(struct dl_phdr_info* dl_info, size_t size,
void* data) {
auto libInfoList = reinterpret_cast<nsTArray<LoadedLibraryInfo>*>(data);
auto libInfoList = reinterpret_cast<std::vector<LoadedLibraryInfo>*>(data);
if (dl_info->dlpi_phnum <= 0) return 0;
@ -144,8 +724,8 @@ static int dl_iterate_callback(struct dl_phdr_info* dl_info, size_t size,
}
}
libInfoList->AppendElement(LoadedLibraryInfo(
dl_info->dlpi_name, baseAddress, firstMappingStart, lastMappingEnd));
libInfoList->push_back(LoadedLibraryInfo(dl_info->dlpi_name, baseAddress,
firstMappingStart, lastMappingEnd));
return 0;
}
@ -164,7 +744,7 @@ SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
// readlink failed for whatever reason. Note this, but keep going.
exeName[0] = '\0';
exeNameLen = 0;
LOG("SharedLibraryInfo::GetInfoForSelf(): readlink failed");
// LOG("SharedLibraryInfo::GetInfoForSelf(): readlink failed");
} else {
// Assert no buffer overflow.
MOZ_RELEASE_ASSERT(exeNameLen >= 0 &&
@ -206,8 +786,8 @@ SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
continue;
}
if (ret != 5 && ret != 4) {
LOG("SharedLibraryInfo::GetInfoForSelf(): "
"reading /proc/self/maps failed");
// LOG("SharedLibraryInfo::GetInfoForSelf(): "
// "reading /proc/self/maps failed");
continue;
}
@ -223,22 +803,22 @@ SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
info.AddSharedLibrary(
SharedLibraryAtPath(modulePath, start, end, offset));
if (info.GetSize() > 10000) {
LOG("SharedLibraryInfo::GetInfoForSelf(): "
"implausibly large number of mappings acquired");
// LOG("SharedLibraryInfo::GetInfoForSelf(): "
// "implausibly large number of mappings acquired");
break;
}
}
# endif
}
nsTArray<LoadedLibraryInfo> libInfoList;
std::vector<LoadedLibraryInfo> libInfoList;
// We collect the bulk of the library info using dl_iterate_phdr.
dl_iterate_phdr(dl_iterate_callback, &libInfoList);
for (const auto& libInfo : libInfoList) {
info.AddSharedLibrary(
SharedLibraryAtPath(libInfo.mName.get(), libInfo.mFirstMappingStart,
SharedLibraryAtPath(libInfo.mName.c_str(), libInfo.mFirstMappingStart,
libInfo.mLastMappingEnd,
libInfo.mFirstMappingStart - libInfo.mBaseAddress));
}
@ -250,7 +830,7 @@ SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
// executable's name to that entry.
for (size_t i = 0; i < info.GetSize(); i++) {
SharedLibrary& lib = info.GetMutableEntry(i);
if (lib.GetStart() == exeExeAddr && lib.GetNativeDebugPath().empty()) {
if (lib.GetStart() == exeExeAddr && lib.GetDebugPath().empty()) {
lib = SharedLibraryAtPath(exeName, lib.GetStart(), lib.GetEnd(),
lib.GetOffset());


@ -9,10 +9,9 @@
# include "BaseProfilerSharedLibraries.h"
# include "ClearOnShutdown.h"
# include "mozilla/StaticMutex.h"
# include "platform.h"
# include "mozilla/Unused.h"
# include "nsNativeCharsetUtils.h"
# include <AvailabilityMacros.h>
# include <dlfcn.h>
@ -49,7 +48,21 @@ struct NativeSharedLibrary {
std::string path;
};
static std::vector<NativeSharedLibrary>* sSharedLibrariesList = nullptr;
static mozilla::StaticMutex sSharedLibrariesMutex;
class MOZ_RAII SharedLibrariesLock {
public:
SharedLibrariesLock() { sSharedLibrariesMutex.Lock(); }
~SharedLibrariesLock() { sSharedLibrariesMutex.Unlock(); }
SharedLibrariesLock(const SharedLibrariesLock&) = delete;
void operator=(const SharedLibrariesLock&) = delete;
private:
static mozilla::baseprofiler::PSMutex sSharedLibrariesMutex;
};
mozilla::baseprofiler::PSMutex SharedLibrariesLock::sSharedLibrariesMutex;
static void SharedLibraryAddImage(const struct mach_header* mh,
intptr_t vmaddr_slide) {
@ -63,7 +76,7 @@ static void SharedLibraryAddImage(const struct mach_header* mh,
return;
}
mozilla::StaticMutexAutoLock lock(sSharedLibrariesMutex);
SharedLibrariesLock lock;
if (!sSharedLibrariesList) {
return;
}
@ -79,7 +92,7 @@ static void SharedLibraryRemoveImage(const struct mach_header* mh,
// it to the right type here.
auto header = reinterpret_cast<const platform_mach_header*>(mh);
mozilla::StaticMutexAutoLock lock(sSharedLibrariesMutex);
SharedLibrariesLock lock;
if (!sSharedLibrariesList) {
return;
}
@ -131,54 +144,37 @@ static void addSharedLibrary(const platform_mach_header* header,
reinterpret_cast<const char*>(cmd) + cmd->cmdsize);
}
nsAutoCString uuid;
std::string uuid;
if (uuid_bytes != nullptr) {
uuid.AppendPrintf(
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"%02X"
"0" /* breakpad id age */,
uuid_bytes[0], uuid_bytes[1], uuid_bytes[2], uuid_bytes[3],
uuid_bytes[4], uuid_bytes[5], uuid_bytes[6], uuid_bytes[7],
uuid_bytes[8], uuid_bytes[9], uuid_bytes[10], uuid_bytes[11],
uuid_bytes[12], uuid_bytes[13], uuid_bytes[14], uuid_bytes[15]);
static constexpr char digits[16] = {'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
for (int i = 0; i < 16; ++i) {
uint8_t byte = uuid_bytes[i];
uuid += digits[byte >> 4];
uuid += digits[byte & 0xFu];
}
// breakpad id age.
uuid += '0';
}
nsAutoString pathStr;
mozilla::Unused << NS_WARN_IF(
NS_FAILED(NS_CopyNativeToUnicode(nsDependentCString(path), pathStr)));
std::string pathStr = path;
nsAutoString nameStr = pathStr;
int32_t pos = nameStr.RFindChar('/');
if (pos != kNotFound) {
nameStr.Cut(0, pos + 1);
}
size_t pos = pathStr.rfind('/');
std::string nameStr =
(pos != std::string::npos) ? pathStr.substr(pos + 1) : pathStr;
const NXArchInfo* archInfo =
NXGetArchInfoFromCpuType(header->cputype, header->cpusubtype);
info.AddSharedLibrary(SharedLibrary(start, start + size, 0, uuid, nameStr,
pathStr, nameStr, pathStr, EmptyCString(),
pathStr, nameStr, pathStr, std::string{},
archInfo ? archInfo->name : ""));
}
// Translate the statically stored sSharedLibrariesList information into a
// SharedLibraryInfo object.
SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
mozilla::StaticMutexAutoLock lock(sSharedLibrariesMutex);
SharedLibrariesLock lock;
SharedLibraryInfo sharedLibraryInfo;
for (auto& info : *sSharedLibrariesList) {

View file

@ -13,12 +13,11 @@
# include <psapi.h>
# include "BaseProfilerSharedLibraries.h"
# include "nsWindowsHelpers.h"
# include "mozilla/UniquePtr.h"
# include "mozilla/Unused.h"
# include "nsNativeCharsetUtils.h"
# include "nsPrintfCString.h"
# include "nsReadableUtils.h"
# include <string>
# define CV_SIGNATURE 0x53445352 // 'SDSR'
@ -31,8 +30,41 @@ struct CodeViewRecord70 {
char pdbFileName[1];
};
static bool GetPdbInfo(uintptr_t aStart, nsID& aSignature, uint32_t& aAge,
char** aPdbName) {
static constexpr char digits[16] = {'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
static void AppendHex(const unsigned char* aBegin, const unsigned char* aEnd,
std::string& aOut) {
for (const unsigned char* p = aBegin; p < aEnd; ++p) {
unsigned char c = *p;
aOut += digits[c >> 4];
aOut += digits[c & 0xFu];
}
}
static constexpr bool WITH_PADDING = true;
static constexpr bool WITHOUT_PADDING = false;
template <typename T>
static void AppendHex(T aValue, std::string& aOut, bool aWithPadding) {
for (int i = sizeof(T) * 2 - 1; i >= 0; --i) {
unsigned nibble = (aValue >> (i * 4)) & 0xFu;
// If no padding was requested, skip leading zeroes -- unless we're on the
// very last nibble (so we don't output an empty string).
if (!aWithPadding && i != 0) {
if (nibble == 0) {
// Requested no padding, skip zeroes.
continue;
}
// Requested no padding, got first non-zero, pretend we now want padding
// so we don't skip zeroes anymore.
aWithPadding = true;
}
aOut += digits[nibble];
}
}
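// Illustrative results of the helpers above (values are made up):
//
//   std::string s;
//   AppendHex(uint16_t(0x001A), s, WITH_PADDING);   // s == "001A"
//   AppendHex(uint32_t(0x2A), s, WITHOUT_PADDING);  // s == "001A2A", no leading zeroes
//
// WITHOUT_PADDING matches how the PDB age is appended to the breakpad ID below.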
static bool GetPdbInfo(uintptr_t aStart, std::string& aSignature,
uint32_t& aAge, char** aPdbName) {
if (!aStart) {
return false;
}
@ -69,10 +101,13 @@ static bool GetPdbInfo(uintptr_t aStart, nsID& aSignature, uint32_t& aAge,
aAge = debugInfo->pdbAge;
GUID& pdbSignature = debugInfo->pdbSignature;
aSignature.m0 = pdbSignature.Data1;
aSignature.m1 = pdbSignature.Data2;
aSignature.m2 = pdbSignature.Data3;
memcpy(aSignature.m3, pdbSignature.Data4, sizeof(pdbSignature.Data4));
AppendHex(pdbSignature.Data1, aSignature, WITH_PADDING);
AppendHex(pdbSignature.Data2, aSignature, WITH_PADDING);
AppendHex(pdbSignature.Data3, aSignature, WITH_PADDING);
AppendHex(reinterpret_cast<const unsigned char*>(&pdbSignature.Data4),
reinterpret_cast<const unsigned char*>(&pdbSignature.Data4) +
sizeof(pdbSignature.Data4),
aSignature);
// The PDB file name could be different from the module filename, so report
// both; e.g. the PDB for C:\Windows\SysWOW64\ntdll.dll is wntdll.pdb.
@ -81,32 +116,31 @@ static bool GetPdbInfo(uintptr_t aStart, nsID& aSignature, uint32_t& aAge,
return true;
}
static nsCString GetVersion(WCHAR* dllPath) {
DWORD infoSize = GetFileVersionInfoSizeW(dllPath, nullptr);
static std::string GetVersion(char* dllPath) {
DWORD infoSize = GetFileVersionInfoSizeA(dllPath, nullptr);
if (infoSize == 0) {
return EmptyCString();
return {};
}
mozilla::UniquePtr<unsigned char[]> infoData =
mozilla::MakeUnique<unsigned char[]>(infoSize);
if (!GetFileVersionInfoW(dllPath, 0, infoSize, infoData.get())) {
return EmptyCString();
if (!GetFileVersionInfoA(dllPath, 0, infoSize, infoData.get())) {
return {};
}
VS_FIXEDFILEINFO* vInfo;
UINT vInfoLen;
if (!VerQueryValueW(infoData.get(), L"\\", (LPVOID*)&vInfo, &vInfoLen)) {
return EmptyCString();
return {};
}
if (!vInfo) {
return EmptyCString();
return {};
}
nsPrintfCString version("%d.%d.%d.%d", vInfo->dwFileVersionMS >> 16,
vInfo->dwFileVersionMS & 0xFFFF,
vInfo->dwFileVersionLS >> 16,
vInfo->dwFileVersionLS & 0xFFFF);
return std::move(version);
return std::to_string(vInfo->dwFileVersionMS >> 16) + '.' +
std::to_string(vInfo->dwFileVersionMS & 0xFFFF) + '.' +
std::to_string(vInfo->dwFileVersionLS >> 16) + '.' +
std::to_string(vInfo->dwFileVersionLS & 0xFFFF);
}
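// Illustrative example (values are made up): for dwFileVersionMS == 0x00010002
// and dwFileVersionLS == 0x00030004, GetVersion() above returns "1.2.3.4".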
SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
@ -133,13 +167,13 @@ SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
}
for (unsigned int i = 0; i < modulesNum; i++) {
nsAutoString pdbPathStr;
nsAutoString pdbNameStr;
std::string pdbPathStr;
std::string pdbNameStr;
char* pdbName = NULL;
WCHAR modulePath[MAX_PATH + 1];
char modulePath[MAX_PATH + 1];
if (!GetModuleFileNameEx(hProcess, hMods[i], modulePath,
sizeof(modulePath) / sizeof(WCHAR))) {
sizeof(modulePath) / sizeof(char))) {
continue;
}
@ -149,13 +183,14 @@ SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
continue;
}
nsCString breakpadId;
std::string breakpadId;
// Load the module again to make sure that its handle will remain
// valid as we attempt to read the PDB information from it. We load the
// DLL as a datafile so that if the module actually gets unloaded between
// the call to EnumProcessModules and the following LoadLibraryEx, we don't
// end up running the now newly loaded module's DllMain function. If the
// module is already loaded, LoadLibraryEx just increments its refcount.
// the call to EnumProcessModules and the following LoadLibraryEx, we
// don't end up running the now newly loaded module's DllMain function. If
// the module is already loaded, LoadLibraryEx just increments its
// refcount.
//
// Note that because of the race condition above, merely loading the DLL
// again is not safe enough, therefore we also need to make sure that we
@ -164,37 +199,28 @@ SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() {
HMODULE handleLock =
LoadLibraryEx(modulePath, NULL, LOAD_LIBRARY_AS_DATAFILE);
MEMORY_BASIC_INFORMATION vmemInfo = {0};
nsID pdbSig;
std::string pdbSig;
uint32_t pdbAge;
if (handleLock &&
sizeof(vmemInfo) ==
VirtualQuery(module.lpBaseOfDll, &vmemInfo, sizeof(vmemInfo)) &&
vmemInfo.State == MEM_COMMIT &&
GetPdbInfo((uintptr_t)module.lpBaseOfDll, pdbSig, pdbAge, &pdbName)) {
MOZ_ASSERT(breakpadId.IsEmpty());
breakpadId.AppendPrintf(
"%08X" // m0
"%04X%04X" // m1,m2
"%02X%02X%02X%02X%02X%02X%02X%02X" // m3
"%X", // pdbAge
pdbSig.m0, pdbSig.m1, pdbSig.m2, pdbSig.m3[0], pdbSig.m3[1],
pdbSig.m3[2], pdbSig.m3[3], pdbSig.m3[4], pdbSig.m3[5], pdbSig.m3[6],
pdbSig.m3[7], pdbAge);
MOZ_ASSERT(breakpadId.empty());
breakpadId += pdbSig;
AppendHex(pdbAge, breakpadId, WITHOUT_PADDING);
pdbPathStr = NS_ConvertUTF8toUTF16(pdbName);
pdbNameStr = pdbPathStr;
int32_t pos = pdbNameStr.RFindChar('\\');
if (pos != kNotFound) {
pdbNameStr.Cut(0, pos + 1);
}
pdbPathStr = pdbName;
size_t pos = pdbPathStr.rfind('\\');
pdbNameStr =
(pos != std::string::npos) ? pdbPathStr.substr(pos + 1) : pdbPathStr;
}
nsAutoString modulePathStr(modulePath);
nsAutoString moduleNameStr = modulePathStr;
int32_t pos = moduleNameStr.RFindChar('\\');
if (pos != kNotFound) {
moduleNameStr.Cut(0, pos + 1);
}
std::string modulePathStr(modulePath);
size_t pos = modulePathStr.rfind('\\');
std::string moduleNameStr = (pos != std::string::npos)
? modulePathStr.substr(pos + 1)
: modulePathStr;
SharedLibrary shlib((uintptr_t)module.lpBaseOfDll,
(uintptr_t)module.lpBaseOfDll + module.SizeOfImage,

View file

@ -31,10 +31,6 @@ MFBT_API size_t __dl_get_mappable_length(void* handle);
MFBT_API void* __dl_mmap(void* handle, void* addr, size_t length, off_t offset);
MFBT_API void __dl_munmap(void* handle, void* addr, size_t length);
}
// The following are for get_installation_lib_dir()
# include "nsString.h"
# include "nsDirectoryServiceUtils.h"
# include "nsDirectoryServiceDefs.h"
# endif
// A helper function for creating failure error messages in
@ -100,28 +96,6 @@ bool AutoObjectMapperPOSIX::Map(/*OUT*/ void** start, /*OUT*/ size_t* length,
}
# if defined(GP_OS_android)
// A helper function for AutoObjectMapperFaultyLib::Map. Finds out
// where the installation's lib directory is, since we'll have to look
// in there to get hold of libmozglue.so. Returned C string is heap
// allocated and the caller must deallocate it.
static char* get_installation_lib_dir() {
nsCOMPtr<nsIProperties> directoryService(
do_GetService(NS_DIRECTORY_SERVICE_CONTRACTID));
if (!directoryService) {
return nullptr;
}
nsCOMPtr<nsIFile> greDir;
nsresult rv = directoryService->Get(NS_GRE_DIR, NS_GET_IID(nsIFile),
getter_AddRefs(greDir));
if (NS_FAILED(rv)) return nullptr;
nsCString path;
rv = greDir->GetNativePath(path);
if (NS_FAILED(rv)) {
return nullptr;
}
return strdup(path.get());
}
AutoObjectMapperFaultyLib::AutoObjectMapperFaultyLib(void (*aLog)(const char*))
: AutoObjectMapperPOSIX(aLog), mHdl(nullptr) {}
@ -148,45 +122,7 @@ bool AutoObjectMapperFaultyLib::Map(/*OUT*/ void** start,
/*OUT*/ size_t* length,
std::string fileName) {
MOZ_ASSERT(!mHdl);
if (fileName == "libmozglue.so") {
// Do (2) in the comment above.
char* libdir = get_installation_lib_dir();
if (libdir) {
fileName = std::string(libdir) + "/lib/" + fileName;
free(libdir);
}
// Hand the problem off to the standard mapper.
return AutoObjectMapperPOSIX::Map(start, length, fileName);
} else {
// Do cases (1) and (3) in the comment above. We have to
// grapple with faulty.lib directly.
void* hdl = dlopen(fileName.c_str(), RTLD_GLOBAL | RTLD_LAZY);
if (!hdl) {
failedToMessage(mLog, "get handle for ELF file", fileName);
return false;
}
size_t sz = __dl_get_mappable_length(hdl);
if (sz == 0) {
dlclose(hdl);
failedToMessage(mLog, "get size for ELF file", fileName);
return false;
}
void* image = __dl_mmap(hdl, nullptr, sz, 0);
if (image == MAP_FAILED) {
dlclose(hdl);
failedToMessage(mLog, "mmap ELF file", fileName);
return false;
}
mHdl = hdl;
mImage = *start = image;
mSize = *length = sz;
return true;
}
return false;
}
# endif // defined(GP_OS_android)

View file

@ -51,6 +51,7 @@
# include <errno.h>
# include <fcntl.h>
# include <libgen.h>
# include <stdio.h>
# include <string.h>
# include <sys/mman.h>

View file

@ -34,7 +34,7 @@ void read_procmaps(lul::LUL* aLUL) {
for (size_t i = 0; i < info.GetSize(); i++) {
const SharedLibrary& lib = info.GetEntry(i);
std::string nativePath = lib.GetNativeDebugPath();
std::string nativePath = lib.GetDebugPath();
# if defined(GP_OS_android)
// We're using faulty.lib. Use a special-case object mapper.
@ -52,7 +52,7 @@ void read_procmaps(lul::LUL* aLUL) {
if (ok && image && size > 0) {
aLUL->NotifyAfterMap(lib.GetStart(), lib.GetEnd() - lib.GetStart(),
nativePath.c_str(), image);
} else if (!ok && lib.GetDebugName().IsEmpty()) {
} else if (!ok && lib.GetDebugName().empty()) {
// The object has no name and (as a consequence) the mapper failed to map
// it. This happens on Linux, where GetInfoForSelf() produces such a
// mapping for the VDSO. This is a problem on x86-{linux,android} because
@ -74,11 +74,13 @@ void read_procmaps(lul::LUL* aLUL) {
// LUL needs a callback for its logging sink.
void logging_sink_for_LUL(const char* str) {
// These are only printed when Verbose logging is enabled (e.g. with
// MOZ_LOG="prof:5"). This is because LUL's logging is much more verbose than
// the rest of the profiler's logging, which occurs at the Info (3) and Debug
// (4) levels.
MOZ_LOG(gProfilerLog, mozilla::LogLevel::Verbose,
("[%d] %s", profiler_current_process_id(), str));
// MOZ_BASE_PROFILER_VERBOSE_LOGGING=1). This is because LUL's logging is much
// more verbose than the rest of the profiler's logging, which occurs at the
// Info (3) and Debug (4) levels.
// FIXME: This causes a build failure in memory/replace/dmd/test/SmokeDMD (!)
// and other places, because it doesn't link the implementation in
// platform.cpp.
// VERBOSE_LOG("[%d] %s", profiler_current_process_id(), str);
}
#endif // MOZ_BASE_PROFILER

View file

@ -8,6 +8,8 @@
#include "platform.h"
#include "BaseProfiler.h"
// Find out, in a platform-dependent way, where the code modules got
// mapped in the process' virtual address space, and get |aLUL| to
// load unwind info for them.

View file

@ -3,8 +3,8 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef PROFILEJSONWRITER_H
#define PROFILEJSONWRITER_H
#ifndef BASEPROFILEJSONWRITER_H
#define BASEPROFILEJSONWRITER_H
#include "BaseProfiler.h"
@ -144,4 +144,4 @@ class JSONSchemaWriter {
~JSONSchemaWriter() { mWriter.EndObject(); }
};
#endif // PROFILEJSONWRITER_H
#endif // BASEPROFILEJSONWRITER_H

View file

@ -13,8 +13,8 @@
// (markers) used for filtering. The samples include both native stacks and
// platform-independent "label stack" frames.
#ifndef GeckoProfiler_h
#define GeckoProfiler_h
#ifndef BaseProfiler_h
#define BaseProfiler_h
// Everything in here is also safe to include unconditionally, and only defines
// empty macros if MOZ_GECKO_PROFILER or MOZ_BASE_PROFILER is unset.
@ -30,7 +30,7 @@
# else
// Other platforms are currently not supported. But you may uncomment the
// following line to enable Base Profiler in your build.
//# define MOZ_BASE_PROFILER
//# define MOZ_BASE_PROFILER
# endif
#endif // MOZ_GECKO_PROFILER
@ -54,23 +54,15 @@
# define AUTO_PROFILER_THREAD_SLEEP
# define AUTO_PROFILER_THREAD_WAKE
# define PROFILER_JS_INTERRUPT_CALLBACK()
# define PROFILER_SET_JS_CONTEXT(cx)
# define PROFILER_CLEAR_JS_CONTEXT()
# define AUTO_PROFILER_LABEL(label, categoryPair)
# define AUTO_PROFILER_LABEL_CATEGORY_PAIR(categoryPair)
# define AUTO_PROFILER_LABEL_DYNAMIC_CSTR(label, categoryPair, cStr)
# define AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(label, categoryPair, nsCStr)
# define AUTO_PROFILER_LABEL_DYNAMIC_LOSSY_NSSTRING(label, categoryPair, nsStr)
# define AUTO_PROFILER_LABEL_DYNAMIC_STRING(label, categoryPair, str)
# define AUTO_PROFILER_LABEL_FAST(label, categoryPair, ctx)
# define AUTO_PROFILER_LABEL_DYNAMIC_FAST(label, dynamicString, categoryPair, \
ctx, flags)
# define PROFILER_ADD_MARKER(markerName, categoryPair)
# define PROFILER_ADD_NETWORK_MARKER(uri, pri, channel, type, start, end, \
count, cache, timings, redirect)
# define DECLARE_DOCSHELL_AND_HISTORY_ID(docShell)
# define PROFILER_TRACING(categoryString, markerName, categoryPair, kind)
@ -87,9 +79,8 @@
#else // !MOZ_BASE_PROFILER
# include "js/ProfilingStack.h"
# include "js/RootingAPI.h"
# include "js/TypeDecls.h"
# include "BaseProfilingStack.h"
# include "mozilla/Assertions.h"
# include "mozilla/Atomics.h"
# include "mozilla/Attributes.h"
@ -99,22 +90,13 @@
# include "mozilla/ThreadLocal.h"
# include "mozilla/TimeStamp.h"
# include "mozilla/UniquePtr.h"
# include "nscore.h"
# include "nsID.h"
# include "nsString.h"
# include <stdint.h>
# include <string>
class ProfilerBacktrace;
class ProfilerMarkerPayload;
class SpliceableJSONWriter;
namespace mozilla {
namespace net {
struct TimingStruct;
enum CacheDisposition : uint8_t;
} // namespace net
} // namespace mozilla
class nsIURI;
namespace mozilla {
class MallocAllocPolicy;
@ -240,12 +222,12 @@ class RacyFeatures {
// We combine the active bit with the feature bits so they can be read or
// written in a single atomic operation. Accesses to this atomic are not
// recorded by web replay as they may occur at non-deterministic points.
static mozilla::Atomic<uint32_t, mozilla::MemoryOrdering::Relaxed,
recordreplay::Behavior::DontPreserve>
MFBT_DATA static mozilla::Atomic<uint32_t, mozilla::MemoryOrdering::Relaxed,
recordreplay::Behavior::DontPreserve>
sActiveAndFeatures;
};
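// An illustrative sketch of the "active bit + feature bits in one atomic"
// scheme described above; the bit layout and helper names are assumptions, not
// the actual RacyFeatures implementation:
//
//   static constexpr uint32_t kActive = 1u << 31;
//   static void SetActive(uint32_t aFeatures) {
//     sActiveAndFeatures = kActive | aFeatures;
//   }
//   static bool IsActiveWithFeature(uint32_t aFeature) {
//     uint32_t af = sActiveAndFeatures;  // single atomic read
//     return (af & kActive) && (af & aFeature);
//   }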
bool IsThreadBeingProfiled();
MFBT_API bool IsThreadBeingProfiled();
} // namespace detail
} // namespace profiler
@ -278,14 +260,14 @@ static constexpr uint32_t PROFILER_DEFAULT_STARTUP_ENTRIES =
// also be started. This call must happen before any other profiler calls
// (except profiler_start(), which will call profiler_init() if it hasn't
// already run).
void profiler_init(void* stackTop);
MFBT_API void profiler_init(void* stackTop);
# define AUTO_PROFILER_INIT mozilla::AutoProfilerInit PROFILER_RAII
// Clean up the profiler module, stopping it if required. This function may
// also save a shutdown profile if requested. No profiler calls should happen
// after this point and all profiling stack labels should have been popped.
void profiler_shutdown();
MFBT_API void profiler_shutdown();
// Start the profiler -- initializing it first if necessary -- with the
// selected options. Stops and restarts the profiler if it is already active.
@ -303,21 +285,21 @@ void profiler_shutdown();
// (b) the filter is of the form "pid:<n>" where n is the process
// id of the process that the thread is running in.
// "aDuration" is the duration of entries in the profiler's circular buffer.
void profiler_start(
MFBT_API void profiler_start(
uint32_t aCapacity, double aInterval, uint32_t aFeatures,
const char** aFilters, uint32_t aFilterCount,
const mozilla::Maybe<double>& aDuration = mozilla::Nothing());
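// An illustrative startup call; the capacity, interval, feature mask, and
// filter strings are example values only:
//
//   const char* filters[] = {"GeckoMain", "pid:1234"};
//   profiler_start(/* aCapacity */ 1000000, /* aInterval */ 1.0,
//                  /* aFeatures */ 0, filters, 2);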
// Stop the profiler and discard the profile without saving it. A no-op if the
// profiler is inactive. After stopping the profiler is "inactive".
void profiler_stop();
MFBT_API void profiler_stop();
// If the profiler is inactive, start it. If it's already active, restart it if
// the requested settings differ from the current settings. Both the check and
// the state change are performed while the profiler state is locked.
// The only difference from profiler_start is that the current buffer contents
// are not discarded if the profiler is already running with the requested
// settings.
void profiler_ensure_started(
MFBT_API void profiler_ensure_started(
uint32_t aCapacity, double aInterval, uint32_t aFeatures,
const char** aFilters, uint32_t aFilterCount,
const mozilla::Maybe<double>& aDuration = mozilla::Nothing());
@ -334,8 +316,9 @@ void profiler_ensure_started(
profiler_register_thread(name, &stackTop); \
} while (0)
# define PROFILER_UNREGISTER_THREAD() profiler_unregister_thread()
ProfilingStack* profiler_register_thread(const char* name, void* guessStackTop);
void profiler_unregister_thread();
MFBT_API ProfilingStack* profiler_register_thread(const char* name,
void* guessStackTop);
MFBT_API void profiler_unregister_thread();
// Register pages with the profiler.
//
@ -355,19 +338,21 @@ void profiler_unregister_thread();
// "aHistoryId" is the ID of the history entry on the given docShell.
// "aUrl" is the URL of the page.
// "aIsSubFrame" is true if the page is a sub frame.
void profiler_register_page(const nsID& aDocShellId, uint32_t aHistoryId,
const nsCString& aUrl, bool aIsSubFrame);
MFBT_API void profiler_register_page(const std::string& aDocShellId,
uint32_t aHistoryId,
const std::string& aUrl, bool aIsSubFrame);
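// An illustrative registration call; the docshell ID, history ID, and URL are
// example values only:
//
//   profiler_register_page("{01234567-89ab-cdef-0123-456789abcdef}",
//                          /* aHistoryId */ 1, "https://example.org/",
//                          /* aIsSubFrame */ false);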
// Unregister pages with the profiler.
//
// Take a docShellId and unregister all the page entries that have the given ID.
void profiler_unregister_pages(const nsID& aRegisteredDocShellId);
MFBT_API void profiler_unregister_pages(
const std::string& aRegisteredDocShellId);
// Remove all registered and unregistered pages in the profiler.
void profiler_clear_all_pages();
class BaseProfilerCount;
void profiler_add_sampled_counter(BaseProfilerCount* aCounter);
void profiler_remove_sampled_counter(BaseProfilerCount* aCounter);
MFBT_API void profiler_add_sampled_counter(BaseProfilerCount* aCounter);
MFBT_API void profiler_remove_sampled_counter(BaseProfilerCount* aCounter);
// Register and unregister a thread within a scope.
# define AUTO_PROFILER_REGISTER_THREAD(name) \
@ -379,15 +364,15 @@ void profiler_remove_sampled_counter(BaseProfilerCount* aCounter);
// Timeline markers will still be stored. This feature will keep JavaScript
// profiling enabled, thus allowing toggling the profiler without invalidating
// the JIT.
void profiler_pause();
void profiler_resume();
MFBT_API void profiler_pause();
MFBT_API void profiler_resume();
// These functions tell the profiler that a thread went to sleep so that we can
// avoid sampling it while it's sleeping. Calling profiler_thread_sleep()
// twice without an intervening profiler_thread_wake() is an error. All three
// functions operate the same whether the profiler is active or inactive.
void profiler_thread_sleep();
void profiler_thread_wake();
MFBT_API void profiler_thread_sleep();
MFBT_API void profiler_thread_wake();
// Mark a thread as asleep/awake within a scope.
# define AUTO_PROFILER_THREAD_SLEEP \
@ -395,18 +380,6 @@ void profiler_thread_wake();
# define AUTO_PROFILER_THREAD_WAKE \
mozilla::AutoProfilerThreadWake PROFILER_RAII
// Called by the JSRuntime's operation callback. This is used to start profiling
// on auxiliary threads. Operates the same whether the profiler is active or
// not.
# define PROFILER_JS_INTERRUPT_CALLBACK() profiler_js_interrupt_callback()
void profiler_js_interrupt_callback();
// Set and clear the current thread's JSContext.
# define PROFILER_SET_JS_CONTEXT(cx) profiler_set_js_context(cx)
# define PROFILER_CLEAR_JS_CONTEXT() profiler_clear_js_context()
void profiler_set_js_context(JSContext* aCx);
void profiler_clear_js_context();
//---------------------------------------------------------------------------
// Get information from the profiler
//---------------------------------------------------------------------------
@ -439,40 +412,40 @@ inline bool profiler_thread_is_being_profiled() {
}
// Is the profiler active and paused? Returns false if the profiler is inactive.
bool profiler_is_paused();
MFBT_API bool profiler_is_paused();
// Is the current thread sleeping?
bool profiler_thread_is_sleeping();
MFBT_API bool profiler_thread_is_sleeping();
// Get all the features supported by the profiler that are accepted by
// profiler_start(). The result is the same whether the profiler is active or
// not.
uint32_t profiler_get_available_features();
MFBT_API uint32_t profiler_get_available_features();
// Check if a profiler feature (specified via the ProfilerFeature type) is
// active. Returns false if the profiler is inactive. Note: the return value
// can become immediately out-of-date, much like the return value of
// profiler_is_active().
bool profiler_feature_active(uint32_t aFeature);
MFBT_API bool profiler_feature_active(uint32_t aFeature);
// Get the params used to start the profiler. Returns 0 and an empty vector
// (via outparams) if the profiler is inactive. The features returned may
// differ slightly from those requested due to required adjustments.
void profiler_get_start_params(
MFBT_API void profiler_get_start_params(
int* aEntrySize, mozilla::Maybe<double>* aDuration, double* aInterval,
uint32_t* aFeatures,
mozilla::Vector<const char*, 0, mozilla::MallocAllocPolicy>* aFilters);
// The number of milliseconds since the process started. Operates the same
// whether the profiler is active or inactive.
double profiler_time();
MFBT_API double profiler_time();
// Get the current process's ID.
int profiler_current_process_id();
MFBT_API int profiler_current_process_id();
// Get the current thread's ID.
int profiler_current_thread_id();
MFBT_API int profiler_current_thread_id();
// An object of this class is passed to profiler_suspend_and_sample_thread().
// For each stack frame, one of the Collect methods will be called.
@ -499,10 +472,6 @@ class ProfilerStackCollector {
virtual void CollectNativeLeafAddr(void* aAddr) = 0;
virtual void CollectJitReturnAddr(void* aAddr) = 0;
virtual void CollectWasmFrame(const char* aLabel) = 0;
virtual void CollectProfilingStackFrame(
const js::ProfilingStackFrame& aFrame) = 0;
};
@ -511,12 +480,12 @@ class ProfilerStackCollector {
// profiling stack, JS stack, and (optionally) native stack, passing the
// collected frames into aCollector. aFeatures dictates which compiler features
// are used. |Privacy| and |Leaf| are the only relevant ones.
void profiler_suspend_and_sample_thread(int aThreadId, uint32_t aFeatures,
ProfilerStackCollector& aCollector,
bool aSampleNative = true);
MFBT_API void profiler_suspend_and_sample_thread(
int aThreadId, uint32_t aFeatures, ProfilerStackCollector& aCollector,
bool aSampleNative = true);
struct ProfilerBacktraceDestructor {
void operator()(ProfilerBacktrace*);
MFBT_API void operator()(ProfilerBacktrace*);
};
using UniqueProfilerBacktrace =
@ -524,7 +493,7 @@ using UniqueProfilerBacktrace =
// Immediately capture the current thread's call stack and return it. A no-op
// if the profiler is inactive or in privacy mode.
UniqueProfilerBacktrace profiler_get_backtrace();
MFBT_API UniqueProfilerBacktrace profiler_get_backtrace();
struct ProfilerBufferInfo {
uint64_t mRangeStart;
@ -538,7 +507,7 @@ struct ProfilerBufferInfo {
// This information may be useful to a user-interface displaying the current
// status of the profiler, allowing the user to get a sense for how fast the
// buffer is being written to, and how much data is visible.
mozilla::Maybe<ProfilerBufferInfo> profiler_get_buffer_info();
MFBT_API mozilla::Maybe<ProfilerBufferInfo> profiler_get_buffer_info();
//---------------------------------------------------------------------------
// Put profiling data into the profiler (labels and markers)
@ -591,38 +560,20 @@ mozilla::Maybe<ProfilerBufferInfo> profiler_get_buffer_info();
mozilla::AutoProfilerLabel PROFILER_RAII( \
label, cStr, JS::ProfilingCategoryPair::categoryPair)
// Similar to AUTO_PROFILER_LABEL_DYNAMIC_CSTR, but takes an nsACString.
// Similar to AUTO_PROFILER_LABEL_DYNAMIC_CSTR, but takes an std::string.
//
// Note: The use of the Maybe<>s ensures the scopes for the dynamic string and
// the AutoProfilerLabel are appropriate, while also not incurring the runtime
// cost of the string assignment unless the profiler is active. Therefore,
// unlike AUTO_PROFILER_LABEL and AUTO_PROFILER_LABEL_DYNAMIC_CSTR, this macro
// doesn't push/pop a label when the profiler is inactive.
# define AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(label, categoryPair, nsCStr) \
mozilla::Maybe<nsAutoCString> autoCStr; \
mozilla::Maybe<mozilla::AutoProfilerLabel> raiiObjectNsCString; \
if (profiler_is_active()) { \
autoCStr.emplace(nsCStr); \
raiiObjectNsCString.emplace(label, autoCStr->get(), \
JS::ProfilingCategoryPair::categoryPair); \
}
// Similar to AUTO_PROFILER_LABEL_DYNAMIC_CSTR, but takes an nsString that is
// is lossily converted to an ASCII string.
//
// Note: The use of the Maybe<>s ensures the scopes for the converted dynamic
// string and the AutoProfilerLabel are appropriate, while also not incurring
// the runtime cost of the string conversion unless the profiler is active.
// Therefore, unlike AUTO_PROFILER_LABEL and AUTO_PROFILER_LABEL_DYNAMIC_CSTR,
// this macro doesn't push/pop a label when the profiler is inactive.
# define AUTO_PROFILER_LABEL_DYNAMIC_LOSSY_NSSTRING(label, categoryPair, \
nsStr) \
mozilla::Maybe<NS_LossyConvertUTF16toASCII> asciiStr; \
mozilla::Maybe<mozilla::AutoProfilerLabel> raiiObjectLossyNsString; \
if (profiler_is_active()) { \
asciiStr.emplace(nsStr); \
raiiObjectLossyNsString.emplace( \
label, asciiStr->get(), JS::ProfilingCategoryPair::categoryPair); \
# define AUTO_PROFILER_LABEL_DYNAMIC_STRING(label, categoryPair, str) \
mozilla::Maybe<std::string> autoStr; \
mozilla::Maybe<mozilla::AutoProfilerLabel> raiiObjectString; \
if (profiler_is_active()) { \
autoStr.emplace(str); \
raiiObjectString.emplace(label, autoStr->c_str(), \
JS::ProfilingCategoryPair::categoryPair); \
}
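// An illustrative use of the macro above; the function, label, and category are
// examples only:
//
//   void LoadResource(const std::string& aSpec) {
//     AUTO_PROFILER_LABEL_DYNAMIC_STRING("LoadResource", OTHER, aSpec);
//     // ... the label stays on the profiling stack for the rest of the scope.
//   }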
// Similar to AUTO_PROFILER_LABEL, but accepting a JSContext* parameter, and a
@ -655,33 +606,19 @@ mozilla::Maybe<ProfilerBufferInfo> profiler_get_buffer_info();
# define PROFILER_ADD_MARKER(markerName, categoryPair) \
profiler_add_marker(markerName, JS::ProfilingCategoryPair::categoryPair)
void profiler_add_marker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair);
void profiler_add_marker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair,
mozilla::UniquePtr<ProfilerMarkerPayload> aPayload);
void profiler_add_js_marker(const char* aMarkerName);
MFBT_API void profiler_add_marker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair);
MFBT_API void profiler_add_marker(
const char* aMarkerName, JS::ProfilingCategoryPair aCategoryPair,
mozilla::UniquePtr<ProfilerMarkerPayload> aPayload);
MFBT_API void profiler_add_js_marker(const char* aMarkerName);
// Insert a marker in the profile timeline for a specified thread.
void profiler_add_marker_for_thread(
MFBT_API void profiler_add_marker_for_thread(
int aThreadId, JS::ProfilingCategoryPair aCategoryPair,
const char* aMarkerName,
mozilla::UniquePtr<ProfilerMarkerPayload> aPayload);
enum class NetworkLoadType { LOAD_START, LOAD_STOP, LOAD_REDIRECT };
# define PROFILER_ADD_NETWORK_MARKER(uri, pri, channel, type, start, end, \
count, cache, timings, redirect) \
profiler_add_network_marker(uri, pri, channel, type, start, end, count, \
cache, timings, redirect)
void profiler_add_network_marker(
nsIURI* aURI, int32_t aPriority, uint64_t aChannelId, NetworkLoadType aType,
mozilla::TimeStamp aStart, mozilla::TimeStamp aEnd, int64_t aCount,
mozilla::net::CacheDisposition aCacheDisposition,
const mozilla::net::TimingStruct* aTimings = nullptr,
nsIURI* aRedirectURI = nullptr);
enum TracingKind {
TRACING_EVENT,
TRACING_INTERVAL_START,
@ -690,7 +627,7 @@ enum TracingKind {
// Helper macro to retrieve DocShellId and DocShellHistoryId from docShell
# define DECLARE_DOCSHELL_AND_HISTORY_ID(docShell) \
mozilla::Maybe<nsID> docShellId; \
mozilla::Maybe<std::string> docShellId; \
mozilla::Maybe<uint32_t> docShellHistoryId; \
if (docShell) { \
docShellId = mozilla::Some(docShell->HistoryID()); \
@ -719,16 +656,16 @@ enum TracingKind {
JS::ProfilingCategoryPair::categoryPair, kind, \
docShellId, docShellHistoryId)
void profiler_tracing(
MFBT_API void profiler_tracing(
const char* aCategoryString, const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair, TracingKind aKind,
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<std::string>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing());
void profiler_tracing(
MFBT_API void profiler_tracing(
const char* aCategoryString, const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair, TracingKind aKind,
UniqueProfilerBacktrace aCause,
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<std::string>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing());
// Adds a START/END pair of tracing markers.
@ -749,19 +686,19 @@ void profiler_tracing(
// markers will still be displayed in the same "row" in the UI.
// Another difference is that text markers combine the start and end markers
// into one marker.
void profiler_add_text_marker(
const char* aMarkerName, const nsACString& aText,
MFBT_API void profiler_add_text_marker(
const char* aMarkerName, const std::string& aText,
JS::ProfilingCategoryPair aCategoryPair,
const mozilla::TimeStamp& aStartTime, const mozilla::TimeStamp& aEndTime,
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<std::string>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing(),
UniqueProfilerBacktrace aCause = nullptr);
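// An illustrative call; the marker name, text, category, and timestamps are
// example values only:
//
//   mozilla::TimeStamp start = mozilla::TimeStamp::Now();
//   // ... timed work ...
//   profiler_add_text_marker("MyPhase", std::string("details about the work"),
//                            JS::ProfilingCategoryPair::OTHER, start,
//                            mozilla::TimeStamp::Now());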
class MOZ_RAII AutoProfilerTextMarker {
public:
AutoProfilerTextMarker(const char* aMarkerName, const nsACString& aText,
AutoProfilerTextMarker(const char* aMarkerName, const std::string& aText,
JS::ProfilingCategoryPair aCategoryPair,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<std::string>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId,
UniqueProfilerBacktrace&& aCause =
nullptr MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
@ -784,11 +721,11 @@ class MOZ_RAII AutoProfilerTextMarker {
protected:
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
const char* mMarkerName;
nsCString mText;
std::string mText;
const JS::ProfilingCategoryPair mCategoryPair;
mozilla::TimeStamp mStartTime;
UniqueProfilerBacktrace mCause;
const mozilla::Maybe<nsID> mDocShellId;
const mozilla::Maybe<std::string> mDocShellId;
const mozilla::Maybe<uint32_t> mDocShellHistoryId;
};
@ -817,30 +754,24 @@ class MOZ_RAII AutoProfilerTextMarker {
//---------------------------------------------------------------------------
// Set a user-friendly process name, used in JSON stream.
void profiler_set_process_name(const nsACString& aProcessName);
MFBT_API void profiler_set_process_name(const std::string& aProcessName);
// Get the profile encoded as a JSON string. A no-op (returning nullptr) if the
// profiler is inactive.
// If aIsShuttingDown is true, the current time is included as the process
// shutdown time in the JSON's "meta" object.
mozilla::UniquePtr<char[]> profiler_get_profile(double aSinceTime = 0,
bool aIsShuttingDown = false);
MFBT_API mozilla::UniquePtr<char[]> profiler_get_profile(
double aSinceTime = 0, bool aIsShuttingDown = false);
// Write the profile for this process (excluding subprocesses) into aWriter.
// Returns false if the profiler is inactive.
bool profiler_stream_json_for_this_process(SpliceableJSONWriter& aWriter,
double aSinceTime = 0,
bool aIsShuttingDown = false);
MFBT_API bool profiler_stream_json_for_this_process(
SpliceableJSONWriter& aWriter, double aSinceTime = 0,
bool aIsShuttingDown = false);
// Get the profile and write it into a file. A no-op if the profiler is
// inactive.
//
// This function is 'extern "C"' so that it is easily callable from a debugger
// in a build without debugging information (a workaround for
// http://llvm.org/bugs/show_bug.cgi?id=22211).
extern "C" {
void profiler_save_profile_to_file(const char* aFilename);
}
MFBT_API void profiler_save_profile_to_file(const char* aFilename);
//---------------------------------------------------------------------------
// RAII classes
@ -930,19 +861,7 @@ class MOZ_RAII AutoProfilerLabel {
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
// Get the ProfilingStack from TLS.
Push(sProfilingStack.get(), aLabel, aDynamicString, aCategoryPair, aFlags);
}
// This is the AUTO_PROFILER_LABEL_FAST variant. It retrieves the
// ProfilingStack from the JSContext and does nothing if the profiler is
// inactive.
AutoProfilerLabel(JSContext* aJSContext, const char* aLabel,
const char* aDynamicString,
JS::ProfilingCategoryPair aCategoryPair,
uint32_t aFlags MOZ_GUARD_OBJECT_NOTIFIER_PARAM) {
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
Push(js::GetContextProfilingStackIfEnabled(aJSContext), aLabel,
aDynamicString, aCategoryPair, aFlags);
Push(GetProfilingStack(), aLabel, aDynamicString, aCategoryPair, aFlags);
}
void Push(ProfilingStack* aProfilingStack, const char* aLabel,
@ -965,6 +884,8 @@ class MOZ_RAII AutoProfilerLabel {
}
}
MFBT_API static ProfilingStack* GetProfilingStack();
private:
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
@ -981,7 +902,7 @@ class MOZ_RAII AutoProfilerTracing {
public:
AutoProfilerTracing(const char* aCategoryString, const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<std::string>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId
MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
: mCategoryString(aCategoryString),
@ -997,7 +918,7 @@ class MOZ_RAII AutoProfilerTracing {
AutoProfilerTracing(const char* aCategoryString, const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair,
UniqueProfilerBacktrace aBacktrace,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<std::string>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId
MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
: mCategoryString(aCategoryString),
@ -1021,7 +942,7 @@ class MOZ_RAII AutoProfilerTracing {
const char* mCategoryString;
const char* mMarkerName;
const JS::ProfilingCategoryPair mCategoryPair;
const mozilla::Maybe<nsID> mDocShellId;
const mozilla::Maybe<std::string> mDocShellId;
const mozilla::Maybe<uint32_t> mDocShellHistoryId;
};
@ -1030,11 +951,11 @@ class MOZ_RAII AutoProfilerTracing {
// to make that child process start with the same profiler settings as
// in the current process. The given function is invoked once for
// each variable to be set.
void GetProfilerEnvVarsForChildProcess(
MFBT_API void GetProfilerEnvVarsForChildProcess(
std::function<void(const char* key, const char* value)>&& aSetEnv);
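// An illustrative caller sketch; `childEnv` and the lambda body are examples
// only:
//
//   std::vector<std::string> childEnv;
//   mozilla::GetProfilerEnvVarsForChildProcess(
//       [&](const char* aKey, const char* aValue) {
//         childEnv.push_back(std::string(aKey) + '=' + aValue);
//       });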
} // namespace mozilla
#endif // !MOZ_BASE_PROFILER
#endif // GeckoProfiler_h
#endif // BaseProfiler_h

View file

@ -4,8 +4,8 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfilerCounts_h
#define ProfilerCounts_h
#ifndef BaseProfilerCounts_h
#define BaseProfilerCounts_h
#ifndef MOZ_BASE_PROFILER
@ -21,8 +21,8 @@
# include "mozilla/Atomics.h"
class BaseProfilerCount;
void profiler_add_sampled_counter(BaseProfilerCount* aCounter);
void profiler_remove_sampled_counter(BaseProfilerCount* aCounter);
MFBT_API void profiler_add_sampled_counter(BaseProfilerCount* aCounter);
MFBT_API void profiler_remove_sampled_counter(BaseProfilerCount* aCounter);
typedef mozilla::Atomic<int64_t, mozilla::MemoryOrdering::Relaxed>
ProfilerAtomicSigned;
@ -268,4 +268,4 @@ class ProfilerCounterTotal final : public BaseProfilerCount {
#endif // !MOZ_BASE_PROFILER
#endif // ProfilerCounts_h
#endif // BaseProfilerCounts_h

View file

@ -4,8 +4,8 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfilerMarkerPayload_h
#define ProfilerMarkerPayload_h
#ifndef BaseProfilerMarkerPayload_h
#define BaseProfilerMarkerPayload_h
#include "BaseProfiler.h"
@ -19,19 +19,6 @@
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/UniquePtrExtensions.h"
#include "mozilla/net/TimingStruct.h"
#include "nsString.h"
#include "js/Utility.h"
#include "gfxASurface.h"
#include "mozilla/ServoTraversalStatistics.h"
namespace mozilla {
namespace layers {
class Layer;
} // namespace layers
} // namespace mozilla
class SpliceableJSONWriter;
class UniqueStacks;
@ -44,7 +31,7 @@ class UniqueStacks;
class ProfilerMarkerPayload {
public:
explicit ProfilerMarkerPayload(
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<std::string>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing(),
UniqueProfilerBacktrace aStack = nullptr)
: mStack(std::move(aStack)),
@ -53,7 +40,7 @@ class ProfilerMarkerPayload {
ProfilerMarkerPayload(
const mozilla::TimeStamp& aStartTime, const mozilla::TimeStamp& aEndTime,
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<std::string>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing(),
UniqueProfilerBacktrace aStack = nullptr)
: mStartTime(aStartTime),
@ -71,10 +58,12 @@ class ProfilerMarkerPayload {
mozilla::TimeStamp GetStartTime() const { return mStartTime; }
protected:
void StreamType(const char* aMarkerType, SpliceableJSONWriter& aWriter);
void StreamCommonProps(const char* aMarkerType, SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks);
MFBT_API void StreamType(const char* aMarkerType,
SpliceableJSONWriter& aWriter);
MFBT_API void StreamCommonProps(const char* aMarkerType,
SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks);
void SetStack(UniqueProfilerBacktrace aStack) { mStack = std::move(aStack); }
@ -83,7 +72,7 @@ class ProfilerMarkerPayload {
mDocShellHistoryId = aDocShellHistoryId;
}
void SetDocShellId(const mozilla::Maybe<nsID>& aDocShellId) {
void SetDocShellId(const mozilla::Maybe<std::string>& aDocShellId) {
mDocShellId = aDocShellId;
}
@ -91,7 +80,7 @@ class ProfilerMarkerPayload {
mozilla::TimeStamp mStartTime;
mozilla::TimeStamp mEndTime;
UniqueProfilerBacktrace mStack;
mozilla::Maybe<nsID> mDocShellId;
mozilla::Maybe<std::string> mDocShellId;
mozilla::Maybe<uint32_t> mDocShellHistoryId;
};
@ -106,7 +95,7 @@ class TracingMarkerPayload : public ProfilerMarkerPayload {
public:
TracingMarkerPayload(
const char* aCategory, TracingKind aKind,
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<std::string>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing(),
UniqueProfilerBacktrace aCause = nullptr)
: mCategory(aCategory), mKind(aKind) {
@ -147,41 +136,23 @@ class FileIOMarkerPayload : public ProfilerMarkerPayload {
mozilla::UniqueFreePtr<char> mFilename;
};
class DOMEventMarkerPayload : public TracingMarkerPayload {
public:
DOMEventMarkerPayload(const nsAString& aEventType,
const mozilla::TimeStamp& aTimeStamp,
const char* aCategory, TracingKind aKind,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: TracingMarkerPayload(aCategory, aKind, aDocShellId, aDocShellHistoryId),
mTimeStamp(aTimeStamp),
mEventType(aEventType) {}
DECL_STREAM_PAYLOAD
private:
mozilla::TimeStamp mTimeStamp;
nsString mEventType;
};
class UserTimingMarkerPayload : public ProfilerMarkerPayload {
public:
UserTimingMarkerPayload(const nsAString& aName,
UserTimingMarkerPayload(const std::string& aName,
const mozilla::TimeStamp& aStartTime,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<std::string>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
aDocShellHistoryId),
mEntryType("mark"),
mName(aName) {}
UserTimingMarkerPayload(const nsAString& aName,
const mozilla::Maybe<nsString>& aStartMark,
const mozilla::Maybe<nsString>& aEndMark,
UserTimingMarkerPayload(const std::string& aName,
const mozilla::Maybe<std::string>& aStartMark,
const mozilla::Maybe<std::string>& aEndMark,
const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<std::string>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId),
@ -195,136 +166,9 @@ class UserTimingMarkerPayload : public ProfilerMarkerPayload {
private:
// Either "mark" or "measure".
const char* mEntryType;
nsString mName;
mozilla::Maybe<nsString> mStartMark;
mozilla::Maybe<nsString> mEndMark;
};
// Contains the translation applied to a 2d layer so we can track the layer
// position at each frame.
class LayerTranslationMarkerPayload : public ProfilerMarkerPayload {
public:
LayerTranslationMarkerPayload(mozilla::layers::Layer* aLayer,
mozilla::gfx::Point aPoint,
mozilla::TimeStamp aStartTime)
: ProfilerMarkerPayload(aStartTime, aStartTime),
mLayer(aLayer),
mPoint(aPoint) {}
DECL_STREAM_PAYLOAD
private:
mozilla::layers::Layer* mLayer;
mozilla::gfx::Point mPoint;
};
#include "Units.h" // For ScreenIntPoint
// Tracks when a vsync occurs according to the HardwareComposer.
class VsyncMarkerPayload : public ProfilerMarkerPayload {
public:
explicit VsyncMarkerPayload(mozilla::TimeStamp aVsyncTimestamp)
: ProfilerMarkerPayload(aVsyncTimestamp, aVsyncTimestamp) {}
DECL_STREAM_PAYLOAD
};
class NetworkMarkerPayload : public ProfilerMarkerPayload {
public:
NetworkMarkerPayload(int64_t aID, const char* aURI, NetworkLoadType aType,
const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime, int32_t aPri,
int64_t aCount,
mozilla::net::CacheDisposition aCacheDisposition,
const mozilla::net::TimingStruct* aTimings = nullptr,
const char* aRedirectURI = nullptr)
: ProfilerMarkerPayload(aStartTime, aEndTime, mozilla::Nothing()),
mID(aID),
mURI(aURI ? strdup(aURI) : nullptr),
mRedirectURI(aRedirectURI && (strlen(aRedirectURI) > 0)
? strdup(aRedirectURI)
: nullptr),
mType(aType),
mPri(aPri),
mCount(aCount),
mCacheDisposition(aCacheDisposition) {
if (aTimings) {
mTimings = *aTimings;
}
}
DECL_STREAM_PAYLOAD
private:
int64_t mID;
mozilla::UniqueFreePtr<char> mURI;
mozilla::UniqueFreePtr<char> mRedirectURI;
NetworkLoadType mType;
int32_t mPri;
int64_t mCount;
mozilla::net::TimingStruct mTimings;
mozilla::net::CacheDisposition mCacheDisposition;
};
class ScreenshotPayload : public ProfilerMarkerPayload {
public:
explicit ScreenshotPayload(mozilla::TimeStamp aTimeStamp,
nsCString&& aScreenshotDataURL,
const mozilla::gfx::IntSize& aWindowSize,
uintptr_t aWindowIdentifier)
: ProfilerMarkerPayload(aTimeStamp, mozilla::TimeStamp()),
mScreenshotDataURL(std::move(aScreenshotDataURL)),
mWindowSize(aWindowSize),
mWindowIdentifier(aWindowIdentifier) {}
DECL_STREAM_PAYLOAD
private:
nsCString mScreenshotDataURL;
mozilla::gfx::IntSize mWindowSize;
uintptr_t mWindowIdentifier;
};
class GCSliceMarkerPayload : public ProfilerMarkerPayload {
public:
GCSliceMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
JS::UniqueChars&& aTimingJSON)
: ProfilerMarkerPayload(aStartTime, aEndTime),
mTimingJSON(std::move(aTimingJSON)) {}
DECL_STREAM_PAYLOAD
private:
JS::UniqueChars mTimingJSON;
};
class GCMajorMarkerPayload : public ProfilerMarkerPayload {
public:
GCMajorMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
JS::UniqueChars&& aTimingJSON)
: ProfilerMarkerPayload(aStartTime, aEndTime),
mTimingJSON(std::move(aTimingJSON)) {}
DECL_STREAM_PAYLOAD
private:
JS::UniqueChars mTimingJSON;
};
class GCMinorMarkerPayload : public ProfilerMarkerPayload {
public:
GCMinorMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
JS::UniqueChars&& aTimingData)
: ProfilerMarkerPayload(aStartTime, aEndTime),
mTimingData(std::move(aTimingData)) {}
DECL_STREAM_PAYLOAD
private:
JS::UniqueChars mTimingData;
std::string mName;
mozilla::Maybe<std::string> mStartMark;
mozilla::Maybe<std::string> mEndMark;
};
class HangMarkerPayload : public ProfilerMarkerPayload {
@ -337,28 +181,6 @@ class HangMarkerPayload : public ProfilerMarkerPayload {
private:
};
class StyleMarkerPayload : public ProfilerMarkerPayload {
public:
StyleMarkerPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
UniqueProfilerBacktrace aCause,
const mozilla::ServoTraversalStatistics& aStats,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId),
mStats(aStats) {
if (aCause) {
SetStack(std::move(aCause));
}
}
DECL_STREAM_PAYLOAD
private:
mozilla::ServoTraversalStatistics mStats;
};
class LongTaskMarkerPayload : public ProfilerMarkerPayload {
public:
LongTaskMarkerPayload(const mozilla::TimeStamp& aStartTime,
@ -370,27 +192,27 @@ class LongTaskMarkerPayload : public ProfilerMarkerPayload {
class TextMarkerPayload : public ProfilerMarkerPayload {
public:
TextMarkerPayload(const nsACString& aText,
TextMarkerPayload(const std::string& aText,
const mozilla::TimeStamp& aStartTime)
: ProfilerMarkerPayload(aStartTime, aStartTime), mText(aText) {}
TextMarkerPayload(const nsACString& aText,
TextMarkerPayload(const std::string& aText,
const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime), mText(aText) {}
TextMarkerPayload(const nsACString& aText,
TextMarkerPayload(const std::string& aText,
const mozilla::TimeStamp& aStartTime,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<std::string>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
aDocShellHistoryId),
mText(aText) {}
TextMarkerPayload(const nsACString& aText,
TextMarkerPayload(const std::string& aText,
const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime,
const mozilla::Maybe<nsID>& aDocShellId,
const mozilla::Maybe<std::string>& aDocShellId,
const mozilla::Maybe<uint32_t>& aDocShellHistoryId,
UniqueProfilerBacktrace aCause = nullptr)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
@ -400,7 +222,7 @@ class TextMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
nsCString mText;
std::string mText;
};
class LogMarkerPayload : public ProfilerMarkerPayload {
@ -414,8 +236,8 @@ class LogMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
nsAutoCStringN<32> mModule; // longest known LazyLogModule name is ~24
nsCString mText;
std::string mModule; // longest known LazyLogModule name is ~24
std::string mText;
};
#endif // ProfilerMarkerPayload_h
#endif // BaseProfilerMarkerPayload_h

View file

@ -13,10 +13,6 @@
# error Do not #include this header when MOZ_BASE_PROFILER is not #defined.
#endif
#include "nsNativeCharsetUtils.h"
#include "nsString.h"
#include <nsID.h>
#include <algorithm>
#include <stdint.h>
#include <stdlib.h>
@ -26,9 +22,9 @@
class SharedLibrary {
public:
SharedLibrary(uintptr_t aStart, uintptr_t aEnd, uintptr_t aOffset,
const nsCString& aBreakpadId, const nsString& aModuleName,
const nsString& aModulePath, const nsString& aDebugName,
const nsString& aDebugPath, const nsCString& aVersion,
const std::string& aBreakpadId, const std::string& aModuleName,
const std::string& aModulePath, const std::string& aDebugName,
const std::string& aDebugPath, const std::string& aVersion,
const char* aArch)
: mStart(aStart),
mEnd(aEnd),
@ -83,19 +79,12 @@ class SharedLibrary {
uintptr_t GetStart() const { return mStart; }
uintptr_t GetEnd() const { return mEnd; }
uintptr_t GetOffset() const { return mOffset; }
const nsCString& GetBreakpadId() const { return mBreakpadId; }
const nsString& GetModuleName() const { return mModuleName; }
const nsString& GetModulePath() const { return mModulePath; }
const std::string GetNativeDebugPath() const {
nsAutoCString debugPathStr;
NS_CopyUnicodeToNative(mDebugPath, debugPathStr);
return debugPathStr.get();
}
const nsString& GetDebugName() const { return mDebugName; }
const nsString& GetDebugPath() const { return mDebugPath; }
const nsCString& GetVersion() const { return mVersion; }
const std::string& GetBreakpadId() const { return mBreakpadId; }
const std::string& GetModuleName() const { return mModuleName; }
const std::string& GetModulePath() const { return mModulePath; }
const std::string& GetDebugName() const { return mDebugName; }
const std::string& GetDebugPath() const { return mDebugPath; }
const std::string& GetVersion() const { return mVersion; }
const std::string& GetArch() const { return mArch; }
private:
@ -104,12 +93,12 @@ class SharedLibrary {
uintptr_t mStart;
uintptr_t mEnd;
uintptr_t mOffset;
nsCString mBreakpadId;
nsString mModuleName;
nsString mModulePath;
nsString mDebugName;
nsString mDebugPath;
nsCString mVersion;
std::string mBreakpadId;
std::string mModuleName;
std::string mModulePath;
std::string mDebugName;
std::string mDebugPath;
std::string mVersion;
std::string mArch;
};

View file

@ -4,8 +4,8 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_ProfilingCategory_h
#define js_ProfilingCategory_h
#ifndef BaseProfilingCategory_h
#define BaseProfilingCategory_h
#include "BaseProfiler.h"
@ -13,7 +13,9 @@
# error Do not #include this header when MOZ_BASE_PROFILER is not #defined.
#endif
#include "jstypes.h" // JS_FRIEND_API
#include "mozilla/Types.h"
#include <cstdint>
// clang-format off
@ -121,9 +123,9 @@ struct ProfilingCategoryPairInfo {
const char* mLabel;
};
JS_FRIEND_API const ProfilingCategoryPairInfo& GetProfilingCategoryPairInfo(
MFBT_API const ProfilingCategoryPairInfo& GetBaseProfilingCategoryPairInfo(
ProfilingCategoryPair aCategoryPair);
} // namespace JS
#endif /* js_ProfilingCategory_h */
#endif /* BaseProfilingCategory_h */

View file

@ -4,8 +4,12 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_ProfilingStack_h
#define js_ProfilingStack_h
#ifndef BaseProfilingStack_h
#define BaseProfilingStack_h
#include "BaseProfilingCategory.h"
#include "mozilla/Atomics.h"
#include "BaseProfiler.h"
@ -16,25 +20,6 @@
#include <algorithm>
#include <stdint.h>
#include "jstypes.h"
#include "js/ProfilingCategory.h"
#include "js/TypeDecls.h"
#include "js/Utility.h"
#ifdef JS_BROKEN_GCC_ATTRIBUTE_WARNING
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wattributes"
#endif // JS_BROKEN_GCC_ATTRIBUTE_WARNING
class JS_PUBLIC_API JSTracer;
#ifdef JS_BROKEN_GCC_ATTRIBUTE_WARNING
# pragma GCC diagnostic pop
#endif // JS_BROKEN_GCC_ATTRIBUTE_WARNING
class ProfilingStack;
// This file defines the classes ProfilingStack and ProfilingStackFrame.
// The ProfilingStack manages an array of ProfilingStackFrames.
// It keeps track of the "label stack" and the JS interpreter stack.
@ -168,8 +153,6 @@ class ProfilingStackFrame {
mozilla::recordreplay::Behavior::DontPreserve>
flagsAndCategoryPair_;
static int32_t pcToOffset(JSScript* aScript, jsbytecode* aPc);
public:
ProfilingStackFrame() = default;
ProfilingStackFrame& operator=(const ProfilingStackFrame& other) {
@ -270,7 +253,7 @@ class ProfilingStackFrame {
uint32_t(Flags::LABEL_DETERMINED_BY_CATEGORY_PAIR)) {
auto categoryPair = JS::ProfilingCategoryPair(
flagsAndCategoryPair >> uint32_t(Flags::FLAGS_BITCOUNT));
return JS::GetProfilingCategoryPairInfo(categoryPair).mLabel;
return JS::GetBaseProfilingCategoryPairInfo(categoryPair).mLabel;
}
return label_;
}
@ -302,11 +285,11 @@ class ProfilingStackFrame {
}
void initJsFrame(const char* aLabel, const char* aDynamicString,
JSScript* aScript, jsbytecode* aPc) {
void* /* JSScript* */ aScript, int32_t aOffset) {
label_ = aLabel;
dynamicString_ = aDynamicString;
spOrScript = aScript;
pcOffsetIfJS_ = pcToOffset(aScript, aPc);
pcOffsetIfJS_ = aOffset;
flagsAndCategoryPair_ =
uint32_t(Flags::IS_JS_FRAME) | (uint32_t(JS::ProfilingCategoryPair::JS)
<< uint32_t(Flags::FLAGS_BITCOUNT));
@ -327,20 +310,25 @@ class ProfilingStackFrame {
return spOrScript;
}
JS_PUBLIC_API JSScript* script() const;
// Note that the pointer returned might be invalid.
JSScript* rawScript() const {
void* rawScript() const {
MOZ_ASSERT(isJsFrame());
void* script = spOrScript;
return static_cast<JSScript*>(script);
return spOrScript;
}
void setRawScript(void* aScript) {
MOZ_ASSERT(isJsFrame());
spOrScript = aScript;
}
// We can't know the layout of JSScript, so look in vm/GeckoProfiler.cpp.
JS_FRIEND_API jsbytecode* pc() const;
void setPC(jsbytecode* pc);
int32_t pcOffset() const {
MOZ_ASSERT(isJsFrame());
return pcOffsetIfJS_;
}
void trace(JSTracer* trc);
void setPCOffset(int32_t aOffset) {
MOZ_ASSERT(isJsFrame());
pcOffsetIfJS_ = aOffset;
}
// The offset of a pc into a script's code can actually be 0, so to
// signify a nullptr pc, use a -1 index. This is checked against in
@ -348,18 +336,10 @@ class ProfilingStackFrame {
static const int32_t NullPCOffset = -1;
};
JS_FRIEND_API void SetContextProfilingStack(JSContext* cx,
ProfilingStack* profilingStack);
// GetContextProfilingStack also exists, but it's defined in RootingAPI.h.
JS_FRIEND_API void EnableContextProfilingStack(JSContext* cx, bool enabled);
JS_FRIEND_API void RegisterContextProfilingEventMarker(JSContext* cx,
void (*fn)(const char*));
} // namespace js
class ProfilingStack;
namespace JS {
typedef ProfilingStack* (*RegisterThreadCallback)(const char* threadName,
@ -367,7 +347,7 @@ typedef ProfilingStack* (*RegisterThreadCallback)(const char* threadName,
typedef void (*UnregisterThreadCallback)();
JS_FRIEND_API void SetProfilingThreadCallbacks(
MFBT_API void SetProfilingThreadCallbacks(
RegisterThreadCallback registerThread,
UnregisterThreadCallback unregisterThread);
@ -396,7 +376,7 @@ class ProfilingStack final {
public:
ProfilingStack() : stackPointer(0) {}
~ProfilingStack();
MFBT_API ~ProfilingStack();
void pushLabelFrame(const char* label, const char* dynamicString, void* sp,
JS::ProfilingCategoryPair categoryPair,
@ -438,8 +418,8 @@ class ProfilingStack final {
stackPointer = oldStackPointer + 1;
}
void pushJsFrame(const char* label, const char* dynamicString,
JSScript* script, jsbytecode* pc) {
void pushJsOffsetFrame(const char* label, const char* dynamicString,
void* script, int32_t offset) {
// This thread is the only one that ever changes the value of
// stackPointer. Only load the atomic once.
uint32_t oldStackPointer = stackPointer;
@ -447,7 +427,7 @@ class ProfilingStack final {
if (MOZ_UNLIKELY(oldStackPointer >= capacity)) {
ensureCapacitySlow();
}
frames[oldStackPointer].initJsFrame(label, dynamicString, script, pc);
frames[oldStackPointer].initJsFrame(label, dynamicString, script, offset);
// This must happen at the end, see the comment in pushLabelFrame.
stackPointer = stackPointer + 1;
@ -470,7 +450,7 @@ class ProfilingStack final {
private:
// Out of line path for expanding the buffer, since otherwise this would get
// inlined in every DOM WebIDL call.
MOZ_COLD void ensureCapacitySlow();
MFBT_API MOZ_COLD void ensureCapacitySlow();
// No copying.
ProfilingStack(const ProfilingStack&) = delete;
@ -525,7 +505,7 @@ class GeckoProfilerThread {
ProfilingStack* profilingStackIfEnabled_;
public:
GeckoProfilerThread();
MFBT_API GeckoProfilerThread();
uint32_t stackPointer() {
MOZ_ASSERT(infraInstalled());
@ -544,26 +524,12 @@ class GeckoProfilerThread {
*/
bool infraInstalled() { return profilingStack_ != nullptr; }
void setProfilingStack(ProfilingStack* profilingStack, bool enabled);
MFBT_API void setProfilingStack(ProfilingStack* profilingStack, bool enabled);
void enable(bool enable) {
profilingStackIfEnabled_ = enable ? profilingStack_ : nullptr;
}
void trace(JSTracer* trc);
/*
* Functions which are the actual instrumentation to track run information
*
* - enter: a function has started to execute
* - updatePC: updates the pc information about where a function
* is currently executing
* - exit: this function has ceased execution, and no further
* entries/exits will be made
*/
bool enter(JSContext* cx, JSScript* script);
void exit(JSContext* cx, JSScript* script);
inline void updatePC(JSContext* cx, JSScript* script, jsbytecode* pc);
};
} // namespace js
#endif /* js_ProfilingStack_h */
#endif /* BaseProfilingStack_h */